680 files changed, 5827 insertions, 3303 deletions
diff --git a/.gitignore b/.gitignore
index 705e09913dc2..1be78fd8163b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,7 @@ all.config
 
 # Kdevelop4
 *.kdev4
+
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 611a75e4366e..badb26ac33dc 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -570,7 +570,9 @@ your driver if they're helpful, or just use plain hex constants.
 The device IDs are arbitrary hex numbers (vendor controlled) and normally used
 only in a single location, the pci_device_id table.
 
-Please DO submit new vendor/device IDs to http://pciids.sourceforge.net/.
+Please DO submit new vendor/device IDs to http://pci-ids.ucw.cz/.
+There are mirrors of the pci.ids file at http://pciids.sourceforge.net/
+and https://github.com/pciutils/pciids.
 
 
 
diff --git a/Documentation/devicetree/bindings/misc/arm-charlcd.txt b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
index e28e2aac47f1..e28e2aac47f1 100644
--- a/Documentation/devicetree/bindings/misc/arm-charlcd.txt
+++ b/Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt
index 3c81f78b5c27..5d254ab13ebf 100644
--- a/Documentation/devicetree/bindings/power/wakeup-source.txt
+++ b/Documentation/devicetree/bindings/power/wakeup-source.txt
@@ -60,7 +60,7 @@ Examples
 #size-cells = <0>;
 
 button@1 {
-debounce_interval = <50>;
+debounce-interval = <50>;
 wakeup-source;
 linux,code = <116>;
 label = "POWER";
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
index 28be51afdb6a..379eb763073e 100644
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
@@ -22,7 +22,32 @@ Optional properties:
 - clocks : thermal sensor's clock source.
 
 Example:
+ocotp: ocotp@21bc000 {
+#address-cells = <1>;
+#size-cells = <1>;
+compatible = "fsl,imx6sx-ocotp", "syscon";
+reg = <0x021bc000 0x4000>;
+clocks = <&clks IMX6SX_CLK_OCOTP>;
 
+tempmon_calib: calib@38 {
+reg = <0x38 4>;
+};
+
+tempmon_temp_grade: temp-grade@20 {
+reg = <0x20 4>;
+};
+};
+
+tempmon: tempmon {
+compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon";
+interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+fsl,tempmon = <&anatop>;
+nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+nvmem-cell-names = "calib", "temp_grade";
+clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+};
+
+Legacy method (Deprecated):
 tempmon {
 compatible = "fsl,imx6q-tempmon";
 fsl,tempmon = <&anatop>;
diff --git a/Documentation/gpu/tve200.rst b/Documentation/gpu/tve200.rst
index 69b17b324e12..152ea9398f7e 100644
--- a/Documentation/gpu/tve200.rst
+++ b/Documentation/gpu/tve200.rst
@@ -3,4 +3,4 @@
 ==================================
 
 .. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
-:doc: Faraday TV Encoder 200
+:doc: Faraday TV Encoder TVE200 DRM Driver
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d47702456926..65514c251318 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -28,8 +28,10 @@ Supported adapters:
 * Intel Wildcat Point (PCH)
 * Intel Wildcat Point-LP (PCH)
 * Intel BayTrail (SOC)
+* Intel Braswell (SOC)
 * Intel Sunrise Point-H (PCH)
 * Intel Sunrise Point-LP (PCH)
+* Intel Kaby Lake-H (PCH)
 * Intel DNV (SOC)
 * Intel Broxton (SOC)
 * Intel Lewisburg (PCH)
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index 2f09455a993a..d47480b61ac6 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -13,6 +13,7 @@ The following technologies are described:
 * Generic Segmentation Offload - GSO
 * Generic Receive Offload - GRO
 * Partial Generic Segmentation Offload - GSO_PARTIAL
+* SCTP accelleration with GSO - GSO_BY_FRAGS
 
 TCP Segmentation Offload
 ========================
@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments. Many of the requirements for UDP
 fragmentation offload are the same as TSO. However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.
 
+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================
 
@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.
 
-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload. In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload. In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.
 
 Generic Segmentation Offload
 ============================
@@ -128,3 +133,28 @@ values for if the header was simply duplicated. The one exception to this
 is the outer IPv4 ID field. It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP accelleration with GSO
+===========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
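
The GSO_BY_FRAGS rule introduced above is the part most likely to trip up core-stack code. A rough sketch of the required pattern follows (not taken from the patch; skb_payload_fits() and the mtu parameter are made up for illustration, the skb helpers are the usual ones from <linux/skbuff.h>):

	#include <linux/skbuff.h>

	/* Hedged sketch: a gso_size-based length check must special-case
	 * GSO_BY_FRAGS before treating gso_size as a byte count. */
	static bool skb_payload_fits(const struct sk_buff *skb, unsigned int mtu)
	{
		if (!skb_is_gso(skb))
			return skb->len <= mtu;

		/* SCTP GSO: sizes live in the chained fragments, not gso_size */
		if (skb_shinfo(skb)->gso_size == GSO_BY_FRAGS)
			return true;	/* defer to the skb_gso_validate_*_len helpers */

		return skb_shinfo(skb)->gso_size <= mtu;
	}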
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 792fa8717d13..d6b3ff51a14f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
 flag KVM_VM_MIPS_VZ.
 
 
-4.3 KVM_GET_MSR_INDEX_LIST
+4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
 
-Capability: basic
+Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
 Architectures: x86
-Type: system
+Type: system ioctl
 Parameters: struct kvm_msr_list (in/out)
 Returns: 0 on success; -1 on error
 Errors:
+EFAULT: the msr index list cannot be read from or written to
 E2BIG: the msr index list is to be to fit in the array specified by
 the user.
 
@@ -139,16 +140,23 @@ struct kvm_msr_list {
 __u32 indices[0];
 };
 
-This ioctl returns the guest msrs that are supported. The list varies
-by kvm version and host processor, but does not change otherwise. The
-user fills in the size of the indices array in nmsrs, and in return
-kvm adjusts nmsrs to reflect the actual number of msrs and fills in
-the indices array with their numbers.
+The user fills in the size of the indices array in nmsrs, and in return
+kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
+indices array with their numbers.
+
+KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list
+varies by kvm version and host processor, but does not change otherwise.
 
 Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
 not returned in the MSR list, as different vcpus can have a different number
 of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
+to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities
+and processor features that are exposed via MSRs (e.g., VMX capabilities).
+This list also varies by kvm version and host processor, but does not change
+otherwise.
+
 
 4.4 KVM_CHECK_EXTENSION
 
@@ -475,14 +483,22 @@ Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
 
 4.18 KVM_GET_MSRS
 
-Capability: basic
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
 Architectures: x86
-Type: vcpu ioctl
+Type: system ioctl, vcpu ioctl
 Parameters: struct kvm_msrs (in/out)
-Returns: 0 on success, -1 on error
+Returns: number of msrs successfully returned;
+-1 on error
+
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM. This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
+in a system ioctl.
 
+When used as a vcpu ioctl:
 Reads model-specific registers from the vcpu. Supported msr indices can
-be obtained using KVM_GET_MSR_INDEX_LIST.
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
 
 struct kvm_msrs {
 __u32 nmsrs; /* number of msrs in entries */
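
For readers who want to see the new system-ioctl path end to end, here is a minimal userspace sketch (illustrative only; it assumes nothing beyond what the text above documents: /dev/kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES, struct kvm_msr_list, and the E2BIG resizing convention):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		struct kvm_msr_list probe = { .nmsrs = 0 };
		struct kvm_msr_list *list;
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0 || ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
			return 1;

		/* First call fails with E2BIG but writes the required nmsrs back. */
		ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);

		list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
		if (!list)
			return 1;
		list->nmsrs = probe.nmsrs;
		if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) == 0) {
			for (__u32 i = 0; i < list->nmsrs; i++)
				printf("MSR-based feature: 0x%x\n", list->indices[i]);
		}
		free(list);
		return 0;
	}

Each returned index can then be handed to KVM_GET_MSRS issued as a system ioctl to read the feature value itself.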
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index dcab6dc11e3b..87a7506f31c2 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -58,6 +58,10 @@ KVM_FEATURE_PV_TLB_FLUSH || 9 || guest checks this feature bit
 || || before enabling paravirtualized
 || || tlb flush.
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit
+|| || can be enabled by setting bit 2
+|| || when writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 || || per-cpu warps are expected in
 || || kvmclock.
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 1ebecc115dc6..f3f0d57ced8e 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
 when asynchronous page faults are enabled on the vcpu 0 when
 disabled. Bit 1 is 1 if asynchronous page faults can be injected
 when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
-are delivered to L1 as #PF vmexits.
+are delivered to L1 as #PF vmexits. Bit 2 can be set only if
+KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
 
 First 4 byte of 64 byte memory location will be written to by
 the hypervisor at the time of asynchronous page fault (APF)
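
Putting the two documents together: guest code that wants the new delivery mode checks CPUID bit 10 (KVM_FEATURE_ASYNC_PF_VMEXIT) and then sets bit 2 alongside bit 0 when writing MSR 0x4b564d02. A hedged guest-side sketch (apf_area stands in for the guest's 64-byte APF region; that name and the kvm_para_has_feature() usage are assumptions, the bit layout is from the text above):

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) {
		u64 val = __pa(&apf_area)	/* 64-byte aligned buffer */
			  | (1ULL << 0)		/* bit 0: enable async PF */
			  | (1ULL << 2);	/* bit 2: deliver to L1 as #PF vmexit */
		wrmsrl(MSR_KVM_ASYNC_PF_EN, val);
	}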
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
index f3e9d7e9ed6c..2953e3ec9a02 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.txt
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:
 
 The number of online threads is also printed in /proc/cpuinfo "siblings."
 
-- topology_sibling_mask():
+- topology_sibling_cpumask():
 
 The cpumask contains all online threads in the core to which a thread
 belongs.
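
The renamed accessor is easiest to see in use; a hedged one-liner (cpu is any online CPU number, pr_info is only for illustration):

	int sibling;

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		pr_info("CPU%d shares a core with CPU%d\n", cpu, sibling);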
diff --git a/MAINTAINERS b/MAINTAINERS
index 9a7f76eadae9..1c95c6036098 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1238,7 +1238,7 @@ F: drivers/clk/at91
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M: Nicolas Ferre <nicolas.ferre@microchip.com>
-M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W: http://www.linux4sam.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M: Jason Cooper <jason@lakedaemon.net>
 M: Andrew Lunn <andrew@lunn.ch>
 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Gregory Clement <gregory.clement@bootlin.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: Documentation/devicetree/bindings/soc/dove/
@@ -1604,7 +1604,7 @@ F: arch/arm/boot/dts/orion5x*
 ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
 M: Jason Cooper <jason@lakedaemon.net>
 M: Andrew Lunn <andrew@lunn.ch>
-M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Gregory Clement <gregory.clement@bootlin.com>
 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -1999,8 +1999,10 @@ M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
 M: Alexandre Torgue <alexandre.torgue@st.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
 N: stm32
+F: arch/arm/boot/dts/stm32*
+F: arch/arm/mach-stm32/
 F: drivers/clocksource/armv7m_systick.c
 
 ARM/TANGO ARCHITECTURE
@@ -7909,7 +7911,6 @@ S: Maintained
 F: scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
-M: Richard Purdie <rpurdie@rpsys.net>
 M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M: Pavel Machek <pavel@ucw.cz>
 L: linux-leds@vger.kernel.org
@@ -10927,6 +10928,17 @@ L: linux-gpio@vger.kernel.org
 S: Supported
 F: drivers/pinctrl/pinctrl-at91-pio4.*
 
+PIN CONTROLLER - FREESCALE
+M: Dong Aisheng <aisheng.dong@nxp.com>
+M: Fabio Estevam <festevam@gmail.com>
+M: Shawn Guo <shawnguo@kernel.org>
+M: Stefan Agner <stefan@agner.ch>
+R: Pengutronix Kernel Team <kernel@pengutronix.de>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/pinctrl/freescale/
+F: Documentation/devicetree/bindings/pinctrl/fsl,*
+
 PIN CONTROLLER - INTEL
 M: Mika Westerberg <mika.westerberg@linux.intel.com>
 M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...) __xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 cmpxchg_local((ptr), (o), (n)); \
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
 cmpxchg((ptr), (o), (n)); \
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 unsigned long ret, tmp, addr64;
 
+smp_mb();
 __asm__ __volatile__(
 " andnot %4,7,%3\n"
 " insbl %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 " or %1,%2,%2\n"
 " stq_c %2,0(%3)\n"
 " beq %2,2f\n"
-__ASM__MB
 ".subsection 2\n"
 "2: br 1b\n"
 ".previous"
 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 : "r" ((long)m), "1" (val) : "memory");
+smp_mb();
 
 return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 unsigned long ret, tmp, addr64;
 
+smp_mb();
 __asm__ __volatile__(
 " andnot %4,7,%3\n"
 " inswl %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 " or %1,%2,%2\n"
 " stq_c %2,0(%3)\n"
 " beq %2,2f\n"
-__ASM__MB
 ".subsection 2\n"
 "2: br 1b\n"
 ".previous"
 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 : "r" ((long)m), "1" (val) : "memory");
+smp_mb();
 
 return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 unsigned long dummy;
 
+smp_mb();
 __asm__ __volatile__(
 "1: ldl_l %0,%4\n"
 " bis $31,%3,%1\n"
 " stl_c %1,%2\n"
 " beq %1,2f\n"
-__ASM__MB
 ".subsection 2\n"
 "2: br 1b\n"
 ".previous"
 : "=&r" (val), "=&r" (dummy), "=m" (*m)
 : "rI" (val), "m" (*m) : "memory");
+smp_mb();
 
 return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 unsigned long dummy;
 
+smp_mb();
 __asm__ __volatile__(
 "1: ldq_l %0,%4\n"
 " bis $31,%3,%1\n"
 " stq_c %1,%2\n"
 " beq %1,2f\n"
-__ASM__MB
 ".subsection 2\n"
 "2: br 1b\n"
 ".previous"
 : "=&r" (val), "=&r" (dummy), "=m" (*m)
 : "rI" (val), "m" (*m) : "memory");
+smp_mb();
 
 return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 unsigned long prev, tmp, cmp, addr64;
 
+smp_mb();
 __asm__ __volatile__(
 " andnot %5,7,%4\n"
 " insbl %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 " or %1,%2,%2\n"
 " stq_c %2,0(%4)\n"
 " beq %2,3f\n"
-__ASM__MB
 "2:\n"
 ".subsection 2\n"
 "3: br 1b\n"
 ".previous"
 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+smp_mb();
 
 return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 unsigned long prev, tmp, cmp, addr64;
 
+smp_mb();
 __asm__ __volatile__(
 " andnot %5,7,%4\n"
 " inswl %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 " or %1,%2,%2\n"
 " stq_c %2,0(%4)\n"
 " beq %2,3f\n"
-__ASM__MB
 "2:\n"
 ".subsection 2\n"
 "3: br 1b\n"
 ".previous"
 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+smp_mb();
 
 return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 unsigned long prev, cmp;
 
+smp_mb();
 __asm__ __volatile__(
 "1: ldl_l %0,%5\n"
 " cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 " mov %4,%1\n"
 " stl_c %1,%2\n"
 " beq %1,3f\n"
-__ASM__MB
 "2:\n"
 ".subsection 2\n"
 "3: br 1b\n"
 ".previous"
 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
 : "r"((long) old), "r"(new), "m"(*m) : "memory");
+smp_mb();
 
 return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 unsigned long prev, cmp;
 
+smp_mb();
 __asm__ __volatile__(
 "1: ldq_l %0,%5\n"
 " cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 " mov %4,%1\n"
 " stq_c %1,%2\n"
 " beq %1,3f\n"
-__ASM__MB
 "2:\n"
 ".subsection 2\n"
 "3: br 1b\n"
 ".previous"
 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
 : "r"((long) old), "r"(new), "m"(*m) : "memory");
+smp_mb();
 
 return prev;
 }
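
In caller terms, the comment block added above means both primitives now behave as full barriers around the access, whether or not the operation succeeds. A hedged sketch of what that buys (shared, payload, IDLE/BUSY and seen are illustrative names, not from the patch):

	WRITE_ONCE(shared->data, payload);		/* ordered before the ll/sc */
	old = cmpxchg(&shared->state, IDLE, BUSY);	/* smp_mb() before and after */
	if (old != IDLE)
		seen = READ_ONCE(shared->status);	/* ordered after, even on failure */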
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index f3a80cf164cc..d76bf4a83740 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -484,7 +484,6 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
 bool "Emulate unaligned memory access (userspace only)"
-default N
 select SYSCTL_ARCH_UNALIGN_NO_WARN
 select SYSCTL_ARCH_UNALIGN_ALLOW
 depends on ISA_ARCOMPACT
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts
index 70aec7d6ca60..626b694c7be7 100644
--- a/arch/arc/boot/dts/axs101.dts
+++ b/arch/arc/boot/dts/axs101.dts
@@ -17,6 +17,6 @@
 compatible = "snps,axs101", "snps,arc-sdp";
 
 chosen {
-bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60";
+bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60 print-fatal-signals=1";
 };
 };
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 74d070cd3c13..47b74fbc403c 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -214,13 +214,13 @@
 };
 
 eeprom@0x54{
-compatible = "24c01";
+compatible = "atmel,24c01";
 reg = <0x54>;
 pagesize = <0x8>;
 };
 
 eeprom@0x57{
-compatible = "24c04";
+compatible = "atmel,24c04";
 reg = <0x57>;
 pagesize = <0x8>;
 };
diff --git a/arch/arc/boot/dts/haps_hs_idu.dts b/arch/arc/boot/dts/haps_hs_idu.dts
index 215cddd0b63b..0c603308aeb3 100644
--- a/arch/arc/boot/dts/haps_hs_idu.dts
+++ b/arch/arc/boot/dts/haps_hs_idu.dts
@@ -22,7 +22,7 @@
 };
 
 chosen {
-bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug";
+bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5ee96b067c08..ff2f2c70c545 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -17,7 +17,7 @@
 interrupt-parent = <&core_intc>;
 
 chosen {
-bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index 8d787b251f73..8e2489b16b0a 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -24,7 +24,7 @@
 };
 
 chosen {
-bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsim_hs_idu.dts b/arch/arc/boot/dts/nsim_hs_idu.dts
index 4f98ebf71fd8..ed12f494721d 100644
--- a/arch/arc/boot/dts/nsim_hs_idu.dts
+++ b/arch/arc/boot/dts/nsim_hs_idu.dts
@@ -15,7 +15,7 @@
 interrupt-parent = <&core_intc>;
 
 chosen {
-bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index 3c391ba565ed..7842e5eb4ab5 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
 /* this is for console on PGU */
 /* bootargs = "console=tty0 consoleblank=0"; */
 /* this is for console on serial */
-bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 14a727cbf4c9..b8838cf2b4ec 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -20,7 +20,7 @@
 /* this is for console on PGU */
 /* bootargs = "console=tty0 consoleblank=0"; */
 /* this is for console on serial */
-bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index 5052917d4a99..72a2c723f1f7 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -18,7 +18,7 @@
 
 chosen {
 /* this is for console on serial */
-bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
+bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24 print-fatal-signals=1";
 };
 
 aliases {
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index ea022d47896c..21ec82466d62 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 
 #define BUG() do { \
 pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-dump_stack(); \
+barrier_before_unreachable(); \
+__builtin_trap(); \
 } while (0)
 
 #define HAVE_ARCH_BUG
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 257a68f3c2fe..309f4e6721b3 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -184,7 +184,7 @@
 .macro FAKE_RET_FROM_EXCPN
 lr r9, [status32]
 bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
-or r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+or r9, r9, STATUS_IE_MASK
 kflag r9
 .endm
 
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index f61a52b01625..5fe84e481654 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 
 static char smp_cpuinfo_buf[128];
 
+/*
+ * Set mask to halt GFRC if any online core in SMP cluster is halted.
+ * Only works for ARC HS v3.0+, on earlier versions has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+struct bcr_generic gfrc;
+unsigned long flags;
+u32 gfrc_halt_mask;
+
+READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+/*
+ * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+ * GFRC 0x3 version.
+ */
+if (gfrc.ver < 0x3)
+return;
+
+raw_spin_lock_irqsave(&mcip_lock, flags);
+
+__mcip_cmd(CMD_GFRC_READ_CORE, 0);
+gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+gfrc_halt_mask |= BIT(cpu);
+__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+u32 mcip_mask = 0;
+unsigned long flags;
+
+raw_spin_lock_irqsave(&mcip_lock, flags);
+
+/*
+ * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+ * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+ * and CMD_DEBUG_READ_SELECT.
+ */
+__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+mcip_mask |= BIT(cpu);
+
+__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+/*
+ * Parameter specified halt cause:
+ * STATUS32[H]/actionpoint/breakpoint/self-halt
+ * We choose all of them (0xF).
+ */
+__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static void mcip_setup_per_cpu(int cpu)
 {
+struct mcip_bcr mp;
+
+READ_BCR(ARC_REG_MCIP_BCR, mp);
+
 smp_ipi_irq_setup(cpu, IPI_IRQ);
 smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+/* Update GFRC halt mask as new CPU came online */
+if (mp.gfrc)
+mcip_update_gfrc_halt_mask(cpu);
+
+/* Update MCIP debug mask as new CPU came online */
+if (mp.dbg)
+mcip_update_debug_halt_mask(cpu);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
 IS_AVAIL1(mp.gfrc, "GFRC"));
 
 cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
-if (mp.dbg) {
-__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
-__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
-}
 }
 
 struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 9d27331fe69a..b2cae79a25d7 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -51,7 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
 { 0x51, "R2.0" },
 { 0x52, "R2.1" },
 { 0x53, "R3.0" },
-{ 0x54, "R4.0" },
+{ 0x54, "R3.10a" },
 #endif
 { 0x00, NULL }
 };
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
 {
 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 int saved = 0, present = 0;
-char *opt_nm = NULL;;
+char *opt_nm = NULL;
 
 if (!cpu->extn.timer0)
 panic("Timer0 is not present!\n");
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index efe8b4200a67..21d86c36692b 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/reboot.h>
 #include <linux/irqdomain.h>
 #include <linux/export.h>
+#include <linux/of_fdt.h>
 
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void)
 {
 }
 
+static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
+{
+unsigned long dt_root = of_get_flat_dt_root();
+const char *buf;
+
+buf = of_get_flat_dt_prop(dt_root, name, NULL);
+if (!buf)
+return -EINVAL;
+
+if (cpulist_parse(buf, cpumask))
+return -EINVAL;
+
+return 0;
+}
+
+/*
+ * Read from DeviceTree and setup cpu possible mask. If there is no
+ * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
+ */
+static void __init arc_init_cpu_possible(void)
+{
+struct cpumask cpumask;
+
+if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+NR_CPUS);
+
+cpumask_setall(&cpumask);
+}
+
+if (!cpumask_test_cpu(0, &cpumask))
+panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+init_cpu_possible(&cpumask);
+}
+
 /*
  * Called from setup_arch() before calling setup_processor()
  *
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
  */
 void __init smp_init_cpus(void)
 {
-unsigned int i;
-
-for (i = 0; i < NR_CPUS; i++)
-set_cpu_possible(i, true);
+arc_init_cpu_possible();
 
 if (plat_smp_ops.init_early_smp)
 plat_smp_ops.init_early_smp();
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
 /* called from init ( ) => process 1 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-int i;
-
 /*
  * if platform didn't set the present map already, do it now
  * boot cpu is set to present already by init/main.c
  */
-if (num_present_cpus() <= 1) {
-for (i = 0; i < max_cpus; i++)
-set_cpu_present(i, true);
-}
+if (num_present_cpus() <= 1)
+init_cpu_present(cpu_possible_mask);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
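
The effect of the new DT hook is easiest to state in a couple of lines; a hedged sketch of what arc_init_cpu_possible() boils down to when the root node carries, say, possible-cpus = "0-1"; (example value; the property name and parsing come from the code above):

	struct cpumask mask;

	if (!cpulist_parse("0-1", &mask) && cpumask_test_cpu(0, &mask))
		init_cpu_possible(&mask);	/* otherwise fall back to all NR_CPUS */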
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 333daab7def0..183391d4d33a 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 return;
 
 ret_err:
-panic("Attention !!! Dwarf FDE parsing errors\n");;
+panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index eee924dfffa6..2072f3451e9c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -780,7 +780,10 @@ noinline static void slc_entire_op(const int op)
 
 write_aux_reg(r, ctrl);
 
-write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+else
+write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
 
 /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
 read_aux_reg(r);
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 18045c38bcf1..db7cded1b7ad 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -55,7 +55,7 @@
 <0x3ff00100 0x100>;
 };
 
-smc@0x3404c000 {
+smc@3404c000 {
 compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
 reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */
 };
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi
index 6dde95f21cef..266f2611dc22 100644
--- a/arch/arm/boot/dts/bcm21664.dtsi
+++ b/arch/arm/boot/dts/bcm21664.dtsi
@@ -55,7 +55,7 @@
 <0x3ff00100 0x100>;
 };
 
-smc@0x3404e000 {
+smc@3404e000 {
 compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
 reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */
 };
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 0e3d2a5ff208..a5c3824c8056 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -18,10 +18,10 @@
 soc {
 ranges = <0x7e000000 0x20000000 0x02000000>;
 dma-ranges = <0x40000000 0x00000000 0x20000000>;
+};
 
 arm-pmu {
 compatible = "arm,arm1176-pmu";
-};
 };
 };
 
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 1dfd76442777..c933e8413884 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -9,19 +9,19 @@
 <0x40000000 0x40000000 0x00001000>;
 dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-local_intc: local_intc {
+local_intc: local_intc@40000000 {
 compatible = "brcm,bcm2836-l1-intc";
 reg = <0x40000000 0x100>;
 interrupt-controller;
 #interrupt-cells = <2>;
 interrupt-parent = <&local_intc>;
 };
+};
 
 arm-pmu {
 compatible = "arm,cortex-a7-pmu";
 interrupt-parent = <&local_intc>;
 interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
-};
 };
 
 timer {
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
index efa7d3387ab2..7704bb029605 100644
--- a/arch/arm/boot/dts/bcm2837.dtsi
+++ b/arch/arm/boot/dts/bcm2837.dtsi
@@ -8,7 +8,7 @@
 <0x40000000 0x40000000 0x00001000>;
 dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-local_intc: local_intc {
+local_intc: local_intc@40000000 {
 compatible = "brcm,bcm2836-l1-intc";
 reg = <0x40000000 0x100>;
 interrupt-controller;
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 18db25a5a66e..9d293decf8d3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -465,7 +465,7 @@
 status = "disabled";
 };
 
-aux: aux@0x7e215000 {
+aux: aux@7e215000 {
 compatible = "brcm,bcm2835-aux";
 #clock-cells = <1>;
 reg = <0x7e215000 0x8>;
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 6a44b8021702..f0e2008f7490 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -49,7 +49,7 @@
 
 memory {
 device_type = "memory";
-reg = <0x60000000 0x80000000>;
+reg = <0x60000000 0x20000000>;
 };
 
 gpio-restart {
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
index 08568ce24d06..da8bb9d60f99 100644
--- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
@@ -269,7 +269,7 @@
 
 sata: sata@46000000 {
 /* The ROM uses this muxmode */
-cortina,gemini-ata-muxmode = <3>;
+cortina,gemini-ata-muxmode = <0>;
 cortina,gemini-enable-sata-bridge;
 status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
index cf42c2f5cdc7..1281bc39b7ab 100644
--- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts
+++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts
@@ -42,7 +42,7 @@
 
 /dts-v1/;
 
-#include "imx6q.dtsi"
+#include "imx6dl.dtsi"
 #include "imx6qdl-icore-rqs.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index c1aa7a4518fb..a30ee9fcb3ae 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -71,6 +71,8 @@
 };
 
 &i2c1 {
+pinctrl-names = "default";
+pinctrl-0 = <&i2c1_pins>;
 clock-frequency = <2600000>;
 
 twl: twl@48 {
@@ -189,7 +191,12 @@
 >;
 };
 
-
+i2c1_pins: pinmux_i2c1_pins {
+pinctrl-single,pins = <
+OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+>;
+};
 };
 
 &omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index b50b796e15c7..47915447a826 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi | |||
@@ -66,6 +66,8 @@ | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | &i2c1 { | 68 | &i2c1 { |
69 | pinctrl-names = "default"; | ||
70 | pinctrl-0 = <&i2c1_pins>; | ||
69 | clock-frequency = <2600000>; | 71 | clock-frequency = <2600000>; |
70 | 72 | ||
71 | twl: twl@48 { | 73 | twl: twl@48 { |
@@ -136,6 +138,12 @@ | |||
136 | OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ | 138 | OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ |
137 | >; | 139 | >; |
138 | }; | 140 | }; |
141 | i2c1_pins: pinmux_i2c1_pins { | ||
142 | pinctrl-single,pins = < | ||
143 | OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ | ||
144 | OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ | ||
145 | >; | ||
146 | }; | ||
139 | }; | 147 | }; |
140 | 148 | ||
141 | &uart2 { | 149 | &uart2 { |
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index ec2c8baef62a..592e17fd4eeb 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts | |||
@@ -47,7 +47,7 @@ | |||
47 | gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 */ | 47 | gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 */ |
48 | wakeup-source; | 48 | wakeup-source; |
49 | autorepeat; | 49 | autorepeat; |
50 | debounce_interval = <50>; | 50 | debounce-interval = <50>; |
51 | }; | 51 | }; |
52 | }; | 52 | }; |
53 | 53 | ||
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 3b704cfed69a..a97458112ff6 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi | |||
@@ -280,7 +280,7 @@ | |||
280 | max-frequency = <37500000>; | 280 | max-frequency = <37500000>; |
281 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 281 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
282 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 282 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
283 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 283 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
284 | fifo-depth = <0x100>; | 284 | fifo-depth = <0x100>; |
285 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | 285 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; |
286 | resets = <&cru SRST_SDIO>; | 286 | resets = <&cru SRST_SDIO>; |
@@ -298,7 +298,7 @@ | |||
298 | max-frequency = <37500000>; | 298 | max-frequency = <37500000>; |
299 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 299 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
300 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 300 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
301 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 301 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
302 | default-sample-phase = <158>; | 302 | default-sample-phase = <158>; |
303 | disable-wp; | 303 | disable-wp; |
304 | dmas = <&pdma 12>; | 304 | dmas = <&pdma 12>; |
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 780ec3a99b21..341deaf62ff6 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi | |||
@@ -621,7 +621,7 @@ | |||
621 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 621 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
622 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, | 622 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, |
623 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; | 623 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; |
624 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 624 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
625 | fifo-depth = <0x100>; | 625 | fifo-depth = <0x100>; |
626 | pinctrl-names = "default"; | 626 | pinctrl-names = "default"; |
627 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; | 627 | pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; |
@@ -634,7 +634,7 @@ | |||
634 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; | 634 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; |
635 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 635 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
636 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 636 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
637 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 637 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
638 | fifo-depth = <0x100>; | 638 | fifo-depth = <0x100>; |
639 | pinctrl-names = "default"; | 639 | pinctrl-names = "default"; |
640 | pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; | 640 | pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; |
@@ -649,7 +649,7 @@ | |||
649 | max-frequency = <37500000>; | 649 | max-frequency = <37500000>; |
650 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 650 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
651 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 651 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
652 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 652 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
653 | bus-width = <8>; | 653 | bus-width = <8>; |
654 | default-sample-phase = <158>; | 654 | default-sample-phase = <158>; |
655 | fifo-depth = <0x100>; | 655 | fifo-depth = <0x100>; |
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi index 99cfae875e12..5eae4776ffde 100644 --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi | |||
@@ -110,26 +110,6 @@ | |||
110 | }; | 110 | }; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | &cpu0 { | ||
114 | cpu0-supply = <&vdd_cpu>; | ||
115 | operating-points = < | ||
116 | /* KHz uV */ | ||
117 | 1800000 1400000 | ||
118 | 1608000 1350000 | ||
119 | 1512000 1300000 | ||
120 | 1416000 1200000 | ||
121 | 1200000 1100000 | ||
122 | 1008000 1050000 | ||
123 | 816000 1000000 | ||
124 | 696000 950000 | ||
125 | 600000 900000 | ||
126 | 408000 900000 | ||
127 | 312000 900000 | ||
128 | 216000 900000 | ||
129 | 126000 900000 | ||
130 | >; | ||
131 | }; | ||
132 | |||
133 | &emmc { | 113 | &emmc { |
134 | status = "okay"; | 114 | status = "okay"; |
135 | bus-width = <8>; | 115 | bus-width = <8>; |
diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi index 8a74efdb6360..240e7a23d81f 100644 --- a/arch/arm/boot/dts/zx296702.dtsi +++ b/arch/arm/boot/dts/zx296702.dtsi | |||
@@ -56,7 +56,7 @@ | |||
56 | clocks = <&topclk ZX296702_A9_PERIPHCLK>; | 56 | clocks = <&topclk ZX296702_A9_PERIPHCLK>; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | l2cc: l2-cache-controller@0x00c00000 { | 59 | l2cc: l2-cache-controller@c00000 { |
60 | compatible = "arm,pl310-cache"; | 60 | compatible = "arm,pl310-cache"; |
61 | reg = <0x00c00000 0x1000>; | 61 | reg = <0x00c00000 0x1000>; |
62 | cache-unified; | 62 | cache-unified; |
@@ -67,30 +67,30 @@ | |||
67 | arm,double-linefill-incr = <0>; | 67 | arm,double-linefill-incr = <0>; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | pcu: pcu@0xa0008000 { | 70 | pcu: pcu@a0008000 { |
71 | compatible = "zte,zx296702-pcu"; | 71 | compatible = "zte,zx296702-pcu"; |
72 | reg = <0xa0008000 0x1000>; | 72 | reg = <0xa0008000 0x1000>; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | topclk: topclk@0x09800000 { | 75 | topclk: topclk@9800000 { |
76 | compatible = "zte,zx296702-topcrm-clk"; | 76 | compatible = "zte,zx296702-topcrm-clk"; |
77 | reg = <0x09800000 0x1000>; | 77 | reg = <0x09800000 0x1000>; |
78 | #clock-cells = <1>; | 78 | #clock-cells = <1>; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | lsp1clk: lsp1clk@0x09400000 { | 81 | lsp1clk: lsp1clk@9400000 { |
82 | compatible = "zte,zx296702-lsp1crpm-clk"; | 82 | compatible = "zte,zx296702-lsp1crpm-clk"; |
83 | reg = <0x09400000 0x1000>; | 83 | reg = <0x09400000 0x1000>; |
84 | #clock-cells = <1>; | 84 | #clock-cells = <1>; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | lsp0clk: lsp0clk@0x0b000000 { | 87 | lsp0clk: lsp0clk@b000000 { |
88 | compatible = "zte,zx296702-lsp0crpm-clk"; | 88 | compatible = "zte,zx296702-lsp0crpm-clk"; |
89 | reg = <0x0b000000 0x1000>; | 89 | reg = <0x0b000000 0x1000>; |
90 | #clock-cells = <1>; | 90 | #clock-cells = <1>; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | uart0: serial@0x09405000 { | 93 | uart0: serial@9405000 { |
94 | compatible = "zte,zx296702-uart"; | 94 | compatible = "zte,zx296702-uart"; |
95 | reg = <0x09405000 0x1000>; | 95 | reg = <0x09405000 0x1000>; |
96 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; | 96 | interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; |
@@ -98,7 +98,7 @@ | |||
98 | status = "disabled"; | 98 | status = "disabled"; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | uart1: serial@0x09406000 { | 101 | uart1: serial@9406000 { |
102 | compatible = "zte,zx296702-uart"; | 102 | compatible = "zte,zx296702-uart"; |
103 | reg = <0x09406000 0x1000>; | 103 | reg = <0x09406000 0x1000>; |
104 | interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; | 104 | interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; |
@@ -106,7 +106,7 @@ | |||
106 | status = "disabled"; | 106 | status = "disabled"; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | mmc0: mmc@0x09408000 { | 109 | mmc0: mmc@9408000 { |
110 | compatible = "snps,dw-mshc"; | 110 | compatible = "snps,dw-mshc"; |
111 | #address-cells = <1>; | 111 | #address-cells = <1>; |
112 | #size-cells = <0>; | 112 | #size-cells = <0>; |
@@ -119,7 +119,7 @@ | |||
119 | status = "disabled"; | 119 | status = "disabled"; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | mmc1: mmc@0x0b003000 { | 122 | mmc1: mmc@b003000 { |
123 | compatible = "snps,dw-mshc"; | 123 | compatible = "snps,dw-mshc"; |
124 | #address-cells = <1>; | 124 | #address-cells = <1>; |
125 | #size-cells = <0>; | 125 | #size-cells = <0>; |
@@ -132,7 +132,7 @@ | |||
132 | status = "disabled"; | 132 | status = "disabled"; |
133 | }; | 133 | }; |
134 | 134 | ||
135 | sysctrl: sysctrl@0xa0007000 { | 135 | sysctrl: sysctrl@a0007000 { |
136 | compatible = "zte,sysctrl", "syscon"; | 136 | compatible = "zte,sysctrl", "syscon"; |
137 | reg = <0xa0007000 0x1000>; | 137 | reg = <0xa0007000 0x1000>; |
138 | }; | 138 | }; |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 2f145c4af93a..92674f247a12 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -319,7 +319,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y | |||
319 | CONFIG_RC_CORE=m | 319 | CONFIG_RC_CORE=m |
320 | CONFIG_MEDIA_CONTROLLER=y | 320 | CONFIG_MEDIA_CONTROLLER=y |
321 | CONFIG_VIDEO_V4L2_SUBDEV_API=y | 321 | CONFIG_VIDEO_V4L2_SUBDEV_API=y |
322 | CONFIG_LIRC=m | 322 | CONFIG_LIRC=y |
323 | CONFIG_RC_DEVICES=y | 323 | CONFIG_RC_DEVICES=y |
324 | CONFIG_IR_RX51=m | 324 | CONFIG_IR_RX51=m |
325 | CONFIG_V4L_PLATFORM_DRIVERS=y | 325 | CONFIG_V4L_PLATFORM_DRIVERS=y |
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 629f8e9981f1..cf2701cb0de8 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c | |||
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | static clock_access_fn __read_persistent_clock = dummy_clock_access; | 85 | static clock_access_fn __read_persistent_clock = dummy_clock_access; |
86 | static clock_access_fn __read_boot_clock = dummy_clock_access;; | 86 | static clock_access_fn __read_boot_clock = dummy_clock_access; |
87 | 87 | ||
88 | void read_persistent_clock64(struct timespec64 *ts) | 88 | void read_persistent_clock64(struct timespec64 *ts) |
89 | { | 89 | { |
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 5638ce0c9524..63d6b404d88e 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile | |||
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING | |||
7 | 7 | ||
8 | KVM=../../../../virt/kvm | 8 | KVM=../../../../virt/kvm |
9 | 9 | ||
10 | CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) | ||
11 | |||
10 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o | 12 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
11 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o | 13 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o |
12 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o | 14 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o |
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o | |||
15 | obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o | 17 | obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o |
16 | obj-$(CONFIG_KVM_ARM_HOST) += vfp.o | 18 | obj-$(CONFIG_KVM_ARM_HOST) += vfp.o |
17 | obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o | 19 | obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o |
20 | CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE) | ||
21 | |||
18 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o | 22 | obj-$(CONFIG_KVM_ARM_HOST) += entry.o |
19 | obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o | 23 | obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o |
20 | obj-$(CONFIG_KVM_ARM_HOST) += switch.o | 24 | obj-$(CONFIG_KVM_ARM_HOST) += switch.o |
25 | CFLAGS_switch.o += $(CFLAGS_ARMV7VE) | ||
21 | obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o | 26 | obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o |
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index 111bda8cdebd..be4b8b0a40ad 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c | |||
@@ -20,6 +20,10 @@ | |||
20 | 20 | ||
21 | #include <asm/kvm_hyp.h> | 21 | #include <asm/kvm_hyp.h> |
22 | 22 | ||
23 | /* | ||
24 | * gcc before 4.9 doesn't understand -march=armv7ve, so we have to | ||
25 | * trick the assembler. | ||
26 | */ | ||
23 | __asm__(".arch_extension virt"); | 27 | __asm__(".arch_extension virt"); |
24 | 28 | ||
25 | void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) | 29 | void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) |
diff --git a/arch/arm/mach-clps711x/board-dt.c b/arch/arm/mach-clps711x/board-dt.c index ee1f83b1a332..4c89a8e9a2e3 100644 --- a/arch/arm/mach-clps711x/board-dt.c +++ b/arch/arm/mach-clps711x/board-dt.c | |||
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd) | |||
69 | soft_restart(0); | 69 | soft_restart(0); |
70 | } | 70 | } |
71 | 71 | ||
72 | static const char *clps711x_compat[] __initconst = { | 72 | static const char *const clps711x_compat[] __initconst = { |
73 | "cirrus,ep7209", | 73 | "cirrus,ep7209", |
74 | NULL | 74 | NULL |
75 | }; | 75 | }; |
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index e457f299cd44..d6b11907380c 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c | |||
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = { | |||
368 | .flags = EE_ADDR2, | 368 | .flags = EE_ADDR2, |
369 | }; | 369 | }; |
370 | 370 | ||
371 | static struct spi_board_info dm355_evm_spi_info[] __initconst = { | 371 | static const struct spi_board_info dm355_evm_spi_info[] __initconst = { |
372 | { | 372 | { |
373 | .modalias = "at25", | 373 | .modalias = "at25", |
374 | .platform_data = &at25640a, | 374 | .platform_data = &at25640a, |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index be997243447b..fad9a5611a5d 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = { | |||
217 | .flags = EE_ADDR2, | 217 | .flags = EE_ADDR2, |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static struct spi_board_info dm355_leopard_spi_info[] __initconst = { | 220 | static const struct spi_board_info dm355_leopard_spi_info[] __initconst = { |
221 | { | 221 | { |
222 | .modalias = "at25", | 222 | .modalias = "at25", |
223 | .platform_data = &at25640a, | 223 | .platform_data = &at25640a, |
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index e75741fb2c1d..e3780986d2a3 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c | |||
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = { | |||
726 | .flags = EE_ADDR2, | 726 | .flags = EE_ADDR2, |
727 | }; | 727 | }; |
728 | 728 | ||
729 | static struct spi_board_info dm365_evm_spi_info[] __initconst = { | 729 | static const struct spi_board_info dm365_evm_spi_info[] __initconst = { |
730 | { | 730 | { |
731 | .modalias = "at25", | 731 | .modalias = "at25", |
732 | .platform_data = &at25640, | 732 | .platform_data = &at25640, |
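
The clps711x and DaVinci board-file hunks above all apply the same rule: data annotated __initconst must itself be const, because the annotation places the object in the init-time read-only data section, and a non-const object there tends to trigger a "section type conflict" from the compiler. A minimal sketch of the idea follows; the demo_* names and the simplified macro are illustrative assumptions, not code from these patches (the clps711x change adds the same top-level const to its array of compat strings).

    #include <linux/spi/spi.h>      /* struct spi_board_info */

    /* Simplified stand-in for __initconst from <linux/init.h>: the object
     * goes into .init.rodata, so the C object must be const as well. */
    #define demo_initconst __attribute__((__section__(".init.rodata")))

    static const struct spi_board_info demo_spi_info[] demo_initconst = {
            { .modalias = "at25" },         /* const data: fine in .init.rodata */
    };
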
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 6b32dc527edc..2c20599cc350 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig | |||
@@ -41,7 +41,7 @@ config MACH_ARMADA_375 | |||
41 | depends on ARCH_MULTI_V7 | 41 | depends on ARCH_MULTI_V7 |
42 | select ARMADA_370_XP_IRQ | 42 | select ARMADA_370_XP_IRQ |
43 | select ARM_ERRATA_720789 | 43 | select ARM_ERRATA_720789 |
44 | select ARM_ERRATA_753970 | 44 | select PL310_ERRATA_753970 |
45 | select ARM_GIC | 45 | select ARM_GIC |
46 | select ARMADA_375_CLK | 46 | select ARMADA_375_CLK |
47 | select HAVE_ARM_SCU | 47 | select HAVE_ARM_SCU |
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X | |||
57 | bool "Marvell Armada 380/385 boards" | 57 | bool "Marvell Armada 380/385 boards" |
58 | depends on ARCH_MULTI_V7 | 58 | depends on ARCH_MULTI_V7 |
59 | select ARM_ERRATA_720789 | 59 | select ARM_ERRATA_720789 |
60 | select ARM_ERRATA_753970 | 60 | select PL310_ERRATA_753970 |
61 | select ARM_GIC | 61 | select ARM_GIC |
62 | select ARM_GLOBAL_TIMER | 62 | select ARM_GLOBAL_TIMER |
63 | select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK | 63 | select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK |
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c index 43e3e188f521..fa512413a471 100644 --- a/arch/arm/mach-omap1/clock.c +++ b/arch/arm/mach-omap1/clock.c | |||
@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c) | |||
1011 | return -ENOMEM; | 1011 | return -ENOMEM; |
1012 | c->dent = d; | 1012 | c->dent = d; |
1013 | 1013 | ||
1014 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); | 1014 | d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); |
1015 | if (!d) { | 1015 | if (!d) { |
1016 | err = -ENOMEM; | 1016 | err = -ENOMEM; |
1017 | goto err_out; | 1017 | goto err_out; |
1018 | } | 1018 | } |
1019 | d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); | 1019 | d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); |
1020 | if (!d) { | 1020 | if (!d) { |
1021 | err = -ENOMEM; | 1021 | err = -ENOMEM; |
1022 | goto err_out; | 1022 | goto err_out; |
1023 | } | 1023 | } |
1024 | d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); | 1024 | d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); |
1025 | if (!d) { | 1025 | if (!d) { |
1026 | err = -ENOMEM; | 1026 | err = -ENOMEM; |
1027 | goto err_out; | 1027 | goto err_out; |
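
The omap1 clock.c hunk above replaces pointer casts with debugfs helpers whose argument type matches the underlying field; the helpers chosen in the patch imply that usecount and flags are u8 and rate is unsigned long, so the old u32 casts read bytes beyond the fields themselves. A minimal sketch of the pattern, using hypothetical stand-in variables rather than struct clk members:

    #include <linux/debugfs.h>
    #include <linux/types.h>

    static u8 demo_usecount;            /* 8-bit field: u8 helper          */
    static unsigned long demo_rate;     /* unsigned long: *_ulong helper   */
    static u8 demo_flags;               /* hex view of an 8-bit field: x8  */

    static void demo_debugfs_register(struct dentry *dir)
    {
            /* Each helper takes a pointer of the matching type, so no casts
             * are needed and no out-of-range bytes are ever read. */
            debugfs_create_u8("usecount", 0444, dir, &demo_usecount);
            debugfs_create_ulong("rate", 0444, dir, &demo_rate);
            debugfs_create_x8("flags", 0444, dir, &demo_flags);
    }
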
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 4bb6751864a5..fc5fb776a710 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -299,8 +299,6 @@ static void irq_save_context(void) | |||
299 | if (soc_is_dra7xx()) | 299 | if (soc_is_dra7xx()) |
300 | return; | 300 | return; |
301 | 301 | ||
302 | if (!sar_base) | ||
303 | sar_base = omap4_get_sar_ram_base(); | ||
304 | if (wakeupgen_ops && wakeupgen_ops->save_context) | 302 | if (wakeupgen_ops && wakeupgen_ops->save_context) |
305 | wakeupgen_ops->save_context(); | 303 | wakeupgen_ops->save_context(); |
306 | } | 304 | } |
@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node, | |||
598 | irq_hotplug_init(); | 596 | irq_hotplug_init(); |
599 | irq_pm_init(); | 597 | irq_pm_init(); |
600 | 598 | ||
599 | sar_base = omap4_get_sar_ram_base(); | ||
600 | |||
601 | return 0; | 601 | return 0; |
602 | } | 602 | } |
603 | IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); | 603 | IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 124f9af34a15..34156eca8e23 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -977,6 +977,9 @@ static int _enable_clocks(struct omap_hwmod *oh) | |||
977 | 977 | ||
978 | pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name); | 978 | pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name); |
979 | 979 | ||
980 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
981 | _enable_optional_clocks(oh); | ||
982 | |||
980 | if (oh->_clk) | 983 | if (oh->_clk) |
981 | clk_enable(oh->_clk); | 984 | clk_enable(oh->_clk); |
982 | 985 | ||
@@ -985,9 +988,6 @@ static int _enable_clocks(struct omap_hwmod *oh) | |||
985 | clk_enable(os->_clk); | 988 | clk_enable(os->_clk); |
986 | } | 989 | } |
987 | 990 | ||
988 | if (oh->flags & HWMOD_OPT_CLKS_NEEDED) | ||
989 | _enable_optional_clocks(oh); | ||
990 | |||
991 | /* The opt clocks are controlled by the device driver. */ | 991 | /* The opt clocks are controlled by the device driver. */ |
992 | 992 | ||
993 | return 0; | 993 | return 0; |
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 366158a54fcd..6f68576e5695 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c | |||
@@ -186,7 +186,7 @@ static void omap_pm_end(void) | |||
186 | cpu_idle_poll_ctrl(false); | 186 | cpu_idle_poll_ctrl(false); |
187 | } | 187 | } |
188 | 188 | ||
189 | static void omap_pm_finish(void) | 189 | static void omap_pm_wake(void) |
190 | { | 190 | { |
191 | if (soc_is_omap34xx()) | 191 | if (soc_is_omap34xx()) |
192 | omap_prcm_irq_complete(); | 192 | omap_prcm_irq_complete(); |
@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = { | |||
196 | .begin = omap_pm_begin, | 196 | .begin = omap_pm_begin, |
197 | .end = omap_pm_end, | 197 | .end = omap_pm_end, |
198 | .enter = omap_pm_enter, | 198 | .enter = omap_pm_enter, |
199 | .finish = omap_pm_finish, | 199 | .wake = omap_pm_wake, |
200 | .valid = suspend_valid_only_mem, | 200 | .valid = suspend_valid_only_mem, |
201 | }; | 201 | }; |
202 | 202 | ||
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index ece09c9461f7..d61fbd7a2840 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = { | |||
156 | .tick_resume = omap2_gp_timer_shutdown, | 156 | .tick_resume = omap2_gp_timer_shutdown, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static struct property device_disabled = { | ||
160 | .name = "status", | ||
161 | .length = sizeof("disabled"), | ||
162 | .value = "disabled", | ||
163 | }; | ||
164 | |||
165 | static const struct of_device_id omap_timer_match[] __initconst = { | 159 | static const struct of_device_id omap_timer_match[] __initconst = { |
166 | { .compatible = "ti,omap2420-timer", }, | 160 | { .compatible = "ti,omap2420-timer", }, |
167 | { .compatible = "ti,omap3430-timer", }, | 161 | { .compatible = "ti,omap3430-timer", }, |
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id * | |||
203 | of_get_property(np, "ti,timer-secure", NULL))) | 197 | of_get_property(np, "ti,timer-secure", NULL))) |
204 | continue; | 198 | continue; |
205 | 199 | ||
206 | if (!of_device_is_compatible(np, "ti,omap-counter32k")) | 200 | if (!of_device_is_compatible(np, "ti,omap-counter32k")) { |
207 | of_add_property(np, &device_disabled); | 201 | struct property *prop; |
202 | |||
203 | prop = kzalloc(sizeof(*prop), GFP_KERNEL); | ||
204 | if (!prop) | ||
205 | return NULL; | ||
206 | prop->name = "status"; | ||
207 | prop->value = "disabled"; | ||
208 | prop->length = strlen(prop->value); | ||
209 | of_add_property(np, prop); | ||
210 | } | ||
208 | return np; | 211 | return np; |
209 | } | 212 | } |
210 | 213 | ||
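
The timer.c hunk above drops the single static device_disabled property in favour of allocating a fresh struct property for each timer node that gets marked disabled. The likely reason (an inference, not stated in the hunk) is that of_add_property() links the passed-in structure into the node's own property list, so one static instance cannot safely be attached to more than one node. A sketch of the allocation pattern as a hypothetical helper, mirroring what the patched code does inline:

    #include <linux/of.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical helper (not in the patch): build a fresh
     * status = "disabled" property per node, since the property structure
     * becomes part of that node's list once of_add_property() accepts it. */
    static struct property *alloc_disabled_status(void)
    {
            struct property *prop = kzalloc(sizeof(*prop), GFP_KERNEL);

            if (!prop)
                    return NULL;

            prop->name = "status";
            prop->value = "disabled";
            prop->length = strlen(prop->value);

            return prop;
    }
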
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 57058ac46f49..7e5d7a083707 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
26 | #include <linux/perf/arm_pmu.h> | ||
27 | #include <linux/regulator/machine.h> | 26 | #include <linux/regulator/machine.h> |
28 | 27 | ||
29 | #include <asm/outercache.h> | 28 | #include <asm/outercache.h> |
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd) | |||
112 | prcmu_system_reset(0); | 111 | prcmu_system_reset(0); |
113 | } | 112 | } |
114 | 113 | ||
115 | /* | ||
116 | * The PMU IRQ lines of two cores are wired together into a single interrupt. | ||
117 | * Bounce the interrupt to the other core if it's not ours. | ||
118 | */ | ||
119 | static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler) | ||
120 | { | ||
121 | irqreturn_t ret = handler(irq, dev); | ||
122 | int other = !smp_processor_id(); | ||
123 | |||
124 | if (ret == IRQ_NONE && cpu_online(other)) | ||
125 | irq_set_affinity(irq, cpumask_of(other)); | ||
126 | |||
127 | /* | ||
128 | * We should be able to get away with the amount of IRQ_NONEs we give, | ||
129 | * while still having the spurious IRQ detection code kick in if the | ||
130 | * interrupt really starts hitting spuriously. | ||
131 | */ | ||
132 | return ret; | ||
133 | } | ||
134 | |||
135 | static struct arm_pmu_platdata db8500_pmu_platdata = { | ||
136 | .handle_irq = db8500_pmu_handler, | ||
137 | .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD, | ||
138 | }; | ||
139 | |||
140 | static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { | ||
141 | /* Requires call-back bindings. */ | ||
142 | OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), | ||
143 | {}, | ||
144 | }; | ||
145 | |||
146 | static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = { | 114 | static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = { |
147 | OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL), | 115 | OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL), |
148 | {}, | 116 | {}, |
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void) | |||
165 | if (of_machine_is_compatible("st-ericsson,u8540")) | 133 | if (of_machine_is_compatible("st-ericsson,u8540")) |
166 | of_platform_populate(NULL, u8500_local_bus_nodes, | 134 | of_platform_populate(NULL, u8500_local_bus_nodes, |
167 | u8540_auxdata_lookup, NULL); | 135 | u8540_auxdata_lookup, NULL); |
168 | else | ||
169 | of_platform_populate(NULL, u8500_local_bus_nodes, | ||
170 | u8500_auxdata_lookup, NULL); | ||
171 | } | 136 | } |
172 | 137 | ||
173 | static const char * stericsson_dt_platform_compat[] = { | 138 | static const char * stericsson_dt_platform_compat[] = { |
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index aff6994950ba..a2399fd66e97 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, | |||
472 | /***************************************************************************** | 472 | /***************************************************************************** |
473 | * Ethernet switch | 473 | * Ethernet switch |
474 | ****************************************************************************/ | 474 | ****************************************************************************/ |
475 | static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii"; | 475 | static __initdata struct mdio_board_info orion_ge00_switch_board_info = { |
476 | static __initdata struct mdio_board_info | 476 | .bus_id = "orion-mii", |
477 | orion_ge00_switch_board_info; | 477 | .modalias = "mv88e6085", |
478 | }; | ||
478 | 479 | ||
479 | void __init orion_ge00_switch_init(struct dsa_chip_data *d) | 480 | void __init orion_ge00_switch_init(struct dsa_chip_data *d) |
480 | { | 481 | { |
481 | struct mdio_board_info *bd; | ||
482 | unsigned int i; | 482 | unsigned int i; |
483 | 483 | ||
484 | if (!IS_BUILTIN(CONFIG_PHYLIB)) | 484 | if (!IS_BUILTIN(CONFIG_PHYLIB)) |
485 | return; | 485 | return; |
486 | 486 | ||
487 | for (i = 0; i < ARRAY_SIZE(d->port_names); i++) | 487 | for (i = 0; i < ARRAY_SIZE(d->port_names); i++) { |
488 | if (!strcmp(d->port_names[i], "cpu")) | 488 | if (!strcmp(d->port_names[i], "cpu")) { |
489 | d->netdev[i] = &orion_ge00.dev; | ||
489 | break; | 490 | break; |
491 | } | ||
492 | } | ||
490 | 493 | ||
491 | bd = &orion_ge00_switch_board_info; | 494 | orion_ge00_switch_board_info.mdio_addr = d->sw_addr; |
492 | bd->bus_id = orion_ge00_mvmdio_bus_name; | 495 | orion_ge00_switch_board_info.platform_data = d; |
493 | bd->mdio_addr = d->sw_addr; | ||
494 | d->netdev[i] = &orion_ge00.dev; | ||
495 | strcpy(bd->modalias, "mv88e6085"); | ||
496 | bd->platform_data = d; | ||
497 | 496 | ||
498 | mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); | 497 | mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); |
499 | } | 498 | } |
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index a80632641b39..70c776ef7aa7 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi | |||
@@ -165,14 +165,14 @@ | |||
165 | 165 | ||
166 | uart_A: serial@24000 { | 166 | uart_A: serial@24000 { |
167 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; | 167 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; |
168 | reg = <0x0 0x24000 0x0 0x14>; | 168 | reg = <0x0 0x24000 0x0 0x18>; |
169 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; | 169 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; |
170 | status = "disabled"; | 170 | status = "disabled"; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | uart_B: serial@23000 { | 173 | uart_B: serial@23000 { |
174 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; | 174 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart"; |
175 | reg = <0x0 0x23000 0x0 0x14>; | 175 | reg = <0x0 0x23000 0x0 0x18>; |
176 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; | 176 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; |
177 | status = "disabled"; | 177 | status = "disabled"; |
178 | }; | 178 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 6cb3c2a52baf..4ee2e7951482 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi | |||
@@ -235,14 +235,14 @@ | |||
235 | 235 | ||
236 | uart_A: serial@84c0 { | 236 | uart_A: serial@84c0 { |
237 | compatible = "amlogic,meson-gx-uart"; | 237 | compatible = "amlogic,meson-gx-uart"; |
238 | reg = <0x0 0x84c0 0x0 0x14>; | 238 | reg = <0x0 0x84c0 0x0 0x18>; |
239 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; | 239 | interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>; |
240 | status = "disabled"; | 240 | status = "disabled"; |
241 | }; | 241 | }; |
242 | 242 | ||
243 | uart_B: serial@84dc { | 243 | uart_B: serial@84dc { |
244 | compatible = "amlogic,meson-gx-uart"; | 244 | compatible = "amlogic,meson-gx-uart"; |
245 | reg = <0x0 0x84dc 0x0 0x14>; | 245 | reg = <0x0 0x84dc 0x0 0x18>; |
246 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; | 246 | interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>; |
247 | status = "disabled"; | 247 | status = "disabled"; |
248 | }; | 248 | }; |
@@ -287,7 +287,7 @@ | |||
287 | 287 | ||
288 | uart_C: serial@8700 { | 288 | uart_C: serial@8700 { |
289 | compatible = "amlogic,meson-gx-uart"; | 289 | compatible = "amlogic,meson-gx-uart"; |
290 | reg = <0x0 0x8700 0x0 0x14>; | 290 | reg = <0x0 0x8700 0x0 0x18>; |
291 | interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>; | 291 | interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>; |
292 | status = "disabled"; | 292 | status = "disabled"; |
293 | }; | 293 | }; |
@@ -404,14 +404,14 @@ | |||
404 | 404 | ||
405 | uart_AO: serial@4c0 { | 405 | uart_AO: serial@4c0 { |
406 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; | 406 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; |
407 | reg = <0x0 0x004c0 0x0 0x14>; | 407 | reg = <0x0 0x004c0 0x0 0x18>; |
408 | interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>; | 408 | interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>; |
409 | status = "disabled"; | 409 | status = "disabled"; |
410 | }; | 410 | }; |
411 | 411 | ||
412 | uart_AO_B: serial@4e0 { | 412 | uart_AO_B: serial@4e0 { |
413 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; | 413 | compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; |
414 | reg = <0x0 0x004e0 0x0 0x14>; | 414 | reg = <0x0 0x004e0 0x0 0x18>; |
415 | interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; | 415 | interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>; |
416 | status = "disabled"; | 416 | status = "disabled"; |
417 | }; | 417 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 4f355f17eed6..c8514110b9da 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | |||
@@ -631,6 +631,7 @@ | |||
631 | 631 | ||
632 | internal_phy: ethernet-phy@8 { | 632 | internal_phy: ethernet-phy@8 { |
633 | compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22"; | 633 | compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22"; |
634 | interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>; | ||
634 | reg = <8>; | 635 | reg = <8>; |
635 | max-speed = <100>; | 636 | max-speed = <100>; |
636 | }; | 637 | }; |
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi index 4220fbdcb24a..ff5c4c47b22b 100644 --- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi | |||
@@ -98,7 +98,7 @@ | |||
98 | clock-output-names = "clk125mhz"; | 98 | clock-output-names = "clk125mhz"; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | pci { | 101 | pcie@30000000 { |
102 | compatible = "pci-host-ecam-generic"; | 102 | compatible = "pci-host-ecam-generic"; |
103 | device_type = "pci"; | 103 | device_type = "pci"; |
104 | #interrupt-cells = <1>; | 104 | #interrupt-cells = <1>; |
@@ -118,6 +118,7 @@ | |||
118 | ranges = | 118 | ranges = |
119 | <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 | 119 | <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 |
120 | 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; | 120 | 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; |
121 | bus-range = <0 0xff>; | ||
121 | interrupt-map-mask = <0 0 0 7>; | 122 | interrupt-map-mask = <0 0 0 7>; |
122 | interrupt-map = | 123 | interrupt-map = |
123 | /* addr pin ic icaddr icintr */ | 124 | /* addr pin ic icaddr icintr */ |
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index e94fa1a53192..047641fe294c 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | |||
@@ -51,7 +51,7 @@ | |||
51 | #size-cells = <2>; | 51 | #size-cells = <2>; |
52 | ranges; | 52 | ranges; |
53 | 53 | ||
54 | ramoops@0x21f00000 { | 54 | ramoops@21f00000 { |
55 | compatible = "ramoops"; | 55 | compatible = "ramoops"; |
56 | reg = <0x0 0x21f00000 0x0 0x00100000>; | 56 | reg = <0x0 0x21f00000 0x0 0x00100000>; |
57 | record-size = <0x00020000>; | 57 | record-size = <0x00020000>; |
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 9fbe4705ee88..94597e33c806 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi | |||
@@ -341,7 +341,7 @@ | |||
341 | reg = <0 0x10005000 0 0x1000>; | 341 | reg = <0 0x10005000 0 0x1000>; |
342 | }; | 342 | }; |
343 | 343 | ||
344 | pio: pinctrl@0x10005000 { | 344 | pio: pinctrl@10005000 { |
345 | compatible = "mediatek,mt8173-pinctrl"; | 345 | compatible = "mediatek,mt8173-pinctrl"; |
346 | reg = <0 0x1000b000 0 0x1000>; | 346 | reg = <0 0x1000b000 0 0x1000>; |
347 | mediatek,pctl-regmap = <&syscfg_pctl_a>; | 347 | mediatek,pctl-regmap = <&syscfg_pctl_a>; |
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 492a011f14f6..1c8f1b86472d 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi | |||
@@ -140,16 +140,16 @@ | |||
140 | }; | 140 | }; |
141 | 141 | ||
142 | agnoc@0 { | 142 | agnoc@0 { |
143 | qcom,pcie@00600000 { | 143 | qcom,pcie@600000 { |
144 | perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>; | 144 | perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | qcom,pcie@00608000 { | 147 | qcom,pcie@608000 { |
148 | status = "okay"; | 148 | status = "okay"; |
149 | perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>; | 149 | perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>; |
150 | }; | 150 | }; |
151 | 151 | ||
152 | qcom,pcie@00610000 { | 152 | qcom,pcie@610000 { |
153 | status = "okay"; | 153 | status = "okay"; |
154 | perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>; | 154 | perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>; |
155 | }; | 155 | }; |
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 4b2afcc4fdf4..0a6f7952bbb1 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi | |||
@@ -840,7 +840,7 @@ | |||
840 | #size-cells = <1>; | 840 | #size-cells = <1>; |
841 | ranges; | 841 | ranges; |
842 | 842 | ||
843 | pcie0: qcom,pcie@00600000 { | 843 | pcie0: qcom,pcie@600000 { |
844 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 844 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
845 | status = "disabled"; | 845 | status = "disabled"; |
846 | power-domains = <&gcc PCIE0_GDSC>; | 846 | power-domains = <&gcc PCIE0_GDSC>; |
@@ -893,7 +893,7 @@ | |||
893 | 893 | ||
894 | }; | 894 | }; |
895 | 895 | ||
896 | pcie1: qcom,pcie@00608000 { | 896 | pcie1: qcom,pcie@608000 { |
897 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 897 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
898 | power-domains = <&gcc PCIE1_GDSC>; | 898 | power-domains = <&gcc PCIE1_GDSC>; |
899 | bus-range = <0x00 0xff>; | 899 | bus-range = <0x00 0xff>; |
@@ -946,7 +946,7 @@ | |||
946 | "bus_slave"; | 946 | "bus_slave"; |
947 | }; | 947 | }; |
948 | 948 | ||
949 | pcie2: qcom,pcie@00610000 { | 949 | pcie2: qcom,pcie@610000 { |
950 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; | 950 | compatible = "qcom,pcie-msm8996", "snps,dw-pcie"; |
951 | power-domains = <&gcc PCIE2_GDSC>; | 951 | power-domains = <&gcc PCIE2_GDSC>; |
952 | bus-range = <0x00 0xff>; | 952 | bus-range = <0x00 0xff>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 3890468678ce..28257724a56e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
@@ -132,17 +132,16 @@ | |||
132 | assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; | 132 | assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; |
133 | assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; | 133 | assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; |
134 | clock_in_out = "input"; | 134 | clock_in_out = "input"; |
135 | /* shows instability at 1GBit right now */ | ||
136 | max-speed = <100>; | ||
137 | phy-supply = <&vcc_io>; | 135 | phy-supply = <&vcc_io>; |
138 | phy-mode = "rgmii"; | 136 | phy-mode = "rgmii"; |
139 | pinctrl-names = "default"; | 137 | pinctrl-names = "default"; |
140 | pinctrl-0 = <&rgmiim1_pins>; | 138 | pinctrl-0 = <&rgmiim1_pins>; |
139 | snps,force_thresh_dma_mode; | ||
141 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; | 140 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; |
142 | snps,reset-active-low; | 141 | snps,reset-active-low; |
143 | snps,reset-delays-us = <0 10000 50000>; | 142 | snps,reset-delays-us = <0 10000 50000>; |
144 | tx_delay = <0x26>; | 143 | tx_delay = <0x24>; |
145 | rx_delay = <0x11>; | 144 | rx_delay = <0x18>; |
146 | status = "okay"; | 145 | status = "okay"; |
147 | }; | 146 | }; |
148 | 147 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index a037ee56fead..cae341554486 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi | |||
@@ -730,7 +730,7 @@ | |||
730 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; | 730 | interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; |
731 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, | 731 | clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, |
732 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; | 732 | <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; |
733 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 733 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
734 | fifo-depth = <0x100>; | 734 | fifo-depth = <0x100>; |
735 | status = "disabled"; | 735 | status = "disabled"; |
736 | }; | 736 | }; |
@@ -741,7 +741,7 @@ | |||
741 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; | 741 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>; |
742 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, | 742 | clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, |
743 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; | 743 | <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; |
744 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 744 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
745 | fifo-depth = <0x100>; | 745 | fifo-depth = <0x100>; |
746 | status = "disabled"; | 746 | status = "disabled"; |
747 | }; | 747 | }; |
@@ -752,7 +752,7 @@ | |||
752 | interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; | 752 | interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; |
753 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, | 753 | clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, |
754 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; | 754 | <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; |
755 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 755 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
756 | fifo-depth = <0x100>; | 756 | fifo-depth = <0x100>; |
757 | status = "disabled"; | 757 | status = "disabled"; |
758 | }; | 758 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index aa4d07046a7b..03458ac44201 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
@@ -257,7 +257,7 @@ | |||
257 | max-frequency = <150000000>; | 257 | max-frequency = <150000000>; |
258 | clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, | 258 | clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, |
259 | <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; | 259 | <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; |
260 | clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; | 260 | clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; |
261 | fifo-depth = <0x100>; | 261 | fifo-depth = <0x100>; |
262 | interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; | 262 | interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; |
263 | resets = <&cru SRST_SDIO0>; | 263 | resets = <&cru SRST_SDIO0>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 0f873c897d0d..ce592a4c0c4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | |||
@@ -457,7 +457,7 @@ | |||
457 | assigned-clocks = <&cru SCLK_PCIEPHY_REF>; | 457 | assigned-clocks = <&cru SCLK_PCIEPHY_REF>; |
458 | assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; | 458 | assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; |
459 | assigned-clock-rates = <100000000>; | 459 | assigned-clock-rates = <100000000>; |
460 | ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; | 460 | ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; |
461 | num-lanes = <4>; | 461 | num-lanes = <4>; |
462 | pinctrl-names = "default"; | 462 | pinctrl-names = "default"; |
463 | pinctrl-0 = <&pcie_clkreqn_cpm>; | 463 | pinctrl-0 = <&pcie_clkreqn_cpm>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 7aa2144e0d47..2605118d4b4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi | |||
@@ -1739,8 +1739,8 @@ | |||
1739 | compatible = "rockchip,rk3399-edp"; | 1739 | compatible = "rockchip,rk3399-edp"; |
1740 | reg = <0x0 0xff970000 0x0 0x8000>; | 1740 | reg = <0x0 0xff970000 0x0 0x8000>; |
1741 | interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>; | 1741 | interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>; |
1742 | clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>; | 1742 | clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>; |
1743 | clock-names = "dp", "pclk"; | 1743 | clock-names = "dp", "pclk", "grf"; |
1744 | pinctrl-names = "default"; | 1744 | pinctrl-names = "default"; |
1745 | pinctrl-0 = <&edp_hpd>; | 1745 | pinctrl-0 = <&edp_hpd>; |
1746 | power-domains = <&power RK3399_PD_EDP>; | 1746 | power-domains = <&power RK3399_PD_EDP>; |
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index eda8c5f629fc..350c76a1d15b 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | #define MPIDR_UP_BITMASK (0x1 << 30) | 21 | #define MPIDR_UP_BITMASK (0x1 << 30) |
22 | #define MPIDR_MT_BITMASK (0x1 << 24) | 22 | #define MPIDR_MT_BITMASK (0x1 << 24) |
23 | #define MPIDR_HWID_BITMASK 0xff00ffffffUL | 23 | #define MPIDR_HWID_BITMASK UL(0xff00ffffff) |
24 | 24 | ||
25 | #define MPIDR_LEVEL_BITS_SHIFT 3 | 25 | #define MPIDR_LEVEL_BITS_SHIFT 3 |
26 | #define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) | 26 | #define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) |
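
The cputype.h hunk above rewrites the MPIDR mask with the UL() helper instead of a literal UL suffix. UL() is conventionally built on the _AC() token-pasting macro, which appends the suffix only when the header is compiled as C and leaves the bare number alone for the assembler (and for tools that object to the suffix). A sketch of that conventional definition, shown as background rather than taken from this patch:

    /* Conventional pattern behind _AC()/UL(), simplified: in C the suffix
     * is pasted on, in assembly the plain constant is used unchanged. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)
    #endif

    #define UL(x)           _AC(x, UL)

    #define MPIDR_HWID_BITMASK_DEMO UL(0xff00ffffff)  /* same value either way */
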
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 472ef944e932..902f9edacbea 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h | |||
@@ -28,7 +28,7 @@ struct stackframe { | |||
28 | unsigned long fp; | 28 | unsigned long fp; |
29 | unsigned long pc; | 29 | unsigned long pc; |
30 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 30 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
31 | unsigned int graph; | 31 | int graph; |
32 | #endif | 32 | #endif |
33 | }; | 33 | }; |
34 | 34 | ||
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 543e11f0f657..e66b0fca99c2 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs) | |||
72 | * This is equivalent to the following test: | 72 | * This is equivalent to the following test: |
73 | * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 | 73 | * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 |
74 | */ | 74 | */ |
75 | static inline unsigned long __range_ok(unsigned long addr, unsigned long size) | 75 | static inline unsigned long __range_ok(const void __user *addr, unsigned long size) |
76 | { | 76 | { |
77 | unsigned long limit = current_thread_info()->addr_limit; | 77 | unsigned long ret, limit = current_thread_info()->addr_limit; |
78 | 78 | ||
79 | __chk_user_ptr(addr); | 79 | __chk_user_ptr(addr); |
80 | asm volatile( | 80 | asm volatile( |
81 | // A + B <= C + 1 for all A,B,C, in four easy steps: | 81 | // A + B <= C + 1 for all A,B,C, in four easy steps: |
82 | // 1: X = A + B; X' = X % 2^64 | 82 | // 1: X = A + B; X' = X % 2^64 |
83 | " adds %0, %0, %2\n" | 83 | " adds %0, %3, %2\n" |
84 | // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 | 84 | // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 |
85 | " csel %1, xzr, %1, hi\n" | 85 | " csel %1, xzr, %1, hi\n" |
86 | // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' | 86 | // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' |
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size) | |||
92 | // testing X' - C == 0, subject to the previous adjustments. | 92 | // testing X' - C == 0, subject to the previous adjustments. |
93 | " sbcs xzr, %0, %1\n" | 93 | " sbcs xzr, %0, %1\n" |
94 | " cset %0, ls\n" | 94 | " cset %0, ls\n" |
95 | : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); | 95 | : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc"); |
96 | 96 | ||
97 | return addr; | 97 | return ret; |
98 | } | 98 | } |
99 | 99 | ||
100 | /* | 100 | /* |
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size) | |||
104 | */ | 104 | */ |
105 | #define untagged_addr(addr) sign_extend64(addr, 55) | 105 | #define untagged_addr(addr) sign_extend64(addr, 55) |
106 | 106 | ||
107 | #define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) | 107 | #define access_ok(type, addr, size) __range_ok(addr, size) |
108 | #define user_addr_max get_fs | 108 | #define user_addr_max get_fs |
109 | 109 | ||
110 | #define _ASM_EXTABLE(from, to) \ | 110 | #define _ASM_EXTABLE(from, to) \ |
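
The uaccess.h change above gives __range_ok() a __user-pointer prototype (so access_ok() can pass the pointer straight through) and reworks the asm constraints to return the result in a separate output; the armv8_deprecated.c and sys_compat.c hunks further down adjust their access_ok() callers to match. The check itself is the 65-bit comparison described in the asm comments: addr + size <= addr_limit + 1 without losing the carry. A portable C illustration of that arithmetic, offered as a standalone sketch rather than the kernel's implementation (range_ok_demo and its parameters are hypothetical names):

    #include <stdbool.h>
    #include <stdint.h>

    /* 65-bit "addr + size <= limit + 1" evaluated with 64-bit operations,
     * mirroring the reasoning in the asm comments above.
     * __builtin_add_overflow is a GCC/Clang builtin. */
    static bool range_ok_demo(uint64_t addr, uint64_t size, uint64_t limit)
    {
            uint64_t sum;
            bool carry = __builtin_add_overflow(addr, size, &sum);

            if (limit == UINT64_MAX)        /* limit + 1 == 2^64: anything fits */
                    return !carry || sum == 0;
            if (carry)                      /* true sum >= 2^64 > limit + 1 */
                    return false;
            return sum <= limit + 1;        /* no wrap possible here */
    }
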
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index c33b5e4010ab..68450e954d47 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) | |||
370 | static int swp_handler(struct pt_regs *regs, u32 instr) | 370 | static int swp_handler(struct pt_regs *regs, u32 instr) |
371 | { | 371 | { |
372 | u32 destreg, data, type, address = 0; | 372 | u32 destreg, data, type, address = 0; |
373 | const void __user *user_ptr; | ||
373 | int rn, rt2, res = 0; | 374 | int rn, rt2, res = 0; |
374 | 375 | ||
375 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); | 376 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); |
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr) | |||
401 | aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data); | 402 | aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data); |
402 | 403 | ||
403 | /* Check access in reasonable access range for both SWP and SWPB */ | 404 | /* Check access in reasonable access range for both SWP and SWPB */ |
404 | if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { | 405 | user_ptr = (const void __user *)(unsigned long)(address & ~3); |
406 | if (!access_ok(VERIFY_WRITE, user_ptr, 4)) { | ||
405 | pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", | 407 | pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", |
406 | address); | 408 | address); |
407 | goto fault; | 409 | goto fault; |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 29b1f873e337..2985a067fc13 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { | |||
199 | }; | 199 | }; |
200 | 200 | ||
201 | static const struct arm64_ftr_bits ftr_ctr[] = { | 201 | static const struct arm64_ftr_bits ftr_ctr[] = { |
202 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ | 202 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ |
203 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */ | ||
204 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ | ||
203 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ | 205 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ |
204 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ | 206 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ |
205 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ | 207 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ |
206 | /* | 208 | /* |
207 | * Linux can handle differing I-cache policies. Userspace JITs will | 209 | * Linux can handle differing I-cache policies. Userspace JITs will |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 75b220ba73a3..85a251b6dfa8 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info) | |||
908 | int pmuver; | 908 | int pmuver; |
909 | 909 | ||
910 | dfr0 = read_sysreg(id_aa64dfr0_el1); | 910 | dfr0 = read_sysreg(id_aa64dfr0_el1); |
911 | pmuver = cpuid_feature_extract_signed_field(dfr0, | 911 | pmuver = cpuid_feature_extract_unsigned_field(dfr0, |
912 | ID_AA64DFR0_PMUVER_SHIFT); | 912 | ID_AA64DFR0_PMUVER_SHIFT); |
913 | if (pmuver < 1) | 913 | if (pmuver == 0xf || pmuver == 0) |
914 | return; | 914 | return; |
915 | 915 | ||
916 | probe->present = true; | 916 | probe->present = true; |
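
The perf_event.c hunk above switches PMUVer probing to an unsigned field extraction and rejects both 0x0 and 0xf. ID_AA64DFR0_EL1.PMUVer is a 4-bit unsigned ID field where 0x0 means no PMU and 0xf means an IMPLEMENTATION DEFINED (non-architected) PMU, so a signed "< 1" test is the wrong shape for it. A small sketch of the extraction and the test, using hypothetical helper names rather than the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    /* 4-bit unsigned ID register field at the given bit position. */
    static inline unsigned int id_field(uint64_t reg, unsigned int shift)
    {
            return (reg >> shift) & 0xf;
    }

    /* PMUVer: 0x0 = no PMU, 0xf = IMPLEMENTATION DEFINED PMU; only the
     * architected values in between are usable by the generic driver. */
    static bool pmuv3_present(uint64_t id_aa64dfr0, unsigned int pmuver_shift)
    {
            unsigned int pmuver = id_field(id_aa64dfr0, pmuver_shift);

            return pmuver != 0 && pmuver != 0xf;
    }
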
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index ad8aeb098b31..c0da6efe5465 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs) | |||
220 | 220 | ||
221 | show_regs_print_info(KERN_DEFAULT); | 221 | show_regs_print_info(KERN_DEFAULT); |
222 | print_pstate(regs); | 222 | print_pstate(regs); |
223 | printk("pc : %pS\n", (void *)regs->pc); | 223 | |
224 | printk("lr : %pS\n", (void *)lr); | 224 | if (!user_mode(regs)) { |
225 | printk("pc : %pS\n", (void *)regs->pc); | ||
226 | printk("lr : %pS\n", (void *)lr); | ||
227 | } else { | ||
228 | printk("pc : %016llx\n", regs->pc); | ||
229 | printk("lr : %016llx\n", lr); | ||
230 | } | ||
231 | |||
225 | printk("sp : %016llx\n", sp); | 232 | printk("sp : %016llx\n", sp); |
226 | 233 | ||
227 | i = top_reg; | 234 | i = top_reg; |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6618036ae6d4..9ae31f7e2243 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type, | |||
1419 | u64 addr = 0; | 1419 | u64 addr = 0; |
1420 | u32 ctrl = 0; | 1420 | u32 ctrl = 0; |
1421 | 1421 | ||
1422 | int err, idx = compat_ptrace_hbp_num_to_idx(num);; | 1422 | int err, idx = compat_ptrace_hbp_num_to_idx(num); |
1423 | 1423 | ||
1424 | if (num & 1) { | 1424 | if (num & 1) { |
1425 | err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); | 1425 | err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); |
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 76809ccd309c..d5718a060672 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c | |||
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) | |||
59 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 59 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
60 | if (tsk->ret_stack && | 60 | if (tsk->ret_stack && |
61 | (frame->pc == (unsigned long)return_to_handler)) { | 61 | (frame->pc == (unsigned long)return_to_handler)) { |
62 | if (WARN_ON_ONCE(frame->graph == -1)) | ||
63 | return -EINVAL; | ||
64 | if (frame->graph < -1) | ||
65 | frame->graph += FTRACE_NOTRACE_DEPTH; | ||
66 | |||
62 | /* | 67 | /* |
63 | * This is a case where function graph tracer has | 68 | * This is a case where function graph tracer has |
64 | * modified a return address (LR) in a stack frame | 69 | * modified a return address (LR) in a stack frame |
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 8b8bbd3eaa52..a382b2a1b84e 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c | |||
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) | |||
57 | if (end < start || flags) | 57 | if (end < start || flags) |
58 | return -EINVAL; | 58 | return -EINVAL; |
59 | 59 | ||
60 | if (!access_ok(VERIFY_READ, start, end - start)) | 60 | if (!access_ok(VERIFY_READ, (const void __user *)start, end - start)) |
61 | return -EFAULT; | 61 | return -EFAULT; |
62 | 62 | ||
63 | return __do_compat_cache_op(start, end); | 63 | return __do_compat_cache_op(start, end); |
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index a4391280fba9..f258636273c9 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c | |||
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
52 | frame.fp = regs->regs[29]; | 52 | frame.fp = regs->regs[29]; |
53 | frame.pc = regs->pc; | 53 | frame.pc = regs->pc; |
54 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 54 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
55 | frame.graph = -1; /* no task info */ | 55 | frame.graph = current->curr_ret_stack; |
56 | #endif | 56 | #endif |
57 | do { | 57 | do { |
58 | int ret = unwind_frame(NULL, &frame); | 58 | int ret = unwind_frame(NULL, &frame); |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index bbb0fde2780e..eb2d15147e8d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -57,7 +57,7 @@ static const char *handler[]= { | |||
57 | "Error" | 57 | "Error" |
58 | }; | 58 | }; |
59 | 59 | ||
60 | int show_unhandled_signals = 1; | 60 | int show_unhandled_signals = 0; |
61 | 61 | ||
62 | static void dump_backtrace_entry(unsigned long where) | 62 | static void dump_backtrace_entry(unsigned long where) |
63 | { | 63 | { |
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) | |||
526 | } | 526 | } |
527 | #endif | 527 | #endif |
528 | 528 | ||
529 | if (show_unhandled_signals_ratelimited()) { | ||
530 | pr_info("%s[%d]: syscall %d\n", current->comm, | ||
531 | task_pid_nr(current), regs->syscallno); | ||
532 | dump_instr("", regs); | ||
533 | if (user_mode(regs)) | ||
534 | __show_regs(regs); | ||
535 | } | ||
536 | |||
537 | return sys_ni_syscall(); | 529 | return sys_ni_syscall(); |
538 | } | 530 | } |
539 | 531 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3161b853f29e..84a019f55022 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -933,6 +933,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) | |||
933 | { | 933 | { |
934 | pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | | 934 | pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | |
935 | pgprot_val(mk_sect_prot(prot))); | 935 | pgprot_val(mk_sect_prot(prot))); |
936 | |||
937 | /* ioremap_page_range doesn't honour BBM */ | ||
938 | if (pud_present(READ_ONCE(*pudp))) | ||
939 | return 0; | ||
940 | |||
936 | BUG_ON(phys & ~PUD_MASK); | 941 | BUG_ON(phys & ~PUD_MASK); |
937 | set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot)); | 942 | set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot)); |
938 | return 1; | 943 | return 1; |
@@ -942,6 +947,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) | |||
942 | { | 947 | { |
943 | pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | | 948 | pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | |
944 | pgprot_val(mk_sect_prot(prot))); | 949 | pgprot_val(mk_sect_prot(prot))); |
950 | |||
951 | /* ioremap_page_range doesn't honour BBM */ | ||
952 | if (pmd_present(READ_ONCE(*pmdp))) | ||
953 | return 0; | ||
954 | |||
945 | BUG_ON(phys & ~PMD_MASK); | 955 | BUG_ON(phys & ~PMD_MASK); |
946 | set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot)); | 956 | set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot)); |
947 | return 1; | 957 | return 1; |
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 1d4f1da7c58f..a93350451e8e 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) | |||
250 | off = offsetof(struct bpf_array, map.max_entries); | 250 | off = offsetof(struct bpf_array, map.max_entries); |
251 | emit_a64_mov_i64(tmp, off, ctx); | 251 | emit_a64_mov_i64(tmp, off, ctx); |
252 | emit(A64_LDR32(tmp, r2, tmp), ctx); | 252 | emit(A64_LDR32(tmp, r2, tmp), ctx); |
253 | emit(A64_MOV(0, r3, r3), ctx); | ||
253 | emit(A64_CMP(0, r3, tmp), ctx); | 254 | emit(A64_CMP(0, r3, tmp), ctx); |
254 | emit(A64_B_(A64_COND_GE, jmp_offset), ctx); | 255 | emit(A64_B_(A64_COND_CS, jmp_offset), ctx); |
255 | 256 | ||
256 | /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) | 257 | /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) |
257 | * goto out; | 258 | * goto out; |
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) | |||
259 | */ | 260 | */ |
260 | emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); | 261 | emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); |
261 | emit(A64_CMP(1, tcc, tmp), ctx); | 262 | emit(A64_CMP(1, tcc, tmp), ctx); |
262 | emit(A64_B_(A64_COND_GT, jmp_offset), ctx); | 263 | emit(A64_B_(A64_COND_HI, jmp_offset), ctx); |
263 | emit(A64_ADD_I(1, tcc, tcc, 1), ctx); | 264 | emit(A64_ADD_I(1, tcc, tcc, 1), ctx); |
264 | 265 | ||
265 | /* prog = array->ptrs[index]; | 266 | /* prog = array->ptrs[index]; |
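The bpf_jit_comp.c hunk adds an explicit 32-bit move to zero-extend the tail-call index and switches the branches from signed (GE/GT) to unsigned (CS/HI) conditions, matching the C semantics the verifier assumes. In C terms, the check the JIT must emit reduces to the sketch below (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The index is a 32-bit unsigned value, so both bounds checks must be
 * unsigned; a signed compare would let 0x80000000 ("negative") slip past
 * the max_entries test. */
static bool tail_call_allowed(uint64_t r3, uint32_t max_entries,
			      uint32_t tail_call_cnt, uint32_t max_tail_calls)
{
	uint32_t index = (uint32_t)r3;	/* explicit zero-extension, like the added A64_MOV(0, r3, r3) */

	if (index >= max_entries)		/* unsigned compare -> B.CS */
		return false;
	if (tail_call_cnt > max_tail_calls)	/* unsigned compare -> B.HI */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", tail_call_allowed(0x80000000ULL, 16, 0, 32));	/* 0: out of bounds */
	printf("%d\n", tail_call_allowed(3, 16, 0, 32));		/* 1 */
	return 0;
}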
diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h index 905afeacfedf..06da9d49152a 100644 --- a/arch/cris/include/arch-v10/arch/bug.h +++ b/arch/cris/include/arch-v10/arch/bug.h | |||
@@ -44,18 +44,25 @@ struct bug_frame { | |||
44 | * not be used like this with newer versions of gcc. | 44 | * not be used like this with newer versions of gcc. |
45 | */ | 45 | */ |
46 | #define BUG() \ | 46 | #define BUG() \ |
47 | do { \ | ||
47 | __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ | 48 | __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ |
48 | "movu.w " __stringify(__LINE__) ",$r0\n\t"\ | 49 | "movu.w " __stringify(__LINE__) ",$r0\n\t"\ |
49 | "jump 0f\n\t" \ | 50 | "jump 0f\n\t" \ |
50 | ".section .rodata\n" \ | 51 | ".section .rodata\n" \ |
51 | "0:\t.string \"" __FILE__ "\"\n\t" \ | 52 | "0:\t.string \"" __FILE__ "\"\n\t" \ |
52 | ".previous") | 53 | ".previous"); \ |
54 | unreachable(); \ | ||
55 | } while (0) | ||
53 | #endif | 56 | #endif |
54 | 57 | ||
55 | #else | 58 | #else |
56 | 59 | ||
57 | /* This just causes an oops. */ | 60 | /* This just causes an oops. */ |
58 | #define BUG() (*(int *)0 = 0) | 61 | #define BUG() \ |
62 | do { \ | ||
63 | barrier_before_unreachable(); \ | ||
64 | __builtin_trap(); \ | ||
65 | } while (0) | ||
59 | 66 | ||
60 | #endif | 67 | #endif |
61 | 68 | ||
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h index bd3eeb8d1cfa..66b37a532765 100644 --- a/arch/ia64/include/asm/bug.h +++ b/arch/ia64/include/asm/bug.h | |||
@@ -4,7 +4,11 @@ | |||
4 | 4 | ||
5 | #ifdef CONFIG_BUG | 5 | #ifdef CONFIG_BUG |
6 | #define ia64_abort() __builtin_trap() | 6 | #define ia64_abort() __builtin_trap() |
7 | #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) | 7 | #define BUG() do { \ |
8 | printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ | ||
9 | barrier_before_unreachable(); \ | ||
10 | ia64_abort(); \ | ||
11 | } while (0) | ||
8 | 12 | ||
9 | /* should this BUG be made generic? */ | 13 | /* should this BUG be made generic? */ |
10 | #define HAVE_ARCH_BUG | 14 | #define HAVE_ARCH_BUG |
diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h index b7e2bf1ba4a6..275dca1435bf 100644 --- a/arch/m68k/include/asm/bug.h +++ b/arch/m68k/include/asm/bug.h | |||
@@ -8,16 +8,19 @@ | |||
8 | #ifndef CONFIG_SUN3 | 8 | #ifndef CONFIG_SUN3 |
9 | #define BUG() do { \ | 9 | #define BUG() do { \ |
10 | pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ | 10 | pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ |
11 | barrier_before_unreachable(); \ | ||
11 | __builtin_trap(); \ | 12 | __builtin_trap(); \ |
12 | } while (0) | 13 | } while (0) |
13 | #else | 14 | #else |
14 | #define BUG() do { \ | 15 | #define BUG() do { \ |
15 | pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ | 16 | pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ |
17 | barrier_before_unreachable(); \ | ||
16 | panic("BUG!"); \ | 18 | panic("BUG!"); \ |
17 | } while (0) | 19 | } while (0) |
18 | #endif | 20 | #endif |
19 | #else | 21 | #else |
20 | #define BUG() do { \ | 22 | #define BUG() do { \ |
23 | barrier_before_unreachable(); \ | ||
21 | __builtin_trap(); \ | 24 | __builtin_trap(); \ |
22 | } while (0) | 25 | } while (0) |
23 | #endif | 26 | #endif |
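The CRIS, ia64 and m68k hunks all place barrier_before_unreachable() (plus unreachable() on CRIS) ahead of the trapping statement, so the compiler keeps the diagnostic attached to each BUG() site instead of folding the unreachable tail away. A hedged user-space sketch of the shape such a macro takes; MY_BUG, my_report and my_barrier are stand-ins, not kernel interfaces:

#include <stdio.h>

#define my_report()	fprintf(stderr, "BUG at %s:%d!\n", __FILE__, __LINE__)
#define my_barrier()	__asm__ __volatile__("" : : : "memory")	/* stand-in for barrier_before_unreachable() */

#define MY_BUG() do {							\
	my_report();							\
	my_barrier();	/* keep the report pinned before the trap */	\
	__builtin_trap();						\
} while (0)

int main(void)
{
	MY_BUG();	/* terminates with an illegal-instruction trap */
}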
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 1bd5c4f00d19..c22da16d67b8 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile | |||
@@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS | |||
126 | 126 | ||
127 | quiet_cmd_cpp_its_S = ITS $@ | 127 | quiet_cmd_cpp_its_S = ITS $@ |
128 | cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ | 128 | cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ |
129 | -D__ASSEMBLY__ \ | ||
129 | -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ | 130 | -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ |
130 | -DVMLINUX_BINARY="\"$(3)\"" \ | 131 | -DVMLINUX_BINARY="\"$(3)\"" \ |
131 | -DVMLINUX_COMPRESSION="\"$(2)\"" \ | 132 | -DVMLINUX_COMPRESSION="\"$(2)\"" \ |
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 946681db8dc3..9a0fa66b81ac 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h | |||
@@ -86,7 +86,6 @@ struct compat_flock { | |||
86 | compat_off_t l_len; | 86 | compat_off_t l_len; |
87 | s32 l_sysid; | 87 | s32 l_sysid; |
88 | compat_pid_t l_pid; | 88 | compat_pid_t l_pid; |
89 | short __unused; | ||
90 | s32 pad[4]; | 89 | s32 pad[4]; |
91 | }; | 90 | }; |
92 | 91 | ||
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 3742508cc534..bd5ce31936f5 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long); | |||
26 | void flush_kernel_icache_range_asm(unsigned long, unsigned long); | 26 | void flush_kernel_icache_range_asm(unsigned long, unsigned long); |
27 | void flush_user_dcache_range_asm(unsigned long, unsigned long); | 27 | void flush_user_dcache_range_asm(unsigned long, unsigned long); |
28 | void flush_kernel_dcache_range_asm(unsigned long, unsigned long); | 28 | void flush_kernel_dcache_range_asm(unsigned long, unsigned long); |
29 | void purge_kernel_dcache_range_asm(unsigned long, unsigned long); | ||
29 | void flush_kernel_dcache_page_asm(void *); | 30 | void flush_kernel_dcache_page_asm(void *); |
30 | void flush_kernel_icache_page(void *); | 31 | void flush_kernel_icache_page(void *); |
31 | 32 | ||
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 0e6ab6e4a4e9..2dbe5580a1a4 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency; | |||
316 | #define parisc_requires_coherency() (0) | 316 | #define parisc_requires_coherency() (0) |
317 | #endif | 317 | #endif |
318 | 318 | ||
319 | extern int running_on_qemu; | ||
320 | |||
319 | #endif /* __ASSEMBLY__ */ | 321 | #endif /* __ASSEMBLY__ */ |
320 | 322 | ||
321 | #endif /* __ASM_PARISC_PROCESSOR_H */ | 323 | #endif /* __ASM_PARISC_PROCESSOR_H */ |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 19c0c141bc3f..79089778725b 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page); | |||
465 | int __flush_tlb_range(unsigned long sid, unsigned long start, | 465 | int __flush_tlb_range(unsigned long sid, unsigned long start, |
466 | unsigned long end) | 466 | unsigned long end) |
467 | { | 467 | { |
468 | unsigned long flags, size; | 468 | unsigned long flags; |
469 | 469 | ||
470 | size = (end - start); | 470 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
471 | if (size >= parisc_tlb_flush_threshold) { | 471 | end - start >= parisc_tlb_flush_threshold) { |
472 | flush_tlb_all(); | 472 | flush_tlb_all(); |
473 | return 1; | 473 | return 1; |
474 | } | 474 | } |
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm) | |||
539 | struct vm_area_struct *vma; | 539 | struct vm_area_struct *vma; |
540 | pgd_t *pgd; | 540 | pgd_t *pgd; |
541 | 541 | ||
542 | /* Flush the TLB to avoid speculation if coherency is required. */ | ||
543 | if (parisc_requires_coherency()) | ||
544 | flush_tlb_all(); | ||
545 | |||
546 | /* Flushing the whole cache on each cpu takes forever on | 542 | /* Flushing the whole cache on each cpu takes forever on |
547 | rp3440, etc. So, avoid it if the mm isn't too big. */ | 543 | rp3440, etc. So, avoid it if the mm isn't too big. */ |
548 | if (mm_total_size(mm) >= parisc_cache_flush_threshold) { | 544 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
545 | mm_total_size(mm) >= parisc_cache_flush_threshold) { | ||
546 | flush_tlb_all(); | ||
549 | flush_cache_all(); | 547 | flush_cache_all(); |
550 | return; | 548 | return; |
551 | } | 549 | } |
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm) | |||
553 | if (mm->context == mfsp(3)) { | 551 | if (mm->context == mfsp(3)) { |
554 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 552 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
555 | flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); | 553 | flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); |
556 | if ((vma->vm_flags & VM_EXEC) == 0) | 554 | if (vma->vm_flags & VM_EXEC) |
557 | continue; | 555 | flush_user_icache_range_asm(vma->vm_start, vma->vm_end); |
558 | flush_user_icache_range_asm(vma->vm_start, vma->vm_end); | 556 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); |
559 | } | 557 | } |
560 | return; | 558 | return; |
561 | } | 559 | } |
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm) | |||
581 | void flush_cache_range(struct vm_area_struct *vma, | 579 | void flush_cache_range(struct vm_area_struct *vma, |
582 | unsigned long start, unsigned long end) | 580 | unsigned long start, unsigned long end) |
583 | { | 581 | { |
584 | BUG_ON(!vma->vm_mm->context); | 582 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
585 | 583 | end - start >= parisc_cache_flush_threshold) { | |
586 | /* Flush the TLB to avoid speculation if coherency is required. */ | ||
587 | if (parisc_requires_coherency()) | ||
588 | flush_tlb_range(vma, start, end); | 584 | flush_tlb_range(vma, start, end); |
589 | |||
590 | if ((end - start) >= parisc_cache_flush_threshold | ||
591 | || vma->vm_mm->context != mfsp(3)) { | ||
592 | flush_cache_all(); | 585 | flush_cache_all(); |
593 | return; | 586 | return; |
594 | } | 587 | } |
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma, | |||
596 | flush_user_dcache_range_asm(start, end); | 589 | flush_user_dcache_range_asm(start, end); |
597 | if (vma->vm_flags & VM_EXEC) | 590 | if (vma->vm_flags & VM_EXEC) |
598 | flush_user_icache_range_asm(start, end); | 591 | flush_user_icache_range_asm(start, end); |
592 | flush_tlb_range(vma, start, end); | ||
599 | } | 593 | } |
600 | 594 | ||
601 | void | 595 | void |
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
604 | BUG_ON(!vma->vm_mm->context); | 598 | BUG_ON(!vma->vm_mm->context); |
605 | 599 | ||
606 | if (pfn_valid(pfn)) { | 600 | if (pfn_valid(pfn)) { |
607 | if (parisc_requires_coherency()) | 601 | flush_tlb_page(vma, vmaddr); |
608 | flush_tlb_page(vma, vmaddr); | ||
609 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); | 602 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
610 | } | 603 | } |
611 | } | 604 | } |
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
613 | void flush_kernel_vmap_range(void *vaddr, int size) | 606 | void flush_kernel_vmap_range(void *vaddr, int size) |
614 | { | 607 | { |
615 | unsigned long start = (unsigned long)vaddr; | 608 | unsigned long start = (unsigned long)vaddr; |
609 | unsigned long end = start + size; | ||
616 | 610 | ||
617 | if ((unsigned long)size > parisc_cache_flush_threshold) | 611 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
612 | (unsigned long)size >= parisc_cache_flush_threshold) { | ||
613 | flush_tlb_kernel_range(start, end); | ||
618 | flush_data_cache(); | 614 | flush_data_cache(); |
619 | else | 615 | return; |
620 | flush_kernel_dcache_range_asm(start, start + size); | 616 | } |
617 | |||
618 | flush_kernel_dcache_range_asm(start, end); | ||
619 | flush_tlb_kernel_range(start, end); | ||
621 | } | 620 | } |
622 | EXPORT_SYMBOL(flush_kernel_vmap_range); | 621 | EXPORT_SYMBOL(flush_kernel_vmap_range); |
623 | 622 | ||
624 | void invalidate_kernel_vmap_range(void *vaddr, int size) | 623 | void invalidate_kernel_vmap_range(void *vaddr, int size) |
625 | { | 624 | { |
626 | unsigned long start = (unsigned long)vaddr; | 625 | unsigned long start = (unsigned long)vaddr; |
626 | unsigned long end = start + size; | ||
627 | 627 | ||
628 | if ((unsigned long)size > parisc_cache_flush_threshold) | 628 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && |
629 | (unsigned long)size >= parisc_cache_flush_threshold) { | ||
630 | flush_tlb_kernel_range(start, end); | ||
629 | flush_data_cache(); | 631 | flush_data_cache(); |
630 | else | 632 | return; |
631 | flush_kernel_dcache_range_asm(start, start + size); | 633 | } |
634 | |||
635 | purge_kernel_dcache_range_asm(start, end); | ||
636 | flush_tlb_kernel_range(start, end); | ||
632 | } | 637 | } |
633 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); | 638 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); |
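The cache.c hunks above converge on one policy: fall back to a whole-cache (and whole-TLB) flush only when the range is at least parisc_cache_flush_threshold and a global flush is safe to issue (not SMP with interrupts disabled); otherwise flush just the range and its TLB entries. A compact sketch of that decision, with stand-in helpers and an illustrative threshold:

#include <stdbool.h>
#include <stdio.h>

static unsigned long cache_flush_threshold = 0x10000;	/* illustrative value */

static void flush_whole_cache(void)	{ puts("flush whole cache + TLB"); }
static void flush_range(unsigned long s, unsigned long e)
{
	printf("flush %#lx-%#lx + TLB range\n", s, e);
}

/* global_flush_safe stands in for "!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()" */
static void flush_kernel_range(unsigned long start, unsigned long end,
			       bool global_flush_safe)
{
	if (global_flush_safe && end - start >= cache_flush_threshold) {
		flush_whole_cache();
		return;
	}
	flush_range(start, end);
}

int main(void)
{
	flush_kernel_range(0x1000, 0x2000, true);	/* small range: targeted flush */
	flush_kernel_range(0x1000, 0x200000, true);	/* large range: full flush */
	return 0;
}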
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index bbbe360b458f..fbb4e43fda05 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S | |||
@@ -138,6 +138,16 @@ $pgt_fill_loop: | |||
138 | std %dp,0x18(%r10) | 138 | std %dp,0x18(%r10) |
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef CONFIG_64BIT | ||
142 | /* Get PDCE_PROC for monarch CPU. */ | ||
143 | #define MEM_PDC_LO 0x388 | ||
144 | #define MEM_PDC_HI 0x35C | ||
145 | ldw MEM_PDC_LO(%r0),%r3 | ||
146 | ldw MEM_PDC_HI(%r0),%r10 | ||
147 | depd %r10, 31, 32, %r3 /* move to upper word */ | ||
148 | #endif | ||
149 | |||
150 | |||
141 | #ifdef CONFIG_SMP | 151 | #ifdef CONFIG_SMP |
142 | /* Set the smp rendezvous address into page zero. | 152 | /* Set the smp rendezvous address into page zero. |
143 | ** It would be safer to do this in init_smp_config() but | 153 | ** It would be safer to do this in init_smp_config() but |
@@ -196,12 +206,6 @@ common_stext: | |||
196 | ** Someday, palo might not do this for the Monarch either. | 206 | ** Someday, palo might not do this for the Monarch either. |
197 | */ | 207 | */ |
198 | 2: | 208 | 2: |
199 | #define MEM_PDC_LO 0x388 | ||
200 | #define MEM_PDC_HI 0x35C | ||
201 | ldw MEM_PDC_LO(%r0),%r3 | ||
202 | ldw MEM_PDC_HI(%r0),%r6 | ||
203 | depd %r6, 31, 32, %r3 /* move to upper word */ | ||
204 | |||
205 | mfctl %cr30,%r6 /* PCX-W2 firmware bug */ | 209 | mfctl %cr30,%r6 /* PCX-W2 firmware bug */ |
206 | 210 | ||
207 | ldo PDC_PSW(%r0),%arg0 /* 21 */ | 211 | ldo PDC_PSW(%r0),%arg0 /* 21 */ |
@@ -268,6 +272,8 @@ $install_iva: | |||
268 | aligned_rfi: | 272 | aligned_rfi: |
269 | pcxt_ssm_bug | 273 | pcxt_ssm_bug |
270 | 274 | ||
275 | copy %r3, %arg0 /* PDCE_PROC for smp_callin() */ | ||
276 | |||
271 | rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ | 277 | rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ |
272 | /* Don't need NOPs, have 8 compliant insn before rfi */ | 278 | /* Don't need NOPs, have 8 compliant insn before rfi */ |
273 | 279 | ||
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 2d40c4ff3f69..67b0f7532e83 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) | |||
1110 | .procend | 1110 | .procend |
1111 | ENDPROC_CFI(flush_kernel_dcache_range_asm) | 1111 | ENDPROC_CFI(flush_kernel_dcache_range_asm) |
1112 | 1112 | ||
1113 | ENTRY_CFI(purge_kernel_dcache_range_asm) | ||
1114 | .proc | ||
1115 | .callinfo NO_CALLS | ||
1116 | .entry | ||
1117 | |||
1118 | ldil L%dcache_stride, %r1 | ||
1119 | ldw R%dcache_stride(%r1), %r23 | ||
1120 | ldo -1(%r23), %r21 | ||
1121 | ANDCM %r26, %r21, %r26 | ||
1122 | |||
1123 | 1: cmpb,COND(<<),n %r26, %r25,1b | ||
1124 | pdc,m %r23(%r26) | ||
1125 | |||
1126 | sync | ||
1127 | syncdma | ||
1128 | bv %r0(%r2) | ||
1129 | nop | ||
1130 | .exit | ||
1131 | |||
1132 | .procend | ||
1133 | ENDPROC_CFI(purge_kernel_dcache_range_asm) | ||
1134 | |||
1113 | ENTRY_CFI(flush_user_icache_range_asm) | 1135 | ENTRY_CFI(flush_user_icache_range_asm) |
1114 | .proc | 1136 | .proc |
1115 | .callinfo NO_CALLS | 1137 | .callinfo NO_CALLS |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 30c28ab14540..4065b5e48c9d 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -292,10 +292,15 @@ smp_cpu_init(int cpunum) | |||
292 | * Slaves start using C here. Indirectly called from smp_slave_stext. | 292 | * Slaves start using C here. Indirectly called from smp_slave_stext. |
293 | * Do what start_kernel() and main() do for boot strap processor (aka monarch) | 293 | * Do what start_kernel() and main() do for boot strap processor (aka monarch) |
294 | */ | 294 | */ |
295 | void __init smp_callin(void) | 295 | void __init smp_callin(unsigned long pdce_proc) |
296 | { | 296 | { |
297 | int slave_id = cpu_now_booting; | 297 | int slave_id = cpu_now_booting; |
298 | 298 | ||
299 | #ifdef CONFIG_64BIT | ||
300 | WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32 | ||
301 | | PAGE0->mem_pdc) != pdce_proc); | ||
302 | #endif | ||
303 | |||
299 | smp_cpu_init(slave_id); | 304 | smp_cpu_init(slave_id); |
300 | preempt_disable(); | 305 | preempt_disable(); |
301 | 306 | ||
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4b8fd6dc22da..f7e684560186 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
76 | next_tick = cpuinfo->it_value; | 76 | next_tick = cpuinfo->it_value; |
77 | 77 | ||
78 | /* Calculate how many ticks have elapsed. */ | 78 | /* Calculate how many ticks have elapsed. */ |
79 | now = mfctl(16); | ||
79 | do { | 80 | do { |
80 | ++ticks_elapsed; | 81 | ++ticks_elapsed; |
81 | next_tick += cpt; | 82 | next_tick += cpt; |
82 | now = mfctl(16); | ||
83 | } while (next_tick - now > cpt); | 83 | } while (next_tick - now > cpt); |
84 | 84 | ||
85 | /* Store (in CR16 cycles) up to when we are accounting right now. */ | 85 | /* Store (in CR16 cycles) up to when we are accounting right now. */ |
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
103 | * if one or the other wrapped. If "now" is "bigger" we'll end up | 103 | * if one or the other wrapped. If "now" is "bigger" we'll end up |
104 | * with a very large unsigned number. | 104 | * with a very large unsigned number. |
105 | */ | 105 | */ |
106 | while (next_tick - mfctl(16) > cpt) | 106 | now = mfctl(16); |
107 | while (next_tick - now > cpt) | ||
107 | next_tick += cpt; | 108 | next_tick += cpt; |
108 | 109 | ||
109 | /* Program the IT when to deliver the next interrupt. | 110 | /* Program the IT when to deliver the next interrupt. |
110 | * Only bottom 32-bits of next_tick are writable in CR16! | 111 | * Only bottom 32-bits of next_tick are writable in CR16! |
111 | * Timer interrupt will be delivered at least a few hundred cycles | 112 | * Timer interrupt will be delivered at least a few hundred cycles |
112 | * after the IT fires, so if we are too close (<= 500 cycles) to the | 113 | * after the IT fires, so if we are too close (<= 8000 cycles) to the |
113 | * next cycle, simply skip it. | 114 | * next cycle, simply skip it. |
114 | */ | 115 | */ |
115 | if (next_tick - mfctl(16) <= 500) | 116 | if (next_tick - now <= 8000) |
116 | next_tick += cpt; | 117 | next_tick += cpt; |
117 | mtctl(next_tick, 16); | 118 | mtctl(next_tick, 16); |
118 | 119 | ||
@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void) | |||
248 | * different sockets, so mark them unstable and lower rating on | 249 | * different sockets, so mark them unstable and lower rating on |
249 | * multi-socket SMP systems. | 250 | * multi-socket SMP systems. |
250 | */ | 251 | */ |
251 | if (num_online_cpus() > 1) { | 252 | if (num_online_cpus() > 1 && !running_on_qemu) { |
252 | int cpu; | 253 | int cpu; |
253 | unsigned long cpu0_loc; | 254 | unsigned long cpu0_loc; |
254 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; | 255 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; |
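The time.c hunk samples CR16 once before the catch-up loop and widens the "too close" window from 500 to 8000 cycles. The reprogramming logic reduces to the sketch below (user space, with the CR16 comparator, the sampled counter and clocktick passed as plain parameters):

#include <stdint.h>
#include <stdio.h>

static uint32_t program_next_tick(uint32_t next_tick, uint32_t now, uint32_t cpt)
{
	/* catch up on missed periods; unsigned subtraction handles wrap-around */
	while (next_tick - now > cpt)
		next_tick += cpt;

	/* within 8000 cycles of "now"? too close to arm reliably, skip a period */
	if (next_tick - now <= 8000)
		next_tick += cpt;

	return next_tick;	/* the value the kernel writes back with mtctl(..., 16) */
}

int main(void)
{
	printf("%u\n", program_next_tick(1000, 250000, 100000));	/* -> 301000 */
	return 0;
}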
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 48f41399fc0b..cab32ee824d2 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -629,7 +629,12 @@ void __init mem_init(void) | |||
629 | #endif | 629 | #endif |
630 | 630 | ||
631 | mem_init_print_info(NULL); | 631 | mem_init_print_info(NULL); |
632 | #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ | 632 | |
633 | #if 0 | ||
634 | /* | ||
635 | * Do not expose the virtual kernel memory layout to userspace. | ||
636 | * But keep code for debugging purposes. | ||
637 | */ | ||
633 | printk("virtual kernel memory layout:\n" | 638 | printk("virtual kernel memory layout:\n" |
634 | " vmalloc : 0x%px - 0x%px (%4ld MB)\n" | 639 | " vmalloc : 0x%px - 0x%px (%4ld MB)\n" |
635 | " memory : 0x%px - 0x%px (%4ld MB)\n" | 640 | " memory : 0x%px - 0x%px (%4ld MB)\n" |
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 511acfd7ab0d..535add3f7791 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h | |||
@@ -52,7 +52,7 @@ | |||
52 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) | 52 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) |
53 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) | 53 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) |
54 | #define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000) | 54 | #define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000) |
55 | #define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000400000000) | 55 | #define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000) |
56 | 56 | ||
57 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
58 | 58 | ||
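The firmware.h one-liner matters because FW_FEATURE_DRC_INFO previously shared its mask with FW_FEATURE_DRMEM_V2, so testing either flag also matched the other. A tiny sketch of the collision; the two constants are copied from the hunk, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define FEAT_DRMEM_V2	0x0000000400000000ULL
#define FEAT_DRC_INFO	0x0000000800000000ULL	/* distinct bit after the fix (was 0x..04..) */

int main(void)
{
	uint64_t fw = FEAT_DRMEM_V2;	/* firmware advertised only drmem v2 */

	printf("drmem_v2: %d\n", !!(fw & FEAT_DRMEM_V2));	/* 1 */
	printf("drc_info: %d\n", !!(fw & FEAT_DRC_INFO));	/* 0 now; 1 with the old shared mask */
	return 0;
}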
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index beea2182d754..0c0b66fc5bfb 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata) | |||
384 | eeh_pcid_put(dev); | 384 | eeh_pcid_put(dev); |
385 | pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); | 385 | pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED); |
386 | #ifdef CONFIG_PCI_IOV | 386 | #ifdef CONFIG_PCI_IOV |
387 | eeh_ops->notify_resume(eeh_dev_to_pdn(edev)); | 387 | if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev)) |
388 | eeh_ops->notify_resume(eeh_dev_to_pdn(edev)); | ||
388 | #endif | 389 | #endif |
389 | return NULL; | 390 | return NULL; |
390 | } | 391 | } |
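The eeh_driver.c hunk guards the notify_resume() call: the hook is only provided by some backends and eeh_dev_to_pdn() can return NULL, so both must be checked before the indirect call. The same defensive pattern in isolation (types and names are stand-ins):

#include <stddef.h>
#include <stdio.h>

struct backend_ops {
	void (*notify_resume)(const char *dev);	/* optional hook */
};

static void guarded_resume(const struct backend_ops *ops, const char *dev)
{
	/* call only when the hook exists and its argument could be resolved */
	if (ops->notify_resume && dev)
		ops->notify_resume(dev);
}

static void pseries_notify(const char *dev) { printf("resume %s\n", dev); }

int main(void)
{
	struct backend_ops with = { .notify_resume = pseries_notify };
	struct backend_ops without = { 0 };

	guarded_resume(&with, "0000:01:00.0");
	guarded_resume(&without, "0000:01:00.0");	/* safely skipped */
	return 0;
}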
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index adf044daafd7..d22c41c26bb3 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { | |||
874 | .mmu = 0, | 874 | .mmu = 0, |
875 | .hash_ext = 0, | 875 | .hash_ext = 0, |
876 | .radix_ext = 0, | 876 | .radix_ext = 0, |
877 | .byte22 = OV5_FEAT(OV5_DRC_INFO), | 877 | .byte22 = 0, |
878 | }, | 878 | }, |
879 | 879 | ||
880 | /* option vector 6: IBM PAPR hints */ | 880 | /* option vector 6: IBM PAPR hints */ |
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index f0f5cd4d2fe7..f9818d7d3381 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c | |||
@@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) | |||
188 | if (!qpage) { | 188 | if (!qpage) { |
189 | pr_err("Failed to allocate queue %d for VCPU %d\n", | 189 | pr_err("Failed to allocate queue %d for VCPU %d\n", |
190 | prio, xc->server_num); | 190 | prio, xc->server_num); |
191 | return -ENOMEM;; | 191 | return -ENOMEM; |
192 | } | 192 | } |
193 | memset(qpage, 0, 1 << xive->q_order); | 193 | memset(qpage, 0, 1 << xive->q_order); |
194 | 194 | ||
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index 916844f99c64..3f1803672c9b 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c | |||
@@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell, | |||
98 | dr_cell->base_addr = cpu_to_be64(lmb->base_addr); | 98 | dr_cell->base_addr = cpu_to_be64(lmb->base_addr); |
99 | dr_cell->drc_index = cpu_to_be32(lmb->drc_index); | 99 | dr_cell->drc_index = cpu_to_be32(lmb->drc_index); |
100 | dr_cell->aa_index = cpu_to_be32(lmb->aa_index); | 100 | dr_cell->aa_index = cpu_to_be32(lmb->aa_index); |
101 | dr_cell->flags = cpu_to_be32(lmb->flags); | 101 | dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb)); |
102 | } | 102 | } |
103 | 103 | ||
104 | static int drmem_update_dt_v2(struct device_node *memory, | 104 | static int drmem_update_dt_v2(struct device_node *memory, |
@@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory, | |||
121 | } | 121 | } |
122 | 122 | ||
123 | if (prev_lmb->aa_index != lmb->aa_index || | 123 | if (prev_lmb->aa_index != lmb->aa_index || |
124 | prev_lmb->flags != lmb->flags) | 124 | drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) |
125 | lmb_sets++; | 125 | lmb_sets++; |
126 | 126 | ||
127 | prev_lmb = lmb; | 127 | prev_lmb = lmb; |
@@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory, | |||
150 | } | 150 | } |
151 | 151 | ||
152 | if (prev_lmb->aa_index != lmb->aa_index || | 152 | if (prev_lmb->aa_index != lmb->aa_index || |
153 | prev_lmb->flags != lmb->flags) { | 153 | drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) { |
154 | /* end of one set, start of another */ | 154 | /* end of one set, start of another */ |
155 | dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs); | 155 | dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs); |
156 | dr_cell++; | 156 | dr_cell++; |
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 872d1f6dd11e..a9636d8cba15 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c | |||
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
327 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); | 327 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); |
328 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); | 328 | PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); |
329 | break; | 329 | break; |
330 | case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ | ||
331 | PPC_LWZ_OFFS(r_A, r_skb, K); | ||
332 | break; | ||
330 | case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ | 333 | case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ |
331 | PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); | 334 | PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); |
332 | break; | 335 | break; |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 496e47696ed0..a6c92c78c9b2 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |||
1854 | s64 rc; | 1854 | s64 rc; |
1855 | 1855 | ||
1856 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | 1856 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) |
1857 | return -ENODEV;; | 1857 | return -ENODEV; |
1858 | 1858 | ||
1859 | pe = &phb->ioda.pe_array[pdn->pe_number]; | 1859 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
1860 | if (pe->tce_bypass_enabled) { | 1860 | if (pe->tce_bypass_enabled) { |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 4fb21e17504a..092715b9674b 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void) | |||
80 | if (np && of_property_read_bool(np, "disabled")) | 80 | if (np && of_property_read_bool(np, "disabled")) |
81 | enable--; | 81 | enable--; |
82 | 82 | ||
83 | np = of_get_child_by_name(fw_features, "speculation-policy-favor-security"); | ||
84 | if (np && of_property_read_bool(np, "disabled")) | ||
85 | enable = 0; | ||
86 | |||
83 | of_node_put(np); | 87 | of_node_put(np); |
84 | of_node_put(fw_features); | 88 | of_node_put(fw_features); |
85 | } | 89 | } |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 372d7ada1a0c..1a527625acf7 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void) | |||
482 | if (types == L1D_FLUSH_NONE) | 482 | if (types == L1D_FLUSH_NONE) |
483 | types = L1D_FLUSH_FALLBACK; | 483 | types = L1D_FLUSH_FALLBACK; |
484 | 484 | ||
485 | if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) | 485 | if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) || |
486 | (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))) | ||
486 | enable = false; | 487 | enable = false; |
487 | } else { | 488 | } else { |
488 | /* Default to fallback if case hcall is not available */ | 489 | /* Default to fallback if case hcall is not available */ |
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index b6722c246d9c..04807c7f64cc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig | |||
@@ -8,7 +8,6 @@ config RISCV | |||
8 | select OF | 8 | select OF |
9 | select OF_EARLY_FLATTREE | 9 | select OF_EARLY_FLATTREE |
10 | select OF_IRQ | 10 | select OF_IRQ |
11 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | ||
12 | select ARCH_WANT_FRAME_POINTERS | 11 | select ARCH_WANT_FRAME_POINTERS |
13 | select CLONE_BACKWARDS | 12 | select CLONE_BACKWARDS |
14 | select COMMON_CLK | 13 | select COMMON_CLK |
@@ -20,7 +19,6 @@ config RISCV | |||
20 | select GENERIC_STRNLEN_USER | 19 | select GENERIC_STRNLEN_USER |
21 | select GENERIC_SMP_IDLE_THREAD | 20 | select GENERIC_SMP_IDLE_THREAD |
22 | select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A | 21 | select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A |
23 | select ARCH_WANT_OPTIONAL_GPIOLIB | ||
24 | select HAVE_MEMBLOCK | 22 | select HAVE_MEMBLOCK |
25 | select HAVE_MEMBLOCK_NODE_MAP | 23 | select HAVE_MEMBLOCK_NODE_MAP |
26 | select HAVE_DMA_API_DEBUG | 24 | select HAVE_DMA_API_DEBUG |
@@ -34,7 +32,6 @@ config RISCV | |||
34 | select HAVE_ARCH_TRACEHOOK | 32 | select HAVE_ARCH_TRACEHOOK |
35 | select MODULES_USE_ELF_RELA if MODULES | 33 | select MODULES_USE_ELF_RELA if MODULES |
36 | select THREAD_INFO_IN_TASK | 34 | select THREAD_INFO_IN_TASK |
37 | select RISCV_IRQ_INTC | ||
38 | select RISCV_TIMER | 35 | select RISCV_TIMER |
39 | 36 | ||
40 | config MMU | 37 | config MMU |
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index c0319cbf1eec..5510366d169a 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h | |||
@@ -34,9 +34,9 @@ | |||
34 | #define wmb() RISCV_FENCE(ow,ow) | 34 | #define wmb() RISCV_FENCE(ow,ow) |
35 | 35 | ||
36 | /* These barriers do not need to enforce ordering on devices, just memory. */ | 36 | /* These barriers do not need to enforce ordering on devices, just memory. */ |
37 | #define smp_mb() RISCV_FENCE(rw,rw) | 37 | #define __smp_mb() RISCV_FENCE(rw,rw) |
38 | #define smp_rmb() RISCV_FENCE(r,r) | 38 | #define __smp_rmb() RISCV_FENCE(r,r) |
39 | #define smp_wmb() RISCV_FENCE(w,w) | 39 | #define __smp_wmb() RISCV_FENCE(w,w) |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * This is a very specific barrier: it's currently only used in two places in | 42 | * This is a very specific barrier: it's currently only used in two places in |
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 87fc045be51f..56fa592cfa34 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S | |||
@@ -172,6 +172,9 @@ ENTRY(handle_exception) | |||
172 | move a1, sp /* pt_regs */ | 172 | move a1, sp /* pt_regs */ |
173 | tail do_IRQ | 173 | tail do_IRQ |
174 | 1: | 174 | 1: |
175 | /* Exceptions run with interrupts enabled */ | ||
176 | csrs sstatus, SR_SIE | ||
177 | |||
175 | /* Handle syscalls */ | 178 | /* Handle syscalls */ |
176 | li t0, EXC_SYSCALL | 179 | li t0, EXC_SYSCALL |
177 | beq s4, t0, handle_syscall | 180 | beq s4, t0, handle_syscall |
@@ -198,8 +201,6 @@ handle_syscall: | |||
198 | */ | 201 | */ |
199 | addi s2, s2, 0x4 | 202 | addi s2, s2, 0x4 |
200 | REG_S s2, PT_SEPC(sp) | 203 | REG_S s2, PT_SEPC(sp) |
201 | /* System calls run with interrupts enabled */ | ||
202 | csrs sstatus, SR_SIE | ||
203 | /* Trace syscalls, but only if requested by the user. */ | 204 | /* Trace syscalls, but only if requested by the user. */ |
204 | REG_L t0, TASK_TI_FLAGS(tp) | 205 | REG_L t0, TASK_TI_FLAGS(tp) |
205 | andi t0, t0, _TIF_SYSCALL_TRACE | 206 | andi t0, t0, _TIF_SYSCALL_TRACE |
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 226eeb190f90..6e07ed37bbff 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S | |||
@@ -64,7 +64,7 @@ ENTRY(_start) | |||
64 | /* Start the kernel */ | 64 | /* Start the kernel */ |
65 | mv a0, s0 | 65 | mv a0, s0 |
66 | mv a1, s1 | 66 | mv a1, s1 |
67 | call sbi_save | 67 | call parse_dtb |
68 | tail start_kernel | 68 | tail start_kernel |
69 | 69 | ||
70 | relocate: | 70 | relocate: |
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 09f7064e898c..c11f40c1b2a8 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
@@ -144,7 +144,7 @@ asmlinkage void __init setup_vm(void) | |||
144 | #endif | 144 | #endif |
145 | } | 145 | } |
146 | 146 | ||
147 | void __init sbi_save(unsigned int hartid, void *dtb) | 147 | void __init parse_dtb(unsigned int hartid, void *dtb) |
148 | { | 148 | { |
149 | early_init_dt_scan(__va(dtb)); | 149 | early_init_dt_scan(__va(dtb)); |
150 | } | 150 | } |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 9c7d70715862..07c6e81163bf 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -22,22 +22,6 @@ | |||
22 | #include "trace.h" | 22 | #include "trace.h" |
23 | #include "trace-s390.h" | 23 | #include "trace-s390.h" |
24 | 24 | ||
25 | |||
26 | static const intercept_handler_t instruction_handlers[256] = { | ||
27 | [0x01] = kvm_s390_handle_01, | ||
28 | [0x82] = kvm_s390_handle_lpsw, | ||
29 | [0x83] = kvm_s390_handle_diag, | ||
30 | [0xaa] = kvm_s390_handle_aa, | ||
31 | [0xae] = kvm_s390_handle_sigp, | ||
32 | [0xb2] = kvm_s390_handle_b2, | ||
33 | [0xb6] = kvm_s390_handle_stctl, | ||
34 | [0xb7] = kvm_s390_handle_lctl, | ||
35 | [0xb9] = kvm_s390_handle_b9, | ||
36 | [0xe3] = kvm_s390_handle_e3, | ||
37 | [0xe5] = kvm_s390_handle_e5, | ||
38 | [0xeb] = kvm_s390_handle_eb, | ||
39 | }; | ||
40 | |||
41 | u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) | 25 | u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) |
42 | { | 26 | { |
43 | struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; | 27 | struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; |
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu) | |||
129 | 113 | ||
130 | static int handle_instruction(struct kvm_vcpu *vcpu) | 114 | static int handle_instruction(struct kvm_vcpu *vcpu) |
131 | { | 115 | { |
132 | intercept_handler_t handler; | ||
133 | |||
134 | vcpu->stat.exit_instruction++; | 116 | vcpu->stat.exit_instruction++; |
135 | trace_kvm_s390_intercept_instruction(vcpu, | 117 | trace_kvm_s390_intercept_instruction(vcpu, |
136 | vcpu->arch.sie_block->ipa, | 118 | vcpu->arch.sie_block->ipa, |
137 | vcpu->arch.sie_block->ipb); | 119 | vcpu->arch.sie_block->ipb); |
138 | handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; | 120 | |
139 | if (handler) | 121 | switch (vcpu->arch.sie_block->ipa >> 8) { |
140 | return handler(vcpu); | 122 | case 0x01: |
141 | return -EOPNOTSUPP; | 123 | return kvm_s390_handle_01(vcpu); |
124 | case 0x82: | ||
125 | return kvm_s390_handle_lpsw(vcpu); | ||
126 | case 0x83: | ||
127 | return kvm_s390_handle_diag(vcpu); | ||
128 | case 0xaa: | ||
129 | return kvm_s390_handle_aa(vcpu); | ||
130 | case 0xae: | ||
131 | return kvm_s390_handle_sigp(vcpu); | ||
132 | case 0xb2: | ||
133 | return kvm_s390_handle_b2(vcpu); | ||
134 | case 0xb6: | ||
135 | return kvm_s390_handle_stctl(vcpu); | ||
136 | case 0xb7: | ||
137 | return kvm_s390_handle_lctl(vcpu); | ||
138 | case 0xb9: | ||
139 | return kvm_s390_handle_b9(vcpu); | ||
140 | case 0xe3: | ||
141 | return kvm_s390_handle_e3(vcpu); | ||
142 | case 0xe5: | ||
143 | return kvm_s390_handle_e5(vcpu); | ||
144 | case 0xeb: | ||
145 | return kvm_s390_handle_eb(vcpu); | ||
146 | default: | ||
147 | return -EOPNOTSUPP; | ||
148 | } | ||
142 | } | 149 | } |
143 | 150 | ||
144 | static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) | 151 | static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu) |
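The intercept.c hunk replaces an array of function pointers indexed by the instruction's first opcode byte with an explicit switch, removing an indirect call from the dispatch path. A reduced sketch of the same transformation with two stand-in handlers (the kernel dispatches to the kvm_s390_handle_*() family):

#include <stdio.h>

static int handle_diag(void)	{ puts("diag");	return 0; }
static int handle_sigp(void)	{ puts("sigp");	return 0; }

static int handle_instruction(unsigned int ipa)
{
	switch (ipa >> 8) {	/* dispatch on the instruction's first byte */
	case 0x83:
		return handle_diag();
	case 0xae:
		return handle_sigp();
	default:
		return -95;	/* stands in for -EOPNOTSUPP */
	}
}

int main(void)
{
	return handle_instruction(0x8300) ? 1 : 0;
}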
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index aabf46f5f883..b04616b57a94 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) | |||
169 | 169 | ||
170 | static int ckc_irq_pending(struct kvm_vcpu *vcpu) | 170 | static int ckc_irq_pending(struct kvm_vcpu *vcpu) |
171 | { | 171 | { |
172 | if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) | 172 | const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); |
173 | const u64 ckc = vcpu->arch.sie_block->ckc; | ||
174 | |||
175 | if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { | ||
176 | if ((s64)ckc >= (s64)now) | ||
177 | return 0; | ||
178 | } else if (ckc >= now) { | ||
173 | return 0; | 179 | return 0; |
180 | } | ||
174 | return ckc_interrupts_enabled(vcpu); | 181 | return ckc_interrupts_enabled(vcpu); |
175 | } | 182 | } |
176 | 183 | ||
@@ -187,12 +194,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) | |||
187 | return kvm_s390_get_cpu_timer(vcpu) >> 63; | 194 | return kvm_s390_get_cpu_timer(vcpu) >> 63; |
188 | } | 195 | } |
189 | 196 | ||
190 | static inline int is_ioirq(unsigned long irq_type) | ||
191 | { | ||
192 | return ((irq_type >= IRQ_PEND_IO_ISC_7) && | ||
193 | (irq_type <= IRQ_PEND_IO_ISC_0)); | ||
194 | } | ||
195 | |||
196 | static uint64_t isc_to_isc_bits(int isc) | 197 | static uint64_t isc_to_isc_bits(int isc) |
197 | { | 198 | { |
198 | return (0x80 >> isc) << 24; | 199 | return (0x80 >> isc) << 24; |
@@ -236,10 +237,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis | |||
236 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); | 237 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
237 | } | 238 | } |
238 | 239 | ||
239 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) | 240 | static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu) |
240 | { | 241 | { |
241 | return vcpu->kvm->arch.float_int.pending_irqs | | 242 | return vcpu->kvm->arch.float_int.pending_irqs | |
242 | vcpu->arch.local_int.pending_irqs | | 243 | vcpu->arch.local_int.pending_irqs; |
244 | } | ||
245 | |||
246 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) | ||
247 | { | ||
248 | return pending_irqs_no_gisa(vcpu) | | ||
243 | kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; | 249 | kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; |
244 | } | 250 | } |
245 | 251 | ||
@@ -337,7 +343,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
337 | 343 | ||
338 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) | 344 | static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) |
339 | { | 345 | { |
340 | if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) | 346 | if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK)) |
341 | return; | 347 | return; |
342 | else if (psw_ioint_disabled(vcpu)) | 348 | else if (psw_ioint_disabled(vcpu)) |
343 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); | 349 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); |
@@ -1011,24 +1017,6 @@ out: | |||
1011 | return rc; | 1017 | return rc; |
1012 | } | 1018 | } |
1013 | 1019 | ||
1014 | typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu); | ||
1015 | |||
1016 | static const deliver_irq_t deliver_irq_funcs[] = { | ||
1017 | [IRQ_PEND_MCHK_EX] = __deliver_machine_check, | ||
1018 | [IRQ_PEND_MCHK_REP] = __deliver_machine_check, | ||
1019 | [IRQ_PEND_PROG] = __deliver_prog, | ||
1020 | [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal, | ||
1021 | [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call, | ||
1022 | [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, | ||
1023 | [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, | ||
1024 | [IRQ_PEND_RESTART] = __deliver_restart, | ||
1025 | [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, | ||
1026 | [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, | ||
1027 | [IRQ_PEND_EXT_SERVICE] = __deliver_service, | ||
1028 | [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done, | ||
1029 | [IRQ_PEND_VIRTIO] = __deliver_virtio, | ||
1030 | }; | ||
1031 | |||
1032 | /* Check whether an external call is pending (deliverable or not) */ | 1020 | /* Check whether an external call is pending (deliverable or not) */ |
1033 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) | 1021 | int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) |
1034 | { | 1022 | { |
@@ -1066,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
1066 | 1054 | ||
1067 | static u64 __calculate_sltime(struct kvm_vcpu *vcpu) | 1055 | static u64 __calculate_sltime(struct kvm_vcpu *vcpu) |
1068 | { | 1056 | { |
1069 | u64 now, cputm, sltime = 0; | 1057 | const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); |
1058 | const u64 ckc = vcpu->arch.sie_block->ckc; | ||
1059 | u64 cputm, sltime = 0; | ||
1070 | 1060 | ||
1071 | if (ckc_interrupts_enabled(vcpu)) { | 1061 | if (ckc_interrupts_enabled(vcpu)) { |
1072 | now = kvm_s390_get_tod_clock_fast(vcpu->kvm); | 1062 | if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { |
1073 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); | 1063 | if ((s64)now < (s64)ckc) |
1074 | /* already expired or overflow? */ | 1064 | sltime = tod_to_ns((s64)ckc - (s64)now); |
1075 | if (!sltime || vcpu->arch.sie_block->ckc <= now) | 1065 | } else if (now < ckc) { |
1066 | sltime = tod_to_ns(ckc - now); | ||
1067 | } | ||
1068 | /* already expired */ | ||
1069 | if (!sltime) | ||
1076 | return 0; | 1070 | return 0; |
1077 | if (cpu_timer_interrupts_enabled(vcpu)) { | 1071 | if (cpu_timer_interrupts_enabled(vcpu)) { |
1078 | cputm = kvm_s390_get_cpu_timer(vcpu); | 1072 | cputm = kvm_s390_get_cpu_timer(vcpu); |
@@ -1192,7 +1186,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
1192 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | 1186 | int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) |
1193 | { | 1187 | { |
1194 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 1188 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
1195 | deliver_irq_t func; | ||
1196 | int rc = 0; | 1189 | int rc = 0; |
1197 | unsigned long irq_type; | 1190 | unsigned long irq_type; |
1198 | unsigned long irqs; | 1191 | unsigned long irqs; |
@@ -1212,16 +1205,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
1212 | while ((irqs = deliverable_irqs(vcpu)) && !rc) { | 1205 | while ((irqs = deliverable_irqs(vcpu)) && !rc) { |
1213 | /* bits are in the reverse order of interrupt priority */ | 1206 | /* bits are in the reverse order of interrupt priority */ |
1214 | irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT); | 1207 | irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT); |
1215 | if (is_ioirq(irq_type)) { | 1208 | switch (irq_type) { |
1209 | case IRQ_PEND_IO_ISC_0: | ||
1210 | case IRQ_PEND_IO_ISC_1: | ||
1211 | case IRQ_PEND_IO_ISC_2: | ||
1212 | case IRQ_PEND_IO_ISC_3: | ||
1213 | case IRQ_PEND_IO_ISC_4: | ||
1214 | case IRQ_PEND_IO_ISC_5: | ||
1215 | case IRQ_PEND_IO_ISC_6: | ||
1216 | case IRQ_PEND_IO_ISC_7: | ||
1216 | rc = __deliver_io(vcpu, irq_type); | 1217 | rc = __deliver_io(vcpu, irq_type); |
1217 | } else { | 1218 | break; |
1218 | func = deliver_irq_funcs[irq_type]; | 1219 | case IRQ_PEND_MCHK_EX: |
1219 | if (!func) { | 1220 | case IRQ_PEND_MCHK_REP: |
1220 | WARN_ON_ONCE(func == NULL); | 1221 | rc = __deliver_machine_check(vcpu); |
1221 | clear_bit(irq_type, &li->pending_irqs); | 1222 | break; |
1222 | continue; | 1223 | case IRQ_PEND_PROG: |
1223 | } | 1224 | rc = __deliver_prog(vcpu); |
1224 | rc = func(vcpu); | 1225 | break; |
1226 | case IRQ_PEND_EXT_EMERGENCY: | ||
1227 | rc = __deliver_emergency_signal(vcpu); | ||
1228 | break; | ||
1229 | case IRQ_PEND_EXT_EXTERNAL: | ||
1230 | rc = __deliver_external_call(vcpu); | ||
1231 | break; | ||
1232 | case IRQ_PEND_EXT_CLOCK_COMP: | ||
1233 | rc = __deliver_ckc(vcpu); | ||
1234 | break; | ||
1235 | case IRQ_PEND_EXT_CPU_TIMER: | ||
1236 | rc = __deliver_cpu_timer(vcpu); | ||
1237 | break; | ||
1238 | case IRQ_PEND_RESTART: | ||
1239 | rc = __deliver_restart(vcpu); | ||
1240 | break; | ||
1241 | case IRQ_PEND_SET_PREFIX: | ||
1242 | rc = __deliver_set_prefix(vcpu); | ||
1243 | break; | ||
1244 | case IRQ_PEND_PFAULT_INIT: | ||
1245 | rc = __deliver_pfault_init(vcpu); | ||
1246 | break; | ||
1247 | case IRQ_PEND_EXT_SERVICE: | ||
1248 | rc = __deliver_service(vcpu); | ||
1249 | break; | ||
1250 | case IRQ_PEND_PFAULT_DONE: | ||
1251 | rc = __deliver_pfault_done(vcpu); | ||
1252 | break; | ||
1253 | case IRQ_PEND_VIRTIO: | ||
1254 | rc = __deliver_virtio(vcpu); | ||
1255 | break; | ||
1256 | default: | ||
1257 | WARN_ONCE(1, "Unknown pending irq type %ld", irq_type); | ||
1258 | clear_bit(irq_type, &li->pending_irqs); | ||
1225 | } | 1259 | } |
1226 | } | 1260 | } |
1227 | 1261 | ||
@@ -1701,7 +1735,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) | |||
1701 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); | 1735 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); |
1702 | break; | 1736 | break; |
1703 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: | 1737 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
1704 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); | 1738 | if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa)) |
1739 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); | ||
1705 | break; | 1740 | break; |
1706 | default: | 1741 | default: |
1707 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); | 1742 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); |
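The clock-comparator hunks in interrupt.c above make the CKC check honour the sign-control bit in CR0: with the bit set, CKC and the current TOD are compared as signed values, otherwise as unsigned. A small sketch of just that comparison (constant copied from the hunk, the rest is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR0_CKC_SIGN_CONTROL 0x0020000000000000ULL

static bool ckc_pending(uint64_t cr0, uint64_t ckc, uint64_t now)
{
	if (cr0 & CR0_CKC_SIGN_CONTROL)
		return (int64_t)ckc < (int64_t)now;	/* signed comparison */
	return ckc < now;				/* unsigned comparison */
}

int main(void)
{
	uint64_t ckc = 0x8000000000000000ULL;	/* "negative" when viewed as signed */
	uint64_t now = 0x0000000000001000ULL;

	printf("unsigned view: %d\n", ckc_pending(0, ckc, now));			/* 0: not yet due */
	printf("signed view:   %d\n", ckc_pending(CR0_CKC_SIGN_CONTROL, ckc, now));	/* 1: already due */
	return 0;
}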
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba4c7092335a..77d7818130db 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void) | |||
179 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, | 179 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, |
180 | unsigned long end); | 180 | unsigned long end); |
181 | 181 | ||
182 | static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta) | ||
183 | { | ||
184 | u8 delta_idx = 0; | ||
185 | |||
186 | /* | ||
187 | * The TOD jumps by delta, we have to compensate this by adding | ||
188 | * -delta to the epoch. | ||
189 | */ | ||
190 | delta = -delta; | ||
191 | |||
192 | /* sign-extension - we're adding to signed values below */ | ||
193 | if ((s64)delta < 0) | ||
194 | delta_idx = -1; | ||
195 | |||
196 | scb->epoch += delta; | ||
197 | if (scb->ecd & ECD_MEF) { | ||
198 | scb->epdx += delta_idx; | ||
199 | if (scb->epoch < delta) | ||
200 | scb->epdx += 1; | ||
201 | } | ||
202 | } | ||
203 | |||
182 | /* | 204 | /* |
183 | * This callback is executed during stop_machine(). All CPUs are therefore | 205 | * This callback is executed during stop_machine(). All CPUs are therefore |
184 | * temporarily stopped. In order not to change guest behavior, we have to | 206 | * temporarily stopped. In order not to change guest behavior, we have to |
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, | |||
194 | unsigned long long *delta = v; | 216 | unsigned long long *delta = v; |
195 | 217 | ||
196 | list_for_each_entry(kvm, &vm_list, vm_list) { | 218 | list_for_each_entry(kvm, &vm_list, vm_list) { |
197 | kvm->arch.epoch -= *delta; | ||
198 | kvm_for_each_vcpu(i, vcpu, kvm) { | 219 | kvm_for_each_vcpu(i, vcpu, kvm) { |
199 | vcpu->arch.sie_block->epoch -= *delta; | 220 | kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); |
221 | if (i == 0) { | ||
222 | kvm->arch.epoch = vcpu->arch.sie_block->epoch; | ||
223 | kvm->arch.epdx = vcpu->arch.sie_block->epdx; | ||
224 | } | ||
200 | if (vcpu->arch.cputm_enabled) | 225 | if (vcpu->arch.cputm_enabled) |
201 | vcpu->arch.cputm_start += *delta; | 226 | vcpu->arch.cputm_start += *delta; |
202 | if (vcpu->arch.vsie_block) | 227 | if (vcpu->arch.vsie_block) |
203 | vcpu->arch.vsie_block->epoch -= *delta; | 228 | kvm_clock_sync_scb(vcpu->arch.vsie_block, |
229 | *delta); | ||
204 | } | 230 | } |
205 | } | 231 | } |
206 | return NOTIFY_OK; | 232 | return NOTIFY_OK; |
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) | |||
902 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) | 928 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) |
903 | return -EFAULT; | 929 | return -EFAULT; |
904 | 930 | ||
905 | if (test_kvm_facility(kvm, 139)) | 931 | if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) |
906 | kvm_s390_set_tod_clock_ext(kvm, &gtod); | ||
907 | else if (gtod.epoch_idx == 0) | ||
908 | kvm_s390_set_tod_clock(kvm, gtod.tod); | ||
909 | else | ||
910 | return -EINVAL; | 932 | return -EINVAL; |
933 | kvm_s390_set_tod_clock(kvm, &gtod); | ||
911 | 934 | ||
912 | VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", | 935 | VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", |
913 | gtod.epoch_idx, gtod.tod); | 936 | gtod.epoch_idx, gtod.tod); |
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) | |||
932 | 955 | ||
933 | static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) | 956 | static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) |
934 | { | 957 | { |
935 | u64 gtod; | 958 | struct kvm_s390_vm_tod_clock gtod = { 0 }; |
936 | 959 | ||
937 | if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) | 960 | if (copy_from_user(&gtod.tod, (void __user *)attr->addr, |
961 | sizeof(gtod.tod))) | ||
938 | return -EFAULT; | 962 | return -EFAULT; |
939 | 963 | ||
940 | kvm_s390_set_tod_clock(kvm, gtod); | 964 | kvm_s390_set_tod_clock(kvm, &gtod); |
941 | VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); | 965 | VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); |
942 | return 0; | 966 | return 0; |
943 | } | 967 | } |
944 | 968 | ||
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | |||
2389 | mutex_lock(&vcpu->kvm->lock); | 2413 | mutex_lock(&vcpu->kvm->lock); |
2390 | preempt_disable(); | 2414 | preempt_disable(); |
2391 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; | 2415 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; |
2416 | vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; | ||
2392 | preempt_enable(); | 2417 | preempt_enable(); |
2393 | mutex_unlock(&vcpu->kvm->lock); | 2418 | mutex_unlock(&vcpu->kvm->lock); |
2394 | if (!kvm_is_ucontrol(vcpu->kvm)) { | 2419 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
@@ -3021,8 +3046,8 @@ retry: | |||
3021 | return 0; | 3046 | return 0; |
3022 | } | 3047 | } |
3023 | 3048 | ||
3024 | void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | 3049 | void kvm_s390_set_tod_clock(struct kvm *kvm, |
3025 | const struct kvm_s390_vm_tod_clock *gtod) | 3050 | const struct kvm_s390_vm_tod_clock *gtod) |
3026 | { | 3051 | { |
3027 | struct kvm_vcpu *vcpu; | 3052 | struct kvm_vcpu *vcpu; |
3028 | struct kvm_s390_tod_clock_ext htod; | 3053 | struct kvm_s390_tod_clock_ext htod; |
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | |||
3034 | get_tod_clock_ext((char *)&htod); | 3059 | get_tod_clock_ext((char *)&htod); |
3035 | 3060 | ||
3036 | kvm->arch.epoch = gtod->tod - htod.tod; | 3061 | kvm->arch.epoch = gtod->tod - htod.tod; |
3037 | kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; | 3062 | kvm->arch.epdx = 0; |
3038 | 3063 | if (test_kvm_facility(kvm, 139)) { | |
3039 | if (kvm->arch.epoch > gtod->tod) | 3064 | kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; |
3040 | kvm->arch.epdx -= 1; | 3065 | if (kvm->arch.epoch > gtod->tod) |
3066 | kvm->arch.epdx -= 1; | ||
3067 | } | ||
3041 | 3068 | ||
3042 | kvm_s390_vcpu_block_all(kvm); | 3069 | kvm_s390_vcpu_block_all(kvm); |
3043 | kvm_for_each_vcpu(i, vcpu, kvm) { | 3070 | kvm_for_each_vcpu(i, vcpu, kvm) { |
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | |||
3050 | mutex_unlock(&kvm->lock); | 3077 | mutex_unlock(&kvm->lock); |
3051 | } | 3078 | } |
3052 | 3079 | ||
3053 | void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) | ||
3054 | { | ||
3055 | struct kvm_vcpu *vcpu; | ||
3056 | int i; | ||
3057 | |||
3058 | mutex_lock(&kvm->lock); | ||
3059 | preempt_disable(); | ||
3060 | kvm->arch.epoch = tod - get_tod_clock(); | ||
3061 | kvm_s390_vcpu_block_all(kvm); | ||
3062 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
3063 | vcpu->arch.sie_block->epoch = kvm->arch.epoch; | ||
3064 | kvm_s390_vcpu_unblock_all(kvm); | ||
3065 | preempt_enable(); | ||
3066 | mutex_unlock(&kvm->lock); | ||
3067 | } | ||
3068 | |||
3069 | /** | 3080 | /** |
3070 | * kvm_arch_fault_in_page - fault-in guest page if necessary | 3081 | * kvm_arch_fault_in_page - fault-in guest page if necessary |
3071 | * @vcpu: The corresponding virtual cpu | 3082 | * @vcpu: The corresponding virtual cpu |
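The kvm-s390.c hunks above fold kvm_s390_set_tod_clock_ext() and the old u64-only kvm_s390_set_tod_clock() into a single function that takes the full kvm_s390_vm_tod_clock structure and only touches epdx when facility 139 (multiple epochs) is available. The guest epoch is derived by a 128-bit subtraction with borrow; a hedged C sketch of just that arithmetic, with illustrative names (only the `epoch > guest_tod` borrow test comes from the hunk):

    #include <stdint.h>

    /*
     * Sketch: epoch = guest TOD - host TOD over a (idx:tod) 128-bit pair.
     * Unsigned underflow in the low word shows up as epoch > guest_tod,
     * in which case one is borrowed from the high byte.
     */
    static void tod_epoch(uint64_t guest_tod, uint8_t guest_idx,
                          uint64_t host_tod, uint8_t host_idx,
                          uint64_t *epoch, uint8_t *epdx)
    {
            *epoch = guest_tod - host_tod;
            *epdx  = guest_idx - host_idx;
            if (*epoch > guest_tod)        /* borrow out of the low 64 bits */
                    *epdx -= 1;
    }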
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index bd31b37b0e6f..f55ac0ef99ea 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -19,8 +19,6 @@ | |||
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sclp.h> | 20 | #include <asm/sclp.h> |
21 | 21 | ||
22 | typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); | ||
23 | |||
24 | /* Transactional Memory Execution related macros */ | 22 | /* Transactional Memory Execution related macros */ |
25 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) | 23 | #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) |
26 | #define TDB_FORMAT1 1 | 24 | #define TDB_FORMAT1 1 |
@@ -283,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); | |||
283 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); | 281 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); |
284 | 282 | ||
285 | /* implemented in kvm-s390.c */ | 283 | /* implemented in kvm-s390.c */ |
286 | void kvm_s390_set_tod_clock_ext(struct kvm *kvm, | 284 | void kvm_s390_set_tod_clock(struct kvm *kvm, |
287 | const struct kvm_s390_vm_tod_clock *gtod); | 285 | const struct kvm_s390_vm_tod_clock *gtod); |
288 | void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); | ||
289 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); | 286 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); |
290 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); | 287 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); |
291 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); | 288 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c4c4e157c036..f0b4185158af 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) | |||
85 | /* Handle SCK (SET CLOCK) interception */ | 85 | /* Handle SCK (SET CLOCK) interception */ |
86 | static int handle_set_clock(struct kvm_vcpu *vcpu) | 86 | static int handle_set_clock(struct kvm_vcpu *vcpu) |
87 | { | 87 | { |
88 | struct kvm_s390_vm_tod_clock gtod = { 0 }; | ||
88 | int rc; | 89 | int rc; |
89 | u8 ar; | 90 | u8 ar; |
90 | u64 op2, val; | 91 | u64 op2; |
91 | 92 | ||
92 | vcpu->stat.instruction_sck++; | 93 | vcpu->stat.instruction_sck++; |
93 | 94 | ||
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) | |||
97 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); | 98 | op2 = kvm_s390_get_base_disp_s(vcpu, &ar); |
98 | if (op2 & 7) /* Operand must be on a doubleword boundary */ | 99 | if (op2 & 7) /* Operand must be on a doubleword boundary */ |
99 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 100 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
100 | rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); | 101 | rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod)); |
101 | if (rc) | 102 | if (rc) |
102 | return kvm_s390_inject_prog_cond(vcpu, rc); | 103 | return kvm_s390_inject_prog_cond(vcpu, rc); |
103 | 104 | ||
104 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); | 105 | VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); |
105 | kvm_s390_set_tod_clock(vcpu->kvm, val); | 106 | kvm_s390_set_tod_clock(vcpu->kvm, &gtod); |
106 | 107 | ||
107 | kvm_s390_set_psw_cc(vcpu, 0); | 108 | kvm_s390_set_psw_cc(vcpu, 0); |
108 | return 0; | 109 | return 0; |
@@ -795,55 +796,60 @@ out: | |||
795 | return rc; | 796 | return rc; |
796 | } | 797 | } |
797 | 798 | ||
798 | static const intercept_handler_t b2_handlers[256] = { | ||
799 | [0x02] = handle_stidp, | ||
800 | [0x04] = handle_set_clock, | ||
801 | [0x10] = handle_set_prefix, | ||
802 | [0x11] = handle_store_prefix, | ||
803 | [0x12] = handle_store_cpu_address, | ||
804 | [0x14] = kvm_s390_handle_vsie, | ||
805 | [0x21] = handle_ipte_interlock, | ||
806 | [0x29] = handle_iske, | ||
807 | [0x2a] = handle_rrbe, | ||
808 | [0x2b] = handle_sske, | ||
809 | [0x2c] = handle_test_block, | ||
810 | [0x30] = handle_io_inst, | ||
811 | [0x31] = handle_io_inst, | ||
812 | [0x32] = handle_io_inst, | ||
813 | [0x33] = handle_io_inst, | ||
814 | [0x34] = handle_io_inst, | ||
815 | [0x35] = handle_io_inst, | ||
816 | [0x36] = handle_io_inst, | ||
817 | [0x37] = handle_io_inst, | ||
818 | [0x38] = handle_io_inst, | ||
819 | [0x39] = handle_io_inst, | ||
820 | [0x3a] = handle_io_inst, | ||
821 | [0x3b] = handle_io_inst, | ||
822 | [0x3c] = handle_io_inst, | ||
823 | [0x50] = handle_ipte_interlock, | ||
824 | [0x56] = handle_sthyi, | ||
825 | [0x5f] = handle_io_inst, | ||
826 | [0x74] = handle_io_inst, | ||
827 | [0x76] = handle_io_inst, | ||
828 | [0x7d] = handle_stsi, | ||
829 | [0xb1] = handle_stfl, | ||
830 | [0xb2] = handle_lpswe, | ||
831 | }; | ||
832 | |||
833 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) | 799 | int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) |
834 | { | 800 | { |
835 | intercept_handler_t handler; | 801 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
836 | 802 | case 0x02: | |
837 | /* | 803 | return handle_stidp(vcpu); |
838 | * A lot of B2 instructions are priviledged. Here we check for | 804 | case 0x04: |
839 | * the privileged ones, that we can handle in the kernel. | 805 | return handle_set_clock(vcpu); |
840 | * Anything else goes to userspace. | 806 | case 0x10: |
841 | */ | 807 | return handle_set_prefix(vcpu); |
842 | handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 808 | case 0x11: |
843 | if (handler) | 809 | return handle_store_prefix(vcpu); |
844 | return handler(vcpu); | 810 | case 0x12: |
845 | 811 | return handle_store_cpu_address(vcpu); | |
846 | return -EOPNOTSUPP; | 812 | case 0x14: |
813 | return kvm_s390_handle_vsie(vcpu); | ||
814 | case 0x21: | ||
815 | case 0x50: | ||
816 | return handle_ipte_interlock(vcpu); | ||
817 | case 0x29: | ||
818 | return handle_iske(vcpu); | ||
819 | case 0x2a: | ||
820 | return handle_rrbe(vcpu); | ||
821 | case 0x2b: | ||
822 | return handle_sske(vcpu); | ||
823 | case 0x2c: | ||
824 | return handle_test_block(vcpu); | ||
825 | case 0x30: | ||
826 | case 0x31: | ||
827 | case 0x32: | ||
828 | case 0x33: | ||
829 | case 0x34: | ||
830 | case 0x35: | ||
831 | case 0x36: | ||
832 | case 0x37: | ||
833 | case 0x38: | ||
834 | case 0x39: | ||
835 | case 0x3a: | ||
836 | case 0x3b: | ||
837 | case 0x3c: | ||
838 | case 0x5f: | ||
839 | case 0x74: | ||
840 | case 0x76: | ||
841 | return handle_io_inst(vcpu); | ||
842 | case 0x56: | ||
843 | return handle_sthyi(vcpu); | ||
844 | case 0x7d: | ||
845 | return handle_stsi(vcpu); | ||
846 | case 0xb1: | ||
847 | return handle_stfl(vcpu); | ||
848 | case 0xb2: | ||
849 | return handle_lpswe(vcpu); | ||
850 | default: | ||
851 | return -EOPNOTSUPP; | ||
852 | } | ||
847 | } | 853 | } |
848 | 854 | ||
849 | static int handle_epsw(struct kvm_vcpu *vcpu) | 855 | static int handle_epsw(struct kvm_vcpu *vcpu) |
@@ -1105,25 +1111,22 @@ static int handle_essa(struct kvm_vcpu *vcpu) | |||
1105 | return 0; | 1111 | return 0; |
1106 | } | 1112 | } |
1107 | 1113 | ||
1108 | static const intercept_handler_t b9_handlers[256] = { | ||
1109 | [0x8a] = handle_ipte_interlock, | ||
1110 | [0x8d] = handle_epsw, | ||
1111 | [0x8e] = handle_ipte_interlock, | ||
1112 | [0x8f] = handle_ipte_interlock, | ||
1113 | [0xab] = handle_essa, | ||
1114 | [0xaf] = handle_pfmf, | ||
1115 | }; | ||
1116 | |||
1117 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) | 1114 | int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) |
1118 | { | 1115 | { |
1119 | intercept_handler_t handler; | 1116 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1120 | 1117 | case 0x8a: | |
1121 | /* This is handled just as for the B2 instructions. */ | 1118 | case 0x8e: |
1122 | handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 1119 | case 0x8f: |
1123 | if (handler) | 1120 | return handle_ipte_interlock(vcpu); |
1124 | return handler(vcpu); | 1121 | case 0x8d: |
1125 | 1122 | return handle_epsw(vcpu); | |
1126 | return -EOPNOTSUPP; | 1123 | case 0xab: |
1124 | return handle_essa(vcpu); | ||
1125 | case 0xaf: | ||
1126 | return handle_pfmf(vcpu); | ||
1127 | default: | ||
1128 | return -EOPNOTSUPP; | ||
1129 | } | ||
1127 | } | 1130 | } |
1128 | 1131 | ||
1129 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) | 1132 | int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) |
@@ -1271,22 +1274,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu) | |||
1271 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; | 1274 | return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; |
1272 | } | 1275 | } |
1273 | 1276 | ||
1274 | static const intercept_handler_t eb_handlers[256] = { | ||
1275 | [0x2f] = handle_lctlg, | ||
1276 | [0x25] = handle_stctg, | ||
1277 | [0x60] = handle_ri, | ||
1278 | [0x61] = handle_ri, | ||
1279 | [0x62] = handle_ri, | ||
1280 | }; | ||
1281 | |||
1282 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) | 1277 | int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) |
1283 | { | 1278 | { |
1284 | intercept_handler_t handler; | 1279 | switch (vcpu->arch.sie_block->ipb & 0x000000ff) { |
1285 | 1280 | case 0x25: | |
1286 | handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; | 1281 | return handle_stctg(vcpu); |
1287 | if (handler) | 1282 | case 0x2f: |
1288 | return handler(vcpu); | 1283 | return handle_lctlg(vcpu); |
1289 | return -EOPNOTSUPP; | 1284 | case 0x60: |
1285 | case 0x61: | ||
1286 | case 0x62: | ||
1287 | return handle_ri(vcpu); | ||
1288 | default: | ||
1289 | return -EOPNOTSUPP; | ||
1290 | } | ||
1290 | } | 1291 | } |
1291 | 1292 | ||
1292 | static int handle_tprot(struct kvm_vcpu *vcpu) | 1293 | static int handle_tprot(struct kvm_vcpu *vcpu) |
@@ -1346,10 +1347,12 @@ out_unlock: | |||
1346 | 1347 | ||
1347 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) | 1348 | int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) |
1348 | { | 1349 | { |
1349 | /* For e5xx... instructions we only handle TPROT */ | 1350 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1350 | if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) | 1351 | case 0x01: |
1351 | return handle_tprot(vcpu); | 1352 | return handle_tprot(vcpu); |
1352 | return -EOPNOTSUPP; | 1353 | default: |
1354 | return -EOPNOTSUPP; | ||
1355 | } | ||
1353 | } | 1356 | } |
1354 | 1357 | ||
1355 | static int handle_sckpf(struct kvm_vcpu *vcpu) | 1358 | static int handle_sckpf(struct kvm_vcpu *vcpu) |
@@ -1380,17 +1383,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu) | |||
1380 | return 0; | 1383 | return 0; |
1381 | } | 1384 | } |
1382 | 1385 | ||
1383 | static const intercept_handler_t x01_handlers[256] = { | ||
1384 | [0x04] = handle_ptff, | ||
1385 | [0x07] = handle_sckpf, | ||
1386 | }; | ||
1387 | |||
1388 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu) | 1386 | int kvm_s390_handle_01(struct kvm_vcpu *vcpu) |
1389 | { | 1387 | { |
1390 | intercept_handler_t handler; | 1388 | switch (vcpu->arch.sie_block->ipa & 0x00ff) { |
1391 | 1389 | case 0x04: | |
1392 | handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; | 1390 | return handle_ptff(vcpu); |
1393 | if (handler) | 1391 | case 0x07: |
1394 | return handler(vcpu); | 1392 | return handle_sckpf(vcpu); |
1395 | return -EOPNOTSUPP; | 1393 | default: |
1394 | return -EOPNOTSUPP; | ||
1395 | } | ||
1396 | } | 1396 | } |
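The priv.c changes above drop the intercept_handler_t arrays (and the typedef removed from kvm-s390.h) in favour of switch statements on the low instruction byte. A plausible reading is that this trades table lookups plus indirect calls, which retpoline builds route through thunks, for direct branches; the diff itself does not state the motivation. A hedged sketch of the two dispatch styles with hypothetical handlers:

    #include <errno.h>

    struct vcpu;                                  /* opaque stand-in */
    typedef int (*handler_t)(struct vcpu *);

    static int handle_foo(struct vcpu *v) { (void)v; return 0; }  /* hypothetical */
    static int handle_bar(struct vcpu *v) { (void)v; return 0; }  /* hypothetical */

    /* Old pattern: sparse table plus indirect call (thunked under retpoline). */
    static const handler_t table[256] = {
            [0x02] = handle_foo,
            [0x04] = handle_bar,
    };

    static int dispatch_table(struct vcpu *v, unsigned int code)
    {
            handler_t h = table[code & 0xff];

            return h ? h(v) : -EOPNOTSUPP;
    }

    /* New pattern: a switch, so the compiler emits only direct branches/calls. */
    static int dispatch_switch(struct vcpu *v, unsigned int code)
    {
            switch (code & 0xff) {
            case 0x02: return handle_foo(v);
            case 0x04: return handle_bar(v);
            default:   return -EOPNOTSUPP;
            }
    }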
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index ec772700ff96..8961e3970901 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
821 | { | 821 | { |
822 | struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; | 822 | struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; |
823 | struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; | 823 | struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; |
824 | int guest_bp_isolation; | ||
824 | int rc; | 825 | int rc; |
825 | 826 | ||
826 | handle_last_fault(vcpu, vsie_page); | 827 | handle_last_fault(vcpu, vsie_page); |
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
831 | s390_handle_mcck(); | 832 | s390_handle_mcck(); |
832 | 833 | ||
833 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 834 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
835 | |||
836 | /* save current guest state of bp isolation override */ | ||
837 | guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
838 | |||
839 | /* | ||
840 | * The guest is running with BPBC, so we have to force it on for our | ||
841 | * nested guest. This is done by enabling BPBC globally, so the BPBC | ||
842 | * control in the SCB (which the nested guest can modify) is simply | ||
843 | * ignored. | ||
844 | */ | ||
845 | if (test_kvm_facility(vcpu->kvm, 82) && | ||
846 | vcpu->arch.sie_block->fpf & FPF_BPBC) | ||
847 | set_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
848 | |||
834 | local_irq_disable(); | 849 | local_irq_disable(); |
835 | guest_enter_irqoff(); | 850 | guest_enter_irqoff(); |
836 | local_irq_enable(); | 851 | local_irq_enable(); |
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
840 | local_irq_disable(); | 855 | local_irq_disable(); |
841 | guest_exit_irqoff(); | 856 | guest_exit_irqoff(); |
842 | local_irq_enable(); | 857 | local_irq_enable(); |
858 | |||
859 | /* restore guest state for bp isolation override */ | ||
860 | if (!guest_bp_isolation) | ||
861 | clear_thread_flag(TIF_ISOLATE_BP_GUEST); | ||
862 | |||
843 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 863 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
844 | 864 | ||
845 | if (rc == -EINTR) { | 865 | if (rc == -EINTR) { |
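The do_vsie_run() hunks above save the caller's TIF_ISOLATE_BP_GUEST state, force the flag on while the nested guest runs if the first-level guest uses BPBC (facility 82), and clear it afterwards only if it was not already set. A user-space sketch of that save/force/conditionally-restore shape, with a plain variable standing in for the per-thread flag (all names below are illustrative):

    #include <stdbool.h>

    static bool bp_isolation;                 /* stand-in for TIF_ISOLATE_BP_GUEST */

    static void enter_nested_guest(void)      /* placeholder for the SIE entry */
    {
    }

    static void run_nested_guest(bool guest_uses_bpbc)
    {
            bool was_set = bp_isolation;      /* remember the caller's state */

            if (guest_uses_bpbc)
                    bp_isolation = true;      /* force isolation for the nested run */

            enter_nested_guest();

            if (!was_set)                     /* undo only what this call changed */
                    bp_isolation = false;
    }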
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h index 6f17528356b2..ea53e418f6c0 100644 --- a/arch/sparc/include/asm/bug.h +++ b/arch/sparc/include/asm/bug.h | |||
@@ -9,10 +9,14 @@ | |||
9 | void do_BUG(const char *file, int line); | 9 | void do_BUG(const char *file, int line); |
10 | #define BUG() do { \ | 10 | #define BUG() do { \ |
11 | do_BUG(__FILE__, __LINE__); \ | 11 | do_BUG(__FILE__, __LINE__); \ |
12 | barrier_before_unreachable(); \ | ||
12 | __builtin_trap(); \ | 13 | __builtin_trap(); \ |
13 | } while (0) | 14 | } while (0) |
14 | #else | 15 | #else |
15 | #define BUG() __builtin_trap() | 16 | #define BUG() do { \ |
17 | barrier_before_unreachable(); \ | ||
18 | __builtin_trap(); \ | ||
19 | } while (0) | ||
16 | #endif | 20 | #endif |
17 | 21 | ||
18 | #define HAVE_ARCH_BUG | 22 | #define HAVE_ARCH_BUG |
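Both sparc BUG() flavours now issue barrier_before_unreachable() before __builtin_trap(). A common rationale, inferred here rather than stated in the hunk, is to keep the compiler from merging or hoisting the trap together with neighbouring code, which can detach the trap from its intended source location. A rough user-space approximation, modelling the barrier as an empty asm statement (the real helper may differ):

    /*
     * Hedged sketch: a BUG()-style macro with a compiler barrier ahead of
     * the trap. The asm only approximates barrier_before_unreachable().
     */
    #define my_barrier_before_unreachable()  __asm__ __volatile__("" ::: "memory")

    #define MY_BUG() do {                            \
            my_barrier_before_unreachable();         \
            __builtin_trap();                        \
    } while (0)

    int must_be_nonnegative(int x)
    {
            if (x < 0)
                    MY_BUG();        /* traps; the barrier keeps it anchored here */
            return x;
    }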
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1236b187824..eb7f43f23521 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -430,6 +430,7 @@ config GOLDFISH | |||
430 | config RETPOLINE | 430 | config RETPOLINE |
431 | bool "Avoid speculative indirect branches in kernel" | 431 | bool "Avoid speculative indirect branches in kernel" |
432 | default y | 432 | default y |
433 | select STACK_VALIDATION if HAVE_STACK_VALIDATION | ||
433 | help | 434 | help |
434 | Compile kernel with the retpoline compiler options to guard against | 435 | Compile kernel with the retpoline compiler options to guard against |
435 | kernel-to-user data leaks by avoiding speculative indirect | 436 | kernel-to-user data leaks by avoiding speculative indirect |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index fad55160dcb9..498c1b812300 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
232 | 232 | ||
233 | # Avoid indirect branches in kernel to deal with Spectre | 233 | # Avoid indirect branches in kernel to deal with Spectre |
234 | ifdef CONFIG_RETPOLINE | 234 | ifdef CONFIG_RETPOLINE |
235 | RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) | 235 | ifneq ($(RETPOLINE_CFLAGS),) |
236 | ifneq ($(RETPOLINE_CFLAGS),) | 236 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE |
237 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE | 237 | endif |
238 | endif | ||
239 | endif | 238 | endif |
240 | 239 | ||
241 | archscripts: scripts_basic | 240 | archscripts: scripts_basic |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 353e20c3f114..886a9115af62 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) | |||
439 | struct efi_uga_draw_protocol *uga = NULL, *first_uga; | 439 | struct efi_uga_draw_protocol *uga = NULL, *first_uga; |
440 | efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; | 440 | efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; |
441 | unsigned long nr_ugas; | 441 | unsigned long nr_ugas; |
442 | u32 *handles = (u32 *)uga_handle;; | 442 | u32 *handles = (u32 *)uga_handle; |
443 | efi_status_t status = EFI_INVALID_PARAMETER; | 443 | efi_status_t status = EFI_INVALID_PARAMETER; |
444 | int i; | 444 | int i; |
445 | 445 | ||
@@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height) | |||
484 | struct efi_uga_draw_protocol *uga = NULL, *first_uga; | 484 | struct efi_uga_draw_protocol *uga = NULL, *first_uga; |
485 | efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; | 485 | efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; |
486 | unsigned long nr_ugas; | 486 | unsigned long nr_ugas; |
487 | u64 *handles = (u64 *)uga_handle;; | 487 | u64 *handles = (u64 *)uga_handle; |
488 | efi_status_t status = EFI_INVALID_PARAMETER; | 488 | efi_status_t status = EFI_INVALID_PARAMETER; |
489 | int i; | 489 | int i; |
490 | 490 | ||
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index dce7092ab24a..be63330c5511 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h | |||
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
97 | 97 | ||
98 | #define SIZEOF_PTREGS 21*8 | 98 | #define SIZEOF_PTREGS 21*8 |
99 | 99 | ||
100 | .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax | 100 | .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 |
101 | /* | 101 | /* |
102 | * Push registers and sanitize registers of values that a | 102 | * Push registers and sanitize registers of values that a |
103 | * speculation attack might otherwise want to exploit. The | 103 | * speculation attack might otherwise want to exploit. The |
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with | |||
105 | * could be put to use in a speculative execution gadget. | 105 | * could be put to use in a speculative execution gadget. |
106 | * Interleave XOR with PUSH for better uop scheduling: | 106 | * Interleave XOR with PUSH for better uop scheduling: |
107 | */ | 107 | */ |
108 | .if \save_ret | ||
109 | pushq %rsi /* pt_regs->si */ | ||
110 | movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ | ||
111 | movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ | ||
112 | .else | ||
108 | pushq %rdi /* pt_regs->di */ | 113 | pushq %rdi /* pt_regs->di */ |
109 | pushq %rsi /* pt_regs->si */ | 114 | pushq %rsi /* pt_regs->si */ |
115 | .endif | ||
110 | pushq \rdx /* pt_regs->dx */ | 116 | pushq \rdx /* pt_regs->dx */ |
111 | pushq %rcx /* pt_regs->cx */ | 117 | pushq %rcx /* pt_regs->cx */ |
112 | pushq \rax /* pt_regs->ax */ | 118 | pushq \rax /* pt_regs->ax */ |
113 | pushq %r8 /* pt_regs->r8 */ | 119 | pushq %r8 /* pt_regs->r8 */ |
114 | xorq %r8, %r8 /* nospec r8 */ | 120 | xorl %r8d, %r8d /* nospec r8 */ |
115 | pushq %r9 /* pt_regs->r9 */ | 121 | pushq %r9 /* pt_regs->r9 */ |
116 | xorq %r9, %r9 /* nospec r9 */ | 122 | xorl %r9d, %r9d /* nospec r9 */ |
117 | pushq %r10 /* pt_regs->r10 */ | 123 | pushq %r10 /* pt_regs->r10 */ |
118 | xorq %r10, %r10 /* nospec r10 */ | 124 | xorl %r10d, %r10d /* nospec r10 */ |
119 | pushq %r11 /* pt_regs->r11 */ | 125 | pushq %r11 /* pt_regs->r11 */ |
120 | xorq %r11, %r11 /* nospec r11*/ | 126 | xorl %r11d, %r11d /* nospec r11*/ |
121 | pushq %rbx /* pt_regs->rbx */ | 127 | pushq %rbx /* pt_regs->rbx */ |
122 | xorl %ebx, %ebx /* nospec rbx*/ | 128 | xorl %ebx, %ebx /* nospec rbx*/ |
123 | pushq %rbp /* pt_regs->rbp */ | 129 | pushq %rbp /* pt_regs->rbp */ |
124 | xorl %ebp, %ebp /* nospec rbp*/ | 130 | xorl %ebp, %ebp /* nospec rbp*/ |
125 | pushq %r12 /* pt_regs->r12 */ | 131 | pushq %r12 /* pt_regs->r12 */ |
126 | xorq %r12, %r12 /* nospec r12*/ | 132 | xorl %r12d, %r12d /* nospec r12*/ |
127 | pushq %r13 /* pt_regs->r13 */ | 133 | pushq %r13 /* pt_regs->r13 */ |
128 | xorq %r13, %r13 /* nospec r13*/ | 134 | xorl %r13d, %r13d /* nospec r13*/ |
129 | pushq %r14 /* pt_regs->r14 */ | 135 | pushq %r14 /* pt_regs->r14 */ |
130 | xorq %r14, %r14 /* nospec r14*/ | 136 | xorl %r14d, %r14d /* nospec r14*/ |
131 | pushq %r15 /* pt_regs->r15 */ | 137 | pushq %r15 /* pt_regs->r15 */ |
132 | xorq %r15, %r15 /* nospec r15*/ | 138 | xorl %r15d, %r15d /* nospec r15*/ |
133 | UNWIND_HINT_REGS | 139 | UNWIND_HINT_REGS |
140 | .if \save_ret | ||
141 | pushq %rsi /* return address on top of stack */ | ||
142 | .endif | ||
134 | .endm | 143 | .endm |
135 | 144 | ||
136 | .macro POP_REGS pop_rdi=1 skip_r11rcx=0 | 145 | .macro POP_REGS pop_rdi=1 skip_r11rcx=0 |
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
172 | */ | 181 | */ |
173 | .macro ENCODE_FRAME_POINTER ptregs_offset=0 | 182 | .macro ENCODE_FRAME_POINTER ptregs_offset=0 |
174 | #ifdef CONFIG_FRAME_POINTER | 183 | #ifdef CONFIG_FRAME_POINTER |
175 | .if \ptregs_offset | 184 | leaq 1+\ptregs_offset(%rsp), %rbp |
176 | leaq \ptregs_offset(%rsp), %rbp | ||
177 | .else | ||
178 | mov %rsp, %rbp | ||
179 | .endif | ||
180 | orq $0x1, %rbp | ||
181 | #endif | 185 | #endif |
182 | .endm | 186 | .endm |
183 | 187 | ||
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 16c2c022540d..6ad064c8cf35 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm) | |||
252 | * exist, overwrite the RSB with entries which capture | 252 | * exist, overwrite the RSB with entries which capture |
253 | * speculative execution to prevent attack. | 253 | * speculative execution to prevent attack. |
254 | */ | 254 | */ |
255 | /* Clobbers %ebx */ | 255 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
256 | FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
257 | #endif | 256 | #endif |
258 | 257 | ||
259 | /* restore callee-saved registers */ | 258 | /* restore callee-saved registers */ |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 8971bd64d515..d5c7f18f79ac 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm) | |||
364 | * exist, overwrite the RSB with entries which capture | 364 | * exist, overwrite the RSB with entries which capture |
365 | * speculative execution to prevent attack. | 365 | * speculative execution to prevent attack. |
366 | */ | 366 | */ |
367 | /* Clobbers %rbx */ | 367 | FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
368 | FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
369 | #endif | 368 | #endif |
370 | 369 | ||
371 | /* restore callee-saved registers */ | 370 | /* restore callee-saved registers */ |
@@ -449,9 +448,19 @@ END(irq_entries_start) | |||
449 | * | 448 | * |
450 | * The invariant is that, if irq_count != -1, then the IRQ stack is in use. | 449 | * The invariant is that, if irq_count != -1, then the IRQ stack is in use. |
451 | */ | 450 | */ |
452 | .macro ENTER_IRQ_STACK regs=1 old_rsp | 451 | .macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0 |
453 | DEBUG_ENTRY_ASSERT_IRQS_OFF | 452 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
453 | |||
454 | .if \save_ret | ||
455 | /* | ||
456 | * If save_ret is set, the original stack contains one additional | ||
457 | * entry -- the return address. Therefore, move the address one | ||
458 | * entry below %rsp to \old_rsp. | ||
459 | */ | ||
460 | leaq 8(%rsp), \old_rsp | ||
461 | .else | ||
454 | movq %rsp, \old_rsp | 462 | movq %rsp, \old_rsp |
463 | .endif | ||
455 | 464 | ||
456 | .if \regs | 465 | .if \regs |
457 | UNWIND_HINT_REGS base=\old_rsp | 466 | UNWIND_HINT_REGS base=\old_rsp |
@@ -497,6 +506,15 @@ END(irq_entries_start) | |||
497 | .if \regs | 506 | .if \regs |
498 | UNWIND_HINT_REGS indirect=1 | 507 | UNWIND_HINT_REGS indirect=1 |
499 | .endif | 508 | .endif |
509 | |||
510 | .if \save_ret | ||
511 | /* | ||
512 | * Push the return address to the stack. This return address can | ||
513 | * be found at the "real" original RSP, which was offset by 8 at | ||
514 | * the beginning of this macro. | ||
515 | */ | ||
516 | pushq -8(\old_rsp) | ||
517 | .endif | ||
500 | .endm | 518 | .endm |
501 | 519 | ||
502 | /* | 520 | /* |
@@ -520,27 +538,65 @@ END(irq_entries_start) | |||
520 | .endm | 538 | .endm |
521 | 539 | ||
522 | /* | 540 | /* |
523 | * Interrupt entry/exit. | 541 | * Interrupt entry helper function. |
524 | * | ||
525 | * Interrupt entry points save only callee clobbered registers in fast path. | ||
526 | * | 542 | * |
527 | * Entry runs with interrupts off. | 543 | * Entry runs with interrupts off. Stack layout at entry: |
544 | * +----------------------------------------------------+ | ||
545 | * | regs->ss | | ||
546 | * | regs->rsp | | ||
547 | * | regs->eflags | | ||
548 | * | regs->cs | | ||
549 | * | regs->ip | | ||
550 | * +----------------------------------------------------+ | ||
551 | * | regs->orig_ax = ~(interrupt number) | | ||
552 | * +----------------------------------------------------+ | ||
553 | * | return address | | ||
554 | * +----------------------------------------------------+ | ||
528 | */ | 555 | */ |
529 | 556 | ENTRY(interrupt_entry) | |
530 | /* 0(%rsp): ~(interrupt number) */ | 557 | UNWIND_HINT_FUNC |
531 | .macro interrupt func | 558 | ASM_CLAC |
532 | cld | 559 | cld |
533 | 560 | ||
534 | testb $3, CS-ORIG_RAX(%rsp) | 561 | testb $3, CS-ORIG_RAX+8(%rsp) |
535 | jz 1f | 562 | jz 1f |
536 | SWAPGS | 563 | SWAPGS |
537 | call switch_to_thread_stack | 564 | |
565 | /* | ||
566 | * Switch to the thread stack. The IRET frame and orig_ax are | ||
567 | * on the stack, as well as the return address. RDI..R12 are | ||
568 | * not (yet) on the stack and space has not (yet) been | ||
569 | * allocated for them. | ||
570 | */ | ||
571 | pushq %rdi | ||
572 | |||
573 | /* Need to switch before accessing the thread stack. */ | ||
574 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
575 | movq %rsp, %rdi | ||
576 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
577 | |||
578 | /* | ||
579 | * We have RDI, return address, and orig_ax on the stack on | ||
580 | * top of the IRET frame. That means offset=24 | ||
581 | */ | ||
582 | UNWIND_HINT_IRET_REGS base=%rdi offset=24 | ||
583 | |||
584 | pushq 7*8(%rdi) /* regs->ss */ | ||
585 | pushq 6*8(%rdi) /* regs->rsp */ | ||
586 | pushq 5*8(%rdi) /* regs->eflags */ | ||
587 | pushq 4*8(%rdi) /* regs->cs */ | ||
588 | pushq 3*8(%rdi) /* regs->ip */ | ||
589 | pushq 2*8(%rdi) /* regs->orig_ax */ | ||
590 | pushq 8(%rdi) /* return address */ | ||
591 | UNWIND_HINT_FUNC | ||
592 | |||
593 | movq (%rdi), %rdi | ||
538 | 1: | 594 | 1: |
539 | 595 | ||
540 | PUSH_AND_CLEAR_REGS | 596 | PUSH_AND_CLEAR_REGS save_ret=1 |
541 | ENCODE_FRAME_POINTER | 597 | ENCODE_FRAME_POINTER 8 |
542 | 598 | ||
543 | testb $3, CS(%rsp) | 599 | testb $3, CS+8(%rsp) |
544 | jz 1f | 600 | jz 1f |
545 | 601 | ||
546 | /* | 602 | /* |
@@ -548,7 +604,7 @@ END(irq_entries_start) | |||
548 | * | 604 | * |
549 | * We need to tell lockdep that IRQs are off. We can't do this until | 605 | * We need to tell lockdep that IRQs are off. We can't do this until |
550 | * we fix gsbase, and we should do it before enter_from_user_mode | 606 | * we fix gsbase, and we should do it before enter_from_user_mode |
551 | * (which can take locks). Since TRACE_IRQS_OFF idempotent, | 607 | * (which can take locks). Since TRACE_IRQS_OFF is idempotent, |
552 | * the simplest way to handle it is to just call it twice if | 608 | * the simplest way to handle it is to just call it twice if |
553 | * we enter from user mode. There's no reason to optimize this since | 609 | * we enter from user mode. There's no reason to optimize this since |
554 | * TRACE_IRQS_OFF is a no-op if lockdep is off. | 610 | * TRACE_IRQS_OFF is a no-op if lockdep is off. |
@@ -558,12 +614,15 @@ END(irq_entries_start) | |||
558 | CALL_enter_from_user_mode | 614 | CALL_enter_from_user_mode |
559 | 615 | ||
560 | 1: | 616 | 1: |
561 | ENTER_IRQ_STACK old_rsp=%rdi | 617 | ENTER_IRQ_STACK old_rsp=%rdi save_ret=1 |
562 | /* We entered an interrupt context - irqs are off: */ | 618 | /* We entered an interrupt context - irqs are off: */ |
563 | TRACE_IRQS_OFF | 619 | TRACE_IRQS_OFF |
564 | 620 | ||
565 | call \func /* rdi points to pt_regs */ | 621 | ret |
566 | .endm | 622 | END(interrupt_entry) |
623 | |||
624 | |||
625 | /* Interrupt entry/exit. */ | ||
567 | 626 | ||
568 | /* | 627 | /* |
569 | * The interrupt stubs push (~vector+0x80) onto the stack and | 628 | * The interrupt stubs push (~vector+0x80) onto the stack and |
@@ -571,9 +630,10 @@ END(irq_entries_start) | |||
571 | */ | 630 | */ |
572 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 631 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
573 | common_interrupt: | 632 | common_interrupt: |
574 | ASM_CLAC | ||
575 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ | 633 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ |
576 | interrupt do_IRQ | 634 | call interrupt_entry |
635 | UNWIND_HINT_REGS indirect=1 | ||
636 | call do_IRQ /* rdi points to pt_regs */ | ||
577 | /* 0(%rsp): old RSP */ | 637 | /* 0(%rsp): old RSP */ |
578 | ret_from_intr: | 638 | ret_from_intr: |
579 | DISABLE_INTERRUPTS(CLBR_ANY) | 639 | DISABLE_INTERRUPTS(CLBR_ANY) |
@@ -766,10 +826,11 @@ END(common_interrupt) | |||
766 | .macro apicinterrupt3 num sym do_sym | 826 | .macro apicinterrupt3 num sym do_sym |
767 | ENTRY(\sym) | 827 | ENTRY(\sym) |
768 | UNWIND_HINT_IRET_REGS | 828 | UNWIND_HINT_IRET_REGS |
769 | ASM_CLAC | ||
770 | pushq $~(\num) | 829 | pushq $~(\num) |
771 | .Lcommon_\sym: | 830 | .Lcommon_\sym: |
772 | interrupt \do_sym | 831 | call interrupt_entry |
832 | UNWIND_HINT_REGS indirect=1 | ||
833 | call \do_sym /* rdi points to pt_regs */ | ||
773 | jmp ret_from_intr | 834 | jmp ret_from_intr |
774 | END(\sym) | 835 | END(\sym) |
775 | .endm | 836 | .endm |
@@ -832,34 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt | |||
832 | */ | 893 | */ |
833 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) | 894 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) |
834 | 895 | ||
835 | /* | ||
836 | * Switch to the thread stack. This is called with the IRET frame and | ||
837 | * orig_ax on the stack. (That is, RDI..R12 are not on the stack and | ||
838 | * space has not been allocated for them.) | ||
839 | */ | ||
840 | ENTRY(switch_to_thread_stack) | ||
841 | UNWIND_HINT_FUNC | ||
842 | |||
843 | pushq %rdi | ||
844 | /* Need to switch before accessing the thread stack. */ | ||
845 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
846 | movq %rsp, %rdi | ||
847 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
848 | UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI | ||
849 | |||
850 | pushq 7*8(%rdi) /* regs->ss */ | ||
851 | pushq 6*8(%rdi) /* regs->rsp */ | ||
852 | pushq 5*8(%rdi) /* regs->eflags */ | ||
853 | pushq 4*8(%rdi) /* regs->cs */ | ||
854 | pushq 3*8(%rdi) /* regs->ip */ | ||
855 | pushq 2*8(%rdi) /* regs->orig_ax */ | ||
856 | pushq 8(%rdi) /* return address */ | ||
857 | UNWIND_HINT_FUNC | ||
858 | |||
859 | movq (%rdi), %rdi | ||
860 | ret | ||
861 | END(switch_to_thread_stack) | ||
862 | |||
863 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 | 896 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 |
864 | ENTRY(\sym) | 897 | ENTRY(\sym) |
865 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 | 898 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 |
@@ -875,12 +908,8 @@ ENTRY(\sym) | |||
875 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 908 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
876 | .endif | 909 | .endif |
877 | 910 | ||
878 | /* Save all registers in pt_regs */ | ||
879 | PUSH_AND_CLEAR_REGS | ||
880 | ENCODE_FRAME_POINTER | ||
881 | |||
882 | .if \paranoid < 2 | 911 | .if \paranoid < 2 |
883 | testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ | 912 | testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ |
884 | jnz .Lfrom_usermode_switch_stack_\@ | 913 | jnz .Lfrom_usermode_switch_stack_\@ |
885 | .endif | 914 | .endif |
886 | 915 | ||
@@ -1130,13 +1159,15 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 | |||
1130 | #endif | 1159 | #endif |
1131 | 1160 | ||
1132 | /* | 1161 | /* |
1133 | * Switch gs if needed. | 1162 | * Save all registers in pt_regs, and switch gs if needed. |
1134 | * Use slow, but surefire "are we in kernel?" check. | 1163 | * Use slow, but surefire "are we in kernel?" check. |
1135 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | 1164 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise |
1136 | */ | 1165 | */ |
1137 | ENTRY(paranoid_entry) | 1166 | ENTRY(paranoid_entry) |
1138 | UNWIND_HINT_FUNC | 1167 | UNWIND_HINT_FUNC |
1139 | cld | 1168 | cld |
1169 | PUSH_AND_CLEAR_REGS save_ret=1 | ||
1170 | ENCODE_FRAME_POINTER 8 | ||
1140 | movl $1, %ebx | 1171 | movl $1, %ebx |
1141 | movl $MSR_GS_BASE, %ecx | 1172 | movl $MSR_GS_BASE, %ecx |
1142 | rdmsr | 1173 | rdmsr |
@@ -1181,12 +1212,14 @@ ENTRY(paranoid_exit) | |||
1181 | END(paranoid_exit) | 1212 | END(paranoid_exit) |
1182 | 1213 | ||
1183 | /* | 1214 | /* |
1184 | * Switch gs if needed. | 1215 | * Save all registers in pt_regs, and switch GS if needed. |
1185 | * Return: EBX=0: came from user mode; EBX=1: otherwise | 1216 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
1186 | */ | 1217 | */ |
1187 | ENTRY(error_entry) | 1218 | ENTRY(error_entry) |
1188 | UNWIND_HINT_REGS offset=8 | 1219 | UNWIND_HINT_FUNC |
1189 | cld | 1220 | cld |
1221 | PUSH_AND_CLEAR_REGS save_ret=1 | ||
1222 | ENCODE_FRAME_POINTER 8 | ||
1190 | testb $3, CS+8(%rsp) | 1223 | testb $3, CS+8(%rsp) |
1191 | jz .Lerror_kernelspace | 1224 | jz .Lerror_kernelspace |
1192 | 1225 | ||
@@ -1577,8 +1610,6 @@ end_repeat_nmi: | |||
1577 | * frame to point back to repeat_nmi. | 1610 | * frame to point back to repeat_nmi. |
1578 | */ | 1611 | */ |
1579 | pushq $-1 /* ORIG_RAX: no syscall to restart */ | 1612 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
1580 | PUSH_AND_CLEAR_REGS | ||
1581 | ENCODE_FRAME_POINTER | ||
1582 | 1613 | ||
1583 | /* | 1614 | /* |
1584 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit | 1615 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index fd65e016e413..e811dd9c5e99 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S | |||
@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat) | |||
85 | pushq %rcx /* pt_regs->cx */ | 85 | pushq %rcx /* pt_regs->cx */ |
86 | pushq $-ENOSYS /* pt_regs->ax */ | 86 | pushq $-ENOSYS /* pt_regs->ax */ |
87 | pushq $0 /* pt_regs->r8 = 0 */ | 87 | pushq $0 /* pt_regs->r8 = 0 */ |
88 | xorq %r8, %r8 /* nospec r8 */ | 88 | xorl %r8d, %r8d /* nospec r8 */ |
89 | pushq $0 /* pt_regs->r9 = 0 */ | 89 | pushq $0 /* pt_regs->r9 = 0 */ |
90 | xorq %r9, %r9 /* nospec r9 */ | 90 | xorl %r9d, %r9d /* nospec r9 */ |
91 | pushq $0 /* pt_regs->r10 = 0 */ | 91 | pushq $0 /* pt_regs->r10 = 0 */ |
92 | xorq %r10, %r10 /* nospec r10 */ | 92 | xorl %r10d, %r10d /* nospec r10 */ |
93 | pushq $0 /* pt_regs->r11 = 0 */ | 93 | pushq $0 /* pt_regs->r11 = 0 */ |
94 | xorq %r11, %r11 /* nospec r11 */ | 94 | xorl %r11d, %r11d /* nospec r11 */ |
95 | pushq %rbx /* pt_regs->rbx */ | 95 | pushq %rbx /* pt_regs->rbx */ |
96 | xorl %ebx, %ebx /* nospec rbx */ | 96 | xorl %ebx, %ebx /* nospec rbx */ |
97 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ | 97 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
98 | xorl %ebp, %ebp /* nospec rbp */ | 98 | xorl %ebp, %ebp /* nospec rbp */ |
99 | pushq $0 /* pt_regs->r12 = 0 */ | 99 | pushq $0 /* pt_regs->r12 = 0 */ |
100 | xorq %r12, %r12 /* nospec r12 */ | 100 | xorl %r12d, %r12d /* nospec r12 */ |
101 | pushq $0 /* pt_regs->r13 = 0 */ | 101 | pushq $0 /* pt_regs->r13 = 0 */ |
102 | xorq %r13, %r13 /* nospec r13 */ | 102 | xorl %r13d, %r13d /* nospec r13 */ |
103 | pushq $0 /* pt_regs->r14 = 0 */ | 103 | pushq $0 /* pt_regs->r14 = 0 */ |
104 | xorq %r14, %r14 /* nospec r14 */ | 104 | xorl %r14d, %r14d /* nospec r14 */ |
105 | pushq $0 /* pt_regs->r15 = 0 */ | 105 | pushq $0 /* pt_regs->r15 = 0 */ |
106 | xorq %r15, %r15 /* nospec r15 */ | 106 | xorl %r15d, %r15d /* nospec r15 */ |
107 | cld | 107 | cld |
108 | 108 | ||
109 | /* | 109 | /* |
@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) | |||
224 | pushq %rbp /* pt_regs->cx (stashed in bp) */ | 224 | pushq %rbp /* pt_regs->cx (stashed in bp) */ |
225 | pushq $-ENOSYS /* pt_regs->ax */ | 225 | pushq $-ENOSYS /* pt_regs->ax */ |
226 | pushq $0 /* pt_regs->r8 = 0 */ | 226 | pushq $0 /* pt_regs->r8 = 0 */ |
227 | xorq %r8, %r8 /* nospec r8 */ | 227 | xorl %r8d, %r8d /* nospec r8 */ |
228 | pushq $0 /* pt_regs->r9 = 0 */ | 228 | pushq $0 /* pt_regs->r9 = 0 */ |
229 | xorq %r9, %r9 /* nospec r9 */ | 229 | xorl %r9d, %r9d /* nospec r9 */ |
230 | pushq $0 /* pt_regs->r10 = 0 */ | 230 | pushq $0 /* pt_regs->r10 = 0 */ |
231 | xorq %r10, %r10 /* nospec r10 */ | 231 | xorl %r10d, %r10d /* nospec r10 */ |
232 | pushq $0 /* pt_regs->r11 = 0 */ | 232 | pushq $0 /* pt_regs->r11 = 0 */ |
233 | xorq %r11, %r11 /* nospec r11 */ | 233 | xorl %r11d, %r11d /* nospec r11 */ |
234 | pushq %rbx /* pt_regs->rbx */ | 234 | pushq %rbx /* pt_regs->rbx */ |
235 | xorl %ebx, %ebx /* nospec rbx */ | 235 | xorl %ebx, %ebx /* nospec rbx */ |
236 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ | 236 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
237 | xorl %ebp, %ebp /* nospec rbp */ | 237 | xorl %ebp, %ebp /* nospec rbp */ |
238 | pushq $0 /* pt_regs->r12 = 0 */ | 238 | pushq $0 /* pt_regs->r12 = 0 */ |
239 | xorq %r12, %r12 /* nospec r12 */ | 239 | xorl %r12d, %r12d /* nospec r12 */ |
240 | pushq $0 /* pt_regs->r13 = 0 */ | 240 | pushq $0 /* pt_regs->r13 = 0 */ |
241 | xorq %r13, %r13 /* nospec r13 */ | 241 | xorl %r13d, %r13d /* nospec r13 */ |
242 | pushq $0 /* pt_regs->r14 = 0 */ | 242 | pushq $0 /* pt_regs->r14 = 0 */ |
243 | xorq %r14, %r14 /* nospec r14 */ | 243 | xorl %r14d, %r14d /* nospec r14 */ |
244 | pushq $0 /* pt_regs->r15 = 0 */ | 244 | pushq $0 /* pt_regs->r15 = 0 */ |
245 | xorq %r15, %r15 /* nospec r15 */ | 245 | xorl %r15d, %r15d /* nospec r15 */ |
246 | 246 | ||
247 | /* | 247 | /* |
248 | * User mode is traced as though IRQs are on, and SYSENTER | 248 | * User mode is traced as though IRQs are on, and SYSENTER |
@@ -298,9 +298,9 @@ sysret32_from_system_call: | |||
298 | */ | 298 | */ |
299 | SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 | 299 | SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 |
300 | 300 | ||
301 | xorq %r8, %r8 | 301 | xorl %r8d, %r8d |
302 | xorq %r9, %r9 | 302 | xorl %r9d, %r9d |
303 | xorq %r10, %r10 | 303 | xorl %r10d, %r10d |
304 | swapgs | 304 | swapgs |
305 | sysretl | 305 | sysretl |
306 | END(entry_SYSCALL_compat) | 306 | END(entry_SYSCALL_compat) |
@@ -347,10 +347,23 @@ ENTRY(entry_INT80_compat) | |||
347 | */ | 347 | */ |
348 | movl %eax, %eax | 348 | movl %eax, %eax |
349 | 349 | ||
350 | /* switch to thread stack expects orig_ax and rdi to be pushed */ | ||
350 | pushq %rax /* pt_regs->orig_ax */ | 351 | pushq %rax /* pt_regs->orig_ax */ |
352 | pushq %rdi /* pt_regs->di */ | ||
353 | |||
354 | /* Need to switch before accessing the thread stack. */ | ||
355 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
356 | movq %rsp, %rdi | ||
357 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
358 | |||
359 | pushq 6*8(%rdi) /* regs->ss */ | ||
360 | pushq 5*8(%rdi) /* regs->rsp */ | ||
361 | pushq 4*8(%rdi) /* regs->eflags */ | ||
362 | pushq 3*8(%rdi) /* regs->cs */ | ||
363 | pushq 2*8(%rdi) /* regs->ip */ | ||
364 | pushq 1*8(%rdi) /* regs->orig_ax */ | ||
351 | 365 | ||
352 | /* switch to thread stack expects orig_ax to be pushed */ | 366 | movq (%rdi), %rdi /* restore %rdi */ |
353 | call switch_to_thread_stack | ||
354 | 367 | ||
355 | pushq %rdi /* pt_regs->di */ | 368 | pushq %rdi /* pt_regs->di */ |
356 | pushq %rsi /* pt_regs->si */ | 369 | pushq %rsi /* pt_regs->si */ |
@@ -358,25 +371,25 @@ ENTRY(entry_INT80_compat) | |||
358 | pushq %rcx /* pt_regs->cx */ | 371 | pushq %rcx /* pt_regs->cx */ |
359 | pushq $-ENOSYS /* pt_regs->ax */ | 372 | pushq $-ENOSYS /* pt_regs->ax */ |
360 | pushq $0 /* pt_regs->r8 = 0 */ | 373 | pushq $0 /* pt_regs->r8 = 0 */ |
361 | xorq %r8, %r8 /* nospec r8 */ | 374 | xorl %r8d, %r8d /* nospec r8 */ |
362 | pushq $0 /* pt_regs->r9 = 0 */ | 375 | pushq $0 /* pt_regs->r9 = 0 */ |
363 | xorq %r9, %r9 /* nospec r9 */ | 376 | xorl %r9d, %r9d /* nospec r9 */ |
364 | pushq $0 /* pt_regs->r10 = 0 */ | 377 | pushq $0 /* pt_regs->r10 = 0 */ |
365 | xorq %r10, %r10 /* nospec r10 */ | 378 | xorl %r10d, %r10d /* nospec r10 */ |
366 | pushq $0 /* pt_regs->r11 = 0 */ | 379 | pushq $0 /* pt_regs->r11 = 0 */ |
367 | xorq %r11, %r11 /* nospec r11 */ | 380 | xorl %r11d, %r11d /* nospec r11 */ |
368 | pushq %rbx /* pt_regs->rbx */ | 381 | pushq %rbx /* pt_regs->rbx */ |
369 | xorl %ebx, %ebx /* nospec rbx */ | 382 | xorl %ebx, %ebx /* nospec rbx */ |
370 | pushq %rbp /* pt_regs->rbp */ | 383 | pushq %rbp /* pt_regs->rbp */ |
371 | xorl %ebp, %ebp /* nospec rbp */ | 384 | xorl %ebp, %ebp /* nospec rbp */ |
372 | pushq %r12 /* pt_regs->r12 */ | 385 | pushq %r12 /* pt_regs->r12 */ |
373 | xorq %r12, %r12 /* nospec r12 */ | 386 | xorl %r12d, %r12d /* nospec r12 */ |
374 | pushq %r13 /* pt_regs->r13 */ | 387 | pushq %r13 /* pt_regs->r13 */ |
375 | xorq %r13, %r13 /* nospec r13 */ | 388 | xorl %r13d, %r13d /* nospec r13 */ |
376 | pushq %r14 /* pt_regs->r14 */ | 389 | pushq %r14 /* pt_regs->r14 */ |
377 | xorq %r14, %r14 /* nospec r14 */ | 390 | xorl %r14d, %r14d /* nospec r14 */ |
378 | pushq %r15 /* pt_regs->r15 */ | 391 | pushq %r15 /* pt_regs->r15 */ |
379 | xorq %r15, %r15 /* nospec r15 */ | 392 | xorl %r15d, %r15d /* nospec r15 */ |
380 | cld | 393 | cld |
381 | 394 | ||
382 | /* | 395 | /* |
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 4d4015ddcf26..c356098b6fb9 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H | 7 | #ifndef _ASM_X86_MACH_DEFAULT_APM_H |
8 | #define _ASM_X86_MACH_DEFAULT_APM_H | 8 | #define _ASM_X86_MACH_DEFAULT_APM_H |
9 | 9 | ||
10 | #include <asm/nospec-branch.h> | ||
11 | |||
10 | #ifdef APM_ZERO_SEGS | 12 | #ifdef APM_ZERO_SEGS |
11 | # define APM_DO_ZERO_SEGS \ | 13 | # define APM_DO_ZERO_SEGS \ |
12 | "pushl %%ds\n\t" \ | 14 | "pushl %%ds\n\t" \ |
@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
32 | * N.B. We do NOT need a cld after the BIOS call | 34 | * N.B. We do NOT need a cld after the BIOS call |
33 | * because we always save and restore the flags. | 35 | * because we always save and restore the flags. |
34 | */ | 36 | */ |
37 | firmware_restrict_branch_speculation_start(); | ||
35 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 38 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
36 | "pushl %%edi\n\t" | 39 | "pushl %%edi\n\t" |
37 | "pushl %%ebp\n\t" | 40 | "pushl %%ebp\n\t" |
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | |||
44 | "=S" (*esi) | 47 | "=S" (*esi) |
45 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 48 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
46 | : "memory", "cc"); | 49 | : "memory", "cc"); |
50 | firmware_restrict_branch_speculation_end(); | ||
47 | } | 51 | } |
48 | 52 | ||
49 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | 53 | static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, |
@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
56 | * N.B. We do NOT need a cld after the BIOS call | 60 | * N.B. We do NOT need a cld after the BIOS call |
57 | * because we always save and restore the flags. | 61 | * because we always save and restore the flags. |
58 | */ | 62 | */ |
63 | firmware_restrict_branch_speculation_start(); | ||
59 | __asm__ __volatile__(APM_DO_ZERO_SEGS | 64 | __asm__ __volatile__(APM_DO_ZERO_SEGS |
60 | "pushl %%edi\n\t" | 65 | "pushl %%edi\n\t" |
61 | "pushl %%ebp\n\t" | 66 | "pushl %%ebp\n\t" |
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, | |||
68 | "=S" (si) | 73 | "=S" (si) |
69 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | 74 | : "a" (func), "b" (ebx_in), "c" (ecx_in) |
70 | : "memory", "cc"); | 75 | : "memory", "cc"); |
76 | firmware_restrict_branch_speculation_end(); | ||
71 | return error; | 77 | return error; |
72 | } | 78 | } |
73 | 79 | ||
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 4d111616524b..1908214b9125 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h | |||
@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx) | |||
38 | INDIRECT_THUNK(si) | 38 | INDIRECT_THUNK(si) |
39 | INDIRECT_THUNK(di) | 39 | INDIRECT_THUNK(di) |
40 | INDIRECT_THUNK(bp) | 40 | INDIRECT_THUNK(bp) |
41 | asmlinkage void __fill_rsb(void); | ||
42 | asmlinkage void __clear_rsb(void); | ||
43 | |||
44 | #endif /* CONFIG_RETPOLINE */ | 41 | #endif /* CONFIG_RETPOLINE */ |
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0dfe4d3f74e2..f41079da38c5 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
@@ -213,6 +213,7 @@ | |||
213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ | 213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ |
214 | 214 | ||
215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ | 215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
216 | #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ | ||
216 | 217 | ||
217 | /* Virtualization flags: Linux defined, word 8 */ | 218 | /* Virtualization flags: Linux defined, word 8 */ |
218 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 219 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 85f6ccb80b91..a399c1ebf6f0 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <asm/pgtable.h> | 6 | #include <asm/pgtable.h> |
7 | #include <asm/processor-flags.h> | 7 | #include <asm/processor-flags.h> |
8 | #include <asm/tlb.h> | 8 | #include <asm/tlb.h> |
9 | #include <asm/nospec-branch.h> | ||
9 | 10 | ||
10 | /* | 11 | /* |
11 | * We map the EFI regions needed for runtime services non-contiguously, | 12 | * We map the EFI regions needed for runtime services non-contiguously, |
@@ -36,8 +37,18 @@ | |||
36 | 37 | ||
37 | extern asmlinkage unsigned long efi_call_phys(void *, ...); | 38 | extern asmlinkage unsigned long efi_call_phys(void *, ...); |
38 | 39 | ||
39 | #define arch_efi_call_virt_setup() kernel_fpu_begin() | 40 | #define arch_efi_call_virt_setup() \ |
40 | #define arch_efi_call_virt_teardown() kernel_fpu_end() | 41 | ({ \ |
42 | kernel_fpu_begin(); \ | ||
43 | firmware_restrict_branch_speculation_start(); \ | ||
44 | }) | ||
45 | |||
46 | #define arch_efi_call_virt_teardown() \ | ||
47 | ({ \ | ||
48 | firmware_restrict_branch_speculation_end(); \ | ||
49 | kernel_fpu_end(); \ | ||
50 | }) | ||
51 | |||
41 | 52 | ||
42 | /* | 53 | /* |
43 | * Wrap all the virtual calls in a way that forces the parameters on the stack. | 54 | * Wrap all the virtual calls in a way that forces the parameters on the stack. |
@@ -73,6 +84,7 @@ struct efi_scratch { | |||
73 | efi_sync_low_kernel_mappings(); \ | 84 | efi_sync_low_kernel_mappings(); \ |
74 | preempt_disable(); \ | 85 | preempt_disable(); \ |
75 | __kernel_fpu_begin(); \ | 86 | __kernel_fpu_begin(); \ |
87 | firmware_restrict_branch_speculation_start(); \ | ||
76 | \ | 88 | \ |
77 | if (efi_scratch.use_pgd) { \ | 89 | if (efi_scratch.use_pgd) { \ |
78 | efi_scratch.prev_cr3 = __read_cr3(); \ | 90 | efi_scratch.prev_cr3 = __read_cr3(); \ |
@@ -91,6 +103,7 @@ struct efi_scratch { | |||
91 | __flush_tlb_all(); \ | 103 | __flush_tlb_all(); \ |
92 | } \ | 104 | } \ |
93 | \ | 105 | \ |
106 | firmware_restrict_branch_speculation_end(); \ | ||
94 | __kernel_fpu_end(); \ | 107 | __kernel_fpu_end(); \ |
95 | preempt_enable(); \ | 108 | preempt_enable(); \ |
96 | }) | 109 | }) |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dd6f57a54a26..b605a5b6a30c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -507,6 +507,7 @@ struct kvm_vcpu_arch { | |||
507 | u64 smi_count; | 507 | u64 smi_count; |
508 | bool tpr_access_reporting; | 508 | bool tpr_access_reporting; |
509 | u64 ia32_xss; | 509 | u64 ia32_xss; |
510 | u64 microcode_version; | ||
510 | 511 | ||
511 | /* | 512 | /* |
512 | * Paging state of the vcpu | 513 | * Paging state of the vcpu |
@@ -1095,6 +1096,8 @@ struct kvm_x86_ops { | |||
1095 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); | 1096 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); |
1096 | int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | 1097 | int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1097 | int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); | 1098 | int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); |
1099 | |||
1100 | int (*get_msr_feature)(struct kvm_msr_entry *entry); | ||
1098 | }; | 1101 | }; |
1099 | 1102 | ||
1100 | struct kvm_arch_async_pf { | 1103 | struct kvm_arch_async_pf { |
@@ -1464,7 +1467,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) | |||
1464 | #define put_smstate(type, buf, offset, val) \ | 1467 | #define put_smstate(type, buf, offset, val) \ |
1465 | *(type *)((buf) + (offset) - 0x7e00) = val | 1468 | *(type *)((buf) + (offset) - 0x7e00) = val |
1466 | 1469 | ||
1467 | void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, | ||
1468 | unsigned long start, unsigned long end); | ||
1469 | |||
1470 | #endif /* _ASM_X86_KVM_HOST_H */ | 1470 | #endif /* _ASM_X86_KVM_HOST_H */ |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 55520cec8b27..7fb1047d61c7 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -37,7 +37,12 @@ struct cpu_signature { | |||
37 | 37 | ||
38 | struct device; | 38 | struct device; |
39 | 39 | ||
40 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; | 40 | enum ucode_state { |
41 | UCODE_OK = 0, | ||
42 | UCODE_UPDATED, | ||
43 | UCODE_NFOUND, | ||
44 | UCODE_ERROR, | ||
45 | }; | ||
41 | 46 | ||
42 | struct microcode_ops { | 47 | struct microcode_ops { |
43 | enum ucode_state (*request_microcode_user) (int cpu, | 48 | enum ucode_state (*request_microcode_user) (int cpu, |
@@ -54,7 +59,7 @@ struct microcode_ops { | |||
54 | * are being called. | 59 | * are being called. |
55 | * See also the "Synchronization" section in microcode_core.c. | 60 | * See also the "Synchronization" section in microcode_core.c. |
56 | */ | 61 | */ |
57 | int (*apply_microcode) (int cpu); | 62 | enum ucode_state (*apply_microcode) (int cpu); |
58 | int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); | 63 | int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); |
59 | }; | 64 | }; |
60 | 65 | ||
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index c931b88982a0..1de72ce514cd 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot) | |||
74 | return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); | 74 | return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); |
75 | #else | 75 | #else |
76 | BUG(); | 76 | BUG(); |
77 | return (void *)fix_to_virt(FIX_HOLE); | ||
77 | #endif | 78 | #endif |
78 | } | 79 | } |
79 | 80 | ||
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 76b058533e47..d0dabeae0505 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h | |||
@@ -8,6 +8,50 @@ | |||
8 | #include <asm/cpufeatures.h> | 8 | #include <asm/cpufeatures.h> |
9 | #include <asm/msr-index.h> | 9 | #include <asm/msr-index.h> |
10 | 10 | ||
11 | /* | ||
12 | * Fill the CPU return stack buffer. | ||
13 | * | ||
14 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
15 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. | ||
16 | * | ||
17 | * This is required in various cases for retpoline and IBRS-based | ||
18 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
19 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
20 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
21 | * allow predictions from other (unwanted!) sources to be used. | ||
22 | * | ||
23 | * We define a CPP macro such that it can be used from both .S files and | ||
24 | * inline assembly. It's possible to do a .macro and then include that | ||
25 | * from C via asm(".include <asm/nospec-branch.h>") but let's not go there. | ||
26 | */ | ||
27 | |||
28 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
29 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
30 | |||
31 | /* | ||
32 | * Google experimented with loop-unrolling and this turned out to be | ||
33 | * the optimal version — two calls, each with their own speculation | ||
34 | * trap should their return address end up getting used, in a loop. | ||
35 | */ | ||
36 | #define __FILL_RETURN_BUFFER(reg, nr, sp) \ | ||
37 | mov $(nr/2), reg; \ | ||
38 | 771: \ | ||
39 | call 772f; \ | ||
40 | 773: /* speculation trap */ \ | ||
41 | pause; \ | ||
42 | lfence; \ | ||
43 | jmp 773b; \ | ||
44 | 772: \ | ||
45 | call 774f; \ | ||
46 | 775: /* speculation trap */ \ | ||
47 | pause; \ | ||
48 | lfence; \ | ||
49 | jmp 775b; \ | ||
50 | 774: \ | ||
51 | dec reg; \ | ||
52 | jnz 771b; \ | ||
53 | add $(BITS_PER_LONG/8) * nr, sp; | ||
54 | |||
11 | #ifdef __ASSEMBLY__ | 55 | #ifdef __ASSEMBLY__ |
12 | 56 | ||
13 | /* | 57 | /* |
@@ -24,6 +68,18 @@ | |||
24 | .endm | 68 | .endm |
25 | 69 | ||
26 | /* | 70 | /* |
71 | * This should be used immediately before an indirect jump/call. It tells | ||
72 | * objtool the subsequent indirect jump/call is vouched safe for retpoline | ||
73 | * builds. | ||
74 | */ | ||
75 | .macro ANNOTATE_RETPOLINE_SAFE | ||
76 | .Lannotate_\@: | ||
77 | .pushsection .discard.retpoline_safe | ||
78 | _ASM_PTR .Lannotate_\@ | ||
79 | .popsection | ||
80 | .endm | ||
81 | |||
82 | /* | ||
27 | * These are the bare retpoline primitives for indirect jmp and call. | 83 | * These are the bare retpoline primitives for indirect jmp and call. |
28 | * Do not use these directly; they only exist to make the ALTERNATIVE | 84 | * Do not use these directly; they only exist to make the ALTERNATIVE |
29 | * invocation below less ugly. | 85 | * invocation below less ugly. |
@@ -59,9 +115,9 @@ | |||
59 | .macro JMP_NOSPEC reg:req | 115 | .macro JMP_NOSPEC reg:req |
60 | #ifdef CONFIG_RETPOLINE | 116 | #ifdef CONFIG_RETPOLINE |
61 | ANNOTATE_NOSPEC_ALTERNATIVE | 117 | ANNOTATE_NOSPEC_ALTERNATIVE |
62 | ALTERNATIVE_2 __stringify(jmp *\reg), \ | 118 | ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ |
63 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ | 119 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ |
64 | __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD | 120 | __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD |
65 | #else | 121 | #else |
66 | jmp *\reg | 122 | jmp *\reg |
67 | #endif | 123 | #endif |
@@ -70,18 +126,25 @@ | |||
70 | .macro CALL_NOSPEC reg:req | 126 | .macro CALL_NOSPEC reg:req |
71 | #ifdef CONFIG_RETPOLINE | 127 | #ifdef CONFIG_RETPOLINE |
72 | ANNOTATE_NOSPEC_ALTERNATIVE | 128 | ANNOTATE_NOSPEC_ALTERNATIVE |
73 | ALTERNATIVE_2 __stringify(call *\reg), \ | 129 | ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ |
74 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ | 130 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ |
75 | __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD | 131 | __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD |
76 | #else | 132 | #else |
77 | call *\reg | 133 | call *\reg |
78 | #endif | 134 | #endif |
79 | .endm | 135 | .endm |
80 | 136 | ||
81 | /* This clobbers the BX register */ | 137 | /* |
82 | .macro FILL_RETURN_BUFFER nr:req ftr:req | 138 | * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP |
139 | * monstrosity above, manually. | ||
140 | */ | ||
141 | .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req | ||
83 | #ifdef CONFIG_RETPOLINE | 142 | #ifdef CONFIG_RETPOLINE |
84 | ALTERNATIVE "", "call __clear_rsb", \ftr | 143 | ANNOTATE_NOSPEC_ALTERNATIVE |
144 | ALTERNATIVE "jmp .Lskip_rsb_\@", \ | ||
145 | __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ | ||
146 | \ftr | ||
147 | .Lskip_rsb_\@: | ||
85 | #endif | 148 | #endif |
86 | .endm | 149 | .endm |
87 | 150 | ||
@@ -93,6 +156,12 @@ | |||
93 | ".long 999b - .\n\t" \ | 156 | ".long 999b - .\n\t" \ |
94 | ".popsection\n\t" | 157 | ".popsection\n\t" |
95 | 158 | ||
159 | #define ANNOTATE_RETPOLINE_SAFE \ | ||
160 | "999:\n\t" \ | ||
161 | ".pushsection .discard.retpoline_safe\n\t" \ | ||
162 | _ASM_PTR " 999b\n\t" \ | ||
163 | ".popsection\n\t" | ||
164 | |||
96 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) | 165 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) |
97 | 166 | ||
98 | /* | 167 | /* |
@@ -102,6 +171,7 @@ | |||
102 | # define CALL_NOSPEC \ | 171 | # define CALL_NOSPEC \ |
103 | ANNOTATE_NOSPEC_ALTERNATIVE \ | 172 | ANNOTATE_NOSPEC_ALTERNATIVE \ |
104 | ALTERNATIVE( \ | 173 | ALTERNATIVE( \ |
174 | ANNOTATE_RETPOLINE_SAFE \ | ||
105 | "call *%[thunk_target]\n", \ | 175 | "call *%[thunk_target]\n", \ |
106 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ | 176 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ |
107 | X86_FEATURE_RETPOLINE) | 177 | X86_FEATURE_RETPOLINE) |
@@ -156,25 +226,90 @@ extern char __indirect_thunk_end[]; | |||
156 | static inline void vmexit_fill_RSB(void) | 226 | static inline void vmexit_fill_RSB(void) |
157 | { | 227 | { |
158 | #ifdef CONFIG_RETPOLINE | 228 | #ifdef CONFIG_RETPOLINE |
159 | alternative_input("", | 229 | unsigned long loops; |
160 | "call __fill_rsb", | 230 | |
161 | X86_FEATURE_RETPOLINE, | 231 | asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE |
162 | ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory")); | 232 | ALTERNATIVE("jmp 910f", |
233 | __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), | ||
234 | X86_FEATURE_RETPOLINE) | ||
235 | "910:" | ||
236 | : "=r" (loops), ASM_CALL_CONSTRAINT | ||
237 | : : "memory" ); | ||
163 | #endif | 238 | #endif |
164 | } | 239 | } |
165 | 240 | ||
241 | #define alternative_msr_write(_msr, _val, _feature) \ | ||
242 | asm volatile(ALTERNATIVE("", \ | ||
243 | "movl %[msr], %%ecx\n\t" \ | ||
244 | "movl %[val], %%eax\n\t" \ | ||
245 | "movl $0, %%edx\n\t" \ | ||
246 | "wrmsr", \ | ||
247 | _feature) \ | ||
248 | : : [msr] "i" (_msr), [val] "i" (_val) \ | ||
249 | : "eax", "ecx", "edx", "memory") | ||
250 | |||
166 | static inline void indirect_branch_prediction_barrier(void) | 251 | static inline void indirect_branch_prediction_barrier(void) |
167 | { | 252 | { |
168 | asm volatile(ALTERNATIVE("", | 253 | alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, |
169 | "movl %[msr], %%ecx\n\t" | 254 | X86_FEATURE_USE_IBPB); |
170 | "movl %[val], %%eax\n\t" | ||
171 | "movl $0, %%edx\n\t" | ||
172 | "wrmsr", | ||
173 | X86_FEATURE_USE_IBPB) | ||
174 | : : [msr] "i" (MSR_IA32_PRED_CMD), | ||
175 | [val] "i" (PRED_CMD_IBPB) | ||
176 | : "eax", "ecx", "edx", "memory"); | ||
177 | } | 255 | } |
178 | 256 | ||
257 | /* | ||
258 | * With retpoline, we must use IBRS to restrict branch prediction | ||
259 | * before calling into firmware. | ||
260 | * | ||
261 | * (Implemented as CPP macros due to header hell.) | ||
262 | */ | ||
263 | #define firmware_restrict_branch_speculation_start() \ | ||
264 | do { \ | ||
265 | preempt_disable(); \ | ||
266 | alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ | ||
267 | X86_FEATURE_USE_IBRS_FW); \ | ||
268 | } while (0) | ||
269 | |||
270 | #define firmware_restrict_branch_speculation_end() \ | ||
271 | do { \ | ||
272 | alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ | ||
273 | X86_FEATURE_USE_IBRS_FW); \ | ||
274 | preempt_enable(); \ | ||
275 | } while (0) | ||
276 | |||
179 | #endif /* __ASSEMBLY__ */ | 277 | #endif /* __ASSEMBLY__ */ |
278 | |||
279 | /* | ||
280 | * Below is used in the eBPF JIT compiler and emits the byte sequence | ||
281 | * for the following assembly: | ||
282 | * | ||
283 | * With retpolines configured: | ||
284 | * | ||
285 | * callq do_rop | ||
286 | * spec_trap: | ||
287 | * pause | ||
288 | * lfence | ||
289 | * jmp spec_trap | ||
290 | * do_rop: | ||
291 | * mov %rax,(%rsp) | ||
292 | * retq | ||
293 | * | ||
294 | * Without retpolines configured: | ||
295 | * | ||
296 | * jmp *%rax | ||
297 | */ | ||
298 | #ifdef CONFIG_RETPOLINE | ||
299 | # define RETPOLINE_RAX_BPF_JIT_SIZE 17 | ||
300 | # define RETPOLINE_RAX_BPF_JIT() \ | ||
301 | EMIT1_off32(0xE8, 7); /* callq do_rop */ \ | ||
302 | /* spec_trap: */ \ | ||
303 | EMIT2(0xF3, 0x90); /* pause */ \ | ||
304 | EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ | ||
305 | EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ | ||
306 | /* do_rop: */ \ | ||
307 | EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ | ||
308 | EMIT1(0xC3); /* retq */ | ||
309 | #else | ||
310 | # define RETPOLINE_RAX_BPF_JIT_SIZE 2 | ||
311 | # define RETPOLINE_RAX_BPF_JIT() \ | ||
312 | EMIT2(0xFF, 0xE0); /* jmp *%rax */ | ||
313 | #endif | ||
314 | |||
180 | #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ | 315 | #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ |
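As a sanity check on RETPOLINE_RAX_BPF_JIT_SIZE, a standalone userspace C sketch that spells out the bytes the EMIT*() lines above produce and confirms they add up to 17 (the non-retpoline case is the 2-byte FF E0, jmp *%rax):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* Byte values copied from the EMIT*() lines in the hunk above. */
		static const unsigned char thunk[] = {
			0xE8, 0x07, 0x00, 0x00, 0x00,	/* callq do_rop   (rel32 = 7, skips the trap) */
			0xF3, 0x90,			/* pause                                      */
			0x0F, 0xAE, 0xE8,		/* lfence                                     */
			0xEB, 0xF9,			/* jmp spec_trap  (rel8 = -7, back to pause)  */
			0x48, 0x89, 0x04, 0x24,		/* mov %rax,(%rsp)                            */
			0xC3,				/* retq                                       */
		};

		assert(sizeof(thunk) == 17);		/* matches RETPOLINE_RAX_BPF_JIT_SIZE */
		printf("retpoline thunk: %zu bytes\n", sizeof(thunk));
		return 0;
	}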
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 554841fab717..c83a2f418cea 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #ifdef CONFIG_PARAVIRT | 7 | #ifdef CONFIG_PARAVIRT |
8 | #include <asm/pgtable_types.h> | 8 | #include <asm/pgtable_types.h> |
9 | #include <asm/asm.h> | 9 | #include <asm/asm.h> |
10 | #include <asm/nospec-branch.h> | ||
10 | 11 | ||
11 | #include <asm/paravirt_types.h> | 12 | #include <asm/paravirt_types.h> |
12 | 13 | ||
@@ -879,23 +880,27 @@ extern void default_banner(void); | |||
879 | 880 | ||
880 | #define INTERRUPT_RETURN \ | 881 | #define INTERRUPT_RETURN \ |
881 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ | 882 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ |
882 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) | 883 | ANNOTATE_RETPOLINE_SAFE; \ |
884 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) | ||
883 | 885 | ||
884 | #define DISABLE_INTERRUPTS(clobbers) \ | 886 | #define DISABLE_INTERRUPTS(clobbers) \ |
885 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ | 887 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ |
886 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 888 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
889 | ANNOTATE_RETPOLINE_SAFE; \ | ||
887 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ | 890 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ |
888 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 891 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
889 | 892 | ||
890 | #define ENABLE_INTERRUPTS(clobbers) \ | 893 | #define ENABLE_INTERRUPTS(clobbers) \ |
891 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ | 894 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ |
892 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 895 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
896 | ANNOTATE_RETPOLINE_SAFE; \ | ||
893 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ | 897 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ |
894 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 898 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
895 | 899 | ||
896 | #ifdef CONFIG_X86_32 | 900 | #ifdef CONFIG_X86_32 |
897 | #define GET_CR0_INTO_EAX \ | 901 | #define GET_CR0_INTO_EAX \ |
898 | push %ecx; push %edx; \ | 902 | push %ecx; push %edx; \ |
903 | ANNOTATE_RETPOLINE_SAFE; \ | ||
899 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ | 904 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ |
900 | pop %edx; pop %ecx | 905 | pop %edx; pop %ecx |
901 | #else /* !CONFIG_X86_32 */ | 906 | #else /* !CONFIG_X86_32 */ |
@@ -917,21 +922,25 @@ extern void default_banner(void); | |||
917 | */ | 922 | */ |
918 | #define SWAPGS \ | 923 | #define SWAPGS \ |
919 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ | 924 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ |
920 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ | 925 | ANNOTATE_RETPOLINE_SAFE; \ |
926 | call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ | ||
921 | ) | 927 | ) |
922 | 928 | ||
923 | #define GET_CR2_INTO_RAX \ | 929 | #define GET_CR2_INTO_RAX \ |
924 | call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) | 930 | ANNOTATE_RETPOLINE_SAFE; \ |
931 | call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); | ||
925 | 932 | ||
926 | #define USERGS_SYSRET64 \ | 933 | #define USERGS_SYSRET64 \ |
927 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ | 934 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ |
928 | CLBR_NONE, \ | 935 | CLBR_NONE, \ |
929 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) | 936 | ANNOTATE_RETPOLINE_SAFE; \ |
937 | jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) | ||
930 | 938 | ||
931 | #ifdef CONFIG_DEBUG_ENTRY | 939 | #ifdef CONFIG_DEBUG_ENTRY |
932 | #define SAVE_FLAGS(clobbers) \ | 940 | #define SAVE_FLAGS(clobbers) \ |
933 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ | 941 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ |
934 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ | 942 | PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ |
943 | ANNOTATE_RETPOLINE_SAFE; \ | ||
935 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ | 944 | call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ |
936 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) | 945 | PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) |
937 | #endif | 946 | #endif |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index f624f1f10316..180bc0bff0fb 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <asm/desc_defs.h> | 43 | #include <asm/desc_defs.h> |
44 | #include <asm/kmap_types.h> | 44 | #include <asm/kmap_types.h> |
45 | #include <asm/pgtable_types.h> | 45 | #include <asm/pgtable_types.h> |
46 | #include <asm/nospec-branch.h> | ||
46 | 47 | ||
47 | struct page; | 48 | struct page; |
48 | struct thread_struct; | 49 | struct thread_struct; |
@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void); | |||
392 | * offset into the paravirt_patch_template structure, and can therefore be | 393 | * offset into the paravirt_patch_template structure, and can therefore be |
393 | * freely converted back into a structure offset. | 394 | * freely converted back into a structure offset. |
394 | */ | 395 | */ |
395 | #define PARAVIRT_CALL "call *%c[paravirt_opptr];" | 396 | #define PARAVIRT_CALL \ |
397 | ANNOTATE_RETPOLINE_SAFE \ | ||
398 | "call *%c[paravirt_opptr];" | ||
396 | 399 | ||
397 | /* | 400 | /* |
398 | * These macros are intended to wrap calls through one of the paravirt | 401 | * These macros are intended to wrap calls through one of the paravirt |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 63c2552b6b65..b444d83cfc95 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) | |||
350 | { | 350 | { |
351 | pmdval_t v = native_pmd_val(pmd); | 351 | pmdval_t v = native_pmd_val(pmd); |
352 | 352 | ||
353 | return __pmd(v | set); | 353 | return native_make_pmd(v | set); |
354 | } | 354 | } |
355 | 355 | ||
356 | static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) | 356 | static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) |
357 | { | 357 | { |
358 | pmdval_t v = native_pmd_val(pmd); | 358 | pmdval_t v = native_pmd_val(pmd); |
359 | 359 | ||
360 | return __pmd(v & ~clear); | 360 | return native_make_pmd(v & ~clear); |
361 | } | 361 | } |
362 | 362 | ||
363 | static inline pmd_t pmd_mkold(pmd_t pmd) | 363 | static inline pmd_t pmd_mkold(pmd_t pmd) |
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set) | |||
409 | { | 409 | { |
410 | pudval_t v = native_pud_val(pud); | 410 | pudval_t v = native_pud_val(pud); |
411 | 411 | ||
412 | return __pud(v | set); | 412 | return native_make_pud(v | set); |
413 | } | 413 | } |
414 | 414 | ||
415 | static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) | 415 | static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) |
416 | { | 416 | { |
417 | pudval_t v = native_pud_val(pud); | 417 | pudval_t v = native_pud_val(pud); |
418 | 418 | ||
419 | return __pud(v & ~clear); | 419 | return native_make_pud(v & ~clear); |
420 | } | 420 | } |
421 | 421 | ||
422 | static inline pud_t pud_mkold(pud_t pud) | 422 | static inline pud_t pud_mkold(pud_t pud) |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 3696398a9475..246f15b4e64c 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud) | |||
323 | #else | 323 | #else |
324 | #include <asm-generic/pgtable-nopud.h> | 324 | #include <asm-generic/pgtable-nopud.h> |
325 | 325 | ||
326 | static inline pud_t native_make_pud(pudval_t val) | ||
327 | { | ||
328 | return (pud_t) { .p4d.pgd = native_make_pgd(val) }; | ||
329 | } | ||
330 | |||
326 | static inline pudval_t native_pud_val(pud_t pud) | 331 | static inline pudval_t native_pud_val(pud_t pud) |
327 | { | 332 | { |
328 | return native_pgd_val(pud.p4d.pgd); | 333 | return native_pgd_val(pud.p4d.pgd); |
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) | |||
344 | #else | 349 | #else |
345 | #include <asm-generic/pgtable-nopmd.h> | 350 | #include <asm-generic/pgtable-nopmd.h> |
346 | 351 | ||
352 | static inline pmd_t native_make_pmd(pmdval_t val) | ||
353 | { | ||
354 | return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; | ||
355 | } | ||
356 | |||
347 | static inline pmdval_t native_pmd_val(pmd_t pmd) | 357 | static inline pmdval_t native_pmd_val(pmd_t pmd) |
348 | { | 358 | { |
349 | return native_pgd_val(pmd.pud.p4d.pgd); | 359 | return native_pgd_val(pmd.pud.p4d.pgd); |
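The pgtable.h hunk earlier in this series now builds the modified entry with the native_make_pmd()/native_make_pud() constructors added here, instead of __pmd()/__pud(). A simplified standalone sketch of the set-flags round trip, modelling pmd_t as a plain wrapper around its raw value (the unfolded layout); names mirror the kernel ones but the types are reduced for illustration:

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t pmdval_t;
	typedef struct { pmdval_t pmd; } pmd_t;

	static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { .pmd = val }; }
	static inline pmdval_t native_pmd_val(pmd_t pmd)  { return pmd.pmd; }

	/* pmd_set_flags() as in the hunk above: construct directly from the raw value. */
	static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
	{
		return native_make_pmd(native_pmd_val(pmd) | set);
	}

	int main(void)
	{
		pmd_t p = native_make_pmd(0x1000);

		assert(native_pmd_val(pmd_set_flags(p, 0x2)) == 0x1002);
		return 0;
	}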
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1bd9ed87606f..b0ccd4847a58 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -977,4 +977,5 @@ bool xen_set_default_idle(void); | |||
977 | 977 | ||
978 | void stop_this_cpu(void *dummy); | 978 | void stop_this_cpu(void *dummy); |
979 | void df_debug(struct pt_regs *regs, long error_code); | 979 | void df_debug(struct pt_regs *regs, long error_code); |
980 | void microcode_check(void); | ||
980 | #endif /* _ASM_X86_PROCESSOR_H */ | 981 | #endif /* _ASM_X86_PROCESSOR_H */ |
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index 4e44250e7d0d..d65171120e90 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h | |||
@@ -67,13 +67,13 @@ static __always_inline __must_check | |||
67 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) | 67 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) |
68 | { | 68 | { |
69 | GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, | 69 | GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, |
70 | r->refs.counter, "er", i, "%0", e); | 70 | r->refs.counter, "er", i, "%0", e, "cx"); |
71 | } | 71 | } |
72 | 72 | ||
73 | static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) | 73 | static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) |
74 | { | 74 | { |
75 | GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, | 75 | GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, |
76 | r->refs.counter, "%0", e); | 76 | r->refs.counter, "%0", e, "cx"); |
77 | } | 77 | } |
78 | 78 | ||
79 | static __always_inline __must_check | 79 | static __always_inline __must_check |
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index f91c365e57c3..4914a3e7c803 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h | |||
@@ -2,8 +2,7 @@ | |||
2 | #ifndef _ASM_X86_RMWcc | 2 | #ifndef _ASM_X86_RMWcc |
3 | #define _ASM_X86_RMWcc | 3 | #define _ASM_X86_RMWcc |
4 | 4 | ||
5 | #define __CLOBBERS_MEM "memory" | 5 | #define __CLOBBERS_MEM(clb...) "memory", ## clb |
6 | #define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx" | ||
7 | 6 | ||
8 | #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) | 7 | #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) |
9 | 8 | ||
@@ -40,18 +39,19 @@ do { \ | |||
40 | #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ | 39 | #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ |
41 | 40 | ||
42 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ | 41 | #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ |
43 | __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM) | 42 | __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) |
44 | 43 | ||
45 | #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \ | 44 | #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ |
46 | __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ | 45 | __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ |
47 | __CLOBBERS_MEM_CC_CX) | 46 | __CLOBBERS_MEM(clobbers)) |
48 | 47 | ||
49 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ | 48 | #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ |
50 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ | 49 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ |
51 | __CLOBBERS_MEM, vcon (val)) | 50 | __CLOBBERS_MEM(), vcon (val)) |
52 | 51 | ||
53 | #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \ | 52 | #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ |
53 | clobbers...) \ | ||
54 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ | 54 | __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ |
55 | __CLOBBERS_MEM_CC_CX, vcon (val)) | 55 | __CLOBBERS_MEM(clobbers), vcon (val)) |
56 | 56 | ||
57 | #endif /* _ASM_X86_RMWcc */ | 57 | #endif /* _ASM_X86_RMWcc */ |
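The rewritten __CLOBBERS_MEM() leans on the GNU ", ## args" paste rule, which drops the leading comma when no extra clobbers are passed, so the same macro serves both the plain and the suffixed RMWcc variants. A standalone sketch (GCC/Clang extensions assumed) showing both expansions:

	#include <stdio.h>

	#define __CLOBBERS_MEM(clb...)	"memory", ## clb

	static const char *just_mem[]  = { __CLOBBERS_MEM() };			/* { "memory" }             */
	static const char *mem_cc_cx[] = { __CLOBBERS_MEM("cc", "cx") };	/* { "memory", "cc", "cx" } */

	int main(void)
	{
		printf("%zu and %zu clobber strings\n",
		       sizeof(just_mem) / sizeof(just_mem[0]),
		       sizeof(mem_cc_cx) / sizeof(mem_cc_cx[0]));	/* prints: 1 and 3 */
		return 0;
	}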
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 197c2e6c7376..099414345865 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h | |||
@@ -241,24 +241,24 @@ | |||
241 | #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 | 241 | #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 |
242 | 242 | ||
243 | struct hv_reenlightenment_control { | 243 | struct hv_reenlightenment_control { |
244 | u64 vector:8; | 244 | __u64 vector:8; |
245 | u64 reserved1:8; | 245 | __u64 reserved1:8; |
246 | u64 enabled:1; | 246 | __u64 enabled:1; |
247 | u64 reserved2:15; | 247 | __u64 reserved2:15; |
248 | u64 target_vp:32; | 248 | __u64 target_vp:32; |
249 | }; | 249 | }; |
250 | 250 | ||
251 | #define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 | 251 | #define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 |
252 | #define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 | 252 | #define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 |
253 | 253 | ||
254 | struct hv_tsc_emulation_control { | 254 | struct hv_tsc_emulation_control { |
255 | u64 enabled:1; | 255 | __u64 enabled:1; |
256 | u64 reserved:63; | 256 | __u64 reserved:63; |
257 | }; | 257 | }; |
258 | 258 | ||
259 | struct hv_tsc_emulation_status { | 259 | struct hv_tsc_emulation_status { |
260 | u64 inprogress:1; | 260 | __u64 inprogress:1; |
261 | u64 reserved:63; | 261 | __u64 reserved:63; |
262 | }; | 262 | }; |
263 | 263 | ||
264 | #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 | 264 | #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 |
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 7a2ade4aa235..6cfa9c8cb7d6 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define KVM_FEATURE_PV_EOI 6 | 26 | #define KVM_FEATURE_PV_EOI 6 |
27 | #define KVM_FEATURE_PV_UNHALT 7 | 27 | #define KVM_FEATURE_PV_UNHALT 7 |
28 | #define KVM_FEATURE_PV_TLB_FLUSH 9 | 28 | #define KVM_FEATURE_PV_TLB_FLUSH 9 |
29 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 | ||
29 | 30 | ||
30 | /* The last 8 bits are used to indicate how to interpret the flags field | 31 | /* The last 8 bits are used to indicate how to interpret the flags field |
31 | * in pvclock structure. If no bits are set, all flags are ignored. | 32 | * in pvclock structure. If no bits are set, all flags are ignored. |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8ad2e410974f..7c5538769f7e 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void) | |||
1603 | do { | 1603 | do { |
1604 | rep_nop(); | 1604 | rep_nop(); |
1605 | now = rdtsc(); | 1605 | now = rdtsc(); |
1606 | } while ((now - start) < 40000000000UL / HZ && | 1606 | } while ((now - start) < 40000000000ULL / HZ && |
1607 | time_before_eq(jiffies, end)); | 1607 | time_before_eq(jiffies, end)); |
1608 | } | 1608 | } |
1609 | 1609 | ||
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 3cc471beb50b..bb6f7a2148d7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, | |||
134 | { | 134 | { |
135 | struct apic_chip_data *apicd = apic_chip_data(irqd); | 135 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
136 | struct irq_desc *desc = irq_data_to_desc(irqd); | 136 | struct irq_desc *desc = irq_data_to_desc(irqd); |
137 | bool managed = irqd_affinity_is_managed(irqd); | ||
137 | 138 | ||
138 | lockdep_assert_held(&vector_lock); | 139 | lockdep_assert_held(&vector_lock); |
139 | 140 | ||
140 | trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, | 141 | trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, |
141 | apicd->cpu); | 142 | apicd->cpu); |
142 | 143 | ||
143 | /* Setup the vector move, if required */ | 144 | /* |
144 | if (apicd->vector && cpu_online(apicd->cpu)) { | 145 | * If there is no vector associated or if the associated vector is |
146 | * the shutdown vector, which is associated to make PCI/MSI | ||
147 | * shutdown mode work, then there is nothing to release. Clear out | ||
148 | * prev_vector for this and the offlined target case. | ||
149 | */ | ||
150 | apicd->prev_vector = 0; | ||
151 | if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) | ||
152 | goto setnew; | ||
153 | /* | ||
154 | * If the target CPU of the previous vector is online, then mark | ||
155 | * the vector as move in progress and store it for cleanup when the | ||
156 | * first interrupt on the new vector arrives. If the target CPU is | ||
157 | * offline then the regular release mechanism via the cleanup | ||
158 | * vector is not possible and the vector can be immediately freed | ||
159 | * in the underlying matrix allocator. | ||
160 | */ | ||
161 | if (cpu_online(apicd->cpu)) { | ||
145 | apicd->move_in_progress = true; | 162 | apicd->move_in_progress = true; |
146 | apicd->prev_vector = apicd->vector; | 163 | apicd->prev_vector = apicd->vector; |
147 | apicd->prev_cpu = apicd->cpu; | 164 | apicd->prev_cpu = apicd->cpu; |
148 | } else { | 165 | } else { |
149 | apicd->prev_vector = 0; | 166 | irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, |
167 | managed); | ||
150 | } | 168 | } |
151 | 169 | ||
170 | setnew: | ||
152 | apicd->vector = newvec; | 171 | apicd->vector = newvec; |
153 | apicd->cpu = newcpu; | 172 | apicd->cpu = newcpu; |
154 | BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); | 173 | BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d71c8b54b696..bfca937bdcc3 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -300,6 +300,15 @@ retpoline_auto: | |||
300 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); | 300 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); |
301 | pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); | 301 | pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); |
302 | } | 302 | } |
303 | |||
304 | /* | ||
305 | * Retpoline means the kernel is safe because it has no indirect | ||
306 | * branches. But firmware isn't, so use IBRS to protect that. | ||
307 | */ | ||
308 | if (boot_cpu_has(X86_FEATURE_IBRS)) { | ||
309 | setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); | ||
310 | pr_info("Enabling Restricted Speculation for firmware calls\n"); | ||
311 | } | ||
303 | } | 312 | } |
304 | 313 | ||
305 | #undef pr_fmt | 314 | #undef pr_fmt |
@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c | |||
326 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | 335 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
327 | return sprintf(buf, "Not affected\n"); | 336 | return sprintf(buf, "Not affected\n"); |
328 | 337 | ||
329 | return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], | 338 | return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], |
330 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", | 339 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", |
340 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", | ||
331 | spectre_v2_module_string()); | 341 | spectre_v2_module_string()); |
332 | } | 342 | } |
333 | #endif | 343 | #endif |
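With this change, a kernel that has generic retpolines, IBPB and the new firmware IBRS support all active would plausibly report something like the following; the exact leading string depends on the chosen spectre_v2 mode, so treat this as illustrative:

	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
	Mitigation: Full generic retpoline, IBPB, IBRS_FW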
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 824aee0117bb..348cf4821240 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void) | |||
1749 | return 0; | 1749 | return 0; |
1750 | } | 1750 | } |
1751 | core_initcall(init_cpu_syscore); | 1751 | core_initcall(init_cpu_syscore); |
1752 | |||
1753 | /* | ||
1754 | * The microcode loader calls this upon late microcode load to recheck features, | ||
1755 | * only when microcode has been updated. Caller holds microcode_mutex and CPU | ||
1756 | * hotplug lock. | ||
1757 | */ | ||
1758 | void microcode_check(void) | ||
1759 | { | ||
1760 | struct cpuinfo_x86 info; | ||
1761 | |||
1762 | perf_check_microcode(); | ||
1763 | |||
1764 | /* Reload CPUID max function as it might've changed. */ | ||
1765 | info.cpuid_level = cpuid_eax(0); | ||
1766 | |||
1767 | /* | ||
1768 | * Copy all capability leafs to pick up the synthetic ones so that | ||
1769 | * memcmp() below doesn't fail on that. The ones coming from CPUID will | ||
1770 | * get overwritten in get_cpu_cap(). | ||
1771 | */ | ||
1772 | memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); | ||
1773 | |||
1774 | get_cpu_cap(&info); | ||
1775 | |||
1776 | if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) | ||
1777 | return; | ||
1778 | |||
1779 | pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); | ||
1780 | pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); | ||
1781 | } | ||
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index bdab7d2f51af..fca759d272a1 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
@@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, | |||
1804 | goto out_common_fail; | 1804 | goto out_common_fail; |
1805 | } | 1805 | } |
1806 | closid = ret; | 1806 | closid = ret; |
1807 | ret = 0; | ||
1807 | 1808 | ||
1808 | rdtgrp->closid = closid; | 1809 | rdtgrp->closid = closid; |
1809 | list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); | 1810 | list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 330b8462d426..a998e1a7d46f 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, | |||
498 | return patch_size; | 498 | return patch_size; |
499 | } | 499 | } |
500 | 500 | ||
501 | static int apply_microcode_amd(int cpu) | 501 | static enum ucode_state apply_microcode_amd(int cpu) |
502 | { | 502 | { |
503 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 503 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
504 | struct microcode_amd *mc_amd; | 504 | struct microcode_amd *mc_amd; |
@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu) | |||
512 | 512 | ||
513 | p = find_patch(cpu); | 513 | p = find_patch(cpu); |
514 | if (!p) | 514 | if (!p) |
515 | return 0; | 515 | return UCODE_NFOUND; |
516 | 516 | ||
517 | mc_amd = p->data; | 517 | mc_amd = p->data; |
518 | uci->mc = p->data; | 518 | uci->mc = p->data; |
@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu) | |||
523 | if (rev >= mc_amd->hdr.patch_id) { | 523 | if (rev >= mc_amd->hdr.patch_id) { |
524 | c->microcode = rev; | 524 | c->microcode = rev; |
525 | uci->cpu_sig.rev = rev; | 525 | uci->cpu_sig.rev = rev; |
526 | return 0; | 526 | return UCODE_OK; |
527 | } | 527 | } |
528 | 528 | ||
529 | if (__apply_microcode_amd(mc_amd)) { | 529 | if (__apply_microcode_amd(mc_amd)) { |
530 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", | 530 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
531 | cpu, mc_amd->hdr.patch_id); | 531 | cpu, mc_amd->hdr.patch_id); |
532 | return -1; | 532 | return UCODE_ERROR; |
533 | } | 533 | } |
534 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | 534 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, |
535 | mc_amd->hdr.patch_id); | 535 | mc_amd->hdr.patch_id); |
@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu) | |||
537 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; | 537 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; |
538 | c->microcode = mc_amd->hdr.patch_id; | 538 | c->microcode = mc_amd->hdr.patch_id; |
539 | 539 | ||
540 | return 0; | 540 | return UCODE_UPDATED; |
541 | } | 541 | } |
542 | 542 | ||
543 | static int install_equiv_cpu_table(const u8 *buf) | 543 | static int install_equiv_cpu_table(const u8 *buf) |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 319dd65f98a2..aa1b9a422f2b 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu) | |||
374 | } | 374 | } |
375 | 375 | ||
376 | struct apply_microcode_ctx { | 376 | struct apply_microcode_ctx { |
377 | int err; | 377 | enum ucode_state err; |
378 | }; | 378 | }; |
379 | 379 | ||
380 | static void apply_microcode_local(void *arg) | 380 | static void apply_microcode_local(void *arg) |
@@ -489,31 +489,30 @@ static void __exit microcode_dev_exit(void) | |||
489 | /* fake device for request_firmware */ | 489 | /* fake device for request_firmware */ |
490 | static struct platform_device *microcode_pdev; | 490 | static struct platform_device *microcode_pdev; |
491 | 491 | ||
492 | static int reload_for_cpu(int cpu) | 492 | static enum ucode_state reload_for_cpu(int cpu) |
493 | { | 493 | { |
494 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 494 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
495 | enum ucode_state ustate; | 495 | enum ucode_state ustate; |
496 | int err = 0; | ||
497 | 496 | ||
498 | if (!uci->valid) | 497 | if (!uci->valid) |
499 | return err; | 498 | return UCODE_OK; |
500 | 499 | ||
501 | ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true); | 500 | ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true); |
502 | if (ustate == UCODE_OK) | 501 | if (ustate != UCODE_OK) |
503 | apply_microcode_on_target(cpu); | 502 | return ustate; |
504 | else | 503 | |
505 | if (ustate == UCODE_ERROR) | 504 | return apply_microcode_on_target(cpu); |
506 | err = -EINVAL; | ||
507 | return err; | ||
508 | } | 505 | } |
509 | 506 | ||
510 | static ssize_t reload_store(struct device *dev, | 507 | static ssize_t reload_store(struct device *dev, |
511 | struct device_attribute *attr, | 508 | struct device_attribute *attr, |
512 | const char *buf, size_t size) | 509 | const char *buf, size_t size) |
513 | { | 510 | { |
511 | enum ucode_state tmp_ret = UCODE_OK; | ||
512 | bool do_callback = false; | ||
514 | unsigned long val; | 513 | unsigned long val; |
514 | ssize_t ret = 0; | ||
515 | int cpu; | 515 | int cpu; |
516 | ssize_t ret = 0, tmp_ret; | ||
517 | 516 | ||
518 | ret = kstrtoul(buf, 0, &val); | 517 | ret = kstrtoul(buf, 0, &val); |
519 | if (ret) | 518 | if (ret) |
@@ -526,15 +525,21 @@ static ssize_t reload_store(struct device *dev, | |||
526 | mutex_lock(&microcode_mutex); | 525 | mutex_lock(&microcode_mutex); |
527 | for_each_online_cpu(cpu) { | 526 | for_each_online_cpu(cpu) { |
528 | tmp_ret = reload_for_cpu(cpu); | 527 | tmp_ret = reload_for_cpu(cpu); |
529 | if (tmp_ret != 0) | 528 | if (tmp_ret > UCODE_NFOUND) { |
530 | pr_warn("Error reloading microcode on CPU %d\n", cpu); | 529 | pr_warn("Error reloading microcode on CPU %d\n", cpu); |
531 | 530 | ||
532 | /* save retval of the first encountered reload error */ | 531 | /* set retval for the first encountered reload error */ |
533 | if (!ret) | 532 | if (!ret) |
534 | ret = tmp_ret; | 533 | ret = -EINVAL; |
534 | } | ||
535 | |||
536 | if (tmp_ret == UCODE_UPDATED) | ||
537 | do_callback = true; | ||
535 | } | 538 | } |
536 | if (!ret) | 539 | |
537 | perf_check_microcode(); | 540 | if (!ret && do_callback) |
541 | microcode_check(); | ||
542 | |||
538 | mutex_unlock(&microcode_mutex); | 543 | mutex_unlock(&microcode_mutex); |
539 | put_online_cpus(); | 544 | put_online_cpus(); |
540 | 545 | ||
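The reload_store() hunk above treats anything greater than UCODE_NFOUND as a reload error, which relies on the ordering of the reworked enum in microcode.h (UCODE_OK = 0, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR). A standalone check of that assumption:

	#include <assert.h>

	enum ucode_state {		/* mirrors the new definition in microcode.h */
		UCODE_OK = 0,
		UCODE_UPDATED,
		UCODE_NFOUND,
		UCODE_ERROR,
	};

	int main(void)
	{
		assert(!(UCODE_OK      > UCODE_NFOUND));	/* nothing to do: tolerated     */
		assert(!(UCODE_UPDATED > UCODE_NFOUND));	/* successful update            */
		assert(!(UCODE_NFOUND  > UCODE_NFOUND));	/* no matching patch: tolerated */
		assert(UCODE_ERROR     > UCODE_NFOUND);		/* only this path warns         */
		return 0;
	}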
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index a15db2b4e0d6..923054a6b760 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
772 | return 0; | 772 | return 0; |
773 | } | 773 | } |
774 | 774 | ||
775 | static int apply_microcode_intel(int cpu) | 775 | static enum ucode_state apply_microcode_intel(int cpu) |
776 | { | 776 | { |
777 | struct microcode_intel *mc; | 777 | struct microcode_intel *mc; |
778 | struct ucode_cpu_info *uci; | 778 | struct ucode_cpu_info *uci; |
@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu) | |||
782 | 782 | ||
783 | /* We should bind the task to the CPU */ | 783 | /* We should bind the task to the CPU */ |
784 | if (WARN_ON(raw_smp_processor_id() != cpu)) | 784 | if (WARN_ON(raw_smp_processor_id() != cpu)) |
785 | return -1; | 785 | return UCODE_ERROR; |
786 | 786 | ||
787 | uci = ucode_cpu_info + cpu; | 787 | uci = ucode_cpu_info + cpu; |
788 | mc = uci->mc; | 788 | mc = uci->mc; |
@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu) | |||
790 | /* Look for a newer patch in our cache: */ | 790 | /* Look for a newer patch in our cache: */ |
791 | mc = find_patch(uci); | 791 | mc = find_patch(uci); |
792 | if (!mc) | 792 | if (!mc) |
793 | return 0; | 793 | return UCODE_NFOUND; |
794 | } | 794 | } |
795 | 795 | ||
796 | /* write microcode via MSR 0x79 */ | 796 | /* write microcode via MSR 0x79 */ |
@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu) | |||
801 | if (rev != mc->hdr.rev) { | 801 | if (rev != mc->hdr.rev) { |
802 | pr_err("CPU%d update to revision 0x%x failed\n", | 802 | pr_err("CPU%d update to revision 0x%x failed\n", |
803 | cpu, mc->hdr.rev); | 803 | cpu, mc->hdr.rev); |
804 | return -1; | 804 | return UCODE_ERROR; |
805 | } | 805 | } |
806 | 806 | ||
807 | if (rev != prev_rev) { | 807 | if (rev != prev_rev) { |
@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu) | |||
818 | uci->cpu_sig.rev = rev; | 818 | uci->cpu_sig.rev = rev; |
819 | c->microcode = rev; | 819 | c->microcode = rev; |
820 | 820 | ||
821 | return 0; | 821 | return UCODE_UPDATED; |
822 | } | 822 | } |
823 | 823 | ||
824 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | 824 | static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, |
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 04a625f0fcda..0f545b3cf926 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/nops.h> | 23 | #include <asm/nops.h> |
24 | #include "../entry/calling.h" | 24 | #include "../entry/calling.h" |
25 | #include <asm/export.h> | 25 | #include <asm/export.h> |
26 | #include <asm/nospec-branch.h> | ||
26 | 27 | ||
27 | #ifdef CONFIG_PARAVIRT | 28 | #ifdef CONFIG_PARAVIRT |
28 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64) | |||
134 | 135 | ||
135 | /* Ensure I am executing from virtual addresses */ | 136 | /* Ensure I am executing from virtual addresses */ |
136 | movq $1f, %rax | 137 | movq $1f, %rax |
138 | ANNOTATE_RETPOLINE_SAFE | ||
137 | jmp *%rax | 139 | jmp *%rax |
138 | 1: | 140 | 1: |
139 | UNWIND_HINT_EMPTY | 141 | UNWIND_HINT_EMPTY |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 4e37d1a851a6..bc1a27280c4b 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | static int kvmapf = 1; | 50 | static int kvmapf = 1; |
51 | 51 | ||
52 | static int parse_no_kvmapf(char *arg) | 52 | static int __init parse_no_kvmapf(char *arg) |
53 | { | 53 | { |
54 | kvmapf = 0; | 54 | kvmapf = 0; |
55 | return 0; | 55 | return 0; |
@@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg) | |||
58 | early_param("no-kvmapf", parse_no_kvmapf); | 58 | early_param("no-kvmapf", parse_no_kvmapf); |
59 | 59 | ||
60 | static int steal_acc = 1; | 60 | static int steal_acc = 1; |
61 | static int parse_no_stealacc(char *arg) | 61 | static int __init parse_no_stealacc(char *arg) |
62 | { | 62 | { |
63 | steal_acc = 0; | 63 | steal_acc = 0; |
64 | return 0; | 64 | return 0; |
@@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg) | |||
67 | early_param("no-steal-acc", parse_no_stealacc); | 67 | early_param("no-steal-acc", parse_no_stealacc); |
68 | 68 | ||
69 | static int kvmclock_vsyscall = 1; | 69 | static int kvmclock_vsyscall = 1; |
70 | static int parse_no_kvmclock_vsyscall(char *arg) | 70 | static int __init parse_no_kvmclock_vsyscall(char *arg) |
71 | { | 71 | { |
72 | kvmclock_vsyscall = 0; | 72 | kvmclock_vsyscall = 0; |
73 | return 0; | 73 | return 0; |
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void) | |||
341 | #endif | 341 | #endif |
342 | pa |= KVM_ASYNC_PF_ENABLED; | 342 | pa |= KVM_ASYNC_PF_ENABLED; |
343 | 343 | ||
344 | /* Async page fault support for L1 hypervisor is optional */ | 344 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) |
345 | if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN, | 345 | pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; |
346 | (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0) | 346 | |
347 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); | 347 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); |
348 | __this_cpu_write(apf_reason.enabled, 1); | 348 | __this_cpu_write(apf_reason.enabled, 1); |
349 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 349 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
350 | smp_processor_id()); | 350 | smp_processor_id()); |
@@ -545,7 +545,8 @@ static void __init kvm_guest_init(void) | |||
545 | pv_time_ops.steal_clock = kvm_steal_clock; | 545 | pv_time_ops.steal_clock = kvm_steal_clock; |
546 | } | 546 | } |
547 | 547 | ||
548 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) | 548 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
549 | !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) | ||
549 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; | 550 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; |
550 | 551 | ||
551 | if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) | 552 | if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) |
@@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void) | |||
633 | { | 634 | { |
634 | int cpu; | 635 | int cpu; |
635 | 636 | ||
636 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) { | 637 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
638 | !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { | ||
637 | for_each_possible_cpu(cpu) { | 639 | for_each_possible_cpu(cpu) { |
638 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), | 640 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), |
639 | GFP_KERNEL, cpu_to_node(cpu)); | 641 | GFP_KERNEL, cpu_to_node(cpu)); |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 1f790cf9d38f..3b7427aa7d85 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, | |||
542 | goto overflow; | 542 | goto overflow; |
543 | break; | 543 | break; |
544 | case R_X86_64_PC32: | 544 | case R_X86_64_PC32: |
545 | case R_X86_64_PLT32: | ||
545 | value -= (u64)address; | 546 | value -= (u64)address; |
546 | *(u32 *)location = value; | 547 | *(u32 *)location = value; |
547 | break; | 548 | break; |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index da0c160e5589..f58336af095c 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
191 | goto overflow; | 191 | goto overflow; |
192 | break; | 192 | break; |
193 | case R_X86_64_PC32: | 193 | case R_X86_64_PC32: |
194 | case R_X86_64_PLT32: | ||
194 | if (*(u32 *)loc != 0) | 195 | if (*(u32 *)loc != 0) |
195 | goto invalid_relocation; | 196 | goto invalid_relocation; |
196 | val -= (u64)loc; | 197 | val -= (u64)loc; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9eee25d07586..ff99e2b6fc54 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1437,6 +1437,7 @@ static void remove_siblinginfo(int cpu) | |||
1437 | cpumask_clear(topology_sibling_cpumask(cpu)); | 1437 | cpumask_clear(topology_sibling_cpumask(cpu)); |
1438 | cpumask_clear(topology_core_cpumask(cpu)); | 1438 | cpumask_clear(topology_core_cpumask(cpu)); |
1439 | c->cpu_core_id = 0; | 1439 | c->cpu_core_id = 0; |
1440 | c->booted_cores = 0; | ||
1440 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); | 1441 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); |
1441 | recompute_smt_state(); | 1442 | recompute_smt_state(); |
1442 | } | 1443 | } |
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 1f9188f5357c..feb28fee6cea 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <asm/unwind.h> | 5 | #include <asm/unwind.h> |
6 | #include <asm/orc_types.h> | 6 | #include <asm/orc_types.h> |
7 | #include <asm/orc_lookup.h> | 7 | #include <asm/orc_lookup.h> |
8 | #include <asm/sections.h> | ||
9 | 8 | ||
10 | #define orc_warn(fmt, ...) \ | 9 | #define orc_warn(fmt, ...) \ |
11 | printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) | 10 | printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__) |
@@ -148,7 +147,7 @@ static struct orc_entry *orc_find(unsigned long ip) | |||
148 | } | 147 | } |
149 | 148 | ||
150 | /* vmlinux .init slow lookup: */ | 149 | /* vmlinux .init slow lookup: */ |
151 | if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext) | 150 | if (init_kernel_text(ip)) |
152 | return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, | 151 | return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, |
153 | __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); | 152 | __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); |
154 | 153 | ||
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a0c5a69bc7c4..b671fc2d0422 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
607 | (1 << KVM_FEATURE_PV_EOI) | | 607 | (1 << KVM_FEATURE_PV_EOI) | |
608 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | | 608 | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | |
609 | (1 << KVM_FEATURE_PV_UNHALT) | | 609 | (1 << KVM_FEATURE_PV_UNHALT) | |
610 | (1 << KVM_FEATURE_PV_TLB_FLUSH); | 610 | (1 << KVM_FEATURE_PV_TLB_FLUSH) | |
611 | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT); | ||
611 | 612 | ||
612 | if (sched_info_on()) | 613 | if (sched_info_on()) |
613 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); | 614 | entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 924ac8ce9d50..391dda8d43b7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -2002,14 +2002,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
2002 | 2002 | ||
2003 | void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) | 2003 | void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) |
2004 | { | 2004 | { |
2005 | struct kvm_lapic *apic; | 2005 | struct kvm_lapic *apic = vcpu->arch.apic; |
2006 | int i; | 2006 | int i; |
2007 | 2007 | ||
2008 | apic_debug("%s\n", __func__); | 2008 | if (!apic) |
2009 | return; | ||
2009 | 2010 | ||
2010 | ASSERT(vcpu); | 2011 | apic_debug("%s\n", __func__); |
2011 | apic = vcpu->arch.apic; | ||
2012 | ASSERT(apic != NULL); | ||
2013 | 2012 | ||
2014 | /* Stop the timer in case it's a reset to an active apic */ | 2013 | /* Stop the timer in case it's a reset to an active apic */ |
2015 | hrtimer_cancel(&apic->lapic_timer.timer); | 2014 | hrtimer_cancel(&apic->lapic_timer.timer); |
@@ -2165,7 +2164,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) | |||
2165 | */ | 2164 | */ |
2166 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; | 2165 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; |
2167 | static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ | 2166 | static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ |
2168 | kvm_lapic_reset(vcpu, false); | ||
2169 | kvm_iodevice_init(&apic->dev, &apic_mmio_ops); | 2167 | kvm_iodevice_init(&apic->dev, &apic_mmio_ops); |
2170 | 2168 | ||
2171 | return 0; | 2169 | return 0; |
@@ -2569,7 +2567,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
2569 | 2567 | ||
2570 | pe = xchg(&apic->pending_events, 0); | 2568 | pe = xchg(&apic->pending_events, 0); |
2571 | if (test_bit(KVM_APIC_INIT, &pe)) { | 2569 | if (test_bit(KVM_APIC_INIT, &pe)) { |
2572 | kvm_lapic_reset(vcpu, true); | ||
2573 | kvm_vcpu_reset(vcpu, true); | 2570 | kvm_vcpu_reset(vcpu, true); |
2574 | if (kvm_vcpu_is_bsp(apic->vcpu)) | 2571 | if (kvm_vcpu_is_bsp(apic->vcpu)) |
2575 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 2572 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 46ff304140c7..f551962ac294 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3029,7 +3029,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) | |||
3029 | return RET_PF_RETRY; | 3029 | return RET_PF_RETRY; |
3030 | } | 3030 | } |
3031 | 3031 | ||
3032 | return -EFAULT; | 3032 | return RET_PF_EMULATE; |
3033 | } | 3033 | } |
3034 | 3034 | ||
3035 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, | 3035 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3e488a74828..be9c839e2c89 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/debugreg.h> | 49 | #include <asm/debugreg.h> |
50 | #include <asm/kvm_para.h> | 50 | #include <asm/kvm_para.h> |
51 | #include <asm/irq_remapping.h> | 51 | #include <asm/irq_remapping.h> |
52 | #include <asm/microcode.h> | ||
52 | #include <asm/nospec-branch.h> | 53 | #include <asm/nospec-branch.h> |
53 | 54 | ||
54 | #include <asm/virtext.h> | 55 | #include <asm/virtext.h> |
@@ -178,6 +179,8 @@ struct vcpu_svm { | |||
178 | uint64_t sysenter_eip; | 179 | uint64_t sysenter_eip; |
179 | uint64_t tsc_aux; | 180 | uint64_t tsc_aux; |
180 | 181 | ||
182 | u64 msr_decfg; | ||
183 | |||
181 | u64 next_rip; | 184 | u64 next_rip; |
182 | 185 | ||
183 | u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; | 186 | u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; |
@@ -300,6 +303,8 @@ module_param(vgif, int, 0444); | |||
300 | static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); | 303 | static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); |
301 | module_param(sev, int, 0444); | 304 | module_param(sev, int, 0444); |
302 | 305 | ||
306 | static u8 rsm_ins_bytes[] = "\x0f\xaa"; | ||
307 | |||
303 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | 308 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
304 | static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); | 309 | static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); |
305 | static void svm_complete_interrupts(struct vcpu_svm *svm); | 310 | static void svm_complete_interrupts(struct vcpu_svm *svm); |
@@ -1383,6 +1388,7 @@ static void init_vmcb(struct vcpu_svm *svm) | |||
1383 | set_intercept(svm, INTERCEPT_SKINIT); | 1388 | set_intercept(svm, INTERCEPT_SKINIT); |
1384 | set_intercept(svm, INTERCEPT_WBINVD); | 1389 | set_intercept(svm, INTERCEPT_WBINVD); |
1385 | set_intercept(svm, INTERCEPT_XSETBV); | 1390 | set_intercept(svm, INTERCEPT_XSETBV); |
1391 | set_intercept(svm, INTERCEPT_RSM); | ||
1386 | 1392 | ||
1387 | if (!kvm_mwait_in_guest()) { | 1393 | if (!kvm_mwait_in_guest()) { |
1388 | set_intercept(svm, INTERCEPT_MONITOR); | 1394 | set_intercept(svm, INTERCEPT_MONITOR); |
@@ -1902,6 +1908,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
1902 | u32 dummy; | 1908 | u32 dummy; |
1903 | u32 eax = 1; | 1909 | u32 eax = 1; |
1904 | 1910 | ||
1911 | vcpu->arch.microcode_version = 0x01000065; | ||
1905 | svm->spec_ctrl = 0; | 1912 | svm->spec_ctrl = 0; |
1906 | 1913 | ||
1907 | if (!init_event) { | 1914 | if (!init_event) { |
@@ -3699,6 +3706,12 @@ static int emulate_on_interception(struct vcpu_svm *svm) | |||
3699 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; | 3706 | return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; |
3700 | } | 3707 | } |
3701 | 3708 | ||
3709 | static int rsm_interception(struct vcpu_svm *svm) | ||
3710 | { | ||
3711 | return x86_emulate_instruction(&svm->vcpu, 0, 0, | ||
3712 | rsm_ins_bytes, 2) == EMULATE_DONE; | ||
3713 | } | ||
3714 | |||
3702 | static int rdpmc_interception(struct vcpu_svm *svm) | 3715 | static int rdpmc_interception(struct vcpu_svm *svm) |
3703 | { | 3716 | { |
3704 | int err; | 3717 | int err; |
@@ -3860,6 +3873,22 @@ static int cr8_write_interception(struct vcpu_svm *svm) | |||
3860 | return 0; | 3873 | return 0; |
3861 | } | 3874 | } |
3862 | 3875 | ||
3876 | static int svm_get_msr_feature(struct kvm_msr_entry *msr) | ||
3877 | { | ||
3878 | msr->data = 0; | ||
3879 | |||
3880 | switch (msr->index) { | ||
3881 | case MSR_F10H_DECFG: | ||
3882 | if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) | ||
3883 | msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; | ||
3884 | break; | ||
3885 | default: | ||
3886 | return 1; | ||
3887 | } | ||
3888 | |||
3889 | return 0; | ||
3890 | } | ||
3891 | |||
3863 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | 3892 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
3864 | { | 3893 | { |
3865 | struct vcpu_svm *svm = to_svm(vcpu); | 3894 | struct vcpu_svm *svm = to_svm(vcpu); |
@@ -3935,9 +3964,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3935 | 3964 | ||
3936 | msr_info->data = svm->spec_ctrl; | 3965 | msr_info->data = svm->spec_ctrl; |
3937 | break; | 3966 | break; |
3938 | case MSR_IA32_UCODE_REV: | ||
3939 | msr_info->data = 0x01000065; | ||
3940 | break; | ||
3941 | case MSR_F15H_IC_CFG: { | 3967 | case MSR_F15H_IC_CFG: { |
3942 | 3968 | ||
3943 | int family, model; | 3969 | int family, model; |
@@ -3955,6 +3981,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3955 | msr_info->data = 0x1E; | 3981 | msr_info->data = 0x1E; |
3956 | } | 3982 | } |
3957 | break; | 3983 | break; |
3984 | case MSR_F10H_DECFG: | ||
3985 | msr_info->data = svm->msr_decfg; | ||
3986 | break; | ||
3958 | default: | 3987 | default: |
3959 | return kvm_get_msr_common(vcpu, msr_info); | 3988 | return kvm_get_msr_common(vcpu, msr_info); |
3960 | } | 3989 | } |
@@ -4133,6 +4162,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
4133 | case MSR_VM_IGNNE: | 4162 | case MSR_VM_IGNNE: |
4134 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); | 4163 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
4135 | break; | 4164 | break; |
4165 | case MSR_F10H_DECFG: { | ||
4166 | struct kvm_msr_entry msr_entry; | ||
4167 | |||
4168 | msr_entry.index = msr->index; | ||
4169 | if (svm_get_msr_feature(&msr_entry)) | ||
4170 | return 1; | ||
4171 | |||
4172 | /* Check the supported bits */ | ||
4173 | if (data & ~msr_entry.data) | ||
4174 | return 1; | ||
4175 | |||
4176 | /* Don't allow the guest to change a bit, #GP */ | ||
4177 | if (!msr->host_initiated && (data ^ msr_entry.data)) | ||
4178 | return 1; | ||
4179 | |||
4180 | svm->msr_decfg = data; | ||
4181 | break; | ||
4182 | } | ||
4136 | case MSR_IA32_APICBASE: | 4183 | case MSR_IA32_APICBASE: |
4137 | if (kvm_vcpu_apicv_active(vcpu)) | 4184 | if (kvm_vcpu_apicv_active(vcpu)) |
4138 | avic_update_vapic_bar(to_svm(vcpu), data); | 4185 | avic_update_vapic_bar(to_svm(vcpu), data); |
@@ -4541,7 +4588,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
4541 | [SVM_EXIT_MWAIT] = mwait_interception, | 4588 | [SVM_EXIT_MWAIT] = mwait_interception, |
4542 | [SVM_EXIT_XSETBV] = xsetbv_interception, | 4589 | [SVM_EXIT_XSETBV] = xsetbv_interception, |
4543 | [SVM_EXIT_NPF] = npf_interception, | 4590 | [SVM_EXIT_NPF] = npf_interception, |
4544 | [SVM_EXIT_RSM] = emulate_on_interception, | 4591 | [SVM_EXIT_RSM] = rsm_interception, |
4545 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, | 4592 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
4546 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, | 4593 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, |
4547 | }; | 4594 | }; |
@@ -5355,7 +5402,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
5355 | * being speculatively taken. | 5402 | * being speculatively taken. |
5356 | */ | 5403 | */ |
5357 | if (svm->spec_ctrl) | 5404 | if (svm->spec_ctrl) |
5358 | wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); | 5405 | native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); |
5359 | 5406 | ||
5360 | asm volatile ( | 5407 | asm volatile ( |
5361 | "push %%" _ASM_BP "; \n\t" | 5408 | "push %%" _ASM_BP "; \n\t" |
@@ -5464,11 +5511,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
5464 | * If the L02 MSR bitmap does not intercept the MSR, then we need to | 5511 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
5465 | * save it. | 5512 | * save it. |
5466 | */ | 5513 | */ |
5467 | if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) | 5514 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
5468 | rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); | 5515 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
5469 | 5516 | ||
5470 | if (svm->spec_ctrl) | 5517 | if (svm->spec_ctrl) |
5471 | wrmsrl(MSR_IA32_SPEC_CTRL, 0); | 5518 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
5472 | 5519 | ||
5473 | /* Eliminate branch target predictions from guest mode */ | 5520 | /* Eliminate branch target predictions from guest mode */ |
5474 | vmexit_fill_RSB(); | 5521 | vmexit_fill_RSB(); |
@@ -6236,16 +6283,18 @@ e_free: | |||
6236 | 6283 | ||
6237 | static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) | 6284 | static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) |
6238 | { | 6285 | { |
6286 | void __user *measure = (void __user *)(uintptr_t)argp->data; | ||
6239 | struct kvm_sev_info *sev = &kvm->arch.sev_info; | 6287 | struct kvm_sev_info *sev = &kvm->arch.sev_info; |
6240 | struct sev_data_launch_measure *data; | 6288 | struct sev_data_launch_measure *data; |
6241 | struct kvm_sev_launch_measure params; | 6289 | struct kvm_sev_launch_measure params; |
6290 | void __user *p = NULL; | ||
6242 | void *blob = NULL; | 6291 | void *blob = NULL; |
6243 | int ret; | 6292 | int ret; |
6244 | 6293 | ||
6245 | if (!sev_guest(kvm)) | 6294 | if (!sev_guest(kvm)) |
6246 | return -ENOTTY; | 6295 | return -ENOTTY; |
6247 | 6296 | ||
6248 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) | 6297 | if (copy_from_user(¶ms, measure, sizeof(params))) |
6249 | return -EFAULT; | 6298 | return -EFAULT; |
6250 | 6299 | ||
6251 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 6300 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
@@ -6256,17 +6305,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6256 | if (!params.len) | 6305 | if (!params.len) |
6257 | goto cmd; | 6306 | goto cmd; |
6258 | 6307 | ||
6259 | if (params.uaddr) { | 6308 | p = (void __user *)(uintptr_t)params.uaddr; |
6309 | if (p) { | ||
6260 | if (params.len > SEV_FW_BLOB_MAX_SIZE) { | 6310 | if (params.len > SEV_FW_BLOB_MAX_SIZE) { |
6261 | ret = -EINVAL; | 6311 | ret = -EINVAL; |
6262 | goto e_free; | 6312 | goto e_free; |
6263 | } | 6313 | } |
6264 | 6314 | ||
6265 | if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) { | ||
6266 | ret = -EFAULT; | ||
6267 | goto e_free; | ||
6268 | } | ||
6269 | |||
6270 | ret = -ENOMEM; | 6315 | ret = -ENOMEM; |
6271 | blob = kmalloc(params.len, GFP_KERNEL); | 6316 | blob = kmalloc(params.len, GFP_KERNEL); |
6272 | if (!blob) | 6317 | if (!blob) |
@@ -6290,13 +6335,13 @@ cmd: | |||
6290 | goto e_free_blob; | 6335 | goto e_free_blob; |
6291 | 6336 | ||
6292 | if (blob) { | 6337 | if (blob) { |
6293 | if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len)) | 6338 | if (copy_to_user(p, blob, params.len)) |
6294 | ret = -EFAULT; | 6339 | ret = -EFAULT; |
6295 | } | 6340 | } |
6296 | 6341 | ||
6297 | done: | 6342 | done: |
6298 | params.len = data->len; | 6343 | params.len = data->len; |
6299 | if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) | 6344 | if (copy_to_user(measure, ¶ms, sizeof(params))) |
6300 | ret = -EFAULT; | 6345 | ret = -EFAULT; |
6301 | e_free_blob: | 6346 | e_free_blob: |
6302 | kfree(blob); | 6347 | kfree(blob); |
@@ -6597,7 +6642,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6597 | struct page **pages; | 6642 | struct page **pages; |
6598 | void *blob, *hdr; | 6643 | void *blob, *hdr; |
6599 | unsigned long n; | 6644 | unsigned long n; |
6600 | int ret; | 6645 | int ret, offset; |
6601 | 6646 | ||
6602 | if (!sev_guest(kvm)) | 6647 | if (!sev_guest(kvm)) |
6603 | return -ENOTTY; | 6648 | return -ENOTTY; |
@@ -6623,6 +6668,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6623 | if (!data) | 6668 | if (!data) |
6624 | goto e_unpin_memory; | 6669 | goto e_unpin_memory; |
6625 | 6670 | ||
6671 | offset = params.guest_uaddr & (PAGE_SIZE - 1); | ||
6672 | data->guest_address = __sme_page_pa(pages[0]) + offset; | ||
6673 | data->guest_len = params.guest_len; | ||
6674 | |||
6626 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); | 6675 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); |
6627 | if (IS_ERR(blob)) { | 6676 | if (IS_ERR(blob)) { |
6628 | ret = PTR_ERR(blob); | 6677 | ret = PTR_ERR(blob); |
@@ -6637,8 +6686,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) | |||
6637 | ret = PTR_ERR(hdr); | 6686 | ret = PTR_ERR(hdr); |
6638 | goto e_free_blob; | 6687 | goto e_free_blob; |
6639 | } | 6688 | } |
6640 | data->trans_address = __psp_pa(blob); | 6689 | data->hdr_address = __psp_pa(hdr); |
6641 | data->trans_len = params.trans_len; | 6690 | data->hdr_len = params.hdr_len; |
6642 | 6691 | ||
6643 | data->handle = sev->handle; | 6692 | data->handle = sev->handle; |
6644 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); | 6693 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); |
@@ -6821,6 +6870,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
6821 | .vcpu_unblocking = svm_vcpu_unblocking, | 6870 | .vcpu_unblocking = svm_vcpu_unblocking, |
6822 | 6871 | ||
6823 | .update_bp_intercept = update_bp_intercept, | 6872 | .update_bp_intercept = update_bp_intercept, |
6873 | .get_msr_feature = svm_get_msr_feature, | ||
6824 | .get_msr = svm_get_msr, | 6874 | .get_msr = svm_get_msr, |
6825 | .set_msr = svm_set_msr, | 6875 | .set_msr = svm_set_msr, |
6826 | .get_segment_base = svm_get_segment_base, | 6876 | .get_segment_base = svm_get_segment_base, |
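The sev_launch_measure() rework above keeps the existing two-call userspace contract: issue KVM_SEV_LAUNCH_MEASURE once with a zero-length buffer so the kernel reports the required size back through params.len, then repeat the call with a buffer of that size. The following is only a hedged illustration, not part of this diff; it assumes the SEV UAPI types from <linux/kvm.h> (struct kvm_sev_cmd, struct kvm_sev_launch_measure, KVM_MEMORY_ENCRYPT_OP) and hypothetical descriptors vm_fd (an SEV-enabled VM) and sev_fd (an open /dev/sev handle).

/* Hedged sketch: fetch an SEV launch measurement via the ioctl path
 * exercised by sev_launch_measure() above. Error handling is minimal. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_launch_measurement(int vm_fd, int sev_fd)
{
	struct kvm_sev_launch_measure m = { .uaddr = 0, .len = 0 };
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_LAUNCH_MEASURE,
		.data = (uintptr_t)&m,
		.sev_fd = (uint32_t)sev_fd,
	};
	uint8_t *buf;
	uint32_t i;

	/*
	 * First pass: firmware rejects the zero-length request, but the
	 * kernel still copies the required length back into m.len.
	 */
	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	if (!m.len)
		return -1;

	buf = calloc(1, m.len);
	if (!buf)
		return -1;
	m.uaddr = (uintptr_t)buf;

	/* Second pass: the measurement blob is copied out into buf. */
	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
		free(buf);
		return -1;
	}

	for (i = 0; i < m.len; i++)
		printf("%02x", buf[i]);
	printf("\n");
	free(buf);
	return 0;
}

The two-pass pattern is unchanged by this patch; the rework only tightens how the kernel handles the user pointers, not the ABI seen by userspace.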
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3dec126aa302..051dab74e4e9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/apic.h> | 51 | #include <asm/apic.h> |
52 | #include <asm/irq_remapping.h> | 52 | #include <asm/irq_remapping.h> |
53 | #include <asm/mmu_context.h> | 53 | #include <asm/mmu_context.h> |
54 | #include <asm/microcode.h> | ||
54 | #include <asm/nospec-branch.h> | 55 | #include <asm/nospec-branch.h> |
55 | 56 | ||
56 | #include "trace.h" | 57 | #include "trace.h" |
@@ -3226,6 +3227,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, | |||
3226 | return !(val & ~valid_bits); | 3227 | return !(val & ~valid_bits); |
3227 | } | 3228 | } |
3228 | 3229 | ||
3230 | static int vmx_get_msr_feature(struct kvm_msr_entry *msr) | ||
3231 | { | ||
3232 | return 1; | ||
3233 | } | ||
3234 | |||
3229 | /* | 3235 | /* |
3230 | * Reads an msr value (of 'msr_index') into 'pdata'. | 3236 | * Reads an msr value (of 'msr_index') into 'pdata'. |
3231 | * Returns 0 on success, non-0 otherwise. | 3237 | * Returns 0 on success, non-0 otherwise. |
@@ -4485,7 +4491,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
4485 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, | 4491 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, |
4486 | SECONDARY_EXEC_DESC); | 4492 | SECONDARY_EXEC_DESC); |
4487 | hw_cr4 &= ~X86_CR4_UMIP; | 4493 | hw_cr4 &= ~X86_CR4_UMIP; |
4488 | } else | 4494 | } else if (!is_guest_mode(vcpu) || |
4495 | !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) | ||
4489 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, | 4496 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, |
4490 | SECONDARY_EXEC_DESC); | 4497 | SECONDARY_EXEC_DESC); |
4491 | 4498 | ||
@@ -5765,6 +5772,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
5765 | vmx->rmode.vm86_active = 0; | 5772 | vmx->rmode.vm86_active = 0; |
5766 | vmx->spec_ctrl = 0; | 5773 | vmx->spec_ctrl = 0; |
5767 | 5774 | ||
5775 | vcpu->arch.microcode_version = 0x100000000ULL; | ||
5768 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | 5776 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
5769 | kvm_set_cr8(vcpu, 0); | 5777 | kvm_set_cr8(vcpu, 0); |
5770 | 5778 | ||
@@ -9452,7 +9460,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9452 | * being speculatively taken. | 9460 | * being speculatively taken. |
9453 | */ | 9461 | */ |
9454 | if (vmx->spec_ctrl) | 9462 | if (vmx->spec_ctrl) |
9455 | wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); | 9463 | native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); |
9456 | 9464 | ||
9457 | vmx->__launched = vmx->loaded_vmcs->launched; | 9465 | vmx->__launched = vmx->loaded_vmcs->launched; |
9458 | asm( | 9466 | asm( |
@@ -9587,11 +9595,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9587 | * If the L02 MSR bitmap does not intercept the MSR, then we need to | 9595 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
9588 | * save it. | 9596 | * save it. |
9589 | */ | 9597 | */ |
9590 | if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) | 9598 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
9591 | rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); | 9599 | vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
9592 | 9600 | ||
9593 | if (vmx->spec_ctrl) | 9601 | if (vmx->spec_ctrl) |
9594 | wrmsrl(MSR_IA32_SPEC_CTRL, 0); | 9602 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); |
9595 | 9603 | ||
9596 | /* Eliminate branch target predictions from guest mode */ | 9604 | /* Eliminate branch target predictions from guest mode */ |
9597 | vmexit_fill_RSB(); | 9605 | vmexit_fill_RSB(); |
@@ -11199,7 +11207,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
11199 | if (ret) | 11207 | if (ret) |
11200 | return ret; | 11208 | return ret; |
11201 | 11209 | ||
11202 | if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) | 11210 | /* |
11211 | * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken | ||
11212 | * by event injection, halt vcpu. | ||
11213 | */ | ||
11214 | if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && | ||
11215 | !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) | ||
11203 | return kvm_vcpu_halt(vcpu); | 11216 | return kvm_vcpu_halt(vcpu); |
11204 | 11217 | ||
11205 | vmx->nested.nested_run_pending = 1; | 11218 | vmx->nested.nested_run_pending = 1; |
@@ -12290,6 +12303,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
12290 | .vcpu_put = vmx_vcpu_put, | 12303 | .vcpu_put = vmx_vcpu_put, |
12291 | 12304 | ||
12292 | .update_bp_intercept = update_exception_bitmap, | 12305 | .update_bp_intercept = update_exception_bitmap, |
12306 | .get_msr_feature = vmx_get_msr_feature, | ||
12293 | .get_msr = vmx_get_msr, | 12307 | .get_msr = vmx_get_msr, |
12294 | .set_msr = vmx_set_msr, | 12308 | .set_msr = vmx_set_msr, |
12295 | .get_segment_base = vmx_get_segment_base, | 12309 | .get_segment_base = vmx_get_segment_base, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c8a0b545ac20..18b5ca7a3197 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1049,6 +1049,45 @@ static u32 emulated_msrs[] = { | |||
1049 | 1049 | ||
1050 | static unsigned num_emulated_msrs; | 1050 | static unsigned num_emulated_msrs; |
1051 | 1051 | ||
1052 | /* | ||
1053 | * List of msr numbers which are used to expose MSR-based features that | ||
1054 | * can be used by a hypervisor to validate requested CPU features. | ||
1055 | */ | ||
1056 | static u32 msr_based_features[] = { | ||
1057 | MSR_F10H_DECFG, | ||
1058 | MSR_IA32_UCODE_REV, | ||
1059 | }; | ||
1060 | |||
1061 | static unsigned int num_msr_based_features; | ||
1062 | |||
1063 | static int kvm_get_msr_feature(struct kvm_msr_entry *msr) | ||
1064 | { | ||
1065 | switch (msr->index) { | ||
1066 | case MSR_IA32_UCODE_REV: | ||
1067 | rdmsrl(msr->index, msr->data); | ||
1068 | break; | ||
1069 | default: | ||
1070 | if (kvm_x86_ops->get_msr_feature(msr)) | ||
1071 | return 1; | ||
1072 | } | ||
1073 | return 0; | ||
1074 | } | ||
1075 | |||
1076 | static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) | ||
1077 | { | ||
1078 | struct kvm_msr_entry msr; | ||
1079 | int r; | ||
1080 | |||
1081 | msr.index = index; | ||
1082 | r = kvm_get_msr_feature(&msr); | ||
1083 | if (r) | ||
1084 | return r; | ||
1085 | |||
1086 | *data = msr.data; | ||
1087 | |||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1052 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) | 1091 | bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) |
1053 | { | 1092 | { |
1054 | if (efer & efer_reserved_bits) | 1093 | if (efer & efer_reserved_bits) |
@@ -2222,7 +2261,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2222 | 2261 | ||
2223 | switch (msr) { | 2262 | switch (msr) { |
2224 | case MSR_AMD64_NB_CFG: | 2263 | case MSR_AMD64_NB_CFG: |
2225 | case MSR_IA32_UCODE_REV: | ||
2226 | case MSR_IA32_UCODE_WRITE: | 2264 | case MSR_IA32_UCODE_WRITE: |
2227 | case MSR_VM_HSAVE_PA: | 2265 | case MSR_VM_HSAVE_PA: |
2228 | case MSR_AMD64_PATCH_LOADER: | 2266 | case MSR_AMD64_PATCH_LOADER: |
@@ -2230,6 +2268,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2230 | case MSR_AMD64_DC_CFG: | 2268 | case MSR_AMD64_DC_CFG: |
2231 | break; | 2269 | break; |
2232 | 2270 | ||
2271 | case MSR_IA32_UCODE_REV: | ||
2272 | if (msr_info->host_initiated) | ||
2273 | vcpu->arch.microcode_version = data; | ||
2274 | break; | ||
2233 | case MSR_EFER: | 2275 | case MSR_EFER: |
2234 | return set_efer(vcpu, data); | 2276 | return set_efer(vcpu, data); |
2235 | case MSR_K7_HWCR: | 2277 | case MSR_K7_HWCR: |
@@ -2525,7 +2567,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2525 | msr_info->data = 0; | 2567 | msr_info->data = 0; |
2526 | break; | 2568 | break; |
2527 | case MSR_IA32_UCODE_REV: | 2569 | case MSR_IA32_UCODE_REV: |
2528 | msr_info->data = 0x100000000ULL; | 2570 | msr_info->data = vcpu->arch.microcode_version; |
2529 | break; | 2571 | break; |
2530 | case MSR_MTRRcap: | 2572 | case MSR_MTRRcap: |
2531 | case 0x200 ... 0x2ff: | 2573 | case 0x200 ... 0x2ff: |
@@ -2680,13 +2722,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, | |||
2680 | int (*do_msr)(struct kvm_vcpu *vcpu, | 2722 | int (*do_msr)(struct kvm_vcpu *vcpu, |
2681 | unsigned index, u64 *data)) | 2723 | unsigned index, u64 *data)) |
2682 | { | 2724 | { |
2683 | int i, idx; | 2725 | int i; |
2684 | 2726 | ||
2685 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
2686 | for (i = 0; i < msrs->nmsrs; ++i) | 2727 | for (i = 0; i < msrs->nmsrs; ++i) |
2687 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) | 2728 | if (do_msr(vcpu, entries[i].index, &entries[i].data)) |
2688 | break; | 2729 | break; |
2689 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
2690 | 2730 | ||
2691 | return i; | 2731 | return i; |
2692 | } | 2732 | } |
@@ -2785,6 +2825,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2785 | case KVM_CAP_SET_BOOT_CPU_ID: | 2825 | case KVM_CAP_SET_BOOT_CPU_ID: |
2786 | case KVM_CAP_SPLIT_IRQCHIP: | 2826 | case KVM_CAP_SPLIT_IRQCHIP: |
2787 | case KVM_CAP_IMMEDIATE_EXIT: | 2827 | case KVM_CAP_IMMEDIATE_EXIT: |
2828 | case KVM_CAP_GET_MSR_FEATURES: | ||
2788 | r = 1; | 2829 | r = 1; |
2789 | break; | 2830 | break; |
2790 | case KVM_CAP_ADJUST_CLOCK: | 2831 | case KVM_CAP_ADJUST_CLOCK: |
@@ -2899,6 +2940,31 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
2899 | goto out; | 2940 | goto out; |
2900 | r = 0; | 2941 | r = 0; |
2901 | break; | 2942 | break; |
2943 | case KVM_GET_MSR_FEATURE_INDEX_LIST: { | ||
2944 | struct kvm_msr_list __user *user_msr_list = argp; | ||
2945 | struct kvm_msr_list msr_list; | ||
2946 | unsigned int n; | ||
2947 | |||
2948 | r = -EFAULT; | ||
2949 | if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) | ||
2950 | goto out; | ||
2951 | n = msr_list.nmsrs; | ||
2952 | msr_list.nmsrs = num_msr_based_features; | ||
2953 | if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) | ||
2954 | goto out; | ||
2955 | r = -E2BIG; | ||
2956 | if (n < msr_list.nmsrs) | ||
2957 | goto out; | ||
2958 | r = -EFAULT; | ||
2959 | if (copy_to_user(user_msr_list->indices, &msr_based_features, | ||
2960 | num_msr_based_features * sizeof(u32))) | ||
2961 | goto out; | ||
2962 | r = 0; | ||
2963 | break; | ||
2964 | } | ||
2965 | case KVM_GET_MSRS: | ||
2966 | r = msr_io(NULL, argp, do_get_msr_feature, 1); | ||
2967 | break; | ||
2902 | } | 2968 | } |
2903 | default: | 2969 | default: |
2904 | r = -EINVAL; | 2970 | r = -EINVAL; |
@@ -3636,12 +3702,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
3636 | r = 0; | 3702 | r = 0; |
3637 | break; | 3703 | break; |
3638 | } | 3704 | } |
3639 | case KVM_GET_MSRS: | 3705 | case KVM_GET_MSRS: { |
3706 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3640 | r = msr_io(vcpu, argp, do_get_msr, 1); | 3707 | r = msr_io(vcpu, argp, do_get_msr, 1); |
3708 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
3641 | break; | 3709 | break; |
3642 | case KVM_SET_MSRS: | 3710 | } |
3711 | case KVM_SET_MSRS: { | ||
3712 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3643 | r = msr_io(vcpu, argp, do_set_msr, 0); | 3713 | r = msr_io(vcpu, argp, do_set_msr, 0); |
3714 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
3644 | break; | 3715 | break; |
3716 | } | ||
3645 | case KVM_TPR_ACCESS_REPORTING: { | 3717 | case KVM_TPR_ACCESS_REPORTING: { |
3646 | struct kvm_tpr_access_ctl tac; | 3718 | struct kvm_tpr_access_ctl tac; |
3647 | 3719 | ||
@@ -4464,6 +4536,19 @@ static void kvm_init_msr_list(void) | |||
4464 | j++; | 4536 | j++; |
4465 | } | 4537 | } |
4466 | num_emulated_msrs = j; | 4538 | num_emulated_msrs = j; |
4539 | |||
4540 | for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { | ||
4541 | struct kvm_msr_entry msr; | ||
4542 | |||
4543 | msr.index = msr_based_features[i]; | ||
4544 | if (kvm_get_msr_feature(&msr)) | ||
4545 | continue; | ||
4546 | |||
4547 | if (j < i) | ||
4548 | msr_based_features[j] = msr_based_features[i]; | ||
4549 | j++; | ||
4550 | } | ||
4551 | num_msr_based_features = j; | ||
4467 | } | 4552 | } |
4468 | 4553 | ||
4469 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, | 4554 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, |
@@ -8017,6 +8102,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
8017 | 8102 | ||
8018 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | 8103 | void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
8019 | { | 8104 | { |
8105 | kvm_lapic_reset(vcpu, init_event); | ||
8106 | |||
8020 | vcpu->arch.hflags = 0; | 8107 | vcpu->arch.hflags = 0; |
8021 | 8108 | ||
8022 | vcpu->arch.smi_pending = 0; | 8109 | vcpu->arch.smi_pending = 0; |
@@ -8460,10 +8547,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) | |||
8460 | return r; | 8547 | return r; |
8461 | } | 8548 | } |
8462 | 8549 | ||
8463 | if (!size) { | 8550 | if (!size) |
8464 | r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); | 8551 | vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); |
8465 | WARN_ON(r < 0); | ||
8466 | } | ||
8467 | 8552 | ||
8468 | return 0; | 8553 | return 0; |
8469 | } | 8554 | } |
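Taken together, the x86.c changes above expose MSR-based features at system scope: KVM_CHECK_EXTENSION reports KVM_CAP_GET_MSR_FEATURES, KVM_GET_MSR_FEATURE_INDEX_LIST enumerates the feature MSRs, and KVM_GET_MSRS on the /dev/kvm fd reads their default values. The program below is a hedged sketch rather than anything from this diff; it assumes the struct kvm_msr_list / struct kvm_msrs layouts from <linux/kvm.h> and hardcodes MSR_IA32_UCODE_REV (0x8b), since msr-index.h is not a UAPI header.

/* Hedged sketch: enumerate KVM's feature MSRs and read the default
 * microcode revision through the new system-level KVM_GET_MSRS path. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_UCODE_REV 0x0000008b	/* assumption: value from msr-index.h */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req = { .hdr.nmsrs = 1, .entry.index = MSR_IA32_UCODE_REV };
	unsigned int i;

	if (kvm < 0 ||
	    ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
		return 1;

	/* First call with nmsrs == 0 fails with E2BIG but reports the count. */
	ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);
	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) < 0)
		return 1;

	for (i = 0; i < list->nmsrs; i++)
		printf("feature MSR 0x%x\n", list->indices[i]);

	/* Read the default microcode revision KVM will expose to guests. */
	if (ioctl(kvm, KVM_GET_MSRS, &req) == 1)
		printf("MSR_IA32_UCODE_REV = 0x%llx\n",
		       (unsigned long long)req.entry.data);
	return 0;
}

Calling KVM_GET_MSR_FEATURE_INDEX_LIST with nmsrs == 0 deliberately mirrors the -E2BIG protocol in the hunk above: the kernel writes the real count back before failing, so userspace can size its buffer for the second call.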
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 91e9700cc6dc..25a972c61b0a 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -28,7 +28,6 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o | |||
28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o | 28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o |
29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o | 29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o |
30 | lib-$(CONFIG_RETPOLINE) += retpoline.o | 30 | lib-$(CONFIG_RETPOLINE) += retpoline.o |
31 | OBJECT_FILES_NON_STANDARD_retpoline.o :=y | ||
32 | 31 | ||
33 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o | 32 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o |
34 | 33 | ||
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 480edc3a5e03..c909961e678a 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <asm/alternative-asm.h> | 7 | #include <asm/alternative-asm.h> |
8 | #include <asm/export.h> | 8 | #include <asm/export.h> |
9 | #include <asm/nospec-branch.h> | 9 | #include <asm/nospec-branch.h> |
10 | #include <asm/bitsperlong.h> | ||
11 | 10 | ||
12 | .macro THUNK reg | 11 | .macro THUNK reg |
13 | .section .text.__x86.indirect_thunk | 12 | .section .text.__x86.indirect_thunk |
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13) | |||
47 | GENERATE_THUNK(r14) | 46 | GENERATE_THUNK(r14) |
48 | GENERATE_THUNK(r15) | 47 | GENERATE_THUNK(r15) |
49 | #endif | 48 | #endif |
50 | |||
51 | /* | ||
52 | * Fill the CPU return stack buffer. | ||
53 | * | ||
54 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
55 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. | ||
56 | * | ||
57 | * This is required in various cases for retpoline and IBRS-based | ||
58 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
59 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
60 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
61 | * allow predictions from other (unwanted!) sources to be used. | ||
62 | * | ||
63 | * Google experimented with loop-unrolling and this turned out to be | ||
64 | * the optimal version - two calls, each with their own speculation | ||
65 | * trap should their return address end up getting used, in a loop. | ||
66 | */ | ||
67 | .macro STUFF_RSB nr:req sp:req | ||
68 | mov $(\nr / 2), %_ASM_BX | ||
69 | .align 16 | ||
70 | 771: | ||
71 | call 772f | ||
72 | 773: /* speculation trap */ | ||
73 | pause | ||
74 | lfence | ||
75 | jmp 773b | ||
76 | .align 16 | ||
77 | 772: | ||
78 | call 774f | ||
79 | 775: /* speculation trap */ | ||
80 | pause | ||
81 | lfence | ||
82 | jmp 775b | ||
83 | .align 16 | ||
84 | 774: | ||
85 | dec %_ASM_BX | ||
86 | jnz 771b | ||
87 | add $((BITS_PER_LONG/8) * \nr), \sp | ||
88 | .endm | ||
89 | |||
90 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
91 | |||
92 | ENTRY(__fill_rsb) | ||
93 | STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP | ||
94 | ret | ||
95 | END(__fill_rsb) | ||
96 | EXPORT_SYMBOL_GPL(__fill_rsb) | ||
97 | |||
98 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
99 | |||
100 | ENTRY(__clear_rsb) | ||
101 | STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP | ||
102 | ret | ||
103 | END(__clear_rsb) | ||
104 | EXPORT_SYMBOL_GPL(__clear_rsb) | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 800de815519c..c88573d90f3e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, | |||
1248 | tsk = current; | 1248 | tsk = current; |
1249 | mm = tsk->mm; | 1249 | mm = tsk->mm; |
1250 | 1250 | ||
1251 | /* | ||
1252 | * Detect and handle instructions that would cause a page fault for | ||
1253 | * both a tracked kernel page and a userspace page. | ||
1254 | */ | ||
1255 | prefetchw(&mm->mmap_sem); | 1251 | prefetchw(&mm->mmap_sem); |
1256 | 1252 | ||
1257 | if (unlikely(kmmio_fault(regs, address))) | 1253 | if (unlikely(kmmio_fault(regs, address))) |
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 01f682cf77a8..40a6085063d6 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | #include <asm/processor-flags.h> | 16 | #include <asm/processor-flags.h> |
17 | #include <asm/msr-index.h> | 17 | #include <asm/msr-index.h> |
18 | #include <asm/nospec-branch.h> | ||
18 | 19 | ||
19 | .text | 20 | .text |
20 | .code64 | 21 | .code64 |
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute) | |||
59 | movq %rax, %r8 /* Workarea encryption routine */ | 60 | movq %rax, %r8 /* Workarea encryption routine */ |
60 | addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ | 61 | addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ |
61 | 62 | ||
63 | ANNOTATE_RETPOLINE_SAFE | ||
62 | call *%rax /* Call the encryption routine */ | 64 | call *%rax /* Call the encryption routine */ |
63 | 65 | ||
64 | pop %r12 | 66 | pop %r12 |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 4923d92f918d..45e4eb5bcbb2 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/if_vlan.h> | 13 | #include <linux/if_vlan.h> |
14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
15 | #include <asm/set_memory.h> | 15 | #include <asm/set_memory.h> |
16 | #include <asm/nospec-branch.h> | ||
16 | #include <linux/bpf.h> | 17 | #include <linux/bpf.h> |
17 | 18 | ||
18 | /* | 19 | /* |
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
290 | EMIT2(0x89, 0xD2); /* mov edx, edx */ | 291 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
291 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ | 292 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ |
292 | offsetof(struct bpf_array, map.max_entries)); | 293 | offsetof(struct bpf_array, map.max_entries)); |
293 | #define OFFSET1 43 /* number of bytes to jump */ | 294 | #define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ |
294 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ | 295 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ |
295 | label1 = cnt; | 296 | label1 = cnt; |
296 | 297 | ||
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
299 | */ | 300 | */ |
300 | EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ | 301 | EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ |
301 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ | 302 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ |
302 | #define OFFSET2 32 | 303 | #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) |
303 | EMIT2(X86_JA, OFFSET2); /* ja out */ | 304 | EMIT2(X86_JA, OFFSET2); /* ja out */ |
304 | label2 = cnt; | 305 | label2 = cnt; |
305 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ | 306 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ |
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
313 | * goto out; | 314 | * goto out; |
314 | */ | 315 | */ |
315 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ | 316 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ |
316 | #define OFFSET3 10 | 317 | #define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) |
317 | EMIT2(X86_JE, OFFSET3); /* je out */ | 318 | EMIT2(X86_JE, OFFSET3); /* je out */ |
318 | label3 = cnt; | 319 | label3 = cnt; |
319 | 320 | ||
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
326 | * rdi == ctx (1st arg) | 327 | * rdi == ctx (1st arg) |
327 | * rax == prog->bpf_func + prologue_size | 328 | * rax == prog->bpf_func + prologue_size |
328 | */ | 329 | */ |
329 | EMIT2(0xFF, 0xE0); /* jmp rax */ | 330 | RETPOLINE_RAX_BPF_JIT(); |
330 | 331 | ||
331 | /* out: */ | 332 | /* out: */ |
332 | BUILD_BUG_ON(cnt - label1 != OFFSET1); | 333 | BUILD_BUG_ON(cnt - label1 != OFFSET1); |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 174c59774cc9..a7a7677265b6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -460,7 +460,7 @@ static int nmi_setup(void) | |||
460 | goto fail; | 460 | goto fail; |
461 | 461 | ||
462 | for_each_possible_cpu(cpu) { | 462 | for_each_possible_cpu(cpu) { |
463 | if (!cpu) | 463 | if (!IS_ENABLED(CONFIG_SMP) || !cpu) |
464 | continue; | 464 | continue; |
465 | 465 | ||
466 | memcpy(per_cpu(cpu_msrs, cpu).counters, | 466 | memcpy(per_cpu(cpu_msrs, cpu).counters, |
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index de53bd15df5a..24bb7598774e 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S | |||
@@ -102,7 +102,7 @@ ENTRY(startup_32) | |||
102 | * don't we'll eventually crash trying to execute encrypted | 102 | * don't we'll eventually crash trying to execute encrypted |
103 | * instructions. | 103 | * instructions. |
104 | */ | 104 | */ |
105 | bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags | 105 | btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags |
106 | jnc .Ldone | 106 | jnc .Ldone |
107 | movl $MSR_K8_SYSCFG, %ecx | 107 | movl $MSR_K8_SYSCFG, %ecx |
108 | rdmsr | 108 | rdmsr |
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 5d73c443e778..220e97841e49 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c | |||
@@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, | |||
770 | break; | 770 | break; |
771 | 771 | ||
772 | case R_X86_64_PC32: | 772 | case R_X86_64_PC32: |
773 | case R_X86_64_PLT32: | ||
773 | /* | 774 | /* |
774 | * PC relative relocations don't need to be adjusted unless | 775 | * PC relative relocations don't need to be adjusted unless |
775 | * referencing a percpu symbol. | 776 | * referencing a percpu symbol. |
777 | * | ||
778 | * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. | ||
776 | */ | 779 | */ |
777 | if (is_percpu_sym(sym, symname)) | 780 | if (is_percpu_sym(sym, symname)) |
778 | add_reloc(&relocs32neg, offset); | 781 | add_reloc(&relocs32neg, offset); |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index c047f42552e1..3c2c2530737e 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
@@ -1376,8 +1376,6 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1376 | 1376 | ||
1377 | if (!xen_initial_domain()) { | 1377 | if (!xen_initial_domain()) { |
1378 | add_preferred_console("xenboot", 0, NULL); | 1378 | add_preferred_console("xenboot", 0, NULL); |
1379 | add_preferred_console("tty", 0, NULL); | ||
1380 | add_preferred_console("hvc", 0, NULL); | ||
1381 | if (pci_xen) | 1379 | if (pci_xen) |
1382 | x86_init.pci.arch_init = pci_xen_init; | 1380 | x86_init.pci.arch_init = pci_xen_init; |
1383 | } else { | 1381 | } else { |
@@ -1410,6 +1408,10 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1410 | 1408 | ||
1411 | xen_boot_params_init_edd(); | 1409 | xen_boot_params_init_edd(); |
1412 | } | 1410 | } |
1411 | |||
1412 | add_preferred_console("tty", 0, NULL); | ||
1413 | add_preferred_console("hvc", 0, NULL); | ||
1414 | |||
1413 | #ifdef CONFIG_PCI | 1415 | #ifdef CONFIG_PCI |
1414 | /* PCI BIOS service won't work from a PV guest. */ | 1416 | /* PCI BIOS service won't work from a PV guest. */ |
1415 | pci_probe &= ~PCI_PROBE_BIOS; | 1417 | pci_probe &= ~PCI_PROBE_BIOS; |
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 623720a11143..732631ce250f 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c | |||
@@ -16,6 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/dma-contiguous.h> | 18 | #include <linux/dma-contiguous.h> |
19 | #include <linux/dma-direct.h> | ||
19 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
20 | #include <linux/highmem.h> | 21 | #include <linux/highmem.h> |
21 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
123 | unsigned long attrs) | 124 | unsigned long attrs) |
124 | { | 125 | { |
125 | unsigned long ret; | 126 | unsigned long ret; |
126 | unsigned long uncached = 0; | 127 | unsigned long uncached; |
127 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 128 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
128 | struct page *page = NULL; | 129 | struct page *page = NULL; |
129 | 130 | ||
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
144 | if (!page) | 145 | if (!page) |
145 | return NULL; | 146 | return NULL; |
146 | 147 | ||
147 | ret = (unsigned long)page_address(page); | 148 | *handle = phys_to_dma(dev, page_to_phys(page)); |
148 | 149 | ||
149 | /* We currently don't support coherent memory outside KSEG */ | 150 | #ifdef CONFIG_MMU |
151 | if (PageHighMem(page)) { | ||
152 | void *p; | ||
150 | 153 | ||
154 | p = dma_common_contiguous_remap(page, size, VM_MAP, | ||
155 | pgprot_noncached(PAGE_KERNEL), | ||
156 | __builtin_return_address(0)); | ||
157 | if (!p) { | ||
158 | if (!dma_release_from_contiguous(dev, page, count)) | ||
159 | __free_pages(page, get_order(size)); | ||
160 | } | ||
161 | return p; | ||
162 | } | ||
163 | #endif | ||
164 | ret = (unsigned long)page_address(page); | ||
151 | BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || | 165 | BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || |
152 | ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); | 166 | ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); |
153 | 167 | ||
154 | uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; | 168 | uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; |
155 | *handle = virt_to_bus((void *)ret); | ||
156 | __invalidate_dcache_range(ret, size); | 169 | __invalidate_dcache_range(ret, size); |
157 | 170 | ||
158 | return (void *)uncached; | 171 | return (void *)uncached; |
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, | |||
161 | static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr, | 174 | static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr, |
162 | dma_addr_t dma_handle, unsigned long attrs) | 175 | dma_addr_t dma_handle, unsigned long attrs) |
163 | { | 176 | { |
164 | unsigned long addr = (unsigned long)vaddr + | ||
165 | XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | ||
166 | struct page *page = virt_to_page(addr); | ||
167 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 177 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
168 | 178 | unsigned long addr = (unsigned long)vaddr; | |
169 | BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || | 179 | struct page *page; |
170 | addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); | 180 | |
181 | if (addr >= XCHAL_KSEG_BYPASS_VADDR && | ||
182 | addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) { | ||
183 | addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; | ||
184 | page = virt_to_page(addr); | ||
185 | } else { | ||
186 | #ifdef CONFIG_MMU | ||
187 | dma_common_free_remap(vaddr, size, VM_MAP); | ||
188 | #endif | ||
189 | page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle))); | ||
190 | } | ||
171 | 191 | ||
172 | if (!dma_release_from_contiguous(dev, page, count)) | 192 | if (!dma_release_from_contiguous(dev, page, count)) |
173 | __free_pages(page, get_order(size)); | 193 | __free_pages(page, get_order(size)); |
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index d776ec0d7b22..34aead7dcb48 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -79,19 +79,75 @@ void __init zones_init(void) | |||
79 | free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); | 79 | free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); |
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_HIGHMEM | ||
83 | static void __init free_area_high(unsigned long pfn, unsigned long end) | ||
84 | { | ||
85 | for (; pfn < end; pfn++) | ||
86 | free_highmem_page(pfn_to_page(pfn)); | ||
87 | } | ||
88 | |||
89 | static void __init free_highpages(void) | ||
90 | { | ||
91 | unsigned long max_low = max_low_pfn; | ||
92 | struct memblock_region *mem, *res; | ||
93 | |||
94 | reset_all_zones_managed_pages(); | ||
95 | /* set highmem page free */ | ||
96 | for_each_memblock(memory, mem) { | ||
97 | unsigned long start = memblock_region_memory_base_pfn(mem); | ||
98 | unsigned long end = memblock_region_memory_end_pfn(mem); | ||
99 | |||
100 | /* Ignore complete lowmem entries */ | ||
101 | if (end <= max_low) | ||
102 | continue; | ||
103 | |||
104 | if (memblock_is_nomap(mem)) | ||
105 | continue; | ||
106 | |||
107 | /* Truncate partial highmem entries */ | ||
108 | if (start < max_low) | ||
109 | start = max_low; | ||
110 | |||
111 | /* Find and exclude any reserved regions */ | ||
112 | for_each_memblock(reserved, res) { | ||
113 | unsigned long res_start, res_end; | ||
114 | |||
115 | res_start = memblock_region_reserved_base_pfn(res); | ||
116 | res_end = memblock_region_reserved_end_pfn(res); | ||
117 | |||
118 | if (res_end < start) | ||
119 | continue; | ||
120 | if (res_start < start) | ||
121 | res_start = start; | ||
122 | if (res_start > end) | ||
123 | res_start = end; | ||
124 | if (res_end > end) | ||
125 | res_end = end; | ||
126 | if (res_start != start) | ||
127 | free_area_high(start, res_start); | ||
128 | start = res_end; | ||
129 | if (start == end) | ||
130 | break; | ||
131 | } | ||
132 | |||
133 | /* And now free anything which remains */ | ||
134 | if (start < end) | ||
135 | free_area_high(start, end); | ||
136 | } | ||
137 | } | ||
138 | #else | ||
139 | static void __init free_highpages(void) | ||
140 | { | ||
141 | } | ||
142 | #endif | ||
143 | |||
82 | /* | 144 | /* |
83 | * Initialize memory pages. | 145 | * Initialize memory pages. |
84 | */ | 146 | */ |
85 | 147 | ||
86 | void __init mem_init(void) | 148 | void __init mem_init(void) |
87 | { | 149 | { |
88 | #ifdef CONFIG_HIGHMEM | 150 | free_highpages(); |
89 | unsigned long tmp; | ||
90 | |||
91 | reset_all_zones_managed_pages(); | ||
92 | for (tmp = max_low_pfn; tmp < max_pfn; tmp++) | ||
93 | free_highmem_page(pfn_to_page(tmp)); | ||
94 | #endif | ||
95 | 151 | ||
96 | max_mapnr = max_pfn - ARCH_PFN_OFFSET; | 152 | max_mapnr = max_pfn - ARCH_PFN_OFFSET; |
97 | high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); | 153 | high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 4117524ca45b..c2033a232a44 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -812,7 +812,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
812 | struct gendisk *disk; | 812 | struct gendisk *disk; |
813 | struct request_queue *q; | 813 | struct request_queue *q; |
814 | struct blkcg_gq *blkg; | 814 | struct blkcg_gq *blkg; |
815 | struct module *owner; | ||
816 | unsigned int major, minor; | 815 | unsigned int major, minor; |
817 | int key_len, part, ret; | 816 | int key_len, part, ret; |
818 | char *body; | 817 | char *body; |
@@ -904,9 +903,7 @@ fail_unlock: | |||
904 | spin_unlock_irq(q->queue_lock); | 903 | spin_unlock_irq(q->queue_lock); |
905 | rcu_read_unlock(); | 904 | rcu_read_unlock(); |
906 | fail: | 905 | fail: |
907 | owner = disk->fops->owner; | 906 | put_disk_and_module(disk); |
908 | put_disk(disk); | ||
909 | module_put(owner); | ||
910 | /* | 907 | /* |
911 | * If queue was bypassing, we should retry. Do so after a | 908 | * If queue was bypassing, we should retry. Do so after a |
912 | * short msleep(). It isn't strictly necessary but queue | 909 | * short msleep(). It isn't strictly necessary but queue |
@@ -931,13 +928,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); | |||
931 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) | 928 | void blkg_conf_finish(struct blkg_conf_ctx *ctx) |
932 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) | 929 | __releases(ctx->disk->queue->queue_lock) __releases(rcu) |
933 | { | 930 | { |
934 | struct module *owner; | ||
935 | |||
936 | spin_unlock_irq(ctx->disk->queue->queue_lock); | 931 | spin_unlock_irq(ctx->disk->queue->queue_lock); |
937 | rcu_read_unlock(); | 932 | rcu_read_unlock(); |
938 | owner = ctx->disk->fops->owner; | 933 | put_disk_and_module(ctx->disk); |
939 | put_disk(ctx->disk); | ||
940 | module_put(owner); | ||
941 | } | 934 | } |
942 | EXPORT_SYMBOL_GPL(blkg_conf_finish); | 935 | EXPORT_SYMBOL_GPL(blkg_conf_finish); |
943 | 936 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index 2d1a7bbe0634..6d82c4f7fadd 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2434,7 +2434,7 @@ blk_qc_t submit_bio(struct bio *bio) | |||
2434 | unsigned int count; | 2434 | unsigned int count; |
2435 | 2435 | ||
2436 | if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) | 2436 | if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) |
2437 | count = queue_logical_block_size(bio->bi_disk->queue); | 2437 | count = queue_logical_block_size(bio->bi_disk->queue) >> 9; |
2438 | else | 2438 | else |
2439 | count = bio_sectors(bio); | 2439 | count = bio_sectors(bio); |
2440 | 2440 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 357492712b0e..16e83e6df404 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -712,7 +712,6 @@ static void __blk_mq_requeue_request(struct request *rq) | |||
712 | 712 | ||
713 | trace_block_rq_requeue(q, rq); | 713 | trace_block_rq_requeue(q, rq); |
714 | wbt_requeue(q->rq_wb, &rq->issue_stat); | 714 | wbt_requeue(q->rq_wb, &rq->issue_stat); |
715 | blk_mq_sched_requeue_request(rq); | ||
716 | 715 | ||
717 | if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) { | 716 | if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) { |
718 | blk_mq_rq_update_state(rq, MQ_RQ_IDLE); | 717 | blk_mq_rq_update_state(rq, MQ_RQ_IDLE); |
@@ -725,6 +724,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) | |||
725 | { | 724 | { |
726 | __blk_mq_requeue_request(rq); | 725 | __blk_mq_requeue_request(rq); |
727 | 726 | ||
727 | /* this request will be re-inserted to io scheduler queue */ | ||
728 | blk_mq_sched_requeue_request(rq); | ||
729 | |||
728 | BUG_ON(blk_queued_rq(rq)); | 730 | BUG_ON(blk_queued_rq(rq)); |
729 | blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); | 731 | blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); |
730 | } | 732 | } |
diff --git a/block/genhd.c b/block/genhd.c index 88a53c188cb7..9656f9e9f99e 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -547,7 +547,7 @@ static int exact_lock(dev_t devt, void *data) | |||
547 | { | 547 | { |
548 | struct gendisk *p = data; | 548 | struct gendisk *p = data; |
549 | 549 | ||
550 | if (!get_disk(p)) | 550 | if (!get_disk_and_module(p)) |
551 | return -1; | 551 | return -1; |
552 | return 0; | 552 | return 0; |
553 | } | 553 | } |
@@ -717,6 +717,11 @@ void del_gendisk(struct gendisk *disk) | |||
717 | blk_integrity_del(disk); | 717 | blk_integrity_del(disk); |
718 | disk_del_events(disk); | 718 | disk_del_events(disk); |
719 | 719 | ||
720 | /* | ||
721 | * Block lookups of the disk until all bdevs are unhashed and the | ||
722 | * disk is marked as dead (GENHD_FL_UP cleared). | ||
723 | */ | ||
724 | down_write(&disk->lookup_sem); | ||
720 | /* invalidate stuff */ | 725 | /* invalidate stuff */ |
721 | disk_part_iter_init(&piter, disk, | 726 | disk_part_iter_init(&piter, disk, |
722 | DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); | 727 | DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); |
@@ -731,6 +736,7 @@ void del_gendisk(struct gendisk *disk) | |||
731 | bdev_unhash_inode(disk_devt(disk)); | 736 | bdev_unhash_inode(disk_devt(disk)); |
732 | set_capacity(disk, 0); | 737 | set_capacity(disk, 0); |
733 | disk->flags &= ~GENHD_FL_UP; | 738 | disk->flags &= ~GENHD_FL_UP; |
739 | up_write(&disk->lookup_sem); | ||
734 | 740 | ||
735 | if (!(disk->flags & GENHD_FL_HIDDEN)) | 741 | if (!(disk->flags & GENHD_FL_HIDDEN)) |
736 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); | 742 | sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); |
@@ -809,16 +815,28 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) | |||
809 | 815 | ||
810 | spin_lock_bh(&ext_devt_lock); | 816 | spin_lock_bh(&ext_devt_lock); |
811 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 817 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
812 | if (part && get_disk(part_to_disk(part))) { | 818 | if (part && get_disk_and_module(part_to_disk(part))) { |
813 | *partno = part->partno; | 819 | *partno = part->partno; |
814 | disk = part_to_disk(part); | 820 | disk = part_to_disk(part); |
815 | } | 821 | } |
816 | spin_unlock_bh(&ext_devt_lock); | 822 | spin_unlock_bh(&ext_devt_lock); |
817 | } | 823 | } |
818 | 824 | ||
819 | if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) { | 825 | if (!disk) |
820 | put_disk(disk); | 826 | return NULL; |
827 | |||
828 | /* | ||
829 | * Synchronize with del_gendisk() to not return disk that is being | ||
830 | * destroyed. | ||
831 | */ | ||
832 | down_read(&disk->lookup_sem); | ||
833 | if (unlikely((disk->flags & GENHD_FL_HIDDEN) || | ||
834 | !(disk->flags & GENHD_FL_UP))) { | ||
835 | up_read(&disk->lookup_sem); | ||
836 | put_disk_and_module(disk); | ||
821 | disk = NULL; | 837 | disk = NULL; |
838 | } else { | ||
839 | up_read(&disk->lookup_sem); | ||
822 | } | 840 | } |
823 | return disk; | 841 | return disk; |
824 | } | 842 | } |
@@ -1418,6 +1436,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) | |||
1418 | kfree(disk); | 1436 | kfree(disk); |
1419 | return NULL; | 1437 | return NULL; |
1420 | } | 1438 | } |
1439 | init_rwsem(&disk->lookup_sem); | ||
1421 | disk->node_id = node_id; | 1440 | disk->node_id = node_id; |
1422 | if (disk_expand_part_tbl(disk, 0)) { | 1441 | if (disk_expand_part_tbl(disk, 0)) { |
1423 | free_part_stats(&disk->part0); | 1442 | free_part_stats(&disk->part0); |
@@ -1453,7 +1472,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) | |||
1453 | } | 1472 | } |
1454 | EXPORT_SYMBOL(__alloc_disk_node); | 1473 | EXPORT_SYMBOL(__alloc_disk_node); |
1455 | 1474 | ||
1456 | struct kobject *get_disk(struct gendisk *disk) | 1475 | struct kobject *get_disk_and_module(struct gendisk *disk) |
1457 | { | 1476 | { |
1458 | struct module *owner; | 1477 | struct module *owner; |
1459 | struct kobject *kobj; | 1478 | struct kobject *kobj; |
@@ -1471,17 +1490,30 @@ struct kobject *get_disk(struct gendisk *disk) | |||
1471 | return kobj; | 1490 | return kobj; |
1472 | 1491 | ||
1473 | } | 1492 | } |
1474 | 1493 | EXPORT_SYMBOL(get_disk_and_module); | |
1475 | EXPORT_SYMBOL(get_disk); | ||
1476 | 1494 | ||
1477 | void put_disk(struct gendisk *disk) | 1495 | void put_disk(struct gendisk *disk) |
1478 | { | 1496 | { |
1479 | if (disk) | 1497 | if (disk) |
1480 | kobject_put(&disk_to_dev(disk)->kobj); | 1498 | kobject_put(&disk_to_dev(disk)->kobj); |
1481 | } | 1499 | } |
1482 | |||
1483 | EXPORT_SYMBOL(put_disk); | 1500 | EXPORT_SYMBOL(put_disk); |
1484 | 1501 | ||
1502 | /* | ||
1503 | * This is a counterpart of get_disk_and_module() and thus also of | ||
1504 | * get_gendisk(). | ||
1505 | */ | ||
1506 | void put_disk_and_module(struct gendisk *disk) | ||
1507 | { | ||
1508 | if (disk) { | ||
1509 | struct module *owner = disk->fops->owner; | ||
1510 | |||
1511 | put_disk(disk); | ||
1512 | module_put(owner); | ||
1513 | } | ||
1514 | } | ||
1515 | EXPORT_SYMBOL(put_disk_and_module); | ||
1516 | |||
1485 | static void set_disk_ro_uevent(struct gendisk *gd, int ro) | 1517 | static void set_disk_ro_uevent(struct gendisk *gd, int ro) |
1486 | { | 1518 | { |
1487 | char event[] = "DISK_RO=1"; | 1519 | char event[] = "DISK_RO=1"; |
diff --git a/block/ioctl.c b/block/ioctl.c index 1668506d8ed8..3884d810efd2 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, | |||
225 | 225 | ||
226 | if (start + len > i_size_read(bdev->bd_inode)) | 226 | if (start + len > i_size_read(bdev->bd_inode)) |
227 | return -EINVAL; | 227 | return -EINVAL; |
228 | truncate_inode_pages_range(mapping, start, start + len); | 228 | truncate_inode_pages_range(mapping, start, start + len - 1); |
229 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, | 229 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, |
230 | GFP_KERNEL, flags); | 230 | GFP_KERNEL, flags); |
231 | } | 231 | } |
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index f95c60774ce8..0d6d25e32e1f 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c | |||
@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = { | |||
833 | .limit_depth = kyber_limit_depth, | 833 | .limit_depth = kyber_limit_depth, |
834 | .prepare_request = kyber_prepare_request, | 834 | .prepare_request = kyber_prepare_request, |
835 | .finish_request = kyber_finish_request, | 835 | .finish_request = kyber_finish_request, |
836 | .requeue_request = kyber_finish_request, | ||
836 | .completed_request = kyber_completed_request, | 837 | .completed_request = kyber_completed_request, |
837 | .dispatch_request = kyber_dispatch_request, | 838 | .dispatch_request = kyber_dispatch_request, |
838 | .has_work = kyber_has_work, | 839 | .has_work = kyber_has_work, |
diff --git a/block/mq-deadline.c b/block/mq-deadline.c index c56f211c8440..8ec0ba9f5386 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c | |||
@@ -536,12 +536,21 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, | |||
536 | } | 536 | } |
537 | 537 | ||
538 | /* | 538 | /* |
539 | * Nothing to do here. This is defined only to ensure that .finish_request | ||
540 | * method is called upon request completion. | ||
541 | */ | ||
542 | static void dd_prepare_request(struct request *rq, struct bio *bio) | ||
543 | { | ||
544 | } | ||
545 | |||
546 | /* | ||
539 | * For zoned block devices, write unlock the target zone of | 547 | * For zoned block devices, write unlock the target zone of |
540 | * completed write requests. Do this while holding the zone lock | 548 | * completed write requests. Do this while holding the zone lock |
541 | * spinlock so that the zone is never unlocked while deadline_fifo_request() | 549 | * spinlock so that the zone is never unlocked while deadline_fifo_request() |
542 | * while deadline_next_request() are executing. | 550 | * or deadline_next_request() are executing. This function is called for |
551 | * all requests, whether or not these requests complete successfully. | ||
543 | */ | 552 | */ |
544 | static void dd_completed_request(struct request *rq) | 553 | static void dd_finish_request(struct request *rq) |
545 | { | 554 | { |
546 | struct request_queue *q = rq->q; | 555 | struct request_queue *q = rq->q; |
547 | 556 | ||
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = { | |||
756 | .ops.mq = { | 765 | .ops.mq = { |
757 | .insert_requests = dd_insert_requests, | 766 | .insert_requests = dd_insert_requests, |
758 | .dispatch_request = dd_dispatch_request, | 767 | .dispatch_request = dd_dispatch_request, |
759 | .completed_request = dd_completed_request, | 768 | .prepare_request = dd_prepare_request, |
769 | .finish_request = dd_finish_request, | ||
760 | .next_request = elv_rb_latter_request, | 770 | .next_request = elv_rb_latter_request, |
761 | .former_request = elv_rb_former_request, | 771 | .former_request = elv_rb_former_request, |
762 | .bio_merge = dd_bio_merge, | 772 | .bio_merge = dd_bio_merge, |
diff --git a/block/partition-generic.c b/block/partition-generic.c index 91622db9aedf..08dabcd8b6ae 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf) | |||
51 | 51 | ||
52 | EXPORT_SYMBOL(bdevname); | 52 | EXPORT_SYMBOL(bdevname); |
53 | 53 | ||
54 | const char *bio_devname(struct bio *bio, char *buf) | ||
55 | { | ||
56 | return disk_name(bio->bi_disk, bio->bi_partno, buf); | ||
57 | } | ||
58 | EXPORT_SYMBOL(bio_devname); | ||
59 | |||
54 | /* | 60 | /* |
55 | * There's very little reason to use this, you should really | 61 | * There's very little reason to use this, you should really |
56 | * have a struct block_device just about everywhere and use | 62 | * have a struct block_device just about everywhere and use |
diff --git a/block/sed-opal.c b/block/sed-opal.c index 9ed51d0c6b1d..e4929eec547f 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c | |||
@@ -490,7 +490,7 @@ static int opal_discovery0_end(struct opal_dev *dev) | |||
490 | 490 | ||
491 | if (!found_com_id) { | 491 | if (!found_com_id) { |
492 | pr_debug("Could not find OPAL comid for device. Returning early\n"); | 492 | pr_debug("Could not find OPAL comid for device. Returning early\n"); |
493 | return -EOPNOTSUPP;; | 493 | return -EOPNOTSUPP; |
494 | } | 494 | } |
495 | 495 | ||
496 | dev->comid = comid; | 496 | dev->comid = comid; |
diff --git a/certs/blacklist_nohashes.c b/certs/blacklist_nohashes.c index 73fd99098ad7..753b703ef0ef 100644 --- a/certs/blacklist_nohashes.c +++ b/certs/blacklist_nohashes.c | |||
@@ -1,6 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include "blacklist.h" | 2 | #include "blacklist.h" |
3 | 3 | ||
4 | const char __initdata *const blacklist_hashes[] = { | 4 | const char __initconst *const blacklist_hashes[] = { |
5 | NULL | 5 | NULL |
6 | }; | 6 | }; |
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index 1f4e25f10049..598906b1e28d 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c | |||
@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, | |||
106 | pr_devel("sinfo %u: Direct signer is key %x\n", | 106 | pr_devel("sinfo %u: Direct signer is key %x\n", |
107 | sinfo->index, key_serial(key)); | 107 | sinfo->index, key_serial(key)); |
108 | x509 = NULL; | 108 | x509 = NULL; |
109 | sig = sinfo->sig; | ||
109 | goto matched; | 110 | goto matched; |
110 | } | 111 | } |
111 | if (PTR_ERR(key) != -ENOKEY) | 112 | if (PTR_ERR(key) != -ENOKEY) |
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index 39e6de0c2761..97c77f66b20d 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c | |||
@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, | |||
270 | sinfo->index); | 270 | sinfo->index); |
271 | return 0; | 271 | return 0; |
272 | } | 272 | } |
273 | ret = public_key_verify_signature(p->pub, p->sig); | 273 | ret = public_key_verify_signature(p->pub, x509->sig); |
274 | if (ret < 0) | 274 | if (ret < 0) |
275 | return ret; | 275 | return ret; |
276 | x509->signer = p; | 276 | x509->signer = p; |
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7, | |||
366 | * | 366 | * |
367 | * (*) -EBADMSG if some part of the message was invalid, or: | 367 | * (*) -EBADMSG if some part of the message was invalid, or: |
368 | * | 368 | * |
369 | * (*) 0 if no signature chains were found to be blacklisted or to contain | 369 | * (*) 0 if a signature chain passed verification, or: |
370 | * unsupported crypto, or: | ||
371 | * | 370 | * |
372 | * (*) -EKEYREJECTED if a blacklisted key was encountered, or: | 371 | * (*) -EKEYREJECTED if a blacklisted key was encountered, or: |
373 | * | 372 | * |
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7, | |||
423 | 422 | ||
424 | for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { | 423 | for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { |
425 | ret = pkcs7_verify_one(pkcs7, sinfo); | 424 | ret = pkcs7_verify_one(pkcs7, sinfo); |
426 | if (sinfo->blacklisted && actual_ret == -ENOPKG) | 425 | if (sinfo->blacklisted) { |
427 | actual_ret = -EKEYREJECTED; | 426 | if (actual_ret == -ENOPKG) |
427 | actual_ret = -EKEYREJECTED; | ||
428 | continue; | ||
429 | } | ||
428 | if (ret < 0) { | 430 | if (ret < 0) { |
429 | if (ret == -ENOPKG) { | 431 | if (ret == -ENOPKG) { |
430 | sinfo->unsupported_crypto = true; | 432 | sinfo->unsupported_crypto = true; |
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index de996586762a..e929fe1e4106 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c | |||
@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey, | |||
79 | 79 | ||
80 | BUG_ON(!pkey); | 80 | BUG_ON(!pkey); |
81 | BUG_ON(!sig); | 81 | BUG_ON(!sig); |
82 | BUG_ON(!sig->digest); | ||
83 | BUG_ON(!sig->s); | 82 | BUG_ON(!sig->s); |
84 | 83 | ||
84 | if (!sig->digest) | ||
85 | return -ENOPKG; | ||
86 | |||
85 | alg_name = sig->pkey_algo; | 87 | alg_name = sig->pkey_algo; |
86 | if (strcmp(sig->pkey_algo, "rsa") == 0) { | 88 | if (strcmp(sig->pkey_algo, "rsa") == 0) { |
87 | /* The data wangled by the RSA algorithm is typically padded | 89 | /* The data wangled by the RSA algorithm is typically padded |
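The public_key change above downgrades a missing digest from a BUG_ON() crash to a recoverable -ENOPKG, which the pkcs7 and keyring-restriction callers can then map onto -EKEYREJECTED or "unsupported crypto" as appropriate. A minimal sketch of the general pattern, using a made-up helper name (check_signature_inputs() is not part of the kernel API):

	/* Sketch: never BUG on input that may come from an untrusted blob. */
	static int check_signature_inputs(const struct public_key_signature *sig)
	{
		if (!sig || !sig->s)
			return -EINVAL;		/* caller bug, but still survivable */
		if (!sig->digest)
			return -ENOPKG;		/* hash missing or unsupported */
		return 0;
	}
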
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c index 86fb68508952..7c93c7728454 100644 --- a/crypto/asymmetric_keys/restrict.c +++ b/crypto/asymmetric_keys/restrict.c | |||
@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup); | |||
67 | * | 67 | * |
68 | * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a | 68 | * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a |
69 | * matching parent certificate in the trusted list, -EKEYREJECTED if the | 69 | * matching parent certificate in the trusted list, -EKEYREJECTED if the |
70 | * signature check fails or the key is blacklisted and some other error if | 70 | * signature check fails or the key is blacklisted, -ENOPKG if the signature |
71 | * there is a matching certificate but the signature check cannot be performed. | 71 | * uses unsupported crypto, or some other error if there is a matching |
72 | * certificate but the signature check cannot be performed. | ||
72 | */ | 73 | */ |
73 | int restrict_link_by_signature(struct key *dest_keyring, | 74 | int restrict_link_by_signature(struct key *dest_keyring, |
74 | const struct key_type *type, | 75 | const struct key_type *type, |
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring, | |||
88 | return -EOPNOTSUPP; | 89 | return -EOPNOTSUPP; |
89 | 90 | ||
90 | sig = payload->data[asym_auth]; | 91 | sig = payload->data[asym_auth]; |
92 | if (!sig) | ||
93 | return -ENOPKG; | ||
91 | if (!sig->auth_ids[0] && !sig->auth_ids[1]) | 94 | if (!sig->auth_ids[0] && !sig->auth_ids[1]) |
92 | return -ENOKEY; | 95 | return -ENOKEY; |
93 | 96 | ||
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring, | |||
139 | return -EOPNOTSUPP; | 142 | return -EOPNOTSUPP; |
140 | 143 | ||
141 | sig = payload->data[asym_auth]; | 144 | sig = payload->data[asym_auth]; |
145 | if (!sig) | ||
146 | return -ENOPKG; | ||
142 | if (!sig->auth_ids[0] && !sig->auth_ids[1]) | 147 | if (!sig->auth_ids[0] && !sig->auth_ids[1]) |
143 | return -ENOKEY; | 148 | return -ENOKEY; |
144 | 149 | ||
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring, | |||
222 | * | 227 | * |
223 | * Returns 0 if the new certificate was accepted, -ENOKEY if we | 228 | * Returns 0 if the new certificate was accepted, -ENOKEY if we |
224 | * couldn't find a matching parent certificate in the trusted list, | 229 | * couldn't find a matching parent certificate in the trusted list, |
225 | * -EKEYREJECTED if the signature check fails, and some other error if | 230 | * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses |
226 | * there is a matching certificate but the signature check cannot be | 231 | * unsupported crypto, or some other error if there is a matching certificate |
227 | * performed. | 232 | * but the signature check cannot be performed. |
228 | */ | 233 | */ |
229 | int restrict_link_by_key_or_keyring(struct key *dest_keyring, | 234 | int restrict_link_by_key_or_keyring(struct key *dest_keyring, |
230 | const struct key_type *type, | 235 | const struct key_type *type, |
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring, | |||
249 | * | 254 | * |
250 | * Returns 0 if the new certificate was accepted, -ENOKEY if we | 255 | * Returns 0 if the new certificate was accepted, -ENOKEY if we |
251 | * couldn't find a matching parent certificate in the trusted list, | 256 | * couldn't find a matching parent certificate in the trusted list, |
252 | * -EKEYREJECTED if the signature check fails, and some other error if | 257 | * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses |
253 | * there is a matching certificate but the signature check cannot be | 258 | * unsupported crypto, or some other error if there is a matching certificate |
254 | * performed. | 259 | * but the signature check cannot be performed. |
255 | */ | 260 | */ |
256 | int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring, | 261 | int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring, |
257 | const struct key_type *type, | 262 | const struct key_type *type, |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 15e3d3c2260d..764b63a5aade 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t, | |||
1991 | &target_thread->reply_error.work); | 1991 | &target_thread->reply_error.work); |
1992 | wake_up_interruptible(&target_thread->wait); | 1992 | wake_up_interruptible(&target_thread->wait); |
1993 | } else { | 1993 | } else { |
1994 | WARN(1, "Unexpected reply error: %u\n", | 1994 | /* |
1995 | target_thread->reply_error.cmd); | 1995 | * Cannot get here for normal operation, but |
1996 | * we can if multiple synchronous transactions | ||
1997 | * are sent without blocking for responses. | ||
1998 | * Just ignore the 2nd error in this case. | ||
1999 | */ | ||
2000 | pr_warn("Unexpected reply error: %u\n", | ||
2001 | target_thread->reply_error.cmd); | ||
1996 | } | 2002 | } |
1997 | binder_inner_proc_unlock(target_thread->proc); | 2003 | binder_inner_proc_unlock(target_thread->proc); |
1998 | binder_thread_dec_tmpref(target_thread); | 2004 | binder_thread_dec_tmpref(target_thread); |
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2193 | int debug_id = buffer->debug_id; | 2199 | int debug_id = buffer->debug_id; |
2194 | 2200 | ||
2195 | binder_debug(BINDER_DEBUG_TRANSACTION, | 2201 | binder_debug(BINDER_DEBUG_TRANSACTION, |
2196 | "%d buffer release %d, size %zd-%zd, failed at %p\n", | 2202 | "%d buffer release %d, size %zd-%zd, failed at %pK\n", |
2197 | proc->pid, buffer->debug_id, | 2203 | proc->pid, buffer->debug_id, |
2198 | buffer->data_size, buffer->offsets_size, failed_at); | 2204 | buffer->data_size, buffer->offsets_size, failed_at); |
2199 | 2205 | ||
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc, | |||
3705 | } | 3711 | } |
3706 | } | 3712 | } |
3707 | binder_debug(BINDER_DEBUG_DEAD_BINDER, | 3713 | binder_debug(BINDER_DEBUG_DEAD_BINDER, |
3708 | "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", | 3714 | "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", |
3709 | proc->pid, thread->pid, (u64)cookie, | 3715 | proc->pid, thread->pid, (u64)cookie, |
3710 | death); | 3716 | death); |
3711 | if (death == NULL) { | 3717 | if (death == NULL) { |
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc, | |||
4376 | 4382 | ||
4377 | binder_inner_proc_unlock(thread->proc); | 4383 | binder_inner_proc_unlock(thread->proc); |
4378 | 4384 | ||
4385 | /* | ||
4386 | * This is needed to avoid races between wake_up_poll() above and | ||
4387 | * ep_remove_waitqueue() called for other reasons (e.g. the epoll file | ||
4388 | * descriptor being closed); ep_remove_waitqueue() holds an RCU read | ||
4389 | * lock, so we can be sure it's done after calling synchronize_rcu(). | ||
4390 | */ | ||
4391 | if (thread->looper & BINDER_LOOPER_STATE_POLL) | ||
4392 | synchronize_rcu(); | ||
4393 | |||
4379 | if (send_reply) | 4394 | if (send_reply) |
4380 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); | 4395 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); |
4381 | binder_release_work(proc, &thread->todo); | 4396 | binder_release_work(proc, &thread->todo); |
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp, | |||
4391 | bool wait_for_proc_work; | 4406 | bool wait_for_proc_work; |
4392 | 4407 | ||
4393 | thread = binder_get_thread(proc); | 4408 | thread = binder_get_thread(proc); |
4409 | if (!thread) | ||
4410 | return POLLERR; | ||
4394 | 4411 | ||
4395 | binder_inner_proc_lock(thread->proc); | 4412 | binder_inner_proc_lock(thread->proc); |
4396 | thread->looper |= BINDER_LOOPER_STATE_POLL; | 4413 | thread->looper |= BINDER_LOOPER_STATE_POLL; |
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, | |||
5034 | spin_lock(&t->lock); | 5051 | spin_lock(&t->lock); |
5035 | to_proc = t->to_proc; | 5052 | to_proc = t->to_proc; |
5036 | seq_printf(m, | 5053 | seq_printf(m, |
5037 | "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", | 5054 | "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", |
5038 | prefix, t->debug_id, t, | 5055 | prefix, t->debug_id, t, |
5039 | t->from ? t->from->proc->pid : 0, | 5056 | t->from ? t->from->proc->pid : 0, |
5040 | t->from ? t->from->pid : 0, | 5057 | t->from ? t->from->pid : 0, |
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, | |||
5058 | } | 5075 | } |
5059 | if (buffer->target_node) | 5076 | if (buffer->target_node) |
5060 | seq_printf(m, " node %d", buffer->target_node->debug_id); | 5077 | seq_printf(m, " node %d", buffer->target_node->debug_id); |
5061 | seq_printf(m, " size %zd:%zd data %p\n", | 5078 | seq_printf(m, " size %zd:%zd data %pK\n", |
5062 | buffer->data_size, buffer->offsets_size, | 5079 | buffer->data_size, buffer->offsets_size, |
5063 | buffer->data); | 5080 | buffer->data); |
5064 | } | 5081 | } |
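Two independent hardening changes run through the binder hunks: debug output switches from %p to %pK so kernel pointers are hashed or zeroed for unprivileged readers, and a thread that ever entered poll() now waits for an RCU grace period before being torn down. A tiny, purely hypothetical logging example of the former (buf and its fields are assumed):

	/* %pK honours kptr_restrict; plain %p would leak the raw address. */
	pr_info("buffer %d at %pK, size %zd\n",
		buf->debug_id, buf, buf->data_size);
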
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index e5aa62fcf5a8..3aaf6af3ec23 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1758 | if (unit[drive].type->code == FD_NODRIVE) | 1758 | if (unit[drive].type->code == FD_NODRIVE) |
1759 | return NULL; | 1759 | return NULL; |
1760 | *part = 0; | 1760 | *part = 0; |
1761 | return get_disk(unit[drive].gendisk); | 1761 | return get_disk_and_module(unit[drive].gendisk); |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) | 1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 8bc3b9fd8dd2..dfb2c2622e5a 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) | 1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) |
1918 | return NULL; | 1918 | return NULL; |
1919 | *part = 0; | 1919 | *part = 0; |
1920 | return get_disk(unit[drive].disk); | 1920 | return get_disk_and_module(unit[drive].disk); |
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | static int __init atari_floppy_init (void) | 1923 | static int __init atari_floppy_init (void) |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 8028a3a7e7fd..deea78e485da 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) | |||
456 | 456 | ||
457 | mutex_lock(&brd_devices_mutex); | 457 | mutex_lock(&brd_devices_mutex); |
458 | brd = brd_init_one(MINOR(dev) / max_part, &new); | 458 | brd = brd_init_one(MINOR(dev) / max_part, &new); |
459 | kobj = brd ? get_disk(brd->brd_disk) : NULL; | 459 | kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL; |
460 | mutex_unlock(&brd_devices_mutex); | 460 | mutex_unlock(&brd_devices_mutex); |
461 | 461 | ||
462 | if (new) | 462 | if (new) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index eae484acfbbc..8ec7235fc93b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) | 4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) |
4506 | return NULL; | 4506 | return NULL; |
4507 | *part = 0; | 4507 | *part = 0; |
4508 | return get_disk(disks[drive]); | 4508 | return get_disk_and_module(disks[drive]); |
4509 | } | 4509 | } |
4510 | 4510 | ||
4511 | static int __init do_floppy_init(void) | 4511 | static int __init do_floppy_init(void) |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d5fe720cf149..87855b5123a6 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) | |||
1922 | if (err < 0) | 1922 | if (err < 0) |
1923 | kobj = NULL; | 1923 | kobj = NULL; |
1924 | else | 1924 | else |
1925 | kobj = get_disk(lo->lo_disk); | 1925 | kobj = get_disk_and_module(lo->lo_disk); |
1926 | mutex_unlock(&loop_index_mutex); | 1926 | mutex_unlock(&loop_index_mutex); |
1927 | 1927 | ||
1928 | *part = 0; | 1928 | *part = 0; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5f2a4240a204..86258b00a1d4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -1591,7 +1591,7 @@ again: | |||
1591 | if (new_index < 0) { | 1591 | if (new_index < 0) { |
1592 | mutex_unlock(&nbd_index_mutex); | 1592 | mutex_unlock(&nbd_index_mutex); |
1593 | printk(KERN_ERR "nbd: failed to add new device\n"); | 1593 | printk(KERN_ERR "nbd: failed to add new device\n"); |
1594 | return ret; | 1594 | return new_index; |
1595 | } | 1595 | } |
1596 | nbd = idr_find(&nbd_index_idr, new_index); | 1596 | nbd = idr_find(&nbd_index_idr, new_index); |
1597 | } | 1597 | } |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 531a0915066b..c61d20c9f3f8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt) | |||
1122 | pkt->sector = new_sector; | 1122 | pkt->sector = new_sector; |
1123 | 1123 | ||
1124 | bio_reset(pkt->bio); | 1124 | bio_reset(pkt->bio); |
1125 | bio_set_set(pkt->bio, pd->bdev); | 1125 | bio_set_dev(pkt->bio, pd->bdev); |
1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); | 1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); |
1127 | pkt->bio->bi_iter.bi_sector = new_sector; | 1127 | pkt->bio->bi_iter.bi_sector = new_sector; |
1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; | 1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 84434d3ea19b..64e066eba72e 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
799 | return NULL; | 799 | return NULL; |
800 | 800 | ||
801 | *part = 0; | 801 | *part = 0; |
802 | return get_disk(swd->unit[drive].disk); | 802 | return get_disk_and_module(swd->unit[drive].disk); |
803 | } | 803 | } |
804 | 804 | ||
805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) | 805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 41c95c9b2ab4..8f9130ab5887 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops = | |||
332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) | 332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) |
333 | { | 333 | { |
334 | *part = 0; | 334 | *part = 0; |
335 | return get_disk(z2ram_gendisk); | 335 | return get_disk_and_module(z2ram_gendisk); |
336 | } | 336 | } |
337 | 337 | ||
338 | static struct request_queue *z2_queue; | 338 | static struct request_queue *z2_queue; |
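The amiflop, ataflop, brd, floppy, loop, swim and z2ram hunks above all make the same conversion: their blk_register_region() probe/find callbacks now call get_disk_and_module(), which pins both the gendisk and the module that owns it, instead of the old get_disk(). A sketch of the resulting callback shape, with my_disks[] and MY_UNITS standing in for driver-private state:

	static struct kobject *my_find(dev_t dev, int *part, void *data)
	{
		int unit = MINOR(dev);

		if (unit >= MY_UNITS || !my_disks[unit])
			return NULL;

		*part = 0;
		/* takes a reference on the disk and its owning module */
		return get_disk_and_module(my_disks[unit]);
	}
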
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4d46003c46cf..cdaeeea7999c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata) | |||
630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { | 630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { |
631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); | 631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); |
632 | if (!prop) | 632 | if (!prop) |
633 | break; | 633 | continue; |
634 | 634 | ||
635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; | 635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; |
636 | } | 636 | } |
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 4d1dc8b46877..f95b9c75175b 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c | |||
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
457 | size_t count) | 457 | size_t count) |
458 | { | 458 | { |
459 | int size = 0; | 459 | int size = 0; |
460 | int expected; | 460 | u32 expected; |
461 | 461 | ||
462 | if (!chip) | 462 | if (!chip) |
463 | return -EBUSY; | 463 | return -EBUSY; |
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
474 | } | 474 | } |
475 | 475 | ||
476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
477 | if (expected > count) { | 477 | if (expected > count || expected < TPM_HEADER_SIZE) { |
478 | size = -EIO; | 478 | size = -EIO; |
479 | goto out; | 479 | goto out; |
480 | } | 480 | } |
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 76df4fbcf089..9e80a953d693 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) | |||
1190 | break; | 1190 | break; |
1191 | 1191 | ||
1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); | 1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); |
1193 | if (recd > num_bytes) { | ||
1194 | total = -EFAULT; | ||
1195 | break; | ||
1196 | } | ||
1193 | 1197 | ||
1194 | rlength = be32_to_cpu(tpm_cmd.header.out.length); | 1198 | rlength = be32_to_cpu(tpm_cmd.header.out.length); |
1195 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + | 1199 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + |
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c17e75348a99..a700f8f9ead7 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, | |||
683 | if (!rc) { | 683 | if (!rc) { |
684 | data_len = be16_to_cpup( | 684 | data_len = be16_to_cpup( |
685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); | 685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); |
686 | if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { | ||
687 | rc = -EFAULT; | ||
688 | goto out; | ||
689 | } | ||
686 | 690 | ||
687 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) | 691 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) |
688 | ->header.out.length); | 692 | ->header.out.length); |
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index c1dd39eaaeeb..6116cd05e228 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c | |||
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | |||
473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
474 | { | 474 | { |
475 | int size = 0; | 475 | int size = 0; |
476 | int expected, status; | 476 | int status; |
477 | u32 expected; | ||
477 | 478 | ||
478 | if (count < TPM_HEADER_SIZE) { | 479 | if (count < TPM_HEADER_SIZE) { |
479 | size = -EIO; | 480 | size = -EIO; |
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 491 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
491 | if ((size_t) expected > count) { | 492 | if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { |
492 | size = -EIO; | 493 | size = -EIO; |
493 | goto out; | 494 | goto out; |
494 | } | 495 | } |
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c | |||
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
281 | struct device *dev = chip->dev.parent; | 281 | struct device *dev = chip->dev.parent; |
282 | struct i2c_client *client = to_i2c_client(dev); | 282 | struct i2c_client *client = to_i2c_client(dev); |
283 | s32 rc; | 283 | s32 rc; |
284 | int expected, status, burst_count, retries, size = 0; | 284 | int status; |
285 | int burst_count; | ||
286 | int retries; | ||
287 | int size = 0; | ||
288 | u32 expected; | ||
285 | 289 | ||
286 | if (count < TPM_HEADER_SIZE) { | 290 | if (count < TPM_HEADER_SIZE) { |
287 | i2c_nuvoton_ready(chip); /* return to idle */ | 291 | i2c_nuvoton_ready(chip); /* return to idle */ |
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
323 | * to machine native | 327 | * to machine native |
324 | */ | 328 | */ |
325 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 329 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
326 | if (expected > count) { | 330 | if (expected > count || expected < size) { |
327 | dev_err(dev, "%s() expected > count\n", __func__); | 331 | dev_err(dev, "%s() expected > count\n", __func__); |
328 | size = -EIO; | 332 | size = -EIO; |
329 | continue; | 333 | continue; |
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 183a5f54d875..da074e3db19b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c | |||
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
270 | { | 270 | { |
271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | 271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); |
272 | int size = 0; | 272 | int size = 0; |
273 | int expected, status; | 273 | int status; |
274 | u32 expected; | ||
274 | 275 | ||
275 | if (count < TPM_HEADER_SIZE) { | 276 | if (count < TPM_HEADER_SIZE) { |
276 | size = -EIO; | 277 | size = -EIO; |
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
285 | } | 286 | } |
286 | 287 | ||
287 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 288 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
288 | if (expected > count) { | 289 | if (expected > count || expected < TPM_HEADER_SIZE) { |
289 | size = -EIO; | 290 | size = -EIO; |
290 | goto out; | 291 | goto out; |
291 | } | 292 | } |
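Each of the TPM ->recv() paths above now treats the response length field as an unsigned u32 and rejects values below TPM_HEADER_SIZE as well as values larger than the caller's buffer, so a malformed header can neither under-run nor over-run the copy. The common shape of the check, assuming buf already holds the response header and count is the caller's buffer size:

	u32 expected = be32_to_cpu(*(__be32 *)(buf + 2));

	/* the length must cover at least the header and fit the buffer */
	if (expected > count || expected < TPM_HEADER_SIZE)
		return -EIO;
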
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index a04808a21d4e..65e18c86d9b9 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node) | |||
205 | } else if (of_property_read_u32(node, "clock-frequency", | 205 | } else if (of_property_read_u32(node, "clock-frequency", |
206 | &gic_frequency)) { | 206 | &gic_frequency)) { |
207 | pr_err("GIC frequency not specified.\n"); | 207 | pr_err("GIC frequency not specified.\n"); |
208 | return -EINVAL;; | 208 | return -EINVAL; |
209 | } | 209 | } |
210 | gic_timer_irq = irq_of_parse_and_map(node, 0); | 210 | gic_timer_irq = irq_of_parse_and_map(node, 0); |
211 | if (!gic_timer_irq) { | 211 | if (!gic_timer_irq) { |
212 | pr_err("GIC timer IRQ not specified.\n"); | 212 | pr_err("GIC timer IRQ not specified.\n"); |
213 | return -EINVAL;; | 213 | return -EINVAL; |
214 | } | 214 | } |
215 | 215 | ||
216 | ret = __gic_clocksource_init(); | 216 | ret = __gic_clocksource_init(); |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2a3fe83ec337..3b56ea3f52af 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node) | |||
334 | timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); | 334 | timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); |
335 | if (IS_ERR(timer_base)) { | 335 | if (IS_ERR(timer_base)) { |
336 | pr_err("Can't map registers\n"); | 336 | pr_err("Can't map registers\n"); |
337 | return PTR_ERR(timer_base);; | 337 | return PTR_ERR(timer_base); |
338 | } | 338 | } |
339 | 339 | ||
340 | irq = irq_of_parse_and_map(node, 0); | 340 | irq = irq_of_parse_and_map(node, 0); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3a88e33b0cfe..fb586e09682d 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ | |||
44 | 44 | ||
45 | config ARM_SCPI_CPUFREQ | 45 | config ARM_SCPI_CPUFREQ |
46 | tristate "SCPI based CPUfreq driver" | 46 | tristate "SCPI based CPUfreq driver" |
47 | depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI | 47 | depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI |
48 | help | 48 | help |
49 | This adds the CPUfreq driver support for ARM big.LITTLE platforms | 49 | This adds the CPUfreq driver support for ARM platforms using SCPI |
50 | using SCPI protocol for CPU power management. | 50 | protocol for CPU power management. |
51 | 51 | ||
52 | This driver uses SCPI Message Protocol driver to interact with the | 52 | This driver uses SCPI Message Protocol driver to interact with the |
53 | firmware providing the CPU DVFS functionality. | 53 | firmware providing the CPU DVFS functionality. |
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c | |||
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) | |||
351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) | 351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) |
352 | { | 352 | { |
353 | policy->clk = clk_arm; | 353 | policy->clk = clk_arm; |
354 | return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); | 354 | |
355 | policy->cpuinfo.transition_latency = cpu_cur.info->latency; | ||
356 | |||
357 | if (ftab) | ||
358 | return cpufreq_table_validate_and_show(policy, ftab); | ||
359 | |||
360 | return 0; | ||
355 | } | 361 | } |
356 | 362 | ||
357 | static int __init s3c_cpufreq_initclks(void) | 363 | static int __init s3c_cpufreq_initclks(void) |
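The s3c24xx change drops cpufreq_generic_init(), which assumes a frequency table is always available, and instead records the transition latency unconditionally while validating the table only when one was actually built. A condensed sketch of an ->init() callback following that pattern (ftab and latency stand in for driver-provided data):

	static int my_cpufreq_init(struct cpufreq_policy *policy)
	{
		policy->clk = clk_arm;
		policy->cpuinfo.transition_latency = latency;

		if (ftab)	/* the table is optional on some boards */
			return cpufreq_table_validate_and_show(policy, ftab);

		return 0;
	}
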
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index c32a833e1b00..d300a163945f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c | |||
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) | |||
51 | static int | 51 | static int |
52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) | 52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) |
53 | { | 53 | { |
54 | unsigned long freq = policy->freq_table[index].frequency; | ||
54 | struct scpi_data *priv = policy->driver_data; | 55 | struct scpi_data *priv = policy->driver_data; |
55 | u64 rate = policy->freq_table[index].frequency * 1000; | 56 | u64 rate = freq * 1000; |
56 | int ret; | 57 | int ret; |
57 | 58 | ||
58 | ret = clk_set_rate(priv->clk, rate); | 59 | ret = clk_set_rate(priv->clk, rate); |
59 | if (!ret && (clk_get_rate(priv->clk) != rate)) | ||
60 | ret = -EIO; | ||
61 | 60 | ||
62 | return ret; | 61 | if (ret) |
62 | return ret; | ||
63 | |||
64 | if (clk_get_rate(priv->clk) != rate) | ||
65 | return -EIO; | ||
66 | |||
67 | arch_set_freq_scale(policy->related_cpus, freq, | ||
68 | policy->cpuinfo.max_freq); | ||
69 | |||
70 | return 0; | ||
63 | } | 71 | } |
64 | 72 | ||
65 | static int | 73 | static int |
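scpi-cpufreq's set_target now returns as soon as clk_set_rate() fails, confirms the firmware really applied the requested rate, and only then reports the frequency to the scheduler through arch_set_freq_scale(); the scale factor should never advertise a rate the hardware declined. Roughly, in the driver's own context:

	ret = clk_set_rate(priv->clk, rate);
	if (ret)
		return ret;
	if (clk_get_rate(priv->clk) != rate)
		return -EIO;		/* firmware silently refused the rate */

	/* tell the scheduler what the CPUs are actually running at */
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	return 0;
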
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index fcfa5b1eae61..b3afb6cc9d72 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c | |||
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error) | |||
211 | { | 211 | { |
212 | int ret; | 212 | int ret; |
213 | 213 | ||
214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error); | 214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); |
215 | if (ret) | 215 | if (ret) |
216 | return ret; | 216 | return ret; |
217 | 217 | ||
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp) | |||
271 | return rc; | 271 | return rc; |
272 | } | 272 | } |
273 | 273 | ||
274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); | 274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); |
275 | } | 275 | } |
276 | 276 | ||
277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) | 277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) |
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) | |||
299 | return rc; | 299 | return rc; |
300 | } | 300 | } |
301 | 301 | ||
302 | return __sev_do_cmd_locked(cmd, 0, &argp->error); | 302 | return __sev_do_cmd_locked(cmd, NULL, &argp->error); |
303 | } | 303 | } |
304 | 304 | ||
305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) | 305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) |
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission); | |||
624 | 624 | ||
625 | int sev_guest_df_flush(int *error) | 625 | int sev_guest_df_flush(int *error) |
626 | { | 626 | { |
627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error); | 627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); |
628 | } | 628 | } |
629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); | 629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); |
630 | 630 | ||
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 188f44b7eb27..5d64c08b7f47 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) | |||
1922 | uint32_t aes_control; | 1922 | uint32_t aes_control; |
1923 | unsigned long flags; | 1923 | unsigned long flags; |
1924 | int err; | 1924 | int err; |
1925 | u8 *iv; | ||
1925 | 1926 | ||
1926 | aes_control = SSS_AES_KEY_CHANGE_MODE; | 1927 | aes_control = SSS_AES_KEY_CHANGE_MODE; |
1927 | if (mode & FLAGS_AES_DECRYPT) | 1928 | if (mode & FLAGS_AES_DECRYPT) |
1928 | aes_control |= SSS_AES_MODE_DECRYPT; | 1929 | aes_control |= SSS_AES_MODE_DECRYPT; |
1929 | 1930 | ||
1930 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) | 1931 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { |
1931 | aes_control |= SSS_AES_CHAIN_MODE_CBC; | 1932 | aes_control |= SSS_AES_CHAIN_MODE_CBC; |
1932 | else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) | 1933 | iv = req->info; |
1934 | } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { | ||
1933 | aes_control |= SSS_AES_CHAIN_MODE_CTR; | 1935 | aes_control |= SSS_AES_CHAIN_MODE_CTR; |
1936 | iv = req->info; | ||
1937 | } else { | ||
1938 | iv = NULL; /* AES_ECB */ | ||
1939 | } | ||
1934 | 1940 | ||
1935 | if (dev->ctx->keylen == AES_KEYSIZE_192) | 1941 | if (dev->ctx->keylen == AES_KEYSIZE_192) |
1936 | aes_control |= SSS_AES_KEY_SIZE_192; | 1942 | aes_control |= SSS_AES_KEY_SIZE_192; |
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) | |||
1961 | goto outdata_error; | 1967 | goto outdata_error; |
1962 | 1968 | ||
1963 | SSS_AES_WRITE(dev, AES_CONTROL, aes_control); | 1969 | SSS_AES_WRITE(dev, AES_CONTROL, aes_control); |
1964 | s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); | 1970 | s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen); |
1965 | 1971 | ||
1966 | s5p_set_dma_indata(dev, dev->sg_src); | 1972 | s5p_set_dma_indata(dev, dev->sg_src); |
1967 | s5p_set_dma_outdata(dev, dev->sg_dst); | 1973 | s5p_set_dma_outdata(dev, dev->sg_dst); |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f34430f99fd8..872100215ca0 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { | |||
279 | * sbridge structs | 279 | * sbridge structs |
280 | */ | 280 | */ |
281 | 281 | ||
282 | #define NUM_CHANNELS 4 /* Max channels per MC */ | 282 | #define NUM_CHANNELS 6 /* Max channels per MC */ |
283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ | 283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ |
284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ | 284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ |
285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ | 285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ |
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 0a44d43802fe..3ec4c715e240 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver | 2 | * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver |
3 | * | 3 | * |
4 | * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com> | ||
5 | * Copyright (C) 2015 Intel Corporation | 4 | * Copyright (C) 2015 Intel Corporation |
6 | * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com> | 5 | * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com> |
7 | * | 6 | * |
@@ -98,15 +97,13 @@ struct axp288_extcon_info { | |||
98 | struct device *dev; | 97 | struct device *dev; |
99 | struct regmap *regmap; | 98 | struct regmap *regmap; |
100 | struct regmap_irq_chip_data *regmap_irqc; | 99 | struct regmap_irq_chip_data *regmap_irqc; |
101 | struct delayed_work det_work; | ||
102 | int irq[EXTCON_IRQ_END]; | 100 | int irq[EXTCON_IRQ_END]; |
103 | struct extcon_dev *edev; | 101 | struct extcon_dev *edev; |
104 | unsigned int previous_cable; | 102 | unsigned int previous_cable; |
105 | bool first_detect_done; | ||
106 | }; | 103 | }; |
107 | 104 | ||
108 | /* Power up/down reason string array */ | 105 | /* Power up/down reason string array */ |
109 | static char *axp288_pwr_up_down_info[] = { | 106 | static const char * const axp288_pwr_up_down_info[] = { |
110 | "Last wake caused by user pressing the power button", | 107 | "Last wake caused by user pressing the power button", |
111 | "Last wake caused by a charger insertion", | 108 | "Last wake caused by a charger insertion", |
112 | "Last wake caused by a battery insertion", | 109 | "Last wake caused by a battery insertion", |
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = { | |||
124 | */ | 121 | */ |
125 | static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) | 122 | static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) |
126 | { | 123 | { |
127 | char **rsi; | 124 | const char * const *rsi; |
128 | unsigned int val, i, clear_mask = 0; | 125 | unsigned int val, i, clear_mask = 0; |
129 | int ret; | 126 | int ret; |
130 | 127 | ||
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) | |||
140 | regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); | 137 | regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); |
141 | } | 138 | } |
142 | 139 | ||
143 | static void axp288_chrg_detect_complete(struct axp288_extcon_info *info) | ||
144 | { | ||
145 | /* | ||
146 | * We depend on other drivers to do things like mux the data lines, | ||
147 | * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has | ||
148 | * not set these things up correctly resulting in the initial charger | ||
149 | * cable type detection giving a wrong result and we end up not charging | ||
150 | * or charging at only 0.5A. | ||
151 | * | ||
152 | * So we schedule a second cable type detection after 2 seconds to | ||
153 | * give the other drivers time to load and do their thing. | ||
154 | */ | ||
155 | if (!info->first_detect_done) { | ||
156 | queue_delayed_work(system_wq, &info->det_work, | ||
157 | msecs_to_jiffies(2000)); | ||
158 | info->first_detect_done = true; | ||
159 | } | ||
160 | } | ||
161 | |||
162 | static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) | 140 | static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) |
163 | { | 141 | { |
164 | int ret, stat, cfg, pwr_stat; | 142 | int ret, stat, cfg, pwr_stat; |
@@ -223,8 +201,6 @@ no_vbus: | |||
223 | info->previous_cable = cable; | 201 | info->previous_cable = cable; |
224 | } | 202 | } |
225 | 203 | ||
226 | axp288_chrg_detect_complete(info); | ||
227 | |||
228 | return 0; | 204 | return 0; |
229 | 205 | ||
230 | dev_det_ret: | 206 | dev_det_ret: |
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data) | |||
246 | return IRQ_HANDLED; | 222 | return IRQ_HANDLED; |
247 | } | 223 | } |
248 | 224 | ||
249 | static void axp288_extcon_det_work(struct work_struct *work) | 225 | static void axp288_extcon_enable(struct axp288_extcon_info *info) |
250 | { | 226 | { |
251 | struct axp288_extcon_info *info = | ||
252 | container_of(work, struct axp288_extcon_info, det_work.work); | ||
253 | |||
254 | regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, | 227 | regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, |
255 | BC_GLOBAL_RUN, 0); | 228 | BC_GLOBAL_RUN, 0); |
256 | /* Enable the charger detection logic */ | 229 | /* Enable the charger detection logic */ |
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev) | |||
272 | info->regmap = axp20x->regmap; | 245 | info->regmap = axp20x->regmap; |
273 | info->regmap_irqc = axp20x->regmap_irqc; | 246 | info->regmap_irqc = axp20x->regmap_irqc; |
274 | info->previous_cable = EXTCON_NONE; | 247 | info->previous_cable = EXTCON_NONE; |
275 | INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work); | ||
276 | 248 | ||
277 | platform_set_drvdata(pdev, info); | 249 | platform_set_drvdata(pdev, info); |
278 | 250 | ||
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev) | |||
318 | } | 290 | } |
319 | 291 | ||
320 | /* Start charger cable type detection */ | 292 | /* Start charger cable type detection */ |
321 | queue_delayed_work(system_wq, &info->det_work, 0); | 293 | axp288_extcon_enable(info); |
322 | 294 | ||
323 | return 0; | 295 | return 0; |
324 | } | 296 | } |
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index c8691b5a9cb0..191e99f06a9a 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c | |||
@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev) | |||
153 | return ret; | 153 | return ret; |
154 | } | 154 | } |
155 | 155 | ||
156 | /* queue initial processing of id-pin */ | 156 | /* process id-pin so that we start with the right status */ |
157 | queue_delayed_work(system_wq, &data->work, 0); | 157 | queue_delayed_work(system_wq, &data->work, 0); |
158 | flush_delayed_work(&data->work); | ||
158 | 159 | ||
159 | platform_set_drvdata(pdev, data); | 160 | platform_set_drvdata(pdev, data); |
160 | 161 | ||
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 564bb7a31da4..84e5a9df2344 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
241 | 241 | ||
242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, | 242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, |
243 | &of_flags); | 243 | &of_flags); |
244 | /* | ||
245 | * -EPROBE_DEFER in our case means that we found a | ||
246 | * valid GPIO property, but no controller has been | ||
247 | * registered so far. | ||
248 | * | ||
249 | * This means we don't need to look any further for | ||
250 | * alternate name conventions, and we should really | ||
251 | * preserve the return code for our user to be able to | ||
252 | * retry probing later. | ||
253 | */ | ||
254 | if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) | ||
255 | return desc; | ||
256 | |||
244 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) | 257 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) |
245 | break; | 258 | break; |
246 | } | 259 | } |
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
250 | desc = of_find_spi_gpio(dev, con_id, &of_flags); | 263 | desc = of_find_spi_gpio(dev, con_id, &of_flags); |
251 | 264 | ||
252 | /* Special handling for regulator GPIOs if used */ | 265 | /* Special handling for regulator GPIOs if used */ |
253 | if (IS_ERR(desc)) | 266 | if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) |
254 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); | 267 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); |
255 | 268 | ||
256 | if (IS_ERR(desc)) | 269 | if (IS_ERR(desc)) |
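The gpiolib-of hunk separates -EPROBE_DEFER (a valid GPIO property whose controller is not registered yet) from -ENOENT (no such property), so deferral is propagated instead of being swallowed by the SPI and regulator fallback lookups. From a consumer driver's point of view the deferral simply flows out of probe(); a hypothetical example:

	static int my_probe(struct platform_device *pdev)
	{
		struct gpio_desc *reset;

		/* -EPROBE_DEFER returned here makes the core retry probe() later */
		reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		return 0;
	}
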
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d5a2eefd6c3e..74edba18b159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, | |||
1156 | /* | 1156 | /* |
1157 | * Writeback | 1157 | * Writeback |
1158 | */ | 1158 | */ |
1159 | #define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */ | 1159 | #define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */ |
1160 | 1160 | ||
1161 | struct amdgpu_wb { | 1161 | struct amdgpu_wb { |
1162 | struct amdgpu_bo *wb_obj; | 1162 | struct amdgpu_bo *wb_obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8ca3783f2deb..74d2efaec52f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) | |||
736 | enum drm_connector_status ret = connector_status_disconnected; | 736 | enum drm_connector_status ret = connector_status_disconnected; |
737 | int r; | 737 | int r; |
738 | 738 | ||
739 | r = pm_runtime_get_sync(connector->dev->dev); | 739 | if (!drm_kms_helper_is_poll_worker()) { |
740 | if (r < 0) | 740 | r = pm_runtime_get_sync(connector->dev->dev); |
741 | return connector_status_disconnected; | 741 | if (r < 0) |
742 | return connector_status_disconnected; | ||
743 | } | ||
742 | 744 | ||
743 | if (encoder) { | 745 | if (encoder) { |
744 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | 746 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); |
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) | |||
757 | /* check acpi lid status ??? */ | 759 | /* check acpi lid status ??? */ |
758 | 760 | ||
759 | amdgpu_connector_update_scratch_regs(connector, ret); | 761 | amdgpu_connector_update_scratch_regs(connector, ret); |
760 | pm_runtime_mark_last_busy(connector->dev->dev); | 762 | |
761 | pm_runtime_put_autosuspend(connector->dev->dev); | 763 | if (!drm_kms_helper_is_poll_worker()) { |
764 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
765 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
766 | } | ||
767 | |||
762 | return ret; | 768 | return ret; |
763 | } | 769 | } |
764 | 770 | ||
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) | |||
868 | enum drm_connector_status ret = connector_status_disconnected; | 874 | enum drm_connector_status ret = connector_status_disconnected; |
869 | int r; | 875 | int r; |
870 | 876 | ||
871 | r = pm_runtime_get_sync(connector->dev->dev); | 877 | if (!drm_kms_helper_is_poll_worker()) { |
872 | if (r < 0) | 878 | r = pm_runtime_get_sync(connector->dev->dev); |
873 | return connector_status_disconnected; | 879 | if (r < 0) |
880 | return connector_status_disconnected; | ||
881 | } | ||
874 | 882 | ||
875 | encoder = amdgpu_connector_best_single_encoder(connector); | 883 | encoder = amdgpu_connector_best_single_encoder(connector); |
876 | if (!encoder) | 884 | if (!encoder) |
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) | |||
924 | amdgpu_connector_update_scratch_regs(connector, ret); | 932 | amdgpu_connector_update_scratch_regs(connector, ret); |
925 | 933 | ||
926 | out: | 934 | out: |
927 | pm_runtime_mark_last_busy(connector->dev->dev); | 935 | if (!drm_kms_helper_is_poll_worker()) { |
928 | pm_runtime_put_autosuspend(connector->dev->dev); | 936 | pm_runtime_mark_last_busy(connector->dev->dev); |
937 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
938 | } | ||
929 | 939 | ||
930 | return ret; | 940 | return ret; |
931 | } | 941 | } |
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) | |||
988 | enum drm_connector_status ret = connector_status_disconnected; | 998 | enum drm_connector_status ret = connector_status_disconnected; |
989 | bool dret = false, broken_edid = false; | 999 | bool dret = false, broken_edid = false; |
990 | 1000 | ||
991 | r = pm_runtime_get_sync(connector->dev->dev); | 1001 | if (!drm_kms_helper_is_poll_worker()) { |
992 | if (r < 0) | 1002 | r = pm_runtime_get_sync(connector->dev->dev); |
993 | return connector_status_disconnected; | 1003 | if (r < 0) |
1004 | return connector_status_disconnected; | ||
1005 | } | ||
994 | 1006 | ||
995 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | 1007 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { |
996 | ret = connector->status; | 1008 | ret = connector->status; |
@@ -1115,8 +1127,10 @@ out: | |||
1115 | amdgpu_connector_update_scratch_regs(connector, ret); | 1127 | amdgpu_connector_update_scratch_regs(connector, ret); |
1116 | 1128 | ||
1117 | exit: | 1129 | exit: |
1118 | pm_runtime_mark_last_busy(connector->dev->dev); | 1130 | if (!drm_kms_helper_is_poll_worker()) { |
1119 | pm_runtime_put_autosuspend(connector->dev->dev); | 1131 | pm_runtime_mark_last_busy(connector->dev->dev); |
1132 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1133 | } | ||
1120 | 1134 | ||
1121 | return ret; | 1135 | return ret; |
1122 | } | 1136 | } |
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) | |||
1359 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | 1373 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); |
1360 | int r; | 1374 | int r; |
1361 | 1375 | ||
1362 | r = pm_runtime_get_sync(connector->dev->dev); | 1376 | if (!drm_kms_helper_is_poll_worker()) { |
1363 | if (r < 0) | 1377 | r = pm_runtime_get_sync(connector->dev->dev); |
1364 | return connector_status_disconnected; | 1378 | if (r < 0) |
1379 | return connector_status_disconnected; | ||
1380 | } | ||
1365 | 1381 | ||
1366 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | 1382 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { |
1367 | ret = connector->status; | 1383 | ret = connector->status; |
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) | |||
1429 | 1445 | ||
1430 | amdgpu_connector_update_scratch_regs(connector, ret); | 1446 | amdgpu_connector_update_scratch_regs(connector, ret); |
1431 | out: | 1447 | out: |
1432 | pm_runtime_mark_last_busy(connector->dev->dev); | 1448 | if (!drm_kms_helper_is_poll_worker()) { |
1433 | pm_runtime_put_autosuspend(connector->dev->dev); | 1449 | pm_runtime_mark_last_busy(connector->dev->dev); |
1450 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1451 | } | ||
1434 | 1452 | ||
1435 | return ret; | 1453 | return ret; |
1436 | } | 1454 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 00a50cc5ec9a..af1b879a9ee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev) | |||
492 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | 492 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); |
493 | 493 | ||
494 | /* clear wb memory */ | 494 | /* clear wb memory */ |
495 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); | 495 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); |
496 | } | 496 | } |
497 | 497 | ||
498 | return 0; | 498 | return 0; |
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) | |||
530 | */ | 530 | */ |
531 | void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) | 531 | void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) |
532 | { | 532 | { |
533 | wb >>= 3; | ||
533 | if (wb < adev->wb.num_wb) | 534 | if (wb < adev->wb.num_wb) |
534 | __clear_bit(wb >> 3, adev->wb.used); | 535 | __clear_bit(wb, adev->wb.used); |
535 | } | 536 | } |
536 | 537 | ||
537 | /** | 538 | /** |
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) | |||
1455 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1456 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1456 | if (!adev->ip_blocks[i].status.hw) | 1457 | if (!adev->ip_blocks[i].status.hw) |
1457 | continue; | 1458 | continue; |
1458 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | ||
1459 | amdgpu_free_static_csa(adev); | ||
1460 | amdgpu_device_wb_fini(adev); | ||
1461 | amdgpu_device_vram_scratch_fini(adev); | ||
1462 | } | ||
1463 | 1459 | ||
1464 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && | 1460 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
1465 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { | 1461 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { |
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) | |||
1486 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1482 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1487 | if (!adev->ip_blocks[i].status.sw) | 1483 | if (!adev->ip_blocks[i].status.sw) |
1488 | continue; | 1484 | continue; |
1485 | |||
1486 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | ||
1487 | amdgpu_free_static_csa(adev); | ||
1488 | amdgpu_device_wb_fini(adev); | ||
1489 | amdgpu_device_vram_scratch_fini(adev); | ||
1490 | } | ||
1491 | |||
1489 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); | 1492 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); |
1490 | /* XXX handle errors */ | 1493 | /* XXX handle errors */ |
1491 | if (r) { | 1494 | if (r) { |
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
2284 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | 2287 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); |
2285 | } | 2288 | } |
2286 | drm_modeset_unlock_all(dev); | 2289 | drm_modeset_unlock_all(dev); |
2287 | } else { | ||
2288 | /* | ||
2289 | * There is no equivalent atomic helper to turn on | ||
2290 | * display, so we defined our own function for this, | ||
2291 | * once suspend resume is supported by the atomic | ||
2292 | * framework this will be reworked | ||
2293 | */ | ||
2294 | amdgpu_dm_display_resume(adev); | ||
2295 | } | 2290 | } |
2296 | } | 2291 | } |
2297 | 2292 | ||
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, | |||
2726 | if (amdgpu_device_has_dc_support(adev)) { | 2721 | if (amdgpu_device_has_dc_support(adev)) { |
2727 | if (drm_atomic_helper_resume(adev->ddev, state)) | 2722 | if (drm_atomic_helper_resume(adev->ddev, state)) |
2728 | dev_info(adev->dev, "drm resume failed:%d\n", r); | 2723 | dev_info(adev->dev, "drm resume failed:%d\n", r); |
2729 | amdgpu_dm_display_resume(adev); | ||
2730 | } else { | 2724 | } else { |
2731 | drm_helper_resume_force_mode(adev->ddev); | 2725 | drm_helper_resume_force_mode(adev->ddev); |
2732 | } | 2726 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e14ab34d8262..7c2be32c5aea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | |||
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, | |||
75 | static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) | 75 | static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) |
76 | { | 76 | { |
77 | struct amdgpu_gtt_mgr *mgr = man->priv; | 77 | struct amdgpu_gtt_mgr *mgr = man->priv; |
78 | 78 | spin_lock(&mgr->lock); | |
79 | drm_mm_takedown(&mgr->mm); | 79 | drm_mm_takedown(&mgr->mm); |
80 | spin_unlock(&mgr->lock); | 80 | spin_unlock(&mgr->lock); |
81 | kfree(mgr); | 81 | kfree(mgr); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 56bcd59c3399..36483e0d3c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) | |||
257 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); | 257 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); |
258 | if (r) { | 258 | if (r) { |
259 | adev->irq.installed = false; | 259 | adev->irq.installed = false; |
260 | flush_work(&adev->hotplug_work); | 260 | if (!amdgpu_device_has_dc_support(adev)) |
261 | flush_work(&adev->hotplug_work); | ||
261 | cancel_work_sync(&adev->reset_work); | 262 | cancel_work_sync(&adev->reset_work); |
262 | return r; | 263 | return r; |
263 | } | 264 | } |
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
282 | adev->irq.installed = false; | 283 | adev->irq.installed = false; |
283 | if (adev->irq.msi_enabled) | 284 | if (adev->irq.msi_enabled) |
284 | pci_disable_msi(adev->pdev); | 285 | pci_disable_msi(adev->pdev); |
285 | flush_work(&adev->hotplug_work); | 286 | if (!amdgpu_device_has_dc_support(adev)) |
287 | flush_work(&adev->hotplug_work); | ||
286 | cancel_work_sync(&adev->reset_work); | 288 | cancel_work_sync(&adev->reset_work); |
287 | } | 289 | } |
288 | 290 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2719937e09d6..3b7e7af09ead 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle) | |||
634 | for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) | 634 | for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) |
635 | BUG_ON(vm_inv_eng[i] > 16); | 635 | BUG_ON(vm_inv_eng[i] > 16); |
636 | 636 | ||
637 | if (adev->asic_type == CHIP_VEGA10) { | 637 | if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) { |
638 | r = gmc_v9_0_ecc_available(adev); | 638 | r = gmc_v9_0_ecc_available(adev); |
639 | if (r == 1) { | 639 | if (r == 1) { |
640 | DRM_INFO("ECC is active.\n"); | 640 | DRM_INFO("ECC is active.\n"); |
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) | |||
682 | adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); | 682 | adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); |
683 | if (!adev->mc.vram_width) { | 683 | if (!adev->mc.vram_width) { |
684 | /* hbm memory channel size */ | 684 | /* hbm memory channel size */ |
685 | chansize = 128; | 685 | if (adev->flags & AMD_IS_APU) |
686 | chansize = 64; | ||
687 | else | ||
688 | chansize = 128; | ||
686 | 689 | ||
687 | tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); | 690 | tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); |
688 | tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; | 691 | tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e92fb372bc99..91cf95a8c39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring) | |||
238 | static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) | 238 | static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) |
239 | { | 239 | { |
240 | struct amdgpu_device *adev = ring->adev; | 240 | struct amdgpu_device *adev = ring->adev; |
241 | u64 *wptr = NULL; | 241 | u64 wptr; |
242 | uint64_t local_wptr = 0; | ||
243 | 242 | ||
244 | if (ring->use_doorbell) { | 243 | if (ring->use_doorbell) { |
245 | /* XXX check if swapping is necessary on BE */ | 244 | /* XXX check if swapping is necessary on BE */ |
246 | wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); | 245 | wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); |
247 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); | 246 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); |
248 | *wptr = (*wptr) >> 2; | ||
249 | DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); | ||
250 | } else { | 247 | } else { |
251 | u32 lowbit, highbit; | 248 | u32 lowbit, highbit; |
252 | int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; | 249 | int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; |
253 | 250 | ||
254 | wptr = &local_wptr; | ||
255 | lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; | 251 | lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2; |
256 | highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; | 252 | highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; |
257 | 253 | ||
258 | DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", | 254 | DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", |
259 | me, highbit, lowbit); | 255 | me, highbit, lowbit); |
260 | *wptr = highbit; | 256 | wptr = highbit; |
261 | *wptr = (*wptr) << 32; | 257 | wptr = wptr << 32; |
262 | *wptr |= lowbit; | 258 | wptr |= lowbit; |
263 | } | 259 | } |
264 | 260 | ||
265 | return *wptr; | 261 | return wptr >> 2; |
266 | } | 262 | } |
267 | 263 | ||
268 | /** | 264 | /** |
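The get_wptr rework above comes down to one pattern: snapshot the 64-bit doorbell write-back word once with READ_ONCE() and apply the dword shift to the local copy, instead of shifting the shared buffer in place on every call. A minimal sketch of just that pattern, with the write-back slot passed in as an assumed parameter rather than looked up through adev:

/* Read-once sketch: the hardware owns the write-back slot, the CPU
 * only ever takes a snapshot and adjusts the copy.
 */
static u64 wb_read_wptr(const u64 *wb_slot)
{
	u64 wptr = READ_ONCE(*wb_slot);	/* single load, no in-place update */

	return wptr >> 2;		/* same >>2 adjustment as above, on the copy */
}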
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index b2bfedaf57f1..9bab4842cd44 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { | |||
1618 | .set_wptr = uvd_v6_0_enc_ring_set_wptr, | 1618 | .set_wptr = uvd_v6_0_enc_ring_set_wptr, |
1619 | .emit_frame_size = | 1619 | .emit_frame_size = |
1620 | 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ | 1620 | 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ |
1621 | 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */ | 1621 | 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */ |
1622 | 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ | 1622 | 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ |
1623 | 1, /* uvd_v6_0_enc_ring_insert_end */ | 1623 | 1, /* uvd_v6_0_enc_ring_insert_end */ |
1624 | .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ | 1624 | .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ |
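For reference, with the vm_flush contribution corrected from 6 to 5 dwords, the reservation above totals 4 + 5 + (5 + 5) + 1 = 20 dwords per frame (pipeline sync, vm flush, two fences, insert_end), assuming each per-entry comment matches what the corresponding emit function actually writes.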
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ce4c98385e3..862835dc054e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -629,11 +629,13 @@ static int dm_resume(void *handle) | |||
629 | { | 629 | { |
630 | struct amdgpu_device *adev = handle; | 630 | struct amdgpu_device *adev = handle; |
631 | struct amdgpu_display_manager *dm = &adev->dm; | 631 | struct amdgpu_display_manager *dm = &adev->dm; |
632 | int ret = 0; | ||
632 | 633 | ||
633 | /* power on hardware */ | 634 | /* power on hardware */ |
634 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); | 635 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); |
635 | 636 | ||
636 | return 0; | 637 | ret = amdgpu_dm_display_resume(adev); |
638 | return ret; | ||
637 | } | 639 | } |
638 | 640 | ||
639 | int amdgpu_dm_display_resume(struct amdgpu_device *adev) | 641 | int amdgpu_dm_display_resume(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 61e8c3e02d16..639421a00ab6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence( | |||
718 | uint32_t retries_ch_eq; | 718 | uint32_t retries_ch_eq; |
719 | enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; | 719 | enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; |
720 | union lane_align_status_updated dpcd_lane_status_updated = {{0}}; | 720 | union lane_align_status_updated dpcd_lane_status_updated = {{0}}; |
721 | union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};; | 721 | union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}}; |
722 | 722 | ||
723 | hw_tr_pattern = get_supported_tp(link); | 723 | hw_tr_pattern = get_supported_tp(link); |
724 | 724 | ||
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream, | |||
1465 | /* MST doesn't perform link training for now | 1465 | /* MST doesn't perform link training for now |
1466 | * TODO: add MST specific link training routine | 1466 | * TODO: add MST specific link training routine |
1467 | */ | 1467 | */ |
1468 | if (is_mst_supported(link)) { | 1468 | if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { |
1469 | *link_setting = link->verified_link_cap; | 1469 | *link_setting = link->verified_link_cap; |
1470 | return; | 1470 | return; |
1471 | } | 1471 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 261811e0c094..539c3e0a6292 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -197,7 +197,8 @@ bool dc_stream_set_cursor_attributes( | |||
197 | for (i = 0; i < MAX_PIPES; i++) { | 197 | for (i = 0; i < MAX_PIPES; i++) { |
198 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | 198 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; |
199 | 199 | ||
200 | if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) | 200 | if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && |
201 | !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp) | ||
201 | continue; | 202 | continue; |
202 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) | 203 | if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) |
203 | continue; | 204 | continue; |
@@ -273,7 +274,8 @@ bool dc_stream_set_cursor_position( | |||
273 | if (pipe_ctx->stream != stream || | 274 | if (pipe_ctx->stream != stream || |
274 | (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || | 275 | (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || |
275 | !pipe_ctx->plane_state || | 276 | !pipe_ctx->plane_state || |
276 | (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) | 277 | (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || |
278 | !pipe_ctx->plane_res.ipp) | ||
277 | continue; | 279 | continue; |
278 | 280 | ||
279 | if (pipe_ctx->plane_state->address.type | 281 | if (pipe_ctx->plane_state->address.type |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 4c3223a4d62b..adb6e7b9280c 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle) | |||
162 | if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { | 162 | if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { |
163 | pr_err("smc start failed\n"); | 163 | pr_err("smc start failed\n"); |
164 | hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); | 164 | hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); |
165 | return -EINVAL;; | 165 | return -EINVAL; |
166 | } | 166 | } |
167 | if (ret == PP_DPM_DISABLED) | 167 | if (ret == PP_DPM_DISABLED) |
168 | goto exit; | 168 | goto exit; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 41e42beff213..08e8a793714f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
2756 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); | 2756 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); |
2757 | 2757 | ||
2758 | 2758 | ||
2759 | disable_mclk_switching = ((1 < info.display_count) || | 2759 | if (info.display_count == 0) |
2760 | disable_mclk_switching_for_frame_lock || | 2760 | disable_mclk_switching = false; |
2761 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || | 2761 | else |
2762 | (mode_info.refresh_rate > 120)); | 2762 | disable_mclk_switching = ((1 < info.display_count) || |
2763 | disable_mclk_switching_for_frame_lock || | ||
2764 | smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || | ||
2765 | (mode_info.refresh_rate > 120)); | ||
2763 | 2766 | ||
2764 | sclk = smu7_ps->performance_levels[0].engine_clock; | 2767 | sclk = smu7_ps->performance_levels[0].engine_clock; |
2765 | mclk = smu7_ps->performance_levels[0].memory_clock; | 2768 | mclk = smu7_ps->performance_levels[0].memory_clock; |
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, | |||
4534 | int tmp_result, result = 0; | 4537 | int tmp_result, result = 0; |
4535 | uint32_t sclk_mask = 0, mclk_mask = 0; | 4538 | uint32_t sclk_mask = 0, mclk_mask = 0; |
4536 | 4539 | ||
4537 | if (hwmgr->chip_id == CHIP_FIJI) { | ||
4538 | if (request->type == AMD_PP_GFX_PROFILE) | ||
4539 | smu7_enable_power_containment(hwmgr); | ||
4540 | else if (request->type == AMD_PP_COMPUTE_PROFILE) | ||
4541 | smu7_disable_power_containment(hwmgr); | ||
4542 | } | ||
4543 | |||
4544 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) | 4540 | if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) |
4545 | return -EINVAL; | 4541 | return -EINVAL; |
4546 | 4542 | ||
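The new display_count == 0 special case keeps mclk switching enabled when no display is lit, instead of letting the multi-display, frame-lock, vblank and refresh-rate tests force it off. A sketch of the decision pulled out into a standalone helper, with hypothetical parameter names; the real code reads these values from hwmgr, info and mode_info as shown above:

static bool want_mclk_switching_disabled(unsigned int display_count,
					 bool frame_lock,
					 bool vblank_too_short,
					 unsigned int refresh_rate)
{
	if (display_count == 0)		/* nothing lit: leave switching enabled */
		return false;

	return display_count > 1 ||	/* more than one active display */
	       frame_lock ||		/* frame-lock cap forces it off */
	       vblank_too_short ||	/* vblank too short for retraining */
	       refresh_rate > 120;	/* very high refresh rate */
}

The vega10_hwmgr.c hunk below applies the same zero-display special case to its own set of conditions.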
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 2d55dabc77d4..5f9c3efb532f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |||
3168 | disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); | 3168 | disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); |
3169 | force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); | 3169 | force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); |
3170 | 3170 | ||
3171 | disable_mclk_switching = (info.display_count > 1) || | 3171 | if (info.display_count == 0) |
3172 | disable_mclk_switching_for_frame_lock || | 3172 | disable_mclk_switching = false; |
3173 | disable_mclk_switching_for_vr || | 3173 | else |
3174 | force_mclk_high; | 3174 | disable_mclk_switching = (info.display_count > 1) || |
3175 | disable_mclk_switching_for_frame_lock || | ||
3176 | disable_mclk_switching_for_vr || | ||
3177 | force_mclk_high; | ||
3175 | 3178 | ||
3176 | sclk = vega10_ps->performance_levels[0].gfx_clock; | 3179 | sclk = vega10_ps->performance_levels[0].gfx_clock; |
3177 | mclk = vega10_ps->performance_levels[0].mem_clock; | 3180 | mclk = vega10_ps->performance_levels[0].mem_clock; |
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index cd23b1b28259..c91b9b054e3f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c | |||
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc) | |||
294 | { | 294 | { |
295 | } | 295 | } |
296 | 296 | ||
297 | /* | 297 | static void cirrus_crtc_load_lut(struct drm_crtc *crtc) |
298 | * This is called after a mode is programmed. It should reverse anything done | ||
299 | * by the prepare function | ||
300 | */ | ||
301 | static void cirrus_crtc_commit(struct drm_crtc *crtc) | ||
302 | { | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * The core can pass us a set of gamma values to program. We actually only | ||
307 | * use this for 8-bit mode so can't perform smooth fades on deeper modes, | ||
308 | * but it's a requirement that we provide the function | ||
309 | */ | ||
310 | static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
311 | u16 *blue, uint32_t size, | ||
312 | struct drm_modeset_acquire_ctx *ctx) | ||
313 | { | 298 | { |
314 | struct drm_device *dev = crtc->dev; | 299 | struct drm_device *dev = crtc->dev; |
315 | struct cirrus_device *cdev = dev->dev_private; | 300 | struct cirrus_device *cdev = dev->dev_private; |
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
317 | int i; | 302 | int i; |
318 | 303 | ||
319 | if (!crtc->enabled) | 304 | if (!crtc->enabled) |
320 | return 0; | 305 | return; |
321 | 306 | ||
322 | r = crtc->gamma_store; | 307 | r = crtc->gamma_store; |
323 | g = r + crtc->gamma_size; | 308 | g = r + crtc->gamma_size; |
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
330 | WREG8(PALETTE_DATA, *g++ >> 8); | 315 | WREG8(PALETTE_DATA, *g++ >> 8); |
331 | WREG8(PALETTE_DATA, *b++ >> 8); | 316 | WREG8(PALETTE_DATA, *b++ >> 8); |
332 | } | 317 | } |
318 | } | ||
319 | |||
320 | /* | ||
321 | * This is called after a mode is programmed. It should reverse anything done | ||
322 | * by the prepare function | ||
323 | */ | ||
324 | static void cirrus_crtc_commit(struct drm_crtc *crtc) | ||
325 | { | ||
326 | cirrus_crtc_load_lut(crtc); | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * The core can pass us a set of gamma values to program. We actually only | ||
331 | * use this for 8-bit mode so can't perform smooth fades on deeper modes, | ||
332 | * but it's a requirement that we provide the function | ||
333 | */ | ||
334 | static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
335 | u16 *blue, uint32_t size, | ||
336 | struct drm_modeset_acquire_ctx *ctx) | ||
337 | { | ||
338 | cirrus_crtc_load_lut(crtc); | ||
333 | 339 | ||
334 | return 0; | 340 | return 0; |
335 | } | 341 | } |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ab4032167094..ae3cbfe9e01c 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, | |||
1878 | new_crtc_state->event->base.completion = &commit->flip_done; | 1878 | new_crtc_state->event->base.completion = &commit->flip_done; |
1879 | new_crtc_state->event->base.completion_release = release_crtc_commit; | 1879 | new_crtc_state->event->base.completion_release = release_crtc_commit; |
1880 | drm_crtc_commit_get(commit); | 1880 | drm_crtc_commit_get(commit); |
1881 | |||
1882 | commit->abort_completion = true; | ||
1881 | } | 1883 | } |
1882 | 1884 | ||
1883 | for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { | 1885 | for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { |
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); | |||
3421 | void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) | 3423 | void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) |
3422 | { | 3424 | { |
3423 | if (state->commit) { | 3425 | if (state->commit) { |
3426 | /* | ||
3427 | * In the event that a non-blocking commit returns | ||
3428 | * -ERESTARTSYS before the commit_tail work is queued, we will | ||
3429 | * have an extra reference to the commit object. Release it, if | ||
3430 | * the event has not been consumed by the worker. | ||
3431 | * | ||
3432 | * state->event may be freed, so we can't directly look at | ||
3433 | * state->event->base.completion. | ||
3434 | */ | ||
3435 | if (state->event && state->commit->abort_completion) | ||
3436 | drm_crtc_commit_put(state->commit); | ||
3437 | |||
3424 | kfree(state->commit->event); | 3438 | kfree(state->commit->event); |
3425 | state->commit->event = NULL; | 3439 | state->commit->event = NULL; |
3440 | |||
3426 | drm_crtc_commit_put(state->commit); | 3441 | drm_crtc_commit_put(state->commit); |
3427 | } | 3442 | } |
3428 | 3443 | ||
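The two drm_atomic_helper.c hunks pair up: setup_commit flags the commit with abort_completion when it hands its flip_done completion to the CRTC event, and the state-destroy path uses that flag to drop the extra drm_crtc_commit reference if the commit_tail worker never consumed the event (for example after a non-blocking commit returned -ERESTARTSYS). Compressed to its essentials:

/* setup side: the event will complete the commit, keep an extra ref */
drm_crtc_commit_get(commit);
commit->abort_completion = true;

/* destroy side: the worker never consumed the event, drop that ref */
if (state->event && state->commit->abort_completion)
	drm_crtc_commit_put(state->commit);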
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ddd537914575..4f751a9d71a3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -113,6 +113,9 @@ static const struct edid_quirk { | |||
113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ | 113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ |
114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, | 114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, |
115 | 115 | ||
116 | /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ | ||
117 | { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, | ||
118 | |||
116 | /* Belinea 10 15 55 */ | 119 | /* Belinea 10 15 55 */ |
117 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, | 120 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, |
118 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, | 121 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, |
@@ -162,6 +165,24 @@ static const struct edid_quirk { | |||
162 | 165 | ||
163 | /* HTC Vive VR Headset */ | 166 | /* HTC Vive VR Headset */ |
164 | { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, | 167 | { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, |
168 | |||
169 | /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ | ||
170 | { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, | ||
171 | { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, | ||
172 | { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, | ||
173 | |||
174 | /* Windows Mixed Reality Headsets */ | ||
175 | { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, | ||
176 | { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP }, | ||
177 | { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP }, | ||
178 | { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP }, | ||
179 | { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP }, | ||
180 | { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP }, | ||
181 | { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP }, | ||
182 | { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP }, | ||
183 | |||
184 | /* Sony PlayStation VR Headset */ | ||
185 | { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, | ||
165 | }; | 186 | }; |
166 | 187 | ||
167 | /* | 188 | /* |
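The new entries all use EDID_QUIRK_NON_DESKTOP which, like the existing HTC Vive row, marks the display as a head-mounted or otherwise non-desktop device through the connector's non_desktop property, so compositors know not to extend a normal desktop onto it.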
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 5a13ff29f4f0..c0530a1af5e3 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c | |||
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev, | |||
121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); | 121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); |
122 | r.handles[0] = or->handle; | 122 | r.handles[0] = or->handle; |
123 | 123 | ||
124 | if (r.pixel_format == DRM_FORMAT_XRGB2101010 && | ||
125 | dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) | ||
126 | r.pixel_format = DRM_FORMAT_XBGR2101010; | ||
127 | |||
124 | ret = drm_mode_addfb2(dev, &r, file_priv); | 128 | ret = drm_mode_addfb2(dev, &r, file_priv); |
125 | if (ret) | 129 | if (ret) |
126 | return ret; | 130 | return ret; |
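The remap only triggers for drivers that opt in with DRIVER_PREFER_XBGR_30BPP; the nv50_display.c hunk further down sets exactly that flag, so legacy 30 bpp AddFB requests land on the component order the hardware actually scans out.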
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 186c4e90cc1c..89eef1bb4ddc 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) | |||
836 | if (!mm->color_adjust) | 836 | if (!mm->color_adjust) |
837 | return NULL; | 837 | return NULL; |
838 | 838 | ||
839 | hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); | 839 | /* |
840 | hole_start = __drm_mm_hole_node_start(hole); | 840 | * The hole found during scanning should ideally be the first element |
841 | hole_end = hole_start + hole->hole_size; | 841 | * in the hole_stack list, but due to side-effects in the driver it |
842 | * may not be. | ||
843 | */ | ||
844 | list_for_each_entry(hole, &mm->hole_stack, hole_stack) { | ||
845 | hole_start = __drm_mm_hole_node_start(hole); | ||
846 | hole_end = hole_start + hole->hole_size; | ||
847 | |||
848 | if (hole_start <= scan->hit_start && | ||
849 | hole_end >= scan->hit_end) | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | /* We should only be called after we found the hole previously */ | ||
854 | DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); | ||
855 | if (unlikely(&hole->hole_stack == &mm->hole_stack)) | ||
856 | return NULL; | ||
842 | 857 | ||
843 | DRM_MM_BUG_ON(hole_start > scan->hit_start); | 858 | DRM_MM_BUG_ON(hole_start > scan->hit_start); |
844 | DRM_MM_BUG_ON(hole_end < scan->hit_end); | 859 | DRM_MM_BUG_ON(hole_end < scan->hit_end); |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 555fbe54d6e2..00b8445ba819 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -654,6 +654,26 @@ out: | |||
654 | } | 654 | } |
655 | 655 | ||
656 | /** | 656 | /** |
657 | * drm_kms_helper_is_poll_worker - is %current task an output poll worker? | ||
658 | * | ||
659 | * Determine if %current task is an output poll worker. This can be used | ||
660 | * to select distinct code paths for output polling versus other contexts. | ||
661 | * | ||
662 | * One use case is to avoid a deadlock between the output poll worker and | ||
663 | * the autosuspend worker wherein the latter waits for polling to finish | ||
664 | * upon calling drm_kms_helper_poll_disable(), while the former waits for | ||
665 | * runtime suspend to finish upon calling pm_runtime_get_sync() in a | ||
666 | * connector ->detect hook. | ||
667 | */ | ||
668 | bool drm_kms_helper_is_poll_worker(void) | ||
669 | { | ||
670 | struct work_struct *work = current_work(); | ||
671 | |||
672 | return work && work->func == output_poll_execute; | ||
673 | } | ||
674 | EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); | ||
675 | |||
676 | /** | ||
657 | * drm_kms_helper_poll_disable - disable output polling | 677 | * drm_kms_helper_poll_disable - disable output polling |
658 | * @dev: drm_device | 678 | * @dev: drm_device |
659 | * | 679 | * |
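The helper above is what the nouveau and radeon ->detect hooks later in this series key off. A minimal sketch of the intended usage in a connector ->detect implementation; the surrounding driver names are placeholders, only the drm_kms_helper_is_poll_worker() and pm_runtime calls are the real APIs:

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = connector_status_disconnected;
	int ret;

	/* Only take a runtime PM reference outside the poll worker: inside
	 * it the device is already runtime-active, and taking a reference
	 * could deadlock against drm_kms_helper_poll_disable() on suspend.
	 */
	if (!drm_kms_helper_is_poll_worker()) {
		ret = pm_runtime_get_sync(connector->dev->dev);
		if (ret < 0 && ret != -EACCES)
			return status;
	}

	/* ... probe the hardware here and update status ... */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}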
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2b8bf2dd6387..f68ef1b3a28c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
286 | 286 | ||
287 | node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); | 287 | node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); |
288 | if (!node) { | 288 | if (!node) { |
289 | dev_err(dev, "failed to allocate memory\n"); | ||
290 | ret = -ENOMEM; | 289 | ret = -ENOMEM; |
291 | goto err; | 290 | goto err; |
292 | } | 291 | } |
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) | |||
926 | struct drm_device *drm_dev = g2d->subdrv.drm_dev; | 925 | struct drm_device *drm_dev = g2d->subdrv.drm_dev; |
927 | struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; | 926 | struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; |
928 | struct drm_exynos_pending_g2d_event *e; | 927 | struct drm_exynos_pending_g2d_event *e; |
929 | struct timeval now; | 928 | struct timespec64 now; |
930 | 929 | ||
931 | if (list_empty(&runqueue_node->event_list)) | 930 | if (list_empty(&runqueue_node->event_list)) |
932 | return; | 931 | return; |
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) | |||
934 | e = list_first_entry(&runqueue_node->event_list, | 933 | e = list_first_entry(&runqueue_node->event_list, |
935 | struct drm_exynos_pending_g2d_event, base.link); | 934 | struct drm_exynos_pending_g2d_event, base.link); |
936 | 935 | ||
937 | do_gettimeofday(&now); | 936 | ktime_get_ts64(&now); |
938 | e->event.tv_sec = now.tv_sec; | 937 | e->event.tv_sec = now.tv_sec; |
939 | e->event.tv_usec = now.tv_usec; | 938 | e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; |
940 | e->event.cmdlist_no = cmdlist_no; | 939 | e->event.cmdlist_no = cmdlist_no; |
941 | 940 | ||
942 | drm_send_event(drm_dev, &e->base); | 941 | drm_send_event(drm_dev, &e->base); |
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | |||
1358 | return -EFAULT; | 1357 | return -EFAULT; |
1359 | 1358 | ||
1360 | runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); | 1359 | runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); |
1361 | if (!runqueue_node) { | 1360 | if (!runqueue_node) |
1362 | dev_err(dev, "failed to allocate memory\n"); | ||
1363 | return -ENOMEM; | 1361 | return -ENOMEM; |
1364 | } | 1362 | |
1365 | run_cmdlist = &runqueue_node->run_cmdlist; | 1363 | run_cmdlist = &runqueue_node->run_cmdlist; |
1366 | event_list = &runqueue_node->event_list; | 1364 | event_list = &runqueue_node->event_list; |
1367 | INIT_LIST_HEAD(run_cmdlist); | 1365 | INIT_LIST_HEAD(run_cmdlist); |
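The timestamp change above is the usual y2038-safe conversion: struct timeval plus do_gettimeofday() becomes struct timespec64 plus ktime_get_ts64(), and the microsecond field is derived from tv_nsec. Sketch of the pattern in isolation (needs <linux/timekeeping.h>; the 32-bit destination fields mirror the existing uapi event struct):

static void fill_event_timestamp(u32 *tv_sec, u32 *tv_usec)
{
	struct timespec64 now;

	ktime_get_ts64(&now);			/* 64-bit-safe monotonic time */
	*tv_sec  = (u32)now.tv_sec;		/* truncation matches the old uapi */
	*tv_usec = now.tv_nsec / NSEC_PER_USEC;	/* ns -> us */
}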
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h deleted file mode 100644 index 71a0b4c0c1e8..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h +++ /dev/null | |||
@@ -1,19 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
3 | * | ||
4 | * Authors: | ||
5 | * YoungJun Cho <yj44.cho@samsung.com> | ||
6 | * Eunchul Kim <chulspro.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _EXYNOS_DRM_ROTATOR_H_ | ||
15 | #define _EXYNOS_DRM_ROTATOR_H_ | ||
16 | |||
17 | /* TODO */ | ||
18 | |||
19 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a4b75a46f946..abd84cbcf1c2 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata) | |||
1068 | /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ | 1068 | /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ |
1069 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) | 1069 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) |
1070 | | HDMI_I2S_SEL_LRCK(6)); | 1070 | | HDMI_I2S_SEL_LRCK(6)); |
1071 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) | 1071 | |
1072 | | HDMI_I2S_SEL_SDATA2(4)); | 1072 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3) |
1073 | | HDMI_I2S_SEL_SDATA0(4)); | ||
1074 | |||
1073 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) | 1075 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) |
1074 | | HDMI_I2S_SEL_SDATA2(2)); | 1076 | | HDMI_I2S_SEL_SDATA2(2)); |
1077 | |||
1075 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); | 1078 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); |
1076 | 1079 | ||
1077 | /* I2S_CON_1 & 2 */ | 1080 | /* I2S_CON_1 & 2 */ |
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h index 30496134a3d0..d7cbe53c4c01 100644 --- a/drivers/gpu/drm/exynos/regs-fimc.h +++ b/drivers/gpu/drm/exynos/regs-fimc.h | |||
@@ -569,7 +569,7 @@ | |||
569 | #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) | 569 | #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) |
570 | #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) | 570 | #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) |
571 | #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) | 571 | #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) |
572 | #define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) | 572 | #define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0)) |
573 | 573 | ||
574 | /* Real input DMA size register */ | 574 | /* Real input DMA size register */ |
575 | #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) | 575 | #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) |
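The one-character fix above is easy to sanity-check: with the relational operator, 0xff < 13 and 0xff < 0 both evaluate to 0, so the old macro expanded to an empty mask, while the shifted version actually covers the intended bit fields. A stand-alone check (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int old_mask = (0xff < 13) | (0xff < 0);	/* comparisons: 0 | 0 */
	unsigned int new_mask = (0xff << 13) | (0xff << 0);	/* shifts: real bitmask */

	printf("old: 0x%08x  new: 0x%08x\n", old_mask, new_mask);
	/* prints: old: 0x00000000  new: 0x001fe0ff */
	return 0;
}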
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h index 04be0f7e8193..4420c203ac85 100644 --- a/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/drivers/gpu/drm/exynos/regs-hdmi.h | |||
@@ -464,7 +464,7 @@ | |||
464 | 464 | ||
465 | /* I2S_PIN_SEL_1 */ | 465 | /* I2S_PIN_SEL_1 */ |
466 | #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) | 466 | #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) |
467 | #define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7) | 467 | #define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7) |
468 | 468 | ||
469 | /* I2S_PIN_SEL_2 */ | 469 | /* I2S_PIN_SEL_2 */ |
470 | #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) | 470 | #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4401068ff468..3ab1ace2a6bd 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) | |||
505 | list_add_tail(&vma->exec_link, &eb->unbound); | 505 | list_add_tail(&vma->exec_link, &eb->unbound); |
506 | if (drm_mm_node_allocated(&vma->node)) | 506 | if (drm_mm_node_allocated(&vma->node)) |
507 | err = i915_vma_unbind(vma); | 507 | err = i915_vma_unbind(vma); |
508 | if (unlikely(err)) | ||
509 | vma->exec_flags = NULL; | ||
508 | } | 510 | } |
509 | return err; | 511 | return err; |
510 | } | 512 | } |
@@ -2410,7 +2412,7 @@ err_request: | |||
2410 | if (out_fence) { | 2412 | if (out_fence) { |
2411 | if (err == 0) { | 2413 | if (err == 0) { |
2412 | fd_install(out_fence_fd, out_fence->file); | 2414 | fd_install(out_fence_fd, out_fence->file); |
2413 | args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ | 2415 | args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ |
2414 | args->rsvd2 |= (u64)out_fence_fd << 32; | 2416 | args->rsvd2 |= (u64)out_fence_fd << 32; |
2415 | out_fence_fd = -1; | 2417 | out_fence_fd = -1; |
2416 | } else { | 2418 | } else { |
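GENMASK_ULL(h, l) takes the high bit first, so the corrected GENMASK_ULL(31, 0) is 0x00000000ffffffff and keeps the in-fence in the low word before the out-fence fd is ORed into the high word; with the arguments swapped, the usual kernel definition collapses to 0 and the in-fence would be wiped. A stand-alone illustration with the macro written out by hand (the kernel provides it in its bitops headers):

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* keep the in-fence (low 32 bits), then OR the out-fence fd on top */
	unsigned long long rsvd2 = 0x1122334455667788ULL;
	int out_fence_fd = 42;

	rsvd2 &= GENMASK_ULL(31, 0);		/* 0x00000000ffffffff */
	rsvd2 |= (unsigned long long)out_fence_fd << 32;

	printf("rsvd2 = 0x%016llx\n", rsvd2);	/* 0x0000002a55667788 */
	return 0;
}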
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index e09d18df8b7f..a3e93d46316a 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c | |||
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request) | |||
476 | GEM_BUG_ON(!irqs_disabled()); | 476 | GEM_BUG_ON(!irqs_disabled()); |
477 | lockdep_assert_held(&engine->timeline->lock); | 477 | lockdep_assert_held(&engine->timeline->lock); |
478 | 478 | ||
479 | trace_i915_gem_request_execute(request); | ||
480 | |||
481 | /* Transfer from per-context onto the global per-engine timeline */ | 479 | /* Transfer from per-context onto the global per-engine timeline */ |
482 | timeline = engine->timeline; | 480 | timeline = engine->timeline; |
483 | GEM_BUG_ON(timeline == request->timeline); | 481 | GEM_BUG_ON(timeline == request->timeline); |
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request) | |||
501 | list_move_tail(&request->link, &timeline->requests); | 499 | list_move_tail(&request->link, &timeline->requests); |
502 | spin_unlock(&request->timeline->lock); | 500 | spin_unlock(&request->timeline->lock); |
503 | 501 | ||
502 | trace_i915_gem_request_execute(request); | ||
503 | |||
504 | wake_up_all(&request->execute); | 504 | wake_up_all(&request->execute); |
505 | } | 505 | } |
506 | 506 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a2108e35c599..33eb0c5b1d32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -2027,7 +2027,7 @@ enum i915_power_well_id { | |||
2027 | #define _CNL_PORT_TX_DW5_LN0_AE 0x162454 | 2027 | #define _CNL_PORT_TX_DW5_LN0_AE 0x162454 |
2028 | #define _CNL_PORT_TX_DW5_LN0_B 0x162654 | 2028 | #define _CNL_PORT_TX_DW5_LN0_B 0x162654 |
2029 | #define _CNL_PORT_TX_DW5_LN0_C 0x162C54 | 2029 | #define _CNL_PORT_TX_DW5_LN0_C 0x162C54 |
2030 | #define _CNL_PORT_TX_DW5_LN0_D 0x162ED4 | 2030 | #define _CNL_PORT_TX_DW5_LN0_D 0x162E54 |
2031 | #define _CNL_PORT_TX_DW5_LN0_F 0x162854 | 2031 | #define _CNL_PORT_TX_DW5_LN0_F 0x162854 |
2032 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ | 2032 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ |
2033 | _CNL_PORT_TX_DW5_GRP_AE, \ | 2033 | _CNL_PORT_TX_DW5_GRP_AE, \ |
@@ -2058,7 +2058,7 @@ enum i915_power_well_id { | |||
2058 | #define _CNL_PORT_TX_DW7_LN0_AE 0x16245C | 2058 | #define _CNL_PORT_TX_DW7_LN0_AE 0x16245C |
2059 | #define _CNL_PORT_TX_DW7_LN0_B 0x16265C | 2059 | #define _CNL_PORT_TX_DW7_LN0_B 0x16265C |
2060 | #define _CNL_PORT_TX_DW7_LN0_C 0x162C5C | 2060 | #define _CNL_PORT_TX_DW7_LN0_C 0x162C5C |
2061 | #define _CNL_PORT_TX_DW7_LN0_D 0x162EDC | 2061 | #define _CNL_PORT_TX_DW7_LN0_D 0x162E5C |
2062 | #define _CNL_PORT_TX_DW7_LN0_F 0x16285C | 2062 | #define _CNL_PORT_TX_DW7_LN0_F 0x16285C |
2063 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ | 2063 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ |
2064 | _CNL_PORT_TX_DW7_GRP_AE, \ | 2064 | _CNL_PORT_TX_DW7_GRP_AE, \ |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 522d54fecb53..4a01f62a392d 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, | |||
779 | { | 779 | { |
780 | struct intel_encoder *encoder; | 780 | struct intel_encoder *encoder; |
781 | 781 | ||
782 | if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) | ||
783 | return NULL; | ||
784 | |||
785 | /* MST */ | 782 | /* MST */ |
786 | if (pipe >= 0) { | 783 | if (pipe >= 0) { |
784 | if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) | ||
785 | return NULL; | ||
786 | |||
787 | encoder = dev_priv->av_enc_map[pipe]; | 787 | encoder = dev_priv->av_enc_map[pipe]; |
788 | /* | 788 | /* |
789 | * when bootup, audio driver may not know it is | 789 | * when bootup, audio driver may not know it is |
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 5155f0179b61..05520202c967 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include "meson_venc.h" | 36 | #include "meson_venc.h" |
37 | #include "meson_vpp.h" | 37 | #include "meson_vpp.h" |
38 | #include "meson_viu.h" | 38 | #include "meson_viu.h" |
39 | #include "meson_canvas.h" | ||
39 | #include "meson_registers.h" | 40 | #include "meson_registers.h" |
40 | 41 | ||
41 | /* CRTC definition */ | 42 | /* CRTC definition */ |
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv) | |||
192 | } else | 193 | } else |
193 | meson_vpp_disable_interlace_vscaler_osd1(priv); | 194 | meson_vpp_disable_interlace_vscaler_osd1(priv); |
194 | 195 | ||
196 | meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, | ||
197 | priv->viu.osd1_addr, priv->viu.osd1_stride, | ||
198 | priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, | ||
199 | MESON_CANVAS_BLKMODE_LINEAR); | ||
200 | |||
195 | /* Enable OSD1 */ | 201 | /* Enable OSD1 */ |
196 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, | 202 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, |
197 | priv->io_base + _REG(VPP_MISC)); | 203 | priv->io_base + _REG(VPP_MISC)); |
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 5e8b392b9d1f..8450d6ac8c9b 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h | |||
@@ -43,6 +43,9 @@ struct meson_drm { | |||
43 | bool osd1_commit; | 43 | bool osd1_commit; |
44 | uint32_t osd1_ctrl_stat; | 44 | uint32_t osd1_ctrl_stat; |
45 | uint32_t osd1_blk0_cfg[5]; | 45 | uint32_t osd1_blk0_cfg[5]; |
46 | uint32_t osd1_addr; | ||
47 | uint32_t osd1_stride; | ||
48 | uint32_t osd1_height; | ||
46 | } viu; | 49 | } viu; |
47 | 50 | ||
48 | struct { | 51 | struct { |
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index d0a6ac8390f3..27bd3503e1e4 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c | |||
@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane, | |||
164 | /* Update Canvas with buffer address */ | 164 | /* Update Canvas with buffer address */ |
165 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 165 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
166 | 166 | ||
167 | meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, | 167 | priv->viu.osd1_addr = gem->paddr; |
168 | gem->paddr, fb->pitches[0], | 168 | priv->viu.osd1_stride = fb->pitches[0]; |
169 | fb->height, MESON_CANVAS_WRAP_NONE, | 169 | priv->viu.osd1_height = fb->height; |
170 | MESON_CANVAS_BLKMODE_LINEAR); | ||
171 | 170 | ||
172 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); | 171 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); |
173 | } | 172 | } |
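Taken together with the meson_crtc.c and meson_drv.h hunks above, the plane update now only records the new OSD1 address, stride and height, and the canvas itself is reprogrammed from the vsync interrupt handler, presumably so the buffer switch lands in the same vblank as the rest of the OSD1 register update instead of taking effect mid-frame.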
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 3e9bba4d6624..6d8e3a9a6fc0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) | |||
680 | } else { | 680 | } else { |
681 | dev_info(&pdev->dev, | 681 | dev_info(&pdev->dev, |
682 | "no iommu, fallback to phys contig buffers for scanout\n"); | 682 | "no iommu, fallback to phys contig buffers for scanout\n"); |
683 | aspace = NULL;; | 683 | aspace = NULL; |
684 | } | 684 | } |
685 | 685 | ||
686 | pm_runtime_put_sync(&pdev->dev); | 686 | pm_runtime_put_sync(&pdev->dev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 69d6e61a01ec..6ed9cb053dfa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
570 | nv_connector->edid = NULL; | 570 | nv_connector->edid = NULL; |
571 | } | 571 | } |
572 | 572 | ||
573 | ret = pm_runtime_get_sync(connector->dev->dev); | 573 | /* Outputs are only polled while runtime active, so acquiring a |
574 | if (ret < 0 && ret != -EACCES) | 574 | * runtime PM ref here is unnecessary (and would deadlock upon |
575 | return conn_status; | 575 | * runtime suspend because it waits for polling to finish). |
576 | */ | ||
577 | if (!drm_kms_helper_is_poll_worker()) { | ||
578 | ret = pm_runtime_get_sync(connector->dev->dev); | ||
579 | if (ret < 0 && ret != -EACCES) | ||
580 | return conn_status; | ||
581 | } | ||
576 | 582 | ||
577 | nv_encoder = nouveau_connector_ddc_detect(connector); | 583 | nv_encoder = nouveau_connector_ddc_detect(connector); |
578 | if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { | 584 | if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { |
@@ -647,8 +653,10 @@ detect_analog: | |||
647 | 653 | ||
648 | out: | 654 | out: |
649 | 655 | ||
650 | pm_runtime_mark_last_busy(connector->dev->dev); | 656 | if (!drm_kms_helper_is_poll_worker()) { |
651 | pm_runtime_put_autosuspend(connector->dev->dev); | 657 | pm_runtime_mark_last_busy(connector->dev->dev); |
658 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
659 | } | ||
652 | 660 | ||
653 | return conn_status; | 661 | return conn_status; |
654 | } | 662 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index dd8d4352ed99..caddce88d2d8 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev) | |||
4477 | nouveau_display(dev)->fini = nv50_display_fini; | 4477 | nouveau_display(dev)->fini = nv50_display_fini; |
4478 | disp->disp = &nouveau_display(dev)->disp; | 4478 | disp->disp = &nouveau_display(dev)->disp; |
4479 | dev->mode_config.funcs = &nv50_disp_func; | 4479 | dev->mode_config.funcs = &nv50_disp_func; |
4480 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; | ||
4480 | if (nouveau_atomic) | 4481 | if (nouveau_atomic) |
4481 | dev->driver->driver_features |= DRIVER_ATOMIC; | 4482 | dev->driver->driver_features |= DRIVER_ATOMIC; |
4482 | 4483 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5012f5e47a1e..2e2ca3c6b47d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) | |||
899 | enum drm_connector_status ret = connector_status_disconnected; | 899 | enum drm_connector_status ret = connector_status_disconnected; |
900 | int r; | 900 | int r; |
901 | 901 | ||
902 | r = pm_runtime_get_sync(connector->dev->dev); | 902 | if (!drm_kms_helper_is_poll_worker()) { |
903 | if (r < 0) | 903 | r = pm_runtime_get_sync(connector->dev->dev); |
904 | return connector_status_disconnected; | 904 | if (r < 0) |
905 | return connector_status_disconnected; | ||
906 | } | ||
905 | 907 | ||
906 | if (encoder) { | 908 | if (encoder) { |
907 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 909 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
@@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) | |||
924 | /* check acpi lid status ??? */ | 926 | /* check acpi lid status ??? */ |
925 | 927 | ||
926 | radeon_connector_update_scratch_regs(connector, ret); | 928 | radeon_connector_update_scratch_regs(connector, ret); |
927 | pm_runtime_mark_last_busy(connector->dev->dev); | 929 | |
928 | pm_runtime_put_autosuspend(connector->dev->dev); | 930 | if (!drm_kms_helper_is_poll_worker()) { |
931 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
932 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
933 | } | ||
934 | |||
929 | return ret; | 935 | return ret; |
930 | } | 936 | } |
931 | 937 | ||
@@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
1039 | enum drm_connector_status ret = connector_status_disconnected; | 1045 | enum drm_connector_status ret = connector_status_disconnected; |
1040 | int r; | 1046 | int r; |
1041 | 1047 | ||
1042 | r = pm_runtime_get_sync(connector->dev->dev); | 1048 | if (!drm_kms_helper_is_poll_worker()) { |
1043 | if (r < 0) | 1049 | r = pm_runtime_get_sync(connector->dev->dev); |
1044 | return connector_status_disconnected; | 1050 | if (r < 0) |
1051 | return connector_status_disconnected; | ||
1052 | } | ||
1045 | 1053 | ||
1046 | encoder = radeon_best_single_encoder(connector); | 1054 | encoder = radeon_best_single_encoder(connector); |
1047 | if (!encoder) | 1055 | if (!encoder) |
@@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
1108 | radeon_connector_update_scratch_regs(connector, ret); | 1116 | radeon_connector_update_scratch_regs(connector, ret); |
1109 | 1117 | ||
1110 | out: | 1118 | out: |
1111 | pm_runtime_mark_last_busy(connector->dev->dev); | 1119 | if (!drm_kms_helper_is_poll_worker()) { |
1112 | pm_runtime_put_autosuspend(connector->dev->dev); | 1120 | pm_runtime_mark_last_busy(connector->dev->dev); |
1121 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1122 | } | ||
1113 | 1123 | ||
1114 | return ret; | 1124 | return ret; |
1115 | } | 1125 | } |
@@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) | |||
1173 | if (!radeon_connector->dac_load_detect) | 1183 | if (!radeon_connector->dac_load_detect) |
1174 | return ret; | 1184 | return ret; |
1175 | 1185 | ||
1176 | r = pm_runtime_get_sync(connector->dev->dev); | 1186 | if (!drm_kms_helper_is_poll_worker()) { |
1177 | if (r < 0) | 1187 | r = pm_runtime_get_sync(connector->dev->dev); |
1178 | return connector_status_disconnected; | 1188 | if (r < 0) |
1189 | return connector_status_disconnected; | ||
1190 | } | ||
1179 | 1191 | ||
1180 | encoder = radeon_best_single_encoder(connector); | 1192 | encoder = radeon_best_single_encoder(connector); |
1181 | if (!encoder) | 1193 | if (!encoder) |
@@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) | |||
1187 | if (ret == connector_status_connected) | 1199 | if (ret == connector_status_connected) |
1188 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); | 1200 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); |
1189 | radeon_connector_update_scratch_regs(connector, ret); | 1201 | radeon_connector_update_scratch_regs(connector, ret); |
1190 | pm_runtime_mark_last_busy(connector->dev->dev); | 1202 | |
1191 | pm_runtime_put_autosuspend(connector->dev->dev); | 1203 | if (!drm_kms_helper_is_poll_worker()) { |
1204 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
1205 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1206 | } | ||
1207 | |||
1192 | return ret; | 1208 | return ret; |
1193 | } | 1209 | } |
1194 | 1210 | ||
@@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
1251 | enum drm_connector_status ret = connector_status_disconnected; | 1267 | enum drm_connector_status ret = connector_status_disconnected; |
1252 | bool dret = false, broken_edid = false; | 1268 | bool dret = false, broken_edid = false; |
1253 | 1269 | ||
1254 | r = pm_runtime_get_sync(connector->dev->dev); | 1270 | if (!drm_kms_helper_is_poll_worker()) { |
1255 | if (r < 0) | 1271 | r = pm_runtime_get_sync(connector->dev->dev); |
1256 | return connector_status_disconnected; | 1272 | if (r < 0) |
1273 | return connector_status_disconnected; | ||
1274 | } | ||
1257 | 1275 | ||
1258 | if (radeon_connector->detected_hpd_without_ddc) { | 1276 | if (radeon_connector->detected_hpd_without_ddc) { |
1259 | force = true; | 1277 | force = true; |
@@ -1436,8 +1454,10 @@ out: | |||
1436 | } | 1454 | } |
1437 | 1455 | ||
1438 | exit: | 1456 | exit: |
1439 | pm_runtime_mark_last_busy(connector->dev->dev); | 1457 | if (!drm_kms_helper_is_poll_worker()) { |
1440 | pm_runtime_put_autosuspend(connector->dev->dev); | 1458 | pm_runtime_mark_last_busy(connector->dev->dev); |
1459 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1460 | } | ||
1441 | 1461 | ||
1442 | return ret; | 1462 | return ret; |
1443 | } | 1463 | } |
@@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1688 | if (radeon_dig_connector->is_mst) | 1708 | if (radeon_dig_connector->is_mst) |
1689 | return connector_status_disconnected; | 1709 | return connector_status_disconnected; |
1690 | 1710 | ||
1691 | r = pm_runtime_get_sync(connector->dev->dev); | 1711 | if (!drm_kms_helper_is_poll_worker()) { |
1692 | if (r < 0) | 1712 | r = pm_runtime_get_sync(connector->dev->dev); |
1693 | return connector_status_disconnected; | 1713 | if (r < 0) |
1714 | return connector_status_disconnected; | ||
1715 | } | ||
1694 | 1716 | ||
1695 | if (!force && radeon_check_hpd_status_unchanged(connector)) { | 1717 | if (!force && radeon_check_hpd_status_unchanged(connector)) { |
1696 | ret = connector->status; | 1718 | ret = connector->status; |
@@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1777 | } | 1799 | } |
1778 | 1800 | ||
1779 | out: | 1801 | out: |
1780 | pm_runtime_mark_last_busy(connector->dev->dev); | 1802 | if (!drm_kms_helper_is_poll_worker()) { |
1781 | pm_runtime_put_autosuspend(connector->dev->dev); | 1803 | pm_runtime_mark_last_busy(connector->dev->dev); |
1804 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1805 | } | ||
1782 | 1806 | ||
1783 | return ret; | 1807 | return ret; |
1784 | } | 1808 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8d3e3d2e0090..7828a5e10629 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1365 | if ((rdev->flags & RADEON_IS_PCI) && | 1365 | if ((rdev->flags & RADEON_IS_PCI) && |
1366 | (rdev->family <= CHIP_RS740)) | 1366 | (rdev->family <= CHIP_RS740)) |
1367 | rdev->need_dma32 = true; | 1367 | rdev->need_dma32 = true; |
1368 | #ifdef CONFIG_PPC64 | ||
1369 | if (rdev->family == CHIP_CEDAR) | ||
1370 | rdev->need_dma32 = true; | ||
1371 | #endif | ||
1368 | 1372 | ||
1369 | dma_bits = rdev->need_dma32 ? 32 : 40; | 1373 | dma_bits = rdev->need_dma32 ? 32 : 40; |
1370 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | 1374 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 326ad068c15a..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); | |||
47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | 47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); |
48 | static void radeon_pm_update_profile(struct radeon_device *rdev); | 48 | static void radeon_pm_update_profile(struct radeon_device *rdev); |
49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
50 | static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); | ||
51 | 50 | ||
52 | int radeon_pm_get_type_index(struct radeon_device *rdev, | 51 | int radeon_pm_get_type_index(struct radeon_device *rdev, |
53 | enum radeon_pm_state_type ps_type, | 52 | enum radeon_pm_state_type ps_type, |
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) | |||
80 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); | 79 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); |
81 | } | 80 | } |
82 | mutex_unlock(&rdev->pm.mutex); | 81 | mutex_unlock(&rdev->pm.mutex); |
83 | /* allow new DPM state to be picked */ | ||
84 | radeon_pm_compute_clocks_dpm(rdev); | ||
85 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 82 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
86 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | 83 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
87 | mutex_lock(&rdev->pm.mutex); | 84 | mutex_lock(&rdev->pm.mutex); |
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
885 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; | 882 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; |
886 | /* balanced states don't exist at the moment */ | 883 | /* balanced states don't exist at the moment */ |
887 | if (dpm_state == POWER_STATE_TYPE_BALANCED) | 884 | if (dpm_state == POWER_STATE_TYPE_BALANCED) |
888 | dpm_state = rdev->pm.dpm.ac_power ? | 885 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; |
889 | POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; | ||
890 | 886 | ||
891 | restart_search: | 887 | restart_search: |
892 | /* Pick the best power state based on current conditions */ | 888 | /* Pick the best power state based on current conditions */ |
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 2c18996d59c5..0d95888ccc3e 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c | |||
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo | |||
461 | { | 461 | { |
462 | struct drm_sched_job *s_job; | 462 | struct drm_sched_job *s_job; |
463 | struct drm_sched_entity *entity, *tmp; | 463 | struct drm_sched_entity *entity, *tmp; |
464 | int i;; | 464 | int i; |
465 | 465 | ||
466 | spin_lock(&sched->job_list_lock); | 466 | spin_lock(&sched->job_list_lock); |
467 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { | 467 | list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 3c15cf24b503..b3960118deb9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
@@ -260,7 +260,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon, | |||
260 | const struct drm_display_mode *mode) | 260 | const struct drm_display_mode *mode) |
261 | { | 261 | { |
262 | /* Configure the dot clock */ | 262 | /* Configure the dot clock */ |
263 | clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); | 263 | clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000); |
264 | 264 | ||
265 | /* Set the resolution */ | 265 | /* Set the resolution */ |
266 | regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, | 266 | regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, |
@@ -335,6 +335,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, | |||
335 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, | 335 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, |
336 | SUN4I_TCON_GCTL_IOMAP_MASK, | 336 | SUN4I_TCON_GCTL_IOMAP_MASK, |
337 | SUN4I_TCON_GCTL_IOMAP_TCON0); | 337 | SUN4I_TCON_GCTL_IOMAP_TCON0); |
338 | |||
339 | /* Enable the output on the pins */ | ||
340 | regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000); | ||
338 | } | 341 | } |
339 | 342 | ||
340 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | 343 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, |
@@ -418,7 +421,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, | |||
418 | WARN_ON(!tcon->quirks->has_channel_1); | 421 | WARN_ON(!tcon->quirks->has_channel_1); |
419 | 422 | ||
420 | /* Configure the dot clock */ | 423 | /* Configure the dot clock */ |
421 | clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000); | 424 | clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000); |
422 | 425 | ||
423 | /* Adjust clock delay */ | 426 | /* Adjust clock delay */ |
424 | clk_delay = sun4i_tcon_get_clk_delay(mode, 1); | 427 | clk_delay = sun4i_tcon_get_clk_delay(mode, 1); |
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5720a0d4ac0a..677ac16c8a6d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, | |||
197 | case VIRTGPU_PARAM_3D_FEATURES: | 197 | case VIRTGPU_PARAM_3D_FEATURES: |
198 | value = vgdev->has_virgl_3d == true ? 1 : 0; | 198 | value = vgdev->has_virgl_3d == true ? 1 : 0; |
199 | break; | 199 | break; |
200 | case VIRTGPU_PARAM_CAPSET_QUERY_FIX: | ||
201 | value = 1; | ||
202 | break; | ||
200 | default: | 203 | default: |
201 | return -EINVAL; | 204 | return -EINVAL; |
202 | } | 205 | } |
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
472 | { | 475 | { |
473 | struct virtio_gpu_device *vgdev = dev->dev_private; | 476 | struct virtio_gpu_device *vgdev = dev->dev_private; |
474 | struct drm_virtgpu_get_caps *args = data; | 477 | struct drm_virtgpu_get_caps *args = data; |
475 | int size; | 478 | unsigned size, host_caps_size; |
476 | int i; | 479 | int i; |
477 | int found_valid = -1; | 480 | int found_valid = -1; |
478 | int ret; | 481 | int ret; |
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
481 | if (vgdev->num_capsets == 0) | 484 | if (vgdev->num_capsets == 0) |
482 | return -ENOSYS; | 485 | return -ENOSYS; |
483 | 486 | ||
487 | /* don't allow userspace to pass 0 */ | ||
488 | if (args->size == 0) | ||
489 | return -EINVAL; | ||
490 | |||
484 | spin_lock(&vgdev->display_info_lock); | 491 | spin_lock(&vgdev->display_info_lock); |
485 | for (i = 0; i < vgdev->num_capsets; i++) { | 492 | for (i = 0; i < vgdev->num_capsets; i++) { |
486 | if (vgdev->capsets[i].id == args->cap_set_id) { | 493 | if (vgdev->capsets[i].id == args->cap_set_id) { |
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
496 | return -EINVAL; | 503 | return -EINVAL; |
497 | } | 504 | } |
498 | 505 | ||
499 | size = vgdev->capsets[found_valid].max_size; | 506 | host_caps_size = vgdev->capsets[found_valid].max_size; |
500 | if (args->size > size) { | 507 | /* only copy to user the minimum of the host caps size or the guest caps size */ |
501 | spin_unlock(&vgdev->display_info_lock); | 508 | size = min(args->size, host_caps_size); |
502 | return -EINVAL; | ||
503 | } | ||
504 | 509 | ||
505 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { | 510 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { |
506 | if (cache_ent->id == args->cap_set_id && | 511 | if (cache_ent->id == args->cap_set_id && |
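The virtio_gpu_get_caps_ioctl() change above rejects a zero-length request and clamps the copy length to the host capset size instead of failing when the guest asks for more. A minimal sketch of that clamping logic, assuming a hypothetical clamp_caps_size() helper rather than the driver's own API.

#include <errno.h>
#include <stdio.h>

/* Copy no more than the host advertises; a zero-sized request is invalid. */
static int clamp_caps_size(unsigned int guest_size, unsigned int host_caps_size,
                           unsigned int *copy_size)
{
        if (guest_size == 0)
                return -EINVAL;
        *copy_size = guest_size < host_caps_size ? guest_size : host_caps_size;
        return 0;
}

int main(void)
{
        unsigned int n;

        if (!clamp_caps_size(4096, 308, &n))
                printf("copy %u bytes\n", n);   /* copy 308 bytes */
        return 0;
}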
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 658fa2d3e40c..48685cddbad1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc) | |||
1089 | { | 1089 | { |
1090 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 1090 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
1091 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1091 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1092 | const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; | 1092 | static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; |
1093 | 1093 | ||
1094 | chained_irq_enter(chip, desc); | 1094 | chained_irq_enter(chip, desc); |
1095 | 1095 | ||
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc) | |||
1102 | { | 1102 | { |
1103 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 1103 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
1104 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1105 | const int int_reg[] = { 4, 5, 8, 9}; | 1105 | static const int int_reg[] = { 4, 5, 8, 9}; |
1106 | 1106 | ||
1107 | chained_irq_enter(chip, desc); | 1107 | chained_irq_enter(chip, desc); |
1108 | 1108 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index bb9c087e6c0d..9f2d9ec42add 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c | |||
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) | |||
788 | case V4L2_PIX_FMT_SGBRG8: | 788 | case V4L2_PIX_FMT_SGBRG8: |
789 | case V4L2_PIX_FMT_SGRBG8: | 789 | case V4L2_PIX_FMT_SGRBG8: |
790 | case V4L2_PIX_FMT_SRGGB8: | 790 | case V4L2_PIX_FMT_SRGGB8: |
791 | case V4L2_PIX_FMT_GREY: | ||
791 | offset = image->rect.left + image->rect.top * pix->bytesperline; | 792 | offset = image->rect.left + image->rect.top * pix->bytesperline; |
792 | break; | 793 | break; |
793 | case V4L2_PIX_FMT_SBGGR16: | 794 | case V4L2_PIX_FMT_SBGGR16: |
794 | case V4L2_PIX_FMT_SGBRG16: | 795 | case V4L2_PIX_FMT_SGBRG16: |
795 | case V4L2_PIX_FMT_SGRBG16: | 796 | case V4L2_PIX_FMT_SGRBG16: |
796 | case V4L2_PIX_FMT_SRGGB16: | 797 | case V4L2_PIX_FMT_SRGGB16: |
798 | case V4L2_PIX_FMT_Y16: | ||
797 | offset = image->rect.left * 2 + | 799 | offset = image->rect.left * 2 + |
798 | image->rect.top * pix->bytesperline; | 800 | image->rect.top * pix->bytesperline; |
799 | break; | 801 | break; |
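The ipu-cpmem hunk routes the greyscale formats through the same start-offset arithmetic as the Bayer cases: one byte per pixel for GREY, two for Y16. A standalone sketch of that computation; buffer_offset() and the example geometry are illustrative, not IPU API.

#include <stdio.h>

/* Byte offset of the top-left pixel of a crop rectangle inside the frame. */
static unsigned int buffer_offset(unsigned int left, unsigned int top,
                                  unsigned int bytesperline,
                                  unsigned int bytes_per_pixel)
{
        return left * bytes_per_pixel + top * bytesperline;
}

int main(void)
{
        /* 8-bit GREY: 1 byte/pixel; 16-bit Y16: 2 bytes/pixel */
        printf("%u\n", buffer_offset(16, 8, 640, 1));   /* 5136 */
        printf("%u\n", buffer_offset(16, 8, 1280, 2));  /* 10272 */
        return 0;
}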
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 24e12b87a0cb..caa05b0702e1 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c | |||
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) | |||
288 | case MEDIA_BUS_FMT_SGBRG10_1X10: | 288 | case MEDIA_BUS_FMT_SGBRG10_1X10: |
289 | case MEDIA_BUS_FMT_SGRBG10_1X10: | 289 | case MEDIA_BUS_FMT_SGRBG10_1X10: |
290 | case MEDIA_BUS_FMT_SRGGB10_1X10: | 290 | case MEDIA_BUS_FMT_SRGGB10_1X10: |
291 | case MEDIA_BUS_FMT_Y10_1X10: | ||
291 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; | 292 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; |
292 | cfg->mipi_dt = MIPI_DT_RAW10; | 293 | cfg->mipi_dt = MIPI_DT_RAW10; |
293 | cfg->data_width = IPU_CSI_DATA_WIDTH_10; | 294 | cfg->data_width = IPU_CSI_DATA_WIDTH_10; |
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) | |||
296 | case MEDIA_BUS_FMT_SGBRG12_1X12: | 297 | case MEDIA_BUS_FMT_SGBRG12_1X12: |
297 | case MEDIA_BUS_FMT_SGRBG12_1X12: | 298 | case MEDIA_BUS_FMT_SGRBG12_1X12: |
298 | case MEDIA_BUS_FMT_SRGGB12_1X12: | 299 | case MEDIA_BUS_FMT_SRGGB12_1X12: |
300 | case MEDIA_BUS_FMT_Y12_1X12: | ||
299 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; | 301 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; |
300 | cfg->mipi_dt = MIPI_DT_RAW12; | 302 | cfg->mipi_dt = MIPI_DT_RAW12; |
301 | cfg->data_width = IPU_CSI_DATA_WIDTH_12; | 303 | cfg->data_width = IPU_CSI_DATA_WIDTH_12; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index f1cec3d70498..0f70e8847540 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index) | |||
129 | if (pre_node == pre->dev->of_node) { | 129 | if (pre_node == pre->dev->of_node) { |
130 | mutex_unlock(&ipu_pre_list_mutex); | 130 | mutex_unlock(&ipu_pre_list_mutex); |
131 | device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); | 131 | device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); |
132 | of_node_put(pre_node); | ||
132 | return pre; | 133 | return pre; |
133 | } | 134 | } |
134 | } | 135 | } |
135 | mutex_unlock(&ipu_pre_list_mutex); | 136 | mutex_unlock(&ipu_pre_list_mutex); |
136 | 137 | ||
138 | of_node_put(pre_node); | ||
139 | |||
137 | return NULL; | 140 | return NULL; |
138 | } | 141 | } |
139 | 142 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index 067365c733c6..97b99500153d 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id) | |||
102 | mutex_unlock(&ipu_prg_list_mutex); | 102 | mutex_unlock(&ipu_prg_list_mutex); |
103 | device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); | 103 | device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); |
104 | prg->id = ipu_id; | 104 | prg->id = ipu_id; |
105 | of_node_put(prg_node); | ||
105 | return prg; | 106 | return prg; |
106 | } | 107 | } |
107 | } | 108 | } |
108 | mutex_unlock(&ipu_prg_list_mutex); | 109 | mutex_unlock(&ipu_prg_list_mutex); |
109 | 110 | ||
111 | of_node_put(prg_node); | ||
112 | |||
110 | return NULL; | 113 | return NULL; |
111 | } | 114 | } |
112 | 115 | ||
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 43ddcdfbd0da..9454ac134ce2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -645,6 +645,9 @@ | |||
645 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 | 645 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 |
646 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 | 646 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 |
647 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 | 647 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 |
648 | #define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 | ||
649 | #define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 | ||
650 | #define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 | ||
648 | #define USB_DEVICE_ID_LD_JWM 0x1080 | 651 | #define USB_DEVICE_ID_LD_JWM 0x1080 |
649 | #define USB_DEVICE_ID_LD_DMMP 0x1081 | 652 | #define USB_DEVICE_ID_LD_DMMP 0x1081 |
650 | #define USB_DEVICE_ID_LD_UMIP 0x1090 | 653 | #define USB_DEVICE_ID_LD_UMIP 0x1090 |
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 5f6035a5ce36..e92b77fa574a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
@@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
809 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, | 809 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, |
810 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, | 810 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, |
811 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, | 811 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, |
812 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, | ||
813 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, | ||
814 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, | ||
812 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, | 815 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, |
813 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, | 816 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, |
814 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, | 817 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a9805c7cb305..e2954fb86d65 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -123,8 +123,10 @@ config I2C_I801 | |||
123 | Wildcat Point (PCH) | 123 | Wildcat Point (PCH) |
124 | Wildcat Point-LP (PCH) | 124 | Wildcat Point-LP (PCH) |
125 | BayTrail (SOC) | 125 | BayTrail (SOC) |
126 | Braswell (SOC) | ||
126 | Sunrise Point-H (PCH) | 127 | Sunrise Point-H (PCH) |
127 | Sunrise Point-LP (PCH) | 128 | Sunrise Point-LP (PCH) |
129 | Kaby Lake-H (PCH) | ||
128 | DNV (SOC) | 130 | DNV (SOC) |
129 | Broxton (SOC) | 131 | Broxton (SOC) |
130 | Lewisburg (PCH) | 132 | Lewisburg (PCH) |
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index cd07a69e2e93..44deae78913e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #define BCM2835_I2C_S_CLKT BIT(9) | 50 | #define BCM2835_I2C_S_CLKT BIT(9) |
51 | #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ | 51 | #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ |
52 | 52 | ||
53 | #define BCM2835_I2C_FEDL_SHIFT 16 | ||
54 | #define BCM2835_I2C_REDL_SHIFT 0 | ||
55 | |||
53 | #define BCM2835_I2C_CDIV_MIN 0x0002 | 56 | #define BCM2835_I2C_CDIV_MIN 0x0002 |
54 | #define BCM2835_I2C_CDIV_MAX 0xFFFE | 57 | #define BCM2835_I2C_CDIV_MAX 0xFFFE |
55 | 58 | ||
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg) | |||
81 | 84 | ||
82 | static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) | 85 | static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) |
83 | { | 86 | { |
84 | u32 divider; | 87 | u32 divider, redl, fedl; |
85 | 88 | ||
86 | divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), | 89 | divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), |
87 | i2c_dev->bus_clk_rate); | 90 | i2c_dev->bus_clk_rate); |
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) | |||
100 | 103 | ||
101 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); | 104 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); |
102 | 105 | ||
106 | /* | ||
107 | * Number of core clocks to wait after falling edge before | ||
108 | * outputting the next data bit. Note that both FEDL and REDL | ||
109 | * can't be greater than CDIV/2. | ||
110 | */ | ||
111 | fedl = max(divider / 16, 1u); | ||
112 | |||
113 | /* | ||
114 | * Number of core clocks to wait after rising edge before | ||
115 | * sampling the next incoming data bit. | ||
116 | */ | ||
117 | redl = max(divider / 4, 1u); | ||
118 | |||
119 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL, | ||
120 | (fedl << BCM2835_I2C_FEDL_SHIFT) | | ||
121 | (redl << BCM2835_I2C_REDL_SHIFT)); | ||
103 | return 0; | 122 | return 0; |
104 | } | 123 | } |
105 | 124 | ||
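The bcm2835 change programs the data-delay register from the divider: FEDL = max(divider/16, 1) core clocks after a falling edge, REDL = max(divider/4, 1) after a rising edge, packed at bit 16 and bit 0. A standalone sketch of that packing; bcm2835_del_value() is a hypothetical helper and the clock figures are just example inputs.

#include <stdint.h>
#include <stdio.h>

#define FEDL_SHIFT 16
#define REDL_SHIFT 0

/* divider = DIV_ROUND_UP(core_clk, bus_clk); both delays must stay >= 1 */
static uint32_t bcm2835_del_value(uint32_t core_clk, uint32_t bus_clk)
{
        uint32_t divider = (core_clk + bus_clk - 1) / bus_clk;
        uint32_t fedl = divider / 16 ? divider / 16 : 1;
        uint32_t redl = divider / 4 ? divider / 4 : 1;

        return (fedl << FEDL_SHIFT) | (redl << REDL_SHIFT);
}

int main(void)
{
        /* 150 MHz core clock, 100 kHz bus: divider 1500, FEDL 93, REDL 375 */
        printf("0x%08x\n", bcm2835_del_value(150000000, 100000));
        return 0;
}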
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index ae691884d071..05732531829f 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
209 | i2c_dw_disable_int(dev); | 209 | i2c_dw_disable_int(dev); |
210 | 210 | ||
211 | /* Enable the adapter */ | 211 | /* Enable the adapter */ |
212 | __i2c_dw_enable(dev, true); | 212 | __i2c_dw_enable_and_wait(dev, true); |
213 | 213 | ||
214 | /* Clear and enable interrupts */ | 214 | /* Clear and enable interrupts */ |
215 | dw_readl(dev, DW_IC_CLR_INTR); | 215 | dw_readl(dev, DW_IC_CLR_INTR); |
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) | |||
644 | gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); | 644 | gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); |
645 | if (IS_ERR(gpio)) { | 645 | if (IS_ERR(gpio)) { |
646 | r = PTR_ERR(gpio); | 646 | r = PTR_ERR(gpio); |
647 | if (r == -ENOENT) | 647 | if (r == -ENOENT || r == -ENOSYS) |
648 | return 0; | 648 | return 0; |
649 | return r; | 649 | return r; |
650 | } | 650 | } |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 8eac00efadc1..692b34125866 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -58,6 +58,7 @@ | |||
58 | * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes | 58 | * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes |
59 | * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes | 59 | * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes |
60 | * BayTrail (SOC) 0x0f12 32 hard yes yes yes | 60 | * BayTrail (SOC) 0x0f12 32 hard yes yes yes |
61 | * Braswell (SOC) 0x2292 32 hard yes yes yes | ||
61 | * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes | 62 | * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes |
62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes | 63 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes |
63 | * DNV (SOC) 0x19df 32 hard yes yes yes | 64 | * DNV (SOC) 0x19df 32 hard yes yes yes |
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 2fd8b6d00391..87197ece0f90 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c | |||
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
341 | platform_set_drvdata(pdev, adap); | 341 | platform_set_drvdata(pdev, adap); |
342 | init_completion(&siic->done); | 342 | init_completion(&siic->done); |
343 | 343 | ||
344 | /* Controller Initalisation */ | 344 | /* Controller initialisation */ |
345 | 345 | ||
346 | writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); | 346 | writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); |
347 | while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) | 347 | while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) |
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
369 | * but they start to affect the speed when clock is set to faster | 369 | * but they start to affect the speed when clock is set to faster |
370 | * frequencies. | 370 | * frequencies. |
371 | * Through the actual tests, use the different user_div value(which | 371 | * Through the actual tests, use the different user_div value(which |
372 | * in the divider formular 'Fio / (Fi2c * user_div)') to adapt | 372 | * in the divider formula 'Fio / (Fi2c * user_div)') to adapt |
373 | * the different ranges of i2c bus clock frequency, to make the SCL | 373 | * the different ranges of i2c bus clock frequency, to make the SCL |
374 | * more accurate. | 374 | * more accurate. |
375 | */ | 375 | */ |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 17fd55af4d92..caa20eb5f26b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data) | |||
928 | { | 928 | { |
929 | struct gendisk *p = data; | 929 | struct gendisk *p = data; |
930 | 930 | ||
931 | if (!get_disk(p)) | 931 | if (!get_disk_and_module(p)) |
932 | return -1; | 932 | return -1; |
933 | return 0; | 933 | return 0; |
934 | } | 934 | } |
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 327a49ba1991..9515ca165dfd 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c | |||
@@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev) | |||
243 | ASPEED_ADC_INIT_POLLING_TIME, | 243 | ASPEED_ADC_INIT_POLLING_TIME, |
244 | ASPEED_ADC_INIT_TIMEOUT); | 244 | ASPEED_ADC_INIT_TIMEOUT); |
245 | if (ret) | 245 | if (ret) |
246 | goto scaler_error; | 246 | goto poll_timeout_error; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* Start all channels in normal mode. */ | 249 | /* Start all channels in normal mode. */ |
@@ -274,9 +274,10 @@ iio_register_error: | |||
274 | writel(ASPEED_OPERATION_MODE_POWER_DOWN, | 274 | writel(ASPEED_OPERATION_MODE_POWER_DOWN, |
275 | data->base + ASPEED_REG_ENGINE_CONTROL); | 275 | data->base + ASPEED_REG_ENGINE_CONTROL); |
276 | clk_disable_unprepare(data->clk_scaler->clk); | 276 | clk_disable_unprepare(data->clk_scaler->clk); |
277 | reset_error: | ||
278 | reset_control_assert(data->rst); | ||
279 | clk_enable_error: | 277 | clk_enable_error: |
278 | poll_timeout_error: | ||
279 | reset_control_assert(data->rst); | ||
280 | reset_error: | ||
280 | clk_hw_unregister_divider(data->clk_scaler); | 281 | clk_hw_unregister_divider(data->clk_scaler); |
281 | scaler_error: | 282 | scaler_error: |
282 | clk_hw_unregister_divider(data->clk_prescaler); | 283 | clk_hw_unregister_divider(data->clk_prescaler); |
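The aspeed_adc fix reorders the error labels so a failed init poll asserts the controller reset before unregistering the divider clocks, while each label still undoes only the steps that actually completed. A minimal sketch of that label-per-step goto unwind; the step names and probe() are illustrative, not the driver's code.

#include <stdio.h>

/* Each label undoes exactly the steps that completed before the failure. */
static int probe(int fail_at)
{
        puts("register prescaler");
        if (fail_at == 1)
                return -1;

        puts("register scaler");
        if (fail_at == 2)
                goto scaler_error;

        puts("deassert reset");
        if (fail_at == 3)
                goto reset_error;

        puts("poll for init done");
        if (fail_at == 4)
                goto poll_timeout_error;

        return 0;

poll_timeout_error:
        puts("assert reset");
reset_error:
        puts("unregister scaler");
scaler_error:
        puts("unregister prescaler");
        return -1;
}

int main(void)
{
        printf("probe: %d\n", probe(4));
        return 0;
}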
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 7f5def465340..9a2583caedaa 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c | |||
@@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) | |||
722 | int ret; | 722 | int ret; |
723 | u32 val; | 723 | u32 val; |
724 | 724 | ||
725 | /* Clear ADRDY by writing one, then enable ADC */ | ||
726 | stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); | ||
727 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); | 725 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); |
728 | 726 | ||
729 | /* Poll for ADRDY to be set (after adc startup time) */ | 727 | /* Poll for ADRDY to be set (after adc startup time) */ |
@@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) | |||
731 | val & STM32H7_ADRDY, | 729 | val & STM32H7_ADRDY, |
732 | 100, STM32_ADC_TIMEOUT_US); | 730 | 100, STM32_ADC_TIMEOUT_US); |
733 | if (ret) { | 731 | if (ret) { |
734 | stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); | 732 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS); |
735 | dev_err(&indio_dev->dev, "Failed to enable ADC\n"); | 733 | dev_err(&indio_dev->dev, "Failed to enable ADC\n"); |
734 | } else { | ||
735 | /* Clear ADRDY by writing one */ | ||
736 | stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); | ||
736 | } | 737 | } |
737 | 738 | ||
738 | return ret; | 739 | return ret; |
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index 0dd5a381be64..457372f36791 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c | |||
@@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
46 | if (adis->trig == NULL) | 46 | if (adis->trig == NULL) |
47 | return -ENOMEM; | 47 | return -ENOMEM; |
48 | 48 | ||
49 | adis->trig->dev.parent = &adis->spi->dev; | ||
50 | adis->trig->ops = &adis_trigger_ops; | ||
51 | iio_trigger_set_drvdata(adis->trig, adis); | ||
52 | |||
49 | ret = request_irq(adis->spi->irq, | 53 | ret = request_irq(adis->spi->irq, |
50 | &iio_trigger_generic_data_rdy_poll, | 54 | &iio_trigger_generic_data_rdy_poll, |
51 | IRQF_TRIGGER_RISING, | 55 | IRQF_TRIGGER_RISING, |
@@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
54 | if (ret) | 58 | if (ret) |
55 | goto error_free_trig; | 59 | goto error_free_trig; |
56 | 60 | ||
57 | adis->trig->dev.parent = &adis->spi->dev; | ||
58 | adis->trig->ops = &adis_trigger_ops; | ||
59 | iio_trigger_set_drvdata(adis->trig, adis); | ||
60 | ret = iio_trigger_register(adis->trig); | 61 | ret = iio_trigger_register(adis->trig); |
61 | 62 | ||
62 | indio_dev->trig = iio_trigger_get(adis->trig); | 63 | indio_dev->trig = iio_trigger_get(adis->trig); |
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 79abf70a126d..cd5bfe39591b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c | |||
@@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp, | |||
175 | struct iio_dev *indio_dev = filp->private_data; | 175 | struct iio_dev *indio_dev = filp->private_data; |
176 | struct iio_buffer *rb = indio_dev->buffer; | 176 | struct iio_buffer *rb = indio_dev->buffer; |
177 | 177 | ||
178 | if (!indio_dev->info) | 178 | if (!indio_dev->info || rb == NULL) |
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | poll_wait(filp, &rb->pollq, wait); | 181 | poll_wait(filp, &rb->pollq, wait); |
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index fcb1c4ba5e41..f726f9427602 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig | |||
@@ -68,6 +68,8 @@ config SX9500 | |||
68 | 68 | ||
69 | config SRF08 | 69 | config SRF08 |
70 | tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" | 70 | tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" |
71 | select IIO_BUFFER | ||
72 | select IIO_TRIGGERED_BUFFER | ||
71 | depends on I2C | 73 | depends on I2C |
72 | help | 74 | help |
73 | Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 | 75 | Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index c4560d84dfae..25bb178f6074 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
@@ -305,16 +305,21 @@ void nldev_exit(void); | |||
305 | static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, | 305 | static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, |
306 | struct ib_pd *pd, | 306 | struct ib_pd *pd, |
307 | struct ib_qp_init_attr *attr, | 307 | struct ib_qp_init_attr *attr, |
308 | struct ib_udata *udata) | 308 | struct ib_udata *udata, |
309 | struct ib_uobject *uobj) | ||
309 | { | 310 | { |
310 | struct ib_qp *qp; | 311 | struct ib_qp *qp; |
311 | 312 | ||
313 | if (!dev->create_qp) | ||
314 | return ERR_PTR(-EOPNOTSUPP); | ||
315 | |||
312 | qp = dev->create_qp(pd, attr, udata); | 316 | qp = dev->create_qp(pd, attr, udata); |
313 | if (IS_ERR(qp)) | 317 | if (IS_ERR(qp)) |
314 | return qp; | 318 | return qp; |
315 | 319 | ||
316 | qp->device = dev; | 320 | qp->device = dev; |
317 | qp->pd = pd; | 321 | qp->pd = pd; |
322 | qp->uobject = uobj; | ||
318 | /* | 323 | /* |
319 | * We don't track XRC QPs for now, because they don't have PD | 324 | * We don't track XRC QPs for now, because they don't have PD |
320 | * and more importantly they are created internaly by driver, | 325 | * and more importantly they are created internaly by driver, |
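The core_priv.h hunk makes _ib_create_qp() reject devices that do not implement create_qp with -EOPNOTSUPP instead of calling through a NULL method pointer, and records the owning uobject on the new QP. A minimal sketch of the optional-op check, assuming a hypothetical struct dev_ops and dev_create() rather than the verbs types.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct dev_ops {
        int (*create_qp)(void);         /* optional: may be NULL */
};

/* Probe the optional op before calling it; -EOPNOTSUPP if absent. */
static int dev_create(const struct dev_ops *ops)
{
        if (!ops->create_qp)
                return -EOPNOTSUPP;
        return ops->create_qp();
}

int main(void)
{
        struct dev_ops none = { .create_qp = NULL };

        printf("%d\n", dev_create(&none));      /* -EOPNOTSUPP (-95 on Linux) */
        return 0;
}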
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 85b5ee4defa4..d8eead5d106d 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c | |||
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context, | |||
141 | */ | 141 | */ |
142 | uobj->context = context; | 142 | uobj->context = context; |
143 | uobj->type = type; | 143 | uobj->type = type; |
144 | atomic_set(&uobj->usecnt, 0); | 144 | /* |
145 | * Allocated objects start out as write locked to deny any other | ||
146 | * syscalls from accessing them until they are committed. See | ||
147 | * rdma_alloc_commit_uobject | ||
148 | */ | ||
149 | atomic_set(&uobj->usecnt, -1); | ||
145 | kref_init(&uobj->ref); | 150 | kref_init(&uobj->ref); |
146 | 151 | ||
147 | return uobj; | 152 | return uobj; |
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t | |||
196 | goto free; | 201 | goto free; |
197 | } | 202 | } |
198 | 203 | ||
199 | uverbs_uobject_get(uobj); | 204 | /* |
205 | * The idr_find is guaranteed to return a pointer to something that | ||
206 | * isn't freed yet, or NULL, as the free after idr_remove goes through | ||
207 | * kfree_rcu(). However the object may still have been released and | ||
208 | * kfree() could be called at any time. | ||
209 | */ | ||
210 | if (!kref_get_unless_zero(&uobj->ref)) | ||
211 | uobj = ERR_PTR(-ENOENT); | ||
212 | |||
200 | free: | 213 | free: |
201 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
202 | return uobj; | 215 | return uobj; |
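The lookup above now takes its reference with kref_get_unless_zero(), so an object whose last reference was dropped between idr_find() and the get is reported as -ENOENT rather than resurrected. A standalone sketch of the get-unless-zero rule using C11 atomics; struct obj and lookup_get() are illustrative stand-ins, not the uverbs objects.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
        atomic_int refcount;
};

/* Only take a reference if the object is still live (refcount > 0). */
static int lookup_get(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        do {
                if (old == 0)
                        return -ENOENT; /* already released, don't revive it */
        } while (!atomic_compare_exchange_weak(&o->refcount, &old, old + 1));

        return 0;
}

int main(void)
{
        struct obj live, dead;

        atomic_init(&live.refcount, 1);
        atomic_init(&dead.refcount, 0);
        printf("%d %d\n", lookup_get(&live), lookup_get(&dead));  /* 0 -ENOENT */
        return 0;
}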
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj, | |||
399 | return ret; | 412 | return ret; |
400 | } | 413 | } |
401 | 414 | ||
402 | static void lockdep_check(struct ib_uobject *uobj, bool exclusive) | 415 | static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive) |
403 | { | 416 | { |
404 | #ifdef CONFIG_LOCKDEP | 417 | #ifdef CONFIG_LOCKDEP |
405 | if (exclusive) | 418 | if (exclusive) |
406 | WARN_ON(atomic_read(&uobj->usecnt) > 0); | 419 | WARN_ON(atomic_read(&uobj->usecnt) != -1); |
407 | else | 420 | else |
408 | WARN_ON(atomic_read(&uobj->usecnt) == -1); | 421 | WARN_ON(atomic_read(&uobj->usecnt) <= 0); |
409 | #endif | 422 | #endif |
410 | } | 423 | } |
411 | 424 | ||
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj) | |||
444 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); | 457 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); |
445 | return 0; | 458 | return 0; |
446 | } | 459 | } |
447 | lockdep_check(uobj, true); | 460 | assert_uverbs_usecnt(uobj, true); |
448 | ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); | 461 | ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); |
449 | 462 | ||
450 | up_read(&ucontext->cleanup_rwsem); | 463 | up_read(&ucontext->cleanup_rwsem); |
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) | |||
474 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); | 487 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); |
475 | return 0; | 488 | return 0; |
476 | } | 489 | } |
477 | lockdep_check(uobject, true); | 490 | assert_uverbs_usecnt(uobject, true); |
478 | ret = uobject->type->type_class->remove_commit(uobject, | 491 | ret = uobject->type->type_class->remove_commit(uobject, |
479 | RDMA_REMOVE_DESTROY); | 492 | RDMA_REMOVE_DESTROY); |
480 | if (ret) | 493 | if (ret) |
481 | return ret; | 494 | goto out; |
482 | 495 | ||
483 | uobject->type = &null_obj_type; | 496 | uobject->type = &null_obj_type; |
484 | 497 | ||
498 | out: | ||
485 | up_read(&ucontext->cleanup_rwsem); | 499 | up_read(&ucontext->cleanup_rwsem); |
486 | return 0; | 500 | return ret; |
487 | } | 501 | } |
488 | 502 | ||
489 | static void alloc_commit_idr_uobject(struct ib_uobject *uobj) | 503 | static void alloc_commit_idr_uobject(struct ib_uobject *uobj) |
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj) | |||
527 | return ret; | 541 | return ret; |
528 | } | 542 | } |
529 | 543 | ||
544 | /* matches atomic_set(-1) in alloc_uobj */ | ||
545 | assert_uverbs_usecnt(uobj, true); | ||
546 | atomic_set(&uobj->usecnt, 0); | ||
547 | |||
530 | uobj->type->type_class->alloc_commit(uobj); | 548 | uobj->type->type_class->alloc_commit(uobj); |
531 | up_read(&uobj->context->cleanup_rwsem); | 549 | up_read(&uobj->context->cleanup_rwsem); |
532 | 550 | ||
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive) | |||
561 | 579 | ||
562 | void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) | 580 | void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) |
563 | { | 581 | { |
564 | lockdep_check(uobj, exclusive); | 582 | assert_uverbs_usecnt(uobj, exclusive); |
565 | uobj->type->type_class->lookup_put(uobj, exclusive); | 583 | uobj->type->type_class->lookup_put(uobj, exclusive); |
566 | /* | 584 | /* |
567 | * In order to unlock an object, either decrease its usecnt for | 585 | * In order to unlock an object, either decrease its usecnt for |
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 857637bf46da..3dbc4e4cca41 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <rdma/restrack.h> | 7 | #include <rdma/restrack.h> |
8 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
9 | #include <linux/sched/task.h> | 9 | #include <linux/sched/task.h> |
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/pid_namespace.h> | 10 | #include <linux/pid_namespace.h> |
12 | 11 | ||
13 | void rdma_restrack_init(struct rdma_restrack_root *res) | 12 | void rdma_restrack_init(struct rdma_restrack_root *res) |
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
63 | { | 62 | { |
64 | enum rdma_restrack_type type = res->type; | 63 | enum rdma_restrack_type type = res->type; |
65 | struct ib_device *dev; | 64 | struct ib_device *dev; |
66 | struct ib_xrcd *xrcd; | ||
67 | struct ib_pd *pd; | 65 | struct ib_pd *pd; |
68 | struct ib_cq *cq; | 66 | struct ib_cq *cq; |
69 | struct ib_qp *qp; | 67 | struct ib_qp *qp; |
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
81 | qp = container_of(res, struct ib_qp, res); | 79 | qp = container_of(res, struct ib_qp, res); |
82 | dev = qp->device; | 80 | dev = qp->device; |
83 | break; | 81 | break; |
84 | case RDMA_RESTRACK_XRCD: | ||
85 | xrcd = container_of(res, struct ib_xrcd, res); | ||
86 | dev = xrcd->device; | ||
87 | break; | ||
88 | default: | 82 | default: |
89 | WARN_ONCE(true, "Wrong resource tracking type %u\n", type); | 83 | WARN_ONCE(true, "Wrong resource tracking type %u\n", type); |
90 | return NULL; | 84 | return NULL; |
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
93 | return dev; | 87 | return dev; |
94 | } | 88 | } |
95 | 89 | ||
90 | static bool res_is_user(struct rdma_restrack_entry *res) | ||
91 | { | ||
92 | switch (res->type) { | ||
93 | case RDMA_RESTRACK_PD: | ||
94 | return container_of(res, struct ib_pd, res)->uobject; | ||
95 | case RDMA_RESTRACK_CQ: | ||
96 | return container_of(res, struct ib_cq, res)->uobject; | ||
97 | case RDMA_RESTRACK_QP: | ||
98 | return container_of(res, struct ib_qp, res)->uobject; | ||
99 | default: | ||
100 | WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); | ||
101 | return false; | ||
102 | } | ||
103 | } | ||
104 | |||
96 | void rdma_restrack_add(struct rdma_restrack_entry *res) | 105 | void rdma_restrack_add(struct rdma_restrack_entry *res) |
97 | { | 106 | { |
98 | struct ib_device *dev = res_to_dev(res); | 107 | struct ib_device *dev = res_to_dev(res); |
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res) | |||
100 | if (!dev) | 109 | if (!dev) |
101 | return; | 110 | return; |
102 | 111 | ||
103 | if (!uaccess_kernel()) { | 112 | if (res_is_user(res)) { |
104 | get_task_struct(current); | 113 | get_task_struct(current); |
105 | res->task = current; | 114 | res->task = current; |
106 | res->kern_name = NULL; | 115 | res->kern_name = NULL; |
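The restrack change above decides kernel versus user ownership by mapping the embedded tracking entry back to its parent object and checking its uobject pointer, instead of relying on uaccess_kernel(). A standalone sketch of that container_of() back-mapping; struct tracked_pd and pd_is_user() are illustrative names, not the RDMA structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct restrack_entry { int type; };

struct tracked_pd {
        void *uobject;                  /* non-NULL only for user objects */
        struct restrack_entry res;      /* embedded tracking entry */
};

/* Map the embedded entry back to its parent and check who owns it. */
static bool pd_is_user(struct restrack_entry *res)
{
        return container_of(res, struct tracked_pd, res)->uobject != NULL;
}

int main(void)
{
        struct tracked_pd kern = { .uobject = NULL };
        struct tracked_pd user = { .uobject = &kern };

        printf("%d %d\n", pd_is_user(&kern.res), pd_is_user(&user.res)); /* 0 1 */
        return 0;
}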
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 256934d1f64f..a148de35df8d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, | |||
562 | if (f.file) | 562 | if (f.file) |
563 | fdput(f); | 563 | fdput(f); |
564 | 564 | ||
565 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
566 | |||
565 | uobj_alloc_commit(&obj->uobject); | 567 | uobj_alloc_commit(&obj->uobject); |
566 | 568 | ||
567 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
568 | return in_len; | 569 | return in_len; |
569 | 570 | ||
570 | err_copy: | 571 | err_copy: |
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, | |||
603 | 604 | ||
604 | uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, | 605 | uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, |
605 | file->ucontext); | 606 | file->ucontext); |
606 | if (IS_ERR(uobj)) { | 607 | if (IS_ERR(uobj)) |
607 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
608 | return PTR_ERR(uobj); | 608 | return PTR_ERR(uobj); |
609 | } | ||
610 | 609 | ||
611 | ret = uobj_remove_commit(uobj); | 610 | ret = uobj_remove_commit(uobj); |
612 | return ret ?: in_len; | 611 | return ret ?: in_len; |
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
979 | struct ib_uverbs_ex_create_cq_resp resp; | 978 | struct ib_uverbs_ex_create_cq_resp resp; |
980 | struct ib_cq_init_attr attr = {}; | 979 | struct ib_cq_init_attr attr = {}; |
981 | 980 | ||
981 | if (!ib_dev->create_cq) | ||
982 | return ERR_PTR(-EOPNOTSUPP); | ||
983 | |||
982 | if (cmd->comp_vector >= file->device->num_comp_vectors) | 984 | if (cmd->comp_vector >= file->device->num_comp_vectors) |
983 | return ERR_PTR(-EINVAL); | 985 | return ERR_PTR(-EINVAL); |
984 | 986 | ||
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
1030 | resp.response_length = offsetof(typeof(resp), response_length) + | 1032 | resp.response_length = offsetof(typeof(resp), response_length) + |
1031 | sizeof(resp.response_length); | 1033 | sizeof(resp.response_length); |
1032 | 1034 | ||
1035 | cq->res.type = RDMA_RESTRACK_CQ; | ||
1036 | rdma_restrack_add(&cq->res); | ||
1037 | |||
1033 | ret = cb(file, obj, &resp, ucore, context); | 1038 | ret = cb(file, obj, &resp, ucore, context); |
1034 | if (ret) | 1039 | if (ret) |
1035 | goto err_cb; | 1040 | goto err_cb; |
1036 | 1041 | ||
1037 | uobj_alloc_commit(&obj->uobject); | 1042 | uobj_alloc_commit(&obj->uobject); |
1038 | cq->res.type = RDMA_RESTRACK_CQ; | ||
1039 | rdma_restrack_add(&cq->res); | ||
1040 | |||
1041 | return obj; | 1043 | return obj; |
1042 | 1044 | ||
1043 | err_cb: | 1045 | err_cb: |
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1518 | if (cmd->qp_type == IB_QPT_XRC_TGT) | 1520 | if (cmd->qp_type == IB_QPT_XRC_TGT) |
1519 | qp = ib_create_qp(pd, &attr); | 1521 | qp = ib_create_qp(pd, &attr); |
1520 | else | 1522 | else |
1521 | qp = _ib_create_qp(device, pd, &attr, uhw); | 1523 | qp = _ib_create_qp(device, pd, &attr, uhw, |
1524 | &obj->uevent.uobject); | ||
1522 | 1525 | ||
1523 | if (IS_ERR(qp)) { | 1526 | if (IS_ERR(qp)) { |
1524 | ret = PTR_ERR(qp); | 1527 | ret = PTR_ERR(qp); |
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1550 | atomic_inc(&attr.srq->usecnt); | 1553 | atomic_inc(&attr.srq->usecnt); |
1551 | if (ind_tbl) | 1554 | if (ind_tbl) |
1552 | atomic_inc(&ind_tbl->usecnt); | 1555 | atomic_inc(&ind_tbl->usecnt); |
1556 | } else { | ||
1557 | /* It is done in _ib_create_qp for other QP types */ | ||
1558 | qp->uobject = &obj->uevent.uobject; | ||
1553 | } | 1559 | } |
1554 | qp->uobject = &obj->uevent.uobject; | ||
1555 | 1560 | ||
1556 | obj->uevent.uobject.object = qp; | 1561 | obj->uevent.uobject.object = qp; |
1557 | 1562 | ||
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
1971 | goto release_qp; | 1976 | goto release_qp; |
1972 | } | 1977 | } |
1973 | 1978 | ||
1979 | if ((cmd->base.attr_mask & IB_QP_AV) && | ||
1980 | !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { | ||
1981 | ret = -EINVAL; | ||
1982 | goto release_qp; | ||
1983 | } | ||
1984 | |||
1974 | if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && | 1985 | if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && |
1975 | !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { | 1986 | (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || |
1987 | !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { | ||
1976 | ret = -EINVAL; | 1988 | ret = -EINVAL; |
1977 | goto release_qp; | 1989 | goto release_qp; |
1978 | } | 1990 | } |
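The modify_qp hunk above validates the primary destination port (for IB_QP_AV) and both alternate ports (for IB_QP_ALT_PATH) before handing the attributes to the driver. A minimal sketch of the range check; port_is_valid() is an illustrative stand-in for rdma_is_port_valid(), assuming ports numbered from 1.

#include <stdbool.h>
#include <stdio.h>

/* Ports are numbered 1..phys_port_cnt; 0 and anything above are invalid. */
static bool port_is_valid(unsigned int port, unsigned int phys_port_cnt)
{
        return port >= 1 && port <= phys_port_cnt;
}

int main(void)
{
        printf("%d %d %d\n",
               port_is_valid(0, 2),     /* 0: rejected */
               port_is_valid(1, 2),     /* 1: ok */
               port_is_valid(3, 2));    /* 0: rejected */
        return 0;
}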
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, | |||
2941 | wq_init_attr.create_flags = cmd.create_flags; | 2953 | wq_init_attr.create_flags = cmd.create_flags; |
2942 | obj->uevent.events_reported = 0; | 2954 | obj->uevent.events_reported = 0; |
2943 | INIT_LIST_HEAD(&obj->uevent.event_list); | 2955 | INIT_LIST_HEAD(&obj->uevent.event_list); |
2956 | |||
2957 | if (!pd->device->create_wq) { | ||
2958 | err = -EOPNOTSUPP; | ||
2959 | goto err_put_cq; | ||
2960 | } | ||
2944 | wq = pd->device->create_wq(pd, &wq_init_attr, uhw); | 2961 | wq = pd->device->create_wq(pd, &wq_init_attr, uhw); |
2945 | if (IS_ERR(wq)) { | 2962 | if (IS_ERR(wq)) { |
2946 | err = PTR_ERR(wq); | 2963 | err = PTR_ERR(wq); |
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, | |||
3084 | wq_attr.flags = cmd.flags; | 3101 | wq_attr.flags = cmd.flags; |
3085 | wq_attr.flags_mask = cmd.flags_mask; | 3102 | wq_attr.flags_mask = cmd.flags_mask; |
3086 | } | 3103 | } |
3104 | if (!wq->device->modify_wq) { | ||
3105 | ret = -EOPNOTSUPP; | ||
3106 | goto out; | ||
3107 | } | ||
3087 | ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); | 3108 | ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); |
3109 | out: | ||
3088 | uobj_put_obj_read(wq); | 3110 | uobj_put_obj_read(wq); |
3089 | return ret; | 3111 | return ret; |
3090 | } | 3112 | } |
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, | |||
3181 | 3203 | ||
3182 | init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; | 3204 | init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; |
3183 | init_attr.ind_tbl = wqs; | 3205 | init_attr.ind_tbl = wqs; |
3206 | |||
3207 | if (!ib_dev->create_rwq_ind_table) { | ||
3208 | err = -EOPNOTSUPP; | ||
3209 | goto err_uobj; | ||
3210 | } | ||
3184 | rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); | 3211 | rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); |
3185 | 3212 | ||
3186 | if (IS_ERR(rwq_ind_tbl)) { | 3213 | if (IS_ERR(rwq_ind_tbl)) { |
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, | |||
3770 | struct ib_device_attr attr = {0}; | 3797 | struct ib_device_attr attr = {0}; |
3771 | int err; | 3798 | int err; |
3772 | 3799 | ||
3800 | if (!ib_dev->query_device) | ||
3801 | return -EOPNOTSUPP; | ||
3802 | |||
3773 | if (ucore->inlen < sizeof(cmd)) | 3803 | if (ucore->inlen < sizeof(cmd)) |
3774 | return -EINVAL; | 3804 | return -EINVAL; |
3775 | 3805 | ||
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index d96dc1d17be1..339b85145044 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c | |||
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev, | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) | ||
63 | return -EINVAL; | ||
64 | |||
62 | spec = &attr_spec_bucket->attrs[attr_id]; | 65 | spec = &attr_spec_bucket->attrs[attr_id]; |
63 | e = &elements[attr_id]; | 66 | e = &elements[attr_id]; |
64 | e->uattr = uattr_ptr; | 67 | e->uattr = uattr_ptr; |
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 062485f9300d..62e1eb1d2a28 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c | |||
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters, | |||
114 | short min = SHRT_MAX; | 114 | short min = SHRT_MAX; |
115 | const void *elem; | 115 | const void *elem; |
116 | int i, j, last_stored = -1; | 116 | int i, j, last_stored = -1; |
117 | unsigned int equal_min = 0; | ||
117 | 118 | ||
118 | for_each_element(elem, i, j, elements, num_elements, num_offset, | 119 | for_each_element(elem, i, j, elements, num_elements, num_offset, |
119 | data_offset) { | 120 | data_offset) { |
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters, | |||
136 | */ | 137 | */ |
137 | iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; | 138 | iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; |
138 | last_stored = i; | 139 | last_stored = i; |
140 | if (min == GET_ID(id)) | ||
141 | equal_min++; | ||
142 | else | ||
143 | equal_min = 1; | ||
139 | min = GET_ID(id); | 144 | min = GET_ID(id); |
140 | } | 145 | } |
141 | 146 | ||
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters, | |||
146 | * Therefore, we need to clean the beginning of the array to make sure | 151 | * Therefore, we need to clean the beginning of the array to make sure |
147 | * all ids of final elements are equal to min. | 152 | * all ids of final elements are equal to min. |
148 | */ | 153 | */ |
149 | for (i = num_iters - 1; i >= 0 && | 154 | memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min); |
150 | GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) | ||
151 | ; | ||
152 | |||
153 | num_iters -= i + 1; | ||
154 | memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); | ||
155 | 155 | ||
156 | *min_id = min; | 156 | *min_id = min; |
157 | return num_iters; | 157 | return equal_min; |
158 | } | 158 | } |
159 | 159 | ||
160 | #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ | 160 | #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ |
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me | |||
322 | hash = kzalloc(sizeof(*hash) + | 322 | hash = kzalloc(sizeof(*hash) + |
323 | ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), | 323 | ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), |
324 | sizeof(long)) + | 324 | sizeof(long)) + |
325 | BITS_TO_LONGS(attr_max_bucket) * sizeof(long), | 325 | BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long), |
326 | GFP_KERNEL); | 326 | GFP_KERNEL); |
327 | if (!hash) { | 327 | if (!hash) { |
328 | res = -ENOMEM; | 328 | res = -ENOMEM; |
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_ | |||
509 | * first handler which != NULL. This also defines the | 509 | * first handler which != NULL. This also defines the |
510 | * set of flags used for this handler. | 510 | * set of flags used for this handler. |
511 | */ | 511 | */ |
512 | for (i = num_object_defs - 1; | 512 | for (i = num_method_defs - 1; |
513 | i >= 0 && !method_defs[i]->handler; i--) | 513 | i >= 0 && !method_defs[i]->handler; i--) |
514 | ; | 514 | ; |
515 | hash->methods[min_id++] = method; | 515 | hash->methods[min_id++] = method; |
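Among the ioctl-merge fixes above, get_elements_above_id() now keeps a running count (equal_min) of how many stored iterators share the current minimum id and moves just that tail to the front, replacing the fragile backward scan. A standalone sketch of the same bookkeeping over a plain int array; collect_min() and the sample data are illustrative only.

#include <limits.h>
#include <stdio.h>
#include <string.h>

/*
 * Append each value that is <= the running minimum, remember how many of
 * the stored entries equal the final minimum, then keep only that tail.
 */
static size_t collect_min(const int *values, size_t n, int *out)
{
        size_t stored = 0, equal_min = 0;
        size_t i;
        int min = INT_MAX;

        for (i = 0; i < n; i++) {
                if (values[i] > min)
                        continue;
                out[stored++] = values[i];
                if (values[i] == min)
                        equal_min++;
                else
                        equal_min = 1;
                min = values[i];
        }

        memmove(out, out + stored - equal_min, sizeof(*out) * equal_min);
        return equal_min;
}

int main(void)
{
        int v[] = { 5, 3, 7, 3, 4, 3 }, out[6];
        size_t i, n = collect_min(v, 6, out);

        for (i = 0; i < n; i++)
                printf("%d ", out[i]);  /* 3 3 3 */
        printf("\n");
        return 0;
}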
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 395a3b091229..b1ca223aa380 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command) | |||
650 | return -1; | 650 | return -1; |
651 | } | 651 | } |
652 | 652 | ||
653 | static bool verify_command_idx(u32 command, bool extended) | ||
654 | { | ||
655 | if (extended) | ||
656 | return command < ARRAY_SIZE(uverbs_ex_cmd_table); | ||
657 | |||
658 | return command < ARRAY_SIZE(uverbs_cmd_table); | ||
659 | } | ||
660 | |||
653 | static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | 661 | static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, |
654 | size_t count, loff_t *pos) | 662 | size_t count, loff_t *pos) |
655 | { | 663 | { |
656 | struct ib_uverbs_file *file = filp->private_data; | 664 | struct ib_uverbs_file *file = filp->private_data; |
657 | struct ib_device *ib_dev; | 665 | struct ib_device *ib_dev; |
658 | struct ib_uverbs_cmd_hdr hdr; | 666 | struct ib_uverbs_cmd_hdr hdr; |
667 | bool extended_command; | ||
659 | __u32 command; | 668 | __u32 command; |
660 | __u32 flags; | 669 | __u32 flags; |
661 | int srcu_key; | 670 | int srcu_key; |
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
688 | } | 697 | } |
689 | 698 | ||
690 | command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; | 699 | command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; |
700 | flags = (hdr.command & | ||
701 | IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; | ||
702 | |||
703 | extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED; | ||
704 | if (!verify_command_idx(command, extended_command)) { | ||
705 | ret = -EINVAL; | ||
706 | goto out; | ||
707 | } | ||
708 | |||
691 | if (verify_command_mask(ib_dev, command)) { | 709 | if (verify_command_mask(ib_dev, command)) { |
692 | ret = -EOPNOTSUPP; | 710 | ret = -EOPNOTSUPP; |
693 | goto out; | 711 | goto out; |
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
699 | goto out; | 717 | goto out; |
700 | } | 718 | } |
701 | 719 | ||
702 | flags = (hdr.command & | ||
703 | IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; | ||
704 | |||
705 | if (!flags) { | 720 | if (!flags) { |
706 | if (command >= ARRAY_SIZE(uverbs_cmd_table) || | 721 | if (!uverbs_cmd_table[command]) { |
707 | !uverbs_cmd_table[command]) { | ||
708 | ret = -EINVAL; | 722 | ret = -EINVAL; |
709 | goto out; | 723 | goto out; |
710 | } | 724 | } |
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
725 | struct ib_udata uhw; | 739 | struct ib_udata uhw; |
726 | size_t written_count = count; | 740 | size_t written_count = count; |
727 | 741 | ||
728 | if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || | 742 | if (!uverbs_ex_cmd_table[command]) { |
729 | !uverbs_ex_cmd_table[command]) { | ||
730 | ret = -ENOSYS; | 743 | ret = -ENOSYS; |
731 | goto out; | 744 | goto out; |
732 | } | 745 | } |
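ib_uverbs_write() now decodes the header once into a command index and flags and bounds-checks the index against the right dispatch table before any other use. A minimal sketch of that decode-then-validate order; the mask values, table sizes, and check_command() here are illustrative placeholders, not the uverbs ABI constants.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: low byte is the command, a flag bit marks "extended". */
#define CMD_COMMAND_MASK   0x000000ffu
#define CMD_FLAGS_MASK     0xff000000u
#define CMD_FLAGS_SHIFT    24
#define CMD_FLAG_EXTENDED  0x80u

#define CMD_TABLE_SIZE     4    /* stand-in for ARRAY_SIZE(uverbs_cmd_table) */
#define EX_CMD_TABLE_SIZE  2    /* stand-in for ARRAY_SIZE(uverbs_ex_cmd_table) */

/* Decode once, then bounds-check the index before any table lookup. */
static int check_command(uint32_t header)
{
        uint32_t command = header & CMD_COMMAND_MASK;
        uint32_t flags = (header & CMD_FLAGS_MASK) >> CMD_FLAGS_SHIFT;
        bool extended = flags & CMD_FLAG_EXTENDED;
        uint32_t limit = extended ? EX_CMD_TABLE_SIZE : CMD_TABLE_SIZE;

        return command < limit ? 0 : -EINVAL;
}

int main(void)
{
        /* 0 -EINVAL: command 3 exists in the basic table but not the extended one */
        printf("%d %d\n", check_command(3), check_command(0x80000003u));
        return 0;
}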
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = { | |||
942 | .llseek = no_llseek, | 955 | .llseek = no_llseek, |
943 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) | 956 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) |
944 | .unlocked_ioctl = ib_uverbs_ioctl, | 957 | .unlocked_ioctl = ib_uverbs_ioctl, |
958 | .compat_ioctl = ib_uverbs_ioctl, | ||
945 | #endif | 959 | #endif |
946 | }; | 960 | }; |
947 | 961 | ||
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = { | |||
954 | .llseek = no_llseek, | 968 | .llseek = no_llseek, |
955 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) | 969 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) |
956 | .unlocked_ioctl = ib_uverbs_ioctl, | 970 | .unlocked_ioctl = ib_uverbs_ioctl, |
971 | .compat_ioctl = ib_uverbs_ioctl, | ||
957 | #endif | 972 | #endif |
958 | }; | 973 | }; |
959 | 974 | ||
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index cab0ac3556eb..df1360e6774f 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c | |||
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx, | |||
234 | uverbs_attr_get(ctx, UVERBS_UHW_OUT); | 234 | uverbs_attr_get(ctx, UVERBS_UHW_OUT); |
235 | 235 | ||
236 | if (!IS_ERR(uhw_in)) { | 236 | if (!IS_ERR(uhw_in)) { |
237 | udata->inbuf = uhw_in->ptr_attr.ptr; | ||
238 | udata->inlen = uhw_in->ptr_attr.len; | 237 | udata->inlen = uhw_in->ptr_attr.len; |
238 | if (uverbs_attr_ptr_is_inline(uhw_in)) | ||
239 | udata->inbuf = &uhw_in->uattr->data; | ||
240 | else | ||
241 | udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data); | ||
239 | } else { | 242 | } else { |
240 | udata->inbuf = NULL; | 243 | udata->inbuf = NULL; |
241 | udata->inlen = 0; | 244 | udata->inlen = 0; |
242 | } | 245 | } |
243 | 246 | ||
244 | if (!IS_ERR(uhw_out)) { | 247 | if (!IS_ERR(uhw_out)) { |
245 | udata->outbuf = uhw_out->ptr_attr.ptr; | 248 | udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data); |
246 | udata->outlen = uhw_out->ptr_attr.len; | 249 | udata->outlen = uhw_out->ptr_attr.len; |
247 | } else { | 250 | } else { |
248 | udata->outbuf = NULL; | 251 | udata->outbuf = NULL; |
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, | |||
323 | cq->res.type = RDMA_RESTRACK_CQ; | 326 | cq->res.type = RDMA_RESTRACK_CQ; |
324 | rdma_restrack_add(&cq->res); | 327 | rdma_restrack_add(&cq->res); |
325 | 328 | ||
326 | ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe); | 329 | ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe, |
330 | sizeof(cq->cqe)); | ||
327 | if (ret) | 331 | if (ret) |
328 | goto err_cq; | 332 | goto err_cq; |
329 | 333 | ||
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev, | |||
375 | resp.comp_events_reported = obj->comp_events_reported; | 379 | resp.comp_events_reported = obj->comp_events_reported; |
376 | resp.async_events_reported = obj->async_events_reported; | 380 | resp.async_events_reported = obj->async_events_reported; |
377 | 381 | ||
378 | return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp); | 382 | return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp)); |
379 | } | 383 | } |
380 | 384 | ||
381 | static DECLARE_UVERBS_METHOD( | 385 | static DECLARE_UVERBS_METHOD( |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 16ebc6372c31..93025d2009b8 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
887 | if (qp_init_attr->cap.max_rdma_ctxs) | 887 | if (qp_init_attr->cap.max_rdma_ctxs) |
888 | rdma_rw_init_qp(device, qp_init_attr); | 888 | rdma_rw_init_qp(device, qp_init_attr); |
889 | 889 | ||
890 | qp = _ib_create_qp(device, pd, qp_init_attr, NULL); | 890 | qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL); |
891 | if (IS_ERR(qp)) | 891 | if (IS_ERR(qp)) |
892 | return qp; | 892 | return qp; |
893 | 893 | ||
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
898 | } | 898 | } |
899 | 899 | ||
900 | qp->real_qp = qp; | 900 | qp->real_qp = qp; |
901 | qp->uobject = NULL; | ||
902 | qp->qp_type = qp_init_attr->qp_type; | 901 | qp->qp_type = qp_init_attr->qp_type; |
903 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; | 902 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; |
904 | 903 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ca32057e886f..3eb7a8387116 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h | |||
@@ -120,7 +120,6 @@ struct bnxt_re_dev { | |||
120 | #define BNXT_RE_FLAG_HAVE_L2_REF 3 | 120 | #define BNXT_RE_FLAG_HAVE_L2_REF 3 |
121 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 | 121 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 |
122 | #define BNXT_RE_FLAG_QOS_WORK_REG 5 | 122 | #define BNXT_RE_FLAG_QOS_WORK_REG 5 |
123 | #define BNXT_RE_FLAG_TASK_IN_PROG 6 | ||
124 | #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 | 123 | #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 |
125 | struct net_device *netdev; | 124 | struct net_device *netdev; |
126 | unsigned int version, major, minor; | 125 | unsigned int version, major, minor; |
@@ -158,6 +157,7 @@ struct bnxt_re_dev { | |||
158 | atomic_t srq_count; | 157 | atomic_t srq_count; |
159 | atomic_t mr_count; | 158 | atomic_t mr_count; |
160 | atomic_t mw_count; | 159 | atomic_t mw_count; |
160 | atomic_t sched_count; | ||
161 | /* Max of 2 lossless traffic class supported per port */ | 161 | /* Max of 2 lossless traffic class supported per port */ |
162 | u16 cosq[2]; | 162 | u16 cosq[2]; |
163 | 163 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ae9e9ff54826..643174d949a8 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, | |||
174 | ib_attr->max_pd = dev_attr->max_pd; | 174 | ib_attr->max_pd = dev_attr->max_pd; |
175 | ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; | 175 | ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; |
176 | ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; | 176 | ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; |
177 | if (dev_attr->is_atomic) { | 177 | ib_attr->atomic_cap = IB_ATOMIC_NONE; |
178 | ib_attr->atomic_cap = IB_ATOMIC_HCA; | 178 | ib_attr->masked_atomic_cap = IB_ATOMIC_NONE; |
179 | ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; | ||
180 | } | ||
181 | 179 | ||
182 | ib_attr->max_ee_rd_atom = 0; | 180 | ib_attr->max_ee_rd_atom = 0; |
183 | ib_attr->max_res_rd_atom = 0; | 181 | ib_attr->max_res_rd_atom = 0; |
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) | |||
787 | return 0; | 785 | return 0; |
788 | } | 786 | } |
789 | 787 | ||
788 | static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) | ||
789 | __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) | ||
790 | { | ||
791 | unsigned long flags; | ||
792 | |||
793 | spin_lock_irqsave(&qp->scq->cq_lock, flags); | ||
794 | if (qp->rcq != qp->scq) | ||
795 | spin_lock(&qp->rcq->cq_lock); | ||
796 | else | ||
797 | __acquire(&qp->rcq->cq_lock); | ||
798 | |||
799 | return flags; | ||
800 | } | ||
801 | |||
802 | static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, | ||
803 | unsigned long flags) | ||
804 | __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) | ||
805 | { | ||
806 | if (qp->rcq != qp->scq) | ||
807 | spin_unlock(&qp->rcq->cq_lock); | ||
808 | else | ||
809 | __release(&qp->rcq->cq_lock); | ||
810 | spin_unlock_irqrestore(&qp->scq->cq_lock, flags); | ||
811 | } | ||
812 | |||
790 | /* Queue Pairs */ | 813 | /* Queue Pairs */ |
791 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | 814 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp) |
792 | { | 815 | { |
793 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 816 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
794 | struct bnxt_re_dev *rdev = qp->rdev; | 817 | struct bnxt_re_dev *rdev = qp->rdev; |
795 | int rc; | 818 | int rc; |
819 | unsigned int flags; | ||
796 | 820 | ||
797 | bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); | 821 | bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); |
798 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | ||
799 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); | 822 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); |
800 | if (rc) { | 823 | if (rc) { |
801 | dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); | 824 | dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); |
802 | return rc; | 825 | return rc; |
803 | } | 826 | } |
827 | |||
828 | flags = bnxt_re_lock_cqs(qp); | ||
829 | bnxt_qplib_clean_qp(&qp->qplib_qp); | ||
830 | bnxt_re_unlock_cqs(qp, flags); | ||
831 | bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); | ||
832 | |||
804 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { | 833 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { |
805 | rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, | 834 | rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, |
806 | &rdev->sqp_ah->qplib_ah); | 835 | &rdev->sqp_ah->qplib_ah); |
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | |||
810 | return rc; | 839 | return rc; |
811 | } | 840 | } |
812 | 841 | ||
813 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | 842 | bnxt_qplib_clean_qp(&qp->qplib_qp); |
814 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, | 843 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, |
815 | &rdev->qp1_sqp->qplib_qp); | 844 | &rdev->qp1_sqp->qplib_qp); |
816 | if (rc) { | 845 | if (rc) { |
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1069 | goto fail; | 1098 | goto fail; |
1070 | } | 1099 | } |
1071 | qp->qplib_qp.scq = &cq->qplib_cq; | 1100 | qp->qplib_qp.scq = &cq->qplib_cq; |
1101 | qp->scq = cq; | ||
1072 | } | 1102 | } |
1073 | 1103 | ||
1074 | if (qp_init_attr->recv_cq) { | 1104 | if (qp_init_attr->recv_cq) { |
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1080 | goto fail; | 1110 | goto fail; |
1081 | } | 1111 | } |
1082 | qp->qplib_qp.rcq = &cq->qplib_cq; | 1112 | qp->qplib_qp.rcq = &cq->qplib_cq; |
1113 | qp->rcq = cq; | ||
1083 | } | 1114 | } |
1084 | 1115 | ||
1085 | if (qp_init_attr->srq) { | 1116 | if (qp_init_attr->srq) { |
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1185 | rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); | 1216 | rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); |
1186 | if (rc) { | 1217 | if (rc) { |
1187 | dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); | 1218 | dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); |
1188 | goto fail; | 1219 | goto free_umem; |
1189 | } | 1220 | } |
1190 | } | 1221 | } |
1191 | 1222 | ||
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1213 | return &qp->ib_qp; | 1244 | return &qp->ib_qp; |
1214 | qp_destroy: | 1245 | qp_destroy: |
1215 | bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); | 1246 | bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); |
1247 | free_umem: | ||
1248 | if (udata) { | ||
1249 | if (qp->rumem) | ||
1250 | ib_umem_release(qp->rumem); | ||
1251 | if (qp->sumem) | ||
1252 | ib_umem_release(qp->sumem); | ||
1253 | } | ||
1216 | fail: | 1254 | fail: |
1217 | kfree(qp); | 1255 | kfree(qp); |
1218 | return ERR_PTR(rc); | 1256 | return ERR_PTR(rc); |
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1603 | dev_dbg(rdev_to_dev(rdev), | 1641 | dev_dbg(rdev_to_dev(rdev), |
1604 | "Move QP = %p out of flush list\n", | 1642 | "Move QP = %p out of flush list\n", |
1605 | qp); | 1643 | qp); |
1606 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | 1644 | bnxt_qplib_clean_qp(&qp->qplib_qp); |
1607 | } | 1645 | } |
1608 | } | 1646 | } |
1609 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { | 1647 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 423ebe012f95..b88a48d43a9d 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
@@ -89,6 +89,8 @@ struct bnxt_re_qp { | |||
89 | /* QP1 */ | 89 | /* QP1 */ |
90 | u32 send_psn; | 90 | u32 send_psn; |
91 | struct ib_ud_header qp1_hdr; | 91 | struct ib_ud_header qp1_hdr; |
92 | struct bnxt_re_cq *scq; | ||
93 | struct bnxt_re_cq *rcq; | ||
92 | }; | 94 | }; |
93 | 95 | ||
94 | struct bnxt_re_cq { | 96 | struct bnxt_re_cq { |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 508d00a5a106..33a448036c2e 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev) | |||
656 | mutex_unlock(&bnxt_re_dev_lock); | 656 | mutex_unlock(&bnxt_re_dev_lock); |
657 | 657 | ||
658 | synchronize_rcu(); | 658 | synchronize_rcu(); |
659 | flush_workqueue(bnxt_re_wq); | ||
660 | 659 | ||
661 | ib_dealloc_device(&rdev->ibdev); | 660 | ib_dealloc_device(&rdev->ibdev); |
662 | /* rdev is gone */ | 661 | /* rdev is gone */ |
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work) | |||
1441 | break; | 1440 | break; |
1442 | } | 1441 | } |
1443 | smp_mb__before_atomic(); | 1442 | smp_mb__before_atomic(); |
1444 | clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | 1443 | atomic_dec(&rdev->sched_count); |
1445 | kfree(re_work); | 1444 | kfree(re_work); |
1446 | } | 1445 | } |
1447 | 1446 | ||
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1503 | /* netdev notifier will call NETDEV_UNREGISTER again later since | 1502 | /* netdev notifier will call NETDEV_UNREGISTER again later since |
1504 | * we are still holding the reference to the netdev | 1503 | * we are still holding the reference to the netdev |
1505 | */ | 1504 | */ |
1506 | if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) | 1505 | if (atomic_read(&rdev->sched_count) > 0) |
1507 | goto exit; | 1506 | goto exit; |
1508 | bnxt_re_ib_unreg(rdev, false); | 1507 | bnxt_re_ib_unreg(rdev, false); |
1509 | bnxt_re_remove_one(rdev); | 1508 | bnxt_re_remove_one(rdev); |
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1523 | re_work->vlan_dev = (real_dev == netdev ? | 1522 | re_work->vlan_dev = (real_dev == netdev ? |
1524 | NULL : netdev); | 1523 | NULL : netdev); |
1525 | INIT_WORK(&re_work->work, bnxt_re_task); | 1524 | INIT_WORK(&re_work->work, bnxt_re_task); |
1526 | set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | 1525 | atomic_inc(&rdev->sched_count); |
1527 | queue_work(bnxt_re_wq, &re_work->work); | 1526 | queue_work(bnxt_re_wq, &re_work->work); |
1528 | } | 1527 | } |
1529 | } | 1528 | } |
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void) | |||
1578 | */ | 1577 | */ |
1579 | list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { | 1578 | list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { |
1580 | dev_info(rdev_to_dev(rdev), "Unregistering Device"); | 1579 | dev_info(rdev_to_dev(rdev), "Unregistering Device"); |
1580 | /* | ||
1581 | * Flush out any scheduled tasks before destroying the | ||
1582 | * resources | ||
1583 | */ | ||
1584 | flush_workqueue(bnxt_re_wq); | ||
1581 | bnxt_re_dev_stop(rdev); | 1585 | bnxt_re_dev_stop(rdev); |
1582 | bnxt_re_ib_unreg(rdev, true); | 1586 | bnxt_re_ib_unreg(rdev, true); |
1583 | bnxt_re_remove_one(rdev); | 1587 | bnxt_re_remove_one(rdev); |
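The BNXT_RE_FLAG_TASK_IN_PROG bit could only record one outstanding work item, so the hunks above replace it with an atomic counter that is incremented when a task is queued and decremented when it completes; the unregister path then waits until the count drains, and the module-exit path flushes the workqueue first. A stand-alone sketch of that counting pattern, with hypothetical my_dev/my_work names rather than the driver's structures:

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	atomic_t sched_count;		/* queued-but-unfinished tasks */
	struct workqueue_struct *wq;
};

struct my_work {
	struct work_struct work;
	struct my_dev *dev;
};

static void my_task(struct work_struct *work)
{
	struct my_work *w = container_of(work, struct my_work, work);

	/* ... deferred processing ... */

	atomic_dec(&w->dev->sched_count);	/* one outstanding task finished */
	kfree(w);
}

static void my_schedule(struct my_dev *dev, struct my_work *w)
{
	w->dev = dev;
	INIT_WORK(&w->work, my_task);
	atomic_inc(&dev->sched_count);		/* visible to the teardown path */
	queue_work(dev->wq, &w->work);
}

Teardown code can then bail out (or retry later) while atomic_read(&dev->sched_count) > 0, which is what the NETDEV_UNREGISTER branch above does.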
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 1b0e94697fe3..3ea5b9624f6b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | |||
173 | } | 173 | } |
174 | } | 174 | } |
175 | 175 | ||
176 | void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | 176 | void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) |
177 | { | 177 | { |
178 | unsigned long flags; | 178 | unsigned long flags; |
179 | 179 | ||
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1419 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1419 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
1420 | struct cmdq_destroy_qp req; | 1420 | struct cmdq_destroy_qp req; |
1421 | struct creq_destroy_qp_resp resp; | 1421 | struct creq_destroy_qp_resp resp; |
1422 | unsigned long flags; | ||
1423 | u16 cmd_flags = 0; | 1422 | u16 cmd_flags = 0; |
1424 | int rc; | 1423 | int rc; |
1425 | 1424 | ||
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1437 | return rc; | 1436 | return rc; |
1438 | } | 1437 | } |
1439 | 1438 | ||
1440 | /* Must walk the associated CQs to nullified the QP ptr */ | 1439 | return 0; |
1441 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); | 1440 | } |
1442 | |||
1443 | __clean_cq(qp->scq, (u64)(unsigned long)qp); | ||
1444 | |||
1445 | if (qp->rcq && qp->rcq != qp->scq) { | ||
1446 | spin_lock(&qp->rcq->hwq.lock); | ||
1447 | __clean_cq(qp->rcq, (u64)(unsigned long)qp); | ||
1448 | spin_unlock(&qp->rcq->hwq.lock); | ||
1449 | } | ||
1450 | |||
1451 | spin_unlock_irqrestore(&qp->scq->hwq.lock, flags); | ||
1452 | 1441 | ||
1442 | void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, | ||
1443 | struct bnxt_qplib_qp *qp) | ||
1444 | { | ||
1453 | bnxt_qplib_free_qp_hdr_buf(res, qp); | 1445 | bnxt_qplib_free_qp_hdr_buf(res, qp); |
1454 | bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); | 1446 | bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); |
1455 | kfree(qp->sq.swq); | 1447 | kfree(qp->sq.swq); |
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1462 | if (qp->orrq.max_elements) | 1454 | if (qp->orrq.max_elements) |
1463 | bnxt_qplib_free_hwq(res->pdev, &qp->orrq); | 1455 | bnxt_qplib_free_hwq(res->pdev, &qp->orrq); |
1464 | 1456 | ||
1465 | return 0; | ||
1466 | } | 1457 | } |
1467 | 1458 | ||
1468 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, | 1459 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 211b27a8f9e2..ca0a2ffa3509 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | |||
478 | int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 478 | int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
479 | int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 479 | int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
480 | int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 480 | int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
481 | void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp); | ||
482 | void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, | ||
483 | struct bnxt_qplib_qp *qp); | ||
481 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, | 484 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, |
482 | struct bnxt_qplib_sge *sge); | 485 | struct bnxt_qplib_sge *sge); |
483 | void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, | 486 | void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, |
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); | |||
500 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); | 503 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); |
501 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); | 504 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); |
502 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); | 505 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); |
503 | void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp); | ||
504 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, | 506 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, |
505 | unsigned long *flags); | 507 | unsigned long *flags); |
506 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, | 508 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index c015c1861351..03057983341f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0, | |||
52 | 52 | ||
53 | /* Device */ | 53 | /* Device */ |
54 | 54 | ||
55 | static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) | ||
56 | { | ||
57 | int rc; | ||
58 | u16 pcie_ctl2; | ||
59 | |||
60 | rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, | ||
61 | &pcie_ctl2); | ||
62 | if (rc) | ||
63 | return false; | ||
64 | return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); | ||
65 | } | ||
66 | |||
67 | static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, | 55 | static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, |
68 | char *fw_ver) | 56 | char *fw_ver) |
69 | { | 57 | { |
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
165 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); | 153 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); |
166 | } | 154 | } |
167 | 155 | ||
168 | attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); | 156 | attr->is_atomic = 0; |
169 | bail: | 157 | bail: |
170 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | 158 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); |
171 | return rc; | 159 | return rc; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index faa9478c14a6..f95b97646c25 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | |||
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
114 | union pvrdma_cmd_resp rsp; | 114 | union pvrdma_cmd_resp rsp; |
115 | struct pvrdma_cmd_create_cq *cmd = &req.create_cq; | 115 | struct pvrdma_cmd_create_cq *cmd = &req.create_cq; |
116 | struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; | 116 | struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; |
117 | struct pvrdma_create_cq_resp cq_resp = {0}; | ||
117 | struct pvrdma_create_cq ucmd; | 118 | struct pvrdma_create_cq ucmd; |
118 | 119 | ||
119 | BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); | 120 | BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); |
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
197 | 198 | ||
198 | cq->ibcq.cqe = resp->cqe; | 199 | cq->ibcq.cqe = resp->cqe; |
199 | cq->cq_handle = resp->cq_handle; | 200 | cq->cq_handle = resp->cq_handle; |
201 | cq_resp.cqn = resp->cq_handle; | ||
200 | spin_lock_irqsave(&dev->cq_tbl_lock, flags); | 202 | spin_lock_irqsave(&dev->cq_tbl_lock, flags); |
201 | dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; | 203 | dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; |
202 | spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); | 204 | spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); |
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
205 | cq->uar = &(to_vucontext(context)->uar); | 207 | cq->uar = &(to_vucontext(context)->uar); |
206 | 208 | ||
207 | /* Copy udata back. */ | 209 | /* Copy udata back. */ |
208 | if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { | 210 | if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { |
209 | dev_warn(&dev->pdev->dev, | 211 | dev_warn(&dev->pdev->dev, |
210 | "failed to copy back udata\n"); | 212 | "failed to copy back udata\n"); |
211 | pvrdma_destroy_cq(&cq->ibcq); | 213 | pvrdma_destroy_cq(&cq->ibcq); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 5acebb1ef631..af235967a9c2 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | |||
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
113 | union pvrdma_cmd_resp rsp; | 113 | union pvrdma_cmd_resp rsp; |
114 | struct pvrdma_cmd_create_srq *cmd = &req.create_srq; | 114 | struct pvrdma_cmd_create_srq *cmd = &req.create_srq; |
115 | struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; | 115 | struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; |
116 | struct pvrdma_create_srq_resp srq_resp = {0}; | ||
116 | struct pvrdma_create_srq ucmd; | 117 | struct pvrdma_create_srq ucmd; |
117 | unsigned long flags; | 118 | unsigned long flags; |
118 | int ret; | 119 | int ret; |
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
204 | } | 205 | } |
205 | 206 | ||
206 | srq->srq_handle = resp->srqn; | 207 | srq->srq_handle = resp->srqn; |
208 | srq_resp.srqn = resp->srqn; | ||
207 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); | 209 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); |
208 | dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; | 210 | dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; |
209 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); | 211 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); |
210 | 212 | ||
211 | /* Copy udata back. */ | 213 | /* Copy udata back. */ |
212 | if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) { | 214 | if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) { |
213 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); | 215 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); |
214 | pvrdma_destroy_srq(&srq->ibsrq); | 216 | pvrdma_destroy_srq(&srq->ibsrq); |
215 | return ERR_PTR(-EINVAL); | 217 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 16b96616ef7e..a51463cd2f37 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | |||
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, | |||
447 | union pvrdma_cmd_resp rsp; | 447 | union pvrdma_cmd_resp rsp; |
448 | struct pvrdma_cmd_create_pd *cmd = &req.create_pd; | 448 | struct pvrdma_cmd_create_pd *cmd = &req.create_pd; |
449 | struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; | 449 | struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; |
450 | struct pvrdma_alloc_pd_resp pd_resp = {0}; | ||
450 | int ret; | 451 | int ret; |
451 | void *ptr; | 452 | void *ptr; |
452 | 453 | ||
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, | |||
475 | pd->privileged = !context; | 476 | pd->privileged = !context; |
476 | pd->pd_handle = resp->pd_handle; | 477 | pd->pd_handle = resp->pd_handle; |
477 | pd->pdn = resp->pd_handle; | 478 | pd->pdn = resp->pd_handle; |
479 | pd_resp.pdn = resp->pd_handle; | ||
478 | 480 | ||
479 | if (context) { | 481 | if (context) { |
480 | if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { | 482 | if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { |
481 | dev_warn(&dev->pdev->dev, | 483 | dev_warn(&dev->pdev->dev, |
482 | "failed to copy back protection domain\n"); | 484 | "failed to copy back protection domain\n"); |
483 | pvrdma_dealloc_pd(&pd->ibpd); | 485 | pvrdma_dealloc_pd(&pd->ibpd); |
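All three pvrdma hunks above apply the same udata fix: instead of copying a bare __u32 handle back to user space, the driver now zero-initialises the full uapi response structure, fills in the handle, and copies sizeof(struct), so kernel and user library agree on the layout and no uninitialised stack bytes leak out. A minimal sketch of the pattern, with a hypothetical my_create_resp structure standing in for the real pvrdma uapi structs:

#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct my_create_resp {			/* stand-in for a uapi response struct */
	__u32 handle;
	__u32 reserved;			/* padding/extension space, must stay zero */
};

static int my_copy_resp(struct ib_udata *udata, u32 handle)
{
	struct my_create_resp resp = {0};	/* zeroes padding and spare fields */

	resp.handle = handle;
	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}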
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 11f74cbe6660..ea302b054601 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c | |||
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev) | |||
281 | { | 281 | { |
282 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 282 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
283 | 283 | ||
284 | WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n"); | ||
285 | WARN_ONCE(!priv->path_dentry, "null path debug file\n"); | ||
286 | debugfs_remove(priv->mcg_dentry); | 284 | debugfs_remove(priv->mcg_dentry); |
287 | debugfs_remove(priv->path_dentry); | 285 | debugfs_remove(priv->path_dentry); |
288 | priv->mcg_dentry = priv->path_dentry = NULL; | 286 | priv->mcg_dentry = priv->path_dentry = NULL; |
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 35a408d0ae4f..99bc9bd64b9e 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d | |||
205 | * for example, an "address" value of 0x12345f000 will | 205 | * for example, an "address" value of 0x12345f000 will |
206 | * flush from 0x123440000 to 0x12347ffff (256KiB). */ | 206 | * flush from 0x123440000 to 0x12347ffff (256KiB). */ |
207 | unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); | 207 | unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); |
208 | unsigned long mask = __rounddown_pow_of_two(address ^ last);; | 208 | unsigned long mask = __rounddown_pow_of_two(address ^ last); |
209 | 209 | ||
210 | desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE; | 210 | desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE; |
211 | } else { | 211 | } else { |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 1a46b41dac70..6422846b546e 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) | |||
659 | static void search_free(struct closure *cl) | 659 | static void search_free(struct closure *cl) |
660 | { | 660 | { |
661 | struct search *s = container_of(cl, struct search, cl); | 661 | struct search *s = container_of(cl, struct search, cl); |
662 | bio_complete(s); | ||
663 | 662 | ||
664 | if (s->iop.bio) | 663 | if (s->iop.bio) |
665 | bio_put(s->iop.bio); | 664 | bio_put(s->iop.bio); |
666 | 665 | ||
666 | bio_complete(s); | ||
667 | closure_debug_destroy(cl); | 667 | closure_debug_destroy(cl); |
668 | mempool_free(s, s->d->c->search); | 668 | mempool_free(s, s->d->c->search); |
669 | } | 669 | } |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 312895788036..4d1d8dfb2d2a 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -1274,7 +1274,7 @@ static int flash_devs_run(struct cache_set *c) | |||
1274 | struct uuid_entry *u; | 1274 | struct uuid_entry *u; |
1275 | 1275 | ||
1276 | for (u = c->uuids; | 1276 | for (u = c->uuids; |
1277 | u < c->uuids + c->devices_max_used && !ret; | 1277 | u < c->uuids + c->nr_uuids && !ret; |
1278 | u++) | 1278 | u++) |
1279 | if (UUID_FLASH_ONLY(u)) | 1279 | if (UUID_FLASH_ONLY(u)) |
1280 | ret = flash_dev_run(c, u); | 1280 | ret = flash_dev_run(c, u); |
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e40065bdbfc8..0a7e99d62c69 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c | |||
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev) | |||
157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); | 157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
158 | } | 158 | } |
159 | rcu_read_unlock(); | 159 | rcu_read_unlock(); |
160 | seq_printf (seq, "]"); | 160 | seq_putc(seq, ']'); |
161 | } | 161 | } |
162 | 162 | ||
163 | static int multipath_congested(struct mddev *mddev, int bits) | 163 | static int multipath_congested(struct mddev *mddev, int bits) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..254e44e44668 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
801 | struct bio *bio; | 801 | struct bio *bio; |
802 | int ff = 0; | 802 | int ff = 0; |
803 | 803 | ||
804 | if (!page) | ||
805 | return; | ||
806 | |||
804 | if (test_bit(Faulty, &rdev->flags)) | 807 | if (test_bit(Faulty, &rdev->flags)) |
805 | return; | 808 | return; |
806 | 809 | ||
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev) | |||
5452 | * the only valid external interface is through the md | 5455 | * the only valid external interface is through the md |
5453 | * device. | 5456 | * device. |
5454 | */ | 5457 | */ |
5458 | mddev->has_superblocks = false; | ||
5455 | rdev_for_each(rdev, mddev) { | 5459 | rdev_for_each(rdev, mddev) { |
5456 | if (test_bit(Faulty, &rdev->flags)) | 5460 | if (test_bit(Faulty, &rdev->flags)) |
5457 | continue; | 5461 | continue; |
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev) | |||
5465 | set_disk_ro(mddev->gendisk, 1); | 5469 | set_disk_ro(mddev->gendisk, 1); |
5466 | } | 5470 | } |
5467 | 5471 | ||
5472 | if (rdev->sb_page) | ||
5473 | mddev->has_superblocks = true; | ||
5474 | |||
5468 | /* perform some consistency tests on the device. | 5475 | /* perform some consistency tests on the device. |
5469 | * We don't want the data to overlap the metadata, | 5476 | * We don't want the data to overlap the metadata, |
5470 | * Internal Bitmap issues have been handled elsewhere. | 5477 | * Internal Bitmap issues have been handled elsewhere. |
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev) | |||
5497 | } | 5504 | } |
5498 | if (mddev->sync_set == NULL) { | 5505 | if (mddev->sync_set == NULL) { |
5499 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); | 5506 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); |
5500 | if (!mddev->sync_set) | 5507 | if (!mddev->sync_set) { |
5501 | return -ENOMEM; | 5508 | err = -ENOMEM; |
5509 | goto abort; | ||
5510 | } | ||
5502 | } | 5511 | } |
5503 | 5512 | ||
5504 | spin_lock(&pers_lock); | 5513 | spin_lock(&pers_lock); |
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev) | |||
5511 | else | 5520 | else |
5512 | pr_warn("md: personality for level %s is not loaded!\n", | 5521 | pr_warn("md: personality for level %s is not loaded!\n", |
5513 | mddev->clevel); | 5522 | mddev->clevel); |
5514 | return -EINVAL; | 5523 | err = -EINVAL; |
5524 | goto abort; | ||
5515 | } | 5525 | } |
5516 | spin_unlock(&pers_lock); | 5526 | spin_unlock(&pers_lock); |
5517 | if (mddev->level != pers->level) { | 5527 | if (mddev->level != pers->level) { |
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev) | |||
5524 | pers->start_reshape == NULL) { | 5534 | pers->start_reshape == NULL) { |
5525 | /* This personality cannot handle reshaping... */ | 5535 | /* This personality cannot handle reshaping... */ |
5526 | module_put(pers->owner); | 5536 | module_put(pers->owner); |
5527 | return -EINVAL; | 5537 | err = -EINVAL; |
5538 | goto abort; | ||
5528 | } | 5539 | } |
5529 | 5540 | ||
5530 | if (pers->sync_request) { | 5541 | if (pers->sync_request) { |
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev) | |||
5593 | mddev->private = NULL; | 5604 | mddev->private = NULL; |
5594 | module_put(pers->owner); | 5605 | module_put(pers->owner); |
5595 | bitmap_destroy(mddev); | 5606 | bitmap_destroy(mddev); |
5596 | return err; | 5607 | goto abort; |
5597 | } | 5608 | } |
5598 | if (mddev->queue) { | 5609 | if (mddev->queue) { |
5599 | bool nonrot = true; | 5610 | bool nonrot = true; |
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev) | |||
5655 | sysfs_notify_dirent_safe(mddev->sysfs_action); | 5666 | sysfs_notify_dirent_safe(mddev->sysfs_action); |
5656 | sysfs_notify(&mddev->kobj, NULL, "degraded"); | 5667 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
5657 | return 0; | 5668 | return 0; |
5669 | |||
5670 | abort: | ||
5671 | if (mddev->bio_set) { | ||
5672 | bioset_free(mddev->bio_set); | ||
5673 | mddev->bio_set = NULL; | ||
5674 | } | ||
5675 | if (mddev->sync_set) { | ||
5676 | bioset_free(mddev->sync_set); | ||
5677 | mddev->sync_set = NULL; | ||
5678 | } | ||
5679 | |||
5680 | return err; | ||
5658 | } | 5681 | } |
5659 | EXPORT_SYMBOL_GPL(md_run); | 5682 | EXPORT_SYMBOL_GPL(md_run); |
5660 | 5683 | ||
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync); | |||
8049 | bool md_write_start(struct mddev *mddev, struct bio *bi) | 8072 | bool md_write_start(struct mddev *mddev, struct bio *bi) |
8050 | { | 8073 | { |
8051 | int did_change = 0; | 8074 | int did_change = 0; |
8075 | |||
8052 | if (bio_data_dir(bi) != WRITE) | 8076 | if (bio_data_dir(bi) != WRITE) |
8053 | return true; | 8077 | return true; |
8054 | 8078 | ||
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) | |||
8081 | rcu_read_unlock(); | 8105 | rcu_read_unlock(); |
8082 | if (did_change) | 8106 | if (did_change) |
8083 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 8107 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
8108 | if (!mddev->has_superblocks) | ||
8109 | return true; | ||
8084 | wait_event(mddev->sb_wait, | 8110 | wait_event(mddev->sb_wait, |
8085 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || | 8111 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || |
8086 | mddev->suspended); | 8112 | mddev->suspended); |
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread) | |||
8543 | set_mask_bits(&mddev->sb_flags, 0, | 8569 | set_mask_bits(&mddev->sb_flags, 0, |
8544 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); | 8570 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); |
8545 | 8571 | ||
8572 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | ||
8573 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | ||
8574 | mddev->delta_disks > 0 && | ||
8575 | mddev->pers->finish_reshape && | ||
8576 | mddev->pers->size && | ||
8577 | mddev->queue) { | ||
8578 | mddev_lock_nointr(mddev); | ||
8579 | md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); | ||
8580 | mddev_unlock(mddev); | ||
8581 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8582 | revalidate_disk(mddev->gendisk); | ||
8583 | } | ||
8584 | |||
8546 | spin_lock(&mddev->lock); | 8585 | spin_lock(&mddev->lock); |
8547 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8586 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8548 | /* We completed so min/max setting can be forgotten if used. */ | 8587 | /* We completed so min/max setting can be forgotten if used. */ |
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev, | |||
8569 | int removed = 0; | 8608 | int removed = 0; |
8570 | bool remove_some = false; | 8609 | bool remove_some = false; |
8571 | 8610 | ||
8611 | if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | ||
8612 | /* Mustn't remove devices when resync thread is running */ | ||
8613 | return 0; | ||
8614 | |||
8572 | rdev_for_each(rdev, mddev) { | 8615 | rdev_for_each(rdev, mddev) { |
8573 | if ((this == NULL || rdev == this) && | 8616 | if ((this == NULL || rdev == this) && |
8574 | rdev->raid_disk >= 0 && | 8617 | rdev->raid_disk >= 0 && |
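The md_run() changes above convert the early returns that used to leak mddev->bio_set and mddev->sync_set into the usual goto-based unwind: every failure after the biosets exist jumps to one abort label that frees them and clears the pointers. A self-contained sketch of that shape (my_ctx/my_setup are hypothetical, not md symbols):

#include <linux/bio.h>
#include <linux/errno.h>

struct my_ctx {
	struct bio_set *bio_set;
	struct bio_set *sync_set;
};

static int my_setup(struct my_ctx *c, bool later_step_fails)
{
	int err;

	c->bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!c->bio_set)
		return -ENOMEM;

	c->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!c->sync_set) {
		err = -ENOMEM;
		goto abort;
	}

	if (later_step_fails) {		/* any later failure unwinds the same way */
		err = -EINVAL;
		goto abort;
	}
	return 0;

abort:
	if (c->bio_set) {
		bioset_free(c->bio_set);
		c->bio_set = NULL;
	}
	if (c->sync_set) {
		bioset_free(c->sync_set);
		c->sync_set = NULL;
	}
	return err;
}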
diff --git a/drivers/md/md.h b/drivers/md/md.h index 58cd20a5e85e..fbc925cce810 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -468,6 +468,8 @@ struct mddev { | |||
468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); | 468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); |
469 | struct md_cluster_info *cluster_info; | 469 | struct md_cluster_info *cluster_info; |
470 | unsigned int good_device_nr; /* good device num within cluster raid */ | 470 | unsigned int good_device_nr; /* good device num within cluster raid */ |
471 | |||
472 | bool has_superblocks:1; | ||
471 | }; | 473 | }; |
472 | 474 | ||
473 | enum recovery_flags { | 475 | enum recovery_flags { |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2eae332e1a2..fe872dc6712e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, | |||
1108 | 1108 | ||
1109 | bio_copy_data(behind_bio, bio); | 1109 | bio_copy_data(behind_bio, bio); |
1110 | skip_copy: | 1110 | skip_copy: |
1111 | r1_bio->behind_master_bio = behind_bio;; | 1111 | r1_bio->behind_master_bio = behind_bio; |
1112 | set_bit(R1BIO_BehindIO, &r1_bio->state); | 1112 | set_bit(R1BIO_BehindIO, &r1_bio->state); |
1113 | 1113 | ||
1114 | return; | 1114 | return; |
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1809 | struct md_rdev *repl = | 1809 | struct md_rdev *repl = |
1810 | conf->mirrors[conf->raid_disks + number].rdev; | 1810 | conf->mirrors[conf->raid_disks + number].rdev; |
1811 | freeze_array(conf, 0); | 1811 | freeze_array(conf, 0); |
1812 | if (atomic_read(&repl->nr_pending)) { | ||
1813 | /* It means that some queued IO of retry_list | ||
1814 | * hold repl. Thus, we cannot set replacement | ||
1815 | * as NULL, avoiding rdev NULL pointer | ||
1816 | * dereference in sync_request_write and | ||
1817 | * handle_write_finished. | ||
1818 | */ | ||
1819 | err = -EBUSY; | ||
1820 | unfreeze_array(conf); | ||
1821 | goto abort; | ||
1822 | } | ||
1812 | clear_bit(Replacement, &repl->flags); | 1823 | clear_bit(Replacement, &repl->flags); |
1813 | p->rdev = repl; | 1824 | p->rdev = repl; |
1814 | conf->mirrors[conf->raid_disks + number].rdev = NULL; | 1825 | conf->mirrors[conf->raid_disks + number].rdev = NULL; |
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c7294e7557e0..eb84bc68e2fd 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -26,6 +26,18 @@ | |||
26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) | 26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) |
27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) | 27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) |
28 | 28 | ||
29 | /* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk. | ||
30 | * There are three safe ways to access raid1_info.rdev. | ||
31 | * 1/ when holding mddev->reconfig_mutex | ||
32 | * 2/ when resync/recovery is known to be happening - i.e. in code that is | ||
33 | * called as part of performing resync/recovery. | ||
34 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
35 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
36 | * RCU lock. | ||
37 | * When .rdev is set to NULL, the nr_pending count is checked again and if it has | ||
38 | * been incremented, the pointer is put back in .rdev. | ||
39 | */ | ||
40 | |||
29 | struct raid1_info { | 41 | struct raid1_info { |
30 | struct md_rdev *rdev; | 42 | struct md_rdev *rdev; |
31 | sector_t head_position; | 43 | sector_t head_position; |
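The access rules spelled out in the new raid1.h comment matter because raid1_remove_disk() can clear ->rdev at any time. Option 3/ is the one hot paths use: dereference under rcu_read_lock() and pin the device by bumping nr_pending before the lock is dropped. A short illustrative helper (my_get_rdev() is hypothetical; md open-codes this in its I/O paths rather than wrapping it):

#include <linux/rcupdate.h>
#include "md.h"
#include "raid1.h"

static struct md_rdev *my_get_rdev(struct raid1_info *mirror)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(mirror->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* keeps rdev alive after unlock */
	rcu_read_unlock();

	return rdev;	/* caller drops the reference with rdev_dec_pending() */
}

The nr_pending check added to raid1_remove_disk() above is the other half of the same protocol: a replacement that still has pending I/O must not be torn down.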
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..c5e6c60fc0d4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data) | |||
141 | #define RESYNC_WINDOW (1024*1024) | 141 | #define RESYNC_WINDOW (1024*1024) |
142 | /* maximum number of concurrent requests, memory permitting */ | 142 | /* maximum number of concurrent requests, memory permitting */ |
143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) | 143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) |
144 | #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) | 144 | #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW) |
145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) | 145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) |
146 | 146 | ||
147 | /* | 147 | /* |
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2655 | for (m = 0; m < conf->copies; m++) { | 2655 | for (m = 0; m < conf->copies; m++) { |
2656 | int dev = r10_bio->devs[m].devnum; | 2656 | int dev = r10_bio->devs[m].devnum; |
2657 | rdev = conf->mirrors[dev].rdev; | 2657 | rdev = conf->mirrors[dev].rdev; |
2658 | if (r10_bio->devs[m].bio == NULL) | 2658 | if (r10_bio->devs[m].bio == NULL || |
2659 | r10_bio->devs[m].bio->bi_end_io == NULL) | ||
2659 | continue; | 2660 | continue; |
2660 | if (!r10_bio->devs[m].bio->bi_status) { | 2661 | if (!r10_bio->devs[m].bio->bi_status) { |
2661 | rdev_clear_badblocks( | 2662 | rdev_clear_badblocks( |
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2670 | md_error(conf->mddev, rdev); | 2671 | md_error(conf->mddev, rdev); |
2671 | } | 2672 | } |
2672 | rdev = conf->mirrors[dev].replacement; | 2673 | rdev = conf->mirrors[dev].replacement; |
2673 | if (r10_bio->devs[m].repl_bio == NULL) | 2674 | if (r10_bio->devs[m].repl_bio == NULL || |
2675 | r10_bio->devs[m].repl_bio->bi_end_io == NULL) | ||
2674 | continue; | 2676 | continue; |
2675 | 2677 | ||
2676 | if (!r10_bio->devs[m].repl_bio->bi_status) { | 2678 | if (!r10_bio->devs[m].repl_bio->bi_status) { |
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev) | |||
3782 | if (fc > 1 || fo > 0) { | 3784 | if (fc > 1 || fo > 0) { |
3783 | pr_err("only near layout is supported by clustered" | 3785 | pr_err("only near layout is supported by clustered" |
3784 | " raid10\n"); | 3786 | " raid10\n"); |
3785 | goto out; | 3787 | goto out_free_conf; |
3786 | } | 3788 | } |
3787 | } | 3789 | } |
3788 | 3790 | ||
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev) | |||
4830 | return; | 4832 | return; |
4831 | 4833 | ||
4832 | if (mddev->delta_disks > 0) { | 4834 | if (mddev->delta_disks > 0) { |
4833 | sector_t size = raid10_size(mddev, 0, 0); | ||
4834 | md_set_array_sectors(mddev, size); | ||
4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { | 4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { |
4836 | mddev->recovery_cp = mddev->resync_max_sectors; | 4836 | mddev->recovery_cp = mddev->resync_max_sectors; |
4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
4838 | } | 4838 | } |
4839 | mddev->resync_max_sectors = size; | 4839 | mddev->resync_max_sectors = mddev->array_sectors; |
4840 | if (mddev->queue) { | ||
4841 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
4842 | revalidate_disk(mddev->gendisk); | ||
4843 | } | ||
4844 | } else { | 4840 | } else { |
4845 | int d; | 4841 | int d; |
4846 | rcu_read_lock(); | 4842 | rcu_read_lock(); |
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index db2ac22ac1b4..e2e8840de9bf 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -2,6 +2,19 @@ | |||
2 | #ifndef _RAID10_H | 2 | #ifndef _RAID10_H |
3 | #define _RAID10_H | 3 | #define _RAID10_H |
4 | 4 | ||
5 | /* Note: raid10_info.rdev can be set to NULL asynchronously by | ||
6 | * raid10_remove_disk. | ||
7 | * There are three safe ways to access raid10_info.rdev. | ||
8 | * 1/ when holding mddev->reconfig_mutex | ||
9 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code | ||
10 | * that is called as part of performing resync/recovery/reshape. | ||
11 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
12 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
13 | * RCU lock. | ||
14 | * When .rdev is set to NULL, the nr_pending count is checked again and if it has | ||
15 | * been incremented, the pointer is put back in .rdev. | ||
16 | */ | ||
17 | |||
5 | struct raid10_info { | 18 | struct raid10_info { |
6 | struct md_rdev *rdev, *replacement; | 19 | struct md_rdev *rdev, *replacement; |
7 | sector_t head_position; | 20 | sector_t head_position; |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 0c76bcedfc1c..a001808a2b77 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf); | |||
44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); | 44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); |
45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | 45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); |
46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | ||
47 | 48 | ||
48 | static inline bool raid5_has_ppl(struct r5conf *conf) | 49 | static inline bool raid5_has_ppl(struct r5conf *conf) |
49 | { | 50 | { |
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio) | |||
104 | if (conf->log) | 105 | if (conf->log) |
105 | ret = r5l_handle_flush_request(conf->log, bio); | 106 | ret = r5l_handle_flush_request(conf->log, bio); |
106 | else if (raid5_has_ppl(conf)) | 107 | else if (raid5_has_ppl(conf)) |
107 | ret = 0; | 108 | ret = ppl_handle_flush_request(conf->log, bio); |
108 | 109 | ||
109 | return ret; | 110 | return ret; |
110 | } | 111 | } |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 2764c2290062..42890a08375b 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce) | |||
693 | } | 693 | } |
694 | } | 694 | } |
695 | 695 | ||
696 | int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio) | ||
697 | { | ||
698 | if (bio->bi_iter.bi_size == 0) { | ||
699 | bio_endio(bio); | ||
700 | return 0; | ||
701 | } | ||
702 | bio->bi_opf &= ~REQ_PREFLUSH; | ||
703 | return -EAGAIN; | ||
704 | } | ||
705 | |||
696 | void ppl_stripe_write_finished(struct stripe_head *sh) | 706 | void ppl_stripe_write_finished(struct stripe_head *sh) |
697 | { | 707 | { |
698 | struct ppl_io_unit *io; | 708 | struct ppl_io_unit *io; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..b5d2601483e3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | |||
2196 | static int grow_stripes(struct r5conf *conf, int num) | 2196 | static int grow_stripes(struct r5conf *conf, int num) |
2197 | { | 2197 | { |
2198 | struct kmem_cache *sc; | 2198 | struct kmem_cache *sc; |
2199 | size_t namelen = sizeof(conf->cache_name[0]); | ||
2199 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | 2200 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
2200 | 2201 | ||
2201 | if (conf->mddev->gendisk) | 2202 | if (conf->mddev->gendisk) |
2202 | sprintf(conf->cache_name[0], | 2203 | snprintf(conf->cache_name[0], namelen, |
2203 | "raid%d-%s", conf->level, mdname(conf->mddev)); | 2204 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
2204 | else | 2205 | else |
2205 | sprintf(conf->cache_name[0], | 2206 | snprintf(conf->cache_name[0], namelen, |
2206 | "raid%d-%p", conf->level, conf->mddev); | 2207 | "raid%d-%p", conf->level, conf->mddev); |
2207 | sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); | 2208 | snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); |
2208 | 2209 | ||
2209 | conf->active_name = 0; | 2210 | conf->active_name = 0; |
2210 | sc = kmem_cache_create(conf->cache_name[conf->active_name], | 2211 | sc = kmem_cache_create(conf->cache_name[conf->active_name], |
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf) | |||
6764 | 6765 | ||
6765 | log_exit(conf); | 6766 | log_exit(conf); |
6766 | 6767 | ||
6767 | if (conf->shrinker.nr_deferred) | 6768 | unregister_shrinker(&conf->shrinker); |
6768 | unregister_shrinker(&conf->shrinker); | ||
6769 | |||
6770 | free_thread_groups(conf); | 6769 | free_thread_groups(conf); |
6771 | shrink_stripes(conf); | 6770 | shrink_stripes(conf); |
6772 | raid5_free_percpu(conf); | 6771 | raid5_free_percpu(conf); |
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
8001 | 8000 | ||
8002 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8001 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8003 | 8002 | ||
8004 | if (mddev->delta_disks > 0) { | 8003 | if (mddev->delta_disks <= 0) { |
8005 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | ||
8006 | if (mddev->queue) { | ||
8007 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8008 | revalidate_disk(mddev->gendisk); | ||
8009 | } | ||
8010 | } else { | ||
8011 | int d; | 8004 | int d; |
8012 | spin_lock_irq(&conf->device_lock); | 8005 | spin_lock_irq(&conf->device_lock); |
8013 | mddev->degraded = raid5_calc_degraded(conf); | 8006 | mddev->degraded = raid5_calc_degraded(conf); |
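On the grow_stripes() hunk above: conf->cache_name[] entries are 32 bytes, so the switch from sprintf() to snprintf() bounds the base name, and the "%.27s-alt" format caps the copied prefix at 27 characters so that the 4-character "-alt" suffix plus the terminating NUL always fits. A stand-alone sketch of the same bounded-naming trick, using a hypothetical 32-byte buffer:

#include <linux/kernel.h>

#define MY_NAME_LEN 32

static void my_make_names(char name[2][MY_NAME_LEN], int level, const char *mdname)
{
	size_t namelen = MY_NAME_LEN;

	snprintf(name[0], namelen, "raid%d-%s", level, mdname);
	/* "%.27s" truncates the base so "-alt" can never overflow the buffer */
	snprintf(name[1], namelen, "%.27s-alt", name[0]);
}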
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2e6123825095..3f8da26032ac 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -450,6 +450,18 @@ enum { | |||
450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. | 450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. |
451 | */ | 451 | */ |
452 | 452 | ||
453 | /* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk. | ||
454 | * There are three safe ways to access disk_info.rdev. | ||
455 | * 1/ when holding mddev->reconfig_mutex | ||
456 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that | ||
457 | * is called as part of performing resync/recovery/reshape. | ||
458 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
459 | * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU | ||
460 | * lock. | ||
461 | * When .rdev is set to NULL, the nr_pending count is checked again and if | ||
462 | * it has been incremented, the pointer is put back in .rdev. | ||
463 | */ | ||
464 | |||
453 | struct disk_info { | 465 | struct disk_info { |
454 | struct md_rdev *rdev, *replacement; | 466 | struct md_rdev *rdev, *replacement; |
455 | struct page *extra_page; /* extra page to use in prexor */ | 467 | struct page *extra_page; /* extra page to use in prexor */ |
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index 0a7bdbed3a6f..e9c1485c32b9 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c | |||
@@ -45,8 +45,16 @@ | |||
45 | #define REG_TO_DCPU_MBOX 0x10 | 45 | #define REG_TO_DCPU_MBOX 0x10 |
46 | #define REG_TO_HOST_MBOX 0x14 | 46 | #define REG_TO_HOST_MBOX 0x14 |
47 | 47 | ||
48 | /* Macros to process offsets returned by the DCPU */ | ||
49 | #define DRAM_MSG_ADDR_OFFSET 0x0 | ||
50 | #define DRAM_MSG_TYPE_OFFSET 0x1c | ||
51 | #define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1) | ||
52 | #define DRAM_MSG_TYPE_MASK ((1UL << \ | ||
53 | (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1) | ||
54 | |||
48 | /* Message RAM */ | 55 | /* Message RAM */ |
49 | #define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32)) | 56 | #define DCPU_MSG_RAM_START 0x100 |
57 | #define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32)) | ||
50 | 58 | ||
51 | /* DRAM Info Offsets & Masks */ | 59 | /* DRAM Info Offsets & Masks */ |
52 | #define DRAM_INFO_INTERVAL 0x0 | 60 | #define DRAM_INFO_INTERVAL 0x0 |
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[]) | |||
255 | return sum; | 263 | return sum; |
256 | } | 264 | } |
257 | 265 | ||
266 | static void __iomem *get_msg_ptr(struct private_data *priv, u32 response, | ||
267 | char *buf, ssize_t *size) | ||
268 | { | ||
269 | unsigned int msg_type; | ||
270 | unsigned int offset; | ||
271 | void __iomem *ptr = NULL; | ||
272 | |||
273 | msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK; | ||
274 | offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK; | ||
275 | |||
276 | /* | ||
277 | * msg_type == 1: the offset is relative to the message RAM | ||
278 | * msg_type == 0: the offset is relative to the data RAM (this is the | ||
279 | * previous way of passing data) | ||
281 | * msg_type is anything else: there's a critical hardware problem | ||
281 | */ | ||
282 | switch (msg_type) { | ||
283 | case 1: | ||
284 | ptr = priv->regs + DCPU_MSG_RAM_START + offset; | ||
285 | break; | ||
286 | case 0: | ||
287 | ptr = priv->dmem + offset; | ||
288 | break; | ||
289 | default: | ||
290 | dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n", | ||
291 | response); | ||
292 | if (buf && size) | ||
293 | *size = sprintf(buf, | ||
294 | "FATAL: communication error with DCPU\n"); | ||
295 | } | ||
296 | |||
297 | return ptr; | ||
298 | } | ||
299 | |||
258 | static int __send_command(struct private_data *priv, unsigned int cmd, | 300 | static int __send_command(struct private_data *priv, unsigned int cmd, |
259 | u32 result[]) | 301 | u32 result[]) |
260 | { | 302 | { |
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr, | |||
507 | { | 549 | { |
508 | u32 response[MSG_FIELD_MAX]; | 550 | u32 response[MSG_FIELD_MAX]; |
509 | unsigned int info; | 551 | unsigned int info; |
510 | int ret; | 552 | ssize_t ret; |
511 | 553 | ||
512 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); | 554 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); |
513 | if (ret) | 555 | if (ret) |
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev, | |||
528 | u32 response[MSG_FIELD_MAX]; | 570 | u32 response[MSG_FIELD_MAX]; |
529 | void __iomem *info; | 571 | void __iomem *info; |
530 | struct private_data *priv; | 572 | struct private_data *priv; |
531 | unsigned int offset; | ||
532 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; | 573 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; |
533 | u32 mr4; | 574 | u32 mr4; |
534 | int ret; | 575 | ssize_t ret; |
535 | 576 | ||
536 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); | 577 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); |
537 | if (ret) | 578 | if (ret) |
538 | return ret; | 579 | return ret; |
539 | 580 | ||
540 | priv = dev_get_drvdata(dev); | 581 | priv = dev_get_drvdata(dev); |
541 | offset = response[MSG_ARG0]; | 582 | |
542 | info = priv->dmem + offset; | 583 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); |
584 | if (!info) | ||
585 | return ret; | ||
543 | 586 | ||
544 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; | 587 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; |
545 | 588 | ||
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
561 | u32 response[MSG_FIELD_MAX]; | 604 | u32 response[MSG_FIELD_MAX]; |
562 | struct private_data *priv; | 605 | struct private_data *priv; |
563 | void __iomem *info; | 606 | void __iomem *info; |
564 | unsigned int offset; | ||
565 | unsigned long val; | 607 | unsigned long val; |
566 | int ret; | 608 | int ret; |
567 | 609 | ||
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
574 | if (ret) | 616 | if (ret) |
575 | return ret; | 617 | return ret; |
576 | 618 | ||
577 | offset = response[MSG_ARG0]; | 619 | info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL); |
578 | info = priv->dmem + offset; | 620 | if (!info) |
621 | return -EIO; | ||
622 | |||
579 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); | 623 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); |
580 | 624 | ||
581 | return count; | 625 | return count; |
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr, | |||
587 | u32 response[MSG_FIELD_MAX]; | 631 | u32 response[MSG_FIELD_MAX]; |
588 | struct private_data *priv; | 632 | struct private_data *priv; |
589 | void __iomem *info; | 633 | void __iomem *info; |
590 | unsigned int offset; | 634 | ssize_t ret; |
591 | int ret; | ||
592 | 635 | ||
593 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); | 636 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); |
594 | if (ret) | 637 | if (ret) |
595 | return ret; | 638 | return ret; |
596 | 639 | ||
597 | offset = response[MSG_ARG0]; | ||
598 | priv = dev_get_drvdata(dev); | 640 | priv = dev_get_drvdata(dev); |
599 | info = priv->dmem + offset; | 641 | |
642 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); | ||
643 | if (!info) | ||
644 | return ret; | ||
600 | 645 | ||
601 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", | 646 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", |
602 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, | 647 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, |
603 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, | 648 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, |
604 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, | 649 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, |
605 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, | 650 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, |
606 | readl_relaxed(info + DRAM_VENDOR_ERROR)); | 651 | readl_relaxed(info + DRAM_VENDOR_ERROR) & |
652 | DRAM_VENDOR_MASK); | ||
607 | } | 653 | } |
608 | 654 | ||
609 | static int brcmstb_dpfe_resume(struct platform_device *pdev) | 655 | static int brcmstb_dpfe_resume(struct platform_device *pdev) |
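The new DRAM_MSG_* macros encode how the DCPU's 32-bit reply word is split: the low 28 bits carry an offset and the remaining top bits (effectively the top nibble of a 32-bit value) carry the message type that get_msg_ptr() switches on. A tiny illustrative decoder using hypothetical MY_* names rather than the driver's macros:

#include <linux/types.h>

#define MY_TYPE_SHIFT	28
#define MY_ADDR_MASK	((1UL << MY_TYPE_SHIFT) - 1)

static void my_decode_response(u32 response, unsigned int *type, unsigned int *offset)
{
	*type = response >> MY_TYPE_SHIFT;	/* 1 = message RAM, 0 = data RAM */
	*offset = response & MY_ADDR_MASK;	/* byte offset within that RAM */
}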
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 8d12017b9893..4470630dd545 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg) | |||
2687 | __FILE__, __LINE__, iocnum); | 2687 | __FILE__, __LINE__, iocnum); |
2688 | return -ENODEV; | 2688 | return -ENODEV; |
2689 | } | 2689 | } |
2690 | if (karg.hdr.id >= MPT_MAX_FC_DEVICES) | ||
2691 | return -EINVAL; | ||
2690 | dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", | 2692 | dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", |
2691 | ioc->name)); | 2693 | ioc->name)); |
2692 | 2694 | ||
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 3e5eabdae8d9..772d02922529 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev) | |||
548 | goto out; | 548 | goto out; |
549 | } | 549 | } |
550 | 550 | ||
551 | if (bus->dev_state == MEI_DEV_POWER_DOWN) { | ||
552 | dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n"); | ||
553 | err = 0; | ||
554 | goto out; | ||
555 | } | ||
556 | |||
557 | err = mei_cl_disconnect(cl); | 551 | err = mei_cl_disconnect(cl); |
558 | if (err < 0) | 552 | if (err < 0) |
559 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); | 553 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index be64969d986a..7e60c1817c31 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
945 | return 0; | 945 | return 0; |
946 | } | 946 | } |
947 | 947 | ||
948 | if (dev->dev_state == MEI_DEV_POWER_DOWN) { | ||
949 | cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); | ||
950 | mei_cl_set_disconnected(cl); | ||
951 | return 0; | ||
952 | } | ||
953 | |||
948 | rets = pm_runtime_get(dev->dev); | 954 | rets = pm_runtime_get(dev->dev); |
949 | if (rets < 0 && rets != -EINPROGRESS) { | 955 | if (rets < 0 && rets != -EINPROGRESS) { |
950 | pm_runtime_put_noidle(dev->dev); | 956 | pm_runtime_put_noidle(dev->dev); |
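The two hunks above move the "device is powering down" shortcut out of mei_cldev_disable() and into mei_cl_disconnect(), so every disconnect path skips the hardware handshake during power-down while the client is still marked disconnected. A small standalone sketch of centralizing that check in the common helper (all names invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum dev_state { DEV_ENABLED, DEV_POWER_DOWN };

    struct client {
            enum dev_state *dev_state;
            bool connected;
    };

    /* The state check lives in the one function every disconnect path
     * goes through, so callers no longer need to duplicate it. */
    static int client_disconnect(struct client *cl)
    {
            if (*cl->dev_state == DEV_POWER_DOWN) {
                    cl->connected = false;  /* just record the disconnect */
                    return 0;               /* skip talking to the hardware */
            }
            /* ... normal disconnect handshake with the device ... */
            cl->connected = false;
            return 0;
    }

    int main(void)
    {
            enum dev_state state = DEV_POWER_DOWN;
            struct client cl = { .dev_state = &state, .connected = true };
            int rc = client_disconnect(&cl);

            printf("rc=%d connected=%d\n", rc, cl.connected);
            return 0;
    }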
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 0ccccbaf530d..e4b10b2d1a08 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -132,6 +132,11 @@ | |||
132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | 132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ | 133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ |
134 | 134 | ||
135 | #define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */ | ||
136 | #define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */ | ||
137 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ | ||
138 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ | ||
139 | |||
135 | /* | 140 | /* |
136 | * MEI HW Section | 141 | * MEI HW Section |
137 | */ | 142 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4a0ccda4d04b..ea4e152270a3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, |
99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, | 99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, |
100 | 100 | ||
101 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, | ||
102 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, | ||
103 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, | ||
104 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, | ||
105 | |||
101 | /* required last entry */ | 106 | /* required last entry */ |
102 | {0, } | 107 | {0, } |
103 | }; | 108 | }; |
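The Cannon Point device IDs added to hw-me-regs.h are matched through new entries in the zero-terminated mei_me_pci_tbl. A minimal sketch of a sentinel-terminated ID table and lookup; 0x8086 is Intel's PCI vendor ID, the cfg strings are invented:

    #include <stdint.h>
    #include <stdio.h>

    struct id_entry {
            uint16_t vendor, device;
            const char *cfg;
    };

    #define VENDOR_INTEL 0x8086

    static const struct id_entry id_table[] = {
            { VENDOR_INTEL, 0x9DE0, "pch8" },       /* Cannon Point LP */
            { VENDOR_INTEL, 0xA360, "pch8" },       /* Cannon Point H  */
            { 0, 0, NULL }                          /* required last entry */
    };

    static const char *match(uint16_t vendor, uint16_t device)
    {
            for (const struct id_entry *e = id_table; e->vendor; e++)
                    if (e->vendor == vendor && e->device == device)
                            return e->cfg;
            return NULL;
    }

    int main(void)
    {
            printf("cfg=%s\n", match(VENDOR_INTEL, 0x9DE0));
            return 0;
    }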
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 2dd2db9bc1c9..337462e1569f 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c | |||
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd, | |||
133 | if (!rc) { | 133 | if (!rc) { |
134 | rc = copy_to_user((u64 __user *) args, &irq_offset, | 134 | rc = copy_to_user((u64 __user *) args, &irq_offset, |
135 | sizeof(irq_offset)); | 135 | sizeof(irq_offset)); |
136 | if (rc) | 136 | if (rc) { |
137 | ocxl_afu_irq_free(ctx, irq_offset); | 137 | ocxl_afu_irq_free(ctx, irq_offset); |
138 | return -EFAULT; | ||
139 | } | ||
138 | } | 140 | } |
139 | break; | 141 | break; |
140 | 142 | ||
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count, | |||
329 | 331 | ||
330 | used += sizeof(header); | 332 | used += sizeof(header); |
331 | 333 | ||
332 | rc = (ssize_t) used; | 334 | rc = used; |
333 | return rc; | 335 | return rc; |
334 | } | 336 | } |
335 | 337 | ||
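copy_to_user() returns the number of bytes it could not copy rather than an errno, so the hunk above both frees the just-allocated IRQ and converts the failure to -EFAULT. A tiny userspace model of that undo-and-convert step (copy_out() is a stand-in that mimics the return convention):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins: a fallible copy that returns bytes NOT copied, plus the
     * cleanup that must run when it fails. */
    static unsigned long copy_out(int fail) { return fail ? 8 : 0; }
    static void free_resource(void) { puts("resource released"); }

    static long do_ioctl(int make_copy_fail)
    {
            /* ... resource allocated here ... */
            if (copy_out(make_copy_fail)) {
                    free_resource();        /* undo the allocation ... */
                    return -EFAULT;         /* ... and report a proper errno */
            }
            return 0;
    }

    int main(void)
    {
            printf("ok=%ld fail=%ld\n", do_ioctl(0), do_ioctl(1));
            return 0;
    }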
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 908e4db03535..42d6aa89a48a 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
848 | return 1; | 848 | return 1; |
849 | } | 849 | } |
850 | 850 | ||
851 | mmc_claim_host(card->host); | ||
852 | err = mmc_send_status(card, &status); | 851 | err = mmc_send_status(card, &status); |
853 | if (err) { | 852 | if (err) { |
854 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); | 853 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); |
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
890 | } while (!err); | 889 | } while (!err); |
891 | 890 | ||
892 | out: | 891 | out: |
893 | mmc_release_host(card->host); | ||
894 | return err; | 892 | return err; |
895 | } | 893 | } |
896 | 894 | ||
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card) | |||
932 | int err; | 930 | int err; |
933 | u8 *ext_csd; | 931 | u8 *ext_csd; |
934 | 932 | ||
935 | mmc_claim_host(card->host); | ||
936 | err = mmc_get_ext_csd(card, &ext_csd); | 933 | err = mmc_get_ext_csd(card, &ext_csd); |
937 | mmc_release_host(card->host); | ||
938 | if (err) | 934 | if (err) |
939 | return err; | 935 | return err; |
940 | 936 | ||
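The claim/release pair disappears from mmc_interrupt_hpi() and mmc_read_bkops_status(); presumably their callers already hold the host claim, in which case re-claiming from inside the helper would deadlock a non-recursive claim. A generic userspace sketch of the "caller must hold the lock" convention, with a pthread mutex standing in for the host claim:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Documented as "caller must hold host_lock" instead of taking the
     * (non-recursive) lock itself -- re-taking it here would deadlock. */
    static int read_status(int *status)
    {
            *status = 42;           /* pretend to query the card */
            return 0;
    }

    int main(void)
    {
            int status;

            pthread_mutex_lock(&host_lock);         /* caller owns the claim... */
            read_status(&status);                   /* ...helper must not re-take it */
            pthread_mutex_unlock(&host_lock);

            printf("status=%d\n", status);
            return 0;
    }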
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..fa41d9422d57 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c | |||
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = { | |||
487 | 487 | ||
488 | static const struct dw_mci_drv_data exynos_drv_data = { | 488 | static const struct dw_mci_drv_data exynos_drv_data = { |
489 | .caps = exynos_dwmmc_caps, | 489 | .caps = exynos_dwmmc_caps, |
490 | .num_caps = ARRAY_SIZE(exynos_dwmmc_caps), | ||
490 | .init = dw_mci_exynos_priv_init, | 491 | .init = dw_mci_exynos_priv_init, |
491 | .set_ios = dw_mci_exynos_set_ios, | 492 | .set_ios = dw_mci_exynos_set_ios, |
492 | .parse_dt = dw_mci_exynos_parse_dt, | 493 | .parse_dt = dw_mci_exynos_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 73fd75c3c824..89cdb3d533bb 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c | |||
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) | |||
135 | if (priv->ctrl_id < 0) | 135 | if (priv->ctrl_id < 0) |
136 | priv->ctrl_id = 0; | 136 | priv->ctrl_id = 0; |
137 | 137 | ||
138 | if (priv->ctrl_id >= TIMING_MODE) | ||
139 | return -EINVAL; | ||
140 | |||
138 | host->priv = priv; | 141 | host->priv = priv; |
139 | return 0; | 142 | return 0; |
140 | } | 143 | } |
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) | |||
207 | 210 | ||
208 | static const struct dw_mci_drv_data hi6220_data = { | 211 | static const struct dw_mci_drv_data hi6220_data = { |
209 | .caps = dw_mci_hi6220_caps, | 212 | .caps = dw_mci_hi6220_caps, |
213 | .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), | ||
210 | .switch_voltage = dw_mci_hi6220_switch_voltage, | 214 | .switch_voltage = dw_mci_hi6220_switch_voltage, |
211 | .set_ios = dw_mci_hi6220_set_ios, | 215 | .set_ios = dw_mci_hi6220_set_ios, |
212 | .parse_dt = dw_mci_hi6220_parse_dt, | 216 | .parse_dt = dw_mci_hi6220_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c | |||
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = { | |||
319 | 319 | ||
320 | static const struct dw_mci_drv_data rk3288_drv_data = { | 320 | static const struct dw_mci_drv_data rk3288_drv_data = { |
321 | .caps = dw_mci_rk3288_dwmmc_caps, | 321 | .caps = dw_mci_rk3288_dwmmc_caps, |
322 | .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), | ||
322 | .set_ios = dw_mci_rk3288_set_ios, | 323 | .set_ios = dw_mci_rk3288_set_ios, |
323 | .execute_tuning = dw_mci_rk3288_execute_tuning, | 324 | .execute_tuning = dw_mci_rk3288_execute_tuning, |
324 | .parse_dt = dw_mci_rk3288_parse_dt, | 325 | .parse_dt = dw_mci_rk3288_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c | |||
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = { | |||
195 | 195 | ||
196 | static const struct dw_mci_drv_data zx_drv_data = { | 196 | static const struct dw_mci_drv_data zx_drv_data = { |
197 | .caps = zx_dwmmc_caps, | 197 | .caps = zx_dwmmc_caps, |
198 | .num_caps = ARRAY_SIZE(zx_dwmmc_caps), | ||
198 | .execute_tuning = dw_mci_zx_execute_tuning, | 199 | .execute_tuning = dw_mci_zx_execute_tuning, |
199 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, | 200 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, |
200 | .parse_dt = dw_mci_zx_parse_dt, | 201 | .parse_dt = dw_mci_zx_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 0aa39975f33b..d9b4acefed31 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
165 | { | 165 | { |
166 | struct dw_mci *host = s->private; | 166 | struct dw_mci *host = s->private; |
167 | 167 | ||
168 | pm_runtime_get_sync(host->dev); | ||
169 | |||
168 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); | 170 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); |
169 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); | 171 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); |
170 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); | 172 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); |
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
172 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); | 174 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); |
173 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); | 175 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); |
174 | 176 | ||
177 | pm_runtime_put_autosuspend(host->dev); | ||
178 | |||
175 | return 0; | 179 | return 0; |
176 | } | 180 | } |
177 | 181 | ||
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2778 | return IRQ_HANDLED; | 2782 | return IRQ_HANDLED; |
2779 | } | 2783 | } |
2780 | 2784 | ||
2785 | static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) | ||
2786 | { | ||
2787 | struct dw_mci *host = slot->host; | ||
2788 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
2789 | struct mmc_host *mmc = slot->mmc; | ||
2790 | int ctrl_id; | ||
2791 | |||
2792 | if (host->pdata->caps) | ||
2793 | mmc->caps = host->pdata->caps; | ||
2794 | |||
2795 | /* | ||
2796 | * Support MMC_CAP_ERASE by default. | ||
2797 | * It needs to use trim/discard/erase commands. | ||
2798 | */ | ||
2799 | mmc->caps |= MMC_CAP_ERASE; | ||
2800 | |||
2801 | if (host->pdata->pm_caps) | ||
2802 | mmc->pm_caps = host->pdata->pm_caps; | ||
2803 | |||
2804 | if (host->dev->of_node) { | ||
2805 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2806 | if (ctrl_id < 0) | ||
2807 | ctrl_id = 0; | ||
2808 | } else { | ||
2809 | ctrl_id = to_platform_device(host->dev)->id; | ||
2810 | } | ||
2811 | |||
2812 | if (drv_data && drv_data->caps) { | ||
2813 | if (ctrl_id >= drv_data->num_caps) { | ||
2814 | dev_err(host->dev, "invalid controller id %d\n", | ||
2815 | ctrl_id); | ||
2816 | return -EINVAL; | ||
2817 | } | ||
2818 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2819 | } | ||
2820 | |||
2821 | if (host->pdata->caps2) | ||
2822 | mmc->caps2 = host->pdata->caps2; | ||
2823 | |||
2824 | /* Process SDIO IRQs through the sdio_irq_work. */ | ||
2825 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | ||
2826 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | ||
2827 | |||
2828 | return 0; | ||
2829 | } | ||
2830 | |||
2781 | static int dw_mci_init_slot(struct dw_mci *host) | 2831 | static int dw_mci_init_slot(struct dw_mci *host) |
2782 | { | 2832 | { |
2783 | struct mmc_host *mmc; | 2833 | struct mmc_host *mmc; |
2784 | struct dw_mci_slot *slot; | 2834 | struct dw_mci_slot *slot; |
2785 | const struct dw_mci_drv_data *drv_data = host->drv_data; | 2835 | int ret; |
2786 | int ctrl_id, ret; | ||
2787 | u32 freq[2]; | 2836 | u32 freq[2]; |
2788 | 2837 | ||
2789 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); | 2838 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); |
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host) | |||
2817 | if (!mmc->ocr_avail) | 2866 | if (!mmc->ocr_avail) |
2818 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 2867 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
2819 | 2868 | ||
2820 | if (host->pdata->caps) | ||
2821 | mmc->caps = host->pdata->caps; | ||
2822 | |||
2823 | /* | ||
2824 | * Support MMC_CAP_ERASE by default. | ||
2825 | * It needs to use trim/discard/erase commands. | ||
2826 | */ | ||
2827 | mmc->caps |= MMC_CAP_ERASE; | ||
2828 | |||
2829 | if (host->pdata->pm_caps) | ||
2830 | mmc->pm_caps = host->pdata->pm_caps; | ||
2831 | |||
2832 | if (host->dev->of_node) { | ||
2833 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2834 | if (ctrl_id < 0) | ||
2835 | ctrl_id = 0; | ||
2836 | } else { | ||
2837 | ctrl_id = to_platform_device(host->dev)->id; | ||
2838 | } | ||
2839 | if (drv_data && drv_data->caps) | ||
2840 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2841 | |||
2842 | if (host->pdata->caps2) | ||
2843 | mmc->caps2 = host->pdata->caps2; | ||
2844 | |||
2845 | ret = mmc_of_parse(mmc); | 2869 | ret = mmc_of_parse(mmc); |
2846 | if (ret) | 2870 | if (ret) |
2847 | goto err_host_allocated; | 2871 | goto err_host_allocated; |
2848 | 2872 | ||
2849 | /* Process SDIO IRQs through the sdio_irq_work. */ | 2873 | ret = dw_mci_init_slot_caps(slot); |
2850 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | 2874 | if (ret) |
2851 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | 2875 | goto err_host_allocated; |
2852 | 2876 | ||
2853 | /* Useful defaults if platform data is unset. */ | 2877 | /* Useful defaults if platform data is unset. */ |
2854 | if (host->use_dma == TRANS_MODE_IDMAC) { | 2878 | if (host->use_dma == TRANS_MODE_IDMAC) { |
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index e3124f06a47e..1424bd490dd1 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -543,6 +543,7 @@ struct dw_mci_slot { | |||
543 | /** | 543 | /** |
544 | * dw_mci driver data - dw-mshc implementation specific driver data. | 544 | * dw_mci driver data - dw-mshc implementation specific driver data. |
545 | * @caps: mmc subsystem specified capabilities of the controller(s). | 545 | * @caps: mmc subsystem specified capabilities of the controller(s). |
546 | * @num_caps: number of capabilities specified by @caps. | ||
546 | * @init: early implementation specific initialization. | 547 | * @init: early implementation specific initialization. |
547 | * @set_ios: handle bus specific extensions. | 548 | * @set_ios: handle bus specific extensions. |
548 | * @parse_dt: parse implementation specific device tree properties. | 549 | * @parse_dt: parse implementation specific device tree properties. |
@@ -554,6 +555,7 @@ struct dw_mci_slot { | |||
554 | */ | 555 | */ |
555 | struct dw_mci_drv_data { | 556 | struct dw_mci_drv_data { |
556 | unsigned long *caps; | 557 | unsigned long *caps; |
558 | u32 num_caps; | ||
557 | int (*init)(struct dw_mci *host); | 559 | int (*init)(struct dw_mci *host); |
558 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); | 560 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); |
559 | int (*parse_dt)(struct dw_mci *host); | 561 | int (*parse_dt)(struct dw_mci *host); |
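Each dw_mmc variant now publishes num_caps next to its caps array, and the new dw_mci_init_slot_caps() refuses a controller id that falls outside it instead of indexing past the end. A compact standalone version of that check (the cap values and array length are invented):

    #include <stdio.h>

    /* Per-variant driver data carries the caps array *and* its length. */
    struct drv_data {
            const unsigned long *caps;
            unsigned int num_caps;
    };

    static const unsigned long variant_caps[4] = { 0x1, 0x2, 0x4, 0x8 };
    static const struct drv_data variant = {
            .caps = variant_caps,
            .num_caps = sizeof(variant_caps) / sizeof(variant_caps[0]),
    };

    static int apply_caps(const struct drv_data *drv, int ctrl_id,
                          unsigned long *mmc_caps)
    {
            /* A DT alias can yield any index; reject ones past the array. */
            if (ctrl_id < 0 || (unsigned int)ctrl_id >= drv->num_caps)
                    return -1;
            *mmc_caps |= drv->caps[ctrl_id];
            return 0;
    }

    int main(void)
    {
            unsigned long caps = 0;
            int ok = apply_caps(&variant, 2, &caps);        /* in range */
            int bad = apply_caps(&variant, 9, &caps);       /* rejected */

            printf("ok=%d bad=%d caps=%#lx\n", ok, bad, caps);
            return 0;
    }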
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 6d1a983e6227..82c4f05f91d8 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot) | |||
654 | slot->chip->rpm_retune = intel_host->d3_retune; | 654 | slot->chip->rpm_retune = intel_host->d3_retune; |
655 | } | 655 | } |
656 | 656 | ||
657 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | 657 | static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) |
658 | { | ||
659 | int err = sdhci_execute_tuning(mmc, opcode); | ||
660 | struct sdhci_host *host = mmc_priv(mmc); | ||
661 | |||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | /* | ||
666 | * Tuning can leave the IP in an active state (Buffer Read Enable bit | ||
667 | * set) which prevents the entry to low power states (i.e. S0i3). Data | ||
668 | * reset will clear it. | ||
669 | */ | ||
670 | sdhci_reset(host, SDHCI_RESET_DATA); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | static void byt_probe_slot(struct sdhci_pci_slot *slot) | ||
658 | { | 676 | { |
677 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; | ||
678 | |||
659 | byt_read_dsm(slot); | 679 | byt_read_dsm(slot); |
680 | |||
681 | ops->execute_tuning = intel_execute_tuning; | ||
682 | } | ||
683 | |||
684 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | ||
685 | { | ||
686 | byt_probe_slot(slot); | ||
660 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | | 687 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | |
661 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | | 688 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | |
662 | MMC_CAP_CMD_DURING_TFR | | 689 | MMC_CAP_CMD_DURING_TFR | |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
779 | { | 806 | { |
780 | int err; | 807 | int err; |
781 | 808 | ||
782 | byt_read_dsm(slot); | 809 | byt_probe_slot(slot); |
783 | 810 | ||
784 | err = ni_set_max_freq(slot); | 811 | err = ni_set_max_freq(slot); |
785 | if (err) | 812 | if (err) |
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
792 | 819 | ||
793 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | 820 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) |
794 | { | 821 | { |
795 | byt_read_dsm(slot); | 822 | byt_probe_slot(slot); |
796 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | | 823 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | |
797 | MMC_CAP_WAIT_WHILE_BUSY; | 824 | MMC_CAP_WAIT_WHILE_BUSY; |
798 | return 0; | 825 | return 0; |
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
800 | 827 | ||
801 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) | 828 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) |
802 | { | 829 | { |
803 | byt_read_dsm(slot); | 830 | byt_probe_slot(slot); |
804 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | | 831 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | |
805 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; | 832 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; |
806 | slot->cd_idx = 0; | 833 | slot->cd_idx = 0; |
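intel_execute_tuning() wraps the generic sdhci_execute_tuning() and issues a data reset afterwards, since (per the comment in the hunk) tuning can leave Buffer Read Enable set and block low-power states; byt_probe_slot() installs the wrapper once per slot. A standalone sketch of the wrap-a-core-callback pattern (all names invented, the opcode value is arbitrary):

    #include <stdio.h>

    struct host_ops {
            int (*execute_tuning)(void *host, unsigned int opcode);
    };

    /* Generic implementation provided by the core layer (stand-in). */
    static int generic_execute_tuning(void *host, unsigned int opcode)
    {
            (void)host; (void)opcode;
            return 0;
    }

    /* Vendor wrapper: run the generic routine, then the extra quirk step. */
    static int vendor_execute_tuning(void *host, unsigned int opcode)
    {
            int err = generic_execute_tuning(host, opcode);

            if (err)
                    return err;
            puts("data-path reset after tuning");   /* the added step */
            return 0;
    }

    int main(void)
    {
            struct host_ops ops = { .execute_tuning = generic_execute_tuning };

            /* Probe-time override, done once per slot. */
            ops.execute_tuning = vendor_execute_tuning;
            return ops.execute_tuning(NULL, 19);
    }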
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..eb23f9ba1a9a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c | |||
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev) | |||
426 | struct net_device *netdev = pdata->netdev; | 426 | struct net_device *netdev = pdata->netdev; |
427 | int ret = 0; | 427 | int ret = 0; |
428 | 428 | ||
429 | XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); | ||
430 | |||
429 | pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; | 431 | pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; |
430 | XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); | 432 | XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); |
431 | 433 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 22889fc158f2..87c4308b52a7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
226 | goto err_ioremap; | 226 | goto err_ioremap; |
227 | 227 | ||
228 | self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); | 228 | self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); |
229 | if (!self->aq_hw) { | ||
230 | err = -ENOMEM; | ||
231 | goto err_ioremap; | ||
232 | } | ||
229 | self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); | 233 | self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); |
230 | 234 | ||
231 | for (bar = 0; bar < 4; ++bar) { | 235 | for (bar = 0; bar < 4; ++bar) { |
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
235 | mmio_pa = pci_resource_start(pdev, bar); | 239 | mmio_pa = pci_resource_start(pdev, bar); |
236 | if (mmio_pa == 0U) { | 240 | if (mmio_pa == 0U) { |
237 | err = -EIO; | 241 | err = -EIO; |
238 | goto err_ioremap; | 242 | goto err_free_aq_hw; |
239 | } | 243 | } |
240 | 244 | ||
241 | reg_sz = pci_resource_len(pdev, bar); | 245 | reg_sz = pci_resource_len(pdev, bar); |
242 | if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { | 246 | if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { |
243 | err = -EIO; | 247 | err = -EIO; |
244 | goto err_ioremap; | 248 | goto err_free_aq_hw; |
245 | } | 249 | } |
246 | 250 | ||
247 | self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); | 251 | self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); |
248 | if (!self->aq_hw->mmio) { | 252 | if (!self->aq_hw->mmio) { |
249 | err = -EIO; | 253 | err = -EIO; |
250 | goto err_ioremap; | 254 | goto err_free_aq_hw; |
251 | } | 255 | } |
252 | break; | 256 | break; |
253 | } | 257 | } |
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
255 | 259 | ||
256 | if (bar == 4) { | 260 | if (bar == 4) { |
257 | err = -EIO; | 261 | err = -EIO; |
258 | goto err_ioremap; | 262 | goto err_free_aq_hw; |
259 | } | 263 | } |
260 | 264 | ||
261 | numvecs = min((u8)AQ_CFG_VECS_DEF, | 265 | numvecs = min((u8)AQ_CFG_VECS_DEF, |
@@ -290,6 +294,8 @@ err_register: | |||
290 | aq_pci_free_irq_vectors(self); | 294 | aq_pci_free_irq_vectors(self); |
291 | err_hwinit: | 295 | err_hwinit: |
292 | iounmap(self->aq_hw->mmio); | 296 | iounmap(self->aq_hw->mmio); |
297 | err_free_aq_hw: | ||
298 | kfree(self->aq_hw); | ||
293 | err_ioremap: | 299 | err_ioremap: |
294 | free_netdev(ndev); | 300 | free_netdev(ndev); |
295 | err_pci_func: | 301 | err_pci_func: |
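aq_pci_probe() gains a NULL check on the aq_hw allocation plus a matching err_free_aq_hw label, so every later failure unwinds exactly what has been set up so far. A generic sketch of the goto-unwind idiom, with malloc/free standing in for the kernel allocations (the demo also frees on success just to stay leak-free):

    #include <stdio.h>
    #include <stdlib.h>

    /* Each failure jumps to the label that undoes everything allocated so
     * far; adding an allocation means adding a matching unwind label. */
    static int probe(int fail_at)
    {
            void *hw = NULL, *mmio = NULL;
            int err = 0;

            hw = malloc(64);
            if (!hw) { err = -1; goto err_out; }

            if (fail_at == 1) { err = -2; goto err_free_hw; }

            mmio = malloc(64);
            if (!mmio) { err = -3; goto err_free_hw; }

            if (fail_at == 2) { err = -4; goto err_unmap; }

            free(mmio);
            free(hw);
            return 0;

    err_unmap:
            free(mmio);
    err_free_hw:
            free(hw);
    err_out:
            return err;
    }

    int main(void)
    {
            printf("%d %d %d\n", probe(0), probe(1), probe(2));
            return 0;
    }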
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a77ee2f8fb8d..c1841db1b500 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us) | |||
820 | 820 | ||
821 | tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); | 821 | tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); |
822 | 822 | ||
823 | udelay(10); | 823 | usleep_range(10, 20); |
824 | timeout_us -= (timeout_us > 10) ? 10 : timeout_us; | 824 | timeout_us -= (timeout_us > 10) ? 10 : timeout_us; |
825 | } | 825 | } |
826 | 826 | ||
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event) | |||
922 | if (!(apedata & APE_FW_STATUS_READY)) | 922 | if (!(apedata & APE_FW_STATUS_READY)) |
923 | return -EAGAIN; | 923 | return -EAGAIN; |
924 | 924 | ||
925 | /* Wait for up to 1 millisecond for APE to service previous event. */ | 925 | /* Wait for up to 20 milliseconds for APE to service previous event. */ |
926 | err = tg3_ape_event_lock(tp, 1000); | 926 | err = tg3_ape_event_lock(tp, 20000); |
927 | if (err) | 927 | if (err) |
928 | return err; | 928 | return err; |
929 | 929 | ||
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
946 | 946 | ||
947 | switch (kind) { | 947 | switch (kind) { |
948 | case RESET_KIND_INIT: | 948 | case RESET_KIND_INIT: |
949 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); | ||
949 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, | 950 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, |
950 | APE_HOST_SEG_SIG_MAGIC); | 951 | APE_HOST_SEG_SIG_MAGIC); |
951 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, | 952 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, |
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
962 | event = APE_EVENT_STATUS_STATE_START; | 963 | event = APE_EVENT_STATUS_STATE_START; |
963 | break; | 964 | break; |
964 | case RESET_KIND_SHUTDOWN: | 965 | case RESET_KIND_SHUTDOWN: |
965 | /* With the interface we are currently using, | ||
966 | * APE does not track driver state. Wiping | ||
967 | * out the HOST SEGMENT SIGNATURE forces | ||
968 | * the APE to assume OS absent status. | ||
969 | */ | ||
970 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); | ||
971 | |||
972 | if (device_may_wakeup(&tp->pdev->dev) && | 966 | if (device_may_wakeup(&tp->pdev->dev) && |
973 | tg3_flag(tp, WOL_ENABLE)) { | 967 | tg3_flag(tp, WOL_ENABLE)) { |
974 | tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, | 968 | tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, |
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
990 | tg3_ape_send_event(tp, event); | 984 | tg3_ape_send_event(tp, event); |
991 | } | 985 | } |
992 | 986 | ||
987 | static void tg3_send_ape_heartbeat(struct tg3 *tp, | ||
988 | unsigned long interval) | ||
989 | { | ||
990 | /* Check if the hb interval has elapsed */ | ||
991 | if (!tg3_flag(tp, ENABLE_APE) || | ||
992 | time_before(jiffies, tp->ape_hb_jiffies + interval)) | ||
993 | return; | ||
994 | |||
995 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); | ||
996 | tp->ape_hb_jiffies = jiffies; | ||
997 | } | ||
998 | |||
993 | static void tg3_disable_ints(struct tg3 *tp) | 999 | static void tg3_disable_ints(struct tg3 *tp) |
994 | { | 1000 | { |
995 | int i; | 1001 | int i; |
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) | |||
7262 | } | 7268 | } |
7263 | } | 7269 | } |
7264 | 7270 | ||
7271 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); | ||
7265 | return work_done; | 7272 | return work_done; |
7266 | 7273 | ||
7267 | tx_recovery: | 7274 | tx_recovery: |
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
7344 | } | 7351 | } |
7345 | } | 7352 | } |
7346 | 7353 | ||
7354 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); | ||
7347 | return work_done; | 7355 | return work_done; |
7348 | 7356 | ||
7349 | tx_recovery: | 7357 | tx_recovery: |
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) | |||
10732 | if (tg3_flag(tp, ENABLE_APE)) | 10740 | if (tg3_flag(tp, ENABLE_APE)) |
10733 | /* Write our heartbeat update interval to APE. */ | 10741 | /* Write our heartbeat update interval to APE. */ |
10734 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, | 10742 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, |
10735 | APE_HOST_HEARTBEAT_INT_DISABLE); | 10743 | APE_HOST_HEARTBEAT_INT_5SEC); |
10736 | 10744 | ||
10737 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); | 10745 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); |
10738 | 10746 | ||
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t) | |||
11077 | tp->asf_counter = tp->asf_multiplier; | 11085 | tp->asf_counter = tp->asf_multiplier; |
11078 | } | 11086 | } |
11079 | 11087 | ||
11088 | /* Update the APE heartbeat every 5 seconds.*/ | ||
11089 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); | ||
11090 | |||
11080 | spin_unlock(&tp->lock); | 11091 | spin_unlock(&tp->lock); |
11081 | 11092 | ||
11082 | restart_timer: | 11093 | restart_timer: |
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) | |||
16653 | pci_state_reg); | 16664 | pci_state_reg); |
16654 | 16665 | ||
16655 | tg3_ape_lock_init(tp); | 16666 | tg3_ape_lock_init(tp); |
16667 | tp->ape_hb_interval = | ||
16668 | msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); | ||
16656 | } | 16669 | } |
16657 | 16670 | ||
16658 | /* Set up tp->grc_local_ctrl before calling | 16671 | /* Set up tp->grc_local_ctrl before calling |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 47f51cc0566d..1d61aa3efda1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -2508,6 +2508,7 @@ | |||
2508 | #define TG3_APE_LOCK_PHY3 5 | 2508 | #define TG3_APE_LOCK_PHY3 5 |
2509 | #define TG3_APE_LOCK_GPIO 7 | 2509 | #define TG3_APE_LOCK_GPIO 7 |
2510 | 2510 | ||
2511 | #define TG3_APE_HB_INTERVAL (tp->ape_hb_interval) | ||
2511 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 | 2512 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 |
2512 | 2513 | ||
2513 | 2514 | ||
@@ -3423,6 +3424,10 @@ struct tg3 { | |||
3423 | struct device *hwmon_dev; | 3424 | struct device *hwmon_dev; |
3424 | bool link_up; | 3425 | bool link_up; |
3425 | bool pcierr_recovery; | 3426 | bool pcierr_recovery; |
3427 | |||
3428 | u32 ape_hb; | ||
3429 | unsigned long ape_hb_interval; | ||
3430 | unsigned long ape_hb_jiffies; | ||
3426 | }; | 3431 | }; |
3427 | 3432 | ||
3428 | /* Accessor macros for chip and asic attributes | 3433 | /* Accessor macros for chip and asic attributes |
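The driver now refreshes the APE heartbeat from the timer and the NAPI poll paths, but tg3_send_ape_heartbeat() only touches the register once the configured interval has passed, using a jiffies comparison. A standalone sketch of that rate limiting, with a fake counter standing in for jiffies and the kernel's time_before() open-coded:

    #include <stdio.h>

    /* Simplified stand-ins for jiffies and time_before(). */
    static unsigned long fake_jiffies;
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    struct nic {
            unsigned long hb_count;
            unsigned long hb_last;
            unsigned long hb_interval;
    };

    /* Called from hot paths; only "writes the register" when due. */
    static void send_heartbeat(struct nic *tp)
    {
            if (time_before(fake_jiffies, tp->hb_last + tp->hb_interval))
                    return;
            tp->hb_count++;                 /* the register write in the driver */
            tp->hb_last = fake_jiffies;
    }

    int main(void)
    {
            struct nic tp = { .hb_interval = 5 };

            for (fake_jiffies = 0; fake_jiffies < 20; fake_jiffies++)
                    send_heartbeat(&tp);
            printf("heartbeats sent: %lu\n", tp.hb_count);  /* 3 with these numbers */
            return 0;
    }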
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index c87c9c684a33..d59497a7bdce 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c | |||
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get); | |||
75 | 75 | ||
76 | void cavium_ptp_put(struct cavium_ptp *ptp) | 76 | void cavium_ptp_put(struct cavium_ptp *ptp) |
77 | { | 77 | { |
78 | if (!ptp) | ||
79 | return; | ||
78 | pci_dev_put(ptp->pdev); | 80 | pci_dev_put(ptp->pdev); |
79 | } | 81 | } |
80 | EXPORT_SYMBOL(cavium_ptp_put); | 82 | EXPORT_SYMBOL(cavium_ptp_put); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index b68cde9f17d2..7d9c5ffbd041 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO); | |||
67 | MODULE_PARM_DESC(cpi_alg, | 67 | MODULE_PARM_DESC(cpi_alg, |
68 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); | 68 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); |
69 | 69 | ||
70 | struct nicvf_xdp_tx { | ||
71 | u64 dma_addr; | ||
72 | u8 qidx; | ||
73 | }; | ||
74 | |||
75 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) | 70 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) |
76 | { | 71 | { |
77 | if (nic->sqs_mode) | 72 | if (nic->sqs_mode) |
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic) | |||
507 | return 0; | 502 | return 0; |
508 | } | 503 | } |
509 | 504 | ||
510 | static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) | ||
511 | { | ||
512 | /* Check if it's a recycled page, if not unmap the DMA mapping. | ||
513 | * Recycled page holds an extra reference. | ||
514 | */ | ||
515 | if (page_ref_count(page) == 1) { | ||
516 | dma_addr &= PAGE_MASK; | ||
517 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
518 | RCV_FRAG_LEN + XDP_HEADROOM, | ||
519 | DMA_FROM_DEVICE, | ||
520 | DMA_ATTR_SKIP_CPU_SYNC); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | 505 | static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, |
525 | struct cqe_rx_t *cqe_rx, struct snd_queue *sq, | 506 | struct cqe_rx_t *cqe_rx, struct snd_queue *sq, |
526 | struct rcv_queue *rq, struct sk_buff **skb) | 507 | struct rcv_queue *rq, struct sk_buff **skb) |
527 | { | 508 | { |
528 | struct xdp_buff xdp; | 509 | struct xdp_buff xdp; |
529 | struct page *page; | 510 | struct page *page; |
530 | struct nicvf_xdp_tx *xdp_tx = NULL; | ||
531 | u32 action; | 511 | u32 action; |
532 | u16 len, err, offset = 0; | 512 | u16 len, offset = 0; |
533 | u64 dma_addr, cpu_addr; | 513 | u64 dma_addr, cpu_addr; |
534 | void *orig_data; | 514 | void *orig_data; |
535 | 515 | ||
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
543 | cpu_addr = (u64)phys_to_virt(cpu_addr); | 523 | cpu_addr = (u64)phys_to_virt(cpu_addr); |
544 | page = virt_to_page((void *)cpu_addr); | 524 | page = virt_to_page((void *)cpu_addr); |
545 | 525 | ||
546 | xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; | 526 | xdp.data_hard_start = page_address(page); |
547 | xdp.data = (void *)cpu_addr; | 527 | xdp.data = (void *)cpu_addr; |
548 | xdp_set_data_meta_invalid(&xdp); | 528 | xdp_set_data_meta_invalid(&xdp); |
549 | xdp.data_end = xdp.data + len; | 529 | xdp.data_end = xdp.data + len; |
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
563 | 543 | ||
564 | switch (action) { | 544 | switch (action) { |
565 | case XDP_PASS: | 545 | case XDP_PASS: |
566 | nicvf_unmap_page(nic, page, dma_addr); | 546 | /* Check if it's a recycled page, if not |
547 | * unmap the DMA mapping. | ||
548 | * | ||
549 | * Recycled page holds an extra reference. | ||
550 | */ | ||
551 | if (page_ref_count(page) == 1) { | ||
552 | dma_addr &= PAGE_MASK; | ||
553 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
554 | RCV_FRAG_LEN + XDP_PACKET_HEADROOM, | ||
555 | DMA_FROM_DEVICE, | ||
556 | DMA_ATTR_SKIP_CPU_SYNC); | ||
557 | } | ||
567 | 558 | ||
568 | /* Build SKB and pass on packet to network stack */ | 559 | /* Build SKB and pass on packet to network stack */ |
569 | *skb = build_skb(xdp.data, | 560 | *skb = build_skb(xdp.data, |
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
576 | case XDP_TX: | 567 | case XDP_TX: |
577 | nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); | 568 | nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); |
578 | return true; | 569 | return true; |
579 | case XDP_REDIRECT: | ||
580 | /* Save DMA address for use while transmitting */ | ||
581 | xdp_tx = (struct nicvf_xdp_tx *)page_address(page); | ||
582 | xdp_tx->dma_addr = dma_addr; | ||
583 | xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); | ||
584 | |||
585 | err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); | ||
586 | if (!err) | ||
587 | return true; | ||
588 | |||
589 | /* Free the page on error */ | ||
590 | nicvf_unmap_page(nic, page, dma_addr); | ||
591 | put_page(page); | ||
592 | break; | ||
593 | default: | 570 | default: |
594 | bpf_warn_invalid_xdp_action(action); | 571 | bpf_warn_invalid_xdp_action(action); |
595 | /* fall through */ | 572 | /* fall through */ |
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
597 | trace_xdp_exception(nic->netdev, prog, action); | 574 | trace_xdp_exception(nic->netdev, prog, action); |
598 | /* fall through */ | 575 | /* fall through */ |
599 | case XDP_DROP: | 576 | case XDP_DROP: |
600 | nicvf_unmap_page(nic, page, dma_addr); | 577 | /* Check if it's a recycled page, if not |
578 | * unmap the DMA mapping. | ||
579 | * | ||
580 | * Recycled page holds an extra reference. | ||
581 | */ | ||
582 | if (page_ref_count(page) == 1) { | ||
583 | dma_addr &= PAGE_MASK; | ||
584 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
585 | RCV_FRAG_LEN + XDP_PACKET_HEADROOM, | ||
586 | DMA_FROM_DEVICE, | ||
587 | DMA_ATTR_SKIP_CPU_SYNC); | ||
588 | } | ||
601 | put_page(page); | 589 | put_page(page); |
602 | return true; | 590 | return true; |
603 | } | 591 | } |
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) | |||
1864 | } | 1852 | } |
1865 | } | 1853 | } |
1866 | 1854 | ||
1867 | static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp) | ||
1868 | { | ||
1869 | struct nicvf *nic = netdev_priv(netdev); | ||
1870 | struct nicvf *snic = nic; | ||
1871 | struct nicvf_xdp_tx *xdp_tx; | ||
1872 | struct snd_queue *sq; | ||
1873 | struct page *page; | ||
1874 | int err, qidx; | ||
1875 | |||
1876 | if (!netif_running(netdev) || !nic->xdp_prog) | ||
1877 | return -EINVAL; | ||
1878 | |||
1879 | page = virt_to_page(xdp->data); | ||
1880 | xdp_tx = (struct nicvf_xdp_tx *)page_address(page); | ||
1881 | qidx = xdp_tx->qidx; | ||
1882 | |||
1883 | if (xdp_tx->qidx >= nic->xdp_tx_queues) | ||
1884 | return -EINVAL; | ||
1885 | |||
1886 | /* Get secondary Qset's info */ | ||
1887 | if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) { | ||
1888 | qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS; | ||
1889 | snic = (struct nicvf *)nic->snicvf[qidx - 1]; | ||
1890 | if (!snic) | ||
1891 | return -EINVAL; | ||
1892 | qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS; | ||
1893 | } | ||
1894 | |||
1895 | sq = &snic->qs->sq[qidx]; | ||
1896 | err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data, | ||
1897 | xdp_tx->dma_addr, | ||
1898 | xdp->data_end - xdp->data); | ||
1899 | if (err) | ||
1900 | return -ENOMEM; | ||
1901 | |||
1902 | nicvf_xdp_sq_doorbell(snic, sq, qidx); | ||
1903 | return 0; | ||
1904 | } | ||
1905 | |||
1906 | static void nicvf_xdp_flush(struct net_device *dev) | ||
1907 | { | ||
1908 | return; | ||
1909 | } | ||
1910 | |||
1911 | static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) | 1855 | static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) |
1912 | { | 1856 | { |
1913 | struct hwtstamp_config config; | 1857 | struct hwtstamp_config config; |
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = { | |||
1986 | .ndo_fix_features = nicvf_fix_features, | 1930 | .ndo_fix_features = nicvf_fix_features, |
1987 | .ndo_set_features = nicvf_set_features, | 1931 | .ndo_set_features = nicvf_set_features, |
1988 | .ndo_bpf = nicvf_xdp, | 1932 | .ndo_bpf = nicvf_xdp, |
1989 | .ndo_xdp_xmit = nicvf_xdp_xmit, | ||
1990 | .ndo_xdp_flush = nicvf_xdp_flush, | ||
1991 | .ndo_do_ioctl = nicvf_ioctl, | 1933 | .ndo_do_ioctl = nicvf_ioctl, |
1992 | }; | 1934 | }; |
1993 | 1935 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 3eae9ff9b53a..d42704d07484 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, | |||
204 | 204 | ||
205 | /* Reserve space for header modifications by BPF program */ | 205 | /* Reserve space for header modifications by BPF program */ |
206 | if (rbdr->is_xdp) | 206 | if (rbdr->is_xdp) |
207 | buf_len += XDP_HEADROOM; | 207 | buf_len += XDP_PACKET_HEADROOM; |
208 | 208 | ||
209 | /* Check if it's recycled */ | 209 | /* Check if it's recycled */ |
210 | if (pgcache) | 210 | if (pgcache) |
@@ -224,9 +224,8 @@ ret: | |||
224 | nic->rb_page = NULL; | 224 | nic->rb_page = NULL; |
225 | return -ENOMEM; | 225 | return -ENOMEM; |
226 | } | 226 | } |
227 | |||
228 | if (pgcache) | 227 | if (pgcache) |
229 | pgcache->dma_addr = *rbuf + XDP_HEADROOM; | 228 | pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM; |
230 | nic->rb_page_offset += buf_len; | 229 | nic->rb_page_offset += buf_len; |
231 | } | 230 | } |
232 | 231 | ||
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, | |||
1244 | int qentry; | 1243 | int qentry; |
1245 | 1244 | ||
1246 | if (subdesc_cnt > sq->xdp_free_cnt) | 1245 | if (subdesc_cnt > sq->xdp_free_cnt) |
1247 | return -1; | 1246 | return 0; |
1248 | 1247 | ||
1249 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); | 1248 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); |
1250 | 1249 | ||
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, | |||
1255 | 1254 | ||
1256 | sq->xdp_desc_cnt += subdesc_cnt; | 1255 | sq->xdp_desc_cnt += subdesc_cnt; |
1257 | 1256 | ||
1258 | return 0; | 1257 | return 1; |
1259 | } | 1258 | } |
1260 | 1259 | ||
1261 | /* Calculate no of SQ subdescriptors needed to transmit all | 1260 | /* Calculate no of SQ subdescriptors needed to transmit all |
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, | |||
1656 | if (page_ref_count(page) != 1) | 1655 | if (page_ref_count(page) != 1) |
1657 | return; | 1656 | return; |
1658 | 1657 | ||
1659 | len += XDP_HEADROOM; | 1658 | len += XDP_PACKET_HEADROOM; |
1660 | /* Receive buffers in XDP mode are mapped from page start */ | 1659 | /* Receive buffers in XDP mode are mapped from page start */ |
1661 | dma_addr &= PAGE_MASK; | 1660 | dma_addr &= PAGE_MASK; |
1662 | } | 1661 | } |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index ce1eed7a6d63..5e9a03cf1b4d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/iommu.h> | 13 | #include <linux/iommu.h> |
14 | #include <linux/bpf.h> | ||
15 | #include <net/xdp.h> | 14 | #include <net/xdp.h> |
16 | #include "q_struct.h" | 15 | #include "q_struct.h" |
17 | 16 | ||
@@ -94,9 +93,6 @@ | |||
94 | #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ | 93 | #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ |
95 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) | 94 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
96 | 95 | ||
97 | #define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */ | ||
98 | #define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM) | ||
99 | |||
100 | #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ | 96 | #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ |
101 | MAX_CQE_PER_PKT_XMIT) | 97 | MAX_CQE_PER_PKT_XMIT) |
102 | 98 | ||
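With XDP_REDIRECT support removed, receive buffers go back to plain XDP_PACKET_HEADROOM and the DMA unmap is done inline for the PASS and DROP actions, while XDP_TX keeps the mapping because the buffer is handed straight to a send queue. A simplified model of that per-action handling; the refcount test stands in for page_ref_count():

    #include <stdio.h>

    enum action { ACT_PASS, ACT_TX, ACT_DROP };

    /* Buffers still shared (refcount > 1) stay mapped for recycling;
     * otherwise the mapping is torn down before the buffer leaves the ring. */
    static void maybe_unmap(int refcount)
    {
            if (refcount == 1)
                    puts("dma unmap");
    }

    static void handle(enum action act, int refcount)
    {
            switch (act) {
            case ACT_PASS:
                    maybe_unmap(refcount);
                    puts("build skb, hand packet to the stack");
                    break;
            case ACT_TX:
                    puts("append to send queue (mapping reused)");
                    break;
            case ACT_DROP:
                    maybe_unmap(refcount);
                    puts("recycle page");
                    break;
            }
    }

    int main(void)
    {
            handle(ACT_PASS, 1);
            handle(ACT_TX, 2);
            handle(ACT_DROP, 1);
            return 0;
    }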
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 557fd8bfd54e..00a1d2d13169 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | |||
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, | |||
472 | 472 | ||
473 | if (is_t6(padap->params.chip)) { | 473 | if (is_t6(padap->params.chip)) { |
474 | size = padap->params.cim_la_size / 10 + 1; | 474 | size = padap->params.cim_la_size / 10 + 1; |
475 | size *= 11 * sizeof(u32); | 475 | size *= 10 * sizeof(u32); |
476 | } else { | 476 | } else { |
477 | size = padap->params.cim_la_size / 8; | 477 | size = padap->params.cim_la_size / 8; |
478 | size *= 8 * sizeof(u32); | 478 | size *= 8 * sizeof(u32); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 30485f9a598f..143686c60234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | |||
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) | |||
102 | case CUDBG_CIM_LA: | 102 | case CUDBG_CIM_LA: |
103 | if (is_t6(adap->params.chip)) { | 103 | if (is_t6(adap->params.chip)) { |
104 | len = adap->params.cim_la_size / 10 + 1; | 104 | len = adap->params.cim_la_size / 10 + 1; |
105 | len *= 11 * sizeof(u32); | 105 | len *= 10 * sizeof(u32); |
106 | } else { | 106 | } else { |
107 | len = adap->params.cim_la_size / 8; | 107 | len = adap->params.cim_la_size / 8; |
108 | len *= 8 * sizeof(u32); | 108 | len *= 8 * sizeof(u32); |
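Both call sites now size the T6 CIM logic-analyzer buffer as (cim_la_size / 10 + 1) rows of 10 32-bit words rather than 11. With a purely illustrative cim_la_size, the difference looks like this:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cim_la_size = 2048;        /* invented example value */
            unsigned int rows = cim_la_size / 10 + 1;

            /* 4 bytes per word, i.e. sizeof(u32) */
            printf("rows=%u new=%u bytes old=%u bytes\n",
                   rows, rows * 10 * 4, rows * 11 * 4);
            return 0;
    }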
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 56bc626ef006..7b452e85de2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) | |||
4982 | 4982 | ||
4983 | pcie_fw = readl(adap->regs + PCIE_FW_A); | 4983 | pcie_fw = readl(adap->regs + PCIE_FW_A); |
4984 | /* Check if cxgb4 is the MASTER and fw is initialized */ | 4984 | /* Check if cxgb4 is the MASTER and fw is initialized */ |
4985 | if (!(pcie_fw & PCIE_FW_INIT_F) || | 4985 | if (num_vfs && |
4986 | (!(pcie_fw & PCIE_FW_INIT_F) || | ||
4986 | !(pcie_fw & PCIE_FW_MASTER_VLD_F) || | 4987 | !(pcie_fw & PCIE_FW_MASTER_VLD_F) || |
4987 | PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { | 4988 | PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { |
4988 | dev_warn(&pdev->dev, | 4989 | dev_warn(&pdev->dev, |
4989 | "cxgb4 driver needs to be MASTER to support SRIOV\n"); | 4990 | "cxgb4 driver needs to be MASTER to support SRIOV\n"); |
4990 | return -EOPNOTSUPP; | 4991 | return -EOPNOTSUPP; |
@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev) | |||
5599 | #if IS_ENABLED(CONFIG_IPV6) | 5600 | #if IS_ENABLED(CONFIG_IPV6) |
5600 | t4_cleanup_clip_tbl(adapter); | 5601 | t4_cleanup_clip_tbl(adapter); |
5601 | #endif | 5602 | #endif |
5602 | iounmap(adapter->regs); | ||
5603 | if (!is_t4(adapter->params.chip)) | 5603 | if (!is_t4(adapter->params.chip)) |
5604 | iounmap(adapter->bar2); | 5604 | iounmap(adapter->bar2); |
5605 | pci_disable_pcie_error_reporting(pdev); | ||
5606 | if ((adapter->flags & DEV_ENABLED)) { | ||
5607 | pci_disable_device(pdev); | ||
5608 | adapter->flags &= ~DEV_ENABLED; | ||
5609 | } | ||
5610 | pci_release_regions(pdev); | ||
5611 | kfree(adapter->mbox_log); | ||
5612 | synchronize_rcu(); | ||
5613 | kfree(adapter); | ||
5614 | } | 5605 | } |
5615 | #ifdef CONFIG_PCI_IOV | 5606 | #ifdef CONFIG_PCI_IOV |
5616 | else { | 5607 | else { |
5617 | cxgb4_iov_configure(adapter->pdev, 0); | 5608 | cxgb4_iov_configure(adapter->pdev, 0); |
5618 | } | 5609 | } |
5619 | #endif | 5610 | #endif |
5611 | iounmap(adapter->regs); | ||
5612 | pci_disable_pcie_error_reporting(pdev); | ||
5613 | if ((adapter->flags & DEV_ENABLED)) { | ||
5614 | pci_disable_device(pdev); | ||
5615 | adapter->flags &= ~DEV_ENABLED; | ||
5616 | } | ||
5617 | pci_release_regions(pdev); | ||
5618 | kfree(adapter->mbox_log); | ||
5619 | synchronize_rcu(); | ||
5620 | kfree(adapter); | ||
5620 | } | 5621 | } |
5621 | 5622 | ||
5622 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt | 5623 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 047609ef0515..920bccd6bc40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) | |||
2637 | } | 2637 | } |
2638 | 2638 | ||
2639 | #define EEPROM_STAT_ADDR 0x7bfc | 2639 | #define EEPROM_STAT_ADDR 0x7bfc |
2640 | #define VPD_SIZE 0x800 | ||
2641 | #define VPD_BASE 0x400 | 2640 | #define VPD_BASE 0x400 |
2642 | #define VPD_BASE_OLD 0 | 2641 | #define VPD_BASE_OLD 0 |
2643 | #define VPD_LEN 1024 | 2642 | #define VPD_LEN 1024 |
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
2704 | if (!vpd) | 2703 | if (!vpd) |
2705 | return -ENOMEM; | 2704 | return -ENOMEM; |
2706 | 2705 | ||
2707 | /* We have two VPD data structures stored in the adapter VPD area. | ||
2708 | * By default, Linux calculates the size of the VPD area by traversing | ||
2709 | * the first VPD area at offset 0x0, so we need to tell the OS what | ||
2710 | * our real VPD size is. | ||
2711 | */ | ||
2712 | ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); | ||
2713 | if (ret < 0) | ||
2714 | goto out; | ||
2715 | |||
2716 | /* Card information normally starts at VPD_BASE but early cards had | 2706 | /* Card information normally starts at VPD_BASE but early cards had |
2717 | * it at 0. | 2707 | * it at 0. |
2718 | */ | 2708 | */ |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3bdeb295514b..f5c87bd35fa1 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, | |||
2934 | { | 2934 | { |
2935 | int size = lstatus & BD_LENGTH_MASK; | 2935 | int size = lstatus & BD_LENGTH_MASK; |
2936 | struct page *page = rxb->page; | 2936 | struct page *page = rxb->page; |
2937 | bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); | ||
2938 | |||
2939 | /* Remove the FCS from the packet length */ | ||
2940 | if (last) | ||
2941 | size -= ETH_FCS_LEN; | ||
2942 | 2937 | ||
2943 | if (likely(first)) { | 2938 | if (likely(first)) { |
2944 | skb_put(skb, size); | 2939 | skb_put(skb, size); |
2945 | } else { | 2940 | } else { |
2946 | /* the last fragments' length contains the full frame length */ | 2941 | /* the last fragments' length contains the full frame length */ |
2947 | if (last) | 2942 | if (lstatus & BD_LFLAG(RXBD_LAST)) |
2948 | size -= skb->len; | 2943 | size -= skb->len; |
2949 | 2944 | ||
2950 | /* Add the last fragment if it contains something other than | 2945 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
2951 | * the FCS, otherwise drop it and trim off any part of the FCS | 2946 | rxb->page_offset + RXBUF_ALIGNMENT, |
2952 | * that was already received. | 2947 | size, GFAR_RXB_TRUESIZE); |
2953 | */ | ||
2954 | if (size > 0) | ||
2955 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
2956 | rxb->page_offset + RXBUF_ALIGNMENT, | ||
2957 | size, GFAR_RXB_TRUESIZE); | ||
2958 | else if (size < 0) | ||
2959 | pskb_trim(skb, skb->len + size); | ||
2960 | } | 2948 | } |
2961 | 2949 | ||
2962 | /* try reuse page */ | 2950 | /* try reuse page */ |
@@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) | |||
3069 | if (priv->padding) | 3057 | if (priv->padding) |
3070 | skb_pull(skb, priv->padding); | 3058 | skb_pull(skb, priv->padding); |
3071 | 3059 | ||
3060 | /* Trim off the FCS */ | ||
3061 | pskb_trim(skb, skb->len - ETH_FCS_LEN); | ||
3062 | |||
3072 | if (ndev->features & NETIF_F_RXCSUM) | 3063 | if (ndev->features & NETIF_F_RXCSUM) |
3073 | gfar_rx_checksum(skb, fcb); | 3064 | gfar_rx_checksum(skb, fcb); |
3074 | 3065 | ||
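The FCS is no longer subtracted fragment-by-fragment in gfar_add_rx_frag(); instead gfar_process_frame() trims it once from the fully assembled skb, which avoids the partial-FCS corner cases the removed code had to handle. A tiny standalone illustration of trimming after assembly:

    #include <stdio.h>
    #include <string.h>

    #define FCS_LEN 4       /* Ethernet frame check sequence */

    int main(void)
    {
            unsigned char frame[64];
            size_t len = 0;

            /* Assemble the frame from two fragments first... */
            memset(frame + len, 0xaa, 30); len += 30;
            memset(frame + len, 0xbb, 22); len += 22;   /* tail includes the FCS */

            /* ...then drop the FCS once, instead of accounting for it while
             * copying each fragment. */
            len -= FCS_LEN;

            printf("frame length without FCS: %zu\n", len);
            return 0;
    }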
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 27447260215d..1b3cc8bb0705 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev) | |||
791 | return 0; | 791 | return 0; |
792 | } | 792 | } |
793 | 793 | ||
794 | static void release_login_buffer(struct ibmvnic_adapter *adapter) | ||
795 | { | ||
796 | kfree(adapter->login_buf); | ||
797 | adapter->login_buf = NULL; | ||
798 | } | ||
799 | |||
800 | static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) | ||
801 | { | ||
802 | kfree(adapter->login_rsp_buf); | ||
803 | adapter->login_rsp_buf = NULL; | ||
804 | } | ||
805 | |||
794 | static void release_resources(struct ibmvnic_adapter *adapter) | 806 | static void release_resources(struct ibmvnic_adapter *adapter) |
795 | { | 807 | { |
796 | int i; | 808 | int i; |
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter) | |||
813 | } | 825 | } |
814 | } | 826 | } |
815 | } | 827 | } |
828 | kfree(adapter->napi); | ||
829 | adapter->napi = NULL; | ||
830 | |||
831 | release_login_rsp_buffer(adapter); | ||
816 | } | 832 | } |
817 | 833 | ||
818 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) | 834 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) |
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev) | |||
1057 | return rc; | 1073 | return rc; |
1058 | } | 1074 | } |
1059 | 1075 | ||
1076 | static void clean_rx_pools(struct ibmvnic_adapter *adapter) | ||
1077 | { | ||
1078 | struct ibmvnic_rx_pool *rx_pool; | ||
1079 | u64 rx_entries; | ||
1080 | int rx_scrqs; | ||
1081 | int i, j; | ||
1082 | |||
1083 | if (!adapter->rx_pool) | ||
1084 | return; | ||
1085 | |||
1086 | rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); | ||
1087 | rx_entries = adapter->req_rx_add_entries_per_subcrq; | ||
1088 | |||
1089 | /* Free any remaining skbs in the rx buffer pools */ | ||
1090 | for (i = 0; i < rx_scrqs; i++) { | ||
1091 | rx_pool = &adapter->rx_pool[i]; | ||
1092 | if (!rx_pool) | ||
1093 | continue; | ||
1094 | |||
1095 | netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); | ||
1096 | for (j = 0; j < rx_entries; j++) { | ||
1097 | if (rx_pool->rx_buff[j].skb) { | ||
1098 | dev_kfree_skb_any(rx_pool->rx_buff[j].skb); | ||
1099 | rx_pool->rx_buff[j].skb = NULL; | ||
1100 | } | ||
1101 | } | ||
1102 | } | ||
1103 | } | ||
1104 | |||
1060 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) | 1105 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) |
1061 | { | 1106 | { |
1062 | struct ibmvnic_tx_pool *tx_pool; | 1107 | struct ibmvnic_tx_pool *tx_pool; |
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev) | |||
1134 | } | 1179 | } |
1135 | } | 1180 | } |
1136 | } | 1181 | } |
1137 | 1182 | clean_rx_pools(adapter); | |
1138 | clean_tx_pools(adapter); | 1183 | clean_tx_pools(adapter); |
1139 | adapter->state = VNIC_CLOSED; | 1184 | adapter->state = VNIC_CLOSED; |
1140 | return rc; | 1185 | return rc; |
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1670 | return 0; | 1715 | return 0; |
1671 | } | 1716 | } |
1672 | 1717 | ||
1673 | netif_carrier_on(netdev); | ||
1674 | |||
1675 | /* kick napi */ | 1718 | /* kick napi */ |
1676 | for (i = 0; i < adapter->req_rx_queues; i++) | 1719 | for (i = 0; i < adapter->req_rx_queues; i++) |
1677 | napi_schedule(&adapter->napi[i]); | 1720 | napi_schedule(&adapter->napi[i]); |
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1679 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) | 1722 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) |
1680 | netdev_notify_peers(netdev); | 1723 | netdev_notify_peers(netdev); |
1681 | 1724 | ||
1725 | netif_carrier_on(netdev); | ||
1726 | |||
1682 | return 0; | 1727 | return 0; |
1683 | } | 1728 | } |
1684 | 1729 | ||
@@ -1853,6 +1898,12 @@ restart_poll: | |||
1853 | be16_to_cpu(next->rx_comp.rc)); | 1898 | be16_to_cpu(next->rx_comp.rc)); |
1854 | /* free the entry */ | 1899 | /* free the entry */ |
1855 | next->rx_comp.first = 0; | 1900 | next->rx_comp.first = 0; |
1901 | dev_kfree_skb_any(rx_buff->skb); | ||
1902 | remove_buff_from_pool(adapter, rx_buff); | ||
1903 | continue; | ||
1904 | } else if (!rx_buff->skb) { | ||
1905 | /* free the entry */ | ||
1906 | next->rx_comp.first = 0; | ||
1856 | remove_buff_from_pool(adapter, rx_buff); | 1907 | remove_buff_from_pool(adapter, rx_buff); |
1857 | continue; | 1908 | continue; |
1858 | } | 1909 | } |
@@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter) | |||
3013 | struct vnic_login_client_data *vlcd; | 3064 | struct vnic_login_client_data *vlcd; |
3014 | int i; | 3065 | int i; |
3015 | 3066 | ||
3067 | release_login_rsp_buffer(adapter); | ||
3016 | client_data_len = vnic_client_data_len(adapter); | 3068 | client_data_len = vnic_client_data_len(adapter); |
3017 | 3069 | ||
3018 | buffer_size = | 3070 | buffer_size = |
@@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, | |||
3738 | ibmvnic_remove(adapter->vdev); | 3790 | ibmvnic_remove(adapter->vdev); |
3739 | return -EIO; | 3791 | return -EIO; |
3740 | } | 3792 | } |
3793 | release_login_buffer(adapter); | ||
3741 | complete(&adapter->init_done); | 3794 | complete(&adapter->init_done); |
3742 | 3795 | ||
3743 | return 0; | 3796 | return 0; |
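release_login_buffer() and release_login_rsp_buffer() free the buffer and clear the pointer, so they can be called from several teardown points (release_resources(), send_login(), handle_login_rsp()) without risking a double free, and clean_rx_pools() gives the close path the same kind of sweep for leftover rx skbs that clean_tx_pools() already did for tx. A minimal sketch of the free-and-clear helper:

    #include <stdio.h>
    #include <stdlib.h>

    struct adapter {
            void *login_buf;
    };

    /* Free-and-clear: safe to call again, since free(NULL) is a no-op. */
    static void release_login_buffer(struct adapter *a)
    {
            free(a->login_buf);
            a->login_buf = NULL;
    }

    int main(void)
    {
            struct adapter a = { .login_buf = malloc(128) };

            release_login_buffer(&a);       /* frees the buffer */
            release_login_buffer(&a);       /* harmless second call */
            printf("login_buf=%p\n", a.login_buf);
            return 0;
    }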
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a1d7b88cf083..5a1668cdb461 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
7137 | int id = port->id; | 7137 | int id = port->id; |
7138 | bool allmulti = dev->flags & IFF_ALLMULTI; | 7138 | bool allmulti = dev->flags & IFF_ALLMULTI; |
7139 | 7139 | ||
7140 | retry: | ||
7140 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); | 7141 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); |
7141 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); | 7142 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); |
7142 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); | 7143 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); |
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
7144 | /* Remove all port->id's mcast entries */ | 7145 | /* Remove all port->id's mcast entries */ |
7145 | mvpp2_prs_mcast_del_all(priv, id); | 7146 | mvpp2_prs_mcast_del_all(priv, id); |
7146 | 7147 | ||
7147 | if (allmulti && !netdev_mc_empty(dev)) { | 7148 | if (!allmulti) { |
7148 | netdev_for_each_mc_addr(ha, dev) | 7149 | netdev_for_each_mc_addr(ha, dev) { |
7149 | mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); | 7150 | if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { |
7151 | allmulti = true; | ||
7152 | goto retry; | ||
7153 | } | ||
7154 | } | ||
7150 | } | 7155 | } |
7151 | } | 7156 | } |
7152 | 7157 | ||
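mvpp2_set_rx_mode() now falls back to all-multicast when a multicast filter entry cannot be installed: the failure flips allmulti and the retry label reprograms the port from scratch. A compact standalone model of that fallback (a two-slot filter forces the overflow):

    #include <stdbool.h>
    #include <stdio.h>

    #define FILTER_SLOTS 2          /* tiny hardware filter for the demo */

    static int add_filter(int *used, const char *addr)
    {
            if (*used >= FILTER_SLOTS)
                    return -1;      /* table full */
            (*used)++;
            printf("filter: %s\n", addr);
            return 0;
    }

    static void set_rx_mode(const char **mc_list, int n)
    {
            bool allmulti = false;
            int used;

    retry:
            used = 0;               /* reprogram from scratch on each pass */
            if (!allmulti) {
                    for (int i = 0; i < n; i++) {
                            if (add_filter(&used, mc_list[i])) {
                                    allmulti = true;        /* cannot fit them all */
                                    goto retry;
                            }
                    }
            } else {
                    puts("all-multicast mode");
            }
    }

    int main(void)
    {
            const char *list[] = { "01:00:5e:00:00:01", "01:00:5e:00:00:02",
                                   "01:00:5e:00:00:03" };

            set_rx_mode(list, 3);
            return 0;
    }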
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 0be4575b58a2..fd509160c8f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | |||
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p, | |||
96 | "%pI4"); | 96 | "%pI4"); |
97 | } else if (ethertype.v == ETH_P_IPV6) { | 97 | } else if (ethertype.v == ETH_P_IPV6) { |
98 | static const struct in6_addr full_ones = { | 98 | static const struct in6_addr full_ones = { |
99 | .in6_u.u6_addr32 = {htonl(0xffffffff), | 99 | .in6_u.u6_addr32 = {__constant_htonl(0xffffffff), |
100 | htonl(0xffffffff), | 100 | __constant_htonl(0xffffffff), |
101 | htonl(0xffffffff), | 101 | __constant_htonl(0xffffffff), |
102 | htonl(0xffffffff)}, | 102 | __constant_htonl(0xffffffff)}, |
103 | }; | 103 | }; |
104 | DECLARE_MASK_VAL(struct in6_addr, src_ipv6); | 104 | DECLARE_MASK_VAL(struct in6_addr, src_ipv6); |
105 | DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); | 105 | DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47bab842c5ee..da94c8cba5ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, | |||
1768 | param->wq.linear = 1; | 1768 | param->wq.linear = 1; |
1769 | } | 1769 | } |
1770 | 1770 | ||
1771 | static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) | 1771 | static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, |
1772 | struct mlx5e_rq_param *param) | ||
1772 | { | 1773 | { |
1773 | void *rqc = param->rqc; | 1774 | void *rqc = param->rqc; |
1774 | void *wq = MLX5_ADDR_OF(rqc, rqc, wq); | 1775 | void *wq = MLX5_ADDR_OF(rqc, rqc, wq); |
1775 | 1776 | ||
1776 | MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); | 1777 | MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); |
1777 | MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); | 1778 | MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); |
1779 | |||
1780 | param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); | ||
1778 | } | 1781 | } |
1779 | 1782 | ||
1780 | static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, | 1783 | static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, |
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, | |||
2634 | struct mlx5e_cq *cq, | 2637 | struct mlx5e_cq *cq, |
2635 | struct mlx5e_cq_param *param) | 2638 | struct mlx5e_cq_param *param) |
2636 | { | 2639 | { |
2640 | param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); | ||
2641 | param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); | ||
2642 | |||
2637 | return mlx5e_alloc_cq_common(mdev, param, cq); | 2643 | return mlx5e_alloc_cq_common(mdev, param, cq); |
2638 | } | 2644 | } |
2639 | 2645 | ||
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, | |||
2645 | struct mlx5e_cq *cq = &drop_rq->cq; | 2651 | struct mlx5e_cq *cq = &drop_rq->cq; |
2646 | int err; | 2652 | int err; |
2647 | 2653 | ||
2648 | mlx5e_build_drop_rq_param(&rq_param); | 2654 | mlx5e_build_drop_rq_param(mdev, &rq_param); |
2649 | 2655 | ||
2650 | err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); | 2656 | err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); |
2651 | if (err) | 2657 | if (err) |
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev, | |||
2994 | } | 3000 | } |
2995 | #endif | 3001 | #endif |
2996 | 3002 | ||
2997 | int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, | 3003 | static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, |
2998 | void *type_data) | 3004 | void *type_data) |
2999 | { | 3005 | { |
3000 | switch (type) { | 3006 | switch (type) { |
3001 | #ifdef CONFIG_MLX5_ESWITCH | 3007 | #ifdef CONFIG_MLX5_ESWITCH |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0d4bb0688faa..e5c3ab46a24a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
37 | #include <linux/bpf_trace.h> | 37 | #include <linux/bpf_trace.h> |
38 | #include <net/busy_poll.h> | 38 | #include <net/busy_poll.h> |
39 | #include <net/ip6_checksum.h> | ||
39 | #include "en.h" | 40 | #include "en.h" |
40 | #include "en_tc.h" | 41 | #include "en_tc.h" |
41 | #include "eswitch.h" | 42 | #include "eswitch.h" |
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) | |||
546 | return true; | 547 | return true; |
547 | } | 548 | } |
548 | 549 | ||
550 | static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) | ||
551 | { | ||
552 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); | ||
553 | u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || | ||
554 | (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); | ||
555 | |||
556 | tcp->check = 0; | ||
557 | tcp->psh = get_cqe_lro_tcppsh(cqe); | ||
558 | |||
559 | if (tcp_ack) { | ||
560 | tcp->ack = 1; | ||
561 | tcp->ack_seq = cqe->lro_ack_seq_num; | ||
562 | tcp->window = cqe->lro_tcp_win; | ||
563 | } | ||
564 | } | ||
565 | |||
549 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, | 566 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, |
550 | u32 cqe_bcnt) | 567 | u32 cqe_bcnt) |
551 | { | 568 | { |
552 | struct ethhdr *eth = (struct ethhdr *)(skb->data); | 569 | struct ethhdr *eth = (struct ethhdr *)(skb->data); |
553 | struct tcphdr *tcp; | 570 | struct tcphdr *tcp; |
554 | int network_depth = 0; | 571 | int network_depth = 0; |
572 | __wsum check; | ||
555 | __be16 proto; | 573 | __be16 proto; |
556 | u16 tot_len; | 574 | u16 tot_len; |
557 | void *ip_p; | 575 | void *ip_p; |
558 | 576 | ||
559 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); | ||
560 | u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || | ||
561 | (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); | ||
562 | |||
563 | proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); | 577 | proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); |
564 | 578 | ||
565 | tot_len = cqe_bcnt - network_depth; | 579 | tot_len = cqe_bcnt - network_depth; |
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, | |||
576 | ipv4->check = 0; | 590 | ipv4->check = 0; |
577 | ipv4->check = ip_fast_csum((unsigned char *)ipv4, | 591 | ipv4->check = ip_fast_csum((unsigned char *)ipv4, |
578 | ipv4->ihl); | 592 | ipv4->ihl); |
593 | |||
594 | mlx5e_lro_update_tcp_hdr(cqe, tcp); | ||
595 | check = csum_partial(tcp, tcp->doff * 4, | ||
596 | csum_unfold((__force __sum16)cqe->check_sum)); | ||
597 | /* Almost done, don't forget the pseudo header */ | ||
598 | tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, | ||
599 | tot_len - sizeof(struct iphdr), | ||
600 | IPPROTO_TCP, check); | ||
579 | } else { | 601 | } else { |
602 | u16 payload_len = tot_len - sizeof(struct ipv6hdr); | ||
580 | struct ipv6hdr *ipv6 = ip_p; | 603 | struct ipv6hdr *ipv6 = ip_p; |
581 | 604 | ||
582 | tcp = ip_p + sizeof(struct ipv6hdr); | 605 | tcp = ip_p + sizeof(struct ipv6hdr); |
583 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 606 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
584 | 607 | ||
585 | ipv6->hop_limit = cqe->lro_min_ttl; | 608 | ipv6->hop_limit = cqe->lro_min_ttl; |
586 | ipv6->payload_len = cpu_to_be16(tot_len - | 609 | ipv6->payload_len = cpu_to_be16(payload_len); |
587 | sizeof(struct ipv6hdr)); | 610 | |
588 | } | 611 | mlx5e_lro_update_tcp_hdr(cqe, tcp); |
589 | 612 | check = csum_partial(tcp, tcp->doff * 4, | |
590 | tcp->psh = get_cqe_lro_tcppsh(cqe); | 613 | csum_unfold((__force __sum16)cqe->check_sum)); |
591 | 614 | /* Almost done, don't forget the pseudo header */ | |
592 | if (tcp_ack) { | 615 | tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, |
593 | tcp->ack = 1; | 616 | IPPROTO_TCP, check); |
594 | tcp->ack_seq = cqe->lro_ack_seq_num; | ||
595 | tcp->window = cqe->lro_tcp_win; | ||
596 | } | 617 | } |
597 | } | 618 | } |
598 | 619 | ||
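After LRO coalescing, the en_rx.c hunk above rebuilds the TCP checksum: it zeroes tcp->check, runs csum_partial() over the TCP header seeded with the hardware-reported payload checksum, and then folds in the IPv4 or IPv6 pseudo-header via csum_tcpudp_magic()/csum_ipv6_magic(). The sketch below shows only the underlying one's-complement arithmetic in userspace C; the header bytes and addresses are made-up values, not the kernel helpers or driver data.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement sum of big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Hypothetical 20-byte TCP header (check field zeroed) + 4 payload bytes.
	 * The driver seeds the sum with the hardware's payload checksum; here we
	 * simply checksum the whole segment to show the arithmetic. */
	uint8_t tcp[24] = {
		0x30, 0x39, 0x00, 0x50,		/* sport 12345, dport 80 */
		0x00, 0x00, 0x00, 0x01,		/* seq */
		0x00, 0x00, 0x00, 0x00,		/* ack_seq */
		0x50, 0x18, 0xff, 0xff,		/* doff = 5, flags, window */
		0x00, 0x00, 0x00, 0x00,		/* check = 0, urg */
		'd', 'a', 't', 'a',
	};
	uint32_t s = htonl(0xc0a80001);		/* 192.168.0.1 */
	uint32_t d = htonl(0xc0a80002);		/* 192.168.0.2 */
	uint16_t tcp_len = sizeof(tcp);
	uint8_t pseudo[12];			/* saddr, daddr, zero, proto, length */
	uint32_t sum;

	memcpy(pseudo, &s, 4);
	memcpy(pseudo + 4, &d, 4);
	pseudo[8] = 0;
	pseudo[9] = 6;				/* IPPROTO_TCP */
	pseudo[10] = tcp_len >> 8;
	pseudo[11] = tcp_len & 0xff;

	sum = csum_add(0, pseudo, sizeof(pseudo));
	sum = csum_add(sum, tcp, sizeof(tcp));
	printf("tcp checksum: 0x%04x\n", (unsigned)csum_fold(sum));
	return 0;
}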
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5a4608281f38..707976482c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | |||
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, | |||
216 | if (iph->protocol != IPPROTO_UDP) | 216 | if (iph->protocol != IPPROTO_UDP) |
217 | goto out; | 217 | goto out; |
218 | 218 | ||
219 | udph = udp_hdr(skb); | 219 | /* Don't assume skb_transport_header() was set */ |
220 | udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl); | ||
220 | if (udph->dest != htons(9)) | 221 | if (udph->dest != htons(9)) |
221 | goto out; | 222 | goto out; |
222 | 223 | ||
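The selftest hunk stops trusting skb_transport_header() and locates the UDP header directly at iph + 4 * iph->ihl, since IHL encodes the IPv4 header length in 32-bit words. A tiny sketch of that offset computation over a raw byte buffer (the packet bytes are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical IPv4 header: version/IHL byte 0x46 means IHL = 6 words,
	 * i.e. 24 bytes (20-byte base header plus 4 bytes of options). */
	uint8_t pkt[32] = { 0x46, 0x00, 0x00, 0x20 };

	unsigned int ihl = pkt[0] & 0x0f;	/* header length in 32-bit words */
	const uint8_t *udph = pkt + 4 * ihl;	/* transport header starts here */

	printf("IP header is %u bytes, UDP header at offset %ld\n",
	       4 * ihl, (long)(udph - pkt));
	return 0;
}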
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd98b0dc610f..fa86a1466718 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
2529 | if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { | 2529 | if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { |
2530 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; | 2530 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; |
2531 | } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { | 2531 | } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { |
2532 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) | 2532 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || |
2533 | tcf_vlan_push_prio(a)) | ||
2533 | return -EOPNOTSUPP; | 2534 | return -EOPNOTSUPP; |
2534 | 2535 | ||
2535 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; | 2536 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 569b42a01026..11b4f1089d1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, | |||
176 | default: | 176 | default: |
177 | hlen = mlx5e_skb_l2_header_offset(skb); | 177 | hlen = mlx5e_skb_l2_header_offset(skb); |
178 | } | 178 | } |
179 | return min_t(u16, hlen, skb->len); | 179 | return min_t(u16, hlen, skb_headlen(skb)); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, | 182 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5ecf2cddc16d..c2b1d7d351fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
1529 | 1529 | ||
1530 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); | 1530 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); |
1531 | 1531 | ||
1532 | /* Create steering drop counters for ingress and egress ACLs */ | ||
1533 | if (vport_num && esw->mode == SRIOV_LEGACY) | ||
1534 | esw_vport_create_drop_counters(vport); | ||
1535 | |||
1532 | /* Restore old vport configuration */ | 1536 | /* Restore old vport configuration */ |
1533 | esw_apply_vport_conf(esw, vport); | 1537 | esw_apply_vport_conf(esw, vport); |
1534 | 1538 | ||
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
1545 | if (!vport_num) | 1549 | if (!vport_num) |
1546 | vport->info.trusted = true; | 1550 | vport->info.trusted = true; |
1547 | 1551 | ||
1548 | /* create steering drop counters for ingress and egress ACLs */ | ||
1549 | if (vport_num && esw->mode == SRIOV_LEGACY) | ||
1550 | esw_vport_create_drop_counters(vport); | ||
1551 | |||
1552 | esw_vport_change_handle_locked(vport); | 1552 | esw_vport_change_handle_locked(vport); |
1553 | 1553 | ||
1554 | esw->enabled_vports++; | 1554 | esw->enabled_vports++; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c025c98700e4..31fc2cfac3b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2) | |||
1429 | 1429 | ||
1430 | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | | 1430 | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | |
1431 | MLX5_FLOW_CONTEXT_ACTION_ENCAP | | 1431 | MLX5_FLOW_CONTEXT_ACTION_ENCAP | |
1432 | MLX5_FLOW_CONTEXT_ACTION_DECAP)) | 1432 | MLX5_FLOW_CONTEXT_ACTION_DECAP | |
1433 | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) | ||
1433 | return true; | 1434 | return true; |
1434 | 1435 | ||
1435 | return false; | 1436 | return false; |
@@ -1758,8 +1759,11 @@ search_again_locked: | |||
1758 | 1759 | ||
1759 | /* Collect all fgs which have a matching match_criteria */ | 1760 | /* Collect all fgs which have a matching match_criteria */ |
1760 | err = build_match_list(&match_head, ft, spec); | 1761 | err = build_match_list(&match_head, ft, spec); |
1761 | if (err) | 1762 | if (err) { |
1763 | if (take_write) | ||
1764 | up_write_ref_node(&ft->node); | ||
1762 | return ERR_PTR(err); | 1765 | return ERR_PTR(err); |
1766 | } | ||
1763 | 1767 | ||
1764 | if (!take_write) | 1768 | if (!take_write) |
1765 | up_read_ref_node(&ft->node); | 1769 | up_read_ref_node(&ft->node); |
@@ -1768,8 +1772,11 @@ search_again_locked: | |||
1768 | dest_num, version); | 1772 | dest_num, version); |
1769 | free_match_list(&match_head); | 1773 | free_match_list(&match_head); |
1770 | if (!IS_ERR(rule) || | 1774 | if (!IS_ERR(rule) || |
1771 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) | 1775 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { |
1776 | if (take_write) | ||
1777 | up_write_ref_node(&ft->node); | ||
1772 | return rule; | 1778 | return rule; |
1779 | } | ||
1773 | 1780 | ||
1774 | if (!take_write) { | 1781 | if (!take_write) { |
1775 | nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); | 1782 | nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); |
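Both fs_core.c error paths above now drop the write reference taken on ft->node before returning, so an early failure no longer leaves the flow table write-locked. A generic sketch of the same "unlock on every exit path" shape, using a pthread rwlock as a stand-in for the ref-counted node lock and invented step functions:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for the real steps; tweak the return values to walk the paths. */
static int build_match_list(void)	{ return 0; }
static int try_add_rule(void)		{ return -EAGAIN; }

static int add_rule(bool take_write)
{
	int err;

	if (take_write)
		pthread_rwlock_wrlock(&table_lock);
	else
		pthread_rwlock_rdlock(&table_lock);

	err = build_match_list();
	if (err)
		goto out_unlock;	/* early failure must still drop the lock */

	err = try_add_rule();		/* success or failure, fall through */

out_unlock:
	pthread_rwlock_unlock(&table_lock);
	return err;
}

int main(void)
{
	printf("add_rule -> %d\n", add_rule(true));
	return 0;
}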
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index e159243e0fcf..857035583ccd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <rdma/mlx5-abi.h> | 35 | #include <rdma/mlx5-abi.h> |
36 | #include "en.h" | 36 | #include "en.h" |
37 | #include "clock.h" | ||
37 | 38 | ||
38 | enum { | 39 | enum { |
39 | MLX5_CYCLES_SHIFT = 23 | 40 | MLX5_CYCLES_SHIFT = 23 |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2ef641c91c26..ae391e4b7070 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
551 | MLX5_SET(cmd_hca_cap, | 551 | MLX5_SET(cmd_hca_cap, |
552 | set_hca_cap, | 552 | set_hca_cap, |
553 | cache_line_128byte, | 553 | cache_line_128byte, |
554 | cache_line_size() == 128 ? 1 : 0); | 554 | cache_line_size() >= 128 ? 1 : 0); |
555 | 555 | ||
556 | if (MLX5_CAP_GEN_MAX(dev, dct)) | 556 | if (MLX5_CAP_GEN_MAX(dev, dct)) |
557 | MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); | 557 | MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0b25baba09a..f7948e983637 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, | |||
788 | u32 tb_id, | 788 | u32 tb_id, |
789 | struct netlink_ext_ack *extack) | 789 | struct netlink_ext_ack *extack) |
790 | { | 790 | { |
791 | struct mlxsw_sp_mr_table *mr4_table; | ||
792 | struct mlxsw_sp_fib *fib4; | ||
793 | struct mlxsw_sp_fib *fib6; | ||
791 | struct mlxsw_sp_vr *vr; | 794 | struct mlxsw_sp_vr *vr; |
792 | int err; | 795 | int err; |
793 | 796 | ||
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, | |||
796 | NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); | 799 | NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); |
797 | return ERR_PTR(-EBUSY); | 800 | return ERR_PTR(-EBUSY); |
798 | } | 801 | } |
799 | vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); | 802 | fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); |
800 | if (IS_ERR(vr->fib4)) | 803 | if (IS_ERR(fib4)) |
801 | return ERR_CAST(vr->fib4); | 804 | return ERR_CAST(fib4); |
802 | vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); | 805 | fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); |
803 | if (IS_ERR(vr->fib6)) { | 806 | if (IS_ERR(fib6)) { |
804 | err = PTR_ERR(vr->fib6); | 807 | err = PTR_ERR(fib6); |
805 | goto err_fib6_create; | 808 | goto err_fib6_create; |
806 | } | 809 | } |
807 | vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, | 810 | mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, |
808 | MLXSW_SP_L3_PROTO_IPV4); | 811 | MLXSW_SP_L3_PROTO_IPV4); |
809 | if (IS_ERR(vr->mr4_table)) { | 812 | if (IS_ERR(mr4_table)) { |
810 | err = PTR_ERR(vr->mr4_table); | 813 | err = PTR_ERR(mr4_table); |
811 | goto err_mr_table_create; | 814 | goto err_mr_table_create; |
812 | } | 815 | } |
816 | vr->fib4 = fib4; | ||
817 | vr->fib6 = fib6; | ||
818 | vr->mr4_table = mr4_table; | ||
813 | vr->tb_id = tb_id; | 819 | vr->tb_id = tb_id; |
814 | return vr; | 820 | return vr; |
815 | 821 | ||
816 | err_mr_table_create: | 822 | err_mr_table_create: |
817 | mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); | 823 | mlxsw_sp_fib_destroy(mlxsw_sp, fib6); |
818 | vr->fib6 = NULL; | ||
819 | err_fib6_create: | 824 | err_fib6_create: |
820 | mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); | 825 | mlxsw_sp_fib_destroy(mlxsw_sp, fib4); |
821 | vr->fib4 = NULL; | ||
822 | return ERR_PTR(err); | 826 | return ERR_PTR(err); |
823 | } | 827 | } |
824 | 828 | ||
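The mlxsw_sp_vr_create() rework above builds fib4, fib6 and the mr4_table into locals and only stores them into the vr once every allocation has succeeded, so an error return can no longer leave stale pointers behind in the virtual router. A sketch of that "allocate locally, commit at the end" pattern (the struct names here are invented):

#include <stdio.h>
#include <stdlib.h>

struct fib { int proto; };

struct vr {
	struct fib *fib4;
	struct fib *fib6;
};

static struct fib *fib_create(int proto)
{
	struct fib *f = malloc(sizeof(*f));

	if (f)
		f->proto = proto;
	return f;
}

/* Build everything into locals; write into *vr only once every step worked. */
static int vr_init(struct vr *vr)
{
	struct fib *fib4, *fib6;

	fib4 = fib_create(4);
	if (!fib4)
		return -1;

	fib6 = fib_create(6);
	if (!fib6)
		goto err_fib6;

	vr->fib4 = fib4;	/* commit point: vr is never half-initialized */
	vr->fib6 = fib6;
	return 0;

err_fib6:
	free(fib4);
	return -1;
}

int main(void)
{
	struct vr vr = { 0 };
	int err = vr_init(&vr);

	printf("vr_init -> %d (fib4=%p fib6=%p)\n",
	       err, (void *)vr.fib4, (void *)vr.fib6);
	free(vr.fib4);
	free(vr.fib6);
	return 0;
}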
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) | |||
3790 | struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; | 3794 | struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; |
3791 | int i; | 3795 | int i; |
3792 | 3796 | ||
3797 | if (!list_is_singular(&nh_grp->fib_list)) | ||
3798 | return; | ||
3799 | |||
3793 | for (i = 0; i < nh_grp->count; i++) { | 3800 | for (i = 0; i < nh_grp->count; i++) { |
3794 | struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; | 3801 | struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; |
3795 | 3802 | ||
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 7e7704daf5f1..c4949183eef3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
@@ -43,12 +43,6 @@ | |||
43 | 43 | ||
44 | /* Local Definitions and Declarations */ | 44 | /* Local Definitions and Declarations */ |
45 | 45 | ||
46 | struct rmnet_walk_data { | ||
47 | struct net_device *real_dev; | ||
48 | struct list_head *head; | ||
49 | struct rmnet_port *port; | ||
50 | }; | ||
51 | |||
52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) | 46 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) |
53 | { | 47 | { |
54 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; | 48 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; |
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev) | |||
112 | static void rmnet_unregister_bridge(struct net_device *dev, | 106 | static void rmnet_unregister_bridge(struct net_device *dev, |
113 | struct rmnet_port *port) | 107 | struct rmnet_port *port) |
114 | { | 108 | { |
115 | struct net_device *rmnet_dev, *bridge_dev; | ||
116 | struct rmnet_port *bridge_port; | 109 | struct rmnet_port *bridge_port; |
110 | struct net_device *bridge_dev; | ||
117 | 111 | ||
118 | if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) | 112 | if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) |
119 | return; | 113 | return; |
120 | 114 | ||
121 | /* bridge slave handling */ | 115 | /* bridge slave handling */ |
122 | if (!port->nr_rmnet_devs) { | 116 | if (!port->nr_rmnet_devs) { |
123 | rmnet_dev = netdev_master_upper_dev_get_rcu(dev); | ||
124 | netdev_upper_dev_unlink(dev, rmnet_dev); | ||
125 | |||
126 | bridge_dev = port->bridge_ep; | 117 | bridge_dev = port->bridge_ep; |
127 | 118 | ||
128 | bridge_port = rmnet_get_port_rtnl(bridge_dev); | 119 | bridge_port = rmnet_get_port_rtnl(bridge_dev); |
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev, | |||
132 | bridge_dev = port->bridge_ep; | 123 | bridge_dev = port->bridge_ep; |
133 | 124 | ||
134 | bridge_port = rmnet_get_port_rtnl(bridge_dev); | 125 | bridge_port = rmnet_get_port_rtnl(bridge_dev); |
135 | rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); | ||
136 | netdev_upper_dev_unlink(bridge_dev, rmnet_dev); | ||
137 | |||
138 | rmnet_unregister_real_device(bridge_dev, bridge_port); | 126 | rmnet_unregister_real_device(bridge_dev, bridge_port); |
139 | } | 127 | } |
140 | } | 128 | } |
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, | |||
173 | if (err) | 161 | if (err) |
174 | goto err1; | 162 | goto err1; |
175 | 163 | ||
176 | err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); | ||
177 | if (err) | ||
178 | goto err2; | ||
179 | |||
180 | port->rmnet_mode = mode; | 164 | port->rmnet_mode = mode; |
181 | 165 | ||
182 | hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); | 166 | hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); |
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, | |||
193 | 177 | ||
194 | return 0; | 178 | return 0; |
195 | 179 | ||
196 | err2: | ||
197 | rmnet_vnd_dellink(mux_id, port, ep); | ||
198 | err1: | 180 | err1: |
199 | rmnet_unregister_real_device(real_dev, port); | 181 | rmnet_unregister_real_device(real_dev, port); |
200 | err0: | 182 | err0: |
@@ -204,14 +186,13 @@ err0: | |||
204 | 186 | ||
205 | static void rmnet_dellink(struct net_device *dev, struct list_head *head) | 187 | static void rmnet_dellink(struct net_device *dev, struct list_head *head) |
206 | { | 188 | { |
189 | struct rmnet_priv *priv = netdev_priv(dev); | ||
207 | struct net_device *real_dev; | 190 | struct net_device *real_dev; |
208 | struct rmnet_endpoint *ep; | 191 | struct rmnet_endpoint *ep; |
209 | struct rmnet_port *port; | 192 | struct rmnet_port *port; |
210 | u8 mux_id; | 193 | u8 mux_id; |
211 | 194 | ||
212 | rcu_read_lock(); | 195 | real_dev = priv->real_dev; |
213 | real_dev = netdev_master_upper_dev_get_rcu(dev); | ||
214 | rcu_read_unlock(); | ||
215 | 196 | ||
216 | if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) | 197 | if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) |
217 | return; | 198 | return; |
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) | |||
219 | port = rmnet_get_port_rtnl(real_dev); | 200 | port = rmnet_get_port_rtnl(real_dev); |
220 | 201 | ||
221 | mux_id = rmnet_vnd_get_mux(dev); | 202 | mux_id = rmnet_vnd_get_mux(dev); |
222 | netdev_upper_dev_unlink(dev, real_dev); | ||
223 | 203 | ||
224 | ep = rmnet_get_endpoint(port, mux_id); | 204 | ep = rmnet_get_endpoint(port, mux_id); |
225 | if (ep) { | 205 | if (ep) { |
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) | |||
233 | unregister_netdevice_queue(dev, head); | 213 | unregister_netdevice_queue(dev, head); |
234 | } | 214 | } |
235 | 215 | ||
236 | static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) | ||
237 | { | ||
238 | struct rmnet_walk_data *d = data; | ||
239 | struct rmnet_endpoint *ep; | ||
240 | u8 mux_id; | ||
241 | |||
242 | mux_id = rmnet_vnd_get_mux(rmnet_dev); | ||
243 | ep = rmnet_get_endpoint(d->port, mux_id); | ||
244 | if (ep) { | ||
245 | hlist_del_init_rcu(&ep->hlnode); | ||
246 | rmnet_vnd_dellink(mux_id, d->port, ep); | ||
247 | kfree(ep); | ||
248 | } | ||
249 | netdev_upper_dev_unlink(rmnet_dev, d->real_dev); | ||
250 | unregister_netdevice_queue(rmnet_dev, d->head); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static void rmnet_force_unassociate_device(struct net_device *dev) | 216 | static void rmnet_force_unassociate_device(struct net_device *dev) |
256 | { | 217 | { |
257 | struct net_device *real_dev = dev; | 218 | struct net_device *real_dev = dev; |
258 | struct rmnet_walk_data d; | 219 | struct hlist_node *tmp_ep; |
220 | struct rmnet_endpoint *ep; | ||
259 | struct rmnet_port *port; | 221 | struct rmnet_port *port; |
222 | unsigned long bkt_ep; | ||
260 | LIST_HEAD(list); | 223 | LIST_HEAD(list); |
261 | 224 | ||
262 | if (!rmnet_is_real_dev_registered(real_dev)) | 225 | if (!rmnet_is_real_dev_registered(real_dev)) |
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev) | |||
264 | 227 | ||
265 | ASSERT_RTNL(); | 228 | ASSERT_RTNL(); |
266 | 229 | ||
267 | d.real_dev = real_dev; | ||
268 | d.head = &list; | ||
269 | |||
270 | port = rmnet_get_port_rtnl(dev); | 230 | port = rmnet_get_port_rtnl(dev); |
271 | d.port = port; | ||
272 | 231 | ||
273 | rcu_read_lock(); | 232 | rcu_read_lock(); |
274 | rmnet_unregister_bridge(dev, port); | 233 | rmnet_unregister_bridge(dev, port); |
275 | 234 | ||
276 | netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); | 235 | hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { |
236 | unregister_netdevice_queue(ep->egress_dev, &list); | ||
237 | rmnet_vnd_dellink(ep->mux_id, port, ep); | ||
238 | |||
239 | hlist_del_init_rcu(&ep->hlnode); | ||
240 | kfree(ep); | ||
241 | } | ||
242 | |||
277 | rcu_read_unlock(); | 243 | rcu_read_unlock(); |
278 | unregister_netdevice_many(&list); | 244 | unregister_netdevice_many(&list); |
279 | 245 | ||
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, | |||
422 | if (err) | 388 | if (err) |
423 | return -EBUSY; | 389 | return -EBUSY; |
424 | 390 | ||
425 | err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, | ||
426 | extack); | ||
427 | if (err) | ||
428 | return -EINVAL; | ||
429 | |||
430 | slave_port = rmnet_get_port(slave_dev); | 391 | slave_port = rmnet_get_port(slave_dev); |
431 | slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; | 392 | slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; |
432 | slave_port->bridge_ep = real_dev; | 393 | slave_port->bridge_ep = real_dev; |
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, | |||
449 | port->rmnet_mode = RMNET_EPMODE_VND; | 410 | port->rmnet_mode = RMNET_EPMODE_VND; |
450 | port->bridge_ep = NULL; | 411 | port->bridge_ep = NULL; |
451 | 412 | ||
452 | netdev_upper_dev_unlink(slave_dev, rmnet_dev); | ||
453 | slave_port = rmnet_get_port(slave_dev); | 413 | slave_port = rmnet_get_port(slave_dev); |
454 | rmnet_unregister_real_device(slave_dev, slave_port); | 414 | rmnet_unregister_real_device(slave_dev, slave_port); |
455 | 415 | ||
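The rmnet_config.c changes drop the lower-device walker and the upper-dev links, and instead tear down each endpoint with hash_for_each_safe(), which caches the next node so the current entry can be unlinked and freed mid-iteration. A sketch of the same safe-deletion idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct ep {
	int mux_id;
	struct ep *next;
};

/* Walk the list with a cached successor so the current node can be freed. */
static void remove_all(struct ep **head)
{
	struct ep *ep = *head, *tmp;

	while (ep) {
		tmp = ep->next;		/* grab the next pointer first */
		printf("unregistering mux %d\n", ep->mux_id);
		free(ep);		/* now the current node may disappear */
		ep = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct ep *head = NULL;

	for (int i = 2; i >= 0; i--) {
		struct ep *ep = malloc(sizeof(*ep));

		if (!ep)
			return 1;
		ep->mux_id = i;
		ep->next = head;
		head = ep;
	}
	remove_all(&head);
	return 0;
}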
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 6bc328fb88e1..b0dbca070c00 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | |||
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, | |||
38 | } | 38 | } |
39 | 39 | ||
40 | ep = rmnet_get_endpoint(port, mux_id); | 40 | ep = rmnet_get_endpoint(port, mux_id); |
41 | if (!ep) { | ||
42 | kfree_skb(skb); | ||
43 | return RX_HANDLER_CONSUMED; | ||
44 | } | ||
45 | |||
41 | vnd = ep->egress_dev; | 46 | vnd = ep->egress_dev; |
42 | 47 | ||
43 | ip_family = cmd->flow_control.ip_family; | 48 | ip_family = cmd->flow_control.ip_family; |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 570a227acdd8..346d310914df 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |||
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev, | |||
121 | memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); | 121 | memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); |
122 | 122 | ||
123 | for_each_possible_cpu(cpu) { | 123 | for_each_possible_cpu(cpu) { |
124 | pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); | 124 | pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); |
125 | 125 | ||
126 | do { | 126 | do { |
127 | start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); | 127 | start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); |
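The one-line rmnet_vnd.c fix swaps this_cpu_ptr() for per_cpu_ptr(priv->pcpu_stats, cpu) inside for_each_possible_cpu(): the old code added the calling CPU's counters once per iteration instead of visiting every CPU. A plain-C analog with a per-slot counter array:

#include <stdio.h>

#define NR_CPUS 4

static long rx_packets[NR_CPUS] = { 10, 20, 30, 40 };
static int current_cpu = 1;	/* pretend the aggregation runs on CPU 1 */

int main(void)
{
	long wrong = 0, right = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		wrong += rx_packets[current_cpu];	/* this_cpu_ptr(): same slot each pass */
		right += rx_packets[cpu];		/* per_cpu_ptr(..., cpu): intended slot */
	}
	printf("buggy total: %ld, fixed total: %ld\n", wrong, right);	/* 80 vs 100 */
	return 0;
}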
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c87f57ca4437..a95fbd5510d9 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev) | |||
2255 | /* Enable MagicPacket */ | 2255 | /* Enable MagicPacket */ |
2256 | ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); | 2256 | ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); |
2257 | 2257 | ||
2258 | /* Increased clock usage so device won't be suspended */ | ||
2259 | clk_enable(priv->clk); | ||
2260 | |||
2261 | return enable_irq_wake(priv->emac_irq); | 2258 | return enable_irq_wake(priv->emac_irq); |
2262 | } | 2259 | } |
2263 | 2260 | ||
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev) | |||
2276 | if (ret < 0) | 2273 | if (ret < 0) |
2277 | return ret; | 2274 | return ret; |
2278 | 2275 | ||
2279 | /* Restore clock usage count */ | ||
2280 | clk_disable(priv->clk); | ||
2281 | |||
2282 | return disable_irq_wake(priv->emac_irq); | 2276 | return disable_irq_wake(priv->emac_irq); |
2283 | } | 2277 | } |
2284 | 2278 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a197e11f3a56..92dcf8717fc6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/ethtool.h> | 41 | #include <linux/ethtool.h> |
42 | #include <linux/if_vlan.h> | 42 | #include <linux/if_vlan.h> |
43 | #include <linux/clk.h> | ||
44 | #include <linux/sh_eth.h> | 43 | #include <linux/sh_eth.h> |
45 | #include <linux/of_mdio.h> | 44 | #include <linux/of_mdio.h> |
46 | 45 | ||
@@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
2304 | wol->supported = 0; | 2303 | wol->supported = 0; |
2305 | wol->wolopts = 0; | 2304 | wol->wolopts = 0; |
2306 | 2305 | ||
2307 | if (mdp->cd->magic && mdp->clk) { | 2306 | if (mdp->cd->magic) { |
2308 | wol->supported = WAKE_MAGIC; | 2307 | wol->supported = WAKE_MAGIC; |
2309 | wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; | 2308 | wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; |
2310 | } | 2309 | } |
@@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
2314 | { | 2313 | { |
2315 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2314 | struct sh_eth_private *mdp = netdev_priv(ndev); |
2316 | 2315 | ||
2317 | if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) | 2316 | if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) |
2318 | return -EOPNOTSUPP; | 2317 | return -EOPNOTSUPP; |
2319 | 2318 | ||
2320 | mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); | 2319 | mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); |
@@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3153 | goto out_release; | 3152 | goto out_release; |
3154 | } | 3153 | } |
3155 | 3154 | ||
3156 | /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ | ||
3157 | mdp->clk = devm_clk_get(&pdev->dev, NULL); | ||
3158 | if (IS_ERR(mdp->clk)) | ||
3159 | mdp->clk = NULL; | ||
3160 | |||
3161 | ndev->base_addr = res->start; | 3155 | ndev->base_addr = res->start; |
3162 | 3156 | ||
3163 | spin_lock_init(&mdp->lock); | 3157 | spin_lock_init(&mdp->lock); |
@@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3278 | if (ret) | 3272 | if (ret) |
3279 | goto out_napi_del; | 3273 | goto out_napi_del; |
3280 | 3274 | ||
3281 | if (mdp->cd->magic && mdp->clk) | 3275 | if (mdp->cd->magic) |
3282 | device_set_wakeup_capable(&pdev->dev, 1); | 3276 | device_set_wakeup_capable(&pdev->dev, 1); |
3283 | 3277 | ||
3284 | /* print device information */ | 3278 | /* print device information */ |
@@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev) | |||
3331 | /* Enable MagicPacket */ | 3325 | /* Enable MagicPacket */ |
3332 | sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); | 3326 | sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); |
3333 | 3327 | ||
3334 | /* Increased clock usage so device won't be suspended */ | ||
3335 | clk_enable(mdp->clk); | ||
3336 | |||
3337 | return enable_irq_wake(ndev->irq); | 3328 | return enable_irq_wake(ndev->irq); |
3338 | } | 3329 | } |
3339 | 3330 | ||
@@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev) | |||
3359 | if (ret < 0) | 3350 | if (ret < 0) |
3360 | return ret; | 3351 | return ret; |
3361 | 3352 | ||
3362 | /* Restore clock usage count */ | ||
3363 | clk_disable(mdp->clk); | ||
3364 | |||
3365 | return disable_irq_wake(ndev->irq); | 3353 | return disable_irq_wake(ndev->irq); |
3366 | } | 3354 | } |
3367 | 3355 | ||
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 63aca9f847e1..4c2f612e4414 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig | |||
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC | |||
20 | 20 | ||
21 | config SMC9194 | 21 | config SMC9194 |
22 | tristate "SMC 9194 support" | 22 | tristate "SMC 9194 support" |
23 | depends on (ISA || MAC && BROKEN) | 23 | depends on ISA |
24 | select CRC32 | 24 | select CRC32 |
25 | ---help--- | 25 | ---help--- |
26 | This is support for the SMC9xxx based Ethernet cards. Choose this | 26 | This is support for the SMC9xxx based Ethernet cards. Choose this |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a0f2be81d52e..8fc02d9db3d0 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1451,7 +1451,7 @@ destroy_macvlan_port: | |||
1451 | /* the macvlan port may be freed by macvlan_uninit when registration fails. | 1451 | /* the macvlan port may be freed by macvlan_uninit when registration fails. |
1452 | * so we destroy the macvlan port only when it's valid. | 1452 | * so we destroy the macvlan port only when it's valid. |
1453 | */ | 1453 | */ |
1454 | if (create && macvlan_port_get_rtnl(dev)) | 1454 | if (create && macvlan_port_get_rtnl(lowerdev)) |
1455 | macvlan_port_destroy(port->dev); | 1455 | macvlan_port_destroy(port->dev); |
1456 | return err; | 1456 | return err; |
1457 | } | 1457 | } |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b13eed21c87d..d39ae77707ef 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev) | |||
1382 | ctl |= BMCR_FULLDPLX; | 1382 | ctl |= BMCR_FULLDPLX; |
1383 | 1383 | ||
1384 | return phy_modify(phydev, MII_BMCR, | 1384 | return phy_modify(phydev, MII_BMCR, |
1385 | BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); | 1385 | ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl); |
1386 | } | 1386 | } |
1387 | EXPORT_SYMBOL(genphy_setup_forced); | 1387 | EXPORT_SYMBOL(genphy_setup_forced); |
1388 | 1388 | ||
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index ca5e375de27c..e0d6760f3219 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c | |||
@@ -166,6 +166,8 @@ struct tbnet_ring { | |||
166 | * @connected_work: Worker that finalizes the ThunderboltIP connection | 166 | * @connected_work: Worker that finalizes the ThunderboltIP connection |
167 | * setup and enables DMA paths for high speed data | 167 | * setup and enables DMA paths for high speed data |
168 | * transfers | 168 | * transfers |
169 | * @disconnect_work: Worker that handles tearing down the ThunderboltIP | ||
170 | * connection | ||
169 | * @rx_hdr: Copy of the currently processed Rx frame. Used when a | 171 | * @rx_hdr: Copy of the currently processed Rx frame. Used when a |
170 | * network packet consists of multiple Thunderbolt frames. | 172 | * network packet consists of multiple Thunderbolt frames. |
171 | * In host byte order. | 173 | * In host byte order. |
@@ -190,6 +192,7 @@ struct tbnet { | |||
190 | int login_retries; | 192 | int login_retries; |
191 | struct delayed_work login_work; | 193 | struct delayed_work login_work; |
192 | struct work_struct connected_work; | 194 | struct work_struct connected_work; |
195 | struct work_struct disconnect_work; | ||
193 | struct thunderbolt_ip_frame_header rx_hdr; | 196 | struct thunderbolt_ip_frame_header rx_hdr; |
194 | struct tbnet_ring rx_ring; | 197 | struct tbnet_ring rx_ring; |
195 | atomic_t frame_id; | 198 | atomic_t frame_id; |
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) | |||
445 | case TBIP_LOGOUT: | 448 | case TBIP_LOGOUT: |
446 | ret = tbnet_logout_response(net, route, sequence, command_id); | 449 | ret = tbnet_logout_response(net, route, sequence, command_id); |
447 | if (!ret) | 450 | if (!ret) |
448 | tbnet_tear_down(net, false); | 451 | queue_work(system_long_wq, &net->disconnect_work); |
449 | break; | 452 | break; |
450 | 453 | ||
451 | default: | 454 | default: |
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work) | |||
659 | } | 662 | } |
660 | } | 663 | } |
661 | 664 | ||
665 | static void tbnet_disconnect_work(struct work_struct *work) | ||
666 | { | ||
667 | struct tbnet *net = container_of(work, typeof(*net), disconnect_work); | ||
668 | |||
669 | tbnet_tear_down(net, false); | ||
670 | } | ||
671 | |||
662 | static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, | 672 | static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, |
663 | const struct thunderbolt_ip_frame_header *hdr) | 673 | const struct thunderbolt_ip_frame_header *hdr) |
664 | { | 674 | { |
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev) | |||
881 | 891 | ||
882 | napi_disable(&net->napi); | 892 | napi_disable(&net->napi); |
883 | 893 | ||
894 | cancel_work_sync(&net->disconnect_work); | ||
884 | tbnet_tear_down(net, true); | 895 | tbnet_tear_down(net, true); |
885 | 896 | ||
886 | tb_ring_free(net->rx_ring.ring); | 897 | tb_ring_free(net->rx_ring.ring); |
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) | |||
1195 | net = netdev_priv(dev); | 1206 | net = netdev_priv(dev); |
1196 | INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); | 1207 | INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); |
1197 | INIT_WORK(&net->connected_work, tbnet_connected_work); | 1208 | INIT_WORK(&net->connected_work, tbnet_connected_work); |
1209 | INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); | ||
1198 | mutex_init(&net->connection_lock); | 1210 | mutex_init(&net->connection_lock); |
1199 | atomic_set(&net->command_id, 0); | 1211 | atomic_set(&net->command_id, 0); |
1200 | atomic_set(&net->frame_id, 0); | 1212 | atomic_set(&net->frame_id, 0); |
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) | |||
1270 | stop_login(net); | 1282 | stop_login(net); |
1271 | if (netif_running(net->dev)) { | 1283 | if (netif_running(net->dev)) { |
1272 | netif_device_detach(net->dev); | 1284 | netif_device_detach(net->dev); |
1273 | tb_ring_stop(net->rx_ring.ring); | 1285 | tbnet_tear_down(net, true); |
1274 | tb_ring_stop(net->tx_ring.ring); | ||
1275 | tbnet_free_buffers(&net->rx_ring); | ||
1276 | tbnet_free_buffers(&net->tx_ring); | ||
1277 | } | 1286 | } |
1278 | 1287 | ||
1279 | return 0; | 1288 | return 0; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 81e6cc951e7f..b52258c327d2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, | |||
1489 | skb->truesize += skb->data_len; | 1489 | skb->truesize += skb->data_len; |
1490 | 1490 | ||
1491 | for (i = 1; i < it->nr_segs; i++) { | 1491 | for (i = 1; i < it->nr_segs; i++) { |
1492 | struct page_frag *pfrag = ¤t->task_frag; | ||
1492 | size_t fragsz = it->iov[i].iov_len; | 1493 | size_t fragsz = it->iov[i].iov_len; |
1493 | unsigned long offset; | ||
1494 | struct page *page; | ||
1495 | void *data; | ||
1496 | 1494 | ||
1497 | if (fragsz == 0 || fragsz > PAGE_SIZE) { | 1495 | if (fragsz == 0 || fragsz > PAGE_SIZE) { |
1498 | err = -EINVAL; | 1496 | err = -EINVAL; |
1499 | goto free; | 1497 | goto free; |
1500 | } | 1498 | } |
1501 | 1499 | ||
1502 | local_bh_disable(); | 1500 | if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { |
1503 | data = napi_alloc_frag(fragsz); | ||
1504 | local_bh_enable(); | ||
1505 | if (!data) { | ||
1506 | err = -ENOMEM; | 1501 | err = -ENOMEM; |
1507 | goto free; | 1502 | goto free; |
1508 | } | 1503 | } |
1509 | 1504 | ||
1510 | page = virt_to_head_page(data); | 1505 | skb_fill_page_desc(skb, i - 1, pfrag->page, |
1511 | offset = data - page_address(page); | 1506 | pfrag->offset, fragsz); |
1512 | skb_fill_page_desc(skb, i - 1, page, offset, fragsz); | 1507 | page_ref_inc(pfrag->page); |
1508 | pfrag->offset += fragsz; | ||
1513 | } | 1509 | } |
1514 | 1510 | ||
1515 | return skb; | 1511 | return skb; |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195..7a6a1fe79309 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, | |||
954 | /* it's racing here! */ | 954 | /* it's racing here! */ |
955 | 955 | ||
956 | ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); | 956 | ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); |
957 | if (ret < 0) | 957 | if (ret < 0) { |
958 | netdev_warn(dev->net, "Error writing RFE_CTL\n"); | 958 | netdev_warn(dev->net, "Error writing RFE_CTL\n"); |
959 | 959 | return ret; | |
960 | return ret; | 960 | } |
961 | return 0; | ||
961 | } | 962 | } |
962 | 963 | ||
963 | static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) | 964 | static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 626c27352ae2..9bb9e562b893 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, | |||
443 | sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); | 443 | sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); |
444 | 444 | ||
445 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); | 445 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); |
446 | if (unlikely(err)) { | 446 | if (unlikely(err)) |
447 | struct page *page = virt_to_head_page(xdp->data); | 447 | return false; /* Caller handles free/refcnt */ |
448 | |||
449 | put_page(page); | ||
450 | return false; | ||
451 | } | ||
452 | 448 | ||
453 | return true; | 449 | return true; |
454 | } | 450 | } |
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, | |||
456 | static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) | 452 | static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) |
457 | { | 453 | { |
458 | struct virtnet_info *vi = netdev_priv(dev); | 454 | struct virtnet_info *vi = netdev_priv(dev); |
459 | bool sent = __virtnet_xdp_xmit(vi, xdp); | 455 | struct receive_queue *rq = vi->rq; |
456 | struct bpf_prog *xdp_prog; | ||
457 | bool sent; | ||
460 | 458 | ||
459 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this | ||
460 | * indicates XDP resources have been successfully allocated. | ||
461 | */ | ||
462 | xdp_prog = rcu_dereference(rq->xdp_prog); | ||
463 | if (!xdp_prog) | ||
464 | return -ENXIO; | ||
465 | |||
466 | sent = __virtnet_xdp_xmit(vi, xdp); | ||
461 | if (!sent) | 467 | if (!sent) |
462 | return -ENOSPC; | 468 | return -ENOSPC; |
463 | return 0; | 469 | return 0; |
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
546 | unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + | 552 | unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + |
547 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 553 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
548 | struct page *page = virt_to_head_page(buf); | 554 | struct page *page = virt_to_head_page(buf); |
549 | unsigned int delta = 0, err; | 555 | unsigned int delta = 0; |
550 | struct page *xdp_page; | 556 | struct page *xdp_page; |
557 | bool sent; | ||
558 | int err; | ||
559 | |||
551 | len -= vi->hdr_len; | 560 | len -= vi->hdr_len; |
552 | 561 | ||
553 | rcu_read_lock(); | 562 | rcu_read_lock(); |
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
558 | void *orig_data; | 567 | void *orig_data; |
559 | u32 act; | 568 | u32 act; |
560 | 569 | ||
561 | if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) | 570 | if (unlikely(hdr->hdr.gso_type)) |
562 | goto err_xdp; | 571 | goto err_xdp; |
563 | 572 | ||
564 | if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { | 573 | if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { |
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
596 | delta = orig_data - xdp.data; | 605 | delta = orig_data - xdp.data; |
597 | break; | 606 | break; |
598 | case XDP_TX: | 607 | case XDP_TX: |
599 | if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) | 608 | sent = __virtnet_xdp_xmit(vi, &xdp); |
609 | if (unlikely(!sent)) { | ||
600 | trace_xdp_exception(vi->dev, xdp_prog, act); | 610 | trace_xdp_exception(vi->dev, xdp_prog, act); |
601 | else | 611 | goto err_xdp; |
602 | *xdp_xmit = true; | 612 | } |
613 | *xdp_xmit = true; | ||
603 | rcu_read_unlock(); | 614 | rcu_read_unlock(); |
604 | goto xdp_xmit; | 615 | goto xdp_xmit; |
605 | case XDP_REDIRECT: | 616 | case XDP_REDIRECT: |
606 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | 617 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
607 | if (!err) | 618 | if (err) |
608 | *xdp_xmit = true; | 619 | goto err_xdp; |
620 | *xdp_xmit = true; | ||
609 | rcu_read_unlock(); | 621 | rcu_read_unlock(); |
610 | goto xdp_xmit; | 622 | goto xdp_xmit; |
611 | default: | 623 | default: |
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
677 | struct bpf_prog *xdp_prog; | 689 | struct bpf_prog *xdp_prog; |
678 | unsigned int truesize; | 690 | unsigned int truesize; |
679 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); | 691 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); |
680 | int err; | 692 | bool sent; |
681 | 693 | ||
682 | head_skb = NULL; | 694 | head_skb = NULL; |
683 | 695 | ||
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
746 | } | 758 | } |
747 | break; | 759 | break; |
748 | case XDP_TX: | 760 | case XDP_TX: |
749 | if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) | 761 | sent = __virtnet_xdp_xmit(vi, &xdp); |
762 | if (unlikely(!sent)) { | ||
750 | trace_xdp_exception(vi->dev, xdp_prog, act); | 763 | trace_xdp_exception(vi->dev, xdp_prog, act); |
751 | else | 764 | if (unlikely(xdp_page != page)) |
752 | *xdp_xmit = true; | 765 | put_page(xdp_page); |
766 | goto err_xdp; | ||
767 | } | ||
768 | *xdp_xmit = true; | ||
753 | if (unlikely(xdp_page != page)) | 769 | if (unlikely(xdp_page != page)) |
754 | goto err_xdp; | 770 | goto err_xdp; |
755 | rcu_read_unlock(); | 771 | rcu_read_unlock(); |
756 | goto xdp_xmit; | 772 | goto xdp_xmit; |
757 | case XDP_REDIRECT: | ||
758 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | ||
759 | if (!err) | ||
760 | *xdp_xmit = true; | ||
761 | rcu_read_unlock(); | ||
762 | goto xdp_xmit; | ||
763 | default: | 773 | default: |
764 | bpf_warn_invalid_xdp_action(act); | 774 | bpf_warn_invalid_xdp_action(act); |
765 | case XDP_ABORTED: | 775 | case XDP_ABORTED: |
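The virtio_net changes above make __virtnet_xdp_xmit() return false without freeing the frame when the ring has no room; ownership stays with the caller, which now records the XDP exception and releases the buffer itself. A sketch of that "caller keeps ownership on failure" convention (ring size and buffer handling are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SLOTS 1

static void *ring[RING_SLOTS];
static int ring_used;

/* Enqueue for transmit; on failure the caller keeps ownership of buf. */
static bool xdp_xmit(void *buf)
{
	if (ring_used >= RING_SLOTS)
		return false;		/* no free slot: do NOT free buf here */
	ring[ring_used++] = buf;
	return true;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		void *buf = malloc(64);

		if (!buf)
			return 1;
		if (!xdp_xmit(buf)) {
			printf("ring full, dropping buffer %d\n", i);
			free(buf);	/* failure path: the caller frees */
		}
	}
	for (int i = 0; i < ring_used; i++)
		free(ring[i]);		/* completion path for queued buffers */
	return 0;
}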
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1cf22e62e3dd..6e0af815f25e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void) | |||
3516 | 3516 | ||
3517 | spin_lock_init(&hwsim_radio_lock); | 3517 | spin_lock_init(&hwsim_radio_lock); |
3518 | 3518 | ||
3519 | hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); | 3519 | hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); |
3520 | if (!hwsim_wq) | 3520 | if (!hwsim_wq) |
3521 | return -ENOMEM; | 3521 | return -ENOMEM; |
3522 | rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); | 3522 | rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8328d395e332..3127bc8633ca 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev, | |||
2005 | case XenbusStateInitialised: | 2005 | case XenbusStateInitialised: |
2006 | case XenbusStateReconfiguring: | 2006 | case XenbusStateReconfiguring: |
2007 | case XenbusStateReconfigured: | 2007 | case XenbusStateReconfigured: |
2008 | break; | ||
2009 | |||
2008 | case XenbusStateUnknown: | 2010 | case XenbusStateUnknown: |
2011 | wake_up_all(&module_unload_q); | ||
2009 | break; | 2012 | break; |
2010 | 2013 | ||
2011 | case XenbusStateInitWait: | 2014 | case XenbusStateInitWait: |
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2136 | xenbus_switch_state(dev, XenbusStateClosing); | 2139 | xenbus_switch_state(dev, XenbusStateClosing); |
2137 | wait_event(module_unload_q, | 2140 | wait_event(module_unload_q, |
2138 | xenbus_read_driver_state(dev->otherend) == | 2141 | xenbus_read_driver_state(dev->otherend) == |
2139 | XenbusStateClosing); | 2142 | XenbusStateClosing || |
2143 | xenbus_read_driver_state(dev->otherend) == | ||
2144 | XenbusStateUnknown); | ||
2140 | 2145 | ||
2141 | xenbus_switch_state(dev, XenbusStateClosed); | 2146 | xenbus_switch_state(dev, XenbusStateClosed); |
2142 | wait_event(module_unload_q, | 2147 | wait_event(module_unload_q, |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0fe7ea35c221..817e5e2766da 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -2844,7 +2844,7 @@ out: | |||
2844 | } | 2844 | } |
2845 | 2845 | ||
2846 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | 2846 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, |
2847 | struct nvme_id_ns *id, bool *new) | 2847 | struct nvme_id_ns *id) |
2848 | { | 2848 | { |
2849 | struct nvme_ctrl *ctrl = ns->ctrl; | 2849 | struct nvme_ctrl *ctrl = ns->ctrl; |
2850 | bool is_shared = id->nmic & (1 << 0); | 2850 | bool is_shared = id->nmic & (1 << 0); |
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2860 | ret = PTR_ERR(head); | 2860 | ret = PTR_ERR(head); |
2861 | goto out_unlock; | 2861 | goto out_unlock; |
2862 | } | 2862 | } |
2863 | |||
2864 | *new = true; | ||
2865 | } else { | 2863 | } else { |
2866 | struct nvme_ns_ids ids; | 2864 | struct nvme_ns_ids ids; |
2867 | 2865 | ||
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2873 | ret = -EINVAL; | 2871 | ret = -EINVAL; |
2874 | goto out_unlock; | 2872 | goto out_unlock; |
2875 | } | 2873 | } |
2876 | |||
2877 | *new = false; | ||
2878 | } | 2874 | } |
2879 | 2875 | ||
2880 | list_add_tail(&ns->siblings, &head->list); | 2876 | list_add_tail(&ns->siblings, &head->list); |
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2945 | struct nvme_id_ns *id; | 2941 | struct nvme_id_ns *id; |
2946 | char disk_name[DISK_NAME_LEN]; | 2942 | char disk_name[DISK_NAME_LEN]; |
2947 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; | 2943 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; |
2948 | bool new = true; | ||
2949 | 2944 | ||
2950 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); | 2945 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); |
2951 | if (!ns) | 2946 | if (!ns) |
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2971 | if (id->ncap == 0) | 2966 | if (id->ncap == 0) |
2972 | goto out_free_id; | 2967 | goto out_free_id; |
2973 | 2968 | ||
2974 | if (nvme_init_ns_head(ns, nsid, id, &new)) | 2969 | if (nvme_init_ns_head(ns, nsid, id)) |
2975 | goto out_free_id; | 2970 | goto out_free_id; |
2976 | nvme_setup_streams_ns(ctrl, ns); | 2971 | nvme_setup_streams_ns(ctrl, ns); |
2977 | 2972 | ||
@@ -3037,8 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
3037 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", | 3032 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", |
3038 | ns->disk->disk_name); | 3033 | ns->disk->disk_name); |
3039 | 3034 | ||
3040 | if (new) | 3035 | nvme_mpath_add_disk(ns->head); |
3041 | nvme_mpath_add_disk(ns->head); | ||
3042 | nvme_mpath_add_disk_links(ns); | 3036 | nvme_mpath_add_disk_links(ns); |
3043 | return; | 3037 | return; |
3044 | out_unlink_ns: | 3038 | out_unlink_ns: |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5dd4ceefed8f..a1c58e35075e 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect); | |||
493 | */ | 493 | */ |
494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) | 494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) |
495 | { | 495 | { |
496 | if (!ops->create_ctrl || !ops->module) | 496 | if (!ops->create_ctrl) |
497 | return -EINVAL; | 497 | return -EINVAL; |
498 | 498 | ||
499 | down_write(&nvmf_transports_rwsem); | 499 | down_write(&nvmf_transports_rwsem); |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3b211d9e58b8..b7e5c6db4d92 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -198,11 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
198 | { | 198 | { |
199 | if (!head->disk) | 199 | if (!head->disk) |
200 | return; | 200 | return; |
201 | device_add_disk(&head->subsys->dev, head->disk); | 201 | |
202 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, | 202 | mutex_lock(&head->subsys->lock); |
203 | &nvme_ns_id_attr_group)) | 203 | if (!(head->disk->flags & GENHD_FL_UP)) { |
204 | pr_warn("%s: failed to create sysfs group for identification\n", | 204 | device_add_disk(&head->subsys->dev, head->disk); |
205 | head->disk->disk_name); | 205 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, |
206 | &nvme_ns_id_attr_group)) | ||
207 | pr_warn("%s: failed to create sysfs group for identification\n", | ||
208 | head->disk->disk_name); | ||
209 | } | ||
210 | mutex_unlock(&head->subsys->lock); | ||
206 | } | 211 | } |
207 | 212 | ||
208 | void nvme_mpath_add_disk_links(struct nvme_ns *ns) | 213 | void nvme_mpath_add_disk_links(struct nvme_ns *ns) |
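The multipath hunk above takes subsys->lock and only calls device_add_disk() when the shared head disk is not already marked GENHD_FL_UP, so two namespaces that resolve to the same head cannot register the disk twice. A sketch of that idempotent, lock-protected registration, using a pthread mutex and a boolean flag in place of GENHD_FL_UP:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct head {
	pthread_mutex_t lock;
	bool disk_up;		/* stand-in for GENHD_FL_UP */
};

static void add_disk_once(struct head *head, int caller)
{
	pthread_mutex_lock(&head->lock);
	if (!head->disk_up) {
		head->disk_up = true;	/* first caller registers the disk */
		printf("caller %d registered the disk\n", caller);
	} else {
		printf("caller %d: disk already up, nothing to do\n", caller);
	}
	pthread_mutex_unlock(&head->lock);
}

int main(void)
{
	struct head head = { PTHREAD_MUTEX_INITIALIZER, false };

	add_disk_once(&head, 1);
	add_disk_once(&head, 2);
	return 0;
}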
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 73036d2fbbd5..5933a5c732e8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1459,7 +1459,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1459 | nvmeq->cq_vector = qid - 1; | 1459 | nvmeq->cq_vector = qid - 1; |
1460 | result = adapter_alloc_cq(dev, qid, nvmeq); | 1460 | result = adapter_alloc_cq(dev, qid, nvmeq); |
1461 | if (result < 0) | 1461 | if (result < 0) |
1462 | return result; | 1462 | goto release_vector; |
1463 | 1463 | ||
1464 | result = adapter_alloc_sq(dev, qid, nvmeq); | 1464 | result = adapter_alloc_sq(dev, qid, nvmeq); |
1465 | if (result < 0) | 1465 | if (result < 0) |
@@ -1473,9 +1473,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1473 | return result; | 1473 | return result; |
1474 | 1474 | ||
1475 | release_sq: | 1475 | release_sq: |
1476 | dev->online_queues--; | ||
1476 | adapter_delete_sq(dev, qid); | 1477 | adapter_delete_sq(dev, qid); |
1477 | release_cq: | 1478 | release_cq: |
1478 | adapter_delete_cq(dev, qid); | 1479 | adapter_delete_cq(dev, qid); |
1480 | release_vector: | ||
1481 | nvmeq->cq_vector = -1; | ||
1479 | return result; | 1482 | return result; |
1480 | } | 1483 | } |
1481 | 1484 | ||
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3a51ed50eff2..4d84a73ee12d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, | |||
1051 | struct nvme_rdma_device *dev = queue->device; | 1051 | struct nvme_rdma_device *dev = queue->device; |
1052 | struct ib_device *ibdev = dev->dev; | 1052 | struct ib_device *ibdev = dev->dev; |
1053 | 1053 | ||
1054 | if (!blk_rq_bytes(rq)) | 1054 | if (!blk_rq_payload_bytes(rq)) |
1055 | return; | 1055 | return; |
1056 | 1056 | ||
1057 | if (req->mr) { | 1057 | if (req->mr) { |
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, | |||
1166 | 1166 | ||
1167 | c->common.flags |= NVME_CMD_SGL_METABUF; | 1167 | c->common.flags |= NVME_CMD_SGL_METABUF; |
1168 | 1168 | ||
1169 | if (!blk_rq_bytes(rq)) | 1169 | if (!blk_rq_payload_bytes(rq)) |
1170 | return nvme_rdma_set_sg_null(c); | 1170 | return nvme_rdma_set_sg_null(c); |
1171 | 1171 | ||
1172 | req->sg_table.sgl = req->first_sgl; | 1172 | req->sg_table.sgl = req->first_sgl; |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 0bd737117a80..a78029e4e5f4 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, | |||
520 | goto fail; | 520 | goto fail; |
521 | } | 521 | } |
522 | 522 | ||
523 | /* either variant of SGLs is fine, as we don't support metadata */ | 523 | /* |
524 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && | 524 | * For fabrics, PSDT field shall describe metadata pointer (MPTR) that |
525 | (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { | 525 | * contains an address of a single contiguous physical buffer that is |
526 | * byte aligned. | ||
527 | */ | ||
528 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { | ||
526 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; | 529 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
527 | goto fail; | 530 | goto fail; |
528 | } | 531 | } |
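Editor's note: the nvmet_req_init() hunk above tightens the PSDT check so that, for fabrics commands, only the METABUF encoding is accepted within the two-bit SGL mask. A standalone sketch of that check; the constants mirror the kernel's nvme.h bit positions but are re-declared here so the example stands alone, so treat them as assumptions.

/*
 * PSDT/SGL flag check: with the mask covering both descriptor bits,
 * only the METABUF encoding passes. Constants re-declared for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define CMD_SGL_METABUF  (1 << 6)
#define CMD_SGL_METASEG  (1 << 7)
#define CMD_SGL_ALL      (CMD_SGL_METABUF | CMD_SGL_METASEG)

static bool psdt_ok(unsigned char flags)
{
	return (flags & CMD_SGL_ALL) == CMD_SGL_METABUF;
}

int main(void)
{
	printf("METABUF: %d\n", psdt_ok(CMD_SGL_METABUF));	/* accepted */
	printf("METASEG: %d\n", psdt_ok(CMD_SGL_METASEG));	/* now rejected */
	return 0;
}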
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7991ec3a17db..861d1509b22b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
184 | return BLK_STS_OK; | 184 | return BLK_STS_OK; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (blk_rq_bytes(req)) { | 187 | if (blk_rq_payload_bytes(req)) { |
188 | iod->sg_table.sgl = iod->first_sgl; | 188 | iod->sg_table.sgl = iod->first_sgl; |
189 | if (sg_alloc_table_chained(&iod->sg_table, | 189 | if (sg_alloc_table_chained(&iod->sg_table, |
190 | blk_rq_nr_phys_segments(req), | 190 | blk_rq_nr_phys_segments(req), |
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
193 | 193 | ||
194 | iod->req.sg = iod->sg_table.sgl; | 194 | iod->req.sg = iod->sg_table.sgl; |
195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); | 195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); |
196 | iod->req.transfer_len = blk_rq_bytes(req); | 196 | iod->req.transfer_len = blk_rq_payload_bytes(req); |
197 | } | 197 | } |
198 | 198 | ||
199 | blk_mq_start_request(req); | 199 | blk_mq_start_request(req); |
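Editor's note: the rdma.c and loop.c hunks above switch from blk_rq_bytes() to blk_rq_payload_bytes() because requests carrying a special payload (discard and similar) transfer the payload vector's length, not the logical request size. A toy model of that distinction, not block-layer API:

/*
 * Toy model: a "special payload" request reports its small descriptor
 * length as the payload, while the logical size stays large.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	unsigned int bytes;		/* logical request size */
	bool has_special_payload;
	unsigned int special_len;	/* e.g. a small discard descriptor */
};

static unsigned int toy_payload_bytes(const struct toy_rq *rq)
{
	return rq->has_special_payload ? rq->special_len : rq->bytes;
}

int main(void)
{
	struct toy_rq discard = { .bytes = 1 << 20,
				  .has_special_payload = true,
				  .special_len = 16 };

	printf("logical %u, payload %u\n", discard.bytes,
	       toy_payload_bytes(&discard));
	return 0;
}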
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fc734014206f..8b14bd326d4a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, | |||
3419 | 3419 | ||
3420 | static void quirk_chelsio_extend_vpd(struct pci_dev *dev) | 3420 | static void quirk_chelsio_extend_vpd(struct pci_dev *dev) |
3421 | { | 3421 | { |
3422 | pci_set_vpd_size(dev, 8192); | 3422 | int chip = (dev->device & 0xf000) >> 12; |
3423 | } | 3423 | int func = (dev->device & 0x0f00) >> 8; |
3424 | 3424 | int prod = (dev->device & 0x00ff) >> 0; | |
3425 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); | 3425 | |
3426 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); | 3426 | /* |
3427 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); | 3427 | * If this is a T3-based adapter, there's a 1KB VPD area at offset |
3428 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); | 3428 | * 0xc00 which contains the preferred VPD values. If this is a T4 or |
3429 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); | 3429 | * later based adapter, the special VPD is at offset 0x400 for the |
3430 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); | 3430 | * Physical Functions (the SR-IOV Virtual Functions have no VPD |
3431 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); | 3431 | * Capabilities). The PCI VPD Access core routines will normally |
3432 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); | 3432 | * compute the size of the VPD by parsing the VPD Data Structure at |
3433 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); | 3433 | * offset 0x000. This will result in silent failures when attempting |
3434 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); | 3434 | * to access these other VPD areas which are beyond those computed |
3435 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); | 3435 | * limits. |
3436 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); | 3436 | */ |
3437 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); | 3437 | if (chip == 0x0 && prod >= 0x20) |
3438 | pci_set_vpd_size(dev, 8192); | ||
3439 | else if (chip >= 0x4 && func < 0x8) | ||
3440 | pci_set_vpd_size(dev, 2048); | ||
3441 | } | ||
3442 | |||
3443 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, | ||
3444 | quirk_chelsio_extend_vpd); | ||
3438 | 3445 | ||
3439 | #ifdef CONFIG_ACPI | 3446 | #ifdef CONFIG_ACPI |
3440 | /* | 3447 | /* |
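Editor's note: the quirks.c hunk above replaces the long per-device fixup list with a single PCI_ANY_ID fixup that decodes the Chelsio device ID into chip generation, function and product nibbles. A standalone sketch of that decode; the sample device IDs are illustrative assumptions, not an exhaustive list.

/*
 * Device-ID decode used by the quirk: chip/func/prod live in nibbles of
 * the 16-bit device ID, and the VPD size is chosen per generation.
 */
#include <stdio.h>

static unsigned int chelsio_vpd_size(unsigned short device)
{
	int chip = (device & 0xf000) >> 12;
	int func = (device & 0x0f00) >> 8;
	int prod = (device & 0x00ff) >> 0;

	if (chip == 0x0 && prod >= 0x20)	/* T3 family */
		return 8192;
	if (chip >= 0x4 && func < 0x8)		/* T4+ physical functions */
		return 2048;
	return 0;				/* leave the computed default */
}

int main(void)
{
	printf("0x0022 -> %u\n", chelsio_vpd_size(0x0022)); /* T3-style ID */
	printf("0x5001 -> %u\n", chelsio_vpd_size(0x5001)); /* T5 PF-style ID */
	printf("0x5801 -> %u\n", chelsio_vpd_size(0x5801)); /* VF-style ID: default */
	return 0;
}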
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 369d48d6c6f1..365447240d95 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno) | |||
401 | struct resource *res = dev->resource + resno; | 401 | struct resource *res = dev->resource + resno; |
402 | 402 | ||
403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); | 403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); |
404 | |||
405 | if (!res->parent) | ||
406 | return; | ||
407 | |||
404 | release_resource(res); | 408 | release_resource(res); |
405 | res->end = resource_size(res) - 1; | 409 | res->end = resource_size(res) - 1; |
406 | res->start = 0; | 410 | res->start = 0; |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 7bc5eee96b31..0c2ed11c0603 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/export.h> | 17 | #include <linux/export.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/perf/arm_pmu.h> | 19 | #include <linux/perf/arm_pmu.h> |
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
22 | #include <linux/sched/clock.h> | 21 | #include <linux/sched/clock.h> |
23 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
@@ -26,6 +25,9 @@ | |||
26 | 25 | ||
27 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
28 | 27 | ||
28 | static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); | ||
29 | static DEFINE_PER_CPU(int, cpu_irq); | ||
30 | |||
29 | static int | 31 | static int |
30 | armpmu_map_cache_event(const unsigned (*cache_map) | 32 | armpmu_map_cache_event(const unsigned (*cache_map) |
31 | [PERF_COUNT_HW_CACHE_MAX] | 33 | [PERF_COUNT_HW_CACHE_MAX] |
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event) | |||
320 | return 0; | 322 | return 0; |
321 | } | 323 | } |
322 | 324 | ||
323 | static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) | ||
324 | { | ||
325 | struct platform_device *pdev = armpmu->plat_device; | ||
326 | |||
327 | return pdev ? dev_get_platdata(&pdev->dev) : NULL; | ||
328 | } | ||
329 | |||
330 | static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | 325 | static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) |
331 | { | 326 | { |
332 | struct arm_pmu *armpmu; | 327 | struct arm_pmu *armpmu; |
333 | struct arm_pmu_platdata *plat; | ||
334 | int ret; | 328 | int ret; |
335 | u64 start_clock, finish_clock; | 329 | u64 start_clock, finish_clock; |
336 | 330 | ||
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | |||
341 | * dereference. | 335 | * dereference. |
342 | */ | 336 | */ |
343 | armpmu = *(void **)dev; | 337 | armpmu = *(void **)dev; |
344 | 338 | if (WARN_ON_ONCE(!armpmu)) | |
345 | plat = armpmu_get_platdata(armpmu); | 339 | return IRQ_NONE; |
346 | 340 | ||
347 | start_clock = sched_clock(); | 341 | start_clock = sched_clock(); |
348 | if (plat && plat->handle_irq) | 342 | ret = armpmu->handle_irq(irq, armpmu); |
349 | ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); | ||
350 | else | ||
351 | ret = armpmu->handle_irq(irq, armpmu); | ||
352 | finish_clock = sched_clock(); | 343 | finish_clock = sched_clock(); |
353 | 344 | ||
354 | perf_sample_event_took(finish_clock - start_clock); | 345 | perf_sample_event_took(finish_clock - start_clock); |
@@ -531,54 +522,41 @@ int perf_num_counters(void) | |||
531 | } | 522 | } |
532 | EXPORT_SYMBOL_GPL(perf_num_counters); | 523 | EXPORT_SYMBOL_GPL(perf_num_counters); |
533 | 524 | ||
534 | void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) | 525 | static int armpmu_count_irq_users(const int irq) |
535 | { | 526 | { |
536 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | 527 | int cpu, count = 0; |
537 | int irq = per_cpu(hw_events->irq, cpu); | ||
538 | 528 | ||
539 | if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) | 529 | for_each_possible_cpu(cpu) { |
540 | return; | 530 | if (per_cpu(cpu_irq, cpu) == irq) |
541 | 531 | count++; | |
542 | if (irq_is_percpu_devid(irq)) { | ||
543 | free_percpu_irq(irq, &hw_events->percpu_pmu); | ||
544 | cpumask_clear(&armpmu->active_irqs); | ||
545 | return; | ||
546 | } | 532 | } |
547 | 533 | ||
548 | free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); | 534 | return count; |
549 | } | 535 | } |
550 | 536 | ||
551 | void armpmu_free_irqs(struct arm_pmu *armpmu) | 537 | void armpmu_free_irq(int irq, int cpu) |
552 | { | 538 | { |
553 | int cpu; | 539 | if (per_cpu(cpu_irq, cpu) == 0) |
540 | return; | ||
541 | if (WARN_ON(irq != per_cpu(cpu_irq, cpu))) | ||
542 | return; | ||
543 | |||
544 | if (!irq_is_percpu_devid(irq)) | ||
545 | free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu)); | ||
546 | else if (armpmu_count_irq_users(irq) == 1) | ||
547 | free_percpu_irq(irq, &cpu_armpmu); | ||
554 | 548 | ||
555 | for_each_cpu(cpu, &armpmu->supported_cpus) | 549 | per_cpu(cpu_irq, cpu) = 0; |
556 | armpmu_free_irq(armpmu, cpu); | ||
557 | } | 550 | } |
558 | 551 | ||
559 | int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) | 552 | int armpmu_request_irq(int irq, int cpu) |
560 | { | 553 | { |
561 | int err = 0; | 554 | int err = 0; |
562 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
563 | const irq_handler_t handler = armpmu_dispatch_irq; | 555 | const irq_handler_t handler = armpmu_dispatch_irq; |
564 | int irq = per_cpu(hw_events->irq, cpu); | ||
565 | if (!irq) | 556 | if (!irq) |
566 | return 0; | 557 | return 0; |
567 | 558 | ||
568 | if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) { | 559 | if (!irq_is_percpu_devid(irq)) { |
569 | err = request_percpu_irq(irq, handler, "arm-pmu", | ||
570 | &hw_events->percpu_pmu); | ||
571 | } else if (irq_is_percpu_devid(irq)) { | ||
572 | int other_cpu = cpumask_first(&armpmu->active_irqs); | ||
573 | int other_irq = per_cpu(hw_events->irq, other_cpu); | ||
574 | |||
575 | if (irq != other_irq) { | ||
576 | pr_warn("mismatched PPIs detected.\n"); | ||
577 | err = -EINVAL; | ||
578 | goto err_out; | ||
579 | } | ||
580 | } else { | ||
581 | struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu); | ||
582 | unsigned long irq_flags; | 560 | unsigned long irq_flags; |
583 | 561 | ||
584 | err = irq_force_affinity(irq, cpumask_of(cpu)); | 562 | err = irq_force_affinity(irq, cpumask_of(cpu)); |
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) | |||
589 | goto err_out; | 567 | goto err_out; |
590 | } | 568 | } |
591 | 569 | ||
592 | if (platdata && platdata->irq_flags) { | 570 | irq_flags = IRQF_PERCPU | |
593 | irq_flags = platdata->irq_flags; | 571 | IRQF_NOBALANCING | |
594 | } else { | 572 | IRQF_NO_THREAD; |
595 | irq_flags = IRQF_PERCPU | | ||
596 | IRQF_NOBALANCING | | ||
597 | IRQF_NO_THREAD; | ||
598 | } | ||
599 | 573 | ||
574 | irq_set_status_flags(irq, IRQ_NOAUTOEN); | ||
600 | err = request_irq(irq, handler, irq_flags, "arm-pmu", | 575 | err = request_irq(irq, handler, irq_flags, "arm-pmu", |
601 | per_cpu_ptr(&hw_events->percpu_pmu, cpu)); | 576 | per_cpu_ptr(&cpu_armpmu, cpu)); |
577 | } else if (armpmu_count_irq_users(irq) == 0) { | ||
578 | err = request_percpu_irq(irq, handler, "arm-pmu", | ||
579 | &cpu_armpmu); | ||
602 | } | 580 | } |
603 | 581 | ||
604 | if (err) | 582 | if (err) |
605 | goto err_out; | 583 | goto err_out; |
606 | 584 | ||
607 | cpumask_set_cpu(cpu, &armpmu->active_irqs); | 585 | per_cpu(cpu_irq, cpu) = irq; |
608 | return 0; | 586 | return 0; |
609 | 587 | ||
610 | err_out: | 588 | err_out: |
@@ -612,19 +590,6 @@ err_out: | |||
612 | return err; | 590 | return err; |
613 | } | 591 | } |
614 | 592 | ||
615 | int armpmu_request_irqs(struct arm_pmu *armpmu) | ||
616 | { | ||
617 | int cpu, err; | ||
618 | |||
619 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
620 | err = armpmu_request_irq(armpmu, cpu); | ||
621 | if (err) | ||
622 | break; | ||
623 | } | ||
624 | |||
625 | return err; | ||
626 | } | ||
627 | |||
628 | static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) | 593 | static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) |
629 | { | 594 | { |
630 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; | 595 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; |
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) | |||
647 | if (pmu->reset) | 612 | if (pmu->reset) |
648 | pmu->reset(pmu); | 613 | pmu->reset(pmu); |
649 | 614 | ||
615 | per_cpu(cpu_armpmu, cpu) = pmu; | ||
616 | |||
650 | irq = armpmu_get_cpu_irq(pmu, cpu); | 617 | irq = armpmu_get_cpu_irq(pmu, cpu); |
651 | if (irq) { | 618 | if (irq) { |
652 | if (irq_is_percpu_devid(irq)) { | 619 | if (irq_is_percpu_devid(irq)) |
653 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | 620 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
654 | return 0; | 621 | else |
655 | } | 622 | enable_irq(irq); |
656 | } | 623 | } |
657 | 624 | ||
658 | return 0; | 625 | return 0; |
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) | |||
667 | return 0; | 634 | return 0; |
668 | 635 | ||
669 | irq = armpmu_get_cpu_irq(pmu, cpu); | 636 | irq = armpmu_get_cpu_irq(pmu, cpu); |
670 | if (irq && irq_is_percpu_devid(irq)) | 637 | if (irq) { |
671 | disable_percpu_irq(irq); | 638 | if (irq_is_percpu_devid(irq)) |
639 | disable_percpu_irq(irq); | ||
640 | else | ||
641 | disable_irq(irq); | ||
642 | } | ||
643 | |||
644 | per_cpu(cpu_armpmu, cpu) = NULL; | ||
672 | 645 | ||
673 | return 0; | 646 | return 0; |
674 | } | 647 | } |
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) | |||
800 | &cpu_pmu->node); | 773 | &cpu_pmu->node); |
801 | } | 774 | } |
802 | 775 | ||
803 | struct arm_pmu *armpmu_alloc(void) | 776 | static struct arm_pmu *__armpmu_alloc(gfp_t flags) |
804 | { | 777 | { |
805 | struct arm_pmu *pmu; | 778 | struct arm_pmu *pmu; |
806 | int cpu; | 779 | int cpu; |
807 | 780 | ||
808 | pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); | 781 | pmu = kzalloc(sizeof(*pmu), flags); |
809 | if (!pmu) { | 782 | if (!pmu) { |
810 | pr_info("failed to allocate PMU device!\n"); | 783 | pr_info("failed to allocate PMU device!\n"); |
811 | goto out; | 784 | goto out; |
812 | } | 785 | } |
813 | 786 | ||
814 | pmu->hw_events = alloc_percpu(struct pmu_hw_events); | 787 | pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags); |
815 | if (!pmu->hw_events) { | 788 | if (!pmu->hw_events) { |
816 | pr_info("failed to allocate per-cpu PMU data.\n"); | 789 | pr_info("failed to allocate per-cpu PMU data.\n"); |
817 | goto out_free_pmu; | 790 | goto out_free_pmu; |
@@ -857,6 +830,17 @@ out: | |||
857 | return NULL; | 830 | return NULL; |
858 | } | 831 | } |
859 | 832 | ||
833 | struct arm_pmu *armpmu_alloc(void) | ||
834 | { | ||
835 | return __armpmu_alloc(GFP_KERNEL); | ||
836 | } | ||
837 | |||
838 | struct arm_pmu *armpmu_alloc_atomic(void) | ||
839 | { | ||
840 | return __armpmu_alloc(GFP_ATOMIC); | ||
841 | } | ||
842 | |||
843 | |||
860 | void armpmu_free(struct arm_pmu *pmu) | 844 | void armpmu_free(struct arm_pmu *pmu) |
861 | { | 845 | { |
862 | free_percpu(pmu->hw_events); | 846 | free_percpu(pmu->hw_events); |
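Editor's note: the arm_pmu.c hunks above move IRQ ownership into per-cpu bookkeeping (cpu_irq/cpu_armpmu) so a shared percpu_devid IRQ is only freed by its last user, counted with armpmu_count_irq_users(). A userspace sketch of that reference-count-by-scan idea; NR_CPUS and the arrays are stand-ins for the kernel's per-cpu variables.

/*
 * Per-cpu IRQ bookkeeping: each CPU records its IRQ; a shared IRQ is
 * only truly released when the last recorded user frees it.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static int cpu_irq[NR_CPUS];

static int count_irq_users(int irq)
{
	int cpu, count = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_irq[cpu] == irq)
			count++;
	return count;
}

static void free_irq_for_cpu(int irq, int cpu, bool shared)
{
	if (cpu_irq[cpu] != irq)
		return;
	if (!shared)
		printf("cpu%d: free dedicated irq %d\n", cpu, irq);
	else if (count_irq_users(irq) == 1)
		printf("cpu%d: last user, free shared irq %d\n", cpu, irq);
	else
		printf("cpu%d: shared irq %d still in use elsewhere\n", cpu, irq);
	cpu_irq[cpu] = 0;
}

int main(void)
{
	cpu_irq[0] = cpu_irq[1] = 16;	/* shared PPI-style IRQ */

	free_irq_for_cpu(16, 0, true);	/* still one user left */
	free_irq_for_cpu(16, 1, true);	/* last user: actually freed */
	return 0;
}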
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 705f1a390e31..0f197516d708 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/irq.h> | ||
15 | #include <linux/irqdesc.h> | ||
14 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
15 | #include <linux/perf/arm_pmu.h> | 17 | #include <linux/perf/arm_pmu.h> |
16 | 18 | ||
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void) | |||
87 | pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); | 89 | pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); |
88 | } | 90 | } |
89 | 91 | ||
92 | /* | ||
93 | * Log and request the IRQ so the core arm_pmu code can manage | ||
94 | * it. We'll have to sanity-check IRQs later when we associate | ||
95 | * them with their PMUs. | ||
96 | */ | ||
90 | per_cpu(pmu_irqs, cpu) = irq; | 97 | per_cpu(pmu_irqs, cpu) = irq; |
98 | armpmu_request_irq(irq, cpu); | ||
91 | } | 99 | } |
92 | 100 | ||
93 | return 0; | 101 | return 0; |
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) | |||
127 | return pmu; | 135 | return pmu; |
128 | } | 136 | } |
129 | 137 | ||
130 | pmu = armpmu_alloc(); | 138 | pmu = armpmu_alloc_atomic(); |
131 | if (!pmu) { | 139 | if (!pmu) { |
132 | pr_warn("Unable to allocate PMU for CPU%d\n", | 140 | pr_warn("Unable to allocate PMU for CPU%d\n", |
133 | smp_processor_id()); | 141 | smp_processor_id()); |
@@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) | |||
140 | } | 148 | } |
141 | 149 | ||
142 | /* | 150 | /* |
151 | * Check whether the new IRQ is compatible with those already associated with | ||
152 | * the PMU (e.g. we don't have mismatched PPIs). | ||
153 | */ | ||
154 | static bool pmu_irq_matches(struct arm_pmu *pmu, int irq) | ||
155 | { | ||
156 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; | ||
157 | int cpu; | ||
158 | |||
159 | if (!irq) | ||
160 | return true; | ||
161 | |||
162 | for_each_cpu(cpu, &pmu->supported_cpus) { | ||
163 | int other_irq = per_cpu(hw_events->irq, cpu); | ||
164 | if (!other_irq) | ||
165 | continue; | ||
166 | |||
167 | if (irq == other_irq) | ||
168 | continue; | ||
169 | if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq)) | ||
170 | continue; | ||
171 | |||
172 | pr_warn("mismatched PPIs detected\n"); | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | return true; | ||
177 | } | ||
178 | |||
179 | /* | ||
143 | * This must run before the common arm_pmu hotplug logic, so that we can | 180 | * This must run before the common arm_pmu hotplug logic, so that we can |
144 | * associate a CPU and its interrupt before the common code tries to manage the | 181 | * associate a CPU and its interrupt before the common code tries to manage the |
145 | * affinity and so on. | 182 | * affinity and so on. |
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu) | |||
164 | if (!pmu) | 201 | if (!pmu) |
165 | return -ENOMEM; | 202 | return -ENOMEM; |
166 | 203 | ||
167 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | ||
168 | |||
169 | per_cpu(probed_pmus, cpu) = pmu; | 204 | per_cpu(probed_pmus, cpu) = pmu; |
170 | 205 | ||
171 | /* | 206 | if (pmu_irq_matches(pmu, irq)) { |
172 | * Log and request the IRQ so the core arm_pmu code can manage it. In | 207 | hw_events = pmu->hw_events; |
173 | * some situations (e.g. mismatched PPIs), we may fail to request the | 208 | per_cpu(hw_events->irq, cpu) = irq; |
174 | * IRQ. However, it may be too late for us to do anything about it. | 209 | } |
175 | * The common ARM PMU code will log a warning in this case. | 210 | |
176 | */ | 211 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
177 | hw_events = pmu->hw_events; | ||
178 | per_cpu(hw_events->irq, cpu) = irq; | ||
179 | armpmu_request_irq(pmu, cpu); | ||
180 | 212 | ||
181 | /* | 213 | /* |
182 | * Ideally, we'd probe the PMU here when we find the first matching | 214 | * Ideally, we'd probe the PMU here when we find the first matching |
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void) | |||
247 | if (acpi_disabled) | 279 | if (acpi_disabled) |
248 | return 0; | 280 | return 0; |
249 | 281 | ||
250 | /* | ||
251 | * We can't request IRQs yet, since we don't know the cookie value | ||
252 | * until we know which CPUs share the same logical PMU. We'll handle | ||
253 | * that in arm_pmu_acpi_cpu_starting(). | ||
254 | */ | ||
255 | ret = arm_pmu_acpi_parse_irqs(); | 282 | ret = arm_pmu_acpi_parse_irqs(); |
256 | if (ret) | 283 | if (ret) |
257 | return ret; | 284 | return ret; |
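Editor's note: the arm_pmu_acpi.c hunk above adds pmu_irq_matches(), which accepts a new per-CPU IRQ only if no already-recorded CPU uses a different IRQ where at least one of the pair is a percpu (PPI-style) interrupt. A plain-C sketch of that rule, using stand-in arrays rather than the kernel helpers.

/*
 * PPI compatibility rule: two distinct SPIs are fine; a differing pair
 * involving a PPI is a mismatch.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool irq_matches(const int pmu_irq[NR_CPUS], const bool is_ppi[NR_CPUS],
			int irq, bool irq_is_ppi)
{
	int cpu;

	if (!irq)
		return true;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int other = pmu_irq[cpu];

		if (!other || other == irq)
			continue;
		if (!irq_is_ppi && !is_ppi[cpu])
			continue;	/* two distinct SPIs are fine */
		printf("mismatched PPIs detected\n");
		return false;
	}
	return true;
}

int main(void)
{
	int irqs[NR_CPUS] = { 16, 16, 0, 0 };	/* CPUs 0-1 use PPI 16 */
	bool ppi[NR_CPUS] = { true, true, false, false };

	printf("irq 16 ok: %d\n", irq_matches(irqs, ppi, 16, true));
	printf("irq 17 ok: %d\n", irq_matches(irqs, ppi, 17, true));
	return 0;
}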
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 46501cc79fd7..7729eda5909d 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c | |||
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) | |||
127 | pdev->dev.of_node); | 127 | pdev->dev.of_node); |
128 | } | 128 | } |
129 | 129 | ||
130 | /* | ||
131 | * Some platforms have all PMU IRQs OR'd into a single IRQ, with a | ||
132 | * special platdata function that attempts to demux them. | ||
133 | */ | ||
134 | if (dev_get_platdata(&pdev->dev)) | ||
135 | cpumask_setall(&pmu->supported_cpus); | ||
136 | |||
137 | for (i = 0; i < num_irqs; i++) { | 130 | for (i = 0; i < num_irqs; i++) { |
138 | int cpu, irq; | 131 | int cpu, irq; |
139 | 132 | ||
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) | |||
164 | return 0; | 157 | return 0; |
165 | } | 158 | } |
166 | 159 | ||
160 | static int armpmu_request_irqs(struct arm_pmu *armpmu) | ||
161 | { | ||
162 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
163 | int cpu, err; | ||
164 | |||
165 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
166 | int irq = per_cpu(hw_events->irq, cpu); | ||
167 | if (!irq) | ||
168 | continue; | ||
169 | |||
170 | err = armpmu_request_irq(irq, cpu); | ||
171 | if (err) | ||
172 | break; | ||
173 | } | ||
174 | |||
175 | return err; | ||
176 | } | ||
177 | |||
178 | static void armpmu_free_irqs(struct arm_pmu *armpmu) | ||
179 | { | ||
180 | int cpu; | ||
181 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
182 | |||
183 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
184 | int irq = per_cpu(hw_events->irq, cpu); | ||
185 | |||
186 | armpmu_free_irq(irq, cpu); | ||
187 | } | ||
188 | } | ||
189 | |||
167 | int arm_pmu_device_probe(struct platform_device *pdev, | 190 | int arm_pmu_device_probe(struct platform_device *pdev, |
168 | const struct of_device_id *of_table, | 191 | const struct of_device_id *of_table, |
169 | const struct pmu_probe_info *probe_table) | 192 | const struct pmu_probe_info *probe_table) |
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 1fda9d6c7ea3..4b91ff74779b 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c | |||
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = { | |||
716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", | 716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", |
717 | }; | 717 | }; |
718 | 718 | ||
719 | static const char * const uart_ao_b_gpioz_groups[] = { | 719 | static const char * const uart_ao_b_z_groups[] = { |
720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", | 720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", |
721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", | 721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", |
722 | }; | 722 | }; |
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = { | |||
855 | FUNCTION(nand), | 855 | FUNCTION(nand), |
856 | FUNCTION(uart_a), | 856 | FUNCTION(uart_a), |
857 | FUNCTION(uart_b), | 857 | FUNCTION(uart_b), |
858 | FUNCTION(uart_ao_b_gpioz), | 858 | FUNCTION(uart_ao_b_z), |
859 | FUNCTION(i2c0), | 859 | FUNCTION(i2c0), |
860 | FUNCTION(i2c1), | 860 | FUNCTION(i2c1), |
861 | FUNCTION(i2c2), | 861 | FUNCTION(i2c2), |
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index d1a01311c1a2..5e3df194723e 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c | |||
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device) | |||
376 | { | 376 | { |
377 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 377 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
378 | 378 | ||
379 | device_init_wakeup(&device->dev, false); | ||
379 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 380 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
380 | intel_hid_set_enable(&device->dev, false); | 381 | intel_hid_set_enable(&device->dev, false); |
381 | intel_button_array_enable(&device->dev, false); | 382 | intel_button_array_enable(&device->dev, false); |
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b703d6f5b099..c13780b8dabb 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/acpi.h> | 9 | #include <linux/acpi.h> |
10 | #include <linux/dmi.h> | ||
10 | #include <linux/input.h> | 11 | #include <linux/input.h> |
11 | #include <linux/input/sparse-keymap.h> | 12 | #include <linux/input/sparse-keymap.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -97,9 +98,35 @@ out_unknown: | |||
97 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); | 98 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); |
98 | } | 99 | } |
99 | 100 | ||
100 | static int intel_vbtn_probe(struct platform_device *device) | 101 | static void detect_tablet_mode(struct platform_device *device) |
101 | { | 102 | { |
103 | const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); | ||
104 | struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); | ||
105 | acpi_handle handle = ACPI_HANDLE(&device->dev); | ||
102 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; | 106 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; |
107 | union acpi_object *obj; | ||
108 | acpi_status status; | ||
109 | int m; | ||
110 | |||
111 | if (!(chassis_type && strcmp(chassis_type, "31") == 0)) | ||
112 | goto out; | ||
113 | |||
114 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
115 | if (ACPI_FAILURE(status)) | ||
116 | goto out; | ||
117 | |||
118 | obj = vgbs_output.pointer; | ||
119 | if (!(obj && obj->type == ACPI_TYPE_INTEGER)) | ||
120 | goto out; | ||
121 | |||
122 | m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
123 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
124 | out: | ||
125 | kfree(vgbs_output.pointer); | ||
126 | } | ||
127 | |||
128 | static int intel_vbtn_probe(struct platform_device *device) | ||
129 | { | ||
103 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 130 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
104 | struct intel_vbtn_priv *priv; | 131 | struct intel_vbtn_priv *priv; |
105 | acpi_status status; | 132 | acpi_status status; |
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device) | |||
122 | return err; | 149 | return err; |
123 | } | 150 | } |
124 | 151 | ||
125 | /* | 152 | detect_tablet_mode(device); |
126 | * VGBS being present and returning something means we have | ||
127 | * a tablet mode switch. | ||
128 | */ | ||
129 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
130 | if (ACPI_SUCCESS(status)) { | ||
131 | union acpi_object *obj = vgbs_output.pointer; | ||
132 | |||
133 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
134 | int m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
135 | |||
136 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | kfree(vgbs_output.pointer); | ||
141 | 153 | ||
142 | status = acpi_install_notify_handler(handle, | 154 | status = acpi_install_notify_handler(handle, |
143 | ACPI_DEVICE_NOTIFY, | 155 | ACPI_DEVICE_NOTIFY, |
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device) | |||
154 | { | 166 | { |
155 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 167 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
156 | 168 | ||
169 | device_init_wakeup(&device->dev, false); | ||
157 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 170 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
158 | 171 | ||
159 | /* | 172 | /* |
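Editor's note: the intel-vbtn hunk above factors tablet-mode detection into detect_tablet_mode(), which only runs on SMBIOS chassis type "31" (convertible) and reports the inverse of the tablet-mode flag returned by the VGBS method. A userspace sketch of that decision; TOY_TABLET_FLAG and the hard-coded VGBS values are invented for the example, the real bit is the driver's TABLET_MODE_FLAG.

/*
 * Chassis-type gate plus flag inversion, as in detect_tablet_mode().
 * TOY_TABLET_FLAG is an assumed bit position for illustration only.
 */
#include <stdio.h>
#include <string.h>

#define TOY_TABLET_FLAG 0x40

static void report_sw_tablet_mode(const char *chassis_type,
				  unsigned long long vgbs)
{
	int m;

	if (!(chassis_type && strcmp(chassis_type, "31") == 0))
		return;				/* not a convertible */

	m = !(vgbs & TOY_TABLET_FLAG);
	printf("SW_TABLET_MODE = %d\n", m);
}

int main(void)
{
	report_sw_tablet_mode("31", 0x00);		/* flag clear -> tablet */
	report_sw_tablet_mode("31", TOY_TABLET_FLAG);	/* flag set -> clamshell */
	report_sw_tablet_mode("9", 0x00);		/* laptop chassis: ignored */
	return 0;
}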
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index c0c8945603cb..8796211ef24a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev) | |||
945 | wblock->char_dev.mode = 0444; | 945 | wblock->char_dev.mode = 0444; |
946 | ret = misc_register(&wblock->char_dev); | 946 | ret = misc_register(&wblock->char_dev); |
947 | if (ret) { | 947 | if (ret) { |
948 | dev_warn(dev, "failed to register char dev: %d", ret); | 948 | dev_warn(dev, "failed to register char dev: %d\n", ret); |
949 | ret = -ENOMEM; | 949 | ret = -ENOMEM; |
950 | goto probe_misc_failure; | 950 | goto probe_misc_failure; |
951 | } | 951 | } |
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev, | |||
1048 | 1048 | ||
1049 | if (result) { | 1049 | if (result) { |
1050 | dev_warn(wmi_bus_dev, | 1050 | dev_warn(wmi_bus_dev, |
1051 | "%s data block query control method not found", | 1051 | "%s data block query control method not found\n", |
1052 | method); | 1052 | method); |
1053 | return result; | 1053 | return result; |
1054 | } | 1054 | } |
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) | |||
1198 | 1198 | ||
1199 | retval = device_add(&wblock->dev.dev); | 1199 | retval = device_add(&wblock->dev.dev); |
1200 | if (retval) { | 1200 | if (retval) { |
1201 | dev_err(wmi_bus_dev, "failed to register %pULL\n", | 1201 | dev_err(wmi_bus_dev, "failed to register %pUL\n", |
1202 | wblock->gblock.guid); | 1202 | wblock->gblock.guid); |
1203 | if (debug_event) | 1203 | if (debug_event) |
1204 | wmi_method_enable(wblock, 0); | 1204 | wmi_method_enable(wblock, 0); |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index fcfd28d2884c..de1b3fce936d 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ | |||
185 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) | 185 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) |
186 | zalon7xx-objs := zalon.o ncr53c8xx.o | 186 | zalon7xx-objs := zalon.o ncr53c8xx.o |
187 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o | 187 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o |
188 | oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o | ||
189 | 188 | ||
190 | # Files generated that shall be removed upon make clean | 189 | # Files generated that shall be removed upon make clean |
191 | clean-files := 53c700_d.h 53c700_u.h | 190 | clean-files := 53c700_d.h 53c700_u.h |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index b3b931ab77eb..2664ea0df35f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1693 | * Map in the registers from the adapter. | 1693 | * Map in the registers from the adapter. |
1694 | */ | 1694 | */ |
1695 | aac->base_size = AAC_MIN_FOOTPRINT_SIZE; | 1695 | aac->base_size = AAC_MIN_FOOTPRINT_SIZE; |
1696 | if ((*aac_drivers[index].init)(aac)) | 1696 | if ((*aac_drivers[index].init)(aac)) { |
1697 | error = -ENODEV; | ||
1697 | goto out_unmap; | 1698 | goto out_unmap; |
1699 | } | ||
1698 | 1700 | ||
1699 | if (aac->sync_mode) { | 1701 | if (aac->sync_mode) { |
1700 | if (aac_sync_mode) | 1702 | if (aac_sync_mode) |
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c deleted file mode 100644 index 828ae3d9a510..000000000000 --- a/drivers/scsi/aic7xxx/aiclib.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* | ||
2 | * Implementation of Utility functions for all SCSI device types. | ||
3 | * | ||
4 | * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. | ||
5 | * Copyright (c) 1997, 1998 Kenneth D. Merry. | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Redistribution and use in source and binary forms, with or without | ||
9 | * modification, are permitted provided that the following conditions | ||
10 | * are met: | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions, and the following disclaimer, | ||
13 | * without modification, immediately at the beginning of the file. | ||
14 | * 2. The name of the author may not be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | ||
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR | ||
21 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
27 | * SUCH DAMAGE. | ||
28 | * | ||
29 | * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $ | ||
30 | * $Id$ | ||
31 | */ | ||
32 | |||
33 | #include "aiclib.h" | ||
34 | |||
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 8e2f767147cb..5a645b8b9af1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, | |||
1889 | /* we will not receive ABTS response for this IO */ | 1889 | /* we will not receive ABTS response for this IO */ |
1890 | BNX2FC_IO_DBG(io_req, "Timer context finished processing " | 1890 | BNX2FC_IO_DBG(io_req, "Timer context finished processing " |
1891 | "this scsi cmd\n"); | 1891 | "this scsi cmd\n"); |
1892 | return; | ||
1892 | } | 1893 | } |
1893 | 1894 | ||
1894 | /* Cancel the timeout_work, as we received IO completion */ | 1895 | /* Cancel the timeout_work, as we received IO completion */ |
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index be5ee2d37815..7dbbbb81a1e7 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c | |||
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = { | |||
114 | static struct csio_lnode * | 114 | static struct csio_lnode * |
115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) | 115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) |
116 | { | 116 | { |
117 | struct csio_lnode *ln = hw->rln; | 117 | struct csio_lnode *ln; |
118 | struct list_head *tmp; | 118 | struct list_head *tmp; |
119 | 119 | ||
120 | /* Match siblings lnode with portid */ | 120 | /* Match siblings lnode with portid */ |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 022e421c2185..4b44325d1a82 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work) | |||
876 | 876 | ||
877 | /** | 877 | /** |
878 | * alua_rtpg_queue() - cause RTPG to be submitted asynchronously | 878 | * alua_rtpg_queue() - cause RTPG to be submitted asynchronously |
879 | * @pg: ALUA port group associated with @sdev. | ||
880 | * @sdev: SCSI device for which to submit an RTPG. | ||
881 | * @qdata: Information about the callback to invoke after the RTPG. | ||
882 | * @force: Whether or not to submit an RTPG if a work item that will submit an | ||
883 | * RTPG already has been scheduled. | ||
879 | * | 884 | * |
880 | * Returns true if and only if alua_rtpg_work() will be called asynchronously. | 885 | * Returns true if and only if alua_rtpg_work() will be called asynchronously. |
881 | * That function is responsible for calling @qdata->fn(). | 886 | * That function is responsible for calling @qdata->fn(). |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 9a0696f68f37..b81a53c4a9a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes { | |||
367 | }; | 367 | }; |
368 | 368 | ||
369 | struct ibmvfc_fcp_rsp_info { | 369 | struct ibmvfc_fcp_rsp_info { |
370 | __be16 reserved; | 370 | u8 reserved[3]; |
371 | u8 rsp_code; | 371 | u8 rsp_code; |
372 | u8 reserved2[4]; | 372 | u8 reserved2[4]; |
373 | }__attribute__((packed, aligned (2))); | 373 | }__attribute__((packed, aligned (2))); |
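Editor's note: the ibmvfc.h hunk above grows the reserved field from __be16 to u8[3] so that rsp_code lands at byte offset 3, where the FCP spec places RSP_CODE within FCP_RSP_INFO. A quick offsetof() comparison of the two packed layouts, using stand-in struct names:

/*
 * Old layout put rsp_code at offset 2; the fixed layout puts it at 3.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rsp_info_old {
	uint16_t reserved;
	uint8_t  rsp_code;
	uint8_t  reserved2[4];
} __attribute__((packed, aligned(2)));

struct rsp_info_new {
	uint8_t  reserved[3];
	uint8_t  rsp_code;
	uint8_t  reserved2[4];
} __attribute__((packed, aligned(2)));

int main(void)
{
	printf("old rsp_code offset: %zu\n", offsetof(struct rsp_info_old, rsp_code));
	printf("new rsp_code offset: %zu\n", offsetof(struct rsp_info_new, rsp_code));
	return 0;
}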
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 13d6e4ec3022..59a87ca328d3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |||
2410 | continue; | 2410 | continue; |
2411 | } | 2411 | } |
2412 | 2412 | ||
2413 | for_each_cpu(cpu, mask) | 2413 | for_each_cpu_and(cpu, mask, cpu_online_mask) { |
2414 | if (cpu >= ioc->cpu_msix_table_sz) | ||
2415 | break; | ||
2414 | ioc->cpu_msix_table[cpu] = reply_q->msix_index; | 2416 | ioc->cpu_msix_table[cpu] = reply_q->msix_index; |
2417 | } | ||
2415 | } | 2418 | } |
2416 | return; | 2419 | return; |
2417 | } | 2420 | } |
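Editor's note: the mpt3sas hunk above walks only CPUs that are both in the reply queue's mask and online, and stops before indexing past cpu_msix_table_sz. A small sketch of that bounded intersection walk, with bitmask words standing in for the kernel's cpumasks:

/*
 * Bounded walk over the intersection of two CPU masks, analogous to
 * for_each_cpu_and() plus the new table-size check.
 */
#include <stdio.h>

#define NR_CPUS 16

int main(void)
{
	unsigned int mask = 0x00f0;		/* CPUs 4-7 assigned to this queue */
	unsigned int online = 0x0033;		/* only CPUs 0,1,4,5 online */
	unsigned int table_sz = 5;		/* pretend the mapping table is short */
	unsigned char table[NR_CPUS] = { 0 };
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!((mask >> cpu) & 1) || !((online >> cpu) & 1))
			continue;		/* for_each_cpu_and() analogue */
		if (cpu >= table_sz)
			break;			/* don't overrun the table */
		table[cpu] = 1;
		printf("cpu%u mapped\n", cpu);
	}
	return 0;
}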
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 029e2e69b29f..f57a94b4f0d9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) | |||
1724 | { | 1724 | { |
1725 | struct qedi_ctx *qedi = data; | 1725 | struct qedi_ctx *qedi = data; |
1726 | struct nvm_iscsi_initiator *initiator; | 1726 | struct nvm_iscsi_initiator *initiator; |
1727 | char *str = buf; | ||
1728 | int rc = 1; | 1727 | int rc = 1; |
1729 | u32 ipv6_en, dhcp_en, ip_len; | 1728 | u32 ipv6_en, dhcp_en, ip_len; |
1730 | struct nvm_iscsi_block *block; | 1729 | struct nvm_iscsi_block *block; |
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) | |||
1758 | 1757 | ||
1759 | switch (type) { | 1758 | switch (type) { |
1760 | case ISCSI_BOOT_ETH_IP_ADDR: | 1759 | case ISCSI_BOOT_ETH_IP_ADDR: |
1761 | rc = snprintf(str, ip_len, fmt, ip); | 1760 | rc = snprintf(buf, ip_len, fmt, ip); |
1762 | break; | 1761 | break; |
1763 | case ISCSI_BOOT_ETH_SUBNET_MASK: | 1762 | case ISCSI_BOOT_ETH_SUBNET_MASK: |
1764 | rc = snprintf(str, ip_len, fmt, sub); | 1763 | rc = snprintf(buf, ip_len, fmt, sub); |
1765 | break; | 1764 | break; |
1766 | case ISCSI_BOOT_ETH_GATEWAY: | 1765 | case ISCSI_BOOT_ETH_GATEWAY: |
1767 | rc = snprintf(str, ip_len, fmt, gw); | 1766 | rc = snprintf(buf, ip_len, fmt, gw); |
1768 | break; | 1767 | break; |
1769 | case ISCSI_BOOT_ETH_FLAGS: | 1768 | case ISCSI_BOOT_ETH_FLAGS: |
1770 | rc = snprintf(str, 3, "%hhd\n", | 1769 | rc = snprintf(buf, 3, "%hhd\n", |
1771 | SYSFS_FLAG_FW_SEL_BOOT); | 1770 | SYSFS_FLAG_FW_SEL_BOOT); |
1772 | break; | 1771 | break; |
1773 | case ISCSI_BOOT_ETH_INDEX: | 1772 | case ISCSI_BOOT_ETH_INDEX: |
1774 | rc = snprintf(str, 3, "0\n"); | 1773 | rc = snprintf(buf, 3, "0\n"); |
1775 | break; | 1774 | break; |
1776 | case ISCSI_BOOT_ETH_MAC: | 1775 | case ISCSI_BOOT_ETH_MAC: |
1777 | rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN); | 1776 | rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN); |
1778 | break; | 1777 | break; |
1779 | case ISCSI_BOOT_ETH_VLAN: | 1778 | case ISCSI_BOOT_ETH_VLAN: |
1780 | rc = snprintf(str, 12, "%d\n", | 1779 | rc = snprintf(buf, 12, "%d\n", |
1781 | GET_FIELD2(initiator->generic_cont0, | 1780 | GET_FIELD2(initiator->generic_cont0, |
1782 | NVM_ISCSI_CFG_INITIATOR_VLAN)); | 1781 | NVM_ISCSI_CFG_INITIATOR_VLAN)); |
1783 | break; | 1782 | break; |
1784 | case ISCSI_BOOT_ETH_ORIGIN: | 1783 | case ISCSI_BOOT_ETH_ORIGIN: |
1785 | if (dhcp_en) | 1784 | if (dhcp_en) |
1786 | rc = snprintf(str, 3, "3\n"); | 1785 | rc = snprintf(buf, 3, "3\n"); |
1787 | break; | 1786 | break; |
1788 | default: | 1787 | default: |
1789 | rc = 0; | 1788 | rc = 0; |
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) | |||
1819 | { | 1818 | { |
1820 | struct qedi_ctx *qedi = data; | 1819 | struct qedi_ctx *qedi = data; |
1821 | struct nvm_iscsi_initiator *initiator; | 1820 | struct nvm_iscsi_initiator *initiator; |
1822 | char *str = buf; | ||
1823 | int rc; | 1821 | int rc; |
1824 | struct nvm_iscsi_block *block; | 1822 | struct nvm_iscsi_block *block; |
1825 | 1823 | ||
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) | |||
1831 | 1829 | ||
1832 | switch (type) { | 1830 | switch (type) { |
1833 | case ISCSI_BOOT_INI_INITIATOR_NAME: | 1831 | case ISCSI_BOOT_INI_INITIATOR_NAME: |
1834 | rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", | 1832 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, |
1835 | initiator->initiator_name.byte); | 1833 | initiator->initiator_name.byte); |
1836 | break; | 1834 | break; |
1837 | default: | 1835 | default: |
1838 | rc = 0; | 1836 | rc = 0; |
@@ -1860,7 +1858,6 @@ static ssize_t | |||
1860 | qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, | 1858 | qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, |
1861 | char *buf, enum qedi_nvm_tgts idx) | 1859 | char *buf, enum qedi_nvm_tgts idx) |
1862 | { | 1860 | { |
1863 | char *str = buf; | ||
1864 | int rc = 1; | 1861 | int rc = 1; |
1865 | u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; | 1862 | u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; |
1866 | struct nvm_iscsi_block *block; | 1863 | struct nvm_iscsi_block *block; |
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, | |||
1899 | 1896 | ||
1900 | switch (type) { | 1897 | switch (type) { |
1901 | case ISCSI_BOOT_TGT_NAME: | 1898 | case ISCSI_BOOT_TGT_NAME: |
1902 | rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", | 1899 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, |
1903 | block->target[idx].target_name.byte); | 1900 | block->target[idx].target_name.byte); |
1904 | break; | 1901 | break; |
1905 | case ISCSI_BOOT_TGT_IP_ADDR: | 1902 | case ISCSI_BOOT_TGT_IP_ADDR: |
1906 | if (ipv6_en) | 1903 | if (ipv6_en) |
1907 | rc = snprintf(str, ip_len, "%pI6\n", | 1904 | rc = snprintf(buf, ip_len, "%pI6\n", |
1908 | block->target[idx].ipv6_addr.byte); | 1905 | block->target[idx].ipv6_addr.byte); |
1909 | else | 1906 | else |
1910 | rc = snprintf(str, ip_len, "%pI4\n", | 1907 | rc = snprintf(buf, ip_len, "%pI4\n", |
1911 | block->target[idx].ipv4_addr.byte); | 1908 | block->target[idx].ipv4_addr.byte); |
1912 | break; | 1909 | break; |
1913 | case ISCSI_BOOT_TGT_PORT: | 1910 | case ISCSI_BOOT_TGT_PORT: |
1914 | rc = snprintf(str, 12, "%d\n", | 1911 | rc = snprintf(buf, 12, "%d\n", |
1915 | GET_FIELD2(block->target[idx].generic_cont0, | 1912 | GET_FIELD2(block->target[idx].generic_cont0, |
1916 | NVM_ISCSI_CFG_TARGET_TCP_PORT)); | 1913 | NVM_ISCSI_CFG_TARGET_TCP_PORT)); |
1917 | break; | 1914 | break; |
1918 | case ISCSI_BOOT_TGT_LUN: | 1915 | case ISCSI_BOOT_TGT_LUN: |
1919 | rc = snprintf(str, 22, "%.*d\n", | 1916 | rc = snprintf(buf, 22, "%.*d\n", |
1920 | block->target[idx].lun.value[1], | 1917 | block->target[idx].lun.value[1], |
1921 | block->target[idx].lun.value[0]); | 1918 | block->target[idx].lun.value[0]); |
1922 | break; | 1919 | break; |
1923 | case ISCSI_BOOT_TGT_CHAP_NAME: | 1920 | case ISCSI_BOOT_TGT_CHAP_NAME: |
1924 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", | 1921 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1925 | chap_name); | 1922 | chap_name); |
1926 | break; | 1923 | break; |
1927 | case ISCSI_BOOT_TGT_CHAP_SECRET: | 1924 | case ISCSI_BOOT_TGT_CHAP_SECRET: |
1928 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", | 1925 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1929 | chap_secret); | 1926 | chap_secret); |
1930 | break; | 1927 | break; |
1931 | case ISCSI_BOOT_TGT_REV_CHAP_NAME: | 1928 | case ISCSI_BOOT_TGT_REV_CHAP_NAME: |
1932 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", | 1929 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1933 | mchap_name); | 1930 | mchap_name); |
1934 | break; | 1931 | break; |
1935 | case ISCSI_BOOT_TGT_REV_CHAP_SECRET: | 1932 | case ISCSI_BOOT_TGT_REV_CHAP_SECRET: |
1936 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", | 1933 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1937 | mchap_secret); | 1934 | mchap_secret); |
1938 | break; | 1935 | break; |
1939 | case ISCSI_BOOT_TGT_FLAGS: | 1936 | case ISCSI_BOOT_TGT_FLAGS: |
1940 | rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); | 1937 | rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); |
1941 | break; | 1938 | break; |
1942 | case ISCSI_BOOT_TGT_NIC_ASSOC: | 1939 | case ISCSI_BOOT_TGT_NIC_ASSOC: |
1943 | rc = snprintf(str, 3, "0\n"); | 1940 | rc = snprintf(buf, 3, "0\n"); |
1944 | break; | 1941 | break; |
1945 | default: | 1942 | default: |
1946 | rc = 0; | 1943 | rc = 0; |
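Editor's note: the qedi hunks above replace snprintf(buf, MAX_LEN, "%s\n", src) with sprintf(buf, "%.*s\n", MAX_LEN, src) for the name, CHAP and secret fields. The likely motivation is that these are fixed-size firmware fields that need not be NUL-terminated, and snprintf's size argument also reserves a byte for the terminator, which can clip the last character and the newline; "%.*s" bounds how much of the source is read while printing the whole field. A minimal demonstration:

/*
 * "%.*s" caps the bytes read from a non-terminated fixed-size field.
 */
#include <stdio.h>

int main(void)
{
	/* fixed-size field, deliberately not NUL-terminated */
	char field[8] = { 'i', 'q', 'n', '.', 't', 'e', 's', 't' };
	char buf[32];

	sprintf(buf, "%.*s\n", (int)sizeof(field), field);
	fputs(buf, stdout);
	return 0;
}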
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aececf664654..2dea1129d396 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t) | |||
59 | req->outstanding_cmds[sp->handle] = NULL; | 59 | req->outstanding_cmds[sp->handle] = NULL; |
60 | iocb = &sp->u.iocb_cmd; | 60 | iocb = &sp->u.iocb_cmd; |
61 | iocb->timeout(sp); | 61 | iocb->timeout(sp); |
62 | if (sp->type != SRB_ELS_DCMD) | ||
63 | sp->free(sp); | ||
64 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | 62 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); |
65 | } | 63 | } |
66 | 64 | ||
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data) | |||
102 | srb_t *sp = data; | 100 | srb_t *sp = data; |
103 | fc_port_t *fcport = sp->fcport; | 101 | fc_port_t *fcport = sp->fcport; |
104 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 102 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
105 | struct event_arg ea; | ||
106 | 103 | ||
107 | if (fcport) { | 104 | if (fcport) { |
108 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | 105 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, |
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data) | |||
117 | 114 | ||
118 | switch (sp->type) { | 115 | switch (sp->type) { |
119 | case SRB_LOGIN_CMD: | 116 | case SRB_LOGIN_CMD: |
120 | if (!fcport) | ||
121 | break; | ||
122 | /* Retry as needed. */ | 117 | /* Retry as needed. */ |
123 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; | 118 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; |
124 | lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? | 119 | lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? |
125 | QLA_LOGIO_LOGIN_RETRIED : 0; | 120 | QLA_LOGIO_LOGIN_RETRIED : 0; |
126 | memset(&ea, 0, sizeof(ea)); | 121 | sp->done(sp, QLA_FUNCTION_TIMEOUT); |
127 | ea.event = FCME_PLOGI_DONE; | ||
128 | ea.fcport = sp->fcport; | ||
129 | ea.data[0] = lio->u.logio.data[0]; | ||
130 | ea.data[1] = lio->u.logio.data[1]; | ||
131 | ea.sp = sp; | ||
132 | qla24xx_handle_plogi_done_event(fcport->vha, &ea); | ||
133 | break; | 122 | break; |
134 | case SRB_LOGOUT_CMD: | 123 | case SRB_LOGOUT_CMD: |
135 | if (!fcport) | ||
136 | break; | ||
137 | qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); | ||
138 | break; | ||
139 | case SRB_CT_PTHRU_CMD: | 124 | case SRB_CT_PTHRU_CMD: |
140 | case SRB_MB_IOCB: | 125 | case SRB_MB_IOCB: |
141 | case SRB_NACK_PLOGI: | 126 | case SRB_NACK_PLOGI: |
@@ -235,12 +220,10 @@ static void | |||
235 | qla2x00_async_logout_sp_done(void *ptr, int res) | 220 | qla2x00_async_logout_sp_done(void *ptr, int res) |
236 | { | 221 | { |
237 | srb_t *sp = ptr; | 222 | srb_t *sp = ptr; |
238 | struct srb_iocb *lio = &sp->u.iocb_cmd; | ||
239 | 223 | ||
240 | sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | 224 | sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
241 | if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) | 225 | sp->fcport->login_gen++; |
242 | qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, | 226 | qlt_logo_completion_handler(sp->fcport, res); |
243 | lio->u.logio.data); | ||
244 | sp->free(sp); | 227 | sp->free(sp); |
245 | } | 228 | } |
246 | 229 | ||
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 1b62e943ec49..8d00d559bd26 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) | |||
3275 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); | 3275 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); |
3276 | abt_iocb->entry_type = ABORT_IOCB_TYPE; | 3276 | abt_iocb->entry_type = ABORT_IOCB_TYPE; |
3277 | abt_iocb->entry_count = 1; | 3277 | abt_iocb->entry_count = 1; |
3278 | abt_iocb->handle = | 3278 | abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); |
3279 | cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, | ||
3280 | aio->u.abt.cmd_hndl)); | ||
3281 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 3279 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3282 | abt_iocb->handle_to_abort = | 3280 | abt_iocb->handle_to_abort = |
3283 | cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); | 3281 | cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, |
3282 | aio->u.abt.cmd_hndl)); | ||
3284 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; | 3283 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
3285 | abt_iocb->port_id[1] = sp->fcport->d_id.b.area; | 3284 | abt_iocb->port_id[1] = sp->fcport->d_id.b.area; |
3286 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; | 3285 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14109d86c3f6..89f93ebd819d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
272 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 272 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
273 | 273 | ||
274 | /* Read all mbox registers? */ | 274 | /* Read all mbox registers? */ |
275 | mboxes = (1 << ha->mbx_count) - 1; | 275 | WARN_ON_ONCE(ha->mbx_count > 32); |
276 | mboxes = (1ULL << ha->mbx_count) - 1; | ||
276 | if (!ha->mcp) | 277 | if (!ha->mcp) |
277 | ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); | 278 | ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); |
278 | else | 279 | else |
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
2880 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 2881 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2881 | 2882 | ||
2882 | /* Read all mbox registers? */ | 2883 | /* Read all mbox registers? */ |
2883 | mboxes = (1 << ha->mbx_count) - 1; | 2884 | WARN_ON_ONCE(ha->mbx_count > 32); |
2885 | mboxes = (1ULL << ha->mbx_count) - 1; | ||
2884 | if (!ha->mcp) | 2886 | if (!ha->mcp) |
2885 | ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); | 2887 | ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); |
2886 | else | 2888 | else |
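Editor's note: the qla_isr.c hunks above widen the shift to 1ULL because, once mbx_count can reach 32, a 32-bit "1 << count" is undefined behaviour and cannot yield the intended all-ones mask, whereas "(1ULL << count) - 1" can. A small demonstration:

/*
 * 64-bit shift produces the full 32-bit mask safely.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 32;
	unsigned long long mask = (1ULL << count) - 1;	/* 0xffffffff */

	printf("(1ULL << %u) - 1 = 0x%llx\n", count, mask);
	/* (1 << 32) - 1 on a 32-bit int would be undefined behaviour */
	return 0;
}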
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ee6e02d146..afcb5567998a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
3625 | } | 3625 | } |
3626 | qla2x00_wait_for_hba_ready(base_vha); | 3626 | qla2x00_wait_for_hba_ready(base_vha); |
3627 | 3627 | ||
3628 | qla2x00_wait_for_sess_deletion(base_vha); | ||
3629 | |||
3628 | /* | 3630 | /* |
3629 | * if UNLOAD flag is already set, then continue unload, | 3631 | * if UNLOAD flag is already set, then continue unload, |
3630 | * where it was set first. | 3632 | * where it was set first. |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index fc89af8fe256..896b2d8bd803 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -4871,8 +4871,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | |||
4871 | sess); | 4871 | sess); |
4872 | qlt_send_term_imm_notif(vha, iocb, 1); | 4872 | qlt_send_term_imm_notif(vha, iocb, 1); |
4873 | res = 0; | 4873 | res = 0; |
4874 | spin_lock_irqsave(&tgt->ha->tgt.sess_lock, | ||
4875 | flags); | ||
4876 | break; | 4874 | break; |
4877 | } | 4875 | } |
4878 | 4876 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index fc233717355f..817f312023a9 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -168,6 +168,8 @@ | |||
168 | #define DEV_DB_NON_PERSISTENT 0 | 168 | #define DEV_DB_NON_PERSISTENT 0 |
169 | #define DEV_DB_PERSISTENT 1 | 169 | #define DEV_DB_PERSISTENT 1 |
170 | 170 | ||
171 | #define QL4_ISP_REG_DISCONNECT 0xffffffffU | ||
172 | |||
171 | #define COPY_ISID(dst_isid, src_isid) { \ | 173 | #define COPY_ISID(dst_isid, src_isid) { \ |
172 | int i, j; \ | 174 | int i, j; \ |
173 | for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ | 175 | for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 82e889bbe0ed..fc2c97d9a0d6 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { | |||
262 | 262 | ||
263 | static struct scsi_transport_template *qla4xxx_scsi_transport; | 263 | static struct scsi_transport_template *qla4xxx_scsi_transport; |
264 | 264 | ||
265 | static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) | ||
266 | { | ||
267 | u32 reg_val = 0; | ||
268 | int rval = QLA_SUCCESS; | ||
269 | |||
270 | if (is_qla8022(ha)) | ||
271 | reg_val = readl(&ha->qla4_82xx_reg->host_status); | ||
272 | else if (is_qla8032(ha) || is_qla8042(ha)) | ||
273 | reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); | ||
274 | else | ||
275 | reg_val = readw(&ha->reg->ctrl_status); | ||
276 | |||
277 | if (reg_val == QL4_ISP_REG_DISCONNECT) | ||
278 | rval = QLA_ERROR; | ||
279 | |||
280 | return rval; | ||
281 | } | ||
282 | |||
265 | static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, | 283 | static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, |
266 | uint32_t iface_type, uint32_t payload_size, | 284 | uint32_t iface_type, uint32_t payload_size, |
267 | uint32_t pid, struct sockaddr *dst_addr) | 285 | uint32_t pid, struct sockaddr *dst_addr) |
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) | |||
9186 | struct srb *srb = NULL; | 9204 | struct srb *srb = NULL; |
9187 | int ret = SUCCESS; | 9205 | int ret = SUCCESS; |
9188 | int wait = 0; | 9206 | int wait = 0; |
9207 | int rval; | ||
9189 | 9208 | ||
9190 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", | 9209 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", |
9191 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); | 9210 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); |
9192 | 9211 | ||
9212 | rval = qla4xxx_isp_check_reg(ha); | ||
9213 | if (rval != QLA_SUCCESS) { | ||
9214 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9215 | return FAILED; | ||
9216 | } | ||
9217 | |||
9193 | spin_lock_irqsave(&ha->hardware_lock, flags); | 9218 | spin_lock_irqsave(&ha->hardware_lock, flags); |
9194 | srb = (struct srb *) CMD_SP(cmd); | 9219 | srb = (struct srb *) CMD_SP(cmd); |
9195 | if (!srb) { | 9220 | if (!srb) { |
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
9241 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 9266 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
9242 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | 9267 | struct ddb_entry *ddb_entry = cmd->device->hostdata; |
9243 | int ret = FAILED, stat; | 9268 | int ret = FAILED, stat; |
9269 | int rval; | ||
9244 | 9270 | ||
9245 | if (!ddb_entry) | 9271 | if (!ddb_entry) |
9246 | return ret; | 9272 | return ret; |
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
9260 | cmd, jiffies, cmd->request->timeout / HZ, | 9286 | cmd, jiffies, cmd->request->timeout / HZ, |
9261 | ha->dpc_flags, cmd->result, cmd->allowed)); | 9287 | ha->dpc_flags, cmd->result, cmd->allowed)); |
9262 | 9288 | ||
9289 | rval = qla4xxx_isp_check_reg(ha); | ||
9290 | if (rval != QLA_SUCCESS) { | ||
9291 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9292 | return FAILED; | ||
9293 | } | ||
9294 | |||
9263 | /* FIXME: wait for hba to go online */ | 9295 | /* FIXME: wait for hba to go online */ |
9264 | stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); | 9296 | stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); |
9265 | if (stat != QLA_SUCCESS) { | 9297 | if (stat != QLA_SUCCESS) { |
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
9303 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 9335 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
9304 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | 9336 | struct ddb_entry *ddb_entry = cmd->device->hostdata; |
9305 | int stat, ret; | 9337 | int stat, ret; |
9338 | int rval; | ||
9306 | 9339 | ||
9307 | if (!ddb_entry) | 9340 | if (!ddb_entry) |
9308 | return FAILED; | 9341 | return FAILED; |
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
9320 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, | 9353 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, |
9321 | ha->dpc_flags, cmd->result, cmd->allowed)); | 9354 | ha->dpc_flags, cmd->result, cmd->allowed)); |
9322 | 9355 | ||
9356 | rval = qla4xxx_isp_check_reg(ha); | ||
9357 | if (rval != QLA_SUCCESS) { | ||
9358 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9359 | return FAILED; | ||
9360 | } | ||
9361 | |||
9323 | stat = qla4xxx_reset_target(ha, ddb_entry); | 9362 | stat = qla4xxx_reset_target(ha, ddb_entry); |
9324 | if (stat != QLA_SUCCESS) { | 9363 | if (stat != QLA_SUCCESS) { |
9325 | starget_printk(KERN_INFO, scsi_target(cmd->device), | 9364 | starget_printk(KERN_INFO, scsi_target(cmd->device), |
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
9374 | { | 9413 | { |
9375 | int return_status = FAILED; | 9414 | int return_status = FAILED; |
9376 | struct scsi_qla_host *ha; | 9415 | struct scsi_qla_host *ha; |
9416 | int rval; | ||
9377 | 9417 | ||
9378 | ha = to_qla_host(cmd->device->host); | 9418 | ha = to_qla_host(cmd->device->host); |
9379 | 9419 | ||
9420 | rval = qla4xxx_isp_check_reg(ha); | ||
9421 | if (rval != QLA_SUCCESS) { | ||
9422 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9423 | return FAILED; | ||
9424 | } | ||
9425 | |||
9380 | if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) | 9426 | if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) |
9381 | qla4_83xx_set_idc_dontreset(ha); | 9427 | qla4_83xx_set_idc_dontreset(ha); |
9382 | 9428 | ||
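
The ql4_os.c additions follow a common PCI error-handling pattern: a surprise-removed device reads back as all ones, so qla4xxx_isp_check_reg() probes a live status register first and each error handler (abort, device/target/host reset) fails fast instead of touching dead hardware. A hedged userspace-style sketch of the idea, with read_reg32() standing in for the driver's readl()-based accessors:

    #include <stdbool.h>
    #include <stdint.h>

    #define REG_DISCONNECT 0xffffffffU   /* value a removed PCI device reads back */

    /* Stand-in for an MMIO read; in a real driver this would be readl(). */
    static uint32_t read_reg32(const volatile uint32_t *reg)
    {
            return *reg;
    }

    /* Return true when the device still responds on the bus. */
    static bool device_is_present(const volatile uint32_t *status_reg)
    {
            return read_reg32(status_reg) != REG_DISCONNECT;
    }

    int main(void)
    {
            uint32_t live = 0x00001234, gone = REG_DISCONNECT;

            return (device_is_present(&live) && !device_is_present(&gone)) ? 0 : 1;
    }
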
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 40fc7a590e81..6be5ab32c94f 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -1657,7 +1657,7 @@ static struct scsi_host_template scsi_driver = { | |||
1657 | .eh_timed_out = storvsc_eh_timed_out, | 1657 | .eh_timed_out = storvsc_eh_timed_out, |
1658 | .slave_alloc = storvsc_device_alloc, | 1658 | .slave_alloc = storvsc_device_alloc, |
1659 | .slave_configure = storvsc_device_configure, | 1659 | .slave_configure = storvsc_device_configure, |
1660 | .cmd_per_lun = 255, | 1660 | .cmd_per_lun = 2048, |
1661 | .this_id = -1, | 1661 | .this_id = -1, |
1662 | .use_clustering = ENABLE_CLUSTERING, | 1662 | .use_clustering = ENABLE_CLUSTERING, |
1663 | /* Make sure we dont get a sg segment crosses a page boundary */ | 1663 | /* Make sure we dont get a sg segment crosses a page boundary */ |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ca360daa6a25..378af306fda1 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa | |||
536 | * Look for the greatest clock divisor that allows an | 536 | * Look for the greatest clock divisor that allows an |
537 | * input speed faster than the period. | 537 | * input speed faster than the period. |
538 | */ | 538 | */ |
539 | while (div-- > 0) | 539 | while (--div > 0) |
540 | if (kpc >= (div_10M[div] << 2)) break; | 540 | if (kpc >= (div_10M[div] << 2)) break; |
541 | 541 | ||
542 | /* | 542 | /* |
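
The one-character sym_hipd.c change swaps a post-decrement for a pre-decrement so the loop counter can no longer underflow: with div-- the counter is decremented one extra time after the final failing test, which wraps an unsigned counter past zero before it is later used as an array index. A small self-contained illustration (the array contents and sizes are made up; only the loop shape matches the driver):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int div_10M[] = { 10, 20, 30, 40 };
            unsigned char div;

            /* Post-decrement: after the failing test (div == 0), div wraps to 255. */
            div = 4;
            while (div-- > 0)
                    if (div_10M[div] > 1000) break;   /* never true here */
            printf("post-decrement leaves div = %u (underflowed)\n", (unsigned int)div);

            /* Pre-decrement: the loop exits with div == 0, a valid index. */
            div = 4;
            while (--div > 0)
                    if (div_10M[div] > 1000) break;
            printf("pre-decrement leaves div = %u\n", (unsigned int)div);

            return 0;
    }
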
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a355d989b414..c7da2c185990 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
4352 | /* REPORT SUPPORTED OPERATION CODES is not supported */ | 4352 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
4353 | sdev->no_report_opcodes = 1; | 4353 | sdev->no_report_opcodes = 1; |
4354 | 4354 | ||
4355 | /* WRITE_SAME command is not supported */ | ||
4356 | sdev->no_write_same = 1; | ||
4355 | 4357 | ||
4356 | ufshcd_set_queue_depth(sdev); | 4358 | ufshcd_set_queue_depth(sdev); |
4357 | 4359 | ||
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 53f7275d6cbd..750f93197411 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c | |||
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap, | |||
348 | if (i == 1) { | 348 | if (i == 1) { |
349 | domain->supply = devm_regulator_get(dev, "pu"); | 349 | domain->supply = devm_regulator_get(dev, "pu"); |
350 | if (IS_ERR(domain->supply)) | 350 | if (IS_ERR(domain->supply)) |
351 | return PTR_ERR(domain->supply);; | 351 | return PTR_ERR(domain->supply); |
352 | 352 | ||
353 | ret = imx_pgc_get_clocks(dev, domain); | 353 | ret = imx_pgc_get_clocks(dev, domain); |
354 | if (ret) | 354 | if (ret) |
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev) | |||
470 | 470 | ||
471 | static int imx_gpc_remove(struct platform_device *pdev) | 471 | static int imx_gpc_remove(struct platform_device *pdev) |
472 | { | 472 | { |
473 | struct device_node *pgc_node; | ||
473 | int ret; | 474 | int ret; |
474 | 475 | ||
476 | pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); | ||
477 | |||
478 | /* bail out if DT too old and doesn't provide the necessary info */ | ||
479 | if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && | ||
480 | !pgc_node) | ||
481 | return 0; | ||
482 | |||
475 | /* | 483 | /* |
476 | * If the old DT binding is used the toplevel driver needs to | 484 | * If the old DT binding is used the toplevel driver needs to |
477 | * de-register the power domains | 485 | * de-register the power domains |
478 | */ | 486 | */ |
479 | if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { | 487 | if (!pgc_node) { |
480 | of_genpd_del_provider(pdev->dev.of_node); | 488 | of_genpd_del_provider(pdev->dev.of_node); |
481 | 489 | ||
482 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); | 490 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index bbdc53b686dd..6dbba5aff191 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -702,30 +702,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, | |||
702 | size_t pgstart, pgend; | 702 | size_t pgstart, pgend; |
703 | int ret = -EINVAL; | 703 | int ret = -EINVAL; |
704 | 704 | ||
705 | mutex_lock(&ashmem_mutex); | ||
706 | |||
705 | if (unlikely(!asma->file)) | 707 | if (unlikely(!asma->file)) |
706 | return -EINVAL; | 708 | goto out_unlock; |
707 | 709 | ||
708 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) | 710 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) { |
709 | return -EFAULT; | 711 | ret = -EFAULT; |
712 | goto out_unlock; | ||
713 | } | ||
710 | 714 | ||
711 | /* per custom, you can pass zero for len to mean "everything onward" */ | 715 | /* per custom, you can pass zero for len to mean "everything onward" */ |
712 | if (!pin.len) | 716 | if (!pin.len) |
713 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; | 717 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; |
714 | 718 | ||
715 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) | 719 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) |
716 | return -EINVAL; | 720 | goto out_unlock; |
717 | 721 | ||
718 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) | 722 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) |
719 | return -EINVAL; | 723 | goto out_unlock; |
720 | 724 | ||
721 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) | 725 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) |
722 | return -EINVAL; | 726 | goto out_unlock; |
723 | 727 | ||
724 | pgstart = pin.offset / PAGE_SIZE; | 728 | pgstart = pin.offset / PAGE_SIZE; |
725 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; | 729 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; |
726 | 730 | ||
727 | mutex_lock(&ashmem_mutex); | ||
728 | |||
729 | switch (cmd) { | 731 | switch (cmd) { |
730 | case ASHMEM_PIN: | 732 | case ASHMEM_PIN: |
731 | ret = ashmem_pin(asma, pgstart, pgend); | 733 | ret = ashmem_pin(asma, pgstart, pgend); |
@@ -738,6 +740,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, | |||
738 | break; | 740 | break; |
739 | } | 741 | } |
740 | 742 | ||
743 | out_unlock: | ||
741 | mutex_unlock(&ashmem_mutex); | 744 | mutex_unlock(&ashmem_mutex); |
742 | 745 | ||
743 | return ret; | 746 | return ret; |
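
The ashmem hunk moves the mutex acquisition ahead of the argument checks so that asma->file and asma->size cannot change between validation and use, and converts the scattered early returns into a single unlock path. A hedged pthread-based sketch of the same lock-then-validate shape (the struct and field names are illustrative, not ashmem's):

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    struct area {
            pthread_mutex_t lock;
            void *file;                  /* may be cleared by a concurrent release */
            size_t size;
    };

    static int pin_range(struct area *a, size_t offset, size_t len)
    {
            int ret = -EINVAL;

            pthread_mutex_lock(&a->lock);        /* take the lock first ... */

            if (!a->file)                        /* ... then validate state under it */
                    goto out_unlock;
            if (offset + len < offset || offset + len > a->size)
                    goto out_unlock;

            /* ... perform the pin while still holding the lock ... */
            ret = 0;

    out_unlock:
            pthread_mutex_unlock(&a->lock);
            return ret;
    }

    static int dummy_file;

    int main(void)
    {
            struct area a = { PTHREAD_MUTEX_INITIALIZER, &dummy_file, 4096 };

            return pin_range(&a, 0, 4096);       /* 0 on success */
    }
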
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 94e06925c712..49718c96bf9e 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <linux/cma.h> | 13 | #include <linux/cma.h> |
14 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
15 | #include <linux/highmem.h> | ||
15 | 16 | ||
16 | #include "ion.h" | 17 | #include "ion.h" |
17 | 18 | ||
@@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, | |||
42 | if (!pages) | 43 | if (!pages) |
43 | return -ENOMEM; | 44 | return -ENOMEM; |
44 | 45 | ||
46 | if (PageHighMem(pages)) { | ||
47 | unsigned long nr_clear_pages = nr_pages; | ||
48 | struct page *page = pages; | ||
49 | |||
50 | while (nr_clear_pages > 0) { | ||
51 | void *vaddr = kmap_atomic(page); | ||
52 | |||
53 | memset(vaddr, 0, PAGE_SIZE); | ||
54 | kunmap_atomic(vaddr); | ||
55 | page++; | ||
56 | nr_clear_pages--; | ||
57 | } | ||
58 | } else { | ||
59 | memset(page_address(pages), 0, size); | ||
60 | } | ||
61 | |||
45 | table = kmalloc(sizeof(*table), GFP_KERNEL); | 62 | table = kmalloc(sizeof(*table), GFP_KERNEL); |
46 | if (!table) | 63 | if (!table) |
47 | goto err; | 64 | goto err; |
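
The ion_cma_heap.c hunk zeroes a fresh CMA allocation before it is handed out; highmem pages have no permanent kernel mapping, so they are mapped and cleared one page at a time with kmap_atomic() instead of a single memset() over page_address(). A rough userspace analogue of clearing a buffer in page-sized chunks through a temporary mapping (map_page()/unmap_page() are hypothetical stand-ins, not kernel APIs):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Hypothetical stand-ins for kmap_atomic()/kunmap_atomic(); in this
     * sketch the "mapping" is simply the page pointer itself. */
    static void *map_page(void *page)   { return page; }
    static void unmap_page(void *addr)  { (void)addr; }

    /* Clear an allocation one page at a time through a temporary mapping,
     * as a driver must when the pages have no permanent linear mapping. */
    static void clear_pages(void *pages, size_t nr_pages)
    {
            char *page = pages;

            while (nr_pages--) {
                    void *vaddr = map_page(page);

                    memset(vaddr, 0, PAGE_SIZE);
                    unmap_page(vaddr);
                    page += PAGE_SIZE;
            }
    }

    int main(void)
    {
            void *buf = malloc(4 * PAGE_SIZE);

            if (buf)
                    clear_pages(buf, 4);
            free(buf);
            return 0;
    }
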
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 1f9100049176..b35ef7ee6901 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | config FSL_MC_BUS | 8 | config FSL_MC_BUS |
9 | bool "QorIQ DPAA2 fsl-mc bus driver" | 9 | bool "QorIQ DPAA2 fsl-mc bus driver" |
10 | depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC))) | 10 | depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC))) |
11 | select GENERIC_MSI_IRQ_DOMAIN | 11 | select GENERIC_MSI_IRQ_DOMAIN |
12 | help | 12 | help |
13 | Driver to enable the bus infrastructure for the QorIQ DPAA2 | 13 | Driver to enable the bus infrastructure for the QorIQ DPAA2 |
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index f01595593ce2..425e8b82533b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c | |||
@@ -141,6 +141,8 @@ | |||
141 | #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ | 141 | #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ |
142 | #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ | 142 | #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ |
143 | 143 | ||
144 | #define AD7192_EXT_FREQ_MHZ_MIN 2457600 | ||
145 | #define AD7192_EXT_FREQ_MHZ_MAX 5120000 | ||
144 | #define AD7192_INT_FREQ_MHZ 4915200 | 146 | #define AD7192_INT_FREQ_MHZ 4915200 |
145 | 147 | ||
146 | /* NOTE: | 148 | /* NOTE: |
@@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st) | |||
218 | ARRAY_SIZE(ad7192_calib_arr)); | 220 | ARRAY_SIZE(ad7192_calib_arr)); |
219 | } | 221 | } |
220 | 222 | ||
223 | static inline bool ad7192_valid_external_frequency(u32 freq) | ||
224 | { | ||
225 | return (freq >= AD7192_EXT_FREQ_MHZ_MIN && | ||
226 | freq <= AD7192_EXT_FREQ_MHZ_MAX); | ||
227 | } | ||
228 | |||
221 | static int ad7192_setup(struct ad7192_state *st, | 229 | static int ad7192_setup(struct ad7192_state *st, |
222 | const struct ad7192_platform_data *pdata) | 230 | const struct ad7192_platform_data *pdata) |
223 | { | 231 | { |
@@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st, | |||
243 | id); | 251 | id); |
244 | 252 | ||
245 | switch (pdata->clock_source_sel) { | 253 | switch (pdata->clock_source_sel) { |
246 | case AD7192_CLK_EXT_MCLK1_2: | ||
247 | case AD7192_CLK_EXT_MCLK2: | ||
248 | st->mclk = AD7192_INT_FREQ_MHZ; | ||
249 | break; | ||
250 | case AD7192_CLK_INT: | 254 | case AD7192_CLK_INT: |
251 | case AD7192_CLK_INT_CO: | 255 | case AD7192_CLK_INT_CO: |
252 | if (pdata->ext_clk_hz) | 256 | st->mclk = AD7192_INT_FREQ_MHZ; |
253 | st->mclk = pdata->ext_clk_hz; | ||
254 | else | ||
255 | st->mclk = AD7192_INT_FREQ_MHZ; | ||
256 | break; | 257 | break; |
258 | case AD7192_CLK_EXT_MCLK1_2: | ||
259 | case AD7192_CLK_EXT_MCLK2: | ||
260 | if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) { | ||
261 | st->mclk = pdata->ext_clk_hz; | ||
262 | break; | ||
263 | } | ||
264 | dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n", | ||
265 | pdata->ext_clk_hz); | ||
266 | ret = -EINVAL; | ||
267 | goto out; | ||
257 | default: | 268 | default: |
258 | ret = -EINVAL; | 269 | ret = -EINVAL; |
259 | goto out; | 270 | goto out; |
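
The ad7192 hunk both untangles the swapped internal/external clock cases and rejects an external clock outside the 2.4576 MHz to 5.12 MHz window, returning -EINVAL instead of silently falling back to the internal frequency. A minimal range-check sketch of the same validation (frequencies in Hz; the names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define EXT_FREQ_HZ_MIN 2457600u   /* 2.4576 MHz */
    #define EXT_FREQ_HZ_MAX 5120000u   /* 5.12 MHz   */

    static bool valid_external_frequency(unsigned int freq)
    {
            return freq >= EXT_FREQ_HZ_MIN && freq <= EXT_FREQ_HZ_MAX;
    }

    int main(void)
    {
            printf("4.9152 MHz ok? %d\n", valid_external_frequency(4915200)); /* 1 */
            printf("1.0000 MHz ok? %d\n", valid_external_frequency(1000000)); /* 0 */
            return 0;
    }
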
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 2b28fb9c0048..3bcf49466361 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
648 | /* Ring buffer functions - here trigger setup related */ | 648 | /* Ring buffer functions - here trigger setup related */ |
649 | indio_dev->setup_ops = &ad5933_ring_setup_ops; | 649 | indio_dev->setup_ops = &ad5933_ring_setup_ops; |
650 | 650 | ||
651 | indio_dev->modes |= INDIO_BUFFER_HARDWARE; | ||
652 | |||
653 | return 0; | 651 | return 0; |
654 | } | 652 | } |
655 | 653 | ||
@@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client, | |||
762 | indio_dev->dev.parent = &client->dev; | 760 | indio_dev->dev.parent = &client->dev; |
763 | indio_dev->info = &ad5933_info; | 761 | indio_dev->info = &ad5933_info; |
764 | indio_dev->name = id->name; | 762 | indio_dev->name = id->name; |
765 | indio_dev->modes = INDIO_DIRECT_MODE; | 763 | indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE); |
766 | indio_dev->channels = ad5933_channels; | 764 | indio_dev->channels = ad5933_channels; |
767 | indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); | 765 | indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); |
768 | 766 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 06b3b54a0e68..7b366a6c0b49 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm) | |||
174 | wb = &acm->wb[wbn]; | 174 | wb = &acm->wb[wbn]; |
175 | if (!wb->use) { | 175 | if (!wb->use) { |
176 | wb->use = 1; | 176 | wb->use = 1; |
177 | wb->len = 0; | ||
177 | return wbn; | 178 | return wbn; |
178 | } | 179 | } |
179 | wbn = (wbn + 1) % ACM_NW; | 180 | wbn = (wbn + 1) % ACM_NW; |
@@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty, | |||
805 | static void acm_tty_flush_chars(struct tty_struct *tty) | 806 | static void acm_tty_flush_chars(struct tty_struct *tty) |
806 | { | 807 | { |
807 | struct acm *acm = tty->driver_data; | 808 | struct acm *acm = tty->driver_data; |
808 | struct acm_wb *cur = acm->putbuffer; | 809 | struct acm_wb *cur; |
809 | int err; | 810 | int err; |
810 | unsigned long flags; | 811 | unsigned long flags; |
811 | 812 | ||
813 | spin_lock_irqsave(&acm->write_lock, flags); | ||
814 | |||
815 | cur = acm->putbuffer; | ||
812 | if (!cur) /* nothing to do */ | 816 | if (!cur) /* nothing to do */ |
813 | return; | 817 | goto out; |
814 | 818 | ||
815 | acm->putbuffer = NULL; | 819 | acm->putbuffer = NULL; |
816 | err = usb_autopm_get_interface_async(acm->control); | 820 | err = usb_autopm_get_interface_async(acm->control); |
817 | spin_lock_irqsave(&acm->write_lock, flags); | ||
818 | if (err < 0) { | 821 | if (err < 0) { |
819 | cur->use = 0; | 822 | cur->use = 0; |
820 | acm->putbuffer = cur; | 823 | acm->putbuffer = cur; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 4024926c1d68..f4a548471f0f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -226,6 +226,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
226 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | 226 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = |
227 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | 227 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, |
228 | 228 | ||
229 | /* Corsair K70 RGB */ | ||
230 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
231 | |||
229 | /* Corsair Strafe RGB */ | 232 | /* Corsair Strafe RGB */ |
230 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, | 233 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, |
231 | 234 | ||
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index e4c3ce0de5de..5bcad1d869b5 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
@@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, | |||
1917 | /* Not specific buffer needed for ep0 ZLP */ | 1917 | /* Not specific buffer needed for ep0 ZLP */ |
1918 | dma_addr_t dma = hs_ep->desc_list_dma; | 1918 | dma_addr_t dma = hs_ep->desc_list_dma; |
1919 | 1919 | ||
1920 | dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); | 1920 | if (!index) |
1921 | dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); | ||
1922 | |||
1921 | dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); | 1923 | dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); |
1922 | } else { | 1924 | } else { |
1923 | dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | | 1925 | dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | |
@@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, | |||
2974 | if (ints & DXEPINT_STSPHSERCVD) { | 2976 | if (ints & DXEPINT_STSPHSERCVD) { |
2975 | dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); | 2977 | dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); |
2976 | 2978 | ||
2977 | /* Move to STATUS IN for DDMA */ | 2979 | /* Safety check EP0 state when STSPHSERCVD asserted */ |
2978 | if (using_desc_dma(hsotg)) | 2980 | if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) { |
2979 | dwc2_hsotg_ep0_zlp(hsotg, true); | 2981 | /* Move to STATUS IN for DDMA */ |
2982 | if (using_desc_dma(hsotg)) | ||
2983 | dwc2_hsotg_ep0_zlp(hsotg, true); | ||
2984 | } | ||
2985 | |||
2980 | } | 2986 | } |
2981 | 2987 | ||
2982 | if (ints & DXEPINT_BACK2BACKSETUP) | 2988 | if (ints & DXEPINT_BACK2BACKSETUP) |
@@ -3375,12 +3381,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, | |||
3375 | dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | | 3381 | dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | |
3376 | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); | 3382 | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); |
3377 | 3383 | ||
3378 | dwc2_hsotg_enqueue_setup(hsotg); | ||
3379 | |||
3380 | dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", | ||
3381 | dwc2_readl(hsotg->regs + DIEPCTL0), | ||
3382 | dwc2_readl(hsotg->regs + DOEPCTL0)); | ||
3383 | |||
3384 | /* clear global NAKs */ | 3384 | /* clear global NAKs */ |
3385 | val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; | 3385 | val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; |
3386 | if (!is_usb_reset) | 3386 | if (!is_usb_reset) |
@@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, | |||
3391 | mdelay(3); | 3391 | mdelay(3); |
3392 | 3392 | ||
3393 | hsotg->lx_state = DWC2_L0; | 3393 | hsotg->lx_state = DWC2_L0; |
3394 | |||
3395 | dwc2_hsotg_enqueue_setup(hsotg); | ||
3396 | |||
3397 | dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", | ||
3398 | dwc2_readl(hsotg->regs + DIEPCTL0), | ||
3399 | dwc2_readl(hsotg->regs + DOEPCTL0)); | ||
3394 | } | 3400 | } |
3395 | 3401 | ||
3396 | static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) | 3402 | static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ade2ab00d37a..f1d838a4acd6 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) | |||
100 | reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); | 100 | reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); |
101 | reg |= DWC3_GCTL_PRTCAPDIR(mode); | 101 | reg |= DWC3_GCTL_PRTCAPDIR(mode); |
102 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | 102 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); |
103 | |||
104 | dwc->current_dr_role = mode; | ||
103 | } | 105 | } |
104 | 106 | ||
105 | static void __dwc3_set_mode(struct work_struct *work) | 107 | static void __dwc3_set_mode(struct work_struct *work) |
@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work) | |||
133 | 135 | ||
134 | dwc3_set_prtcap(dwc, dwc->desired_dr_role); | 136 | dwc3_set_prtcap(dwc, dwc->desired_dr_role); |
135 | 137 | ||
136 | dwc->current_dr_role = dwc->desired_dr_role; | ||
137 | |||
138 | spin_unlock_irqrestore(&dwc->lock, flags); | 138 | spin_unlock_irqrestore(&dwc->lock, flags); |
139 | 139 | ||
140 | switch (dwc->desired_dr_role) { | 140 | switch (dwc->desired_dr_role) { |
@@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) | |||
219 | * XHCI driver will reset the host block. If dwc3 was configured for | 219 | * XHCI driver will reset the host block. If dwc3 was configured for |
220 | * host-only mode, then we can return early. | 220 | * host-only mode, then we can return early. |
221 | */ | 221 | */ |
222 | if (dwc->dr_mode == USB_DR_MODE_HOST) | 222 | if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) |
223 | return 0; | 223 | return 0; |
224 | 224 | ||
225 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | 225 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); |
@@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) | |||
234 | udelay(1); | 234 | udelay(1); |
235 | } while (--retries); | 235 | } while (--retries); |
236 | 236 | ||
237 | phy_exit(dwc->usb3_generic_phy); | ||
238 | phy_exit(dwc->usb2_generic_phy); | ||
239 | |||
237 | return -ETIMEDOUT; | 240 | return -ETIMEDOUT; |
238 | } | 241 | } |
239 | 242 | ||
@@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) | |||
483 | parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); | 486 | parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); |
484 | } | 487 | } |
485 | 488 | ||
489 | static int dwc3_core_ulpi_init(struct dwc3 *dwc) | ||
490 | { | ||
491 | int intf; | ||
492 | int ret = 0; | ||
493 | |||
494 | intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); | ||
495 | |||
496 | if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || | ||
497 | (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && | ||
498 | dwc->hsphy_interface && | ||
499 | !strncmp(dwc->hsphy_interface, "ulpi", 4))) | ||
500 | ret = dwc3_ulpi_init(dwc); | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
486 | /** | 505 | /** |
487 | * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core | 506 | * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core |
488 | * @dwc: Pointer to our controller context structure | 507 | * @dwc: Pointer to our controller context structure |
@@ -494,7 +513,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) | |||
494 | static int dwc3_phy_setup(struct dwc3 *dwc) | 513 | static int dwc3_phy_setup(struct dwc3 *dwc) |
495 | { | 514 | { |
496 | u32 reg; | 515 | u32 reg; |
497 | int ret; | ||
498 | 516 | ||
499 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); | 517 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); |
500 | 518 | ||
@@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) | |||
565 | } | 583 | } |
566 | /* FALLTHROUGH */ | 584 | /* FALLTHROUGH */ |
567 | case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: | 585 | case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: |
568 | ret = dwc3_ulpi_init(dwc); | ||
569 | if (ret) | ||
570 | return ret; | ||
571 | /* FALLTHROUGH */ | 586 | /* FALLTHROUGH */ |
572 | default: | 587 | default: |
573 | break; | 588 | break; |
@@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) | |||
724 | } | 739 | } |
725 | 740 | ||
726 | static int dwc3_core_get_phy(struct dwc3 *dwc); | 741 | static int dwc3_core_get_phy(struct dwc3 *dwc); |
742 | static int dwc3_core_ulpi_init(struct dwc3 *dwc); | ||
727 | 743 | ||
728 | /** | 744 | /** |
729 | * dwc3_core_init - Low-level initialization of DWC3 Core | 745 | * dwc3_core_init - Low-level initialization of DWC3 Core |
@@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc) | |||
755 | dwc->maximum_speed = USB_SPEED_HIGH; | 771 | dwc->maximum_speed = USB_SPEED_HIGH; |
756 | } | 772 | } |
757 | 773 | ||
758 | ret = dwc3_core_get_phy(dwc); | 774 | ret = dwc3_phy_setup(dwc); |
759 | if (ret) | 775 | if (ret) |
760 | goto err0; | 776 | goto err0; |
761 | 777 | ||
762 | ret = dwc3_core_soft_reset(dwc); | 778 | if (!dwc->ulpi_ready) { |
763 | if (ret) | 779 | ret = dwc3_core_ulpi_init(dwc); |
764 | goto err0; | 780 | if (ret) |
781 | goto err0; | ||
782 | dwc->ulpi_ready = true; | ||
783 | } | ||
765 | 784 | ||
766 | ret = dwc3_phy_setup(dwc); | 785 | if (!dwc->phys_ready) { |
786 | ret = dwc3_core_get_phy(dwc); | ||
787 | if (ret) | ||
788 | goto err0a; | ||
789 | dwc->phys_ready = true; | ||
790 | } | ||
791 | |||
792 | ret = dwc3_core_soft_reset(dwc); | ||
767 | if (ret) | 793 | if (ret) |
768 | goto err0; | 794 | goto err0a; |
769 | 795 | ||
770 | dwc3_core_setup_global_control(dwc); | 796 | dwc3_core_setup_global_control(dwc); |
771 | dwc3_core_num_eps(dwc); | 797 | dwc3_core_num_eps(dwc); |
@@ -838,6 +864,9 @@ err1: | |||
838 | phy_exit(dwc->usb2_generic_phy); | 864 | phy_exit(dwc->usb2_generic_phy); |
839 | phy_exit(dwc->usb3_generic_phy); | 865 | phy_exit(dwc->usb3_generic_phy); |
840 | 866 | ||
867 | err0a: | ||
868 | dwc3_ulpi_exit(dwc); | ||
869 | |||
841 | err0: | 870 | err0: |
842 | return ret; | 871 | return ret; |
843 | } | 872 | } |
@@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | |||
916 | 945 | ||
917 | switch (dwc->dr_mode) { | 946 | switch (dwc->dr_mode) { |
918 | case USB_DR_MODE_PERIPHERAL: | 947 | case USB_DR_MODE_PERIPHERAL: |
919 | dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE; | ||
920 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); | 948 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); |
921 | 949 | ||
922 | if (dwc->usb2_phy) | 950 | if (dwc->usb2_phy) |
@@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | |||
932 | } | 960 | } |
933 | break; | 961 | break; |
934 | case USB_DR_MODE_HOST: | 962 | case USB_DR_MODE_HOST: |
935 | dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST; | ||
936 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); | 963 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); |
937 | 964 | ||
938 | if (dwc->usb2_phy) | 965 | if (dwc->usb2_phy) |
@@ -1234,7 +1261,6 @@ err4: | |||
1234 | 1261 | ||
1235 | err3: | 1262 | err3: |
1236 | dwc3_free_event_buffers(dwc); | 1263 | dwc3_free_event_buffers(dwc); |
1237 | dwc3_ulpi_exit(dwc); | ||
1238 | 1264 | ||
1239 | err2: | 1265 | err2: |
1240 | pm_runtime_allow(&pdev->dev); | 1266 | pm_runtime_allow(&pdev->dev); |
@@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev) | |||
1284 | } | 1310 | } |
1285 | 1311 | ||
1286 | #ifdef CONFIG_PM | 1312 | #ifdef CONFIG_PM |
1287 | static int dwc3_suspend_common(struct dwc3 *dwc) | 1313 | static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) |
1288 | { | 1314 | { |
1289 | unsigned long flags; | 1315 | unsigned long flags; |
1290 | 1316 | ||
@@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | |||
1296 | dwc3_core_exit(dwc); | 1322 | dwc3_core_exit(dwc); |
1297 | break; | 1323 | break; |
1298 | case DWC3_GCTL_PRTCAP_HOST: | 1324 | case DWC3_GCTL_PRTCAP_HOST: |
1325 | /* do nothing during host runtime_suspend */ | ||
1326 | if (!PMSG_IS_AUTO(msg)) | ||
1327 | dwc3_core_exit(dwc); | ||
1328 | break; | ||
1299 | default: | 1329 | default: |
1300 | /* do nothing */ | 1330 | /* do nothing */ |
1301 | break; | 1331 | break; |
@@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | |||
1304 | return 0; | 1334 | return 0; |
1305 | } | 1335 | } |
1306 | 1336 | ||
1307 | static int dwc3_resume_common(struct dwc3 *dwc) | 1337 | static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) |
1308 | { | 1338 | { |
1309 | unsigned long flags; | 1339 | unsigned long flags; |
1310 | int ret; | 1340 | int ret; |
@@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc) | |||
1320 | spin_unlock_irqrestore(&dwc->lock, flags); | 1350 | spin_unlock_irqrestore(&dwc->lock, flags); |
1321 | break; | 1351 | break; |
1322 | case DWC3_GCTL_PRTCAP_HOST: | 1352 | case DWC3_GCTL_PRTCAP_HOST: |
1353 | /* nothing to do on host runtime_resume */ | ||
1354 | if (!PMSG_IS_AUTO(msg)) { | ||
1355 | ret = dwc3_core_init(dwc); | ||
1356 | if (ret) | ||
1357 | return ret; | ||
1358 | } | ||
1359 | break; | ||
1323 | default: | 1360 | default: |
1324 | /* do nothing */ | 1361 | /* do nothing */ |
1325 | break; | 1362 | break; |
@@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc) | |||
1331 | static int dwc3_runtime_checks(struct dwc3 *dwc) | 1368 | static int dwc3_runtime_checks(struct dwc3 *dwc) |
1332 | { | 1369 | { |
1333 | switch (dwc->current_dr_role) { | 1370 | switch (dwc->current_dr_role) { |
1334 | case USB_DR_MODE_PERIPHERAL: | 1371 | case DWC3_GCTL_PRTCAP_DEVICE: |
1335 | case USB_DR_MODE_OTG: | ||
1336 | if (dwc->connected) | 1372 | if (dwc->connected) |
1337 | return -EBUSY; | 1373 | return -EBUSY; |
1338 | break; | 1374 | break; |
1339 | case USB_DR_MODE_HOST: | 1375 | case DWC3_GCTL_PRTCAP_HOST: |
1340 | default: | 1376 | default: |
1341 | /* do nothing */ | 1377 | /* do nothing */ |
1342 | break; | 1378 | break; |
@@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev) | |||
1353 | if (dwc3_runtime_checks(dwc)) | 1389 | if (dwc3_runtime_checks(dwc)) |
1354 | return -EBUSY; | 1390 | return -EBUSY; |
1355 | 1391 | ||
1356 | ret = dwc3_suspend_common(dwc); | 1392 | ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); |
1357 | if (ret) | 1393 | if (ret) |
1358 | return ret; | 1394 | return ret; |
1359 | 1395 | ||
@@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev) | |||
1369 | 1405 | ||
1370 | device_init_wakeup(dev, false); | 1406 | device_init_wakeup(dev, false); |
1371 | 1407 | ||
1372 | ret = dwc3_resume_common(dwc); | 1408 | ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); |
1373 | if (ret) | 1409 | if (ret) |
1374 | return ret; | 1410 | return ret; |
1375 | 1411 | ||
@@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev) | |||
1416 | struct dwc3 *dwc = dev_get_drvdata(dev); | 1452 | struct dwc3 *dwc = dev_get_drvdata(dev); |
1417 | int ret; | 1453 | int ret; |
1418 | 1454 | ||
1419 | ret = dwc3_suspend_common(dwc); | 1455 | ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); |
1420 | if (ret) | 1456 | if (ret) |
1421 | return ret; | 1457 | return ret; |
1422 | 1458 | ||
@@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev) | |||
1432 | 1468 | ||
1433 | pinctrl_pm_select_default_state(dev); | 1469 | pinctrl_pm_select_default_state(dev); |
1434 | 1470 | ||
1435 | ret = dwc3_resume_common(dwc); | 1471 | ret = dwc3_resume_common(dwc, PMSG_RESUME); |
1436 | if (ret) | 1472 | if (ret) |
1437 | return ret; | 1473 | return ret; |
1438 | 1474 | ||
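
The dwc3/core.c rework lets dwc3_core_init() run again on system resume in host mode, so the one-time pieces (ULPI bus setup, PHY lookup) are now guarded by the new ulpi_ready/phys_ready flags, and the suspend/resume helpers take a pm_message_t so host-mode runtime PM stays a no-op while full system sleep tears the core down and reinitializes it. A hedged sketch of the init-once guard pattern (the controller struct and helper names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct controller {
            bool ulpi_ready;        /* one-time ULPI setup already done */
            bool phys_ready;        /* one-time PHY lookup already done */
    };

    static int ulpi_init(struct controller *c)  { (void)c; puts("ulpi init");  return 0; }
    static int get_phys(struct controller *c)   { (void)c; puts("phy lookup"); return 0; }

    /* Core init may be called on every resume; only the first call pays
     * for the one-time setup steps. */
    static int core_init(struct controller *c)
    {
            int ret;

            if (!c->ulpi_ready) {
                    ret = ulpi_init(c);
                    if (ret)
                            return ret;
                    c->ulpi_ready = true;
            }
            if (!c->phys_ready) {
                    ret = get_phys(c);
                    if (ret)
                            return ret;
                    c->phys_ready = true;
            }
            puts("soft reset + global setup");
            return 0;
    }

    int main(void)
    {
            struct controller c = { false, false };

            core_init(&c);          /* first probe: does everything       */
            core_init(&c);          /* resume path: skips the one-time work */
            return 0;
    }
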
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 03c7aaaac926..860d2bc184d1 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h | |||
@@ -158,13 +158,15 @@ | |||
158 | #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) | 158 | #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) |
159 | #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) | 159 | #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) |
160 | 160 | ||
161 | #define DWC3_TXFIFOQ 1 | 161 | #define DWC3_TXFIFOQ 0 |
162 | #define DWC3_RXFIFOQ 3 | 162 | #define DWC3_RXFIFOQ 1 |
163 | #define DWC3_TXREQQ 5 | 163 | #define DWC3_TXREQQ 2 |
164 | #define DWC3_RXREQQ 7 | 164 | #define DWC3_RXREQQ 3 |
165 | #define DWC3_RXINFOQ 9 | 165 | #define DWC3_RXINFOQ 4 |
166 | #define DWC3_DESCFETCHQ 13 | 166 | #define DWC3_PSTATQ 5 |
167 | #define DWC3_EVENTQ 15 | 167 | #define DWC3_DESCFETCHQ 6 |
168 | #define DWC3_EVENTQ 7 | ||
169 | #define DWC3_AUXEVENTQ 8 | ||
168 | 170 | ||
169 | /* Global RX Threshold Configuration Register */ | 171 | /* Global RX Threshold Configuration Register */ |
170 | #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) | 172 | #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) |
@@ -795,7 +797,9 @@ struct dwc3_scratchpad_array { | |||
795 | * @usb3_phy: pointer to USB3 PHY | 797 | * @usb3_phy: pointer to USB3 PHY |
796 | * @usb2_generic_phy: pointer to USB2 PHY | 798 | * @usb2_generic_phy: pointer to USB2 PHY |
797 | * @usb3_generic_phy: pointer to USB3 PHY | 799 | * @usb3_generic_phy: pointer to USB3 PHY |
800 | * @phys_ready: flag to indicate that PHYs are ready | ||
798 | * @ulpi: pointer to ulpi interface | 801 | * @ulpi: pointer to ulpi interface |
802 | * @ulpi_ready: flag to indicate that ULPI is initialized | ||
799 | * @u2sel: parameter from Set SEL request. | 803 | * @u2sel: parameter from Set SEL request. |
800 | * @u2pel: parameter from Set SEL request. | 804 | * @u2pel: parameter from Set SEL request. |
801 | * @u1sel: parameter from Set SEL request. | 805 | * @u1sel: parameter from Set SEL request. |
@@ -893,7 +897,10 @@ struct dwc3 { | |||
893 | struct phy *usb2_generic_phy; | 897 | struct phy *usb2_generic_phy; |
894 | struct phy *usb3_generic_phy; | 898 | struct phy *usb3_generic_phy; |
895 | 899 | ||
900 | bool phys_ready; | ||
901 | |||
896 | struct ulpi *ulpi; | 902 | struct ulpi *ulpi; |
903 | bool ulpi_ready; | ||
897 | 904 | ||
898 | void __iomem *regs; | 905 | void __iomem *regs; |
899 | size_t regs_size; | 906 | size_t regs_size; |
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 7ae0eefc7cc7..e54c3622eb28 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) | |||
143 | clk_disable_unprepare(simple->clks[i]); | 143 | clk_disable_unprepare(simple->clks[i]); |
144 | clk_put(simple->clks[i]); | 144 | clk_put(simple->clks[i]); |
145 | } | 145 | } |
146 | simple->num_clocks = 0; | ||
146 | 147 | ||
147 | reset_control_assert(simple->resets); | 148 | reset_control_assert(simple->resets); |
148 | reset_control_put(simple->resets); | 149 | reset_control_put(simple->resets); |
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index a4719e853b85..ed8b86517675 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev) | |||
582 | return 0; | 582 | return 0; |
583 | } | 583 | } |
584 | 584 | ||
585 | static void dwc3_omap_complete(struct device *dev) | ||
586 | { | ||
587 | struct dwc3_omap *omap = dev_get_drvdata(dev); | ||
588 | |||
589 | if (extcon_get_state(omap->edev, EXTCON_USB)) | ||
590 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); | ||
591 | else | ||
592 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF); | ||
593 | |||
594 | if (extcon_get_state(omap->edev, EXTCON_USB_HOST)) | ||
595 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); | ||
596 | else | ||
597 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT); | ||
598 | } | ||
599 | |||
585 | static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { | 600 | static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { |
586 | 601 | ||
587 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) | 602 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) |
603 | .complete = dwc3_omap_complete, | ||
588 | }; | 604 | }; |
589 | 605 | ||
590 | #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) | 606 | #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) |
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 9c2e4a17918e..18be31d5743a 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c | |||
@@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, | |||
854 | trb++; | 854 | trb++; |
855 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | 855 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; |
856 | trace_dwc3_complete_trb(ep0, trb); | 856 | trace_dwc3_complete_trb(ep0, trb); |
857 | ep0->trb_enqueue = 0; | 857 | |
858 | if (r->direction) | ||
859 | dwc->eps[1]->trb_enqueue = 0; | ||
860 | else | ||
861 | dwc->eps[0]->trb_enqueue = 0; | ||
862 | |||
858 | dwc->ep0_bounced = false; | 863 | dwc->ep0_bounced = false; |
859 | } | 864 | } |
860 | 865 | ||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 616ef49ccb49..2bda4eb1e9ac 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) | |||
2745 | break; | 2745 | break; |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; | ||
2749 | |||
2748 | /* Enable USB2 LPM Capability */ | 2750 | /* Enable USB2 LPM Capability */ |
2749 | 2751 | ||
2750 | if ((dwc->revision > DWC3_REVISION_194A) && | 2752 | if ((dwc->revision > DWC3_REVISION_194A) && |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8f2cf3baa19c..c2592d883f67 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -1855,44 +1855,20 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
1855 | 1855 | ||
1856 | spin_lock_irqsave(&func->ffs->eps_lock, flags); | 1856 | spin_lock_irqsave(&func->ffs->eps_lock, flags); |
1857 | while(count--) { | 1857 | while(count--) { |
1858 | struct usb_endpoint_descriptor *ds; | ||
1859 | struct usb_ss_ep_comp_descriptor *comp_desc = NULL; | ||
1860 | int needs_comp_desc = false; | ||
1861 | int desc_idx; | ||
1862 | |||
1863 | if (ffs->gadget->speed == USB_SPEED_SUPER) { | ||
1864 | desc_idx = 2; | ||
1865 | needs_comp_desc = true; | ||
1866 | } else if (ffs->gadget->speed == USB_SPEED_HIGH) | ||
1867 | desc_idx = 1; | ||
1868 | else | ||
1869 | desc_idx = 0; | ||
1870 | |||
1871 | /* fall-back to lower speed if desc missing for current speed */ | ||
1872 | do { | ||
1873 | ds = ep->descs[desc_idx]; | ||
1874 | } while (!ds && --desc_idx >= 0); | ||
1875 | |||
1876 | if (!ds) { | ||
1877 | ret = -EINVAL; | ||
1878 | break; | ||
1879 | } | ||
1880 | |||
1881 | ep->ep->driver_data = ep; | 1858 | ep->ep->driver_data = ep; |
1882 | ep->ep->desc = ds; | ||
1883 | 1859 | ||
1884 | if (needs_comp_desc) { | 1860 | ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); |
1885 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + | 1861 | if (ret) { |
1886 | USB_DT_ENDPOINT_SIZE); | 1862 | pr_err("%s: config_ep_by_speed(%s) returned %d\n", |
1887 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | 1863 | __func__, ep->ep->name, ret); |
1888 | ep->ep->comp_desc = comp_desc; | 1864 | break; |
1889 | } | 1865 | } |
1890 | 1866 | ||
1891 | ret = usb_ep_enable(ep->ep); | 1867 | ret = usb_ep_enable(ep->ep); |
1892 | if (likely(!ret)) { | 1868 | if (likely(!ret)) { |
1893 | epfile->ep = ep; | 1869 | epfile->ep = ep; |
1894 | epfile->in = usb_endpoint_dir_in(ds); | 1870 | epfile->in = usb_endpoint_dir_in(ep->ep->desc); |
1895 | epfile->isoc = usb_endpoint_xfer_isoc(ds); | 1871 | epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); |
1896 | } else { | 1872 | } else { |
1897 | break; | 1873 | break; |
1898 | } | 1874 | } |
@@ -2979,10 +2955,8 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
2979 | struct ffs_data *ffs = func->ffs; | 2955 | struct ffs_data *ffs = func->ffs; |
2980 | 2956 | ||
2981 | const int full = !!func->ffs->fs_descs_count; | 2957 | const int full = !!func->ffs->fs_descs_count; |
2982 | const int high = gadget_is_dualspeed(func->gadget) && | 2958 | const int high = !!func->ffs->hs_descs_count; |
2983 | func->ffs->hs_descs_count; | 2959 | const int super = !!func->ffs->ss_descs_count; |
2984 | const int super = gadget_is_superspeed(func->gadget) && | ||
2985 | func->ffs->ss_descs_count; | ||
2986 | 2960 | ||
2987 | int fs_len, hs_len, ss_len, ret, i; | 2961 | int fs_len, hs_len, ss_len, ret, i; |
2988 | struct ffs_ep *eps_ptr; | 2962 | struct ffs_ep *eps_ptr; |
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 11fe788b4308..d2dc1f00180b 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c | |||
@@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) | |||
524 | dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); | 524 | dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); |
525 | return ret; | 525 | return ret; |
526 | } | 526 | } |
527 | iad_desc.bFirstInterface = ret; | ||
528 | |||
527 | std_ac_if_desc.bInterfaceNumber = ret; | 529 | std_ac_if_desc.bInterfaceNumber = ret; |
528 | uac2->ac_intf = ret; | 530 | uac2->ac_intf = ret; |
529 | uac2->ac_alt = 0; | 531 | uac2->ac_alt = 0; |
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 1e9567091d86..0875d38476ee 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig | |||
@@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT | |||
274 | tristate "Synopsys USB 2.0 Device controller" | 274 | tristate "Synopsys USB 2.0 Device controller" |
275 | depends on USB_GADGET && OF && HAS_DMA | 275 | depends on USB_GADGET && OF && HAS_DMA |
276 | depends on EXTCON || EXTCON=n | 276 | depends on EXTCON || EXTCON=n |
277 | select USB_GADGET_DUALSPEED | ||
278 | select USB_SNP_CORE | 277 | select USB_SNP_CORE |
279 | default ARCH_BCM_IPROC | 278 | default ARCH_BCM_IPROC |
280 | help | 279 | help |
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 1e940f054cb8..6dbc489513cd 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c | |||
@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) | |||
77 | if (ret) { | 77 | if (ret) { |
78 | dev_err(&pci->dev, | 78 | dev_err(&pci->dev, |
79 | "couldn't add resources to bdc device\n"); | 79 | "couldn't add resources to bdc device\n"); |
80 | platform_device_put(bdc); | ||
80 | return ret; | 81 | return ret; |
81 | } | 82 | } |
82 | 83 | ||
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 859d5b11ba4c..1f8b19d9cf97 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request); | |||
180 | void usb_ep_free_request(struct usb_ep *ep, | 180 | void usb_ep_free_request(struct usb_ep *ep, |
181 | struct usb_request *req) | 181 | struct usb_request *req) |
182 | { | 182 | { |
183 | ep->ops->free_request(ep, req); | ||
184 | trace_usb_ep_free_request(ep, req, 0); | 183 | trace_usb_ep_free_request(ep, req, 0); |
184 | ep->ops->free_request(ep, req); | ||
185 | } | 185 | } |
186 | EXPORT_SYMBOL_GPL(usb_ep_free_request); | 186 | EXPORT_SYMBOL_GPL(usb_ep_free_request); |
187 | 187 | ||
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index e5b4ee96c4bf..56b517a38865 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c | |||
@@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe) | |||
1305 | { | 1305 | { |
1306 | struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); | 1306 | struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); |
1307 | 1307 | ||
1308 | if (ep->name) | 1308 | if (ep->ep.name) |
1309 | nuke(ep, -ESHUTDOWN); | 1309 | nuke(ep, -ESHUTDOWN); |
1310 | } | 1310 | } |
1311 | 1311 | ||
@@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) | |||
1693 | curr_ep = get_ep_by_pipe(udc, i); | 1693 | curr_ep = get_ep_by_pipe(udc, i); |
1694 | 1694 | ||
1695 | /* If the ep is configured */ | 1695 | /* If the ep is configured */ |
1696 | if (curr_ep->name == NULL) { | 1696 | if (!curr_ep->ep.name) { |
1697 | WARNING("Invalid EP?"); | 1697 | WARNING("Invalid EP?"); |
1698 | continue; | 1698 | continue; |
1699 | } | 1699 | } |
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 6e87af248367..409cde4e6a51 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) | |||
2410 | __renesas_usb3_ep_free_request(usb3->ep0_req); | 2410 | __renesas_usb3_ep_free_request(usb3->ep0_req); |
2411 | if (usb3->phy) | 2411 | if (usb3->phy) |
2412 | phy_put(usb3->phy); | 2412 | phy_put(usb3->phy); |
2413 | pm_runtime_disable(usb3_to_dev(usb3)); | 2413 | pm_runtime_disable(&pdev->dev); |
2414 | 2414 | ||
2415 | return 0; | 2415 | return 0; |
2416 | } | 2416 | } |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index facafdf8fb95..d7641cbdee43 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb( | |||
774 | atomic_inc(&urb->use_count); | 774 | atomic_inc(&urb->use_count); |
775 | atomic_inc(&urb->dev->urbnum); | 775 | atomic_inc(&urb->dev->urbnum); |
776 | urb->setup_dma = dma_map_single( | 776 | urb->setup_dma = dma_map_single( |
777 | hcd->self.controller, | 777 | hcd->self.sysdev, |
778 | urb->setup_packet, | 778 | urb->setup_packet, |
779 | sizeof(struct usb_ctrlrequest), | 779 | sizeof(struct usb_ctrlrequest), |
780 | DMA_TO_DEVICE); | 780 | DMA_TO_DEVICE); |
781 | urb->transfer_dma = dma_map_single( | 781 | urb->transfer_dma = dma_map_single( |
782 | hcd->self.controller, | 782 | hcd->self.sysdev, |
783 | urb->transfer_buffer, | 783 | urb->transfer_buffer, |
784 | urb->transfer_buffer_length, | 784 | urb->transfer_buffer_length, |
785 | DMA_FROM_DEVICE); | 785 | DMA_FROM_DEVICE); |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 88158324dcae..327630405695 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature( | |||
1188 | * 15 secs after the setup | 1188 | * 15 secs after the setup |
1189 | */ | 1189 | */ |
1190 | if (is_setup) { | 1190 | if (is_setup) { |
1191 | /* SETUP pid */ | 1191 | /* SETUP pid, and interrupt after SETUP completion */ |
1192 | qtd_fill(ehci, qtd, urb->setup_dma, | 1192 | qtd_fill(ehci, qtd, urb->setup_dma, |
1193 | sizeof(struct usb_ctrlrequest), | 1193 | sizeof(struct usb_ctrlrequest), |
1194 | token | (2 /* "setup" */ << 8), 8); | 1194 | QTD_IOC | token | (2 /* "setup" */ << 8), 8); |
1195 | 1195 | ||
1196 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); | 1196 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); |
1197 | return 0; /*Return now; we shall come back after 15 seconds*/ | 1197 | return 0; /*Return now; we shall come back after 15 seconds*/ |
@@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature( | |||
1228 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); | 1228 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1229 | list_add_tail(&qtd->qtd_list, head); | 1229 | list_add_tail(&qtd->qtd_list, head); |
1230 | 1230 | ||
1231 | /* dont fill any data in such packets */ | 1231 | /* Interrupt after STATUS completion */ |
1232 | qtd_fill(ehci, qtd, 0, 0, token, 0); | 1232 | qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0); |
1233 | |||
1234 | /* by default, enable interrupt on urb completion */ | ||
1235 | if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) | ||
1236 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); | ||
1237 | 1233 | ||
1238 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); | 1234 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); |
1239 | 1235 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index ee9676349333..84f88fa411cd 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -74,6 +74,7 @@ static const char hcd_name [] = "ohci_hcd"; | |||
74 | 74 | ||
75 | #define STATECHANGE_DELAY msecs_to_jiffies(300) | 75 | #define STATECHANGE_DELAY msecs_to_jiffies(300) |
76 | #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) | 76 | #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) |
77 | #define IO_WATCHDOG_OFF 0xffffff00 | ||
77 | 78 | ||
78 | #include "ohci.h" | 79 | #include "ohci.h" |
79 | #include "pci-quirks.h" | 80 | #include "pci-quirks.h" |
@@ -231,7 +232,7 @@ static int ohci_urb_enqueue ( | |||
231 | } | 232 | } |
232 | 233 | ||
233 | /* Start up the I/O watchdog timer, if it's not running */ | 234 | /* Start up the I/O watchdog timer, if it's not running */ |
234 | if (!timer_pending(&ohci->io_watchdog) && | 235 | if (ohci->prev_frame_no == IO_WATCHDOG_OFF && |
235 | list_empty(&ohci->eds_in_use) && | 236 | list_empty(&ohci->eds_in_use) && |
236 | !(ohci->flags & OHCI_QUIRK_QEMU)) { | 237 | !(ohci->flags & OHCI_QUIRK_QEMU)) { |
237 | ohci->prev_frame_no = ohci_frame_no(ohci); | 238 | ohci->prev_frame_no = ohci_frame_no(ohci); |
@@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
501 | return 0; | 502 | return 0; |
502 | 503 | ||
503 | timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); | 504 | timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); |
505 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
504 | 506 | ||
505 | ohci->hcca = dma_alloc_coherent (hcd->self.controller, | 507 | ohci->hcca = dma_alloc_coherent (hcd->self.controller, |
506 | sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); | 508 | sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); |
@@ -730,7 +732,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
730 | u32 head; | 732 | u32 head; |
731 | struct ed *ed; | 733 | struct ed *ed; |
732 | struct td *td, *td_start, *td_next; | 734 | struct td *td, *td_start, *td_next; |
733 | unsigned frame_no; | 735 | unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF; |
734 | unsigned long flags; | 736 | unsigned long flags; |
735 | 737 | ||
736 | spin_lock_irqsave(&ohci->lock, flags); | 738 | spin_lock_irqsave(&ohci->lock, flags); |
@@ -835,7 +837,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
835 | } | 837 | } |
836 | } | 838 | } |
837 | if (!list_empty(&ohci->eds_in_use)) { | 839 | if (!list_empty(&ohci->eds_in_use)) { |
838 | ohci->prev_frame_no = frame_no; | 840 | prev_frame_no = frame_no; |
839 | ohci->prev_wdh_cnt = ohci->wdh_cnt; | 841 | ohci->prev_wdh_cnt = ohci->wdh_cnt; |
840 | ohci->prev_donehead = ohci_readl(ohci, | 842 | ohci->prev_donehead = ohci_readl(ohci, |
841 | &ohci->regs->donehead); | 843 | &ohci->regs->donehead); |
@@ -845,6 +847,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
845 | } | 847 | } |
846 | 848 | ||
847 | done: | 849 | done: |
850 | ohci->prev_frame_no = prev_frame_no; | ||
848 | spin_unlock_irqrestore(&ohci->lock, flags); | 851 | spin_unlock_irqrestore(&ohci->lock, flags); |
849 | } | 852 | } |
850 | 853 | ||
@@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd) | |||
973 | if (quirk_nec(ohci)) | 976 | if (quirk_nec(ohci)) |
974 | flush_work(&ohci->nec_work); | 977 | flush_work(&ohci->nec_work); |
975 | del_timer_sync(&ohci->io_watchdog); | 978 | del_timer_sync(&ohci->io_watchdog); |
979 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
976 | 980 | ||
977 | ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); | 981 | ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); |
978 | ohci_usb_reset(ohci); | 982 | ohci_usb_reset(ohci); |
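
The ohci-hcd.c change stops relying on timer_pending() to decide whether the I/O watchdog needs starting, since that test can already be false while the watchdog callback is still running; instead the state is recorded explicitly, with prev_frame_no holding the reserved sentinel IO_WATCHDOG_OFF whenever the watchdog is idle, and ohci_urb_enqueue() arming the timer only in that case. A small sketch of the sentinel pattern (types and names are illustrative):

    #include <stdio.h>

    #define IO_WATCHDOG_OFF 0xffffff00u   /* can never be a real frame number */

    struct host {
            unsigned int prev_frame_no;   /* last frame seen, or the OFF sentinel */
    };

    static unsigned int current_frame_no(void) { return 42; }

    /* Called on every URB submission: arm the watchdog only if it is off. */
    static void maybe_start_watchdog(struct host *h)
    {
            if (h->prev_frame_no == IO_WATCHDOG_OFF) {
                    h->prev_frame_no = current_frame_no();
                    puts("watchdog armed");
            }
    }

    /* Called from the watchdog itself when there is nothing left to watch. */
    static void watchdog_idle(struct host *h)
    {
            h->prev_frame_no = IO_WATCHDOG_OFF;
    }

    int main(void)
    {
            struct host h = { IO_WATCHDOG_OFF };

            maybe_start_watchdog(&h);     /* arms                        */
            maybe_start_watchdog(&h);     /* already running: no-op      */
            watchdog_idle(&h);
            maybe_start_watchdog(&h);     /* arms again after going idle */
            return 0;
    }
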
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index fb7aaa3b9d06..634f3c7bf774 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) | |||
311 | rc = ohci_rh_suspend (ohci, 0); | 311 | rc = ohci_rh_suspend (ohci, 0); |
312 | spin_unlock_irq (&ohci->lock); | 312 | spin_unlock_irq (&ohci->lock); |
313 | 313 | ||
314 | if (rc == 0) | 314 | if (rc == 0) { |
315 | del_timer_sync(&ohci->io_watchdog); | 315 | del_timer_sync(&ohci->io_watchdog); |
316 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
317 | } | ||
316 | return rc; | 318 | return rc; |
317 | } | 319 | } |
318 | 320 | ||
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index b2ec8c399363..4ccb85a67bb3 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
@@ -1019,6 +1019,8 @@ skip_ed: | |||
1019 | * have modified this list. normally it's just prepending | 1019 | * have modified this list. normally it's just prepending |
1020 | * entries (which we'd ignore), but paranoia won't hurt. | 1020 | * entries (which we'd ignore), but paranoia won't hurt. |
1021 | */ | 1021 | */ |
1022 | *last = ed->ed_next; | ||
1023 | ed->ed_next = NULL; | ||
1022 | modified = 0; | 1024 | modified = 0; |
1023 | 1025 | ||
1024 | /* unlink urbs as requested, but rescan the list after | 1026 | /* unlink urbs as requested, but rescan the list after |
@@ -1077,21 +1079,22 @@ rescan_this: | |||
1077 | goto rescan_this; | 1079 | goto rescan_this; |
1078 | 1080 | ||
1079 | /* | 1081 | /* |
1080 | * If no TDs are queued, take ED off the ed_rm_list. | 1082 | * If no TDs are queued, ED is now idle. |
1081 | * Otherwise, if the HC is running, reschedule. | 1083 | * Otherwise, if the HC is running, reschedule. |
1082 | * If not, leave it on the list for further dequeues. | 1084 | * If the HC isn't running, add ED back to the |
1085 | * start of the list for later processing. | ||
1083 | */ | 1086 | */ |
1084 | if (list_empty(&ed->td_list)) { | 1087 | if (list_empty(&ed->td_list)) { |
1085 | *last = ed->ed_next; | ||
1086 | ed->ed_next = NULL; | ||
1087 | ed->state = ED_IDLE; | 1088 | ed->state = ED_IDLE; |
1088 | list_del(&ed->in_use_list); | 1089 | list_del(&ed->in_use_list); |
1089 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { | 1090 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { |
1090 | *last = ed->ed_next; | ||
1091 | ed->ed_next = NULL; | ||
1092 | ed_schedule(ohci, ed); | 1091 | ed_schedule(ohci, ed); |
1093 | } else { | 1092 | } else { |
1094 | last = &ed->ed_next; | 1093 | ed->ed_next = ohci->ed_rm_list; |
1094 | ohci->ed_rm_list = ed; | ||
1095 | /* Don't loop on the same ED */ | ||
1096 | if (last == &ohci->ed_rm_list) | ||
1097 | last = &ed->ed_next; | ||
1095 | } | 1098 | } |
1096 | 1099 | ||
1097 | if (modified) | 1100 | if (modified) |
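Note on the ohci-q.c hunk above: each ED is now detached from ed_rm_list before it is processed and, when the controller is not running, pushed back onto the head of ohci->ed_rm_list, with an extra check so the scan pointer does not revisit the ED it just requeued. A compilable sketch of that worklist pattern with a made-up node type (not the driver's struct ed):

#include <stddef.h>
#include <stdio.h>

struct node {
    struct node *next;
    int done;                   /* stands in for "td_list is empty" */
};

static struct node *worklist;   /* stands in for ohci->ed_rm_list */

static void process_worklist(void)
{
    struct node **last = &worklist;
    struct node *n;

    while ((n = *last) != NULL) {
        /* detach first, as the patched ohci-q.c code now does */
        *last = n->next;
        n->next = NULL;

        if (n->done)
            continue;           /* node is idle: it stays off the list */

        /* not finished: push it back on the head for a later pass */
        n->next = worklist;
        worklist = n;
        /* don't loop on the same node we just requeued */
        if (last == &worklist)
            last = &n->next;
    }
}

int main(void)
{
    struct node a = { NULL, 1 }, b = { NULL, 0 };
    a.next = &b;
    worklist = &a;
    process_worklist();
    printf("still queued: %s\n", worklist == &b ? "b" : "none");
    return 0;
}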
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 161536717025..67ad4bb6919a 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -66,6 +66,23 @@ | |||
66 | #define AX_INDXC 0x30 | 66 | #define AX_INDXC 0x30 |
67 | #define AX_DATAC 0x34 | 67 | #define AX_DATAC 0x34 |
68 | 68 | ||
69 | #define PT_ADDR_INDX 0xE8 | ||
70 | #define PT_READ_INDX 0xE4 | ||
71 | #define PT_SIG_1_ADDR 0xA520 | ||
72 | #define PT_SIG_2_ADDR 0xA521 | ||
73 | #define PT_SIG_3_ADDR 0xA522 | ||
74 | #define PT_SIG_4_ADDR 0xA523 | ||
75 | #define PT_SIG_1_DATA 0x78 | ||
76 | #define PT_SIG_2_DATA 0x56 | ||
77 | #define PT_SIG_3_DATA 0x34 | ||
78 | #define PT_SIG_4_DATA 0x12 | ||
79 | #define PT4_P1_REG 0xB521 | ||
80 | #define PT4_P2_REG 0xB522 | ||
81 | #define PT2_P1_REG 0xD520 | ||
82 | #define PT2_P2_REG 0xD521 | ||
83 | #define PT1_P1_REG 0xD522 | ||
84 | #define PT1_P2_REG 0xD523 | ||
85 | |||
69 | #define NB_PCIE_INDX_ADDR 0xe0 | 86 | #define NB_PCIE_INDX_ADDR 0xe0 |
70 | #define NB_PCIE_INDX_DATA 0xe4 | 87 | #define NB_PCIE_INDX_DATA 0xe4 |
71 | #define PCIE_P_CNTL 0x10040 | 88 | #define PCIE_P_CNTL 0x10040 |
@@ -513,6 +530,98 @@ void usb_amd_dev_put(void) | |||
513 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); | 530 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); |
514 | 531 | ||
515 | /* | 532 | /* |
533 | * Check if port is disabled in BIOS on AMD Promontory host. | ||
534 | * BIOS Disabled ports may wake on connect/disconnect and need | ||
535 | * driver workaround to keep them disabled. | ||
536 | * Returns true if port is marked disabled. | ||
537 | */ | ||
538 | bool usb_amd_pt_check_port(struct device *device, int port) | ||
539 | { | ||
540 | unsigned char value, port_shift; | ||
541 | struct pci_dev *pdev; | ||
542 | u16 reg; | ||
543 | |||
544 | pdev = to_pci_dev(device); | ||
545 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); | ||
546 | |||
547 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
548 | if (value != PT_SIG_1_DATA) | ||
549 | return false; | ||
550 | |||
551 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); | ||
552 | |||
553 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
554 | if (value != PT_SIG_2_DATA) | ||
555 | return false; | ||
556 | |||
557 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); | ||
558 | |||
559 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
560 | if (value != PT_SIG_3_DATA) | ||
561 | return false; | ||
562 | |||
563 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); | ||
564 | |||
565 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
566 | if (value != PT_SIG_4_DATA) | ||
567 | return false; | ||
568 | |||
569 | /* Check disabled port setting, if bit is set port is enabled */ | ||
570 | switch (pdev->device) { | ||
571 | case 0x43b9: | ||
572 | case 0x43ba: | ||
573 | /* | ||
574 | * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) | ||
575 | * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0 | ||
576 | * PT4_P2_REG bits[6..0] represents ports 13 to 7 | ||
577 | */ | ||
578 | if (port > 6) { | ||
579 | reg = PT4_P2_REG; | ||
580 | port_shift = port - 7; | ||
581 | } else { | ||
582 | reg = PT4_P1_REG; | ||
583 | port_shift = port + 1; | ||
584 | } | ||
585 | break; | ||
586 | case 0x43bb: | ||
587 | /* | ||
588 | * device is AMD_PROMONTORYA_2(0x43bb) | ||
589 | * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0 | ||
590 | * PT2_P2_REG bits[5..0] represents ports 9 to 3 | ||
591 | */ | ||
592 | if (port > 2) { | ||
593 | reg = PT2_P2_REG; | ||
594 | port_shift = port - 3; | ||
595 | } else { | ||
596 | reg = PT2_P1_REG; | ||
597 | port_shift = port + 5; | ||
598 | } | ||
599 | break; | ||
600 | case 0x43bc: | ||
601 | /* | ||
602 | * device is AMD_PROMONTORYA_1(0x43bc) | ||
603 | * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0 | ||
604 | * PT1_P2_REG[5..0] represents ports 9 to 4 | ||
605 | */ | ||
606 | if (port > 3) { | ||
607 | reg = PT1_P2_REG; | ||
608 | port_shift = port - 4; | ||
609 | } else { | ||
610 | reg = PT1_P1_REG; | ||
611 | port_shift = port + 4; | ||
612 | } | ||
613 | break; | ||
614 | default: | ||
615 | return false; | ||
616 | } | ||
617 | pci_write_config_word(pdev, PT_ADDR_INDX, reg); | ||
618 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
619 | |||
620 | return !(value & BIT(port_shift)); | ||
621 | } | ||
622 | EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); | ||
623 | |||
624 | /* | ||
516 | * Make sure the controller is completely inactive, unable to | 625 | * Make sure the controller is completely inactive, unable to |
517 | * generate interrupts or do DMA. | 626 | * generate interrupts or do DMA. |
518 | */ | 627 | */ |
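Note on usb_amd_pt_check_port() above: after the indexed-register signature check, the helper maps a USB2 port number to a (register, bit) pair that differs per Promontory device ID; a clear bit means the BIOS disabled the port. A compilable sketch of just the mapping step, reusing the register constants from the patch (the PCI config accesses are omitted, and the wrapper function here is not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* register offsets copied from the patch */
#define PT4_P1_REG 0xB521
#define PT4_P2_REG 0xB522
#define PT2_P1_REG 0xD520
#define PT2_P2_REG 0xD521
#define PT1_P1_REG 0xD522
#define PT1_P2_REG 0xD523

struct pt_port_loc {
    uint16_t reg;               /* indexed register holding the enable flag */
    unsigned int bit;           /* bit within that register; set == enabled */
};

static bool pt_port_location(uint16_t device, int port, struct pt_port_loc *loc)
{
    switch (device) {
    case 0x43b9: case 0x43ba:   /* PROMONTORYA_4 / PROMONTORYA_3 */
        if (port > 6) {
            loc->reg = PT4_P2_REG;      /* bits[6..0] are ports 13..7 */
            loc->bit = port - 7;
        } else {
            loc->reg = PT4_P1_REG;      /* bits[7..1] are ports 6..0 */
            loc->bit = port + 1;
        }
        return true;
    case 0x43bb:                /* PROMONTORYA_2 */
        if (port > 2) {
            loc->reg = PT2_P2_REG;      /* per the patch comment: ports 9..3 */
            loc->bit = port - 3;
        } else {
            loc->reg = PT2_P1_REG;      /* per the patch comment: ports 2..0 */
            loc->bit = port + 5;
        }
        return true;
    case 0x43bc:                /* PROMONTORYA_1 */
        if (port > 3) {
            loc->reg = PT1_P2_REG;      /* per the patch comment: ports 9..4 */
            loc->bit = port - 4;
        } else {
            loc->reg = PT1_P1_REG;      /* per the patch comment: ports 3..0 */
            loc->bit = port + 4;
        }
        return true;
    default:
        return false;           /* not a Promontory device */
    }
}

int main(void)
{
    struct pt_port_loc loc;

    if (pt_port_location(0x43b9, 8, &loc))
        printf("port 8 -> reg 0x%04x bit %u\n", loc.reg, loc.bit);
    return 0;
}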
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b68dcb5dd0fd..4ca0d9b7e463 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h | |||
@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); | |||
17 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); | 17 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); |
18 | void sb800_prefetch(struct device *dev, int on); | 18 | void sb800_prefetch(struct device *dev, int on); |
19 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); | 19 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); |
20 | bool usb_amd_pt_check_port(struct device *device, int port); | ||
20 | #else | 21 | #else |
21 | struct pci_dev; | 22 | struct pci_dev; |
22 | static inline void usb_amd_quirk_pll_disable(void) {} | 23 | static inline void usb_amd_quirk_pll_disable(void) {} |
@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} | |||
25 | static inline void usb_amd_dev_put(void) {} | 26 | static inline void usb_amd_dev_put(void) {} |
26 | static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} | 27 | static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} |
27 | static inline void sb800_prefetch(struct device *dev, int on) {} | 28 | static inline void sb800_prefetch(struct device *dev, int on) {} |
29 | static inline bool usb_amd_pt_check_port(struct device *device, int port) | ||
30 | { | ||
31 | return false; | ||
32 | } | ||
28 | #endif /* CONFIG_USB_PCI */ | 33 | #endif /* CONFIG_USB_PCI */ |
29 | 34 | ||
30 | #endif /* __LINUX_USB_PCI_QUIRKS_H */ | 35 | #endif /* __LINUX_USB_PCI_QUIRKS_H */ |
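Note on the pci-quirks.h hunk: the new prototype is paired with a static inline stub in the #else branch so callers compile unchanged when CONFIG_USB_PCI is off. A generic illustration of that header pattern, with made-up names and a made-up config symbol:

/* quirk_demo.h -- illustrative only, not a kernel header */
#ifndef QUIRK_DEMO_H
#define QUIRK_DEMO_H

#include <stdbool.h>

#ifdef CONFIG_FOO_PCI
/* real implementation lives in a .c file built only when the option is on */
bool foo_check_port(void *dev, int port);
#else
/* stub keeps callers compiling and optimizes away when the option is off */
static inline bool foo_check_port(void *dev, int port)
{
    (void)dev;
    (void)port;
    return false;
}
#endif

#endif /* QUIRK_DEMO_H */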
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index e26e685d8a57..5851052d4668 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c | |||
@@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s, | |||
211 | static int xhci_ring_trb_show(struct seq_file *s, void *unused) | 211 | static int xhci_ring_trb_show(struct seq_file *s, void *unused) |
212 | { | 212 | { |
213 | int i; | 213 | int i; |
214 | struct xhci_ring *ring = s->private; | 214 | struct xhci_ring *ring = *(struct xhci_ring **)s->private; |
215 | struct xhci_segment *seg = ring->first_seg; | 215 | struct xhci_segment *seg = ring->first_seg; |
216 | 216 | ||
217 | for (i = 0; i < ring->num_segs; i++) { | 217 | for (i = 0; i < ring->num_segs; i++) { |
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, | |||
387 | 387 | ||
388 | snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); | 388 | snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); |
389 | epriv->root = xhci_debugfs_create_ring_dir(xhci, | 389 | epriv->root = xhci_debugfs_create_ring_dir(xhci, |
390 | &dev->eps[ep_index].new_ring, | 390 | &dev->eps[ep_index].ring, |
391 | epriv->name, | 391 | epriv->name, |
392 | spriv->root); | 392 | spriv->root); |
393 | spriv->eps[ep_index] = epriv; | 393 | spriv->eps[ep_index] = epriv; |
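Note on the xhci-debugfs.c hunk: the debugfs file now stores the address of dev->eps[ep_index].ring (a struct xhci_ring **) and the show callback dereferences it at read time, so it always sees the ring currently installed rather than a pointer captured at file creation. A small standalone sketch of the difference (toy types, not the xHCI ones):

#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };
struct endpoint { struct ring *ring; };

/* debugfs-style callback: "private" is a struct ring **, so each read
 * follows the endpoint's *current* ring pointer */
static void show_ring(void *private)
{
    struct ring *r = *(struct ring **)private;
    printf("ring id %d\n", r->id);
}

int main(void)
{
    struct endpoint ep;
    struct ring *first = malloc(sizeof(*first));
    struct ring *second = malloc(sizeof(*second));

    first->id = 1;
    second->id = 2;
    ep.ring = first;

    void *private = &ep.ring;   /* register the pointer-to-pointer once */
    show_ring(private);         /* prints 1 */

    ep.ring = second;           /* driver swaps rings later */
    free(first);
    show_ring(private);         /* still valid: prints 2 */

    free(second);
    return 0;
}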
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 46d5e08f05f1..72ebbc908e19 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
1224 | temp = readl(port_array[wIndex]); | 1224 | temp = readl(port_array[wIndex]); |
1225 | break; | 1225 | break; |
1226 | } | 1226 | } |
1227 | 1227 | /* Port must be enabled */ | |
1228 | /* Software should not attempt to set | 1228 | if (!(temp & PORT_PE)) { |
1229 | * port link state above '3' (U3) and the port | 1229 | retval = -ENODEV; |
1230 | * must be enabled. | 1230 | break; |
1231 | */ | 1231 | } |
1232 | if ((temp & PORT_PE) == 0 || | 1232 | /* Can't set port link state above '3' (U3) */ |
1233 | (link_state > USB_SS_PORT_LS_U3)) { | 1233 | if (link_state > USB_SS_PORT_LS_U3) { |
1234 | xhci_warn(xhci, "Cannot set link state.\n"); | 1234 | xhci_warn(xhci, "Cannot set port %d link state %d\n", |
1235 | wIndex, link_state); | ||
1235 | goto error; | 1236 | goto error; |
1236 | } | 1237 | } |
1237 | |||
1238 | if (link_state == USB_SS_PORT_LS_U3) { | 1238 | if (link_state == USB_SS_PORT_LS_U3) { |
1239 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | 1239 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, |
1240 | wIndex + 1); | 1240 | wIndex + 1); |
@@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
1522 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; | 1522 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; |
1523 | t2 &= ~PORT_WKDISC_E; | 1523 | t2 &= ~PORT_WKDISC_E; |
1524 | } | 1524 | } |
1525 | |||
1526 | if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && | ||
1527 | (hcd->speed < HCD_USB3)) { | ||
1528 | if (usb_amd_pt_check_port(hcd->self.controller, | ||
1529 | port_index)) | ||
1530 | t2 &= ~PORT_WAKE_BITS; | ||
1531 | } | ||
1525 | } else | 1532 | } else |
1526 | t2 &= ~PORT_WAKE_BITS; | 1533 | t2 &= ~PORT_WAKE_BITS; |
1527 | 1534 | ||
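Note on the xhci-hub.c suspend hunk: wake bits are cleared for a port only when the XHCI_U2_DISABLE_WAKE quirk is set, the root hub is the USB2 one (hcd->speed < HCD_USB3), and usb_amd_pt_check_port() reports the port as BIOS-disabled. A compilable sketch of that decision with stand-in helpers (the bit values mirror xHCI PORTSC wake bits but are labelled here only for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_WKCONN_E  (1u << 25)
#define PORT_WKDISC_E  (1u << 26)
#define PORT_WKOC_E    (1u << 27)
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

#define QUIRK_U2_DISABLE_WAKE (1u << 0)

/* stand-in for usb_amd_pt_check_port(): true if BIOS disabled the port */
static bool port_is_bios_disabled(int port)
{
    return port == 3;           /* pretend port 3 is disabled for the demo */
}

static uint32_t suspend_portsc(uint32_t portsc, unsigned int quirks,
                               bool usb2_hub, int port)
{
    if ((quirks & QUIRK_U2_DISABLE_WAKE) && usb2_hub &&
        port_is_bios_disabled(port))
        portsc &= ~PORT_WAKE_BITS;   /* no wake events from this port */
    return portsc;
}

int main(void)
{
    uint32_t t2 = PORT_WAKE_BITS;
    printf("port 3 -> %#x, port 4 -> %#x\n",
           (unsigned)suspend_portsc(t2, QUIRK_U2_DISABLE_WAKE, true, 3),
           (unsigned)suspend_portsc(t2, QUIRK_U2_DISABLE_WAKE, true, 4));
    return 0;
}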
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6c79037876db..5262fa571a5d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -42,6 +42,10 @@ | |||
42 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 | 42 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 |
43 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 | 43 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 |
44 | 44 | ||
45 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 | ||
46 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba | ||
47 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb | ||
48 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc | ||
45 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 | 49 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 |
46 | 50 | ||
47 | static const char hcd_name[] = "xhci_hcd"; | 51 | static const char hcd_name[] = "xhci_hcd"; |
@@ -125,6 +129,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
125 | if (pdev->vendor == PCI_VENDOR_ID_AMD) | 129 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
126 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 130 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
127 | 131 | ||
132 | if ((pdev->vendor == PCI_VENDOR_ID_AMD) && | ||
133 | ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || | ||
134 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || | ||
135 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || | ||
136 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) | ||
137 | xhci->quirks |= XHCI_U2_DISABLE_WAKE; | ||
138 | |||
128 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 139 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
129 | xhci->quirks |= XHCI_LPM_SUPPORT; | 140 | xhci->quirks |= XHCI_LPM_SUPPORT; |
130 | xhci->quirks |= XHCI_INTEL_HOST; | 141 | xhci->quirks |= XHCI_INTEL_HOST; |
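Note on the xhci-pci.c hunk: the quirk is set by chaining equality tests against the four Promontory device IDs. An equivalent table-driven check (purely illustrative; the kernel code keeps the explicit comparisons) looks like:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_VENDOR_ID_AMD 0x1022

/* device IDs from the patch */
static const uint16_t promontory_ids[] = { 0x43b9, 0x43ba, 0x43bb, 0x43bc };

static bool needs_u2_disable_wake(uint16_t vendor, uint16_t device)
{
    if (vendor != PCI_VENDOR_ID_AMD)
        return false;
    for (size_t i = 0; i < sizeof(promontory_ids) / sizeof(promontory_ids[0]); i++)
        if (device == promontory_ids[i])
            return true;
    return false;
}

int main(void)
{
    printf("0x43ba: %d, 0x43bd: %d\n",
           needs_u2_disable_wake(0x1022, 0x43ba),
           needs_u2_disable_wake(0x1022, 0x43bd));
    return 0;
}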
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1eeb3396300f..25d4b748a56f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd) | |||
646 | return; | 646 | return; |
647 | } | 647 | } |
648 | 648 | ||
649 | xhci_debugfs_exit(xhci); | ||
650 | |||
651 | xhci_dbc_exit(xhci); | 649 | xhci_dbc_exit(xhci); |
652 | 650 | ||
653 | spin_lock_irq(&xhci->lock); | 651 | spin_lock_irq(&xhci->lock); |
@@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd) | |||
680 | 678 | ||
681 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); | 679 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); |
682 | xhci_mem_cleanup(xhci); | 680 | xhci_mem_cleanup(xhci); |
681 | xhci_debugfs_exit(xhci); | ||
683 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 682 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
684 | "xhci_stop completed - status = %x", | 683 | "xhci_stop completed - status = %x", |
685 | readl(&xhci->op_regs->status)); | 684 | readl(&xhci->op_regs->status)); |
@@ -1014,6 +1013,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
1014 | 1013 | ||
1015 | xhci_dbg(xhci, "cleaning up memory\n"); | 1014 | xhci_dbg(xhci, "cleaning up memory\n"); |
1016 | xhci_mem_cleanup(xhci); | 1015 | xhci_mem_cleanup(xhci); |
1016 | xhci_debugfs_exit(xhci); | ||
1017 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", | 1017 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", |
1018 | readl(&xhci->op_regs->status)); | 1018 | readl(&xhci->op_regs->status)); |
1019 | 1019 | ||
@@ -3544,12 +3544,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
3544 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; | 3544 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
3545 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | 3545 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
3546 | } | 3546 | } |
3547 | 3547 | xhci_debugfs_remove_slot(xhci, udev->slot_id); | |
3548 | ret = xhci_disable_slot(xhci, udev->slot_id); | 3548 | ret = xhci_disable_slot(xhci, udev->slot_id); |
3549 | if (ret) { | 3549 | if (ret) |
3550 | xhci_debugfs_remove_slot(xhci, udev->slot_id); | ||
3551 | xhci_free_virt_device(xhci, udev->slot_id); | 3550 | xhci_free_virt_device(xhci, udev->slot_id); |
3552 | } | ||
3553 | } | 3551 | } |
3554 | 3552 | ||
3555 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) | 3553 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 96099a245c69..e4d7d3d06a75 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1822,7 +1822,7 @@ struct xhci_hcd { | |||
1822 | /* For controller with a broken Port Disable implementation */ | 1822 | /* For controller with a broken Port Disable implementation */ |
1823 | #define XHCI_BROKEN_PORT_PED (1 << 25) | 1823 | #define XHCI_BROKEN_PORT_PED (1 << 25) |
1824 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) | 1824 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) |
1825 | /* Reserved. It was XHCI_U2_DISABLE_WAKE */ | 1825 | #define XHCI_U2_DISABLE_WAKE (1 << 27) |
1826 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) | 1826 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) |
1827 | #define XHCI_HW_LPM_DISABLE (1 << 29) | 1827 | #define XHCI_HW_LPM_DISABLE (1 << 29) |
1828 | 1828 | ||
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 63b9e85dc0e9..236a60f53099 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ | 42 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ |
43 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ | 43 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ |
44 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ | 44 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ |
45 | #define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */ | ||
46 | #define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */ | ||
47 | #define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */ | ||
45 | #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ | 48 | #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ |
46 | #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ | 49 | #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ |
47 | #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ | 50 | #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ |
@@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = { | |||
84 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, | 87 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, |
85 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, | 88 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, |
86 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, | 89 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, |
90 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, | ||
91 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, | ||
92 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, | ||
87 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, | 93 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, |
88 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, | 94 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, |
89 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, | 95 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 968bf1e8b0fe..eef4ad578b31 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -2708,7 +2708,8 @@ static int musb_resume(struct device *dev) | |||
2708 | if ((devctl & mask) != (musb->context.devctl & mask)) | 2708 | if ((devctl & mask) != (musb->context.devctl & mask)) |
2709 | musb->port1_status = 0; | 2709 | musb->port1_status = 0; |
2710 | 2710 | ||
2711 | musb_start(musb); | 2711 | musb_enable_interrupts(musb); |
2712 | musb_platform_enable(musb); | ||
2712 | 2713 | ||
2713 | spin_lock_irqsave(&musb->lock, flags); | 2714 | spin_lock_irqsave(&musb->lock, flags); |
2714 | error = musb_run_resume_work(musb); | 2715 | error = musb_run_resume_work(musb); |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 394b4ac86161..45ed32c2cba9 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, | |||
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
394 | /* | 394 | if (qh != NULL && qh->is_ready) { |
395 | * The pipe must be broken if current urb->status is set, so don't | ||
396 | * start next urb. | ||
397 | * TODO: to minimize the risk of regression, only check urb->status | ||
398 | * for RX, until we have a test case to understand the behavior of TX. | ||
399 | */ | ||
400 | if ((!status || !is_in) && qh && qh->is_ready) { | ||
401 | musb_dbg(musb, "... next ep%d %cX urb %p", | 395 | musb_dbg(musb, "... next ep%d %cX urb %p", |
402 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 396 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
403 | musb_start_urb(musb, is_in, qh); | 397 | musb_start_urb(musb, is_in, qh); |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index da031c45395a..fbec863350f6 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
@@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy) | |||
602 | void __iomem *base = phy->io_priv; | 602 | void __iomem *base = phy->io_priv; |
603 | enum usb_charger_type chgr_type = UNKNOWN_TYPE; | 603 | enum usb_charger_type chgr_type = UNKNOWN_TYPE; |
604 | 604 | ||
605 | if (!regmap) | ||
606 | return UNKNOWN_TYPE; | ||
607 | |||
605 | if (mxs_charger_data_contact_detect(mxs_phy)) | 608 | if (mxs_charger_data_contact_detect(mxs_phy)) |
606 | return chgr_type; | 609 | return chgr_type; |
607 | 610 | ||
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 5925d111bd47..39fa2fc1b8b7 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, | |||
982 | if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) | 982 | if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) |
983 | goto usbhsf_pio_prepare_pop; | 983 | goto usbhsf_pio_prepare_pop; |
984 | 984 | ||
985 | /* return at this time if the pipe is running */ | ||
986 | if (usbhs_pipe_is_running(pipe)) | ||
987 | return 0; | ||
988 | |||
985 | usbhs_pipe_config_change_bfre(pipe, 1); | 989 | usbhs_pipe_config_change_bfre(pipe, 1); |
986 | 990 | ||
987 | ret = usbhsf_fifo_select(pipe, fifo, 0); | 991 | ret = usbhsf_fifo_select(pipe, fifo, 0); |
@@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt, | |||
1172 | usbhsf_fifo_clear(pipe, fifo); | 1176 | usbhsf_fifo_clear(pipe, fifo); |
1173 | pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); | 1177 | pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); |
1174 | 1178 | ||
1179 | usbhs_pipe_running(pipe, 0); | ||
1175 | usbhsf_dma_stop(pipe, fifo); | 1180 | usbhsf_dma_stop(pipe, fifo); |
1176 | usbhsf_dma_unmap(pkt); | 1181 | usbhsf_dma_unmap(pkt); |
1177 | usbhsf_fifo_unselect(pipe, pipe->fifo); | 1182 | usbhsf_fifo_unselect(pipe, pipe->fifo); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5db8ed517e0e..2d8d9150da0c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); | |||
241 | #define QUECTEL_PRODUCT_EC21 0x0121 | 241 | #define QUECTEL_PRODUCT_EC21 0x0121 |
242 | #define QUECTEL_PRODUCT_EC25 0x0125 | 242 | #define QUECTEL_PRODUCT_EC25 0x0125 |
243 | #define QUECTEL_PRODUCT_BG96 0x0296 | 243 | #define QUECTEL_PRODUCT_BG96 0x0296 |
244 | #define QUECTEL_PRODUCT_EP06 0x0306 | ||
244 | 245 | ||
245 | #define CMOTECH_VENDOR_ID 0x16d8 | 246 | #define CMOTECH_VENDOR_ID 0x16d8 |
246 | #define CMOTECH_PRODUCT_6001 0x6001 | 247 | #define CMOTECH_PRODUCT_6001 0x6001 |
@@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { | |||
689 | .reserved = BIT(1) | BIT(4), | 690 | .reserved = BIT(1) | BIT(4), |
690 | }; | 691 | }; |
691 | 692 | ||
693 | static const struct option_blacklist_info quectel_ep06_blacklist = { | ||
694 | .reserved = BIT(4) | BIT(5), | ||
695 | }; | ||
696 | |||
692 | static const struct usb_device_id option_ids[] = { | 697 | static const struct usb_device_id option_ids[] = { |
693 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 698 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
694 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 699 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = { | |||
1203 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1208 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1204 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), | 1209 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), |
1205 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1210 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1211 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), | ||
1212 | .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist }, | ||
1206 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, | 1213 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
1207 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, | 1214 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
1208 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), | 1215 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
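Note on the option.c hunk: the Quectel EP06 entry carries a blacklist whose reserved mask is BIT(4) | BIT(5); the option driver skips interfaces whose bit is set in that mask so they are not bound as serial ports. A minimal model of the check (BIT() redefined locally for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* from the patch: interfaces 4 and 5 of the EP06 are reserved */
static const unsigned long quectel_ep06_reserved = BIT(4) | BIT(5);

static bool interface_is_reserved(unsigned long reserved, int ifnum)
{
    return reserved & BIT(ifnum);
}

int main(void)
{
    for (int ifnum = 0; ifnum < 6; ifnum++)
        printf("if %d: %s\n", ifnum,
               interface_is_reserved(quectel_ep06_reserved, ifnum) ?
               "skip" : "bind");
    return 0;
}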
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 49e552472c3f..dd8ef36ab10e 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c | |||
@@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a | |||
73 | goto err; | 73 | goto err; |
74 | 74 | ||
75 | sdev->ud.tcp_socket = socket; | 75 | sdev->ud.tcp_socket = socket; |
76 | sdev->ud.sockfd = sockfd; | ||
76 | 77 | ||
77 | spin_unlock_irq(&sdev->ud.lock); | 78 | spin_unlock_irq(&sdev->ud.lock); |
78 | 79 | ||
@@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) | |||
172 | if (ud->tcp_socket) { | 173 | if (ud->tcp_socket) { |
173 | sockfd_put(ud->tcp_socket); | 174 | sockfd_put(ud->tcp_socket); |
174 | ud->tcp_socket = NULL; | 175 | ud->tcp_socket = NULL; |
176 | ud->sockfd = -1; | ||
175 | } | 177 | } |
176 | 178 | ||
177 | /* 3. free used data */ | 179 | /* 3. free used data */ |
@@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) | |||
266 | sdev->ud.status = SDEV_ST_AVAILABLE; | 268 | sdev->ud.status = SDEV_ST_AVAILABLE; |
267 | spin_lock_init(&sdev->ud.lock); | 269 | spin_lock_init(&sdev->ud.lock); |
268 | sdev->ud.tcp_socket = NULL; | 270 | sdev->ud.tcp_socket = NULL; |
271 | sdev->ud.sockfd = -1; | ||
269 | 272 | ||
270 | INIT_LIST_HEAD(&sdev->priv_init); | 273 | INIT_LIST_HEAD(&sdev->priv_init); |
271 | INIT_LIST_HEAD(&sdev->priv_tx); | 274 | INIT_LIST_HEAD(&sdev->priv_tx); |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index c3e1008aa491..20e3d4609583 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
@@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) | |||
984 | if (vdev->ud.tcp_socket) { | 984 | if (vdev->ud.tcp_socket) { |
985 | sockfd_put(vdev->ud.tcp_socket); | 985 | sockfd_put(vdev->ud.tcp_socket); |
986 | vdev->ud.tcp_socket = NULL; | 986 | vdev->ud.tcp_socket = NULL; |
987 | vdev->ud.sockfd = -1; | ||
987 | } | 988 | } |
988 | pr_info("release socket\n"); | 989 | pr_info("release socket\n"); |
989 | 990 | ||
@@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud) | |||
1030 | if (ud->tcp_socket) { | 1031 | if (ud->tcp_socket) { |
1031 | sockfd_put(ud->tcp_socket); | 1032 | sockfd_put(ud->tcp_socket); |
1032 | ud->tcp_socket = NULL; | 1033 | ud->tcp_socket = NULL; |
1034 | ud->sockfd = -1; | ||
1033 | } | 1035 | } |
1034 | ud->status = VDEV_ST_NULL; | 1036 | ud->status = VDEV_ST_NULL; |
1035 | 1037 | ||
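Note on the usbip hunks (stub_dev.c and vhci_hcd.c): the new sockfd field mirrors tcp_socket and must stay consistent through every path, so it is initialized to -1, set when a connection is attached, and reset to -1 wherever the socket is released. A toy model of that invariant (the struct here is not the kernel's struct usbip_device):

#include <stdio.h>

struct conn {
    void *tcp_socket;
    int   sockfd;               /* -1 whenever tcp_socket is NULL */
};

static void conn_init(struct conn *c)
{
    c->tcp_socket = NULL;
    c->sockfd = -1;
}

static void conn_attach(struct conn *c, void *sock, int fd)
{
    c->tcp_socket = sock;
    c->sockfd = fd;
}

static void conn_shutdown(struct conn *c)
{
    if (c->tcp_socket) {
        /* sockfd_put() in the driver */
        c->tcp_socket = NULL;
        c->sockfd = -1;         /* keep the exported fd value consistent */
    }
}

int main(void)
{
    struct conn c;
    int dummy;

    conn_init(&c);
    conn_attach(&c, &dummy, 7);
    conn_shutdown(&c);
    printf("sockfd after shutdown: %d\n", c.sockfd);
    return 0;
}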
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index aff773bcebdb..37460cd6cabb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG | |||
226 | config RAVE_SP_WATCHDOG | 226 | config RAVE_SP_WATCHDOG |
227 | tristate "RAVE SP Watchdog timer" | 227 | tristate "RAVE SP Watchdog timer" |
228 | depends on RAVE_SP_CORE | 228 | depends on RAVE_SP_CORE |
229 | depends on NVMEM || !NVMEM | ||
229 | select WATCHDOG_CORE | 230 | select WATCHDOG_CORE |
230 | help | 231 | help |
231 | Support for the watchdog on RAVE SP device. | 232 | Support for the watchdog on RAVE SP device. |
@@ -903,6 +904,7 @@ config F71808E_WDT | |||
903 | config SP5100_TCO | 904 | config SP5100_TCO |
904 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" | 905 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" |
905 | depends on X86 && PCI | 906 | depends on X86 && PCI |
907 | select WATCHDOG_CORE | ||
906 | ---help--- | 908 | ---help--- |
907 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO | 909 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO |
908 | (Total Cost of Ownership) timer is a watchdog timer that will reboot | 910 | (Total Cost of Ownership) timer is a watchdog timer that will reboot |
@@ -1008,6 +1010,7 @@ config WAFER_WDT | |||
1008 | config I6300ESB_WDT | 1010 | config I6300ESB_WDT |
1009 | tristate "Intel 6300ESB Timer/Watchdog" | 1011 | tristate "Intel 6300ESB Timer/Watchdog" |
1010 | depends on PCI | 1012 | depends on PCI |
1013 | select WATCHDOG_CORE | ||
1011 | ---help--- | 1014 | ---help--- |
1012 | Hardware driver for the watchdog timer built into the Intel | 1015 | Hardware driver for the watchdog timer built into the Intel |
1013 | 6300ESB controller hub. | 1016 | 6300ESB controller hub. |
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V | |||
1837 | config XEN_WDT | 1840 | config XEN_WDT |
1838 | tristate "Xen Watchdog support" | 1841 | tristate "Xen Watchdog support" |
1839 | depends on XEN | 1842 | depends on XEN |
1843 | select WATCHDOG_CORE | ||
1840 | help | 1844 | help |
1841 | Say Y here to support the hypervisor watchdog capability provided | 1845 | Say Y here to support the hypervisor watchdog capability provided |
1842 | by Xen 4.0 and newer. The watchdog timeout period is normally one | 1846 | by Xen 4.0 and newer. The watchdog timeout period is normally one |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1ab4bd11f5f3..762378f1811c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -755,8 +755,8 @@ out: | |||
755 | mutex_unlock(&irq_mapping_update_lock); | 755 | mutex_unlock(&irq_mapping_update_lock); |
756 | return irq; | 756 | return irq; |
757 | error_irq: | 757 | error_irq: |
758 | for (; i >= 0; i--) | 758 | while (nvec--) |
759 | __unbind_from_irq(irq + i); | 759 | __unbind_from_irq(irq + nvec); |
760 | mutex_unlock(&irq_mapping_update_lock); | 760 | mutex_unlock(&irq_mapping_update_lock); |
761 | return ret; | 761 | return ret; |
762 | } | 762 | } |
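Note on the events_base.c hunk: the error path no longer depends on the loop index i being meaningful at the error_irq label; it simply unwinds all nvec bindings counting down. A small sketch of that rollback pattern with made-up names:

#include <stdio.h>

#define NVEC 4

static int bound[NVEC];

static void unbind(int irq)
{
    bound[irq] = 0;
    printf("unbound irq %d\n", irq);
}

/* Roll back everything set up for this allocation, counting down from nvec
 * the way the fixed error path does. */
static void unwind(int base_irq, int nvec)
{
    while (nvec--)
        unbind(base_irq + nvec);
}

int main(void)
{
    for (int i = 0; i < NVEC; i++)
        bound[i] = 1;
    unwind(0, NVEC);            /* unbinds 3, 2, 1, 0 */
    return 0;
}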
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 156e5aea36db..b1092fbefa63 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev, | |||
416 | sock); | 416 | sock); |
417 | if (!map) { | 417 | if (!map) { |
418 | ret = -EFAULT; | 418 | ret = -EFAULT; |
419 | sock_release(map->sock); | 419 | sock_release(sock); |
420 | } | 420 | } |
421 | 421 | ||
422 | out: | 422 | out: |
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index aedbee3b2838..2f11ca72a281 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
@@ -73,20 +73,25 @@ struct sock_mapping { | |||
73 | wait_queue_head_t inflight_conn_req; | 73 | wait_queue_head_t inflight_conn_req; |
74 | } active; | 74 | } active; |
75 | struct { | 75 | struct { |
76 | /* Socket status */ | 76 | /* |
77 | * Socket status, needs to be 64-bit aligned due to the | ||
78 | * test_and_* functions which have this requirement on arm64. | ||
79 | */ | ||
77 | #define PVCALLS_STATUS_UNINITALIZED 0 | 80 | #define PVCALLS_STATUS_UNINITALIZED 0 |
78 | #define PVCALLS_STATUS_BIND 1 | 81 | #define PVCALLS_STATUS_BIND 1 |
79 | #define PVCALLS_STATUS_LISTEN 2 | 82 | #define PVCALLS_STATUS_LISTEN 2 |
80 | uint8_t status; | 83 | uint8_t status __attribute__((aligned(8))); |
81 | /* | 84 | /* |
82 | * Internal state-machine flags. | 85 | * Internal state-machine flags. |
83 | * Only one accept operation can be inflight for a socket. | 86 | * Only one accept operation can be inflight for a socket. |
84 | * Only one poll operation can be inflight for a given socket. | 87 | * Only one poll operation can be inflight for a given socket. |
88 | * flags needs to be 64-bit aligned due to the test_and_* | ||
89 | * functions which have this requirement on arm64. | ||
85 | */ | 90 | */ |
86 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 | 91 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 |
87 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 | 92 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 |
88 | #define PVCALLS_FLAG_POLL_RET 2 | 93 | #define PVCALLS_FLAG_POLL_RET 2 |
89 | uint8_t flags; | 94 | uint8_t flags __attribute__((aligned(8))); |
90 | uint32_t inflight_req_id; | 95 | uint32_t inflight_req_id; |
91 | struct sock_mapping *accept_map; | 96 | struct sock_mapping *accept_map; |
92 | wait_queue_head_t inflight_accept_req; | 97 | wait_queue_head_t inflight_accept_req; |
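Note on the pvcalls-front.c hunk: the status and flags bytes are passed to the kernel's test_and_* bit helpers, which operate on unsigned long words and, per the added comments, require 64-bit alignment on arm64; hence the aligned(8) attributes. A compilable sketch that just demonstrates the layout effect (the struct below is illustrative, not the kernel's sock_mapping):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct passive_state {
    uint8_t status __attribute__((aligned(8)));
    uint8_t flags  __attribute__((aligned(8)));
    uint32_t inflight_req_id;
};

_Static_assert(offsetof(struct passive_state, status) % 8 == 0,
               "status must be 8-byte aligned");
_Static_assert(offsetof(struct passive_state, flags) % 8 == 0,
               "flags must be 8-byte aligned");

int main(void)
{
    printf("status at %zu, flags at %zu, size %zu\n",
           offsetof(struct passive_state, status),
           offsetof(struct passive_state, flags),
           sizeof(struct passive_state));
    return 0;
}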
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index bf13d1ec51f3..04e7b3b29bac 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset, | |||
284 | int pool = tmem_frontswap_poolid; | 284 | int pool = tmem_frontswap_poolid; |
285 | int ret; | 285 | int ret; |
286 | 286 | ||
287 | /* THP isn't supported */ | ||
288 | if (PageTransHuge(page)) | ||
289 | return -1; | ||
290 | |||
287 | if (pool < 0) | 291 | if (pool < 0) |
288 | return -1; | 292 | return -1; |
289 | if (ind64 != ind) | 293 | if (ind64 != ind) |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 4a181fcb5175..fe09ef9c21f3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1058,6 +1058,27 @@ retry: | |||
1058 | return 0; | 1058 | return 0; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) | ||
1062 | { | ||
1063 | struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); | ||
1064 | |||
1065 | if (!disk) | ||
1066 | return NULL; | ||
1067 | /* | ||
1068 | * Now that we hold gendisk reference we make sure bdev we looked up is | ||
1069 | * not stale. If it is, it means device got removed and created before | ||
1070 | * we looked up gendisk and we fail open in such case. Associating | ||
1071 | * unhashed bdev with newly created gendisk could lead to two bdevs | ||
1072 | * (and thus two independent caches) being associated with one device | ||
1073 | * which is bad. | ||
1074 | */ | ||
1075 | if (inode_unhashed(bdev->bd_inode)) { | ||
1076 | put_disk_and_module(disk); | ||
1077 | return NULL; | ||
1078 | } | ||
1079 | return disk; | ||
1080 | } | ||
1081 | |||
1061 | /** | 1082 | /** |
1062 | * bd_start_claiming - start claiming a block device | 1083 | * bd_start_claiming - start claiming a block device |
1063 | * @bdev: block device of interest | 1084 | * @bdev: block device of interest |
@@ -1094,7 +1115,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
1094 | * @bdev might not have been initialized properly yet, look up | 1115 | * @bdev might not have been initialized properly yet, look up |
1095 | * and grab the outer block device the hard way. | 1116 | * and grab the outer block device the hard way. |
1096 | */ | 1117 | */ |
1097 | disk = get_gendisk(bdev->bd_dev, &partno); | 1118 | disk = bdev_get_gendisk(bdev, &partno); |
1098 | if (!disk) | 1119 | if (!disk) |
1099 | return ERR_PTR(-ENXIO); | 1120 | return ERR_PTR(-ENXIO); |
1100 | 1121 | ||
@@ -1111,8 +1132,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
1111 | else | 1132 | else |
1112 | whole = bdgrab(bdev); | 1133 | whole = bdgrab(bdev); |
1113 | 1134 | ||
1114 | module_put(disk->fops->owner); | 1135 | put_disk_and_module(disk); |
1115 | put_disk(disk); | ||
1116 | if (!whole) | 1136 | if (!whole) |
1117 | return ERR_PTR(-ENOMEM); | 1137 | return ERR_PTR(-ENOMEM); |
1118 | 1138 | ||
@@ -1407,10 +1427,10 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); | |||
1407 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | 1427 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) |
1408 | { | 1428 | { |
1409 | struct gendisk *disk; | 1429 | struct gendisk *disk; |
1410 | struct module *owner; | ||
1411 | int ret; | 1430 | int ret; |
1412 | int partno; | 1431 | int partno; |
1413 | int perm = 0; | 1432 | int perm = 0; |
1433 | bool first_open = false; | ||
1414 | 1434 | ||
1415 | if (mode & FMODE_READ) | 1435 | if (mode & FMODE_READ) |
1416 | perm |= MAY_READ; | 1436 | perm |= MAY_READ; |
@@ -1430,14 +1450,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1430 | restart: | 1450 | restart: |
1431 | 1451 | ||
1432 | ret = -ENXIO; | 1452 | ret = -ENXIO; |
1433 | disk = get_gendisk(bdev->bd_dev, &partno); | 1453 | disk = bdev_get_gendisk(bdev, &partno); |
1434 | if (!disk) | 1454 | if (!disk) |
1435 | goto out; | 1455 | goto out; |
1436 | owner = disk->fops->owner; | ||
1437 | 1456 | ||
1438 | disk_block_events(disk); | 1457 | disk_block_events(disk); |
1439 | mutex_lock_nested(&bdev->bd_mutex, for_part); | 1458 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
1440 | if (!bdev->bd_openers) { | 1459 | if (!bdev->bd_openers) { |
1460 | first_open = true; | ||
1441 | bdev->bd_disk = disk; | 1461 | bdev->bd_disk = disk; |
1442 | bdev->bd_queue = disk->queue; | 1462 | bdev->bd_queue = disk->queue; |
1443 | bdev->bd_contains = bdev; | 1463 | bdev->bd_contains = bdev; |
@@ -1463,8 +1483,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1463 | bdev->bd_queue = NULL; | 1483 | bdev->bd_queue = NULL; |
1464 | mutex_unlock(&bdev->bd_mutex); | 1484 | mutex_unlock(&bdev->bd_mutex); |
1465 | disk_unblock_events(disk); | 1485 | disk_unblock_events(disk); |
1466 | put_disk(disk); | 1486 | put_disk_and_module(disk); |
1467 | module_put(owner); | ||
1468 | goto restart; | 1487 | goto restart; |
1469 | } | 1488 | } |
1470 | } | 1489 | } |
@@ -1524,15 +1543,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1524 | if (ret) | 1543 | if (ret) |
1525 | goto out_unlock_bdev; | 1544 | goto out_unlock_bdev; |
1526 | } | 1545 | } |
1527 | /* only one opener holds refs to the module and disk */ | ||
1528 | put_disk(disk); | ||
1529 | module_put(owner); | ||
1530 | } | 1546 | } |
1531 | bdev->bd_openers++; | 1547 | bdev->bd_openers++; |
1532 | if (for_part) | 1548 | if (for_part) |
1533 | bdev->bd_part_count++; | 1549 | bdev->bd_part_count++; |
1534 | mutex_unlock(&bdev->bd_mutex); | 1550 | mutex_unlock(&bdev->bd_mutex); |
1535 | disk_unblock_events(disk); | 1551 | disk_unblock_events(disk); |
1552 | /* only one opener holds refs to the module and disk */ | ||
1553 | if (!first_open) | ||
1554 | put_disk_and_module(disk); | ||
1536 | return 0; | 1555 | return 0; |
1537 | 1556 | ||
1538 | out_clear: | 1557 | out_clear: |
@@ -1546,8 +1565,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1546 | out_unlock_bdev: | 1565 | out_unlock_bdev: |
1547 | mutex_unlock(&bdev->bd_mutex); | 1566 | mutex_unlock(&bdev->bd_mutex); |
1548 | disk_unblock_events(disk); | 1567 | disk_unblock_events(disk); |
1549 | put_disk(disk); | 1568 | put_disk_and_module(disk); |
1550 | module_put(owner); | ||
1551 | out: | 1569 | out: |
1552 | bdput(bdev); | 1570 | bdput(bdev); |
1553 | 1571 | ||
@@ -1770,8 +1788,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1770 | disk->fops->release(disk, mode); | 1788 | disk->fops->release(disk, mode); |
1771 | } | 1789 | } |
1772 | if (!bdev->bd_openers) { | 1790 | if (!bdev->bd_openers) { |
1773 | struct module *owner = disk->fops->owner; | ||
1774 | |||
1775 | disk_put_part(bdev->bd_part); | 1791 | disk_put_part(bdev->bd_part); |
1776 | bdev->bd_part = NULL; | 1792 | bdev->bd_part = NULL; |
1777 | bdev->bd_disk = NULL; | 1793 | bdev->bd_disk = NULL; |
@@ -1779,8 +1795,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1779 | victim = bdev->bd_contains; | 1795 | victim = bdev->bd_contains; |
1780 | bdev->bd_contains = NULL; | 1796 | bdev->bd_contains = NULL; |
1781 | 1797 | ||
1782 | put_disk(disk); | 1798 | put_disk_and_module(disk); |
1783 | module_put(owner); | ||
1784 | } | 1799 | } |
1785 | mutex_unlock(&bdev->bd_mutex); | 1800 | mutex_unlock(&bdev->bd_mutex); |
1786 | bdput(bdev); | 1801 | bdput(bdev); |
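Note on the block_dev.c hunks: bdev_get_gendisk() takes a gendisk reference and then rechecks inode_unhashed() on the bdev inode; if the bdev turned stale after the lookup, the reference is dropped and the open fails rather than pairing an old bdev with a newly created gendisk. A toy model of that "take reference, then revalidate" step (types and names invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct disk {
    int refcount;
};

struct bdev {
    struct disk *disk;
    bool unhashed;              /* stands in for inode_unhashed(bdev->bd_inode) */
};

static struct disk *disk_get(struct disk *d)
{
    d->refcount++;
    return d;
}

static void disk_put(struct disk *d)
{
    d->refcount--;
}

static struct disk *bdev_get_disk(struct bdev *bdev)
{
    struct disk *d = disk_get(bdev->disk);

    if (bdev->unhashed) {       /* device went away after our lookup */
        disk_put(d);
        return NULL;
    }
    return d;
}

int main(void)
{
    struct disk d = { .refcount = 0 };
    struct bdev live = { .disk = &d, .unhashed = false };
    struct bdev stale = { .disk = &d, .unhashed = true };

    printf("live: %p, stale: %p, refcount %d\n",
           (void *)bdev_get_disk(&live), (void *)bdev_get_disk(&stale),
           d.refcount);
    return 0;
}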
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 6582c4507e6c..0e5bd3e3344e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -3965,6 +3965,32 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) | |||
3965 | } | 3965 | } |
3966 | 3966 | ||
3967 | /* | 3967 | /* |
3968 | * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it | ||
3969 | * looks like the link count will hit 0, drop any other caps (other | ||
3970 | * than PIN) we don't specifically want (due to the file still being | ||
3971 | * open). | ||
3972 | */ | ||
3973 | int ceph_drop_caps_for_unlink(struct inode *inode) | ||
3974 | { | ||
3975 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
3976 | int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | ||
3977 | |||
3978 | spin_lock(&ci->i_ceph_lock); | ||
3979 | if (inode->i_nlink == 1) { | ||
3980 | drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); | ||
3981 | |||
3982 | ci->i_ceph_flags |= CEPH_I_NODELAY; | ||
3983 | if (__ceph_caps_dirty(ci)) { | ||
3984 | struct ceph_mds_client *mdsc = | ||
3985 | ceph_inode_to_client(inode)->mdsc; | ||
3986 | __cap_delay_requeue_front(mdsc, ci); | ||
3987 | } | ||
3988 | } | ||
3989 | spin_unlock(&ci->i_ceph_lock); | ||
3990 | return drop; | ||
3991 | } | ||
3992 | |||
3993 | /* | ||
3968 | * Helpers for embedding cap and dentry lease releases into mds | 3994 | * Helpers for embedding cap and dentry lease releases into mds |
3969 | * requests. | 3995 | * requests. |
3970 | * | 3996 | * |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0c4346806e17..f1d9c6cc0491 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -1003,26 +1003,6 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir, | |||
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* | 1005 | /* |
1006 | * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it | ||
1007 | * looks like the link count will hit 0, drop any other caps (other | ||
1008 | * than PIN) we don't specifically want (due to the file still being | ||
1009 | * open). | ||
1010 | */ | ||
1011 | static int drop_caps_for_unlink(struct inode *inode) | ||
1012 | { | ||
1013 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
1014 | int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | ||
1015 | |||
1016 | spin_lock(&ci->i_ceph_lock); | ||
1017 | if (inode->i_nlink == 1) { | ||
1018 | drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); | ||
1019 | ci->i_ceph_flags |= CEPH_I_NODELAY; | ||
1020 | } | ||
1021 | spin_unlock(&ci->i_ceph_lock); | ||
1022 | return drop; | ||
1023 | } | ||
1024 | |||
1025 | /* | ||
1026 | * rmdir and unlink differ only by the metadata op code | 1006 | * rmdir and unlink differ only by the metadata op code |
1027 | */ | 1007 | */ |
1028 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) | 1008 | static int ceph_unlink(struct inode *dir, struct dentry *dentry) |
@@ -1056,7 +1036,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) | |||
1056 | set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); | 1036 | set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); |
1057 | req->r_dentry_drop = CEPH_CAP_FILE_SHARED; | 1037 | req->r_dentry_drop = CEPH_CAP_FILE_SHARED; |
1058 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; | 1038 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; |
1059 | req->r_inode_drop = drop_caps_for_unlink(inode); | 1039 | req->r_inode_drop = ceph_drop_caps_for_unlink(inode); |
1060 | err = ceph_mdsc_do_request(mdsc, dir, req); | 1040 | err = ceph_mdsc_do_request(mdsc, dir, req); |
1061 | if (!err && !req->r_reply_info.head->is_dentry) | 1041 | if (!err && !req->r_reply_info.head->is_dentry) |
1062 | d_delete(dentry); | 1042 | d_delete(dentry); |
@@ -1104,8 +1084,10 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1104 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; | 1084 | req->r_dentry_unless = CEPH_CAP_FILE_EXCL; |
1105 | /* release LINK_RDCACHE on source inode (mds will lock it) */ | 1085 | /* release LINK_RDCACHE on source inode (mds will lock it) */ |
1106 | req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; | 1086 | req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; |
1107 | if (d_really_is_positive(new_dentry)) | 1087 | if (d_really_is_positive(new_dentry)) { |
1108 | req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry)); | 1088 | req->r_inode_drop = |
1089 | ceph_drop_caps_for_unlink(d_inode(new_dentry)); | ||
1090 | } | ||
1109 | err = ceph_mdsc_do_request(mdsc, old_dir, req); | 1091 | err = ceph_mdsc_do_request(mdsc, old_dir, req); |
1110 | if (!err && !req->r_reply_info.head->is_dentry) { | 1092 | if (!err && !req->r_reply_info.head->is_dentry) { |
1111 | /* | 1093 | /* |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a62d2a9841dc..fb2bc9c15a23 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -225,6 +225,7 @@ static int parse_fsopt_token(char *c, void *private) | |||
225 | return -ENOMEM; | 225 | return -ENOMEM; |
226 | break; | 226 | break; |
227 | case Opt_mds_namespace: | 227 | case Opt_mds_namespace: |
228 | kfree(fsopt->mds_namespace); | ||
228 | fsopt->mds_namespace = kstrndup(argstr[0].from, | 229 | fsopt->mds_namespace = kstrndup(argstr[0].from, |
229 | argstr[0].to-argstr[0].from, | 230 | argstr[0].to-argstr[0].from, |
230 | GFP_KERNEL); | 231 | GFP_KERNEL); |
@@ -232,6 +233,7 @@ static int parse_fsopt_token(char *c, void *private) | |||
232 | return -ENOMEM; | 233 | return -ENOMEM; |
233 | break; | 234 | break; |
234 | case Opt_fscache_uniq: | 235 | case Opt_fscache_uniq: |
236 | kfree(fsopt->fscache_uniq); | ||
235 | fsopt->fscache_uniq = kstrndup(argstr[0].from, | 237 | fsopt->fscache_uniq = kstrndup(argstr[0].from, |
236 | argstr[0].to-argstr[0].from, | 238 | argstr[0].to-argstr[0].from, |
237 | GFP_KERNEL); | 239 | GFP_KERNEL); |
@@ -711,14 +713,17 @@ static int __init init_caches(void) | |||
711 | goto bad_dentry; | 713 | goto bad_dentry; |
712 | 714 | ||
713 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); | 715 | ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); |
714 | |||
715 | if (!ceph_file_cachep) | 716 | if (!ceph_file_cachep) |
716 | goto bad_file; | 717 | goto bad_file; |
717 | 718 | ||
718 | if ((error = ceph_fscache_register())) | 719 | error = ceph_fscache_register(); |
719 | goto bad_file; | 720 | if (error) |
721 | goto bad_fscache; | ||
720 | 722 | ||
721 | return 0; | 723 | return 0; |
724 | |||
725 | bad_fscache: | ||
726 | kmem_cache_destroy(ceph_file_cachep); | ||
722 | bad_file: | 727 | bad_file: |
723 | kmem_cache_destroy(ceph_dentry_cachep); | 728 | kmem_cache_destroy(ceph_dentry_cachep); |
724 | bad_dentry: | 729 | bad_dentry: |
@@ -836,7 +841,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
836 | int err; | 841 | int err; |
837 | unsigned long started = jiffies; /* note the start time */ | 842 | unsigned long started = jiffies; /* note the start time */ |
838 | struct dentry *root; | 843 | struct dentry *root; |
839 | int first = 0; /* first vfsmount for this super_block */ | ||
840 | 844 | ||
841 | dout("mount start %p\n", fsc); | 845 | dout("mount start %p\n", fsc); |
842 | mutex_lock(&fsc->client->mount_mutex); | 846 | mutex_lock(&fsc->client->mount_mutex); |
@@ -861,17 +865,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
861 | path = fsc->mount_options->server_path + 1; | 865 | path = fsc->mount_options->server_path + 1; |
862 | dout("mount opening path %s\n", path); | 866 | dout("mount opening path %s\n", path); |
863 | } | 867 | } |
868 | |||
869 | err = ceph_fs_debugfs_init(fsc); | ||
870 | if (err < 0) | ||
871 | goto out; | ||
872 | |||
864 | root = open_root_dentry(fsc, path, started); | 873 | root = open_root_dentry(fsc, path, started); |
865 | if (IS_ERR(root)) { | 874 | if (IS_ERR(root)) { |
866 | err = PTR_ERR(root); | 875 | err = PTR_ERR(root); |
867 | goto out; | 876 | goto out; |
868 | } | 877 | } |
869 | fsc->sb->s_root = dget(root); | 878 | fsc->sb->s_root = dget(root); |
870 | first = 1; | ||
871 | |||
872 | err = ceph_fs_debugfs_init(fsc); | ||
873 | if (err < 0) | ||
874 | goto fail; | ||
875 | } else { | 879 | } else { |
876 | root = dget(fsc->sb->s_root); | 880 | root = dget(fsc->sb->s_root); |
877 | } | 881 | } |
@@ -881,11 +885,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) | |||
881 | mutex_unlock(&fsc->client->mount_mutex); | 885 | mutex_unlock(&fsc->client->mount_mutex); |
882 | return root; | 886 | return root; |
883 | 887 | ||
884 | fail: | ||
885 | if (first) { | ||
886 | dput(fsc->sb->s_root); | ||
887 | fsc->sb->s_root = NULL; | ||
888 | } | ||
889 | out: | 888 | out: |
890 | mutex_unlock(&fsc->client->mount_mutex); | 889 | mutex_unlock(&fsc->client->mount_mutex); |
891 | return ERR_PTR(err); | 890 | return ERR_PTR(err); |
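Note on the ceph/super.c init_caches() hunk: a failing ceph_fscache_register() now jumps to a new bad_fscache label that destroys ceph_file_cachep before falling through to the older labels, so every allocation made up to the failure point is undone in reverse order. A generic, compilable model of that goto-unwind ladder (resources and names are invented):

#include <stdio.h>
#include <stdlib.h>

static void *cache_a, *cache_b, *cache_c;

static int register_extra(void) { return -1; }   /* pretend this fails */

static int init_caches(void)
{
    int err = -1;

    cache_a = malloc(16);
    if (!cache_a)
        goto bad_a;
    cache_b = malloc(16);
    if (!cache_b)
        goto bad_b;
    cache_c = malloc(16);
    if (!cache_c)
        goto bad_c;

    err = register_extra();
    if (err)
        goto bad_extra;

    return 0;

bad_extra:
    free(cache_c);              /* the fix: undo the last allocation too */
bad_c:
    free(cache_b);
bad_b:
    free(cache_a);
bad_a:
    return err;
}

int main(void)
{
    printf("init_caches() -> %d\n", init_caches());
    return 0;
}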
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 21b2e5b004eb..1c2086e0fec2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -987,7 +987,7 @@ extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, | |||
987 | struct ceph_mds_session *session); | 987 | struct ceph_mds_session *session); |
988 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); | 988 | extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); |
989 | extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); | 989 | extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); |
990 | 990 | extern int ceph_drop_caps_for_unlink(struct inode *inode); | |
991 | extern int ceph_encode_inode_release(void **p, struct inode *inode, | 991 | extern int ceph_encode_inode_release(void **p, struct inode *inode, |
992 | int mds, int drop, int unless, int force); | 992 | int mds, int drop, int unless, int force); |
993 | extern int ceph_encode_dentry_release(void **p, struct dentry *dn, | 993 | extern int ceph_encode_dentry_release(void **p, struct dentry *dn, |
diff --git a/fs/direct-io.c b/fs/direct-io.c index a0ca9e48e993..1357ef563893 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1274,8 +1274,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1274 | */ | 1274 | */ |
1275 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { | 1275 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { |
1276 | retval = 0; | 1276 | retval = 0; |
1277 | if ((iocb->ki_filp->f_flags & O_DSYNC) || | 1277 | if (iocb->ki_flags & IOCB_DSYNC) |
1278 | IS_SYNC(iocb->ki_filp->f_mapping->host)) | ||
1279 | retval = dio_set_defer_completion(dio); | 1278 | retval = dio_set_defer_completion(dio); |
1280 | else if (!dio->inode->i_sb->s_dio_done_wq) { | 1279 | else if (!dio->inode->i_sb->s_dio_done_wq) { |
1281 | /* | 1280 | /* |
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c index 5f22e74bbade..8e568428c88b 100644 --- a/fs/efivarfs/file.c +++ b/fs/efivarfs/file.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/efi.h> | 10 | #include <linux/efi.h> |
11 | #include <linux/delay.h> | ||
11 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
13 | #include <linux/mount.h> | 14 | #include <linux/mount.h> |
@@ -74,6 +75,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, | |||
74 | ssize_t size = 0; | 75 | ssize_t size = 0; |
75 | int err; | 76 | int err; |
76 | 77 | ||
78 | while (!__ratelimit(&file->f_cred->user->ratelimit)) { | ||
79 | if (!msleep_interruptible(50)) | ||
80 | return -EINTR; | ||
81 | } | ||
82 | |||
77 | err = efivar_entry_size(var, &datasize); | 83 | err = efivar_entry_size(var, &datasize); |
78 | 84 | ||
79 | /* | 85 | /* |
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 2435af56b87e..a50d7813e3ea 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c | |||
@@ -572,7 +572,7 @@ out: | |||
572 | } | 572 | } |
573 | 573 | ||
574 | static bool | 574 | static bool |
575 | validate_bitmap_values(unsigned long mask) | 575 | validate_bitmap_values(unsigned int mask) |
576 | { | 576 | { |
577 | return (mask & ~RCA4_TYPE_MASK_ALL) == 0; | 577 | return (mask & ~RCA4_TYPE_MASK_ALL) == 0; |
578 | } | 578 | } |
@@ -596,17 +596,15 @@ __be32 nfs4_callback_recallany(void *argp, void *resp, | |||
596 | goto out; | 596 | goto out; |
597 | 597 | ||
598 | status = cpu_to_be32(NFS4_OK); | 598 | status = cpu_to_be32(NFS4_OK); |
599 | if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *) | 599 | if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG)) |
600 | &args->craa_type_mask)) | ||
601 | flags = FMODE_READ; | 600 | flags = FMODE_READ; |
602 | if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *) | 601 | if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG)) |
603 | &args->craa_type_mask)) | ||
604 | flags |= FMODE_WRITE; | 602 | flags |= FMODE_WRITE; |
605 | if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *) | ||
606 | &args->craa_type_mask)) | ||
607 | pnfs_recall_all_layouts(cps->clp); | ||
608 | if (flags) | 603 | if (flags) |
609 | nfs_expire_unused_delegation_types(cps->clp, flags); | 604 | nfs_expire_unused_delegation_types(cps->clp, flags); |
605 | |||
606 | if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT)) | ||
607 | pnfs_recall_all_layouts(cps->clp); | ||
610 | out: | 608 | out: |
611 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); | 609 | dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); |
612 | return status; | 610 | return status; |
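Note on the callback_proc.c hunk: craa_type_mask is a 32-bit field, and the old code cast its address to unsigned long * for test_bit(), which reads a full long starting at a 4-byte field and can test the wrong bits on big-endian 64-bit systems; the fix uses plain mask arithmetic with BIT(). A standalone sketch of the mask-based form (bit positions below are placeholders, not the RFC values, and BIT() is redefined locally):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define DEMO_MASK_RDATA_DLG   0
#define DEMO_MASK_WDATA_DLG   1
#define DEMO_MASK_FILE_LAYOUT 2

static void handle_recall_any(uint32_t craa_type_mask)
{
    /* plain mask tests work on a u32 regardless of word size or endianness */
    if (craa_type_mask & BIT(DEMO_MASK_RDATA_DLG))
        printf("recall read delegations\n");
    if (craa_type_mask & BIT(DEMO_MASK_WDATA_DLG))
        printf("recall write delegations\n");
    if (craa_type_mask & BIT(DEMO_MASK_FILE_LAYOUT))
        printf("recall file layouts\n");
}

int main(void)
{
    handle_recall_any(BIT(DEMO_MASK_RDATA_DLG) | BIT(DEMO_MASK_FILE_LAYOUT));
    return 0;
}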
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 49f848fd1f04..7327930ad970 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -873,7 +873,7 @@ static void nfs3_nlm_release_call(void *data) | |||
873 | } | 873 | } |
874 | } | 874 | } |
875 | 875 | ||
876 | const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = { | 876 | static const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = { |
877 | .nlmclnt_alloc_call = nfs3_nlm_alloc_call, | 877 | .nlmclnt_alloc_call = nfs3_nlm_alloc_call, |
878 | .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare, | 878 | .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare, |
879 | .nlmclnt_release_call = nfs3_nlm_release_call, | 879 | .nlmclnt_release_call = nfs3_nlm_release_call, |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 04612c24d394..979631411a0e 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -868,8 +868,10 @@ static int nfs4_set_client(struct nfs_server *server, | |||
868 | if (IS_ERR(clp)) | 868 | if (IS_ERR(clp)) |
869 | return PTR_ERR(clp); | 869 | return PTR_ERR(clp); |
870 | 870 | ||
871 | if (server->nfs_client == clp) | 871 | if (server->nfs_client == clp) { |
872 | nfs_put_client(clp); | ||
872 | return -ELOOP; | 873 | return -ELOOP; |
874 | } | ||
873 | 875 | ||
874 | /* | 876 | /* |
875 | * Query for the lease time on clientid setup or renewal | 877 | * Query for the lease time on clientid setup or renewal |
@@ -1244,11 +1246,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, | |||
1244 | clp->cl_proto, clnt->cl_timeout, | 1246 | clp->cl_proto, clnt->cl_timeout, |
1245 | clp->cl_minorversion, net); | 1247 | clp->cl_minorversion, net); |
1246 | clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); | 1248 | clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); |
1247 | nfs_put_client(clp); | ||
1248 | if (error != 0) { | 1249 | if (error != 0) { |
1249 | nfs_server_insert_lists(server); | 1250 | nfs_server_insert_lists(server); |
1250 | return error; | 1251 | return error; |
1251 | } | 1252 | } |
1253 | nfs_put_client(clp); | ||
1252 | 1254 | ||
1253 | if (server->nfs_client->cl_hostname == NULL) | 1255 | if (server->nfs_client->cl_hostname == NULL) |
1254 | server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); | 1256 | server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); |
diff --git a/fs/signalfd.c b/fs/signalfd.c index 9990957264e3..76bf9cc62074 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -118,13 +118,22 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, | |||
118 | err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno); | 118 | err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno); |
119 | #endif | 119 | #endif |
120 | #ifdef BUS_MCEERR_AO | 120 | #ifdef BUS_MCEERR_AO |
121 | /* | 121 | /* |
122 | * Other callers might not initialize the si_lsb field, | ||
123 | * so check explicitly for the right codes here. | ||
124 | */ | ||
125 | if (kinfo->si_signo == SIGBUS && | ||
126 | kinfo->si_code == BUS_MCEERR_AO) | ||
127 | err |= __put_user((short) kinfo->si_addr_lsb, | ||
128 | &uinfo->ssi_addr_lsb); | ||
129 | #endif | ||
130 | #ifdef BUS_MCEERR_AR | ||
131 | /* | ||
122 | * Other callers might not initialize the si_lsb field, | 132 | * Other callers might not initialize the si_lsb field, |
123 | * so check explicitly for the right codes here. | 133 | * so check explicitly for the right codes here. |
124 | */ | 134 | */ |
125 | if (kinfo->si_signo == SIGBUS && | 135 | if (kinfo->si_signo == SIGBUS && |
126 | (kinfo->si_code == BUS_MCEERR_AR || | 136 | kinfo->si_code == BUS_MCEERR_AR) |
127 | kinfo->si_code == BUS_MCEERR_AO)) | ||
128 | err |= __put_user((short) kinfo->si_addr_lsb, | 137 | err |= __put_user((short) kinfo->si_addr_lsb, |
129 | &uinfo->ssi_addr_lsb); | 138 | &uinfo->ssi_addr_lsb); |
130 | #endif | 139 | #endif |
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c index fd975524f460..05c66e05ae20 100644 --- a/fs/xfs/scrub/agheader.c +++ b/fs/xfs/scrub/agheader.c | |||
@@ -767,7 +767,7 @@ int | |||
767 | xfs_scrub_agfl( | 767 | xfs_scrub_agfl( |
768 | struct xfs_scrub_context *sc) | 768 | struct xfs_scrub_context *sc) |
769 | { | 769 | { |
770 | struct xfs_scrub_agfl_info sai = { 0 }; | 770 | struct xfs_scrub_agfl_info sai; |
771 | struct xfs_agf *agf; | 771 | struct xfs_agf *agf; |
772 | xfs_agnumber_t agno; | 772 | xfs_agnumber_t agno; |
773 | unsigned int agflcount; | 773 | unsigned int agflcount; |
@@ -795,6 +795,7 @@ xfs_scrub_agfl( | |||
795 | xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); | 795 | xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); |
796 | goto out; | 796 | goto out; |
797 | } | 797 | } |
798 | memset(&sai, 0, sizeof(sai)); | ||
798 | sai.sz_entries = agflcount; | 799 | sai.sz_entries = agflcount; |
799 | sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS); | 800 | sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS); |
800 | if (!sai.entries) { | 801 | if (!sai.entries) { |
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 3a55d6fc271b..7a39f40645f7 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "xfs_log_format.h" | 23 | #include "xfs_log_format.h" |
24 | #include "xfs_trans_resv.h" | 24 | #include "xfs_trans_resv.h" |
25 | #include "xfs_bit.h" | 25 | #include "xfs_bit.h" |
26 | #include "xfs_shared.h" | ||
26 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
27 | #include "xfs_defer.h" | 28 | #include "xfs_defer.h" |
28 | #include "xfs_trans.h" | 29 | #include "xfs_trans.h" |
@@ -456,10 +457,12 @@ xfs_cui_recover( | |||
456 | * transaction. Normally, any work that needs to be deferred | 457 | * transaction. Normally, any work that needs to be deferred |
457 | * gets attached to the same defer_ops that scheduled the | 458 | * gets attached to the same defer_ops that scheduled the |
458 | * refcount update. However, we're in log recovery here, so we | 459 | * refcount update. However, we're in log recovery here, so we |
459 | * we create our own defer_ops and use that to finish up any | 460 | use the passed-in defer_ops to finish up any work that |
460 | * work that doesn't fit. | 461 | * doesn't fit. We need to reserve enough blocks to handle a |
462 | * full btree split on either end of the refcount range. | ||
461 | */ | 463 | */ |
462 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); | 464 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, |
465 | mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp); | ||
463 | if (error) | 466 | if (error) |
464 | return error; | 467 | return error; |
465 | cudp = xfs_trans_get_cud(tp, cuip); | 468 | cudp = xfs_trans_get_cud(tp, cuip); |
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index f3b139c9aa16..49d3124863a8 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "xfs_log_format.h" | 23 | #include "xfs_log_format.h" |
24 | #include "xfs_trans_resv.h" | 24 | #include "xfs_trans_resv.h" |
25 | #include "xfs_bit.h" | 25 | #include "xfs_bit.h" |
26 | #include "xfs_shared.h" | ||
26 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
27 | #include "xfs_defer.h" | 28 | #include "xfs_defer.h" |
28 | #include "xfs_trans.h" | 29 | #include "xfs_trans.h" |
@@ -470,7 +471,8 @@ xfs_rui_recover( | |||
470 | } | 471 | } |
471 | } | 472 | } |
472 | 473 | ||
473 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); | 474 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, |
475 | mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp); | ||
474 | if (error) | 476 | if (error) |
475 | return error; | 477 | return error; |
476 | rudp = xfs_trans_get_rud(tp, ruip); | 478 | rudp = xfs_trans_get_rud(tp, ruip); |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 7aba628dc527..93588ea3d3d2 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -250,6 +250,7 @@ xfs_parseargs( | |||
250 | return -EINVAL; | 250 | return -EINVAL; |
251 | break; | 251 | break; |
252 | case Opt_logdev: | 252 | case Opt_logdev: |
253 | kfree(mp->m_logname); | ||
253 | mp->m_logname = match_strdup(args); | 254 | mp->m_logname = match_strdup(args); |
254 | if (!mp->m_logname) | 255 | if (!mp->m_logname) |
255 | return -ENOMEM; | 256 | return -ENOMEM; |
@@ -258,6 +259,7 @@ xfs_parseargs( | |||
258 | xfs_warn(mp, "%s option not allowed on this system", p); | 259 | xfs_warn(mp, "%s option not allowed on this system", p); |
259 | return -EINVAL; | 260 | return -EINVAL; |
260 | case Opt_rtdev: | 261 | case Opt_rtdev: |
262 | kfree(mp->m_rtname); | ||
261 | mp->m_rtname = match_strdup(args); | 263 | mp->m_rtname = match_strdup(args); |
262 | if (!mp->m_rtname) | 264 | if (!mp->m_rtname) |
263 | return -ENOMEM; | 265 | return -ENOMEM; |
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 963b755d19b0..a7613e1b0c87 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -52,6 +52,7 @@ struct bug_entry { | |||
52 | #ifndef HAVE_ARCH_BUG | 52 | #ifndef HAVE_ARCH_BUG |
53 | #define BUG() do { \ | 53 | #define BUG() do { \ |
54 | printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ | 54 | printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ |
55 | barrier_before_unreachable(); \ | ||
55 | panic("BUG!"); \ | 56 | panic("BUG!"); \ |
56 | } while (0) | 57 | } while (0) |
57 | #endif | 58 | #endif |
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 1c27526c499e..cf13842a6dbd 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h | |||
@@ -134,6 +134,15 @@ struct drm_crtc_commit { | |||
134 | * &drm_pending_vblank_event pointer to clean up private events. | 134 | * &drm_pending_vblank_event pointer to clean up private events. |
135 | */ | 135 | */ |
136 | struct drm_pending_vblank_event *event; | 136 | struct drm_pending_vblank_event *event; |
137 | |||
138 | /** | ||
139 | * @abort_completion: | ||
140 | * | ||
141 | * A flag that's set after drm_atomic_helper_setup_commit takes a second | ||
142 | * reference for the completion of $drm_crtc_state.event. It's used by | ||
143 | * the free code to remove the second reference if commit fails. | ||
144 | */ | ||
145 | bool abort_completion; | ||
137 | }; | 146 | }; |
138 | 147 | ||
139 | struct __drm_planes_state { | 148 | struct __drm_planes_state { |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 76e237bd989b..6914633037a5 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev); | |||
77 | 77 | ||
78 | void drm_kms_helper_poll_disable(struct drm_device *dev); | 78 | void drm_kms_helper_poll_disable(struct drm_device *dev); |
79 | void drm_kms_helper_poll_enable(struct drm_device *dev); | 79 | void drm_kms_helper_poll_enable(struct drm_device *dev); |
80 | bool drm_kms_helper_is_poll_worker(void); | ||
80 | 81 | ||
81 | #endif | 82 | #endif |
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index d32b688eb346..d23dcdd1bd95 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h | |||
@@ -56,6 +56,7 @@ struct drm_printer; | |||
56 | #define DRIVER_ATOMIC 0x10000 | 56 | #define DRIVER_ATOMIC 0x10000 |
57 | #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 | 57 | #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 |
58 | #define DRIVER_SYNCOBJ 0x40000 | 58 | #define DRIVER_SYNCOBJ 0x40000 |
59 | #define DRIVER_PREFER_XBGR_30BPP 0x80000 | ||
59 | 60 | ||
60 | /** | 61 | /** |
61 | * struct drm_driver - DRM driver structure | 62 | * struct drm_driver - DRM driver structure |
diff --git a/include/linux/bio.h b/include/linux/bio.h index d0eb659fa733..ce547a25e8ae 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -511,6 +511,7 @@ void zero_fill_bio(struct bio *bio); | |||
511 | extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); | 511 | extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); |
512 | extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); | 512 | extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); |
513 | extern unsigned int bvec_nr_vecs(unsigned short idx); | 513 | extern unsigned int bvec_nr_vecs(unsigned short idx); |
514 | extern const char *bio_devname(struct bio *bio, char *buffer); | ||
514 | 515 | ||
515 | #define bio_set_dev(bio, bdev) \ | 516 | #define bio_set_dev(bio, bdev) \ |
516 | do { \ | 517 | do { \ |
@@ -529,9 +530,6 @@ do { \ | |||
529 | #define bio_dev(bio) \ | 530 | #define bio_dev(bio) \ |
530 | disk_devt((bio)->bi_disk) | 531 | disk_devt((bio)->bi_disk) |
531 | 532 | ||
532 | #define bio_devname(bio, buf) \ | ||
533 | __bdevname(bio_dev(bio), (buf)) | ||
534 | |||
535 | #ifdef CONFIG_BLK_CGROUP | 533 | #ifdef CONFIG_BLK_CGROUP |
536 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); | 534 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); |
537 | void bio_disassociate_task(struct bio *bio); | 535 | void bio_disassociate_task(struct bio *bio); |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d02a4df3f473..d3f264a5b04d 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -27,3 +27,8 @@ | |||
27 | #if __has_feature(address_sanitizer) | 27 | #if __has_feature(address_sanitizer) |
28 | #define __SANITIZE_ADDRESS__ | 28 | #define __SANITIZE_ADDRESS__ |
29 | #endif | 29 | #endif |
30 | |||
31 | /* Clang doesn't have a way to turn it off per-function, yet. */ | ||
32 | #ifdef __noretpoline | ||
33 | #undef __noretpoline | ||
34 | #endif | ||
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 73bc63e0a1c4..e2c7f4369eff 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -93,6 +93,10 @@ | |||
93 | #define __weak __attribute__((weak)) | 93 | #define __weak __attribute__((weak)) |
94 | #define __alias(symbol) __attribute__((alias(#symbol))) | 94 | #define __alias(symbol) __attribute__((alias(#symbol))) |
95 | 95 | ||
96 | #ifdef RETPOLINE | ||
97 | #define __noretpoline __attribute__((indirect_branch("keep"))) | ||
98 | #endif | ||
99 | |||
96 | /* | 100 | /* |
97 | * it doesn't make sense on ARM (currently the only user of __naked) | 101 | * it doesn't make sense on ARM (currently the only user of __naked) |
98 | * to trace naked functions because then mcount is called without | 102 | * to trace naked functions because then mcount is called without |
@@ -208,6 +212,15 @@ | |||
208 | #endif | 212 | #endif |
209 | 213 | ||
210 | /* | 214 | /* |
215 | * calling noreturn functions, __builtin_unreachable() and __builtin_trap() | ||
216 | * confuse the stack allocation in gcc, leading to overly large stack | ||
217 | * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 | ||
218 | * | ||
219 | * Adding an empty inline assembly before it works around the problem | ||
220 | */ | ||
221 | #define barrier_before_unreachable() asm volatile("") | ||
222 | |||
223 | /* | ||
211 | * Mark a position in code as unreachable. This can be used to | 224 | * Mark a position in code as unreachable. This can be used to |
212 | * suppress control flow warnings after asm blocks that transfer | 225 | * suppress control flow warnings after asm blocks that transfer |
213 | * control elsewhere. | 226 | * control elsewhere. |
@@ -217,7 +230,11 @@ | |||
217 | * unreleased. Really, we need to have autoconf for the kernel. | 230 | * unreleased. Really, we need to have autoconf for the kernel. |
218 | */ | 231 | */ |
219 | #define unreachable() \ | 232 | #define unreachable() \ |
220 | do { annotate_unreachable(); __builtin_unreachable(); } while (0) | 233 | do { \ |
234 | annotate_unreachable(); \ | ||
235 | barrier_before_unreachable(); \ | ||
236 | __builtin_unreachable(); \ | ||
237 | } while (0) | ||
221 | 238 | ||
222 | /* Mark a function definition as prohibited from being cloned. */ | 239 | /* Mark a function definition as prohibited from being cloned. */ |
223 | #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) | 240 | #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) |
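The compiler-gcc.h hunk above introduces barrier_before_unreachable() as a workaround for GCC PR82365, where calls to noreturn functions or __builtin_unreachable() can make GCC reserve an oversized stack frame in the caller; the empty asm statement breaks that optimization. Any locally defined dead-end macro can reuse the pattern, for example this hypothetical one (not from the patch), mirroring the updated unreachable():

	#define my_trap_unreachable()				\
	do {							\
		barrier_before_unreachable();			\
		__builtin_trap();	/* noreturn, so the same GCC issue applies */ \
	} while (0)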
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index e835fc0423ec..ab4711c63601 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
86 | # define barrier_data(ptr) barrier() | 86 | # define barrier_data(ptr) barrier() |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | /* workaround for GCC PR82365 if needed */ | ||
90 | #ifndef barrier_before_unreachable | ||
91 | # define barrier_before_unreachable() do { } while (0) | ||
92 | #endif | ||
93 | |||
89 | /* Unreachable code */ | 94 | /* Unreachable code */ |
90 | #ifdef CONFIG_STACK_VALIDATION | 95 | #ifdef CONFIG_STACK_VALIDATION |
91 | /* | 96 | /* |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5e3531027b51..c826b0b5232a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -198,6 +198,7 @@ struct gendisk { | |||
198 | void *private_data; | 198 | void *private_data; |
199 | 199 | ||
200 | int flags; | 200 | int flags; |
201 | struct rw_semaphore lookup_sem; | ||
201 | struct kobject *slave_dir; | 202 | struct kobject *slave_dir; |
202 | 203 | ||
203 | struct timer_rand_state *random; | 204 | struct timer_rand_state *random; |
@@ -600,8 +601,9 @@ extern void delete_partition(struct gendisk *, int); | |||
600 | extern void printk_all_partitions(void); | 601 | extern void printk_all_partitions(void); |
601 | 602 | ||
602 | extern struct gendisk *__alloc_disk_node(int minors, int node_id); | 603 | extern struct gendisk *__alloc_disk_node(int minors, int node_id); |
603 | extern struct kobject *get_disk(struct gendisk *disk); | 604 | extern struct kobject *get_disk_and_module(struct gendisk *disk); |
604 | extern void put_disk(struct gendisk *disk); | 605 | extern void put_disk(struct gendisk *disk); |
606 | extern void put_disk_and_module(struct gendisk *disk); | ||
605 | extern void blk_register_region(dev_t devt, unsigned long range, | 607 | extern void blk_register_region(dev_t devt, unsigned long range, |
606 | struct module *module, | 608 | struct module *module, |
607 | struct kobject *(*probe)(dev_t, int *, void *), | 609 | struct kobject *(*probe)(dev_t, int *, void *), |
diff --git a/include/linux/init.h b/include/linux/init.h index 506a98151131..bc27cf03c41e 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -6,10 +6,10 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | 7 | ||
8 | /* Built-in __init functions needn't be compiled with retpoline */ | 8 | /* Built-in __init functions needn't be compiled with retpoline */ |
9 | #if defined(RETPOLINE) && !defined(MODULE) | 9 | #if defined(__noretpoline) && !defined(MODULE) |
10 | #define __noretpoline __attribute__((indirect_branch("keep"))) | 10 | #define __noinitretpoline __noretpoline |
11 | #else | 11 | #else |
12 | #define __noretpoline | 12 | #define __noinitretpoline |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | /* These macros are used to mark some functions or | 15 | /* These macros are used to mark some functions or |
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | /* These are for everybody (although not all archs will actually | 48 | /* These are for everybody (although not all archs will actually |
49 | discard it in modules) */ | 49 | discard it in modules) */ |
50 | #define __init __section(.init.text) __cold __latent_entropy __noretpoline | 50 | #define __init __section(.init.text) __cold __latent_entropy __noinitretpoline |
51 | #define __initdata __section(.init.data) | 51 | #define __initdata __section(.init.data) |
52 | #define __initconst __section(.init.rodata) | 52 | #define __initconst __section(.init.rodata) |
53 | #define __exitdata __section(.exit.data) | 53 | #define __exitdata __section(.exit.data) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b6a29c126cc4..2168cc6b8b30 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[]; | |||
151 | extern struct jump_entry __stop___jump_table[]; | 151 | extern struct jump_entry __stop___jump_table[]; |
152 | 152 | ||
153 | extern void jump_label_init(void); | 153 | extern void jump_label_init(void); |
154 | extern void jump_label_invalidate_init(void); | ||
154 | extern void jump_label_lock(void); | 155 | extern void jump_label_lock(void); |
155 | extern void jump_label_unlock(void); | 156 | extern void jump_label_unlock(void); |
156 | extern void arch_jump_label_transform(struct jump_entry *entry, | 157 | extern void arch_jump_label_transform(struct jump_entry *entry, |
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void) | |||
198 | static_key_initialized = true; | 199 | static_key_initialized = true; |
199 | } | 200 | } |
200 | 201 | ||
202 | static inline void jump_label_invalidate_init(void) {} | ||
203 | |||
201 | static __always_inline bool static_key_false(struct static_key *key) | 204 | static __always_inline bool static_key_false(struct static_key *key) |
202 | { | 205 | { |
203 | if (unlikely(static_key_count(key) > 0)) | 206 | if (unlikely(static_key_count(key) > 0)) |
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index fec5076eda91..dcde9471897d 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h | |||
@@ -4,6 +4,12 @@ | |||
4 | 4 | ||
5 | #include <generated/autoconf.h> | 5 | #include <generated/autoconf.h> |
6 | 6 | ||
7 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
8 | #define __BIG_ENDIAN 4321 | ||
9 | #else | ||
10 | #define __LITTLE_ENDIAN 1234 | ||
11 | #endif | ||
12 | |||
7 | #define __ARG_PLACEHOLDER_1 0, | 13 | #define __ARG_PLACEHOLDER_1 0, |
8 | #define __take_second_arg(__ignored, val, ...) val | 14 | #define __take_second_arg(__ignored, val, ...) val |
9 | 15 | ||
@@ -64,4 +70,7 @@ | |||
64 | */ | 70 | */ |
65 | #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) | 71 | #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) |
66 | 72 | ||
73 | /* Make sure we always have all types and struct attributes defined. */ | ||
74 | #include <linux/compiler_types.h> | ||
75 | |||
67 | #endif /* __LINUX_KCONFIG_H */ | 76 | #endif /* __LINUX_KCONFIG_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index ce51455e2adf..3fd291503576 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option); | |||
472 | extern char *next_arg(char *args, char **param, char **val); | 472 | extern char *next_arg(char *args, char **param, char **val); |
473 | 473 | ||
474 | extern int core_kernel_text(unsigned long addr); | 474 | extern int core_kernel_text(unsigned long addr); |
475 | extern int init_kernel_text(unsigned long addr); | ||
475 | extern int core_kernel_data(unsigned long addr); | 476 | extern int core_kernel_data(unsigned long addr); |
476 | extern int __kernel_text_address(unsigned long addr); | 477 | extern int __kernel_text_address(unsigned long addr); |
477 | extern int kernel_text_address(unsigned long addr); | 478 | extern int kernel_text_address(unsigned long addr); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ac0062b74aed..6930c63126c7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) | |||
1105 | { | 1105 | { |
1106 | } | 1106 | } |
1107 | #endif | 1107 | #endif |
1108 | void kvm_arch_irq_routing_update(struct kvm *kvm); | ||
1109 | 1108 | ||
1110 | static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | 1109 | static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) |
1111 | { | 1110 | { |
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) | |||
1114 | 1113 | ||
1115 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ | 1114 | #endif /* CONFIG_HAVE_KVM_EVENTFD */ |
1116 | 1115 | ||
1116 | void kvm_arch_irq_routing_update(struct kvm *kvm); | ||
1117 | |||
1117 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) | 1118 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
1118 | { | 1119 | { |
1119 | /* | 1120 | /* |
@@ -1272,4 +1273,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, | |||
1272 | } | 1273 | } |
1273 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ | 1274 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ |
1274 | 1275 | ||
1276 | void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, | ||
1277 | unsigned long start, unsigned long end); | ||
1278 | |||
1275 | #endif | 1279 | #endif |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 882046863581..c46016bb25eb 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg, | |||
523 | static inline void mod_memcg_state(struct mem_cgroup *memcg, | 523 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
524 | int idx, int val) | 524 | int idx, int val) |
525 | { | 525 | { |
526 | preempt_disable(); | 526 | unsigned long flags; |
527 | |||
528 | local_irq_save(flags); | ||
527 | __mod_memcg_state(memcg, idx, val); | 529 | __mod_memcg_state(memcg, idx, val); |
528 | preempt_enable(); | 530 | local_irq_restore(flags); |
529 | } | 531 | } |
530 | 532 | ||
531 | /** | 533 | /** |
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec, | |||
606 | static inline void mod_lruvec_state(struct lruvec *lruvec, | 608 | static inline void mod_lruvec_state(struct lruvec *lruvec, |
607 | enum node_stat_item idx, int val) | 609 | enum node_stat_item idx, int val) |
608 | { | 610 | { |
609 | preempt_disable(); | 611 | unsigned long flags; |
612 | |||
613 | local_irq_save(flags); | ||
610 | __mod_lruvec_state(lruvec, idx, val); | 614 | __mod_lruvec_state(lruvec, idx, val); |
611 | preempt_enable(); | 615 | local_irq_restore(flags); |
612 | } | 616 | } |
613 | 617 | ||
614 | static inline void __mod_lruvec_page_state(struct page *page, | 618 | static inline void __mod_lruvec_page_state(struct page *page, |
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page, | |||
630 | static inline void mod_lruvec_page_state(struct page *page, | 634 | static inline void mod_lruvec_page_state(struct page *page, |
631 | enum node_stat_item idx, int val) | 635 | enum node_stat_item idx, int val) |
632 | { | 636 | { |
633 | preempt_disable(); | 637 | unsigned long flags; |
638 | |||
639 | local_irq_save(flags); | ||
634 | __mod_lruvec_page_state(page, idx, val); | 640 | __mod_lruvec_page_state(page, idx, val); |
635 | preempt_enable(); | 641 | local_irq_restore(flags); |
636 | } | 642 | } |
637 | 643 | ||
638 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | 644 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, |
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg, | |||
659 | static inline void count_memcg_events(struct mem_cgroup *memcg, | 665 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
660 | int idx, unsigned long count) | 666 | int idx, unsigned long count) |
661 | { | 667 | { |
662 | preempt_disable(); | 668 | unsigned long flags; |
669 | |||
670 | local_irq_save(flags); | ||
663 | __count_memcg_events(memcg, idx, count); | 671 | __count_memcg_events(memcg, idx, count); |
664 | preempt_enable(); | 672 | local_irq_restore(flags); |
665 | } | 673 | } |
666 | 674 | ||
667 | /* idx can be of type enum memcg_event_item or vm_event_item */ | 675 | /* idx can be of type enum memcg_event_item or vm_event_item */ |
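Every memcontrol.h hunk above follows the same reasoning: preempt_disable() keeps the task on its CPU but does not stop an interrupt on that CPU from performing the same per-cpu counter update in the middle of the read-modify-write, so the non-underscore wrappers now disable interrupts around the lockless helpers. Reduced to a sketch with a hypothetical helper and struct:

	static inline void mod_stat_irqsafe(struct my_stats *s, int idx, int val)
	{
		unsigned long flags;

		local_irq_save(flags);		/* IRQs off also blocks preemption here */
		__mod_my_stat(s, idx, val);	/* hypothetical irq-unsafe helper */
		local_irq_restore(flags);
	}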
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index f25c13423bd4..cb3bbed4e633 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -66,6 +66,11 @@ struct mutex { | |||
66 | #endif | 66 | #endif |
67 | }; | 67 | }; |
68 | 68 | ||
69 | /* | ||
70 | * Internal helper function; C doesn't allow us to hide it :/ | ||
71 | * | ||
72 | * DO NOT USE (outside of mutex code). | ||
73 | */ | ||
69 | static inline struct task_struct *__mutex_owner(struct mutex *lock) | 74 | static inline struct task_struct *__mutex_owner(struct mutex *lock) |
70 | { | 75 | { |
71 | return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); | 76 | return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); |
diff --git a/include/linux/nospec.h b/include/linux/nospec.h index fbc98e2c8228..e791ebc65c9c 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #ifndef _LINUX_NOSPEC_H | 6 | #ifndef _LINUX_NOSPEC_H |
7 | #define _LINUX_NOSPEC_H | 7 | #define _LINUX_NOSPEC_H |
8 | #include <asm/barrier.h> | ||
8 | 9 | ||
9 | /** | 10 | /** |
10 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise | 11 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise |
@@ -30,26 +31,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
30 | #endif | 31 | #endif |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * Warn developers about inappropriate array_index_nospec() usage. | ||
34 | * | ||
35 | * Even if the CPU speculates past the WARN_ONCE branch, the | ||
36 | * sign bit of @index is taken into account when generating the | ||
37 | * mask. | ||
38 | * | ||
39 | * This warning is compiled out when the compiler can infer that | ||
40 | * @index and @size are less than LONG_MAX. | ||
41 | */ | ||
42 | #define array_index_mask_nospec_check(index, size) \ | ||
43 | ({ \ | ||
44 | if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ | ||
45 | "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ | ||
46 | _mask = 0; \ | ||
47 | else \ | ||
48 | _mask = array_index_mask_nospec(index, size); \ | ||
49 | _mask; \ | ||
50 | }) | ||
51 | |||
52 | /* | ||
53 | * array_index_nospec - sanitize an array index after a bounds check | 34 | * array_index_nospec - sanitize an array index after a bounds check |
54 | * | 35 | * |
55 | * For a code sequence like: | 36 | * For a code sequence like: |
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
67 | ({ \ | 48 | ({ \ |
68 | typeof(index) _i = (index); \ | 49 | typeof(index) _i = (index); \ |
69 | typeof(size) _s = (size); \ | 50 | typeof(size) _s = (size); \ |
70 | unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ | 51 | unsigned long _mask = array_index_mask_nospec(_i, _s); \ |
71 | \ | 52 | \ |
72 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ | 53 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ |
73 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ | 54 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ |
74 | \ | 55 | \ |
75 | _i &= _mask; \ | 56 | (typeof(_i)) (_i & _mask); \ |
76 | _i; \ | ||
77 | }) | 57 | }) |
78 | #endif /* _LINUX_NOSPEC_H */ | 58 | #endif /* _LINUX_NOSPEC_H */ |
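With the WARN_ONCE wrapper gone, array_index_nospec() above is a pure expression: it masks the index and hands the clamped value back, so callers assign the result after their own bounds check. A usage sketch with illustrative names (table and nr_entries are not part of the patch):

	static int read_entry(unsigned long idx)
	{
		if (idx >= nr_entries)
			return -EINVAL;
		/* value used under speculation stays within [0, nr_entries) */
		idx = array_index_nospec(idx, nr_entries);
		return table[idx];
	}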
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index af0f44effd44..40036a57d072 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h | |||
@@ -14,26 +14,10 @@ | |||
14 | 14 | ||
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/perf_event.h> | 16 | #include <linux/perf_event.h> |
17 | #include <linux/platform_device.h> | ||
17 | #include <linux/sysfs.h> | 18 | #include <linux/sysfs.h> |
18 | #include <asm/cputype.h> | 19 | #include <asm/cputype.h> |
19 | 20 | ||
20 | /* | ||
21 | * struct arm_pmu_platdata - ARM PMU platform data | ||
22 | * | ||
23 | * @handle_irq: an optional handler which will be called from the | ||
24 | * interrupt and passed the address of the low level handler, | ||
25 | * and can be used to implement any platform specific handling | ||
26 | * before or after calling it. | ||
27 | * | ||
28 | * @irq_flags: if non-zero, these flags will be passed to request_irq | ||
29 | * when requesting interrupts for this PMU device. | ||
30 | */ | ||
31 | struct arm_pmu_platdata { | ||
32 | irqreturn_t (*handle_irq)(int irq, void *dev, | ||
33 | irq_handler_t pmu_handler); | ||
34 | unsigned long irq_flags; | ||
35 | }; | ||
36 | |||
37 | #ifdef CONFIG_ARM_PMU | 21 | #ifdef CONFIG_ARM_PMU |
38 | 22 | ||
39 | /* | 23 | /* |
@@ -92,7 +76,6 @@ enum armpmu_attr_groups { | |||
92 | 76 | ||
93 | struct arm_pmu { | 77 | struct arm_pmu { |
94 | struct pmu pmu; | 78 | struct pmu pmu; |
95 | cpumask_t active_irqs; | ||
96 | cpumask_t supported_cpus; | 79 | cpumask_t supported_cpus; |
97 | char *name; | 80 | char *name; |
98 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | 81 | irqreturn_t (*handle_irq)(int irq_num, void *dev); |
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } | |||
174 | 157 | ||
175 | /* Internal functions only for core arm_pmu code */ | 158 | /* Internal functions only for core arm_pmu code */ |
176 | struct arm_pmu *armpmu_alloc(void); | 159 | struct arm_pmu *armpmu_alloc(void); |
160 | struct arm_pmu *armpmu_alloc_atomic(void); | ||
177 | void armpmu_free(struct arm_pmu *pmu); | 161 | void armpmu_free(struct arm_pmu *pmu); |
178 | int armpmu_register(struct arm_pmu *pmu); | 162 | int armpmu_register(struct arm_pmu *pmu); |
179 | int armpmu_request_irqs(struct arm_pmu *armpmu); | 163 | int armpmu_request_irq(int irq, int cpu); |
180 | void armpmu_free_irqs(struct arm_pmu *armpmu); | 164 | void armpmu_free_irq(int irq, int cpu); |
181 | int armpmu_request_irq(struct arm_pmu *armpmu, int cpu); | ||
182 | void armpmu_free_irq(struct arm_pmu *armpmu, int cpu); | ||
183 | 165 | ||
184 | #define ARMV8_PMU_PDEV_NAME "armv8-pmu" | 166 | #define ARMV8_PMU_PDEV_NAME "armv8-pmu" |
185 | 167 | ||
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index b884b7794187..e6335227b844 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, | |||
469 | */ | 469 | */ |
470 | static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) | 470 | static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) |
471 | { | 471 | { |
472 | if (size * sizeof(void *) > KMALLOC_MAX_SIZE) | 472 | if (size > KMALLOC_MAX_SIZE / sizeof(void *)) |
473 | return NULL; | 473 | return NULL; |
474 | return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); | 474 | return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); |
475 | } | 475 | } |
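The ptr_ring.h change is an overflow-safe rewrite of the same bound: multiplying first lets a large size wrap around and slip under KMALLOC_MAX_SIZE, while dividing the constant limit cannot overflow. Illustrative numbers, assuming a 32-bit build where sizeof(void *) == 4 and size_t is 32 bits:

	unsigned int size = 0x40000001;
	/* old check: size * sizeof(void *) wraps to 4, so the bound passes   */
	/* new check: size > KMALLOC_MAX_SIZE / sizeof(void *) is true, reject */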
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 1149533aa2fa..9806184bb3d5 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm) | |||
36 | atomic_inc(&mm->mm_count); | 36 | atomic_inc(&mm->mm_count); |
37 | } | 37 | } |
38 | 38 | ||
39 | extern void mmdrop(struct mm_struct *mm); | 39 | extern void __mmdrop(struct mm_struct *mm); |
40 | |||
41 | static inline void mmdrop(struct mm_struct *mm) | ||
42 | { | ||
43 | /* | ||
44 | * The implicit full barrier implied by atomic_dec_and_test() is | ||
45 | * required by the membarrier system call before returning to | ||
46 | * user-space, after storing to rq->curr. | ||
47 | */ | ||
48 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) | ||
49 | __mmdrop(mm); | ||
50 | } | ||
40 | 51 | ||
41 | /** | 52 | /** |
42 | * mmget() - Pin the address space associated with a &struct mm_struct. | 53 | * mmget() - Pin the address space associated with a &struct mm_struct. |
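The sched/mm.h hunk above turns mmdrop() into an inline fast path: the atomic_dec_and_test() stays in the caller, only the final-free slow path __mmdrop() remains out of line, and the comment leans on the implied full barrier for the membarrier system call. The usual pairing looks like this sketch (the worker function is hypothetical):

	static void inspect_mm(struct mm_struct *mm)
	{
		mmgrab(mm);	/* pin mm_count so the mm_struct itself stays allocated */
		/* ... examine mm without requiring its user pages ... */
		mmdrop(mm);	/* __mmdrop() runs only when the last reference goes */
	}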
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 0dcf4e480ef7..96fe289c4c6e 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/uidgid.h> | 5 | #include <linux/uidgid.h> |
6 | #include <linux/atomic.h> | 6 | #include <linux/atomic.h> |
7 | #include <linux/ratelimit.h> | ||
7 | 8 | ||
8 | struct key; | 9 | struct key; |
9 | 10 | ||
@@ -41,6 +42,9 @@ struct user_struct { | |||
41 | defined(CONFIG_NET) | 42 | defined(CONFIG_NET) |
42 | atomic_long_t locked_vm; | 43 | atomic_long_t locked_vm; |
43 | #endif | 44 | #endif |
45 | |||
46 | /* Miscellaneous per-user rate limit */ | ||
47 | struct ratelimit_state ratelimit; | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | extern int uids_sysfs_init(void); | 50 | extern int uids_sysfs_init(void); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5ebc0f869720..c1e66bdcf583 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, | |||
3646 | return true; | 3646 | return true; |
3647 | } | 3647 | } |
3648 | 3648 | ||
3649 | /* For small packets <= CHECKSUM_BREAK peform checksum complete directly | 3649 | /* For small packets <= CHECKSUM_BREAK perform checksum complete directly |
3650 | * in checksum_init. | 3650 | * in checksum_init. |
3651 | */ | 3651 | */ |
3652 | #define CHECKSUM_BREAK 76 | 3652 | #define CHECKSUM_BREAK 76 |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 7b6a59f722a3..a1a3f4ed94ce 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page); | |||
337 | extern void mark_page_lazyfree(struct page *page); | 337 | extern void mark_page_lazyfree(struct page *page); |
338 | extern void swap_setup(void); | 338 | extern void swap_setup(void); |
339 | 339 | ||
340 | extern void add_page_to_unevictable_list(struct page *page); | ||
341 | |||
342 | extern void lru_cache_add_active_or_unevictable(struct page *page, | 340 | extern void lru_cache_add_active_or_unevictable(struct page *page, |
343 | struct vm_area_struct *vma); | 341 | struct vm_area_struct *vma); |
344 | 342 | ||
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4a54ef96aff5..bc0cda180c8b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork); | |||
465 | 465 | ||
466 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | 466 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
467 | int max_active); | 467 | int max_active); |
468 | extern struct work_struct *current_work(void); | ||
468 | extern bool current_is_workqueue_rescuer(void); | 469 | extern bool current_is_workqueue_rescuer(void); |
469 | extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); | 470 | extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); |
470 | extern unsigned int work_busy(struct work_struct *work); | 471 | extern unsigned int work_busy(struct work_struct *work); |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 906e90223066..c96511fa9198 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -4149,7 +4149,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid); | |||
4149 | * The TX headroom reserved by mac80211 for its own tx_status functions. | 4149 | * The TX headroom reserved by mac80211 for its own tx_status functions. |
4150 | * This is enough for the radiotap header. | 4150 | * This is enough for the radiotap header. |
4151 | */ | 4151 | */ |
4152 | #define IEEE80211_TX_STATUS_HEADROOM 14 | 4152 | #define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4) |
4153 | 4153 | ||
4154 | /** | 4154 | /** |
4155 | * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames | 4155 | * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames |
diff --git a/include/net/regulatory.h b/include/net/regulatory.h index ebc5a2ed8631..f83cacce3308 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h | |||
@@ -78,7 +78,7 @@ struct regulatory_request { | |||
78 | int wiphy_idx; | 78 | int wiphy_idx; |
79 | enum nl80211_reg_initiator initiator; | 79 | enum nl80211_reg_initiator initiator; |
80 | enum nl80211_user_reg_hint_type user_reg_hint_type; | 80 | enum nl80211_user_reg_hint_type user_reg_hint_type; |
81 | char alpha2[2]; | 81 | char alpha2[3]; |
82 | enum nl80211_dfs_regions dfs_region; | 82 | enum nl80211_dfs_regions dfs_region; |
83 | bool intersect; | 83 | bool intersect; |
84 | bool processed; | 84 | bool processed; |
diff --git a/include/net/udplite.h b/include/net/udplite.h index 81bdbf97319b..9185e45b997f 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h | |||
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) | |||
64 | UDP_SKB_CB(skb)->cscov = cscov; | 64 | UDP_SKB_CB(skb)->cscov = cscov; |
65 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 65 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
66 | skb->ip_summed = CHECKSUM_NONE; | 66 | skb->ip_summed = CHECKSUM_NONE; |
67 | skb->csum_valid = 0; | ||
67 | } | 68 | } |
68 | 69 | ||
69 | return 0; | 70 | return 0; |
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index c2d81167c858..2cdf8dcf4bdc 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h | |||
@@ -29,10 +29,6 @@ enum rdma_restrack_type { | |||
29 | */ | 29 | */ |
30 | RDMA_RESTRACK_QP, | 30 | RDMA_RESTRACK_QP, |
31 | /** | 31 | /** |
32 | * @RDMA_RESTRACK_XRCD: XRC domain (XRCD) | ||
33 | */ | ||
34 | RDMA_RESTRACK_XRCD, | ||
35 | /** | ||
36 | * @RDMA_RESTRACK_MAX: Last entry, used for array declarations | 32 | * @RDMA_RESTRACK_MAX: Last entry, used for array declarations |
37 | */ | 33 | */ |
38 | RDMA_RESTRACK_MAX | 34 | RDMA_RESTRACK_MAX |
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 6da44079aa58..38287d9d23a1 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h | |||
@@ -276,10 +276,7 @@ struct uverbs_object_tree_def { | |||
276 | */ | 276 | */ |
277 | 277 | ||
278 | struct uverbs_ptr_attr { | 278 | struct uverbs_ptr_attr { |
279 | union { | 279 | u64 data; |
280 | u64 data; | ||
281 | void __user *ptr; | ||
282 | }; | ||
283 | u16 len; | 280 | u16 len; |
284 | /* Combination of bits from enum UVERBS_ATTR_F_XXXX */ | 281 | /* Combination of bits from enum UVERBS_ATTR_F_XXXX */ |
285 | u16 flags; | 282 | u16 flags; |
@@ -351,38 +348,60 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr | |||
351 | } | 348 | } |
352 | 349 | ||
353 | static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, | 350 | static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, |
354 | size_t idx, const void *from) | 351 | size_t idx, const void *from, size_t size) |
355 | { | 352 | { |
356 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); | 353 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); |
357 | u16 flags; | 354 | u16 flags; |
355 | size_t min_size; | ||
358 | 356 | ||
359 | if (IS_ERR(attr)) | 357 | if (IS_ERR(attr)) |
360 | return PTR_ERR(attr); | 358 | return PTR_ERR(attr); |
361 | 359 | ||
360 | min_size = min_t(size_t, attr->ptr_attr.len, size); | ||
361 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) | ||
362 | return -EFAULT; | ||
363 | |||
362 | flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT; | 364 | flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT; |
363 | return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) && | 365 | if (put_user(flags, &attr->uattr->flags)) |
364 | !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT; | 366 | return -EFAULT; |
367 | |||
368 | return 0; | ||
365 | } | 369 | } |
366 | 370 | ||
367 | static inline int _uverbs_copy_from(void *to, size_t to_size, | 371 | static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr) |
372 | { | ||
373 | return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data); | ||
374 | } | ||
375 | |||
376 | static inline int _uverbs_copy_from(void *to, | ||
368 | const struct uverbs_attr_bundle *attrs_bundle, | 377 | const struct uverbs_attr_bundle *attrs_bundle, |
369 | size_t idx) | 378 | size_t idx, |
379 | size_t size) | ||
370 | { | 380 | { |
371 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); | 381 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); |
372 | 382 | ||
373 | if (IS_ERR(attr)) | 383 | if (IS_ERR(attr)) |
374 | return PTR_ERR(attr); | 384 | return PTR_ERR(attr); |
375 | 385 | ||
376 | if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data)) | 386 | /* |
387 | * Validation ensures attr->ptr_attr.len >= size. If the caller is | ||
388 | * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with | ||
389 | * the right size. | ||
390 | */ | ||
391 | if (unlikely(size < attr->ptr_attr.len)) | ||
392 | return -EINVAL; | ||
393 | |||
394 | if (uverbs_attr_ptr_is_inline(attr)) | ||
377 | memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len); | 395 | memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len); |
378 | else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len)) | 396 | else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data), |
397 | attr->ptr_attr.len)) | ||
379 | return -EFAULT; | 398 | return -EFAULT; |
380 | 399 | ||
381 | return 0; | 400 | return 0; |
382 | } | 401 | } |
383 | 402 | ||
384 | #define uverbs_copy_from(to, attrs_bundle, idx) \ | 403 | #define uverbs_copy_from(to, attrs_bundle, idx) \ |
385 | _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx) | 404 | _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to)) |
386 | 405 | ||
387 | /* ================================================= | 406 | /* ================================================= |
388 | * Definitions -> Specs infrastructure | 407 | * Definitions -> Specs infrastructure |
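After the uverbs_ioctl.h rework above, a pointer attribute always travels as a u64: short payloads sit inline in ptr_attr.data, longer ones carry a user pointer, uverbs_attr_ptr_is_inline() tells them apart, and both copy helpers take an explicit size so short user buffers are truncated rather than overrun. A hedged usage sketch inside a method handler (attrs is the handler's uverbs_attr_bundle; attribute IDs and structs are illustrative):

	struct my_cmd req;
	struct my_resp resp = {};
	int ret;

	ret = uverbs_copy_from(&req, attrs, MY_METHOD_ATTR_CMD);
	if (ret)
		return ret;
	/* ... perform the method, fill resp ... */
	return uverbs_copy_to(attrs, MY_METHOD_ATTR_RESP, &resp, sizeof(resp));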
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index c2d1b15da136..a91f25151a5b 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #define ARC_REG_MCIP_BCR 0x0d0 | 16 | #define ARC_REG_MCIP_BCR 0x0d0 |
17 | #define ARC_REG_MCIP_IDU_BCR 0x0D5 | 17 | #define ARC_REG_MCIP_IDU_BCR 0x0D5 |
18 | #define ARC_REG_GFRC_BUILD 0x0D6 | ||
18 | #define ARC_REG_MCIP_CMD 0x600 | 19 | #define ARC_REG_MCIP_CMD 0x600 |
19 | #define ARC_REG_MCIP_WDATA 0x601 | 20 | #define ARC_REG_MCIP_WDATA 0x601 |
20 | #define ARC_REG_MCIP_READBACK 0x602 | 21 | #define ARC_REG_MCIP_READBACK 0x602 |
@@ -36,10 +37,14 @@ struct mcip_cmd { | |||
36 | #define CMD_SEMA_RELEASE 0x12 | 37 | #define CMD_SEMA_RELEASE 0x12 |
37 | 38 | ||
38 | #define CMD_DEBUG_SET_MASK 0x34 | 39 | #define CMD_DEBUG_SET_MASK 0x34 |
40 | #define CMD_DEBUG_READ_MASK 0x35 | ||
39 | #define CMD_DEBUG_SET_SELECT 0x36 | 41 | #define CMD_DEBUG_SET_SELECT 0x36 |
42 | #define CMD_DEBUG_READ_SELECT 0x37 | ||
40 | 43 | ||
41 | #define CMD_GFRC_READ_LO 0x42 | 44 | #define CMD_GFRC_READ_LO 0x42 |
42 | #define CMD_GFRC_READ_HI 0x43 | 45 | #define CMD_GFRC_READ_HI 0x43 |
46 | #define CMD_GFRC_SET_CORE 0x47 | ||
47 | #define CMD_GFRC_READ_CORE 0x48 | ||
43 | 48 | ||
44 | #define CMD_IDU_ENABLE 0x71 | 49 | #define CMD_IDU_ENABLE 0x71 |
45 | #define CMD_IDU_DISABLE 0x72 | 50 | #define CMD_IDU_DISABLE 0x72 |
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index 91a31ffed828..9a781f0611df 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h | |||
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ | 65 | #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ |
66 | #define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ | ||
66 | 67 | ||
67 | struct drm_virtgpu_getparam { | 68 | struct drm_virtgpu_getparam { |
68 | __u64 param; | 69 | __u64 param; |
diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h index 20d1490d6377..3c50e07ee833 100644 --- a/include/uapi/linux/blktrace_api.h +++ b/include/uapi/linux/blktrace_api.h | |||
@@ -131,7 +131,7 @@ enum { | |||
131 | #define BLKTRACE_BDEV_SIZE 32 | 131 | #define BLKTRACE_BDEV_SIZE 32 |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * User setup structure passed with BLKTRACESTART | 134 | * User setup structure passed with BLKTRACESETUP |
135 | */ | 135 | */ |
136 | struct blk_user_trace_setup { | 136 | struct blk_user_trace_setup { |
137 | char name[BLKTRACE_BDEV_SIZE]; /* output */ | 137 | char name[BLKTRACE_BDEV_SIZE]; /* output */ |
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index f8cb5760ea4f..8bbbcb5cd94b 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #define _UAPI_LINUX_IF_ETHER_H | 23 | #define _UAPI_LINUX_IF_ETHER_H |
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/libc-compat.h> | ||
27 | 26 | ||
28 | /* | 27 | /* |
29 | * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble | 28 | * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble |
@@ -151,6 +150,11 @@ | |||
151 | * This is an Ethernet frame header. | 150 | * This is an Ethernet frame header. |
152 | */ | 151 | */ |
153 | 152 | ||
153 | /* allow libcs like musl to deactivate this, glibc does not implement this. */ | ||
154 | #ifndef __UAPI_DEF_ETHHDR | ||
155 | #define __UAPI_DEF_ETHHDR 1 | ||
156 | #endif | ||
157 | |||
154 | #if __UAPI_DEF_ETHHDR | 158 | #if __UAPI_DEF_ETHHDR |
155 | struct ethhdr { | 159 | struct ethhdr { |
156 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 160 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0fb5ef939732..7b26d4b0b052 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt { | |||
761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 | 761 | #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 |
762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 | 762 | #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 |
763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) | 763 | #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) |
764 | #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) | ||
764 | 765 | ||
765 | /* | 766 | /* |
766 | * Extension capability list. | 767 | * Extension capability list. |
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt { | |||
934 | #define KVM_CAP_S390_AIS_MIGRATION 150 | 935 | #define KVM_CAP_S390_AIS_MIGRATION 150 |
935 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 | 936 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 |
936 | #define KVM_CAP_S390_BPB 152 | 937 | #define KVM_CAP_S390_BPB 152 |
938 | #define KVM_CAP_GET_MSR_FEATURES 153 | ||
937 | 939 | ||
938 | #ifdef KVM_CAP_IRQ_ROUTING | 940 | #ifdef KVM_CAP_IRQ_ROUTING |
939 | 941 | ||
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index fc29efaa918c..8254c937c9f4 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h | |||
@@ -264,10 +264,4 @@ | |||
264 | 264 | ||
265 | #endif /* __GLIBC__ */ | 265 | #endif /* __GLIBC__ */ |
266 | 266 | ||
267 | /* Definitions for if_ether.h */ | ||
268 | /* allow libcs like musl to deactivate this, glibc does not implement this. */ | ||
269 | #ifndef __UAPI_DEF_ETHHDR | ||
270 | #define __UAPI_DEF_ETHHDR 1 | ||
271 | #endif | ||
272 | |||
273 | #endif /* _UAPI_LIBC_COMPAT_H */ | 267 | #endif /* _UAPI_LIBC_COMPAT_H */ |
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 3d77fe91239a..9008f31c7eb6 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h | |||
@@ -42,7 +42,7 @@ typedef enum { | |||
42 | SEV_RET_INVALID_PLATFORM_STATE, | 42 | SEV_RET_INVALID_PLATFORM_STATE, |
43 | SEV_RET_INVALID_GUEST_STATE, | 43 | SEV_RET_INVALID_GUEST_STATE, |
44 | SEV_RET_INAVLID_CONFIG, | 44 | SEV_RET_INAVLID_CONFIG, |
45 | SEV_RET_INVALID_len, | 45 | SEV_RET_INVALID_LEN, |
46 | SEV_RET_ALREADY_OWNED, | 46 | SEV_RET_ALREADY_OWNED, |
47 | SEV_RET_INVALID_CERTIFICATE, | 47 | SEV_RET_INVALID_CERTIFICATE, |
48 | SEV_RET_POLICY_FAILURE, | 48 | SEV_RET_POLICY_FAILURE, |
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index e46d82b91166..d5a1b8a492b9 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h | |||
@@ -69,8 +69,8 @@ struct ptrace_peeksiginfo_args { | |||
69 | #define PTRACE_SECCOMP_GET_METADATA 0x420d | 69 | #define PTRACE_SECCOMP_GET_METADATA 0x420d |
70 | 70 | ||
71 | struct seccomp_metadata { | 71 | struct seccomp_metadata { |
72 | unsigned long filter_off; /* Input: which filter */ | 72 | __u64 filter_off; /* Input: which filter */ |
73 | unsigned int flags; /* Output: filter's flags */ | 73 | __u64 flags; /* Output: filter's flags */ |
74 | }; | 74 | }; |
75 | 75 | ||
76 | /* Read signals from a shared (process wide) queue */ | 76 | /* Read signals from a shared (process wide) queue */ |
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h index 03557b5f9aa6..46de0885e800 100644 --- a/include/uapi/rdma/rdma_user_ioctl.h +++ b/include/uapi/rdma/rdma_user_ioctl.h | |||
@@ -65,7 +65,7 @@ struct ib_uverbs_attr { | |||
65 | __u16 len; /* only for pointers */ | 65 | __u16 len; /* only for pointers */ |
66 | __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ | 66 | __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ |
67 | __u16 reserved; | 67 | __u16 reserved; |
68 | __u64 data; /* ptr to command, inline data or idr/fd */ | 68 | __aligned_u64 data; /* ptr to command, inline data or idr/fd */ |
69 | }; | 69 | }; |
70 | 70 | ||
71 | struct ib_uverbs_ioctl_hdr { | 71 | struct ib_uverbs_ioctl_hdr { |
@@ -73,7 +73,7 @@ struct ib_uverbs_ioctl_hdr { | |||
73 | __u16 object_id; | 73 | __u16 object_id; |
74 | __u16 method_id; | 74 | __u16 method_id; |
75 | __u16 num_attrs; | 75 | __u16 num_attrs; |
76 | __u64 reserved; | 76 | __aligned_u64 reserved; |
77 | struct ib_uverbs_attr attrs[0]; | 77 | struct ib_uverbs_attr attrs[0]; |
78 | }; | 78 | }; |
79 | 79 | ||
diff --git a/init/main.c b/init/main.c index a8100b954839..969eaf140ef0 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -89,6 +89,7 @@ | |||
89 | #include <linux/io.h> | 89 | #include <linux/io.h> |
90 | #include <linux/cache.h> | 90 | #include <linux/cache.h> |
91 | #include <linux/rodata_test.h> | 91 | #include <linux/rodata_test.h> |
92 | #include <linux/jump_label.h> | ||
92 | 93 | ||
93 | #include <asm/io.h> | 94 | #include <asm/io.h> |
94 | #include <asm/bugs.h> | 95 | #include <asm/bugs.h> |
@@ -1000,6 +1001,7 @@ static int __ref kernel_init(void *unused) | |||
1000 | /* need to finish all async __init code before freeing the memory */ | 1001 | /* need to finish all async __init code before freeing the memory */ |
1001 | async_synchronize_full(); | 1002 | async_synchronize_full(); |
1002 | ftrace_free_init_mem(); | 1003 | ftrace_free_init_mem(); |
1004 | jump_label_invalidate_init(); | ||
1003 | free_initmem(); | 1005 | free_initmem(); |
1004 | mark_readonly(); | 1006 | mark_readonly(); |
1005 | system_state = SYSTEM_RUNNING; | 1007 | system_state = SYSTEM_RUNNING; |
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index b1f66480135b..14750e7c5ee4 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array) | |||
26 | { | 26 | { |
27 | int i; | 27 | int i; |
28 | 28 | ||
29 | for (i = 0; i < array->map.max_entries; i++) | 29 | for (i = 0; i < array->map.max_entries; i++) { |
30 | free_percpu(array->pptrs[i]); | 30 | free_percpu(array->pptrs[i]); |
31 | cond_resched(); | ||
32 | } | ||
31 | } | 33 | } |
32 | 34 | ||
33 | static int bpf_array_alloc_percpu(struct bpf_array *array) | 35 | static int bpf_array_alloc_percpu(struct bpf_array *array) |
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array) | |||
43 | return -ENOMEM; | 45 | return -ENOMEM; |
44 | } | 46 | } |
45 | array->pptrs[i] = ptr; | 47 | array->pptrs[i] = ptr; |
48 | cond_resched(); | ||
46 | } | 49 | } |
47 | 50 | ||
48 | return 0; | 51 | return 0; |
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr) | |||
73 | static struct bpf_map *array_map_alloc(union bpf_attr *attr) | 76 | static struct bpf_map *array_map_alloc(union bpf_attr *attr) |
74 | { | 77 | { |
75 | bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; | 78 | bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; |
76 | int numa_node = bpf_map_attr_numa_node(attr); | 79 | int ret, numa_node = bpf_map_attr_numa_node(attr); |
77 | u32 elem_size, index_mask, max_entries; | 80 | u32 elem_size, index_mask, max_entries; |
78 | bool unpriv = !capable(CAP_SYS_ADMIN); | 81 | bool unpriv = !capable(CAP_SYS_ADMIN); |
82 | u64 cost, array_size, mask64; | ||
79 | struct bpf_array *array; | 83 | struct bpf_array *array; |
80 | u64 array_size, mask64; | ||
81 | 84 | ||
82 | elem_size = round_up(attr->value_size, 8); | 85 | elem_size = round_up(attr->value_size, 8); |
83 | 86 | ||
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
109 | array_size += (u64) max_entries * elem_size; | 112 | array_size += (u64) max_entries * elem_size; |
110 | 113 | ||
111 | /* make sure there is no u32 overflow later in round_up() */ | 114 | /* make sure there is no u32 overflow later in round_up() */ |
112 | if (array_size >= U32_MAX - PAGE_SIZE) | 115 | cost = array_size; |
116 | if (cost >= U32_MAX - PAGE_SIZE) | ||
113 | return ERR_PTR(-ENOMEM); | 117 | return ERR_PTR(-ENOMEM); |
118 | if (percpu) { | ||
119 | cost += (u64)attr->max_entries * elem_size * num_possible_cpus(); | ||
120 | if (cost >= U32_MAX - PAGE_SIZE) | ||
121 | return ERR_PTR(-ENOMEM); | ||
122 | } | ||
123 | cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; | ||
124 | |||
125 | ret = bpf_map_precharge_memlock(cost); | ||
126 | if (ret < 0) | ||
127 | return ERR_PTR(ret); | ||
114 | 128 | ||
115 | /* allocate all map elements and zero-initialize them */ | 129 | /* allocate all map elements and zero-initialize them */ |
116 | array = bpf_map_area_alloc(array_size, numa_node); | 130 | array = bpf_map_area_alloc(array_size, numa_node); |
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
121 | 135 | ||
122 | /* copy mandatory map attributes */ | 136 | /* copy mandatory map attributes */ |
123 | bpf_map_init_from_attr(&array->map, attr); | 137 | bpf_map_init_from_attr(&array->map, attr); |
138 | array->map.pages = cost; | ||
124 | array->elem_size = elem_size; | 139 | array->elem_size = elem_size; |
125 | 140 | ||
126 | if (!percpu) | 141 | if (percpu && bpf_array_alloc_percpu(array)) { |
127 | goto out; | ||
128 | |||
129 | array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); | ||
130 | |||
131 | if (array_size >= U32_MAX - PAGE_SIZE || | ||
132 | bpf_array_alloc_percpu(array)) { | ||
133 | bpf_map_area_free(array); | 142 | bpf_map_area_free(array); |
134 | return ERR_PTR(-ENOMEM); | 143 | return ERR_PTR(-ENOMEM); |
135 | } | 144 | } |
136 | out: | ||
137 | array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT; | ||
138 | 145 | ||
139 | return &array->map; | 146 | return &array->map; |
140 | } | 147 | } |
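
The arraymap hunks above fold the base array and the per-cpu element sizes into one 64-bit cost that is range-checked and pre-charged against the memlock limit before anything is allocated. A minimal userspace sketch of that accounting pattern, assuming a made-up precharge_cost() helper and PAGE_SZ constant in place of bpf_map_precharge_memlock() and PAGE_SIZE:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define U32_MAX  0xffffffffULL
#define PAGE_SZ  4096ULL                       /* stand-in for PAGE_SIZE */

/* Accumulate the would-be allocation size in 64 bits, reject it before any
 * allocation happens, and only then convert the cost to pages. */
static int precharge_cost(uint32_t max_entries, uint32_t value_size,
                          unsigned int nr_cpus, int percpu, uint64_t *pages)
{
        uint64_t elem_size = ((uint64_t)value_size + 7) & ~7ULL; /* round_up(value_size, 8) */
        uint64_t cost = (uint64_t)max_entries * elem_size;

        if (cost >= U32_MAX - PAGE_SZ)
                return -ENOMEM;
        if (percpu) {
                cost += (uint64_t)max_entries * elem_size * nr_cpus;
                if (cost >= U32_MAX - PAGE_SZ)
                        return -ENOMEM;
        }
        *pages = (cost + PAGE_SZ - 1) / PAGE_SZ; /* round_up(cost, PAGE_SZ) >> PAGE_SHIFT */
        return 0;
}

int main(void)
{
        uint64_t pages;
        int err = precharge_cost(1U << 20, 64, 8, 1, &pages);

        if (err)
                printf("rejected before allocating: %d\n", err);
        else
                printf("charge %llu pages before allocating\n",
                       (unsigned long long)pages);
        return 0;
}
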
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 29ca9208dcfa..d315b393abdd 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, | |||
1590 | * so always copy 'cnt' prog_ids to the user. | 1590 | * so always copy 'cnt' prog_ids to the user. |
1591 | * In a rare race the user will see zero prog_ids | 1591 | * In a rare race the user will see zero prog_ids |
1592 | */ | 1592 | */ |
1593 | ids = kcalloc(cnt, sizeof(u32), GFP_USER); | 1593 | ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); |
1594 | if (!ids) | 1594 | if (!ids) |
1595 | return -ENOMEM; | 1595 | return -ENOMEM; |
1596 | rcu_read_lock(); | 1596 | rcu_read_lock(); |
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index fbfdada6caee..a4bb0b34375a 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c | |||
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data) | |||
334 | static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, | 334 | static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, |
335 | int map_id) | 335 | int map_id) |
336 | { | 336 | { |
337 | gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN; | 337 | gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; |
338 | struct bpf_cpu_map_entry *rcpu; | 338 | struct bpf_cpu_map_entry *rcpu; |
339 | int numa, err; | 339 | int numa, err; |
340 | 340 | ||
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 7b469d10d0e9..b4b5b81e7251 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c | |||
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map) | |||
555 | struct lpm_trie_node __rcu **slot; | 555 | struct lpm_trie_node __rcu **slot; |
556 | struct lpm_trie_node *node; | 556 | struct lpm_trie_node *node; |
557 | 557 | ||
558 | raw_spin_lock(&trie->lock); | 558 | /* Wait for outstanding programs to complete |
559 | * update/lookup/delete/get_next_key and free the trie. | ||
560 | */ | ||
561 | synchronize_rcu(); | ||
559 | 562 | ||
560 | /* Always start at the root and walk down to a node that has no | 563 | /* Always start at the root and walk down to a node that has no |
561 | * children. Then free that node, nullify its reference in the parent | 564 | * children. Then free that node, nullify its reference in the parent |
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map) | |||
566 | slot = &trie->root; | 569 | slot = &trie->root; |
567 | 570 | ||
568 | for (;;) { | 571 | for (;;) { |
569 | node = rcu_dereference_protected(*slot, | 572 | node = rcu_dereference_protected(*slot, 1); |
570 | lockdep_is_held(&trie->lock)); | ||
571 | if (!node) | 573 | if (!node) |
572 | goto unlock; | 574 | goto out; |
573 | 575 | ||
574 | if (rcu_access_pointer(node->child[0])) { | 576 | if (rcu_access_pointer(node->child[0])) { |
575 | slot = &node->child[0]; | 577 | slot = &node->child[0]; |
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map) | |||
587 | } | 589 | } |
588 | } | 590 | } |
589 | 591 | ||
590 | unlock: | 592 | out: |
591 | raw_spin_unlock(&trie->lock); | 593 | kfree(trie); |
592 | } | 594 | } |
593 | 595 | ||
594 | static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) | 596 | static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 48c33417d13c..a927e89dad6e 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock, | |||
521 | static struct bpf_map *sock_map_alloc(union bpf_attr *attr) | 521 | static struct bpf_map *sock_map_alloc(union bpf_attr *attr) |
522 | { | 522 | { |
523 | struct bpf_stab *stab; | 523 | struct bpf_stab *stab; |
524 | int err = -EINVAL; | ||
525 | u64 cost; | 524 | u64 cost; |
525 | int err; | ||
526 | 526 | ||
527 | if (!capable(CAP_NET_ADMIN)) | 527 | if (!capable(CAP_NET_ADMIN)) |
528 | return ERR_PTR(-EPERM); | 528 | return ERR_PTR(-EPERM); |
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) | |||
547 | 547 | ||
548 | /* make sure page count doesn't overflow */ | 548 | /* make sure page count doesn't overflow */ |
549 | cost = (u64) stab->map.max_entries * sizeof(struct sock *); | 549 | cost = (u64) stab->map.max_entries * sizeof(struct sock *); |
550 | err = -EINVAL; | ||
550 | if (cost >= U32_MAX - PAGE_SIZE) | 551 | if (cost >= U32_MAX - PAGE_SIZE) |
551 | goto free_stab; | 552 | goto free_stab; |
552 | 553 | ||
diff --git a/kernel/extable.c b/kernel/extable.c index a17fdb63dc3e..6a5b61ebc66c 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) | |||
64 | return e; | 64 | return e; |
65 | } | 65 | } |
66 | 66 | ||
67 | static inline int init_kernel_text(unsigned long addr) | 67 | int init_kernel_text(unsigned long addr) |
68 | { | 68 | { |
69 | if (addr >= (unsigned long)_sinittext && | 69 | if (addr >= (unsigned long)_sinittext && |
70 | addr < (unsigned long)_einittext) | 70 | addr < (unsigned long)_einittext) |
diff --git a/kernel/fork.c b/kernel/fork.c index be8aa5b98666..e5d9d405ae4e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm) | |||
592 | * is dropped: either by a lazy thread or by | 592 | * is dropped: either by a lazy thread or by |
593 | * mmput. Free the page directory and the mm. | 593 | * mmput. Free the page directory and the mm. |
594 | */ | 594 | */ |
595 | static void __mmdrop(struct mm_struct *mm) | 595 | void __mmdrop(struct mm_struct *mm) |
596 | { | 596 | { |
597 | BUG_ON(mm == &init_mm); | 597 | BUG_ON(mm == &init_mm); |
598 | mm_free_pgd(mm); | 598 | mm_free_pgd(mm); |
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm) | |||
603 | put_user_ns(mm->user_ns); | 603 | put_user_ns(mm->user_ns); |
604 | free_mm(mm); | 604 | free_mm(mm); |
605 | } | 605 | } |
606 | 606 | EXPORT_SYMBOL_GPL(__mmdrop); | |
607 | void mmdrop(struct mm_struct *mm) | ||
608 | { | ||
609 | /* | ||
610 | * The implicit full barrier implied by atomic_dec_and_test() is | ||
611 | * required by the membarrier system call before returning to | ||
612 | * user-space, after storing to rq->curr. | ||
613 | */ | ||
614 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) | ||
615 | __mmdrop(mm); | ||
616 | } | ||
617 | EXPORT_SYMBOL_GPL(mmdrop); | ||
618 | 607 | ||
619 | static void mmdrop_async_fn(struct work_struct *work) | 608 | static void mmdrop_async_fn(struct work_struct *work) |
620 | { | 609 | { |
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 5187dfe809ac..4c5770407031 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c | |||
@@ -16,6 +16,7 @@ struct cpumap { | |||
16 | unsigned int available; | 16 | unsigned int available; |
17 | unsigned int allocated; | 17 | unsigned int allocated; |
18 | unsigned int managed; | 18 | unsigned int managed; |
19 | bool initialized; | ||
19 | bool online; | 20 | bool online; |
20 | unsigned long alloc_map[IRQ_MATRIX_SIZE]; | 21 | unsigned long alloc_map[IRQ_MATRIX_SIZE]; |
21 | unsigned long managed_map[IRQ_MATRIX_SIZE]; | 22 | unsigned long managed_map[IRQ_MATRIX_SIZE]; |
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m) | |||
81 | 82 | ||
82 | BUG_ON(cm->online); | 83 | BUG_ON(cm->online); |
83 | 84 | ||
84 | bitmap_zero(cm->alloc_map, m->matrix_bits); | 85 | if (!cm->initialized) { |
85 | cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc); | 86 | cm->available = m->alloc_size; |
86 | cm->allocated = 0; | 87 | cm->available -= cm->managed + m->systembits_inalloc; |
88 | cm->initialized = true; | ||
89 | } | ||
87 | m->global_available += cm->available; | 90 | m->global_available += cm->available; |
88 | cm->online = true; | 91 | cm->online = true; |
89 | m->online_maps++; | 92 | m->online_maps++; |
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, | |||
370 | if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) | 373 | if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) |
371 | return; | 374 | return; |
372 | 375 | ||
373 | if (cm->online) { | 376 | clear_bit(bit, cm->alloc_map); |
374 | clear_bit(bit, cm->alloc_map); | 377 | cm->allocated--; |
375 | cm->allocated--; | 378 | |
379 | if (cm->online) | ||
376 | m->total_allocated--; | 380 | m->total_allocated--; |
377 | if (!managed) { | 381 | |
378 | cm->available++; | 382 | if (!managed) { |
383 | cm->available++; | ||
384 | if (cm->online) | ||
379 | m->global_available++; | 385 | m->global_available++; |
380 | } | ||
381 | } | 386 | } |
382 | trace_irq_matrix_free(bit, cpu, m, cm); | 387 | trace_irq_matrix_free(bit, cpu, m, cm); |
383 | } | 388 | } |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index b4517095db6a..52a0a7af8640 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -366,12 +366,15 @@ static void __jump_label_update(struct static_key *key, | |||
366 | { | 366 | { |
367 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { | 367 | for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { |
368 | /* | 368 | /* |
369 | * entry->code set to 0 invalidates module init text sections | 369 | * An entry->code of 0 indicates an entry which has been |
370 | * kernel_text_address() verifies we are not in core kernel | 370 | * disabled because it was in an init text area. |
371 | * init code, see jump_label_invalidate_module_init(). | ||
372 | */ | 371 | */ |
373 | if (entry->code && kernel_text_address(entry->code)) | 372 | if (entry->code) { |
374 | arch_jump_label_transform(entry, jump_label_type(entry)); | 373 | if (kernel_text_address(entry->code)) |
374 | arch_jump_label_transform(entry, jump_label_type(entry)); | ||
375 | else | ||
376 | WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code); | ||
377 | } | ||
375 | } | 378 | } |
376 | } | 379 | } |
377 | 380 | ||
@@ -417,6 +420,19 @@ void __init jump_label_init(void) | |||
417 | cpus_read_unlock(); | 420 | cpus_read_unlock(); |
418 | } | 421 | } |
419 | 422 | ||
423 | /* Disable any jump label entries in __init code */ | ||
424 | void __init jump_label_invalidate_init(void) | ||
425 | { | ||
426 | struct jump_entry *iter_start = __start___jump_table; | ||
427 | struct jump_entry *iter_stop = __stop___jump_table; | ||
428 | struct jump_entry *iter; | ||
429 | |||
430 | for (iter = iter_start; iter < iter_stop; iter++) { | ||
431 | if (init_kernel_text(iter->code)) | ||
432 | iter->code = 0; | ||
433 | } | ||
434 | } | ||
435 | |||
420 | #ifdef CONFIG_MODULES | 436 | #ifdef CONFIG_MODULES |
421 | 437 | ||
422 | static enum jump_label_type jump_label_init_type(struct jump_entry *entry) | 438 | static enum jump_label_type jump_label_init_type(struct jump_entry *entry) |
@@ -633,6 +649,7 @@ static void jump_label_del_module(struct module *mod) | |||
633 | } | 649 | } |
634 | } | 650 | } |
635 | 651 | ||
652 | /* Disable any jump label entries in module init code */ | ||
636 | static void jump_label_invalidate_module_init(struct module *mod) | 653 | static void jump_label_invalidate_module_init(struct module *mod) |
637 | { | 654 | { |
638 | struct jump_entry *iter_start = mod->jump_entries; | 655 | struct jump_entry *iter_start = mod->jump_entries; |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index fc1123583fa6..f274fbef821d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -2397,7 +2397,7 @@ skip: | |||
2397 | 2397 | ||
2398 | if (console_lock_spinning_disable_and_check()) { | 2398 | if (console_lock_spinning_disable_and_check()) { |
2399 | printk_safe_exit_irqrestore(flags); | 2399 | printk_safe_exit_irqrestore(flags); |
2400 | return; | 2400 | goto out; |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | printk_safe_exit_irqrestore(flags); | 2403 | printk_safe_exit_irqrestore(flags); |
@@ -2430,6 +2430,7 @@ skip: | |||
2430 | if (retry && console_trylock()) | 2430 | if (retry && console_trylock()) |
2431 | goto again; | 2431 | goto again; |
2432 | 2432 | ||
2433 | out: | ||
2433 | if (wake_klogd) | 2434 | if (wake_klogd) |
2434 | wake_up_klogd(); | 2435 | wake_up_klogd(); |
2435 | } | 2436 | } |
diff --git a/kernel/relay.c b/kernel/relay.c index c3029402f15c..c955b10c973c 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) | |||
163 | { | 163 | { |
164 | struct rchan_buf *buf; | 164 | struct rchan_buf *buf; |
165 | 165 | ||
166 | if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) | 166 | if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) |
167 | return NULL; | 167 | return NULL; |
168 | 168 | ||
169 | buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); | 169 | buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); |
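
The one-line relay change above tightens the bound from "cannot overflow the size calculation" to "cannot exceed what the allocator can ever hand out". A hedged userspace sketch of that check, with an assumed ALLOC_MAX standing in for KMALLOC_MAX_SIZE:

#include <stdlib.h>

#define ALLOC_MAX (1UL << 22)          /* assumed stand-in for KMALLOC_MAX_SIZE */

/* Reject counts the allocator can never satisfy, not merely counts whose
 * byte size would overflow an unsigned int. */
static size_t *alloc_padding(size_t n_subbufs)
{
        if (n_subbufs > ALLOC_MAX / sizeof(size_t))
                return NULL;
        return calloc(n_subbufs, sizeof(size_t));
}

int main(void)
{
        return alloc_padding(ALLOC_MAX) ? 1 : 0;   /* too large: must return NULL */
}
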
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 940fa408a288..dc77548167ef 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task, | |||
1076 | 1076 | ||
1077 | size = min_t(unsigned long, size, sizeof(kmd)); | 1077 | size = min_t(unsigned long, size, sizeof(kmd)); |
1078 | 1078 | ||
1079 | if (copy_from_user(&kmd, data, size)) | 1079 | if (size < sizeof(kmd.filter_off)) |
1080 | return -EINVAL; | ||
1081 | |||
1082 | if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off))) | ||
1080 | return -EFAULT; | 1083 | return -EFAULT; |
1081 | 1084 | ||
1082 | filter = get_nth_filter(task, kmd.filter_off); | 1085 | filter = get_nth_filter(task, kmd.filter_off); |
1083 | if (IS_ERR(filter)) | 1086 | if (IS_ERR(filter)) |
1084 | return PTR_ERR(filter); | 1087 | return PTR_ERR(filter); |
1085 | 1088 | ||
1086 | memset(&kmd, 0, sizeof(kmd)); | ||
1087 | if (filter->log) | 1089 | if (filter->log) |
1088 | kmd.flags |= SECCOMP_FILTER_FLAG_LOG; | 1090 | kmd.flags |= SECCOMP_FILTER_FLAG_LOG; |
1089 | 1091 | ||
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index fc2838ac8b78..c0a9e310d715 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) | |||
872 | return -EINVAL; | 872 | return -EINVAL; |
873 | if (copy_from_user(&query, uquery, sizeof(query))) | 873 | if (copy_from_user(&query, uquery, sizeof(query))) |
874 | return -EFAULT; | 874 | return -EFAULT; |
875 | if (query.ids_len > BPF_TRACE_MAX_PROGS) | ||
876 | return -E2BIG; | ||
875 | 877 | ||
876 | mutex_lock(&bpf_event_mutex); | 878 | mutex_lock(&bpf_event_mutex); |
877 | ret = bpf_prog_array_copy_info(event->tp_event->prog_array, | 879 | ret = bpf_prog_array_copy_info(event->tp_event->prog_array, |
diff --git a/kernel/user.c b/kernel/user.c index 9a20acce460d..36288d840675 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -101,6 +101,7 @@ struct user_struct root_user = { | |||
101 | .sigpending = ATOMIC_INIT(0), | 101 | .sigpending = ATOMIC_INIT(0), |
102 | .locked_shm = 0, | 102 | .locked_shm = 0, |
103 | .uid = GLOBAL_ROOT_UID, | 103 | .uid = GLOBAL_ROOT_UID, |
104 | .ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0), | ||
104 | }; | 105 | }; |
105 | 106 | ||
106 | /* | 107 | /* |
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid) | |||
191 | 192 | ||
192 | new->uid = uid; | 193 | new->uid = uid; |
193 | atomic_set(&new->__count, 1); | 194 | atomic_set(&new->__count, 1); |
195 | ratelimit_state_init(&new->ratelimit, HZ, 100); | ||
196 | ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE); | ||
194 | 197 | ||
195 | /* | 198 | /* |
196 | * Before adding this, check whether we raced | 199 | * Before adding this, check whether we raced |
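
The kernel/user.c hunks above attach a per-user rate limit (interval HZ, burst 100) and flag it to report suppressed messages when the state is released. A rough userspace sketch of that interval/burst scheme, using second-granularity time in place of jiffies; the struct and helper names here are invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
        time_t interval;        /* window length in seconds (0 = unlimited, as for root_user) */
        int burst;              /* events allowed per window */
        time_t begin;           /* start of the current window */
        int printed;            /* events already allowed in this window */
};

static bool ratelimit_ok(struct ratelimit *rs)
{
        time_t now = time(NULL);

        if (rs->interval == 0)
                return true;                            /* limiting disabled */
        if (rs->begin == 0 || now - rs->begin >= rs->interval) {
                rs->begin = now;                        /* open a new window */
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return true;
        }
        return false;                                   /* suppressed; the kernel can report this on release */
}

int main(void)
{
        struct ratelimit rs = { .interval = 1, .burst = 3 };

        for (int i = 0; i < 10; i++)
                printf("event %d: %s\n", i, ratelimit_ok(&rs) ? "logged" : "suppressed");
        return 0;
}
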
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 017044c26233..bb9a519cbf50 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -4180,6 +4180,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
4180 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); | 4180 | EXPORT_SYMBOL_GPL(workqueue_set_max_active); |
4181 | 4181 | ||
4182 | /** | 4182 | /** |
4183 | * current_work - retrieve %current task's work struct | ||
4184 | * | ||
4185 | * Determine if %current task is a workqueue worker and what it's working on. | ||
4186 | * Useful to find out the context that the %current task is running in. | ||
4187 | * | ||
4188 | * Return: work struct if %current task is a workqueue worker, %NULL otherwise. | ||
4189 | */ | ||
4190 | struct work_struct *current_work(void) | ||
4191 | { | ||
4192 | struct worker *worker = current_wq_worker(); | ||
4193 | |||
4194 | return worker ? worker->current_work : NULL; | ||
4195 | } | ||
4196 | EXPORT_SYMBOL(current_work); | ||
4197 | |||
4198 | /** | ||
4183 | * current_is_workqueue_rescuer - is %current workqueue rescuer? | 4199 | * current_is_workqueue_rescuer - is %current workqueue rescuer? |
4184 | * | 4200 | * |
4185 | * Determine whether %current is a workqueue rescuer. Can be used from | 4201 | * Determine whether %current is a workqueue rescuer. Can be used from |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6088408ef26c..64155e310a9f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1642,6 +1642,7 @@ config DMA_API_DEBUG | |||
1642 | 1642 | ||
1643 | menuconfig RUNTIME_TESTING_MENU | 1643 | menuconfig RUNTIME_TESTING_MENU |
1644 | bool "Runtime Testing" | 1644 | bool "Runtime Testing" |
1645 | def_bool y | ||
1645 | 1646 | ||
1646 | if RUNTIME_TESTING_MENU | 1647 | if RUNTIME_TESTING_MENU |
1647 | 1648 | ||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 1b34d210452c..7f5cdc1e6b29 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
1491 | if (unlikely(virt == NULL)) | 1491 | if (unlikely(virt == NULL)) |
1492 | return; | 1492 | return; |
1493 | 1493 | ||
1494 | entry = dma_entry_alloc(); | 1494 | /* handle vmalloc and linear addresses */ |
1495 | if (!entry) | 1495 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1496 | return; | 1496 | return; |
1497 | 1497 | ||
1498 | /* handle vmalloc and linear addresses */ | 1498 | entry = dma_entry_alloc(); |
1499 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | 1499 | if (!entry) |
1500 | return; | 1500 | return; |
1501 | 1501 | ||
1502 | entry->type = dma_debug_coherent; | 1502 | entry->type = dma_debug_coherent; |
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
1528 | }; | 1528 | }; |
1529 | 1529 | ||
1530 | /* handle vmalloc and linear addresses */ | 1530 | /* handle vmalloc and linear addresses */ |
1531 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | 1531 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1532 | return; | 1532 | return; |
1533 | 1533 | ||
1534 | if (is_vmalloc_addr(virt)) | 1534 | if (is_vmalloc_addr(virt)) |
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, | |||
36 | { | 36 | { |
37 | struct radix_tree_iter iter; | 37 | struct radix_tree_iter iter; |
38 | void __rcu **slot; | 38 | void __rcu **slot; |
39 | int base = idr->idr_base; | 39 | unsigned int base = idr->idr_base; |
40 | int id = *nextid; | 40 | unsigned int id = *nextid; |
41 | 41 | ||
42 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) | 42 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) |
43 | return -EINVAL; | 43 | return -EINVAL; |
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr, | |||
204 | 204 | ||
205 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { | 205 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { |
206 | int ret; | 206 | int ret; |
207 | unsigned long id = iter.index + base; | ||
207 | 208 | ||
208 | if (WARN_ON_ONCE(iter.index > INT_MAX)) | 209 | if (WARN_ON_ONCE(id > INT_MAX)) |
209 | break; | 210 | break; |
210 | ret = fn(iter.index + base, rcu_dereference_raw(*slot), data); | 211 | ret = fn(id, rcu_dereference_raw(*slot), data); |
211 | if (ret) | 212 | if (ret) |
212 | return ret; | 213 | return ret; |
213 | } | 214 | } |
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid) | |||
230 | { | 231 | { |
231 | struct radix_tree_iter iter; | 232 | struct radix_tree_iter iter; |
232 | void __rcu **slot; | 233 | void __rcu **slot; |
233 | int base = idr->idr_base; | 234 | unsigned long base = idr->idr_base; |
234 | int id = *nextid; | 235 | unsigned long id = *nextid; |
235 | 236 | ||
236 | id = (id < base) ? 0 : id - base; | 237 | id = (id < base) ? 0 : id - base; |
237 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); | 238 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); |
@@ -431,7 +432,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id) | |||
431 | bitmap = this_cpu_xchg(ida_bitmap, NULL); | 432 | bitmap = this_cpu_xchg(ida_bitmap, NULL); |
432 | if (!bitmap) | 433 | if (!bitmap) |
433 | return -EAGAIN; | 434 | return -EAGAIN; |
434 | memset(bitmap, 0, sizeof(*bitmap)); | ||
435 | bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; | 435 | bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; |
436 | rcu_assign_pointer(*slot, bitmap); | 436 | rcu_assign_pointer(*slot, bitmap); |
437 | } | 437 | } |
@@ -464,7 +464,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id) | |||
464 | bitmap = this_cpu_xchg(ida_bitmap, NULL); | 464 | bitmap = this_cpu_xchg(ida_bitmap, NULL); |
465 | if (!bitmap) | 465 | if (!bitmap) |
466 | return -EAGAIN; | 466 | return -EAGAIN; |
467 | memset(bitmap, 0, sizeof(*bitmap)); | ||
468 | __set_bit(bit, bitmap->bitmap); | 467 | __set_bit(bit, bitmap->bitmap); |
469 | radix_tree_iter_replace(root, &iter, slot, bitmap); | 468 | radix_tree_iter_replace(root, &iter, slot, bitmap); |
470 | } | 469 | } |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 0a7ae3288a24..8e00138d593f 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -2125,7 +2125,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) | |||
2125 | preempt_enable(); | 2125 | preempt_enable(); |
2126 | 2126 | ||
2127 | if (!this_cpu_read(ida_bitmap)) { | 2127 | if (!this_cpu_read(ida_bitmap)) { |
2128 | struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); | 2128 | struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); |
2129 | if (!bitmap) | 2129 | if (!bitmap) |
2130 | return 0; | 2130 | return 0; |
2131 | if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) | 2131 | if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap)) |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 77ee6ced11b1..d7a708f82559 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1849 | { | 1849 | { |
1850 | const int default_width = 2 * sizeof(void *); | 1850 | const int default_width = 2 * sizeof(void *); |
1851 | 1851 | ||
1852 | if (!ptr && *fmt != 'K') { | 1852 | if (!ptr && *fmt != 'K' && *fmt != 'x') { |
1853 | /* | 1853 | /* |
1854 | * Print (null) with the same width as a pointer so it makes | 1854 | * Print (null) with the same width as a pointer so it makes |
1855 | * tabular output look nice. | 1855 | * tabular output look nice. |
diff --git a/mm/mlock.c b/mm/mlock.c index 79398200e423..74e5a6547c3d 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page) | |||
64 | mod_zone_page_state(page_zone(page), NR_MLOCK, | 64 | mod_zone_page_state(page_zone(page), NR_MLOCK, |
65 | -hpage_nr_pages(page)); | 65 | -hpage_nr_pages(page)); |
66 | count_vm_event(UNEVICTABLE_PGCLEARED); | 66 | count_vm_event(UNEVICTABLE_PGCLEARED); |
67 | /* | ||
68 | * The previous TestClearPageMlocked() corresponds to the smp_mb() | ||
69 | * in __pagevec_lru_add_fn(). | ||
70 | * | ||
71 | * See __pagevec_lru_add_fn for more explanation. | ||
72 | */ | ||
67 | if (!isolate_lru_page(page)) { | 73 | if (!isolate_lru_page(page)) { |
68 | putback_lru_page(page); | 74 | putback_lru_page(page); |
69 | } else { | 75 | } else { |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 81e18ceef579..cb416723538f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/stop_machine.h> | 46 | #include <linux/stop_machine.h> |
47 | #include <linux/sort.h> | 47 | #include <linux/sort.h> |
48 | #include <linux/pfn.h> | 48 | #include <linux/pfn.h> |
49 | #include <xen/xen.h> | ||
49 | #include <linux/backing-dev.h> | 50 | #include <linux/backing-dev.h> |
50 | #include <linux/fault-inject.h> | 51 | #include <linux/fault-inject.h> |
51 | #include <linux/page-isolation.h> | 52 | #include <linux/page-isolation.h> |
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat, | |||
347 | /* Always populate low zones for address-constrained allocations */ | 348 | /* Always populate low zones for address-constrained allocations */ |
348 | if (zone_end < pgdat_end_pfn(pgdat)) | 349 | if (zone_end < pgdat_end_pfn(pgdat)) |
349 | return true; | 350 | return true; |
351 | /* Xen PV domains need page structures early */ | ||
352 | if (xen_pv_domain()) | ||
353 | return true; | ||
350 | (*nr_initialised)++; | 354 | (*nr_initialised)++; |
351 | if ((*nr_initialised > pgdat->static_init_pgcnt) && | 355 | if ((*nr_initialised > pgdat->static_init_pgcnt) && |
352 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { | 356 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -446,30 +446,6 @@ void lru_cache_add(struct page *page) | |||
446 | } | 446 | } |
447 | 447 | ||
448 | /** | 448 | /** |
449 | * add_page_to_unevictable_list - add a page to the unevictable list | ||
450 | * @page: the page to be added to the unevictable list | ||
451 | * | ||
452 | * Add page directly to its zone's unevictable list. To avoid races with | ||
453 | * tasks that might be making the page evictable, through eg. munlock, | ||
454 | * munmap or exit, while it's not on the lru, we want to add the page | ||
455 | * while it's locked or otherwise "invisible" to other tasks. This is | ||
456 | * difficult to do when using the pagevec cache, so bypass that. | ||
457 | */ | ||
458 | void add_page_to_unevictable_list(struct page *page) | ||
459 | { | ||
460 | struct pglist_data *pgdat = page_pgdat(page); | ||
461 | struct lruvec *lruvec; | ||
462 | |||
463 | spin_lock_irq(&pgdat->lru_lock); | ||
464 | lruvec = mem_cgroup_page_lruvec(page, pgdat); | ||
465 | ClearPageActive(page); | ||
466 | SetPageUnevictable(page); | ||
467 | SetPageLRU(page); | ||
468 | add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE); | ||
469 | spin_unlock_irq(&pgdat->lru_lock); | ||
470 | } | ||
471 | |||
472 | /** | ||
473 | * lru_cache_add_active_or_unevictable | 449 | * lru_cache_add_active_or_unevictable |
474 | * @page: the page to be added to LRU | 450 | * @page: the page to be added to LRU |
475 | * @vma: vma in which page is mapped for determining reclaimability | 451 | * @vma: vma in which page is mapped for determining reclaimability |
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page, | |||
484 | { | 460 | { |
485 | VM_BUG_ON_PAGE(PageLRU(page), page); | 461 | VM_BUG_ON_PAGE(PageLRU(page), page); |
486 | 462 | ||
487 | if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) { | 463 | if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) |
488 | SetPageActive(page); | 464 | SetPageActive(page); |
489 | lru_cache_add(page); | 465 | else if (!TestSetPageMlocked(page)) { |
490 | return; | ||
491 | } | ||
492 | |||
493 | if (!TestSetPageMlocked(page)) { | ||
494 | /* | 466 | /* |
495 | * We use the irq-unsafe __mod_zone_page_stat because this | 467 | * We use the irq-unsafe __mod_zone_page_stat because this |
496 | * counter is not modified from interrupt context, and the pte | 468 | * counter is not modified from interrupt context, and the pte |
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page, | |||
500 | hpage_nr_pages(page)); | 472 | hpage_nr_pages(page)); |
501 | count_vm_event(UNEVICTABLE_PGMLOCKED); | 473 | count_vm_event(UNEVICTABLE_PGMLOCKED); |
502 | } | 474 | } |
503 | add_page_to_unevictable_list(page); | 475 | lru_cache_add(page); |
504 | } | 476 | } |
505 | 477 | ||
506 | /* | 478 | /* |
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, | |||
886 | static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, | 858 | static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, |
887 | void *arg) | 859 | void *arg) |
888 | { | 860 | { |
889 | int file = page_is_file_cache(page); | 861 | enum lru_list lru; |
890 | int active = PageActive(page); | 862 | int was_unevictable = TestClearPageUnevictable(page); |
891 | enum lru_list lru = page_lru(page); | ||
892 | 863 | ||
893 | VM_BUG_ON_PAGE(PageLRU(page), page); | 864 | VM_BUG_ON_PAGE(PageLRU(page), page); |
894 | 865 | ||
895 | SetPageLRU(page); | 866 | SetPageLRU(page); |
867 | /* | ||
868 | * Page becomes evictable in two ways: | ||
869 | * 1) Within LRU lock [munlock_vma_pages() and __munlock_pagevec()]. | ||
870 | * 2) Before acquiring LRU lock to put the page to correct LRU and then | ||
871 | * a) do PageLRU check with lock [check_move_unevictable_pages] | ||
872 | * b) do PageLRU check before lock [clear_page_mlock] | ||
873 | * | ||
874 | * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need | ||
875 | * following strict ordering: | ||
876 | * | ||
877 | * #0: __pagevec_lru_add_fn #1: clear_page_mlock | ||
878 | * | ||
879 | * SetPageLRU() TestClearPageMlocked() | ||
880 | * smp_mb() // explicit ordering // above provides strict | ||
881 | * // ordering | ||
882 | * PageMlocked() PageLRU() | ||
883 | * | ||
884 | * | ||
885 | * if '#1' does not observe setting of PG_lru by '#0' and fails | ||
886 | * isolation, the explicit barrier will make sure that page_evictable | ||
887 | * check will put the page in correct LRU. Without smp_mb(), SetPageLRU | ||
888 | * can be reordered after PageMlocked check and can make '#1' to fail | ||
889 | * the isolation of the page whose Mlocked bit is cleared (#0 is also | ||
890 | * looking at the same page) and the evictable page will be stranded | ||
891 | * in an unevictable LRU. | ||
892 | */ | ||
893 | smp_mb(); | ||
894 | |||
895 | if (page_evictable(page)) { | ||
896 | lru = page_lru(page); | ||
897 | update_page_reclaim_stat(lruvec, page_is_file_cache(page), | ||
898 | PageActive(page)); | ||
899 | if (was_unevictable) | ||
900 | count_vm_event(UNEVICTABLE_PGRESCUED); | ||
901 | } else { | ||
902 | lru = LRU_UNEVICTABLE; | ||
903 | ClearPageActive(page); | ||
904 | SetPageUnevictable(page); | ||
905 | if (!was_unevictable) | ||
906 | count_vm_event(UNEVICTABLE_PGCULLED); | ||
907 | } | ||
908 | |||
896 | add_page_to_lru_list(page, lruvec, lru); | 909 | add_page_to_lru_list(page, lruvec, lru); |
897 | update_page_reclaim_stat(lruvec, file, active); | ||
898 | trace_mm_lru_insertion(page, lru); | 910 | trace_mm_lru_insertion(page, lru); |
899 | } | 911 | } |
900 | 912 | ||
@@ -913,7 +925,7 @@ EXPORT_SYMBOL(__pagevec_lru_add); | |||
913 | * @pvec: Where the resulting entries are placed | 925 | * @pvec: Where the resulting entries are placed |
914 | * @mapping: The address_space to search | 926 | * @mapping: The address_space to search |
915 | * @start: The starting entry index | 927 | * @start: The starting entry index |
916 | * @nr_pages: The maximum number of pages | 928 | * @nr_entries: The maximum number of pages |
917 | * @indices: The cache indices corresponding to the entries in @pvec | 929 | * @indices: The cache indices corresponding to the entries in @pvec |
918 | * | 930 | * |
919 | * pagevec_lookup_entries() will search for and return a group of up | 931 | * pagevec_lookup_entries() will search for and return a group of up |
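
The ordering comment added to __pagevec_lru_add_fn() above is the classic store-buffering pattern: each side stores its flag, executes a full barrier, then reads the other side's flag, so at least one side must observe the other. A userspace C11 sketch of that pattern (pthreads harness; seq_cst fences stand in for smp_mb() and for the full ordering a value-returning kernel RMW implies) — the variable names mirror the comment, but none of this is kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 200000

static atomic_int lru;                 /* PG_lru:     initially clear */
static atomic_int mlocked;             /* PG_mlocked: initially set   */
static int r0, r1, stranded;
static pthread_barrier_t bar;

/* #0: the __pagevec_lru_add_fn() side */
static void *cpu0_fn(void *arg)
{
        (void)arg;
        for (int i = 0; i < ITERS; i++) {
                pthread_barrier_wait(&bar);                                /* start together  */
                atomic_store_explicit(&lru, 1, memory_order_relaxed);      /* SetPageLRU()    */
                atomic_thread_fence(memory_order_seq_cst);                 /* smp_mb()        */
                r0 = atomic_load_explicit(&mlocked, memory_order_relaxed); /* PageMlocked()   */
                pthread_barrier_wait(&bar);                                /* both sides done */
                pthread_barrier_wait(&bar);                                /* wait for reset  */
        }
        return NULL;
}

/* #1: the clear_page_mlock() side */
static void *cpu1_fn(void *arg)
{
        (void)arg;
        for (int i = 0; i < ITERS; i++) {
                pthread_barrier_wait(&bar);
                atomic_store_explicit(&mlocked, 0, memory_order_relaxed);  /* TestClearPageMlocked() */
                atomic_thread_fence(memory_order_seq_cst);                 /* the RMW's implied full barrier */
                r1 = atomic_load_explicit(&lru, memory_order_relaxed);     /* PageLRU()       */
                pthread_barrier_wait(&bar);
                if (r0 == 1 && r1 == 0)        /* neither side saw the other: the page would be stranded */
                        stranded++;
                atomic_store_explicit(&lru, 0, memory_order_relaxed);      /* reset for the next run */
                atomic_store_explicit(&mlocked, 1, memory_order_relaxed);
                r0 = r1 = 0;
                pthread_barrier_wait(&bar);
        }
        return NULL;
}

int main(void)
{
        pthread_t t0, t1;

        atomic_store(&mlocked, 1);
        pthread_barrier_init(&bar, NULL, 2);
        pthread_create(&t0, NULL, cpu0_fn, NULL);
        pthread_create(&t1, NULL, cpu1_fn, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        printf("stranded outcomes (must be 0): %d\n", stranded);
        pthread_barrier_destroy(&bar);
        return 0;
}
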
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 673942094328..ebff729cc956 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size) | |||
1943 | } | 1943 | } |
1944 | 1944 | ||
1945 | #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) | 1945 | #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) |
1946 | #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL | 1946 | #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) |
1947 | #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) | 1947 | #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) |
1948 | #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL | 1948 | #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) |
1949 | #else | 1949 | #else |
1950 | #define GFP_VMALLOC32 GFP_KERNEL | 1950 | /* |
1951 | * 64b systems should always have either DMA or DMA32 zones. For others | ||
1952 | * GFP_DMA32 should do the right thing and use the normal zone. | ||
1953 | */ | ||
1954 | #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL | ||
1951 | #endif | 1955 | #endif |
1952 | 1956 | ||
1953 | /** | 1957 | /** |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 444749669187..bee53495a829 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page) | |||
769 | */ | 769 | */ |
770 | void putback_lru_page(struct page *page) | 770 | void putback_lru_page(struct page *page) |
771 | { | 771 | { |
772 | bool is_unevictable; | 772 | lru_cache_add(page); |
773 | int was_unevictable = PageUnevictable(page); | ||
774 | |||
775 | VM_BUG_ON_PAGE(PageLRU(page), page); | ||
776 | |||
777 | redo: | ||
778 | ClearPageUnevictable(page); | ||
779 | |||
780 | if (page_evictable(page)) { | ||
781 | /* | ||
782 | * For evictable pages, we can use the cache. | ||
783 | * In event of a race, worst case is we end up with an | ||
784 | * unevictable page on [in]active list. | ||
785 | * We know how to handle that. | ||
786 | */ | ||
787 | is_unevictable = false; | ||
788 | lru_cache_add(page); | ||
789 | } else { | ||
790 | /* | ||
791 | * Put unevictable pages directly on zone's unevictable | ||
792 | * list. | ||
793 | */ | ||
794 | is_unevictable = true; | ||
795 | add_page_to_unevictable_list(page); | ||
796 | /* | ||
797 | * When racing with an mlock or AS_UNEVICTABLE clearing | ||
798 | * (page is unlocked) make sure that if the other thread | ||
799 | * does not observe our setting of PG_lru and fails | ||
800 | * isolation/check_move_unevictable_pages, | ||
801 | * we see PG_mlocked/AS_UNEVICTABLE cleared below and move | ||
802 | * the page back to the evictable list. | ||
803 | * | ||
804 | * The other side is TestClearPageMlocked() or shmem_lock(). | ||
805 | */ | ||
806 | smp_mb(); | ||
807 | } | ||
808 | |||
809 | /* | ||
810 | * page's status can change while we move it among lru. If an evictable | ||
811 | * page is on unevictable list, it never be freed. To avoid that, | ||
812 | * check after we added it to the list, again. | ||
813 | */ | ||
814 | if (is_unevictable && page_evictable(page)) { | ||
815 | if (!isolate_lru_page(page)) { | ||
816 | put_page(page); | ||
817 | goto redo; | ||
818 | } | ||
819 | /* This means someone else dropped this page from LRU | ||
820 | * So, it will be freed or putback to LRU again. There is | ||
821 | * nothing to do here. | ||
822 | */ | ||
823 | } | ||
824 | |||
825 | if (was_unevictable && !is_unevictable) | ||
826 | count_vm_event(UNEVICTABLE_PGRESCUED); | ||
827 | else if (!was_unevictable && is_unevictable) | ||
828 | count_vm_event(UNEVICTABLE_PGCULLED); | ||
829 | |||
830 | put_page(page); /* drop ref from isolate */ | 773 | put_page(page); /* drop ref from isolate */ |
831 | } | 774 | } |
832 | 775 | ||
diff --git a/mm/zpool.c b/mm/zpool.c index f8cb83e7699b..01a771e304fa 100644 --- a/mm/zpool.c +++ b/mm/zpool.c | |||
@@ -360,7 +360,7 @@ u64 zpool_get_total_size(struct zpool *zpool) | |||
360 | 360 | ||
361 | /** | 361 | /** |
362 | * zpool_evictable() - Test if zpool is potentially evictable | 362 | * zpool_evictable() - Test if zpool is potentially evictable |
363 | * @pool The zpool to test | 363 | * @zpool: The zpool to test |
364 | * | 364 | * |
365 | * Zpool is only potentially evictable when it's created with struct | 365 | * Zpool is only potentially evictable when it's created with struct |
366 | * zpool_ops.evict and its driver implements struct zpool_driver.shrink. | 366 | * zpool_ops.evict and its driver implements struct zpool_driver.shrink. |
diff --git a/mm/zswap.c b/mm/zswap.c index c004aa4fd3f4..61a5c41972db 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -1007,6 +1007,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, | |||
1007 | u8 *src, *dst; | 1007 | u8 *src, *dst; |
1008 | struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) }; | 1008 | struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) }; |
1009 | 1009 | ||
1010 | /* THP isn't supported */ | ||
1011 | if (PageTransHuge(page)) { | ||
1012 | ret = -EINVAL; | ||
1013 | goto reject; | ||
1014 | } | ||
1015 | |||
1010 | if (!zswap_enabled || !tree) { | 1016 | if (!zswap_enabled || !tree) { |
1011 | ret = -ENODEV; | 1017 | ret = -ENODEV; |
1012 | goto reject; | 1018 | goto reject; |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 0254c35b2bf0..126a8ea73c96 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj, | |||
255 | struct brport_attribute *brport_attr = to_brport_attr(attr); | 255 | struct brport_attribute *brport_attr = to_brport_attr(attr); |
256 | struct net_bridge_port *p = to_brport(kobj); | 256 | struct net_bridge_port *p = to_brport(kobj); |
257 | 257 | ||
258 | if (!brport_attr->show) | ||
259 | return -EINVAL; | ||
260 | |||
258 | return brport_attr->show(p, buf); | 261 | return brport_attr->show(p, buf); |
259 | } | 262 | } |
260 | 263 | ||
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index 279527f8b1fe..ce7152a12bd8 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c | |||
@@ -187,17 +187,17 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par) | |||
187 | expected_length += ebt_mac_wormhash_size(wh_src); | 187 | expected_length += ebt_mac_wormhash_size(wh_src); |
188 | 188 | ||
189 | if (em->match_size != EBT_ALIGN(expected_length)) { | 189 | if (em->match_size != EBT_ALIGN(expected_length)) { |
190 | pr_info("wrong size: %d against expected %d, rounded to %zd\n", | 190 | pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n", |
191 | em->match_size, expected_length, | 191 | em->match_size, expected_length, |
192 | EBT_ALIGN(expected_length)); | 192 | EBT_ALIGN(expected_length)); |
193 | return -EINVAL; | 193 | return -EINVAL; |
194 | } | 194 | } |
195 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { | 195 | if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { |
196 | pr_info("dst integrity fail: %x\n", -err); | 196 | pr_err_ratelimited("dst integrity fail: %x\n", -err); |
197 | return -EINVAL; | 197 | return -EINVAL; |
198 | } | 198 | } |
199 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { | 199 | if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { |
200 | pr_info("src integrity fail: %x\n", -err); | 200 | pr_err_ratelimited("src integrity fail: %x\n", -err); |
201 | return -EINVAL; | 201 | return -EINVAL; |
202 | } | 202 | } |
203 | return 0; | 203 | return 0; |
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index 61a9f1be1263..165b9d678cf1 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c | |||
@@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par) | |||
72 | /* Check for overflow. */ | 72 | /* Check for overflow. */ |
73 | if (info->burst == 0 || | 73 | if (info->burst == 0 || |
74 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { | 74 | user2credits(info->avg * info->burst) < user2credits(info->avg)) { |
75 | pr_info("overflow, try lower: %u/%u\n", | 75 | pr_info_ratelimited("overflow, try lower: %u/%u\n", |
76 | info->avg, info->burst); | 76 | info->avg, info->burst); |
77 | return -EINVAL; | 77 | return -EINVAL; |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 1e492ef2a33d..4d4c82229e9e 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -418,6 +418,7 @@ ceph_parse_options(char *options, const char *dev_name, | |||
418 | opt->flags |= CEPH_OPT_FSID; | 418 | opt->flags |= CEPH_OPT_FSID; |
419 | break; | 419 | break; |
420 | case Opt_name: | 420 | case Opt_name: |
421 | kfree(opt->name); | ||
421 | opt->name = kstrndup(argstr[0].from, | 422 | opt->name = kstrndup(argstr[0].from, |
422 | argstr[0].to-argstr[0].from, | 423 | argstr[0].to-argstr[0].from, |
423 | GFP_KERNEL); | 424 | GFP_KERNEL); |
@@ -427,6 +428,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
427 | } | 428 | } |
428 | break; | 429 | break; |
429 | case Opt_secret: | 430 | case Opt_secret: |
431 | ceph_crypto_key_destroy(opt->key); | ||
432 | kfree(opt->key); | ||
433 | |||
430 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 434 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
431 | if (!opt->key) { | 435 | if (!opt->key) { |
432 | err = -ENOMEM; | 436 | err = -ENOMEM; |
@@ -437,6 +441,9 @@ ceph_parse_options(char *options, const char *dev_name, | |||
437 | goto out; | 441 | goto out; |
438 | break; | 442 | break; |
439 | case Opt_key: | 443 | case Opt_key: |
444 | ceph_crypto_key_destroy(opt->key); | ||
445 | kfree(opt->key); | ||
446 | |||
440 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); | 447 | opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); |
441 | if (!opt->key) { | 448 | if (!opt->key) { |
442 | err = -ENOMEM; | 449 | err = -ENOMEM; |
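
The ceph_parse_options() hunks above free whatever an earlier occurrence of the same option stored before overwriting the pointer, so repeating name/secret/key in the option string no longer leaks. A tiny hedged sketch of that replace-the-previous-value idiom in plain C (option names here are illustrative):

#include <stdlib.h>
#include <string.h>

/* Release the value stored by any earlier occurrence of the option before
 * the new value takes its place; the last occurrence wins, nothing leaks. */
static int set_option(char **slot, const char *value)
{
        char *copy = strdup(value);

        if (!copy)
                return -1;
        free(*slot);            /* NULL on the first occurrence; free(NULL) is a no-op */
        *slot = copy;
        return 0;
}

int main(void)
{
        char *name = NULL;

        set_option(&name, "client.admin");
        set_option(&name, "client.backup");   /* the first value is freed here */
        free(name);
        return 0;
}
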
diff --git a/net/core/dev.c b/net/core/dev.c index dda9d7b9a840..d4362befe7e2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc); | |||
2382 | */ | 2382 | */ |
2383 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | 2383 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) |
2384 | { | 2384 | { |
2385 | bool disabling; | ||
2385 | int rc; | 2386 | int rc; |
2386 | 2387 | ||
2388 | disabling = txq < dev->real_num_tx_queues; | ||
2389 | |||
2387 | if (txq < 1 || txq > dev->num_tx_queues) | 2390 | if (txq < 1 || txq > dev->num_tx_queues) |
2388 | return -EINVAL; | 2391 | return -EINVAL; |
2389 | 2392 | ||
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | |||
2399 | if (dev->num_tc) | 2402 | if (dev->num_tc) |
2400 | netif_setup_tc(dev, txq); | 2403 | netif_setup_tc(dev, txq); |
2401 | 2404 | ||
2402 | if (txq < dev->real_num_tx_queues) { | 2405 | dev->real_num_tx_queues = txq; |
2406 | |||
2407 | if (disabling) { | ||
2408 | synchronize_net(); | ||
2403 | qdisc_reset_all_tx_gt(dev, txq); | 2409 | qdisc_reset_all_tx_gt(dev, txq); |
2404 | #ifdef CONFIG_XPS | 2410 | #ifdef CONFIG_XPS |
2405 | netif_reset_xps_queues_gt(dev, txq); | 2411 | netif_reset_xps_queues_gt(dev, txq); |
2406 | #endif | 2412 | #endif |
2407 | } | 2413 | } |
2414 | } else { | ||
2415 | dev->real_num_tx_queues = txq; | ||
2408 | } | 2416 | } |
2409 | 2417 | ||
2410 | dev->real_num_tx_queues = txq; | ||
2411 | return 0; | 2418 | return 0; |
2412 | } | 2419 | } |
2413 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); | 2420 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); |
diff --git a/net/core/filter.c b/net/core/filter.c index 08ab4c65a998..0c121adbdbaa 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, | |||
3381 | struct sock *sk = bpf_sock->sk; | 3381 | struct sock *sk = bpf_sock->sk; |
3382 | int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; | 3382 | int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; |
3383 | 3383 | ||
3384 | if (!sk_fullsock(sk)) | 3384 | if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) |
3385 | return -EINVAL; | 3385 | return -EINVAL; |
3386 | 3386 | ||
3387 | #ifdef CONFIG_INET | ||
3388 | if (val) | 3387 | if (val) |
3389 | tcp_sk(sk)->bpf_sock_ops_cb_flags = val; | 3388 | tcp_sk(sk)->bpf_sock_ops_cb_flags = val; |
3390 | 3389 | ||
3391 | return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); | 3390 | return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); |
3392 | #else | ||
3393 | return -EINVAL; | ||
3394 | #endif | ||
3395 | } | 3391 | } |
3396 | 3392 | ||
3397 | static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { | 3393 | static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 0a3f88f08727..98fd12721221 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -66,6 +66,7 @@ struct net_rate_estimator { | |||
66 | static void est_fetch_counters(struct net_rate_estimator *e, | 66 | static void est_fetch_counters(struct net_rate_estimator *e, |
67 | struct gnet_stats_basic_packed *b) | 67 | struct gnet_stats_basic_packed *b) |
68 | { | 68 | { |
69 | memset(b, 0, sizeof(*b)); | ||
69 | if (e->stats_lock) | 70 | if (e->stats_lock) |
70 | spin_lock(e->stats_lock); | 71 | spin_lock(e->stats_lock); |
71 | 72 | ||
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 91dd09f79808..791aff68af88 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use | |||
1338 | lock_sock(sk); | 1338 | lock_sock(sk); |
1339 | err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); | 1339 | err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); |
1340 | release_sock(sk); | 1340 | release_sock(sk); |
1341 | #ifdef CONFIG_NETFILTER | ||
1342 | /* we need to exclude all possible ENOPROTOOPTs except default case */ | ||
1343 | if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && | ||
1344 | optname != DSO_STREAM && optname != DSO_SEQPACKET) | ||
1345 | err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); | ||
1346 | #endif | ||
1341 | 1347 | ||
1342 | return err; | 1348 | return err; |
1343 | } | 1349 | } |
@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us | |||
1445 | dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); | 1451 | dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); |
1446 | break; | 1452 | break; |
1447 | 1453 | ||
1448 | default: | ||
1449 | #ifdef CONFIG_NETFILTER | ||
1450 | return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); | ||
1451 | #endif | ||
1452 | case DSO_LINKINFO: | ||
1453 | case DSO_STREAM: | ||
1454 | case DSO_SEQPACKET: | ||
1455 | return -ENOPROTOOPT; | ||
1456 | |||
1457 | case DSO_MAXWINDOW: | 1454 | case DSO_MAXWINDOW: |
1458 | if (optlen != sizeof(unsigned long)) | 1455 | if (optlen != sizeof(unsigned long)) |
1459 | return -EINVAL; | 1456 | return -EINVAL; |
@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us | |||
1501 | return -EINVAL; | 1498 | return -EINVAL; |
1502 | scp->info_loc = u.info; | 1499 | scp->info_loc = u.info; |
1503 | break; | 1500 | break; |
1501 | |||
1502 | case DSO_LINKINFO: | ||
1503 | case DSO_STREAM: | ||
1504 | case DSO_SEQPACKET: | ||
1505 | default: | ||
1506 | return -ENOPROTOOPT; | ||
1504 | } | 1507 | } |
1505 | 1508 | ||
1506 | return 0; | 1509 | return 0; |
@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use | |||
1514 | lock_sock(sk); | 1517 | lock_sock(sk); |
1515 | err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); | 1518 | err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); |
1516 | release_sock(sk); | 1519 | release_sock(sk); |
1520 | #ifdef CONFIG_NETFILTER | ||
1521 | if (err == -ENOPROTOOPT && optname != DSO_STREAM && | ||
1522 | optname != DSO_SEQPACKET && optname != DSO_CONACCEPT && | ||
1523 | optname != DSO_CONREJECT) { | ||
1524 | int len; | ||
1525 | |||
1526 | if (get_user(len, optlen)) | ||
1527 | return -EFAULT; | ||
1528 | |||
1529 | err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); | ||
1530 | if (err >= 0) | ||
1531 | err = put_user(len, optlen); | ||
1532 | } | ||
1533 | #endif | ||
1517 | 1534 | ||
1518 | return err; | 1535 | return err; |
1519 | } | 1536 | } |
@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us | |||
1579 | r_data = &link; | 1596 | r_data = &link; |
1580 | break; | 1597 | break; |
1581 | 1598 | ||
1582 | default: | ||
1583 | #ifdef CONFIG_NETFILTER | ||
1584 | { | ||
1585 | int ret, len; | ||
1586 | |||
1587 | if (get_user(len, optlen)) | ||
1588 | return -EFAULT; | ||
1589 | |||
1590 | ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); | ||
1591 | if (ret >= 0) | ||
1592 | ret = put_user(len, optlen); | ||
1593 | return ret; | ||
1594 | } | ||
1595 | #endif | ||
1596 | case DSO_STREAM: | ||
1597 | case DSO_SEQPACKET: | ||
1598 | case DSO_CONACCEPT: | ||
1599 | case DSO_CONREJECT: | ||
1600 | return -ENOPROTOOPT; | ||
1601 | |||
1602 | case DSO_MAXWINDOW: | 1599 | case DSO_MAXWINDOW: |
1603 | if (r_len > sizeof(unsigned long)) | 1600 | if (r_len > sizeof(unsigned long)) |
1604 | r_len = sizeof(unsigned long); | 1601 | r_len = sizeof(unsigned long); |
@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us | |||
1630 | r_len = sizeof(unsigned char); | 1627 | r_len = sizeof(unsigned char); |
1631 | r_data = &scp->info_rem; | 1628 | r_data = &scp->info_rem; |
1632 | break; | 1629 | break; |
1630 | |||
1631 | case DSO_STREAM: | ||
1632 | case DSO_SEQPACKET: | ||
1633 | case DSO_CONACCEPT: | ||
1634 | case DSO_CONREJECT: | ||
1635 | default: | ||
1636 | return -ENOPROTOOPT; | ||
1633 | } | 1637 | } |
1634 | 1638 | ||
1635 | if (r_data) { | 1639 | if (r_data) { |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index c586597da20d..7d36a950d961 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, | |||
646 | fi->fib_nh, cfg, extack)) | 646 | fi->fib_nh, cfg, extack)) |
647 | return 1; | 647 | return 1; |
648 | } | 648 | } |
649 | #ifdef CONFIG_IP_ROUTE_CLASSID | ||
650 | if (cfg->fc_flow && | ||
651 | cfg->fc_flow != fi->fib_nh->nh_tclassid) | ||
652 | return 1; | ||
653 | #endif | ||
649 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && | 654 | if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && |
650 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) | 655 | (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) |
651 | return 0; | 656 | return 0; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 008be04ac1cc..9c41a0cef1a5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -1567,10 +1567,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1567 | if (get_user(len, optlen)) | 1567 | if (get_user(len, optlen)) |
1568 | return -EFAULT; | 1568 | return -EFAULT; |
1569 | 1569 | ||
1570 | lock_sock(sk); | 1570 | err = nf_getsockopt(sk, PF_INET, optname, optval, &len); |
1571 | err = nf_getsockopt(sk, PF_INET, optname, optval, | ||
1572 | &len); | ||
1573 | release_sock(sk); | ||
1574 | if (err >= 0) | 1571 | if (err >= 0) |
1575 | err = put_user(len, optlen); | 1572 | err = put_user(len, optlen); |
1576 | return err; | 1573 | return err; |
@@ -1602,9 +1599,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1602 | if (get_user(len, optlen)) | 1599 | if (get_user(len, optlen)) |
1603 | return -EFAULT; | 1600 | return -EFAULT; |
1604 | 1601 | ||
1605 | lock_sock(sk); | ||
1606 | err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); | 1602 | err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); |
1607 | release_sock(sk); | ||
1608 | if (err >= 0) | 1603 | if (err >= 0) |
1609 | err = put_user(len, optlen); | 1604 | err = put_user(len, optlen); |
1610 | return err; | 1605 | return err; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4ffe302f9b82..e3e420f3ba7b 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
252 | } | 252 | } |
253 | if (table_base + v | 253 | if (table_base + v |
254 | != arpt_next_entry(e)) { | 254 | != arpt_next_entry(e)) { |
255 | if (unlikely(stackidx >= private->stacksize)) { | ||
256 | verdict = NF_DROP; | ||
257 | break; | ||
258 | } | ||
255 | jumpstack[stackidx++] = e; | 259 | jumpstack[stackidx++] = e; |
256 | } | 260 | } |
257 | 261 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 9a71f3149507..e38395a8dcf2 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb, | |||
330 | continue; | 330 | continue; |
331 | } | 331 | } |
332 | if (table_base + v != ipt_next_entry(e) && | 332 | if (table_base + v != ipt_next_entry(e) && |
333 | !(e->ip.flags & IPT_F_GOTO)) | 333 | !(e->ip.flags & IPT_F_GOTO)) { |
334 | if (unlikely(stackidx >= private->stacksize)) { | ||
335 | verdict = NF_DROP; | ||
336 | break; | ||
337 | } | ||
334 | jumpstack[stackidx++] = e; | 338 | jumpstack[stackidx++] = e; |
339 | } | ||
335 | 340 | ||
336 | e = get_entry(table_base, v); | 341 | e = get_entry(table_base, v); |
337 | continue; | 342 | continue; |
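
The arp_tables/ip_tables hunks above guard the jump stack push: a jump beyond the pre-sized capacity now drops the packet instead of writing past the end of the array. A small hedged sketch of that bounded-push pattern (struct and field names are invented; in the kernel the capacity is private->stacksize and the failure becomes NF_DROP):

#include <stdbool.h>
#include <stddef.h>

struct jumpstack {
        const void **entries;
        size_t size;            /* pre-computed capacity */
        size_t idx;
};

/* Refuse to push past the capacity rather than corrupt adjacent memory. */
static bool jumpstack_push(struct jumpstack *st, const void *e)
{
        if (st->idx >= st->size)
                return false;   /* caller treats this as a fatal verdict */
        st->entries[st->idx++] = e;
        return true;
}

int main(void)
{
        const void *slots[2];
        struct jumpstack st = { slots, 2, 0 };

        return jumpstack_push(&st, slots) && jumpstack_push(&st, slots) &&
               !jumpstack_push(&st, slots) ? 0 : 1;   /* the third push must fail */
}
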
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 3a84a60f6b39..4b02ab39ebc5 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) | |||
107 | 107 | ||
108 | local_bh_disable(); | 108 | local_bh_disable(); |
109 | if (refcount_dec_and_lock(&c->entries, &cn->lock)) { | 109 | if (refcount_dec_and_lock(&c->entries, &cn->lock)) { |
110 | list_del_rcu(&c->list); | ||
111 | spin_unlock(&cn->lock); | ||
112 | local_bh_enable(); | ||
113 | |||
114 | unregister_netdevice_notifier(&c->notifier); | ||
115 | |||
116 | /* In case anyone still accesses the file, the open/close | 110 | /* In case anyone still accesses the file, the open/close |
117 | * functions are also incrementing the refcount on their own, | 111 | * functions are also incrementing the refcount on their own, |
118 | * so it's safe to remove the entry even if it's in use. */ | 112 | * so it's safe to remove the entry even if it's in use. */ |
@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) | |||
120 | if (cn->procdir) | 114 | if (cn->procdir) |
121 | proc_remove(c->pde); | 115 | proc_remove(c->pde); |
122 | #endif | 116 | #endif |
117 | list_del_rcu(&c->list); | ||
118 | spin_unlock(&cn->lock); | ||
119 | local_bh_enable(); | ||
120 | |||
121 | unregister_netdevice_notifier(&c->notifier); | ||
122 | |||
123 | return; | 123 | return; |
124 | } | 124 | } |
125 | local_bh_enable(); | 125 | local_bh_enable(); |
@@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry) | |||
154 | #endif | 154 | #endif |
155 | if (unlikely(!refcount_inc_not_zero(&c->refcount))) | 155 | if (unlikely(!refcount_inc_not_zero(&c->refcount))) |
156 | c = NULL; | 156 | c = NULL; |
157 | else if (entry) | 157 | else if (entry) { |
158 | refcount_inc(&c->entries); | 158 | if (unlikely(!refcount_inc_not_zero(&c->entries))) { |
159 | clusterip_config_put(c); | ||
160 | c = NULL; | ||
161 | } | ||
162 | } | ||
159 | } | 163 | } |
160 | rcu_read_unlock_bh(); | 164 | rcu_read_unlock_bh(); |
161 | 165 | ||
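The CLUSTERIP hunk replaces the unconditional refcount_inc() on c->entries with refcount_inc_not_zero(), so a config whose entry count has already dropped to zero (and is being torn down) can no longer be revived from the lookup path. A rough userspace illustration of the "take a reference only if still non-zero" idea using C11 atomics; this shows the concept only, not the kernel's refcount_t implementation:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only if the object is still live (count > 0). */
    static bool ref_get_not_zero(atomic_uint *refs)
    {
        unsigned int old = atomic_load(refs);

        do {
            if (old == 0)
                return false;   /* object already on its way out */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
    }

    int main(void)
    {
        atomic_uint live = 1, dead = 0;

        printf("live: %s\n", ref_get_not_zero(&live) ? "got ref" : "refused");
        printf("dead: %s\n", ref_get_not_zero(&dead) ? "got ref" : "refused");
        return 0;
    }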
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 270765236f5e..aaaf9a81fbc9 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par) | |||
98 | const struct ipt_ECN_info *einfo = par->targinfo; | 98 | const struct ipt_ECN_info *einfo = par->targinfo; |
99 | const struct ipt_entry *e = par->entryinfo; | 99 | const struct ipt_entry *e = par->entryinfo; |
100 | 100 | ||
101 | if (einfo->operation & IPT_ECN_OP_MASK) { | 101 | if (einfo->operation & IPT_ECN_OP_MASK) |
102 | pr_info("unsupported ECN operation %x\n", einfo->operation); | ||
103 | return -EINVAL; | 102 | return -EINVAL; |
104 | } | 103 | |
105 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { | 104 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) |
106 | pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect); | ||
107 | return -EINVAL; | 105 | return -EINVAL; |
108 | } | 106 | |
109 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && | 107 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) && |
110 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { | 108 | (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { |
111 | pr_info("cannot use TCP operations on a non-tcp rule\n"); | 109 | pr_info_ratelimited("cannot use operation on non-tcp rule\n"); |
112 | return -EINVAL; | 110 | return -EINVAL; |
113 | } | 111 | } |
114 | return 0; | 112 | return 0; |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 8bd0d7b26632..e8bed3390e58 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par) | |||
74 | const struct ipt_entry *e = par->entryinfo; | 74 | const struct ipt_entry *e = par->entryinfo; |
75 | 75 | ||
76 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { | 76 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { |
77 | pr_info("ECHOREPLY no longer supported.\n"); | 77 | pr_info_ratelimited("ECHOREPLY no longer supported.\n"); |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | } else if (rejinfo->with == IPT_TCP_RESET) { | 79 | } else if (rejinfo->with == IPT_TCP_RESET) { |
80 | /* Must specify that it's a TCP packet */ | 80 | /* Must specify that it's a TCP packet */ |
81 | if (e->ip.proto != IPPROTO_TCP || | 81 | if (e->ip.proto != IPPROTO_TCP || |
82 | (e->ip.invflags & XT_INV_PROTO)) { | 82 | (e->ip.invflags & XT_INV_PROTO)) { |
83 | pr_info("TCP_RESET invalid for non-tcp\n"); | 83 | pr_info_ratelimited("TCP_RESET invalid for non-tcp\n"); |
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | } | 85 | } |
86 | } | 86 | } |
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 37fb9552e858..fd01f13c896a 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
@@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par) | |||
105 | const struct xt_rpfilter_info *info = par->matchinfo; | 105 | const struct xt_rpfilter_info *info = par->matchinfo; |
106 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; | 106 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; |
107 | if (info->flags & options) { | 107 | if (info->flags & options) { |
108 | pr_info("unknown options encountered"); | 108 | pr_info_ratelimited("unknown options\n"); |
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | 111 | ||
112 | if (strcmp(par->table, "mangle") != 0 && | 112 | if (strcmp(par->table, "mangle") != 0 && |
113 | strcmp(par->table, "raw") != 0) { | 113 | strcmp(par->table, "raw") != 0) { |
114 | pr_info("match only valid in the \'raw\' " | 114 | pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n", |
115 | "or \'mangle\' tables, not \'%s\'.\n", par->table); | 115 | par->table); |
116 | return -EINVAL; | 116 | return -EINVAL; |
117 | } | 117 | } |
118 | 118 | ||
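The ECN, REJECT and rpfilter hunks above (and many of the xt_* changes later in this series) convert checkentry error messages from pr_info()/pr_err() to pr_info_ratelimited(), since these paths are reachable from repeated ruleset loads and could otherwise flood the log. A toy sketch of what such a rate limit does, with made-up window parameters that are assumptions, not the kernel's printk ratelimit settings:

    #include <stdio.h>
    #include <time.h>

    /* Allow at most `burst` messages per `interval` seconds (toy check). */
    static int ratelimit_ok(time_t now)
    {
        static time_t window_start;
        static int used;
        const int interval = 5, burst = 10;   /* assumed values */

        if (now - window_start >= interval) {
            window_start = now;
            used = 0;
        }
        return used++ < burst;
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++) {
            time_t now = time(NULL);
            if (ratelimit_ok(now))
                printf("message %d logged\n", i);
            else
                printf("message %d suppressed\n", i);
        }
        return 0;
    }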
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 49cc1c1df1ba..a4f44d815a61 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1826,6 +1826,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4, | |||
1826 | return skb_get_hash_raw(skb) >> 1; | 1826 | return skb_get_hash_raw(skb) >> 1; |
1827 | memset(&hash_keys, 0, sizeof(hash_keys)); | 1827 | memset(&hash_keys, 0, sizeof(hash_keys)); |
1828 | skb_flow_dissect_flow_keys(skb, &keys, flag); | 1828 | skb_flow_dissect_flow_keys(skb, &keys, flag); |
1829 | |||
1830 | hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; | ||
1829 | hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; | 1831 | hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; |
1830 | hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; | 1832 | hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; |
1831 | hash_keys.ports.src = keys.ports.src; | 1833 | hash_keys.ports.src = keys.ports.src; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e9f985e42405..6818042cd8a9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, | |||
1730 | */ | 1730 | */ |
1731 | segs = max_t(u32, bytes / mss_now, min_tso_segs); | 1731 | segs = max_t(u32, bytes / mss_now, min_tso_segs); |
1732 | 1732 | ||
1733 | return min_t(u32, segs, sk->sk_gso_max_segs); | 1733 | return segs; |
1734 | } | 1734 | } |
1735 | EXPORT_SYMBOL(tcp_tso_autosize); | 1735 | EXPORT_SYMBOL(tcp_tso_autosize); |
1736 | 1736 | ||
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) | |||
1742 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; | 1742 | const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; |
1743 | u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0; | 1743 | u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0; |
1744 | 1744 | ||
1745 | return tso_segs ? : | 1745 | if (!tso_segs) |
1746 | tcp_tso_autosize(sk, mss_now, | 1746 | tso_segs = tcp_tso_autosize(sk, mss_now, |
1747 | sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); | 1747 | sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); |
1748 | return min_t(u32, tso_segs, sk->sk_gso_max_segs); | ||
1748 | } | 1749 | } |
1749 | 1750 | ||
1750 | /* Returns the portion of skb which can be sent right away */ | 1751 | /* Returns the portion of skb which can be sent right away */ |
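The tcp_tso_segs() hunk moves the sk_gso_max_segs clamp out of tcp_tso_autosize() and applies it after the congestion-control callback, so a tso_segs_goal() value supplied by the CA module is bounded by the device limit as well. A minimal sketch of the resulting selection order, with plain integers standing in for the socket fields:

    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* ca_goal == 0 means "no congestion-control preference, use autosize". */
    static unsigned int tso_segs(unsigned int ca_goal, unsigned int autosize,
                                 unsigned int gso_max_segs)
    {
        unsigned int segs = ca_goal ? ca_goal : autosize;

        return min_u32(segs, gso_max_segs);   /* clamp now applies in both cases */
    }

    int main(void)
    {
        printf("%u\n", tso_segs(0, 12, 8));    /* autosize path, clamped to 8 */
        printf("%u\n", tso_segs(64, 12, 8));   /* CA goal path, also clamped to 8 */
        return 0;
    }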
@@ -2027,6 +2028,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk) | |||
2027 | } | 2028 | } |
2028 | } | 2029 | } |
2029 | 2030 | ||
2031 | static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) | ||
2032 | { | ||
2033 | struct sk_buff *skb, *next; | ||
2034 | |||
2035 | skb = tcp_send_head(sk); | ||
2036 | tcp_for_write_queue_from_safe(skb, next, sk) { | ||
2037 | if (len <= skb->len) | ||
2038 | break; | ||
2039 | |||
2040 | if (unlikely(TCP_SKB_CB(skb)->eor)) | ||
2041 | return false; | ||
2042 | |||
2043 | len -= skb->len; | ||
2044 | } | ||
2045 | |||
2046 | return true; | ||
2047 | } | ||
2048 | |||
2030 | /* Create a new MTU probe if we are ready. | 2049 | /* Create a new MTU probe if we are ready. |
2031 | * MTU probe is regularly attempting to increase the path MTU by | 2050 | * MTU probe is regularly attempting to increase the path MTU by |
2032 | * deliberately sending larger packets. This discovers routing | 2051 | * deliberately sending larger packets. This discovers routing |
@@ -2099,6 +2118,9 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2099 | return 0; | 2118 | return 0; |
2100 | } | 2119 | } |
2101 | 2120 | ||
2121 | if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) | ||
2122 | return -1; | ||
2123 | |||
2102 | /* We're allowed to probe. Build it now. */ | 2124 | /* We're allowed to probe. Build it now. */ |
2103 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); | 2125 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); |
2104 | if (!nskb) | 2126 | if (!nskb) |
@@ -2134,6 +2156,10 @@ static int tcp_mtu_probe(struct sock *sk) | |||
2134 | /* We've eaten all the data from this skb. | 2156 | /* We've eaten all the data from this skb. |
2135 | * Throw it away. */ | 2157 | * Throw it away. */ |
2136 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; | 2158 | TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
2159 | /* If this is the last SKB we copy and eor is set | ||
2160 | * we need to propagate it to the new skb. | ||
2161 | */ | ||
2162 | TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; | ||
2137 | tcp_unlink_write_queue(skb, sk); | 2163 | tcp_unlink_write_queue(skb, sk); |
2138 | sk_wmem_free_skb(sk, skb); | 2164 | sk_wmem_free_skb(sk, skb); |
2139 | } else { | 2165 | } else { |
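tcp_can_coalesce_send_queue_head() refuses to build an MTU probe when the skbs that would be merged cross an EOR boundary, and the second hunk propagates the eor bit onto the probe skb when a marked skb is consumed whole. A small list-based sketch of the same queue walk, with a hypothetical struct in place of sk_buff:

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        int len;
        bool eor;          /* end-of-record: must not be merged with later data */
        struct pkt *next;
    };

    /* Can the first `len` bytes of the queue be coalesced into one probe? */
    static bool can_coalesce_head(const struct pkt *head, int len)
    {
        for (const struct pkt *p = head; p; p = p->next) {
            if (len <= p->len)
                return true;          /* probe ends inside this packet */
            if (p->eor)
                return false;         /* would swallow an EOR boundary */
            len -= p->len;
        }
        return true;
    }

    int main(void)
    {
        struct pkt c = { 600, false, NULL };
        struct pkt b = { 400, true,  &c };
        struct pkt a = { 300, false, &b };

        printf("probe 500: %s\n", can_coalesce_head(&a, 500) ? "ok" : "skip");
        printf("probe 900: %s\n", can_coalesce_head(&a, 900) ? "ok" : "skip");
        return 0;
    }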
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bfaefe560b5c..e5ef7c38c934 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
2024 | err = udplite_checksum_init(skb, uh); | 2024 | err = udplite_checksum_init(skb, uh); |
2025 | if (err) | 2025 | if (err) |
2026 | return err; | 2026 | return err; |
2027 | |||
2028 | if (UDP_SKB_CB(skb)->partial_cov) { | ||
2029 | skb->csum = inet_compute_pseudo(skb, proto); | ||
2030 | return 0; | ||
2031 | } | ||
2027 | } | 2032 | } |
2028 | 2033 | ||
2029 | /* Note, we are only interested in != 0 or == 0, thus the | 2034 | /* Note, we are only interested in != 0 or == 0, thus the |
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c index ec43d18b5ff9..547515e8450a 100644 --- a/net/ipv6/ip6_checksum.c +++ b/net/ipv6/ip6_checksum.c | |||
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) | |||
73 | err = udplite_checksum_init(skb, uh); | 73 | err = udplite_checksum_init(skb, uh); |
74 | if (err) | 74 | if (err) |
75 | return err; | 75 | return err; |
76 | |||
77 | if (UDP_SKB_CB(skb)->partial_cov) { | ||
78 | skb->csum = ip6_compute_pseudo(skb, proto); | ||
79 | return 0; | ||
80 | } | ||
76 | } | 81 | } |
77 | 82 | ||
78 | /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) | 83 | /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) |
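Both udp4_csum_init() and udp6_csum_init() now seed skb->csum with the pseudo-header sum and return early when UDP-Lite partial coverage is in use, instead of falling through to the full-coverage path. The pseudo-header itself is the usual ones'-complement sum over addresses, protocol and length; a self-contained IPv4 illustration of that arithmetic follows (an assumption-laden sketch, not the kernel's inet_compute_pseudo()):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into 16 bits of ones'-complement sum. */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* IPv4 pseudo-header sum: saddr, daddr, zero+proto, UDP length (RFC 768). */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint8_t proto, uint16_t udp_len)
    {
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;                  /* high byte of the pair is zero */
        sum += udp_len;
        return csum_fold(sum);
    }

    int main(void)
    {
        /* 192.0.2.1 -> 192.0.2.2, protocol 136 (UDP-Lite), length 24 bytes */
        uint16_t sum = pseudo_hdr_sum(0xc0000201, 0xc0000202, 136, 24);

        printf("pseudo-header partial sum: 0x%04x\n", sum);
        return 0;
    }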
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d78d41fc4b1a..24535169663d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1367 | if (get_user(len, optlen)) | 1367 | if (get_user(len, optlen)) |
1368 | return -EFAULT; | 1368 | return -EFAULT; |
1369 | 1369 | ||
1370 | lock_sock(sk); | 1370 | err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); |
1371 | err = nf_getsockopt(sk, PF_INET6, optname, optval, | ||
1372 | &len); | ||
1373 | release_sock(sk); | ||
1374 | if (err >= 0) | 1371 | if (err >= 0) |
1375 | err = put_user(len, optlen); | 1372 | err = put_user(len, optlen); |
1376 | } | 1373 | } |
@@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1409 | if (get_user(len, optlen)) | 1406 | if (get_user(len, optlen)) |
1410 | return -EFAULT; | 1407 | return -EFAULT; |
1411 | 1408 | ||
1412 | lock_sock(sk); | 1409 | err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len); |
1413 | err = compat_nf_getsockopt(sk, PF_INET6, | ||
1414 | optname, optval, &len); | ||
1415 | release_sock(sk); | ||
1416 | if (err >= 0) | 1410 | if (err >= 0) |
1417 | err = put_user(len, optlen); | 1411 | err = put_user(len, optlen); |
1418 | } | 1412 | } |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index af4c917e0836..62358b93bbac 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb, | |||
352 | } | 352 | } |
353 | if (table_base + v != ip6t_next_entry(e) && | 353 | if (table_base + v != ip6t_next_entry(e) && |
354 | !(e->ipv6.flags & IP6T_F_GOTO)) { | 354 | !(e->ipv6.flags & IP6T_F_GOTO)) { |
355 | if (unlikely(stackidx >= private->stacksize)) { | ||
356 | verdict = NF_DROP; | ||
357 | break; | ||
358 | } | ||
355 | jumpstack[stackidx++] = e; | 359 | jumpstack[stackidx++] = e; |
356 | } | 360 | } |
357 | 361 | ||
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index fa51a205918d..38dea8ff680f 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par) | |||
85 | const struct ip6t_entry *e = par->entryinfo; | 85 | const struct ip6t_entry *e = par->entryinfo; |
86 | 86 | ||
87 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { | 87 | if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { |
88 | pr_info("ECHOREPLY is not supported.\n"); | 88 | pr_info_ratelimited("ECHOREPLY is not supported\n"); |
89 | return -EINVAL; | 89 | return -EINVAL; |
90 | } else if (rejinfo->with == IP6T_TCP_RESET) { | 90 | } else if (rejinfo->with == IP6T_TCP_RESET) { |
91 | /* Must specify that it's a TCP packet */ | 91 | /* Must specify that it's a TCP packet */ |
92 | if (!(e->ipv6.flags & IP6T_F_PROTO) || | 92 | if (!(e->ipv6.flags & IP6T_F_PROTO) || |
93 | e->ipv6.proto != IPPROTO_TCP || | 93 | e->ipv6.proto != IPPROTO_TCP || |
94 | (e->ipv6.invflags & XT_INV_PROTO)) { | 94 | (e->ipv6.invflags & XT_INV_PROTO)) { |
95 | pr_info("TCP_RESET illegal for non-tcp\n"); | 95 | pr_info_ratelimited("TCP_RESET illegal for non-tcp\n"); |
96 | return -EINVAL; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | } | 98 | } |
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index b12e61b7b16c..94deb69bbbda 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -103,14 +103,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par) | |||
103 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; | 103 | unsigned int options = ~XT_RPFILTER_OPTION_MASK; |
104 | 104 | ||
105 | if (info->flags & options) { | 105 | if (info->flags & options) { |
106 | pr_info("unknown options encountered"); | 106 | pr_info_ratelimited("unknown options\n"); |
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (strcmp(par->table, "mangle") != 0 && | 110 | if (strcmp(par->table, "mangle") != 0 && |
111 | strcmp(par->table, "raw") != 0) { | 111 | strcmp(par->table, "raw") != 0) { |
112 | pr_info("match only valid in the \'raw\' " | 112 | pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n", |
113 | "or \'mangle\' tables, not \'%s\'.\n", par->table); | 113 | par->table); |
114 | return -EINVAL; | 114 | return -EINVAL; |
115 | } | 115 | } |
116 | 116 | ||
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c index 9642164107ce..33719d5560c8 100644 --- a/net/ipv6/netfilter/ip6t_srh.c +++ b/net/ipv6/netfilter/ip6t_srh.c | |||
@@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par) | |||
122 | const struct ip6t_srh *srhinfo = par->matchinfo; | 122 | const struct ip6t_srh *srhinfo = par->matchinfo; |
123 | 123 | ||
124 | if (srhinfo->mt_flags & ~IP6T_SRH_MASK) { | 124 | if (srhinfo->mt_flags & ~IP6T_SRH_MASK) { |
125 | pr_err("unknown srh match flags %X\n", srhinfo->mt_flags); | 125 | pr_info_ratelimited("unknown srh match flags %X\n", |
126 | srhinfo->mt_flags); | ||
126 | return -EINVAL; | 127 | return -EINVAL; |
127 | } | 128 | } |
128 | 129 | ||
129 | if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) { | 130 | if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) { |
130 | pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags); | 131 | pr_info_ratelimited("unknown srh invflags %X\n", |
132 | srhinfo->mt_invflags); | ||
131 | return -EINVAL; | 133 | return -EINVAL; |
132 | } | 134 | } |
133 | 135 | ||
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3873d3877135..3a1775a62973 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) | |||
182 | #ifdef CONFIG_IPV6_SIT_6RD | 182 | #ifdef CONFIG_IPV6_SIT_6RD |
183 | struct ip_tunnel *t = netdev_priv(dev); | 183 | struct ip_tunnel *t = netdev_priv(dev); |
184 | 184 | ||
185 | if (t->dev == sitn->fb_tunnel_dev) { | 185 | if (dev == sitn->fb_tunnel_dev) { |
186 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); | 186 | ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); |
187 | t->ip6rd.relay_prefix = 0; | 187 | t->ip6rd.relay_prefix = 0; |
188 | t->ip6rd.prefixlen = 16; | 188 | t->ip6rd.prefixlen = 16; |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a8b1616cec41..1f3188d03840 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, | |||
304 | * driver so reject the timeout update. | 305 | * driver so reject the timeout update. |
305 | */ | 306 | */ |
306 | status = WLAN_STATUS_REQUEST_DECLINED; | 307 | status = WLAN_STATUS_REQUEST_DECLINED; |
307 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, | ||
308 | tid, dialog_token, status, | ||
309 | 1, buf_size, timeout); | ||
310 | goto end; | 308 | goto end; |
311 | } | 309 | } |
312 | 310 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 46028e12e216..f4195a0f0279 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon) | |||
2892 | } | 2892 | } |
2893 | if (beacon->probe_resp_len) { | 2893 | if (beacon->probe_resp_len) { |
2894 | new_beacon->probe_resp_len = beacon->probe_resp_len; | 2894 | new_beacon->probe_resp_len = beacon->probe_resp_len; |
2895 | beacon->probe_resp = pos; | 2895 | new_beacon->probe_resp = pos; |
2896 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); | 2896 | memcpy(pos, beacon->probe_resp, beacon->probe_resp_len); |
2897 | pos += beacon->probe_resp_len; | 2897 | pos += beacon->probe_resp_len; |
2898 | } | 2898 | } |
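The cfg80211_beacon_dup() fix writes the duplicated probe-response pointer into new_beacon rather than back into the source beacon, the usual pattern when packing several variable-length blobs into a single allocation. A stripped-down sketch of that copy pattern, with hypothetical field names rather than the cfg80211 structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct blob_set {
        size_t a_len, b_len;
        unsigned char *a, *b;    /* point into one trailing buffer */
    };

    /* Duplicate src into a single allocation; fix up pointers in the COPY. */
    static struct blob_set *blob_set_dup(const struct blob_set *src)
    {
        struct blob_set *dup;
        unsigned char *pos;

        dup = malloc(sizeof(*dup) + src->a_len + src->b_len);
        if (!dup)
            return NULL;
        *dup = *src;
        pos = (unsigned char *)(dup + 1);

        dup->a = pos;                       /* not src->a = pos (the old bug) */
        memcpy(pos, src->a, src->a_len);
        pos += src->a_len;

        dup->b = pos;
        memcpy(pos, src->b, src->b_len);
        return dup;
    }

    int main(void)
    {
        unsigned char head[] = "head", probe[] = "probe";
        struct blob_set src = { sizeof(head), sizeof(probe), head, probe };
        struct blob_set *dup = blob_set_dup(&src);

        if (dup)
            printf("%s / %s\n", (char *)dup->a, (char *)dup->b);
        free(dup);
        return 0;
    }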
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 26900025de2f..ae9c33cd8ada 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1467,7 +1467,7 @@ struct ieee802_11_elems { | |||
1467 | const struct ieee80211_timeout_interval_ie *timeout_int; | 1467 | const struct ieee80211_timeout_interval_ie *timeout_int; |
1468 | const u8 *opmode_notif; | 1468 | const u8 *opmode_notif; |
1469 | const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; | 1469 | const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; |
1470 | const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; | 1470 | struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; |
1471 | const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; | 1471 | const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; |
1472 | 1472 | ||
1473 | /* length of them, respectively */ | 1473 | /* length of them, respectively */ |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 73ac607beb5d..6a381cbe1e33 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, | |||
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, | 1257 | static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, |
1258 | struct ieee80211_mgmt *mgmt, size_t len) | 1258 | struct ieee80211_mgmt *mgmt, size_t len, |
1259 | struct ieee802_11_elems *elems) | ||
1259 | { | 1260 | { |
1260 | struct ieee80211_mgmt *mgmt_fwd; | 1261 | struct ieee80211_mgmt *mgmt_fwd; |
1261 | struct sk_buff *skb; | 1262 | struct sk_buff *skb; |
1262 | struct ieee80211_local *local = sdata->local; | 1263 | struct ieee80211_local *local = sdata->local; |
1263 | u8 *pos = mgmt->u.action.u.chan_switch.variable; | ||
1264 | size_t offset_ttl; | ||
1265 | 1264 | ||
1266 | skb = dev_alloc_skb(local->tx_headroom + len); | 1265 | skb = dev_alloc_skb(local->tx_headroom + len); |
1267 | if (!skb) | 1266 | if (!skb) |
@@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1269 | skb_reserve(skb, local->tx_headroom); | 1268 | skb_reserve(skb, local->tx_headroom); |
1270 | mgmt_fwd = skb_put(skb, len); | 1269 | mgmt_fwd = skb_put(skb, len); |
1271 | 1270 | ||
1272 | /* offset_ttl is based on whether the secondary channel | 1271 | elems->mesh_chansw_params_ie->mesh_ttl--; |
1273 | * offset is available or not. Subtract 1 from the mesh TTL | 1272 | elems->mesh_chansw_params_ie->mesh_flags &= |
1274 | * and disable the initiator flag before forwarding. | 1273 | ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; |
1275 | */ | ||
1276 | offset_ttl = (len < 42) ? 7 : 10; | ||
1277 | *(pos + offset_ttl) -= 1; | ||
1278 | *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; | ||
1279 | 1274 | ||
1280 | memcpy(mgmt_fwd, mgmt, len); | 1275 | memcpy(mgmt_fwd, mgmt, len); |
1281 | eth_broadcast_addr(mgmt_fwd->da); | 1276 | eth_broadcast_addr(mgmt_fwd->da); |
@@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, | |||
1323 | 1318 | ||
1324 | /* forward or re-broadcast the CSA frame */ | 1319 | /* forward or re-broadcast the CSA frame */ |
1325 | if (fwd_csa) { | 1320 | if (fwd_csa) { |
1326 | if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0) | 1321 | if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) |
1327 | mcsa_dbg(sdata, "Failed to forward the CSA frame"); | 1322 | mcsa_dbg(sdata, "Failed to forward the CSA frame"); |
1328 | } | 1323 | } |
1329 | } | 1324 | } |
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index ee0181778a42..029334835747 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2008, Intel Corporation |
10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | 10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> |
11 | * Copyright (C) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
27 | u32 sta_flags, u8 *bssid, | 28 | u32 sta_flags, u8 *bssid, |
28 | struct ieee80211_csa_ie *csa_ie) | 29 | struct ieee80211_csa_ie *csa_ie) |
29 | { | 30 | { |
30 | enum nl80211_band new_band; | 31 | enum nl80211_band new_band = current_band; |
31 | int new_freq; | 32 | int new_freq; |
32 | u8 new_chan_no; | 33 | u8 new_chan_no; |
33 | struct ieee80211_channel *new_chan; | 34 | struct ieee80211_channel *new_chan; |
@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, | |||
55 | elems->ext_chansw_ie->new_operating_class, | 56 | elems->ext_chansw_ie->new_operating_class, |
56 | &new_band)) { | 57 | &new_band)) { |
57 | sdata_info(sdata, | 58 | sdata_info(sdata, |
58 | "cannot understand ECSA IE operating class %d, disconnecting\n", | 59 | "cannot understand ECSA IE operating class, %d, ignoring\n", |
59 | elems->ext_chansw_ie->new_operating_class); | 60 | elems->ext_chansw_ie->new_operating_class); |
60 | return -EINVAL; | ||
61 | } | 61 | } |
62 | new_chan_no = elems->ext_chansw_ie->new_ch_num; | 62 | new_chan_no = elems->ext_chansw_ie->new_ch_num; |
63 | csa_ie->count = elems->ext_chansw_ie->count; | 63 | csa_ie->count = elems->ext_chansw_ie->count; |
64 | csa_ie->mode = elems->ext_chansw_ie->mode; | 64 | csa_ie->mode = elems->ext_chansw_ie->mode; |
65 | } else if (elems->ch_switch_ie) { | 65 | } else if (elems->ch_switch_ie) { |
66 | new_band = current_band; | ||
67 | new_chan_no = elems->ch_switch_ie->new_ch_num; | 66 | new_chan_no = elems->ch_switch_ie->new_ch_num; |
68 | csa_ie->count = elems->ch_switch_ie->count; | 67 | csa_ie->count = elems->ch_switch_ie->count; |
69 | csa_ie->mode = elems->ch_switch_ie->mode; | 68 | csa_ie->mode = elems->ch_switch_ie->mode; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 0c5627f8a104..af0b608ee8ed 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
314 | 314 | ||
315 | if (ieee80211_hw_check(hw, USES_RSS)) { | 315 | if (ieee80211_hw_check(hw, USES_RSS)) { |
316 | sta->pcpu_rx_stats = | 316 | sta->pcpu_rx_stats = |
317 | alloc_percpu(struct ieee80211_sta_rx_stats); | 317 | alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); |
318 | if (!sta->pcpu_rx_stats) | 318 | if (!sta->pcpu_rx_stats) |
319 | goto free; | 319 | goto free; |
320 | } | 320 | } |
@@ -433,6 +433,7 @@ free_txq: | |||
433 | if (sta->sta.txq[0]) | 433 | if (sta->sta.txq[0]) |
434 | kfree(to_txq_info(sta->sta.txq[0])); | 434 | kfree(to_txq_info(sta->sta.txq[0])); |
435 | free: | 435 | free: |
436 | free_percpu(sta->pcpu_rx_stats); | ||
436 | #ifdef CONFIG_MAC80211_MESH | 437 | #ifdef CONFIG_MAC80211_MESH |
437 | kfree(sta->mesh); | 438 | kfree(sta->mesh); |
438 | #endif | 439 | #endif |
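sta_info_alloc() now allocates the per-CPU rx stats with alloc_percpu_gfp() so the caller's gfp flags are honoured, and the free: error path gains the matching free_percpu() so a later allocation failure no longer leaks them. The familiar goto-unwind shape, sketched with plain malloc/free (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct sta {
        int *stats;     /* stands in for the per-CPU rx stats */
        int *txq;
    };

    static struct sta *sta_alloc(int fail_txq)
    {
        struct sta *sta = calloc(1, sizeof(*sta));

        if (!sta)
            return NULL;

        sta->stats = malloc(sizeof(*sta->stats));
        if (!sta->stats)
            goto free;

        sta->txq = fail_txq ? NULL : malloc(sizeof(*sta->txq));
        if (!sta->txq)
            goto free;

        return sta;

    free:
        free(sta->stats);   /* free(NULL) is a no-op, like free_percpu(NULL) */
        free(sta);
        return NULL;
    }

    int main(void)
    {
        struct sta *ok = sta_alloc(0);
        struct sta *bad = sta_alloc(1);

        printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
        if (ok) {
            free(ok->stats);
            free(ok->txq);
            free(ok);
        }
        return 0;
    }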
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c index fbce552a796e..7d7466dbf663 100644 --- a/net/netfilter/nf_nat_proto_common.c +++ b/net/netfilter/nf_nat_proto_common.c | |||
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, | |||
41 | const struct nf_conn *ct, | 41 | const struct nf_conn *ct, |
42 | u16 *rover) | 42 | u16 *rover) |
43 | { | 43 | { |
44 | unsigned int range_size, min, i; | 44 | unsigned int range_size, min, max, i; |
45 | __be16 *portptr; | 45 | __be16 *portptr; |
46 | u_int16_t off; | 46 | u_int16_t off; |
47 | 47 | ||
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, | |||
71 | } | 71 | } |
72 | } else { | 72 | } else { |
73 | min = ntohs(range->min_proto.all); | 73 | min = ntohs(range->min_proto.all); |
74 | range_size = ntohs(range->max_proto.all) - min + 1; | 74 | max = ntohs(range->max_proto.all); |
75 | if (unlikely(max < min)) | ||
76 | swap(max, min); | ||
77 | range_size = max - min + 1; | ||
75 | } | 78 | } |
76 | 79 | ||
77 | if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { | 80 | if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { |
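nf_nat_l4proto_unique_tuple() now tolerates a reversed min/max proto range from userspace by swapping the two before computing range_size, which otherwise underflows to a huge unsigned value. A minimal sketch of the guard:

    #include <stdio.h>

    static void swap_uint(unsigned int *a, unsigned int *b)
    {
        unsigned int t = *a; *a = *b; *b = t;
    }

    static unsigned int range_size(unsigned int min, unsigned int max)
    {
        if (max < min)               /* malformed range from userspace */
            swap_uint(&max, &min);
        return max - min + 1;
    }

    int main(void)
    {
        printf("%u\n", range_size(1000, 1010));   /* 11 */
        printf("%u\n", range_size(1010, 1000));   /* also 11, no underflow */
        return 0;
    }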
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 2f685ee1f9c8..fa1655aff8d3 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -434,36 +434,35 @@ int xt_check_match(struct xt_mtchk_param *par, | |||
434 | * ebt_among is exempt from centralized matchsize checking | 434 | * ebt_among is exempt from centralized matchsize checking |
435 | * because it uses a dynamic-size data set. | 435 | * because it uses a dynamic-size data set. |
436 | */ | 436 | */ |
437 | pr_err("%s_tables: %s.%u match: invalid size " | 437 | pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n", |
438 | "%u (kernel) != (user) %u\n", | 438 | xt_prefix[par->family], par->match->name, |
439 | xt_prefix[par->family], par->match->name, | 439 | par->match->revision, |
440 | par->match->revision, | 440 | XT_ALIGN(par->match->matchsize), size); |
441 | XT_ALIGN(par->match->matchsize), size); | ||
442 | return -EINVAL; | 441 | return -EINVAL; |
443 | } | 442 | } |
444 | if (par->match->table != NULL && | 443 | if (par->match->table != NULL && |
445 | strcmp(par->match->table, par->table) != 0) { | 444 | strcmp(par->match->table, par->table) != 0) { |
446 | pr_err("%s_tables: %s match: only valid in %s table, not %s\n", | 445 | pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n", |
447 | xt_prefix[par->family], par->match->name, | 446 | xt_prefix[par->family], par->match->name, |
448 | par->match->table, par->table); | 447 | par->match->table, par->table); |
449 | return -EINVAL; | 448 | return -EINVAL; |
450 | } | 449 | } |
451 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { | 450 | if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { |
452 | char used[64], allow[64]; | 451 | char used[64], allow[64]; |
453 | 452 | ||
454 | pr_err("%s_tables: %s match: used from hooks %s, but only " | 453 | pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n", |
455 | "valid from %s\n", | 454 | xt_prefix[par->family], par->match->name, |
456 | xt_prefix[par->family], par->match->name, | 455 | textify_hooks(used, sizeof(used), |
457 | textify_hooks(used, sizeof(used), par->hook_mask, | 456 | par->hook_mask, par->family), |
458 | par->family), | 457 | textify_hooks(allow, sizeof(allow), |
459 | textify_hooks(allow, sizeof(allow), par->match->hooks, | 458 | par->match->hooks, |
460 | par->family)); | 459 | par->family)); |
461 | return -EINVAL; | 460 | return -EINVAL; |
462 | } | 461 | } |
463 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { | 462 | if (par->match->proto && (par->match->proto != proto || inv_proto)) { |
464 | pr_err("%s_tables: %s match: only valid for protocol %u\n", | 463 | pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n", |
465 | xt_prefix[par->family], par->match->name, | 464 | xt_prefix[par->family], par->match->name, |
466 | par->match->proto); | 465 | par->match->proto); |
467 | return -EINVAL; | 466 | return -EINVAL; |
468 | } | 467 | } |
469 | if (par->match->checkentry != NULL) { | 468 | if (par->match->checkentry != NULL) { |
@@ -814,36 +813,35 @@ int xt_check_target(struct xt_tgchk_param *par, | |||
814 | int ret; | 813 | int ret; |
815 | 814 | ||
816 | if (XT_ALIGN(par->target->targetsize) != size) { | 815 | if (XT_ALIGN(par->target->targetsize) != size) { |
817 | pr_err("%s_tables: %s.%u target: invalid size " | 816 | pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n", |
818 | "%u (kernel) != (user) %u\n", | 817 | xt_prefix[par->family], par->target->name, |
819 | xt_prefix[par->family], par->target->name, | 818 | par->target->revision, |
820 | par->target->revision, | 819 | XT_ALIGN(par->target->targetsize), size); |
821 | XT_ALIGN(par->target->targetsize), size); | ||
822 | return -EINVAL; | 820 | return -EINVAL; |
823 | } | 821 | } |
824 | if (par->target->table != NULL && | 822 | if (par->target->table != NULL && |
825 | strcmp(par->target->table, par->table) != 0) { | 823 | strcmp(par->target->table, par->table) != 0) { |
826 | pr_err("%s_tables: %s target: only valid in %s table, not %s\n", | 824 | pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n", |
827 | xt_prefix[par->family], par->target->name, | 825 | xt_prefix[par->family], par->target->name, |
828 | par->target->table, par->table); | 826 | par->target->table, par->table); |
829 | return -EINVAL; | 827 | return -EINVAL; |
830 | } | 828 | } |
831 | if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { | 829 | if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { |
832 | char used[64], allow[64]; | 830 | char used[64], allow[64]; |
833 | 831 | ||
834 | pr_err("%s_tables: %s target: used from hooks %s, but only " | 832 | pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n", |
835 | "usable from %s\n", | 833 | xt_prefix[par->family], par->target->name, |
836 | xt_prefix[par->family], par->target->name, | 834 | textify_hooks(used, sizeof(used), |
837 | textify_hooks(used, sizeof(used), par->hook_mask, | 835 | par->hook_mask, par->family), |
838 | par->family), | 836 | textify_hooks(allow, sizeof(allow), |
839 | textify_hooks(allow, sizeof(allow), par->target->hooks, | 837 | par->target->hooks, |
840 | par->family)); | 838 | par->family)); |
841 | return -EINVAL; | 839 | return -EINVAL; |
842 | } | 840 | } |
843 | if (par->target->proto && (par->target->proto != proto || inv_proto)) { | 841 | if (par->target->proto && (par->target->proto != proto || inv_proto)) { |
844 | pr_err("%s_tables: %s target: only valid for protocol %u\n", | 842 | pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n", |
845 | xt_prefix[par->family], par->target->name, | 843 | xt_prefix[par->family], par->target->name, |
846 | par->target->proto); | 844 | par->target->proto); |
847 | return -EINVAL; | 845 | return -EINVAL; |
848 | } | 846 | } |
849 | if (par->target->checkentry != NULL) { | 847 | if (par->target->checkentry != NULL) { |
@@ -1004,10 +1002,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) | |||
1004 | if (sz < sizeof(*info)) | 1002 | if (sz < sizeof(*info)) |
1005 | return NULL; | 1003 | return NULL; |
1006 | 1004 | ||
1007 | /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ | ||
1008 | if ((size >> PAGE_SHIFT) + 2 > totalram_pages) | ||
1009 | return NULL; | ||
1010 | |||
1011 | /* __GFP_NORETRY is not fully supported by kvmalloc but it should | 1005 | /* __GFP_NORETRY is not fully supported by kvmalloc but it should |
1012 | * work reasonably well if sz is too large and bail out rather | 1006 | * work reasonably well if sz is too large and bail out rather |
1013 | * than shoot all processes down before realizing there is nothing | 1007 | * than shoot all processes down before realizing there is nothing |
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index c502419d6306..f368ee6741db 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c | |||
@@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par) | |||
120 | const struct xt_audit_info *info = par->targinfo; | 120 | const struct xt_audit_info *info = par->targinfo; |
121 | 121 | ||
122 | if (info->type > XT_AUDIT_TYPE_MAX) { | 122 | if (info->type > XT_AUDIT_TYPE_MAX) { |
123 | pr_info("Audit type out of range (valid range: 0..%hhu)\n", | 123 | pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n", |
124 | XT_AUDIT_TYPE_MAX); | 124 | XT_AUDIT_TYPE_MAX); |
125 | return -ERANGE; | 125 | return -ERANGE; |
126 | } | 126 | } |
127 | 127 | ||
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c index 0f642ef8cd26..9f4151ec3e06 100644 --- a/net/netfilter/xt_CHECKSUM.c +++ b/net/netfilter/xt_CHECKSUM.c | |||
@@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par) | |||
36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; | 36 | const struct xt_CHECKSUM_info *einfo = par->targinfo; |
37 | 37 | ||
38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { | 38 | if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { |
39 | pr_info("unsupported CHECKSUM operation %x\n", einfo->operation); | 39 | pr_info_ratelimited("unsupported CHECKSUM operation %x\n", |
40 | einfo->operation); | ||
40 | return -EINVAL; | 41 | return -EINVAL; |
41 | } | 42 | } |
42 | if (!einfo->operation) { | 43 | if (!einfo->operation) |
43 | pr_info("no CHECKSUM operation enabled\n"); | ||
44 | return -EINVAL; | 44 | return -EINVAL; |
45 | } | 45 | |
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index da56c06a443c..f3f1caac949b 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c | |||
@@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par) | |||
91 | 91 | ||
92 | if (strcmp(par->table, "mangle") != 0 && | 92 | if (strcmp(par->table, "mangle") != 0 && |
93 | strcmp(par->table, "security") != 0) { | 93 | strcmp(par->table, "security") != 0) { |
94 | pr_info("target only valid in the \'mangle\' " | 94 | pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n", |
95 | "or \'security\' tables, not \'%s\'.\n", par->table); | 95 | par->table); |
96 | return -EINVAL; | 96 | return -EINVAL; |
97 | } | 97 | } |
98 | 98 | ||
@@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par) | |||
102 | break; | 102 | break; |
103 | 103 | ||
104 | default: | 104 | default: |
105 | pr_info("invalid mode: %hu\n", info->mode); | 105 | pr_info_ratelimited("invalid mode: %hu\n", info->mode); |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | } | 107 | } |
108 | 108 | ||
109 | ret = nf_ct_netns_get(par->net, par->family); | 109 | ret = nf_ct_netns_get(par->net, par->family); |
110 | if (ret < 0) | 110 | if (ret < 0) |
111 | pr_info("cannot load conntrack support for proto=%u\n", | 111 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
112 | par->family); | 112 | par->family); |
113 | return ret; | 113 | return ret; |
114 | } | 114 | } |
115 | 115 | ||
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 5a152e2acfd5..8790190c6feb 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, | |||
82 | 82 | ||
83 | proto = xt_ct_find_proto(par); | 83 | proto = xt_ct_find_proto(par); |
84 | if (!proto) { | 84 | if (!proto) { |
85 | pr_info("You must specify a L4 protocol, and not use " | 85 | pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n"); |
86 | "inversions on it.\n"); | ||
87 | return -ENOENT; | 86 | return -ENOENT; |
88 | } | 87 | } |
89 | 88 | ||
90 | helper = nf_conntrack_helper_try_module_get(helper_name, par->family, | 89 | helper = nf_conntrack_helper_try_module_get(helper_name, par->family, |
91 | proto); | 90 | proto); |
92 | if (helper == NULL) { | 91 | if (helper == NULL) { |
93 | pr_info("No such helper \"%s\"\n", helper_name); | 92 | pr_info_ratelimited("No such helper \"%s\"\n", helper_name); |
94 | return -ENOENT; | 93 | return -ENOENT; |
95 | } | 94 | } |
96 | 95 | ||
@@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
124 | const struct nf_conntrack_l4proto *l4proto; | 123 | const struct nf_conntrack_l4proto *l4proto; |
125 | struct ctnl_timeout *timeout; | 124 | struct ctnl_timeout *timeout; |
126 | struct nf_conn_timeout *timeout_ext; | 125 | struct nf_conn_timeout *timeout_ext; |
126 | const char *errmsg = NULL; | ||
127 | int ret = 0; | 127 | int ret = 0; |
128 | u8 proto; | 128 | u8 proto; |
129 | 129 | ||
@@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
131 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); | 131 | timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook); |
132 | if (timeout_find_get == NULL) { | 132 | if (timeout_find_get == NULL) { |
133 | ret = -ENOENT; | 133 | ret = -ENOENT; |
134 | pr_info("Timeout policy base is empty\n"); | 134 | errmsg = "Timeout policy base is empty"; |
135 | goto out; | 135 | goto out; |
136 | } | 136 | } |
137 | 137 | ||
138 | proto = xt_ct_find_proto(par); | 138 | proto = xt_ct_find_proto(par); |
139 | if (!proto) { | 139 | if (!proto) { |
140 | ret = -EINVAL; | 140 | ret = -EINVAL; |
141 | pr_info("You must specify a L4 protocol, and not use " | 141 | errmsg = "You must specify a L4 protocol and not use inversions on it"; |
142 | "inversions on it.\n"); | ||
143 | goto out; | 142 | goto out; |
144 | } | 143 | } |
145 | 144 | ||
146 | timeout = timeout_find_get(par->net, timeout_name); | 145 | timeout = timeout_find_get(par->net, timeout_name); |
147 | if (timeout == NULL) { | 146 | if (timeout == NULL) { |
148 | ret = -ENOENT; | 147 | ret = -ENOENT; |
149 | pr_info("No such timeout policy \"%s\"\n", timeout_name); | 148 | pr_info_ratelimited("No such timeout policy \"%s\"\n", |
149 | timeout_name); | ||
150 | goto out; | 150 | goto out; |
151 | } | 151 | } |
152 | 152 | ||
153 | if (timeout->l3num != par->family) { | 153 | if (timeout->l3num != par->family) { |
154 | ret = -EINVAL; | 154 | ret = -EINVAL; |
155 | pr_info("Timeout policy `%s' can only be used by L3 protocol " | 155 | pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n", |
156 | "number %d\n", timeout_name, timeout->l3num); | 156 | timeout_name, 3, timeout->l3num); |
157 | goto err_put_timeout; | 157 | goto err_put_timeout; |
158 | } | 158 | } |
159 | /* Make sure the timeout policy matches any existing protocol tracker, | 159 | /* Make sure the timeout policy matches any existing protocol tracker, |
@@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, | |||
162 | l4proto = __nf_ct_l4proto_find(par->family, proto); | 162 | l4proto = __nf_ct_l4proto_find(par->family, proto); |
163 | if (timeout->l4proto->l4proto != l4proto->l4proto) { | 163 | if (timeout->l4proto->l4proto != l4proto->l4proto) { |
164 | ret = -EINVAL; | 164 | ret = -EINVAL; |
165 | pr_info("Timeout policy `%s' can only be used by L4 protocol " | 165 | pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n", |
166 | "number %d\n", | 166 | timeout_name, 4, timeout->l4proto->l4proto); |
167 | timeout_name, timeout->l4proto->l4proto); | ||
168 | goto err_put_timeout; | 167 | goto err_put_timeout; |
169 | } | 168 | } |
170 | timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); | 169 | timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); |
@@ -180,6 +179,8 @@ err_put_timeout: | |||
180 | __xt_ct_tg_timeout_put(timeout); | 179 | __xt_ct_tg_timeout_put(timeout); |
181 | out: | 180 | out: |
182 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
182 | if (errmsg) | ||
183 | pr_info_ratelimited("%s\n", errmsg); | ||
183 | return ret; | 184 | return ret; |
184 | #else | 185 | #else |
185 | return -EOPNOTSUPP; | 186 | return -EOPNOTSUPP; |
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 3f83d38c4e5b..098ed851b7a7 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par) | |||
66 | { | 66 | { |
67 | const struct xt_DSCP_info *info = par->targinfo; | 67 | const struct xt_DSCP_info *info = par->targinfo; |
68 | 68 | ||
69 | if (info->dscp > XT_DSCP_MAX) { | 69 | if (info->dscp > XT_DSCP_MAX) |
70 | pr_info("dscp %x out of range\n", info->dscp); | ||
71 | return -EDOM; | 70 | return -EDOM; |
72 | } | ||
73 | return 0; | 71 | return 0; |
74 | } | 72 | } |
75 | 73 | ||
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c index 1535e87ed9bd..4653b071bed4 100644 --- a/net/netfilter/xt_HL.c +++ b/net/netfilter/xt_HL.c | |||
@@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par) | |||
105 | { | 105 | { |
106 | const struct ipt_TTL_info *info = par->targinfo; | 106 | const struct ipt_TTL_info *info = par->targinfo; |
107 | 107 | ||
108 | if (info->mode > IPT_TTL_MAXMODE) { | 108 | if (info->mode > IPT_TTL_MAXMODE) |
109 | pr_info("TTL: invalid or unknown mode %u\n", info->mode); | ||
110 | return -EINVAL; | 109 | return -EINVAL; |
111 | } | ||
112 | if (info->mode != IPT_TTL_SET && info->ttl == 0) | 110 | if (info->mode != IPT_TTL_SET && info->ttl == 0) |
113 | return -EINVAL; | 111 | return -EINVAL; |
114 | return 0; | 112 | return 0; |
@@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par) | |||
118 | { | 116 | { |
119 | const struct ip6t_HL_info *info = par->targinfo; | 117 | const struct ip6t_HL_info *info = par->targinfo; |
120 | 118 | ||
121 | if (info->mode > IP6T_HL_MAXMODE) { | 119 | if (info->mode > IP6T_HL_MAXMODE) |
122 | pr_info("invalid or unknown mode %u\n", info->mode); | ||
123 | return -EINVAL; | 120 | return -EINVAL; |
124 | } | 121 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) |
125 | if (info->mode != IP6T_HL_SET && info->hop_limit == 0) { | ||
126 | pr_info("increment/decrement does not " | ||
127 | "make sense with value 0\n"); | ||
128 | return -EINVAL; | 122 | return -EINVAL; |
129 | } | ||
130 | return 0; | 123 | return 0; |
131 | } | 124 | } |
132 | 125 | ||
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 60e6dbe12460..9c75f419cd80 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/module.h> | 14 | #include <linux/module.h> |
13 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
14 | #include <linux/icmp.h> | 16 | #include <linux/icmp.h> |
@@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) | |||
312 | static int hmark_tg_check(const struct xt_tgchk_param *par) | 314 | static int hmark_tg_check(const struct xt_tgchk_param *par) |
313 | { | 315 | { |
314 | const struct xt_hmark_info *info = par->targinfo; | 316 | const struct xt_hmark_info *info = par->targinfo; |
317 | const char *errmsg = "proto mask must be zero with L3 mode"; | ||
315 | 318 | ||
316 | if (!info->hmodulus) { | 319 | if (!info->hmodulus) |
317 | pr_info("xt_HMARK: hash modulus can't be zero\n"); | ||
318 | return -EINVAL; | 320 | return -EINVAL; |
319 | } | 321 | |
320 | if (info->proto_mask && | 322 | if (info->proto_mask && |
321 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) { | 323 | (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) |
322 | pr_info("xt_HMARK: proto mask must be zero with L3 mode\n"); | 324 | goto err; |
323 | return -EINVAL; | 325 | |
324 | } | ||
325 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && | 326 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && |
326 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | | 327 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | |
327 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) { | 328 | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) |
328 | pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n"); | ||
329 | return -EINVAL; | 329 | return -EINVAL; |
330 | } | 330 | |
331 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && | 331 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && |
332 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | | 332 | (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | |
333 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { | 333 | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { |
334 | pr_info("xt_HMARK: spi-set and port-set can't be combined\n"); | 334 | errmsg = "spi-set and port-set can't be combined"; |
335 | return -EINVAL; | 335 | goto err; |
336 | } | 336 | } |
337 | return 0; | 337 | return 0; |
338 | err: | ||
339 | pr_info_ratelimited("%s\n", errmsg); | ||
340 | return -EINVAL; | ||
338 | } | 341 | } |
339 | 342 | ||
340 | static struct xt_target hmark_tg_reg[] __read_mostly = { | 343 | static struct xt_target hmark_tg_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 6c2482b709b1..1ac6600bfafd 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c | |||
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) | |||
146 | timer_setup(&info->timer->timer, idletimer_tg_expired, 0); | 146 | timer_setup(&info->timer->timer, idletimer_tg_expired, 0); |
147 | info->timer->refcnt = 1; | 147 | info->timer->refcnt = 1; |
148 | 148 | ||
149 | INIT_WORK(&info->timer->work, idletimer_tg_work); | ||
150 | |||
149 | mod_timer(&info->timer->timer, | 151 | mod_timer(&info->timer->timer, |
150 | msecs_to_jiffies(info->timeout * 1000) + jiffies); | 152 | msecs_to_jiffies(info->timeout * 1000) + jiffies); |
151 | 153 | ||
152 | INIT_WORK(&info->timer->work, idletimer_tg_work); | ||
153 | |||
154 | return 0; | 154 | return 0; |
155 | 155 | ||
156 | out_free_attr: | 156 | out_free_attr: |
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par) | |||
191 | pr_debug("timeout value is zero\n"); | 191 | pr_debug("timeout value is zero\n"); |
192 | return -EINVAL; | 192 | return -EINVAL; |
193 | } | 193 | } |
194 | 194 | if (info->timeout >= INT_MAX / 1000) { | |
195 | pr_debug("timeout value is too big\n"); | ||
196 | return -EINVAL; | ||
197 | } | ||
195 | if (info->label[0] == '\0' || | 198 | if (info->label[0] == '\0' || |
196 | strnlen(info->label, | 199 | strnlen(info->label, |
197 | MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { | 200 | MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { |
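idletimer_tg_checkentry() now rejects timeouts of INT_MAX/1000 or more, because the later msecs_to_jiffies(info->timeout * 1000) would overflow the multiplication first. A standalone sketch of the same guard:

    #include <limits.h>
    #include <stdio.h>

    /* Reject timeouts whose conversion to milliseconds would overflow an int. */
    static int timeout_ok(unsigned int timeout_s)
    {
        if (timeout_s == 0)
            return 0;                     /* zero timeout makes no sense */
        if (timeout_s >= INT_MAX / 1000)
            return 0;                     /* timeout_s * 1000 would overflow */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", timeout_ok(300));              /* 1: fine */
        printf("%d\n", timeout_ok(INT_MAX / 1000));   /* 0: rejected */
        return 0;
    }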
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 1dcad893df78..19846445504d 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c | |||
@@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par) | |||
111 | struct xt_led_info_internal *ledinternal; | 111 | struct xt_led_info_internal *ledinternal; |
112 | int err; | 112 | int err; |
113 | 113 | ||
114 | if (ledinfo->id[0] == '\0') { | 114 | if (ledinfo->id[0] == '\0') |
115 | pr_info("No 'id' parameter given.\n"); | ||
116 | return -EINVAL; | 115 | return -EINVAL; |
117 | } | ||
118 | 116 | ||
119 | mutex_lock(&xt_led_mutex); | 117 | mutex_lock(&xt_led_mutex); |
120 | 118 | ||
@@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par) | |||
138 | 136 | ||
139 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); | 137 | err = led_trigger_register(&ledinternal->netfilter_led_trigger); |
140 | if (err) { | 138 | if (err) { |
141 | pr_err("Trigger name is already in use.\n"); | 139 | pr_info_ratelimited("Trigger name is already in use.\n"); |
142 | goto exit_alloc; | 140 | goto exit_alloc; |
143 | } | 141 | } |
144 | 142 | ||
145 | /* See if we need to set up a timer */ | 143 | /* Since the letinternal timer can be shared between multiple targets, |
146 | if (ledinfo->delay > 0) | 144 | * always set it up, even if the current target does not need it |
147 | timer_setup(&ledinternal->timer, led_timeout_callback, 0); | 145 | */ |
146 | timer_setup(&ledinternal->timer, led_timeout_callback, 0); | ||
148 | 147 | ||
149 | list_add_tail(&ledinternal->list, &xt_led_triggers); | 148 | list_add_tail(&ledinternal->list, &xt_led_triggers); |
150 | 149 | ||
@@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par) | |||
181 | 180 | ||
182 | list_del(&ledinternal->list); | 181 | list_del(&ledinternal->list); |
183 | 182 | ||
184 | if (ledinfo->delay > 0) | 183 | del_timer_sync(&ledinternal->timer); |
185 | del_timer_sync(&ledinternal->timer); | ||
186 | 184 | ||
187 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); | 185 | led_trigger_unregister(&ledinternal->netfilter_led_trigger); |
188 | 186 | ||
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index a360b99a958a..a9aca80a32ae 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
11 | #include <linux/module.h> | 13 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
13 | 15 | ||
@@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par) | |||
67 | init_hashrandom(&jhash_initval); | 69 | init_hashrandom(&jhash_initval); |
68 | 70 | ||
69 | if (info->queues_total == 0) { | 71 | if (info->queues_total == 0) { |
70 | pr_err("NFQUEUE: number of total queues is 0\n"); | 72 | pr_info_ratelimited("number of total queues is 0\n"); |
71 | return -EINVAL; | 73 | return -EINVAL; |
72 | } | 74 | } |
73 | maxid = info->queues_total - 1 + info->queuenum; | 75 | maxid = info->queues_total - 1 + info->queuenum; |
74 | if (maxid > 0xffff) { | 76 | if (maxid > 0xffff) { |
75 | pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", | 77 | pr_info_ratelimited("number of queues (%u) out of range (got %u)\n", |
76 | info->queues_total, maxid); | 78 | info->queues_total, maxid); |
77 | return -ERANGE; | 79 | return -ERANGE; |
78 | } | 80 | } |
79 | if (par->target->revision == 2 && info->flags > 1) | 81 | if (par->target->revision == 2 && info->flags > 1) |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 9faf5e050b79..4ad5fe27e08b 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
@@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info) | |||
60 | &info->secid); | 60 | &info->secid); |
61 | if (err) { | 61 | if (err) { |
62 | if (err == -EINVAL) | 62 | if (err == -EINVAL) |
63 | pr_info("invalid security context \'%s\'\n", info->secctx); | 63 | pr_info_ratelimited("invalid security context \'%s\'\n", |
64 | info->secctx); | ||
64 | return err; | 65 | return err; |
65 | } | 66 | } |
66 | 67 | ||
67 | if (!info->secid) { | 68 | if (!info->secid) { |
68 | pr_info("unable to map security context \'%s\'\n", info->secctx); | 69 | pr_info_ratelimited("unable to map security context \'%s\'\n", |
70 | info->secctx); | ||
69 | return -ENOENT; | 71 | return -ENOENT; |
70 | } | 72 | } |
71 | 73 | ||
72 | err = security_secmark_relabel_packet(info->secid); | 74 | err = security_secmark_relabel_packet(info->secid); |
73 | if (err) { | 75 | if (err) { |
74 | pr_info("unable to obtain relabeling permission\n"); | 76 | pr_info_ratelimited("unable to obtain relabeling permission\n"); |
75 | return err; | 77 | return err; |
76 | } | 78 | } |
77 | 79 | ||
@@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) | |||
86 | 88 | ||
87 | if (strcmp(par->table, "mangle") != 0 && | 89 | if (strcmp(par->table, "mangle") != 0 && |
88 | strcmp(par->table, "security") != 0) { | 90 | strcmp(par->table, "security") != 0) { |
89 | pr_info("target only valid in the \'mangle\' " | 91 | pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n", |
90 | "or \'security\' tables, not \'%s\'.\n", par->table); | 92 | par->table); |
91 | return -EINVAL; | 93 | return -EINVAL; |
92 | } | 94 | } |
93 | 95 | ||
94 | if (mode && mode != info->mode) { | 96 | if (mode && mode != info->mode) { |
95 | pr_info("mode already set to %hu cannot mix with " | 97 | pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n", |
96 | "rules for mode %hu\n", mode, info->mode); | 98 | mode, info->mode); |
97 | return -EINVAL; | 99 | return -EINVAL; |
98 | } | 100 | } |
99 | 101 | ||
@@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) | |||
101 | case SECMARK_MODE_SEL: | 103 | case SECMARK_MODE_SEL: |
102 | break; | 104 | break; |
103 | default: | 105 | default: |
104 | pr_info("invalid mode: %hu\n", info->mode); | 106 | pr_info_ratelimited("invalid mode: %hu\n", info->mode); |
105 | return -EINVAL; | 107 | return -EINVAL; |
106 | } | 108 | } |
107 | 109 | ||
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 99bb8e410f22..98efb202f8b4 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par) | |||
273 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 273 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
274 | (1 << NF_INET_LOCAL_OUT) | | 274 | (1 << NF_INET_LOCAL_OUT) | |
275 | (1 << NF_INET_POST_ROUTING))) != 0) { | 275 | (1 << NF_INET_POST_ROUTING))) != 0) { |
276 | pr_info("path-MTU clamping only supported in " | 276 | pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); |
277 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | ||
278 | return -EINVAL; | 277 | return -EINVAL; |
279 | } | 278 | } |
280 | if (par->nft_compat) | 279 | if (par->nft_compat) |
@@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par) | |||
283 | xt_ematch_foreach(ematch, e) | 282 | xt_ematch_foreach(ematch, e) |
284 | if (find_syn_match(ematch)) | 283 | if (find_syn_match(ematch)) |
285 | return 0; | 284 | return 0; |
286 | pr_info("Only works on TCP SYN packets\n"); | 285 | pr_info_ratelimited("Only works on TCP SYN packets\n"); |
287 | return -EINVAL; | 286 | return -EINVAL; |
288 | } | 287 | } |
289 | 288 | ||
@@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par) | |||
298 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | | 297 | (par->hook_mask & ~((1 << NF_INET_FORWARD) | |
299 | (1 << NF_INET_LOCAL_OUT) | | 298 | (1 << NF_INET_LOCAL_OUT) | |
300 | (1 << NF_INET_POST_ROUTING))) != 0) { | 299 | (1 << NF_INET_POST_ROUTING))) != 0) { |
301 | pr_info("path-MTU clamping only supported in " | 300 | pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n"); |
302 | "FORWARD, OUTPUT and POSTROUTING hooks\n"); | ||
303 | return -EINVAL; | 301 | return -EINVAL; |
304 | } | 302 | } |
305 | if (par->nft_compat) | 303 | if (par->nft_compat) |
@@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par) | |||
308 | xt_ematch_foreach(ematch, e) | 306 | xt_ematch_foreach(ematch, e) |
309 | if (find_syn_match(ematch)) | 307 | if (find_syn_match(ematch)) |
310 | return 0; | 308 | return 0; |
311 | pr_info("Only works on TCP SYN packets\n"); | 309 | pr_info_ratelimited("Only works on TCP SYN packets\n"); |
312 | return -EINVAL; | 310 | return -EINVAL; |
313 | } | 311 | } |
314 | #endif | 312 | #endif |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 17d7705e3bd4..8c89323c06af 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
540 | !(i->invflags & IP6T_INV_PROTO)) | 540 | !(i->invflags & IP6T_INV_PROTO)) |
541 | return 0; | 541 | return 0; |
542 | 542 | ||
543 | pr_info("Can be used only in combination with " | 543 | pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); |
544 | "either -p tcp or -p udp\n"); | ||
545 | return -EINVAL; | 544 | return -EINVAL; |
546 | } | 545 | } |
547 | #endif | 546 | #endif |
@@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par) | |||
559 | && !(i->invflags & IPT_INV_PROTO)) | 558 | && !(i->invflags & IPT_INV_PROTO)) |
560 | return 0; | 559 | return 0; |
561 | 560 | ||
562 | pr_info("Can be used only in combination with " | 561 | pr_info_ratelimited("Can be used only with -p tcp or -p udp\n"); |
563 | "either -p tcp or -p udp\n"); | ||
564 | return -EINVAL; | 562 | return -EINVAL; |
565 | } | 563 | } |
566 | 564 | ||
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index 911a7c0da504..89e281b3bfc2 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c | |||
@@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
164 | 164 | ||
165 | static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) | 165 | static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par) |
166 | { | 166 | { |
167 | const char *errmsg = "both incoming and outgoing interface limitation cannot be selected"; | ||
167 | struct xt_addrtype_info_v1 *info = par->matchinfo; | 168 | struct xt_addrtype_info_v1 *info = par->matchinfo; |
168 | 169 | ||
169 | if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN && | 170 | if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN && |
170 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { | 171 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) |
171 | pr_info("both incoming and outgoing " | 172 | goto err; |
172 | "interface limitation cannot be selected\n"); | ||
173 | return -EINVAL; | ||
174 | } | ||
175 | 173 | ||
176 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 174 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
177 | (1 << NF_INET_LOCAL_IN)) && | 175 | (1 << NF_INET_LOCAL_IN)) && |
178 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { | 176 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) { |
179 | pr_info("output interface limitation " | 177 | errmsg = "output interface limitation not valid in PREROUTING and INPUT"; |
180 | "not valid in PREROUTING and INPUT\n"); | 178 | goto err; |
181 | return -EINVAL; | ||
182 | } | 179 | } |
183 | 180 | ||
184 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 181 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
185 | (1 << NF_INET_LOCAL_OUT)) && | 182 | (1 << NF_INET_LOCAL_OUT)) && |
186 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) { | 183 | info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) { |
187 | pr_info("input interface limitation " | 184 | errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT"; |
188 | "not valid in POSTROUTING and OUTPUT\n"); | 185 | goto err; |
189 | return -EINVAL; | ||
190 | } | 186 | } |
191 | 187 | ||
192 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 188 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
193 | if (par->family == NFPROTO_IPV6) { | 189 | if (par->family == NFPROTO_IPV6) { |
194 | if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { | 190 | if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) { |
195 | pr_err("ipv6 BLACKHOLE matching not supported\n"); | 191 | errmsg = "ipv6 BLACKHOLE matching not supported"; |
196 | return -EINVAL; | 192 | goto err; |
197 | } | 193 | } |
198 | if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { | 194 | if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { |
199 | pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n"); | 195 | errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported"; |
200 | return -EINVAL; | 196 | goto err; |
201 | } | 197 | } |
202 | if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { | 198 | if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { |
203 | pr_err("ipv6 does not support BROADCAST matching\n"); | 199 | errmsg = "ipv6 does not support BROADCAST matching"; |
204 | return -EINVAL; | 200 | goto err; |
205 | } | 201 | } |
206 | } | 202 | } |
207 | #endif | 203 | #endif |
208 | return 0; | 204 | return 0; |
205 | err: | ||
206 | pr_info_ratelimited("%s\n", errmsg); | ||
207 | return -EINVAL; | ||
209 | } | 208 | } |
210 | 209 | ||
211 | static struct xt_match addrtype_mt_reg[] __read_mostly = { | 210 | static struct xt_match addrtype_mt_reg[] __read_mostly = { |
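Where a check routine has several failure branches, the conversion also deduplicates them: each branch sets an errmsg pointer and jumps to a single err: label that does one ratelimited print, as in addrtype above and xt_policy further down. A stripped-down sketch of that control flow, reusing the includes from the previous sketch and hypothetical flag and hook-mask constants:

#define EX_IFACE_IN	0x1	/* hypothetical flag bits */
#define EX_IFACE_OUT	0x2
#define EX_PRE_HOOKS	0xc	/* hypothetical "PREROUTING | INPUT" hook mask */

static int example_checkentry(unsigned int flags, unsigned int hook_mask)
{
	const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";

	if ((flags & EX_IFACE_IN) && (flags & EX_IFACE_OUT))
		goto err;	/* falls back to the default errmsg */

	if ((hook_mask & EX_PRE_HOOKS) && (flags & EX_IFACE_OUT)) {
		errmsg = "output interface limitation not valid in PREROUTING and INPUT";
		goto err;
	}
	return 0;
err:
	pr_info_ratelimited("%s\n", errmsg);
	return -EINVAL;
}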
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 06b090d8e901..a2cf8a6236d6 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
10 | #include <linux/module.h> | 12 | #include <linux/module.h> |
11 | #include <linux/syscalls.h> | 13 | #include <linux/syscalls.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len, | |||
34 | program.filter = insns; | 36 | program.filter = insns; |
35 | 37 | ||
36 | if (bpf_prog_create(ret, &program)) { | 38 | if (bpf_prog_create(ret, &program)) { |
37 | pr_info("bpf: check failed: parse error\n"); | 39 | pr_info_ratelimited("check failed: parse error\n"); |
38 | return -EINVAL; | 40 | return -EINVAL; |
39 | } | 41 | } |
40 | 42 | ||
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 891f4e7e8ea7..7df2dece57d3 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
15 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
16 | #include <linux/module.h> | 18 | #include <linux/module.h> |
17 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
@@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) | |||
48 | } | 50 | } |
49 | 51 | ||
50 | if (info->has_path && info->has_classid) { | 52 | if (info->has_path && info->has_classid) { |
51 | pr_info("xt_cgroup: both path and classid specified\n"); | 53 | pr_info_ratelimited("path and classid specified\n"); |
52 | return -EINVAL; | 54 | return -EINVAL; |
53 | } | 55 | } |
54 | 56 | ||
@@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) | |||
56 | if (info->has_path) { | 58 | if (info->has_path) { |
57 | cgrp = cgroup_get_from_path(info->path); | 59 | cgrp = cgroup_get_from_path(info->path); |
58 | if (IS_ERR(cgrp)) { | 60 | if (IS_ERR(cgrp)) { |
59 | pr_info("xt_cgroup: invalid path, errno=%ld\n", | 61 | pr_info_ratelimited("invalid path, errno=%ld\n", |
60 | PTR_ERR(cgrp)); | 62 | PTR_ERR(cgrp)); |
61 | return -EINVAL; | 63 | return -EINVAL; |
62 | } | 64 | } |
63 | info->priv = cgrp; | 65 | info->priv = cgrp; |
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 57ef175dfbfa..0068688995c8 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
135 | struct xt_cluster_match_info *info = par->matchinfo; | 135 | struct xt_cluster_match_info *info = par->matchinfo; |
136 | 136 | ||
137 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { | 137 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
138 | pr_info("you have exceeded the maximum " | 138 | pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", |
139 | "number of cluster nodes (%u > %u)\n", | 139 | info->total_nodes, XT_CLUSTER_NODES_MAX); |
140 | info->total_nodes, XT_CLUSTER_NODES_MAX); | ||
141 | return -EINVAL; | 140 | return -EINVAL; |
142 | } | 141 | } |
143 | if (info->node_mask >= (1ULL << info->total_nodes)) { | 142 | if (info->node_mask >= (1ULL << info->total_nodes)) { |
144 | pr_info("this node mask cannot be " | 143 | pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); |
145 | "higher than the total number of nodes\n"); | ||
146 | return -EDOM; | 144 | return -EDOM; |
147 | } | 145 | } |
148 | return 0; | 146 | return 0; |
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index cad0b7b5eb35..93cb018c3055 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c | |||
@@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par) | |||
112 | 112 | ||
113 | ret = nf_ct_netns_get(par->net, par->family); | 113 | ret = nf_ct_netns_get(par->net, par->family); |
114 | if (ret < 0) | 114 | if (ret < 0) |
115 | pr_info("cannot load conntrack support for proto=%u\n", | 115 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
116 | par->family); | 116 | par->family); |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * This filter cannot function correctly unless connection tracking | 119 | * This filter cannot function correctly unless connection tracking |
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c index 23372879e6e3..4fa4efd24353 100644 --- a/net/netfilter/xt_connlabel.c +++ b/net/netfilter/xt_connlabel.c | |||
@@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par) | |||
57 | int ret; | 57 | int ret; |
58 | 58 | ||
59 | if (info->options & ~options) { | 59 | if (info->options & ~options) { |
60 | pr_err("Unknown options in mask %x\n", info->options); | 60 | pr_info_ratelimited("Unknown options in mask %x\n", |
61 | info->options); | ||
61 | return -EINVAL; | 62 | return -EINVAL; |
62 | } | 63 | } |
63 | 64 | ||
64 | ret = nf_ct_netns_get(par->net, par->family); | 65 | ret = nf_ct_netns_get(par->net, par->family); |
65 | if (ret < 0) { | 66 | if (ret < 0) { |
66 | pr_info("cannot load conntrack support for proto=%u\n", | 67 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
67 | par->family); | 68 | par->family); |
68 | return ret; | 69 | return ret; |
69 | } | 70 | } |
70 | 71 | ||
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index ec377cc6a369..809639ce6f5a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par) | |||
79 | 79 | ||
80 | ret = nf_ct_netns_get(par->net, par->family); | 80 | ret = nf_ct_netns_get(par->net, par->family); |
81 | if (ret < 0) | 81 | if (ret < 0) |
82 | pr_info("cannot load conntrack support for proto=%u\n", | 82 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
83 | par->family); | 83 | par->family); |
84 | return ret; | 84 | return ret; |
85 | } | 85 | } |
86 | 86 | ||
@@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par) | |||
109 | 109 | ||
110 | ret = nf_ct_netns_get(par->net, par->family); | 110 | ret = nf_ct_netns_get(par->net, par->family); |
111 | if (ret < 0) | 111 | if (ret < 0) |
112 | pr_info("cannot load conntrack support for proto=%u\n", | 112 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
113 | par->family); | 113 | par->family); |
114 | return ret; | 114 | return ret; |
115 | } | 115 | } |
116 | 116 | ||
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 39cf1d019240..df80fe7d391c 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par) | |||
272 | 272 | ||
273 | ret = nf_ct_netns_get(par->net, par->family); | 273 | ret = nf_ct_netns_get(par->net, par->family); |
274 | if (ret < 0) | 274 | if (ret < 0) |
275 | pr_info("cannot load conntrack support for proto=%u\n", | 275 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
276 | par->family); | 276 | par->family); |
277 | return ret; | 277 | return ret; |
278 | } | 278 | } |
279 | 279 | ||
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index 236ac8008909..a4c2b862f820 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c | |||
@@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par) | |||
46 | { | 46 | { |
47 | const struct xt_dscp_info *info = par->matchinfo; | 47 | const struct xt_dscp_info *info = par->matchinfo; |
48 | 48 | ||
49 | if (info->dscp > XT_DSCP_MAX) { | 49 | if (info->dscp > XT_DSCP_MAX) |
50 | pr_info("dscp %x out of range\n", info->dscp); | ||
51 | return -EDOM; | 50 | return -EDOM; |
52 | } | ||
53 | 51 | ||
54 | return 0; | 52 | return 0; |
55 | } | 53 | } |
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c index 3c831a8efebc..c7ad4afa5fb8 100644 --- a/net/netfilter/xt_ecn.c +++ b/net/netfilter/xt_ecn.c | |||
@@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par) | |||
97 | 97 | ||
98 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && | 98 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && |
99 | (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { | 99 | (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { |
100 | pr_info("cannot match TCP bits in rule for non-tcp packets\n"); | 100 | pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n"); |
101 | return -EINVAL; | 101 | return -EINVAL; |
102 | } | 102 | } |
103 | 103 | ||
@@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par) | |||
139 | 139 | ||
140 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && | 140 | if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && |
141 | (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { | 141 | (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { |
142 | pr_info("cannot match TCP bits in rule for non-tcp packets\n"); | 142 | pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n"); |
143 | return -EINVAL; | 143 | return -EINVAL; |
144 | } | 144 | } |
145 | 145 | ||
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index ca6847403ca2..66f5aca62a08 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -523,7 +523,8 @@ static u64 user2rate(u64 user) | |||
523 | if (user != 0) { | 523 | if (user != 0) { |
524 | return div64_u64(XT_HASHLIMIT_SCALE_v2, user); | 524 | return div64_u64(XT_HASHLIMIT_SCALE_v2, user); |
525 | } else { | 525 | } else { |
526 | pr_warn("invalid rate from userspace: %llu\n", user); | 526 | pr_info_ratelimited("invalid rate from userspace: %llu\n", |
527 | user); | ||
527 | return 0; | 528 | return 0; |
528 | } | 529 | } |
529 | } | 530 | } |
@@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par, | |||
774 | if (!dh->rateinfo.prev_window && | 775 | if (!dh->rateinfo.prev_window && |
775 | (dh->rateinfo.current_rate <= dh->rateinfo.burst)) { | 776 | (dh->rateinfo.current_rate <= dh->rateinfo.burst)) { |
776 | spin_unlock(&dh->lock); | 777 | spin_unlock(&dh->lock); |
777 | rcu_read_unlock_bh(); | 778 | local_bh_enable(); |
778 | return !(cfg->mode & XT_HASHLIMIT_INVERT); | 779 | return !(cfg->mode & XT_HASHLIMIT_INVERT); |
779 | } else { | 780 | } else { |
780 | goto overlimit; | 781 | goto overlimit; |
@@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par, | |||
865 | } | 866 | } |
866 | 867 | ||
867 | if (cfg->mode & ~XT_HASHLIMIT_ALL) { | 868 | if (cfg->mode & ~XT_HASHLIMIT_ALL) { |
868 | pr_info("Unknown mode mask %X, kernel too old?\n", | 869 | pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n", |
869 | cfg->mode); | 870 | cfg->mode); |
870 | return -EINVAL; | 871 | return -EINVAL; |
871 | } | 872 | } |
872 | 873 | ||
873 | /* Check for overflow. */ | 874 | /* Check for overflow. */ |
874 | if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) { | 875 | if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) { |
875 | if (cfg->avg == 0 || cfg->avg > U32_MAX) { | 876 | if (cfg->avg == 0 || cfg->avg > U32_MAX) { |
876 | pr_info("hashlimit invalid rate\n"); | 877 | pr_info_ratelimited("invalid rate\n"); |
877 | return -ERANGE; | 878 | return -ERANGE; |
878 | } | 879 | } |
879 | 880 | ||
880 | if (cfg->interval == 0) { | 881 | if (cfg->interval == 0) { |
881 | pr_info("hashlimit invalid interval\n"); | 882 | pr_info_ratelimited("invalid interval\n"); |
882 | return -EINVAL; | 883 | return -EINVAL; |
883 | } | 884 | } |
884 | } else if (cfg->mode & XT_HASHLIMIT_BYTES) { | 885 | } else if (cfg->mode & XT_HASHLIMIT_BYTES) { |
885 | if (user2credits_byte(cfg->avg) == 0) { | 886 | if (user2credits_byte(cfg->avg) == 0) { |
886 | pr_info("overflow, rate too high: %llu\n", cfg->avg); | 887 | pr_info_ratelimited("overflow, rate too high: %llu\n", |
888 | cfg->avg); | ||
887 | return -EINVAL; | 889 | return -EINVAL; |
888 | } | 890 | } |
889 | } else if (cfg->burst == 0 || | 891 | } else if (cfg->burst == 0 || |
890 | user2credits(cfg->avg * cfg->burst, revision) < | 892 | user2credits(cfg->avg * cfg->burst, revision) < |
891 | user2credits(cfg->avg, revision)) { | 893 | user2credits(cfg->avg, revision)) { |
892 | pr_info("overflow, try lower: %llu/%llu\n", | 894 | pr_info_ratelimited("overflow, try lower: %llu/%llu\n", |
893 | cfg->avg, cfg->burst); | 895 | cfg->avg, cfg->burst); |
894 | return -ERANGE; | 896 | return -ERANGE; |
895 | } | 897 | } |
896 | 898 | ||
897 | mutex_lock(&hashlimit_mutex); | 899 | mutex_lock(&hashlimit_mutex); |
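Besides the message conversions, this file carries a one-line locking fix: the early-return path in the rate-match branch now calls local_bh_enable() instead of rcu_read_unlock_bh(), so the exit matches what the rest of the function disables on entry. As a generic illustration only (this is not the hashlimit code, and the lookup helper is hypothetical), the rule is that every return path must mirror the entry primitive:

#include <linux/bottom_half.h>

static bool example_lookup(const void *key)	/* hypothetical lookup */
{
	return key != NULL;
}

static bool example_match(const void *key)
{
	local_bh_disable();

	if (example_lookup(key)) {
		local_bh_enable();	/* early exit mirrors the disable above... */
		return true;
	}

	local_bh_enable();		/* ...and so does the normal exit */
	return false;
}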
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 38a78151c0e9..fd077aeaaed9 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c | |||
@@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par) | |||
61 | 61 | ||
62 | ret = nf_ct_netns_get(par->net, par->family); | 62 | ret = nf_ct_netns_get(par->net, par->family); |
63 | if (ret < 0) { | 63 | if (ret < 0) { |
64 | pr_info("cannot load conntrack support for proto=%u\n", | 64 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
65 | par->family); | 65 | par->family); |
66 | return ret; | 66 | return ret; |
67 | } | 67 | } |
68 | info->name[sizeof(info->name) - 1] = '\0'; | 68 | info->name[sizeof(info->name) - 1] = '\0'; |
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c index 7ca64a50db04..57f1df575701 100644 --- a/net/netfilter/xt_ipcomp.c +++ b/net/netfilter/xt_ipcomp.c | |||
@@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par) | |||
72 | 72 | ||
73 | /* Must specify no unknown invflags */ | 73 | /* Must specify no unknown invflags */ |
74 | if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) { | 74 | if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) { |
75 | pr_err("unknown flags %X\n", compinfo->invflags); | 75 | pr_info_ratelimited("unknown flags %X\n", compinfo->invflags); |
76 | return -EINVAL; | 76 | return -EINVAL; |
77 | } | 77 | } |
78 | return 0; | 78 | return 0; |
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c index 42540d26c2b8..1d950a6100af 100644 --- a/net/netfilter/xt_ipvs.c +++ b/net/netfilter/xt_ipvs.c | |||
@@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par) | |||
158 | && par->family != NFPROTO_IPV6 | 158 | && par->family != NFPROTO_IPV6 |
159 | #endif | 159 | #endif |
160 | ) { | 160 | ) { |
161 | pr_info("protocol family %u not supported\n", par->family); | 161 | pr_info_ratelimited("protocol family %u not supported\n", |
162 | par->family); | ||
162 | return -EINVAL; | 163 | return -EINVAL; |
163 | } | 164 | } |
164 | 165 | ||
diff --git a/net/netfilter/xt_l2tp.c b/net/netfilter/xt_l2tp.c index 8aee572771f2..c43482bf48e6 100644 --- a/net/netfilter/xt_l2tp.c +++ b/net/netfilter/xt_l2tp.c | |||
@@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
216 | /* Check for invalid flags */ | 216 | /* Check for invalid flags */ |
217 | if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION | | 217 | if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION | |
218 | XT_L2TP_TYPE)) { | 218 | XT_L2TP_TYPE)) { |
219 | pr_info("unknown flags: %x\n", info->flags); | 219 | pr_info_ratelimited("unknown flags: %x\n", info->flags); |
220 | return -EINVAL; | 220 | return -EINVAL; |
221 | } | 221 | } |
222 | 222 | ||
@@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
225 | (!(info->flags & XT_L2TP_SID)) && | 225 | (!(info->flags & XT_L2TP_SID)) && |
226 | ((!(info->flags & XT_L2TP_TYPE)) || | 226 | ((!(info->flags & XT_L2TP_TYPE)) || |
227 | (info->type != XT_L2TP_TYPE_CONTROL))) { | 227 | (info->type != XT_L2TP_TYPE_CONTROL))) { |
228 | pr_info("invalid flags combination: %x\n", info->flags); | 228 | pr_info_ratelimited("invalid flags combination: %x\n", |
229 | info->flags); | ||
229 | return -EINVAL; | 230 | return -EINVAL; |
230 | } | 231 | } |
231 | 232 | ||
@@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par) | |||
234 | */ | 235 | */ |
235 | if (info->flags & XT_L2TP_VERSION) { | 236 | if (info->flags & XT_L2TP_VERSION) { |
236 | if ((info->version < 2) || (info->version > 3)) { | 237 | if ((info->version < 2) || (info->version > 3)) { |
237 | pr_info("wrong L2TP version: %u\n", info->version); | 238 | pr_info_ratelimited("wrong L2TP version: %u\n", |
239 | info->version); | ||
238 | return -EINVAL; | 240 | return -EINVAL; |
239 | } | 241 | } |
240 | 242 | ||
241 | if (info->version == 2) { | 243 | if (info->version == 2) { |
242 | if ((info->flags & XT_L2TP_TID) && | 244 | if ((info->flags & XT_L2TP_TID) && |
243 | (info->tid > 0xffff)) { | 245 | (info->tid > 0xffff)) { |
244 | pr_info("v2 tid > 0xffff: %u\n", info->tid); | 246 | pr_info_ratelimited("v2 tid > 0xffff: %u\n", |
247 | info->tid); | ||
245 | return -EINVAL; | 248 | return -EINVAL; |
246 | } | 249 | } |
247 | if ((info->flags & XT_L2TP_SID) && | 250 | if ((info->flags & XT_L2TP_SID) && |
248 | (info->sid > 0xffff)) { | 251 | (info->sid > 0xffff)) { |
249 | pr_info("v2 sid > 0xffff: %u\n", info->sid); | 252 | pr_info_ratelimited("v2 sid > 0xffff: %u\n", |
253 | info->sid); | ||
250 | return -EINVAL; | 254 | return -EINVAL; |
251 | } | 255 | } |
252 | } | 256 | } |
@@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par) | |||
268 | 272 | ||
269 | if ((ip->proto != IPPROTO_UDP) && | 273 | if ((ip->proto != IPPROTO_UDP) && |
270 | (ip->proto != IPPROTO_L2TP)) { | 274 | (ip->proto != IPPROTO_L2TP)) { |
271 | pr_info("missing protocol rule (udp|l2tpip)\n"); | 275 | pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n"); |
272 | return -EINVAL; | 276 | return -EINVAL; |
273 | } | 277 | } |
274 | 278 | ||
275 | if ((ip->proto == IPPROTO_L2TP) && | 279 | if ((ip->proto == IPPROTO_L2TP) && |
276 | (info->version == 2)) { | 280 | (info->version == 2)) { |
277 | pr_info("v2 doesn't support IP mode\n"); | 281 | pr_info_ratelimited("v2 doesn't support IP mode\n"); |
278 | return -EINVAL; | 282 | return -EINVAL; |
279 | } | 283 | } |
280 | 284 | ||
@@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par) | |||
295 | 299 | ||
296 | if ((ip->proto != IPPROTO_UDP) && | 300 | if ((ip->proto != IPPROTO_UDP) && |
297 | (ip->proto != IPPROTO_L2TP)) { | 301 | (ip->proto != IPPROTO_L2TP)) { |
298 | pr_info("missing protocol rule (udp|l2tpip)\n"); | 302 | pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n"); |
299 | return -EINVAL; | 303 | return -EINVAL; |
300 | } | 304 | } |
301 | 305 | ||
302 | if ((ip->proto == IPPROTO_L2TP) && | 306 | if ((ip->proto == IPPROTO_L2TP) && |
303 | (info->version == 2)) { | 307 | (info->version == 2)) { |
304 | pr_info("v2 doesn't support IP mode\n"); | 308 | pr_info_ratelimited("v2 doesn't support IP mode\n"); |
305 | return -EINVAL; | 309 | return -EINVAL; |
306 | } | 310 | } |
307 | 311 | ||
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 61403b77361c..55d18cd67635 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par) | |||
106 | /* Check for overflow. */ | 106 | /* Check for overflow. */ |
107 | if (r->burst == 0 | 107 | if (r->burst == 0 |
108 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { | 108 | || user2credits(r->avg * r->burst) < user2credits(r->avg)) { |
109 | pr_info("Overflow, try lower: %u/%u\n", | 109 | pr_info_ratelimited("Overflow, try lower: %u/%u\n", |
110 | r->avg, r->burst); | 110 | r->avg, r->burst); |
111 | return -ERANGE; | 111 | return -ERANGE; |
112 | } | 112 | } |
113 | 113 | ||
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c index 0fd14d1eb09d..bdb689cdc829 100644 --- a/net/netfilter/xt_nat.c +++ b/net/netfilter/xt_nat.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
11 | #include <linux/module.h> | 13 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
13 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
@@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par) | |||
19 | const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; | 21 | const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; |
20 | 22 | ||
21 | if (mr->rangesize != 1) { | 23 | if (mr->rangesize != 1) { |
22 | pr_info("%s: multiple ranges no longer supported\n", | 24 | pr_info_ratelimited("multiple ranges no longer supported\n"); |
23 | par->target->name); | ||
24 | return -EINVAL; | 25 | return -EINVAL; |
25 | } | 26 | } |
26 | return nf_ct_netns_get(par->net, par->family); | 27 | return nf_ct_netns_get(par->net, par->family); |
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index 6f92d25590a8..c8674deed4eb 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c | |||
@@ -6,6 +6,8 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 (or any | 6 | * it under the terms of the GNU General Public License version 2 (or any |
7 | * later at your option) as published by the Free Software Foundation. | 7 | * later at your option) as published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
10 | |||
9 | #include <linux/module.h> | 11 | #include <linux/module.h> |
10 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
11 | 13 | ||
@@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par) | |||
39 | 41 | ||
40 | nfacct = nfnl_acct_find_get(par->net, info->name); | 42 | nfacct = nfnl_acct_find_get(par->net, info->name); |
41 | if (nfacct == NULL) { | 43 | if (nfacct == NULL) { |
42 | pr_info("xt_nfacct: accounting object with name `%s' " | 44 | pr_info_ratelimited("accounting object `%s' does not exists\n", |
43 | "does not exists\n", info->name); | 45 | info->name); |
44 | return -ENOENT; | 46 | return -ENOENT; |
45 | } | 47 | } |
46 | info->nfacct = nfacct; | 48 | info->nfacct = nfacct; |
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index bb33598e4530..9d6d67b953ac 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
@@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) | |||
107 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | 107 | info->invert & XT_PHYSDEV_OP_BRIDGED) && |
108 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | | 108 | par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | |
109 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { | 109 | (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { |
110 | pr_info("using --physdev-out and --physdev-is-out are only " | 110 | pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); |
111 | "supported in the FORWARD and POSTROUTING chains with " | ||
112 | "bridged traffic.\n"); | ||
113 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) | 111 | if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) |
114 | return -EINVAL; | 112 | return -EINVAL; |
115 | } | 113 | } |
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index 5639fb03bdd9..13f8ccf946d6 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c | |||
@@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
132 | static int policy_mt_check(const struct xt_mtchk_param *par) | 132 | static int policy_mt_check(const struct xt_mtchk_param *par) |
133 | { | 133 | { |
134 | const struct xt_policy_info *info = par->matchinfo; | 134 | const struct xt_policy_info *info = par->matchinfo; |
135 | const char *errmsg = "neither incoming nor outgoing policy selected"; | ||
136 | |||
137 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) | ||
138 | goto err; | ||
135 | 139 | ||
136 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { | ||
137 | pr_info("neither incoming nor outgoing policy selected\n"); | ||
138 | return -EINVAL; | ||
139 | } | ||
140 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | | 140 | if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | |
141 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { | 141 | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { |
142 | pr_info("output policy not valid in PREROUTING and INPUT\n"); | 142 | errmsg = "output policy not valid in PREROUTING and INPUT"; |
143 | return -EINVAL; | 143 | goto err; |
144 | } | 144 | } |
145 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | | 145 | if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | |
146 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { | 146 | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { |
147 | pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); | 147 | errmsg = "input policy not valid in POSTROUTING and OUTPUT"; |
148 | return -EINVAL; | 148 | goto err; |
149 | } | 149 | } |
150 | if (info->len > XT_POLICY_MAX_ELEM) { | 150 | if (info->len > XT_POLICY_MAX_ELEM) { |
151 | pr_info("too many policy elements\n"); | 151 | errmsg = "too many policy elements"; |
152 | return -EINVAL; | 152 | goto err; |
153 | } | 153 | } |
154 | return 0; | 154 | return 0; |
155 | err: | ||
156 | pr_info_ratelimited("%s\n", errmsg); | ||
157 | return -EINVAL; | ||
155 | } | 158 | } |
156 | 159 | ||
157 | static struct xt_match policy_mt_reg[] __read_mostly = { | 160 | static struct xt_match policy_mt_reg[] __read_mostly = { |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 245fa350a7a8..6d232d18faff 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
342 | net_get_random_once(&hash_rnd, sizeof(hash_rnd)); | 342 | net_get_random_once(&hash_rnd, sizeof(hash_rnd)); |
343 | 343 | ||
344 | if (info->check_set & ~XT_RECENT_VALID_FLAGS) { | 344 | if (info->check_set & ~XT_RECENT_VALID_FLAGS) { |
345 | pr_info("Unsupported user space flags (%08x)\n", | 345 | pr_info_ratelimited("Unsupported userspace flags (%08x)\n", |
346 | info->check_set); | 346 | info->check_set); |
347 | return -EINVAL; | 347 | return -EINVAL; |
348 | } | 348 | } |
349 | if (hweight8(info->check_set & | 349 | if (hweight8(info->check_set & |
@@ -357,8 +357,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
357 | if ((info->check_set & XT_RECENT_REAP) && !info->seconds) | 357 | if ((info->check_set & XT_RECENT_REAP) && !info->seconds) |
358 | return -EINVAL; | 358 | return -EINVAL; |
359 | if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { | 359 | if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) { |
360 | pr_info("hitcount (%u) is larger than allowed maximum (%u)\n", | 360 | pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n", |
361 | info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); | 361 | info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); |
362 | return -EINVAL; | 362 | return -EINVAL; |
363 | } | 363 | } |
364 | if (info->name[0] == '\0' || | 364 | if (info->name[0] == '\0' || |
@@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
587 | add = true; | 587 | add = true; |
588 | break; | 588 | break; |
589 | default: | 589 | default: |
590 | pr_info("Need \"+ip\", \"-ip\" or \"/\"\n"); | 590 | pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n"); |
591 | return -EINVAL; | 591 | return -EINVAL; |
592 | } | 592 | } |
593 | 593 | ||
@@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input, | |||
601 | succ = in4_pton(c, size, (void *)&addr, '\n', NULL); | 601 | succ = in4_pton(c, size, (void *)&addr, '\n', NULL); |
602 | } | 602 | } |
603 | 603 | ||
604 | if (!succ) { | 604 | if (!succ) |
605 | pr_info("illegal address written to procfs\n"); | ||
606 | return -EINVAL; | 605 | return -EINVAL; |
607 | } | ||
608 | 606 | ||
609 | spin_lock_bh(&recent_lock); | 607 | spin_lock_bh(&recent_lock); |
610 | e = recent_entry_lookup(t, &addr, family, 0); | 608 | e = recent_entry_lookup(t, &addr, family, 0); |
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 16b6b11ee83f..6f4c5217d835 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c | |||
@@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par) | |||
92 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); | 92 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); |
93 | 93 | ||
94 | if (index == IPSET_INVALID_ID) { | 94 | if (index == IPSET_INVALID_ID) { |
95 | pr_warn("Cannot find set identified by id %u to match\n", | 95 | pr_info_ratelimited("Cannot find set identified by id %u to match\n", |
96 | info->match_set.index); | 96 | info->match_set.index); |
97 | return -ENOENT; | 97 | return -ENOENT; |
98 | } | 98 | } |
99 | if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) { | 99 | if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) { |
100 | pr_warn("Protocol error: set match dimension is over the limit!\n"); | 100 | pr_info_ratelimited("set match dimension is over the limit!\n"); |
101 | ip_set_nfnl_put(par->net, info->match_set.index); | 101 | ip_set_nfnl_put(par->net, info->match_set.index); |
102 | return -ERANGE; | 102 | return -ERANGE; |
103 | } | 103 | } |
@@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par) | |||
143 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); | 143 | index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); |
144 | 144 | ||
145 | if (index == IPSET_INVALID_ID) { | 145 | if (index == IPSET_INVALID_ID) { |
146 | pr_warn("Cannot find set identified by id %u to match\n", | 146 | pr_info_ratelimited("Cannot find set identified by id %u to match\n", |
147 | info->match_set.index); | 147 | info->match_set.index); |
148 | return -ENOENT; | 148 | return -ENOENT; |
149 | } | 149 | } |
150 | if (info->match_set.dim > IPSET_DIM_MAX) { | 150 | if (info->match_set.dim > IPSET_DIM_MAX) { |
151 | pr_warn("Protocol error: set match dimension is over the limit!\n"); | 151 | pr_info_ratelimited("set match dimension is over the limit!\n"); |
152 | ip_set_nfnl_put(par->net, info->match_set.index); | 152 | ip_set_nfnl_put(par->net, info->match_set.index); |
153 | return -ERANGE; | 153 | return -ERANGE; |
154 | } | 154 | } |
@@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
241 | if (info->add_set.index != IPSET_INVALID_ID) { | 241 | if (info->add_set.index != IPSET_INVALID_ID) { |
242 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); | 242 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); |
243 | if (index == IPSET_INVALID_ID) { | 243 | if (index == IPSET_INVALID_ID) { |
244 | pr_warn("Cannot find add_set index %u as target\n", | 244 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
245 | info->add_set.index); | 245 | info->add_set.index); |
246 | return -ENOENT; | 246 | return -ENOENT; |
247 | } | 247 | } |
248 | } | 248 | } |
@@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
250 | if (info->del_set.index != IPSET_INVALID_ID) { | 250 | if (info->del_set.index != IPSET_INVALID_ID) { |
251 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); | 251 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); |
252 | if (index == IPSET_INVALID_ID) { | 252 | if (index == IPSET_INVALID_ID) { |
253 | pr_warn("Cannot find del_set index %u as target\n", | 253 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
254 | info->del_set.index); | 254 | info->del_set.index); |
255 | if (info->add_set.index != IPSET_INVALID_ID) | 255 | if (info->add_set.index != IPSET_INVALID_ID) |
256 | ip_set_nfnl_put(par->net, info->add_set.index); | 256 | ip_set_nfnl_put(par->net, info->add_set.index); |
257 | return -ENOENT; | 257 | return -ENOENT; |
@@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
259 | } | 259 | } |
260 | if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 || | 260 | if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 || |
261 | info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) { | 261 | info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) { |
262 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 262 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
263 | if (info->add_set.index != IPSET_INVALID_ID) | 263 | if (info->add_set.index != IPSET_INVALID_ID) |
264 | ip_set_nfnl_put(par->net, info->add_set.index); | 264 | ip_set_nfnl_put(par->net, info->add_set.index); |
265 | if (info->del_set.index != IPSET_INVALID_ID) | 265 | if (info->del_set.index != IPSET_INVALID_ID) |
@@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
316 | if (info->add_set.index != IPSET_INVALID_ID) { | 316 | if (info->add_set.index != IPSET_INVALID_ID) { |
317 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); | 317 | index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); |
318 | if (index == IPSET_INVALID_ID) { | 318 | if (index == IPSET_INVALID_ID) { |
319 | pr_warn("Cannot find add_set index %u as target\n", | 319 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
320 | info->add_set.index); | 320 | info->add_set.index); |
321 | return -ENOENT; | 321 | return -ENOENT; |
322 | } | 322 | } |
323 | } | 323 | } |
@@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
325 | if (info->del_set.index != IPSET_INVALID_ID) { | 325 | if (info->del_set.index != IPSET_INVALID_ID) { |
326 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); | 326 | index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); |
327 | if (index == IPSET_INVALID_ID) { | 327 | if (index == IPSET_INVALID_ID) { |
328 | pr_warn("Cannot find del_set index %u as target\n", | 328 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
329 | info->del_set.index); | 329 | info->del_set.index); |
330 | if (info->add_set.index != IPSET_INVALID_ID) | 330 | if (info->add_set.index != IPSET_INVALID_ID) |
331 | ip_set_nfnl_put(par->net, info->add_set.index); | 331 | ip_set_nfnl_put(par->net, info->add_set.index); |
332 | return -ENOENT; | 332 | return -ENOENT; |
@@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) | |||
334 | } | 334 | } |
335 | if (info->add_set.dim > IPSET_DIM_MAX || | 335 | if (info->add_set.dim > IPSET_DIM_MAX || |
336 | info->del_set.dim > IPSET_DIM_MAX) { | 336 | info->del_set.dim > IPSET_DIM_MAX) { |
337 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 337 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
338 | if (info->add_set.index != IPSET_INVALID_ID) | 338 | if (info->add_set.index != IPSET_INVALID_ID) |
339 | ip_set_nfnl_put(par->net, info->add_set.index); | 339 | ip_set_nfnl_put(par->net, info->add_set.index); |
340 | if (info->del_set.index != IPSET_INVALID_ID) | 340 | if (info->del_set.index != IPSET_INVALID_ID) |
@@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
444 | index = ip_set_nfnl_get_byindex(par->net, | 444 | index = ip_set_nfnl_get_byindex(par->net, |
445 | info->add_set.index); | 445 | info->add_set.index); |
446 | if (index == IPSET_INVALID_ID) { | 446 | if (index == IPSET_INVALID_ID) { |
447 | pr_warn("Cannot find add_set index %u as target\n", | 447 | pr_info_ratelimited("Cannot find add_set index %u as target\n", |
448 | info->add_set.index); | 448 | info->add_set.index); |
449 | return -ENOENT; | 449 | return -ENOENT; |
450 | } | 450 | } |
451 | } | 451 | } |
@@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
454 | index = ip_set_nfnl_get_byindex(par->net, | 454 | index = ip_set_nfnl_get_byindex(par->net, |
455 | info->del_set.index); | 455 | info->del_set.index); |
456 | if (index == IPSET_INVALID_ID) { | 456 | if (index == IPSET_INVALID_ID) { |
457 | pr_warn("Cannot find del_set index %u as target\n", | 457 | pr_info_ratelimited("Cannot find del_set index %u as target\n", |
458 | info->del_set.index); | 458 | info->del_set.index); |
459 | if (info->add_set.index != IPSET_INVALID_ID) | 459 | if (info->add_set.index != IPSET_INVALID_ID) |
460 | ip_set_nfnl_put(par->net, | 460 | ip_set_nfnl_put(par->net, |
461 | info->add_set.index); | 461 | info->add_set.index); |
@@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
465 | 465 | ||
466 | if (info->map_set.index != IPSET_INVALID_ID) { | 466 | if (info->map_set.index != IPSET_INVALID_ID) { |
467 | if (strncmp(par->table, "mangle", 7)) { | 467 | if (strncmp(par->table, "mangle", 7)) { |
468 | pr_warn("--map-set only usable from mangle table\n"); | 468 | pr_info_ratelimited("--map-set only usable from mangle table\n"); |
469 | return -EINVAL; | 469 | return -EINVAL; |
470 | } | 470 | } |
471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | | 471 | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | |
@@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
473 | !(par->hook_mask & (1 << NF_INET_FORWARD | | 473 | !(par->hook_mask & (1 << NF_INET_FORWARD | |
474 | 1 << NF_INET_LOCAL_OUT | | 474 | 1 << NF_INET_LOCAL_OUT | |
475 | 1 << NF_INET_POST_ROUTING))) { | 475 | 1 << NF_INET_POST_ROUTING))) { |
476 | pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); | 476 | pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); |
477 | return -EINVAL; | 477 | return -EINVAL; |
478 | } | 478 | } |
479 | index = ip_set_nfnl_get_byindex(par->net, | 479 | index = ip_set_nfnl_get_byindex(par->net, |
480 | info->map_set.index); | 480 | info->map_set.index); |
481 | if (index == IPSET_INVALID_ID) { | 481 | if (index == IPSET_INVALID_ID) { |
482 | pr_warn("Cannot find map_set index %u as target\n", | 482 | pr_info_ratelimited("Cannot find map_set index %u as target\n", |
483 | info->map_set.index); | 483 | info->map_set.index); |
484 | if (info->add_set.index != IPSET_INVALID_ID) | 484 | if (info->add_set.index != IPSET_INVALID_ID) |
485 | ip_set_nfnl_put(par->net, | 485 | ip_set_nfnl_put(par->net, |
486 | info->add_set.index); | 486 | info->add_set.index); |
@@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) | |||
494 | if (info->add_set.dim > IPSET_DIM_MAX || | 494 | if (info->add_set.dim > IPSET_DIM_MAX || |
495 | info->del_set.dim > IPSET_DIM_MAX || | 495 | info->del_set.dim > IPSET_DIM_MAX || |
496 | info->map_set.dim > IPSET_DIM_MAX) { | 496 | info->map_set.dim > IPSET_DIM_MAX) { |
497 | pr_warn("Protocol error: SET target dimension is over the limit!\n"); | 497 | pr_info_ratelimited("SET target dimension over the limit!\n"); |
498 | if (info->add_set.index != IPSET_INVALID_ID) | 498 | if (info->add_set.index != IPSET_INVALID_ID) |
499 | ip_set_nfnl_put(par->net, info->add_set.index); | 499 | ip_set_nfnl_put(par->net, info->add_set.index); |
500 | if (info->del_set.index != IPSET_INVALID_ID) | 500 | if (info->del_set.index != IPSET_INVALID_ID) |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 575d2153e3b8..2ac7f674d19b 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par) | |||
171 | return err; | 171 | return err; |
172 | 172 | ||
173 | if (info->flags & ~XT_SOCKET_FLAGS_V1) { | 173 | if (info->flags & ~XT_SOCKET_FLAGS_V1) { |
174 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1); | 174 | pr_info_ratelimited("unknown flags 0x%x\n", |
175 | info->flags & ~XT_SOCKET_FLAGS_V1); | ||
175 | return -EINVAL; | 176 | return -EINVAL; |
176 | } | 177 | } |
177 | return 0; | 178 | return 0; |
@@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par) | |||
187 | return err; | 188 | return err; |
188 | 189 | ||
189 | if (info->flags & ~XT_SOCKET_FLAGS_V2) { | 190 | if (info->flags & ~XT_SOCKET_FLAGS_V2) { |
190 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2); | 191 | pr_info_ratelimited("unknown flags 0x%x\n", |
192 | info->flags & ~XT_SOCKET_FLAGS_V2); | ||
191 | return -EINVAL; | 193 | return -EINVAL; |
192 | } | 194 | } |
193 | return 0; | 195 | return 0; |
@@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par) | |||
203 | if (err) | 205 | if (err) |
204 | return err; | 206 | return err; |
205 | if (info->flags & ~XT_SOCKET_FLAGS_V3) { | 207 | if (info->flags & ~XT_SOCKET_FLAGS_V3) { |
206 | pr_info("unknown flags 0x%x\n", | 208 | pr_info_ratelimited("unknown flags 0x%x\n", |
207 | info->flags & ~XT_SOCKET_FLAGS_V3); | 209 | info->flags & ~XT_SOCKET_FLAGS_V3); |
208 | return -EINVAL; | 210 | return -EINVAL; |
209 | } | 211 | } |
210 | return 0; | 212 | return 0; |
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index 5fbd79194d21..0b41c0befe3c 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c | |||
@@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par) | |||
44 | 44 | ||
45 | ret = nf_ct_netns_get(par->net, par->family); | 45 | ret = nf_ct_netns_get(par->net, par->family); |
46 | if (ret < 0) | 46 | if (ret < 0) |
47 | pr_info("cannot load conntrack support for proto=%u\n", | 47 | pr_info_ratelimited("cannot load conntrack support for proto=%u\n", |
48 | par->family); | 48 | par->family); |
49 | return ret; | 49 | return ret; |
50 | } | 50 | } |
51 | 51 | ||
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 1b01eec1fbda..0160f505e337 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
@@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par) | |||
235 | 235 | ||
236 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || | 236 | if (info->daytime_start > XT_TIME_MAX_DAYTIME || |
237 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { | 237 | info->daytime_stop > XT_TIME_MAX_DAYTIME) { |
238 | pr_info("invalid argument - start or " | 238 | pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n"); |
239 | "stop time greater than 23:59:59\n"); | ||
240 | return -EDOM; | 239 | return -EDOM; |
241 | } | 240 | } |
242 | 241 | ||
243 | if (info->flags & ~XT_TIME_ALL_FLAGS) { | 242 | if (info->flags & ~XT_TIME_ALL_FLAGS) { |
244 | pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS); | 243 | pr_info_ratelimited("unknown flags 0x%x\n", |
244 | info->flags & ~XT_TIME_ALL_FLAGS); | ||
245 | return -EINVAL; | 245 | return -EINVAL; |
246 | } | 246 | } |
247 | 247 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2ad445c1d27c..07e8478068f0 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2308 | if (cb->start) { | 2308 | if (cb->start) { |
2309 | ret = cb->start(cb); | 2309 | ret = cb->start(cb); |
2310 | if (ret) | 2310 | if (ret) |
2311 | goto error_unlock; | 2311 | goto error_put; |
2312 | } | 2312 | } |
2313 | 2313 | ||
2314 | nlk->cb_running = true; | 2314 | nlk->cb_running = true; |
@@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2328 | */ | 2328 | */ |
2329 | return -EINTR; | 2329 | return -EINTR; |
2330 | 2330 | ||
2331 | error_put: | ||
2332 | module_put(control->module); | ||
2331 | error_unlock: | 2333 | error_unlock: |
2332 | sock_put(sk); | 2334 | sock_put(sk); |
2333 | mutex_unlock(nlk->cb_mutex); | 2335 | mutex_unlock(nlk->cb_mutex); |
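The af_netlink hunks plug a module reference leak: __netlink_dump_start takes a reference on control->module earlier in the function, so a failing cb->start() callback must drop it again, hence the new error_put label that falls through into the existing unlock path. A generic sketch of that stacked-label unwinding style, with a hypothetical two-step setup (not the netlink code itself):

#include <linux/module.h>

/* Hypothetical setup: take a module ref, then run a callback that may fail. */
static int example_dump_start(struct module *owner, int (*start)(void))
{
	int ret;

	if (!try_module_get(owner))
		return -EBUSY;

	ret = start();
	if (ret)
		goto error_put;	/* undo the try_module_get() above */

	return 0;

error_put:
	module_put(owner);
	return ret;
}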
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 367d8c027101..2ceefa183cee 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c | |||
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri, | |||
149 | 149 | ||
150 | pr_debug("uri: %s, len: %zu\n", uri, uri_len); | 150 | pr_debug("uri: %s, len: %zu\n", uri, uri_len); |
151 | 151 | ||
152 | /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ | ||
153 | if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) | ||
154 | return NULL; | ||
155 | |||
152 | sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); | 156 | sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); |
153 | if (sdreq == NULL) | 157 | if (sdreq == NULL) |
154 | return NULL; | 158 | return NULL; |
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index c0b83dc9d993..f018eafc2a0d 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c | |||
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { | 63 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { |
64 | [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, | 64 | [NFC_SDP_ATTR_URI] = { .type = NLA_STRING, |
65 | .len = U8_MAX - 4 }, | ||
65 | [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, | 66 | [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, |
66 | }; | 67 | }; |
67 | 68 | ||
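The two NFC hunks are one fix seen from both ends: nfc_llcp_build_sdreq_tlv() stores the URI length in a u8 TLV length field (plus 3 bytes of header and a trailing NUL, per the new comment), so the netlink policy now caps NFC_SDP_ATTR_URI at U8_MAX - 4 and the builder itself WARNs and bails on anything longer. A minimal sketch of expressing such a bound in an nla_policy table, with a hypothetical attribute enum:

#include <linux/limits.h>
#include <net/netlink.h>

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_URI,		/* hypothetical string attribute */
	__EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	/* .len caps the payload so a later u8 length field cannot overflow */
	[EX_ATTR_URI] = { .type = NLA_STRING, .len = U8_MAX - 4 },
};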
diff --git a/net/rds/connection.c b/net/rds/connection.c index 94e190febfdd..2da3176bf792 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, | |||
224 | if (rds_destroy_pending(conn)) | 224 | if (rds_destroy_pending(conn)) |
225 | ret = -ENETDOWN; | 225 | ret = -ENETDOWN; |
226 | else | 226 | else |
227 | ret = trans->conn_alloc(conn, gfp); | 227 | ret = trans->conn_alloc(conn, GFP_ATOMIC); |
228 | if (ret) { | 228 | if (ret) { |
229 | rcu_read_unlock(); | 229 | rcu_read_unlock(); |
230 | kfree(conn->c_path); | 230 | kfree(conn->c_path); |
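The rds change is about allocation context: conn_alloc() is called inside an rcu_read_lock() section (note the rcu_read_unlock() on the error path), where sleeping allocators are not allowed, so the caller-supplied gfp is replaced with GFP_ATOMIC. A generic sketch of the constraint, with a hypothetical allocation rather than the rds code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void *example_alloc_in_rcu_section(size_t len)
{
	void *p;

	rcu_read_lock();
	/* No sleeping inside an RCU read-side section, so GFP_KERNEL is off limits. */
	p = kmalloc(len, GFP_ATOMIC);
	rcu_read_unlock();

	return p;
}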
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 42410e910aff..cf73dc006c3b 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
@@ -445,7 +445,7 @@ send_fragmentable: | |||
445 | (char *)&opt, sizeof(opt)); | 445 | (char *)&opt, sizeof(opt)); |
446 | if (ret == 0) { | 446 | if (ret == 0) { |
447 | ret = kernel_sendmsg(conn->params.local->socket, &msg, | 447 | ret = kernel_sendmsg(conn->params.local->socket, &msg, |
448 | iov, 1, iov[0].iov_len); | 448 | iov, 2, len); |
449 | 449 | ||
450 | opt = IPV6_PMTUDISC_DO; | 450 | opt = IPV6_PMTUDISC_DO; |
451 | kernel_setsockopt(conn->params.local->socket, | 451 | kernel_setsockopt(conn->params.local->socket, |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index cc21e8db25b0..9d45d8b56744 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -517,9 +517,10 @@ try_again: | |||
517 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | 517 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, |
518 | sizeof(unsigned int), &id32); | 518 | sizeof(unsigned int), &id32); |
519 | } else { | 519 | } else { |
520 | unsigned long idl = call->user_call_ID; | ||
521 | |||
520 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, | 522 | ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, |
521 | sizeof(unsigned long), | 523 | sizeof(unsigned long), &idl); |
522 | &call->user_call_ID); | ||
523 | } | 524 | } |
524 | if (ret < 0) | 525 | if (ret < 0) |
525 | goto error_unlock_call; | 526 | goto error_unlock_call; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2bc1bc23d42e..247b7cc20c13 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -376,17 +376,12 @@ struct tcf_net { | |||
376 | static unsigned int tcf_net_id; | 376 | static unsigned int tcf_net_id; |
377 | 377 | ||
378 | static int tcf_block_insert(struct tcf_block *block, struct net *net, | 378 | static int tcf_block_insert(struct tcf_block *block, struct net *net, |
379 | u32 block_index, struct netlink_ext_ack *extack) | 379 | struct netlink_ext_ack *extack) |
380 | { | 380 | { |
381 | struct tcf_net *tn = net_generic(net, tcf_net_id); | 381 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
382 | int err; | ||
383 | 382 | ||
384 | err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, | 383 | return idr_alloc_u32(&tn->idr, block, &block->index, block->index, |
385 | GFP_KERNEL); | 384 | GFP_KERNEL); |
386 | if (err) | ||
387 | return err; | ||
388 | block->index = block_index; | ||
389 | return 0; | ||
390 | } | 385 | } |
391 | 386 | ||
392 | static void tcf_block_remove(struct tcf_block *block, struct net *net) | 387 | static void tcf_block_remove(struct tcf_block *block, struct net *net) |
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net) | |||
397 | } | 392 | } |
398 | 393 | ||
399 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, | 394 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, |
395 | u32 block_index, | ||
400 | struct netlink_ext_ack *extack) | 396 | struct netlink_ext_ack *extack) |
401 | { | 397 | { |
402 | struct tcf_block *block; | 398 | struct tcf_block *block; |
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, | |||
419 | err = -ENOMEM; | 415 | err = -ENOMEM; |
420 | goto err_chain_create; | 416 | goto err_chain_create; |
421 | } | 417 | } |
422 | block->net = qdisc_net(q); | ||
423 | block->refcnt = 1; | 418 | block->refcnt = 1; |
424 | block->net = net; | 419 | block->net = net; |
425 | block->q = q; | 420 | block->index = block_index; |
421 | |||
422 | /* Don't store q pointer for blocks which are shared */ | ||
423 | if (!tcf_block_shared(block)) | ||
424 | block->q = q; | ||
426 | return block; | 425 | return block; |
427 | 426 | ||
428 | err_chain_create: | 427 | err_chain_create: |
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, | |||
518 | } | 517 | } |
519 | 518 | ||
520 | if (!block) { | 519 | if (!block) { |
521 | block = tcf_block_create(net, q, extack); | 520 | block = tcf_block_create(net, q, ei->block_index, extack); |
522 | if (IS_ERR(block)) | 521 | if (IS_ERR(block)) |
523 | return PTR_ERR(block); | 522 | return PTR_ERR(block); |
524 | created = true; | 523 | created = true; |
525 | if (ei->block_index) { | 524 | if (tcf_block_shared(block)) { |
526 | err = tcf_block_insert(block, net, | 525 | err = tcf_block_insert(block, net, extack); |
527 | ei->block_index, extack); | ||
528 | if (err) | 526 | if (err) |
529 | goto err_block_insert; | 527 | goto err_block_insert; |
530 | } | 528 | } |
@@ -1399,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
1399 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) | 1397 | nla_get_u32(tca[TCA_CHAIN]) != chain->index) |
1400 | continue; | 1398 | continue; |
1401 | if (!tcf_chain_dump(chain, q, parent, skb, cb, | 1399 | if (!tcf_chain_dump(chain, q, parent, skb, cb, |
1402 | index_start, &index)) | 1400 | index_start, &index)) { |
1401 | err = -EMSGSIZE; | ||
1403 | break; | 1402 | break; |
1403 | } | ||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | cb->args[0] = index; | 1406 | cb->args[0] = index; |
1407 | 1407 | ||
1408 | out: | 1408 | out: |
1409 | /* If we made no progress, the error (EMSGSIZE) is real */ | ||
1410 | if (skb->len == 0 && err) | ||
1411 | return err; | ||
1409 | return skb->len; | 1412 | return skb->len; |
1410 | } | 1413 | } |
1411 | 1414 | ||
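The tcf_block_insert() simplification above leans on idr_alloc_u32() taking the requested index by pointer: passing the same value as both the id and the maximum pins the allocation to exactly that index, and the result is written back on success, so the temporary variable and the manual assignment can go. A hedged sketch of the call pattern with a hypothetical IDR:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);

/* Insert @item at exactly *@index, or fail if that slot is taken. */
static int example_insert(void *item, u32 *index, gfp_t gfp)
{
	return idr_alloc_u32(&example_idr, item, index, *index, gfp);
}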
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 6c7601a530e3..ed8b6a24b9e9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -96,7 +96,7 @@ struct tc_u_hnode { | |||
96 | 96 | ||
97 | struct tc_u_common { | 97 | struct tc_u_common { |
98 | struct tc_u_hnode __rcu *hlist; | 98 | struct tc_u_hnode __rcu *hlist; |
99 | struct tcf_block *block; | 99 | void *ptr; |
100 | int refcnt; | 100 | int refcnt; |
101 | struct idr handle_idr; | 101 | struct idr handle_idr; |
102 | struct hlist_node hnode; | 102 | struct hlist_node hnode; |
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash; | |||
330 | #define U32_HASH_SHIFT 10 | 330 | #define U32_HASH_SHIFT 10 |
331 | #define U32_HASH_SIZE (1 << U32_HASH_SHIFT) | 331 | #define U32_HASH_SIZE (1 << U32_HASH_SHIFT) |
332 | 332 | ||
333 | static void *tc_u_common_ptr(const struct tcf_proto *tp) | ||
334 | { | ||
335 | struct tcf_block *block = tp->chain->block; | ||
336 | |||
337 | /* The block sharing is currently supported only | ||
338 | * for classless qdiscs. In that case we use block | ||
339 | * for tc_u_common identification. In case the | ||
340 | * block is not shared, block->q is a valid pointer | ||
341 | * and we can use that. That works for classful qdiscs. | ||
342 | */ | ||
343 | if (tcf_block_shared(block)) | ||
344 | return block; | ||
345 | else | ||
346 | return block->q; | ||
347 | } | ||
348 | |||
333 | static unsigned int tc_u_hash(const struct tcf_proto *tp) | 349 | static unsigned int tc_u_hash(const struct tcf_proto *tp) |
334 | { | 350 | { |
335 | return hash_ptr(tp->chain->block, U32_HASH_SHIFT); | 351 | return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT); |
336 | } | 352 | } |
337 | 353 | ||
338 | static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) | 354 | static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) |
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) | |||
342 | 358 | ||
343 | h = tc_u_hash(tp); | 359 | h = tc_u_hash(tp); |
344 | hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { | 360 | hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { |
345 | if (tc->block == tp->chain->block) | 361 | if (tc->ptr == tc_u_common_ptr(tp)) |
346 | return tc; | 362 | return tc; |
347 | } | 363 | } |
348 | return NULL; | 364 | return NULL; |
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp) | |||
371 | kfree(root_ht); | 387 | kfree(root_ht); |
372 | return -ENOBUFS; | 388 | return -ENOBUFS; |
373 | } | 389 | } |
374 | tp_c->block = tp->chain->block; | 390 | tp_c->ptr = tc_u_common_ptr(tp); |
375 | INIT_HLIST_NODE(&tp_c->hnode); | 391 | INIT_HLIST_NODE(&tp_c->hnode); |
376 | idr_init(&tp_c->handle_idr); | 392 | idr_init(&tp_c->handle_idr); |
377 | 393 | ||
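As the comment in the hunk explains, the u32 classifier now keys its shared state on whichever pointer identifies the sharing scope (the block when it is shared, otherwise the qdisc). The lookup itself is an ordinary pointer-keyed hash table; a minimal, hypothetical illustration using the generic hashtable helpers rather than the open-coded hash_ptr() bucket array:

#include <linux/hashtable.h>

#define EX_HASH_BITS 10
static DEFINE_HASHTABLE(ex_table, EX_HASH_BITS);

struct ex_entry {
	void *key;		/* identifying pointer: shared block or qdisc */
	struct hlist_node hnode;
};

static struct ex_entry *ex_find(void *key)
{
	struct ex_entry *e;

	/* Walk only the bucket the key hashes to, then compare pointers. */
	hash_for_each_possible(ex_table, e, hnode, (unsigned long)key)
		if (e->key == key)
			return e;
	return NULL;
}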
diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 291c97b07058..8f6c2e8c0953 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c | |||
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid) | |||
81 | case SCTP_CID_RECONF: | 81 | case SCTP_CID_RECONF: |
82 | return "RECONF"; | 82 | return "RECONF"; |
83 | 83 | ||
84 | case SCTP_CID_I_DATA: | ||
85 | return "I_DATA"; | ||
86 | |||
87 | case SCTP_CID_I_FWD_TSN: | ||
88 | return "I_FWD_TSN"; | ||
89 | |||
84 | default: | 90 | default: |
85 | break; | 91 | break; |
86 | } | 92 | } |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 141c9c466ec1..0247cc432e02 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t) | |||
897 | rhl_for_each_entry_rcu(transport, tmp, list, node) | 897 | rhl_for_each_entry_rcu(transport, tmp, list, node) |
898 | if (transport->asoc->ep == t->asoc->ep) { | 898 | if (transport->asoc->ep == t->asoc->ep) { |
899 | rcu_read_unlock(); | 899 | rcu_read_unlock(); |
900 | err = -EEXIST; | 900 | return -EEXIST; |
901 | goto out; | ||
902 | } | 901 | } |
903 | rcu_read_unlock(); | 902 | rcu_read_unlock(); |
904 | 903 | ||
905 | err = rhltable_insert_key(&sctp_transport_hashtable, &arg, | 904 | err = rhltable_insert_key(&sctp_transport_hashtable, &arg, |
906 | &t->node, sctp_hash_params); | 905 | &t->node, sctp_hash_params); |
907 | |||
908 | out: | ||
909 | if (err) | 906 | if (err) |
910 | pr_err_once("insert transport fail, errno %d\n", err); | 907 | pr_err_once("insert transport fail, errno %d\n", err); |
911 | 908 | ||
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index cedf672487f9..f799043abec9 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * This file is part of the SCTP kernel implementation | 7 | * This file is part of the SCTP kernel implementation |
8 | * | 8 | * |
9 | * These functions manipulate sctp tsn mapping array. | 9 | * This file contains sctp stream manipulation primitives and helpers. |
10 | * | 10 | * |
11 | * This SCTP implementation is free software; | 11 | * This SCTP implementation is free software; |
12 | * you can redistribute it and/or modify it under the terms of | 12 | * you can redistribute it and/or modify it under the terms of |
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c index 8c7cf8f08711..d3764c181299 100644 --- a/net/sctp/stream_interleave.c +++ b/net/sctp/stream_interleave.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This file is part of the SCTP kernel implementation | 4 | * This file is part of the SCTP kernel implementation |
5 | * | 5 | * |
6 | * These functions manipulate sctp stream queue/scheduling. | 6 | * These functions implement sctp stream message interleaving, mostly |
7 | * including I-DATA and I-FORWARD-TSN chunk processing. | ||
7 | * | 8 | * |
8 | * This SCTP implementation is free software; | 9 | * This SCTP implementation is free software; |
9 | * you can redistribute it and/or modify it under the terms of | 10 | * you can redistribute it and/or modify it under the terms of |
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
954 | __u32 freed = 0; | 955 | __u32 freed = 0; |
955 | __u16 needed; | 956 | __u16 needed; |
956 | 957 | ||
957 | if (chunk) { | 958 | needed = ntohs(chunk->chunk_hdr->length) - |
958 | needed = ntohs(chunk->chunk_hdr->length); | 959 | sizeof(struct sctp_idata_chunk); |
959 | needed -= sizeof(struct sctp_idata_chunk); | ||
960 | } else { | ||
961 | needed = SCTP_DEFAULT_MAXWINDOW; | ||
962 | } | ||
963 | 960 | ||
964 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { | 961 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { |
965 | freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); | 962 | freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); |
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
971 | needed); | 968 | needed); |
972 | } | 969 | } |
973 | 970 | ||
974 | if (chunk && freed >= needed) | 971 | if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) |
975 | if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) | 972 | sctp_intl_start_pd(ulpq, gfp); |
976 | sctp_intl_start_pd(ulpq, gfp); | ||
977 | 973 | ||
978 | sk_mem_reclaim(asoc->base.sk); | 974 | sk_mem_reclaim(asoc->base.sk); |
979 | } | 975 | } |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index c8001471da6c..3e3dce3d4c63 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -813,7 +813,7 @@ err_out: | |||
813 | return err; | 813 | return err; |
814 | } | 814 | } |
815 | 815 | ||
816 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | 816 | int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) |
817 | { | 817 | { |
818 | int err; | 818 | int err; |
819 | char *name; | 819 | char *name; |
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) | |||
835 | 835 | ||
836 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 836 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
837 | 837 | ||
838 | rtnl_lock(); | ||
839 | bearer = tipc_bearer_find(net, name); | 838 | bearer = tipc_bearer_find(net, name); |
840 | if (!bearer) { | 839 | if (!bearer) |
841 | rtnl_unlock(); | ||
842 | return -EINVAL; | 840 | return -EINVAL; |
843 | } | ||
844 | 841 | ||
845 | bearer_disable(net, bearer); | 842 | bearer_disable(net, bearer); |
846 | rtnl_unlock(); | ||
847 | 843 | ||
848 | return 0; | 844 | return 0; |
849 | } | 845 | } |
850 | 846 | ||
851 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | 847 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) |
848 | { | ||
849 | int err; | ||
850 | |||
851 | rtnl_lock(); | ||
852 | err = __tipc_nl_bearer_disable(skb, info); | ||
853 | rtnl_unlock(); | ||
854 | |||
855 | return err; | ||
856 | } | ||
857 | |||
858 | int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | ||
852 | { | 859 | { |
853 | int err; | 860 | int err; |
854 | char *bearer; | 861 | char *bearer; |
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | |||
890 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 897 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
891 | } | 898 | } |
892 | 899 | ||
900 | return tipc_enable_bearer(net, bearer, domain, prio, attrs); | ||
901 | } | ||
902 | |||
903 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) | ||
904 | { | ||
905 | int err; | ||
906 | |||
893 | rtnl_lock(); | 907 | rtnl_lock(); |
894 | err = tipc_enable_bearer(net, bearer, domain, prio, attrs); | 908 | err = __tipc_nl_bearer_enable(skb, info); |
895 | if (err) { | ||
896 | rtnl_unlock(); | ||
897 | return err; | ||
898 | } | ||
899 | rtnl_unlock(); | 909 | rtnl_unlock(); |
900 | 910 | ||
901 | return 0; | 911 | return err; |
902 | } | 912 | } |
903 | 913 | ||
904 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) | 914 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) |
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) | |||
944 | return 0; | 954 | return 0; |
945 | } | 955 | } |
946 | 956 | ||
947 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | 957 | int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) |
948 | { | 958 | { |
949 | int err; | 959 | int err; |
950 | char *name; | 960 | char *name; |
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
965 | return -EINVAL; | 975 | return -EINVAL; |
966 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); | 976 | name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); |
967 | 977 | ||
968 | rtnl_lock(); | ||
969 | b = tipc_bearer_find(net, name); | 978 | b = tipc_bearer_find(net, name); |
970 | if (!b) { | 979 | if (!b) |
971 | rtnl_unlock(); | ||
972 | return -EINVAL; | 980 | return -EINVAL; |
973 | } | ||
974 | 981 | ||
975 | if (attrs[TIPC_NLA_BEARER_PROP]) { | 982 | if (attrs[TIPC_NLA_BEARER_PROP]) { |
976 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; | 983 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
977 | 984 | ||
978 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], | 985 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], |
979 | props); | 986 | props); |
980 | if (err) { | 987 | if (err) |
981 | rtnl_unlock(); | ||
982 | return err; | 988 | return err; |
983 | } | ||
984 | 989 | ||
985 | if (props[TIPC_NLA_PROP_TOL]) | 990 | if (props[TIPC_NLA_PROP_TOL]) |
986 | b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 991 | b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
989 | if (props[TIPC_NLA_PROP_WIN]) | 994 | if (props[TIPC_NLA_PROP_WIN]) |
990 | b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); | 995 | b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
991 | } | 996 | } |
992 | rtnl_unlock(); | ||
993 | 997 | ||
994 | return 0; | 998 | return 0; |
995 | } | 999 | } |
996 | 1000 | ||
1001 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | ||
1002 | { | ||
1003 | int err; | ||
1004 | |||
1005 | rtnl_lock(); | ||
1006 | err = __tipc_nl_bearer_set(skb, info); | ||
1007 | rtnl_unlock(); | ||
1008 | |||
1009 | return err; | ||
1010 | } | ||
1011 | |||
997 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | 1012 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, |
998 | struct tipc_media *media, int nlflags) | 1013 | struct tipc_media *media, int nlflags) |
999 | { | 1014 | { |
@@ -1115,7 +1130,7 @@ err_out: | |||
1115 | return err; | 1130 | return err; |
1116 | } | 1131 | } |
1117 | 1132 | ||
1118 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | 1133 | int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) |
1119 | { | 1134 | { |
1120 | int err; | 1135 | int err; |
1121 | char *name; | 1136 | char *name; |
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | |||
1133 | return -EINVAL; | 1148 | return -EINVAL; |
1134 | name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); | 1149 | name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); |
1135 | 1150 | ||
1136 | rtnl_lock(); | ||
1137 | m = tipc_media_find(name); | 1151 | m = tipc_media_find(name); |
1138 | if (!m) { | 1152 | if (!m) |
1139 | rtnl_unlock(); | ||
1140 | return -EINVAL; | 1153 | return -EINVAL; |
1141 | } | ||
1142 | 1154 | ||
1143 | if (attrs[TIPC_NLA_MEDIA_PROP]) { | 1155 | if (attrs[TIPC_NLA_MEDIA_PROP]) { |
1144 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; | 1156 | struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; |
1145 | 1157 | ||
1146 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], | 1158 | err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], |
1147 | props); | 1159 | props); |
1148 | if (err) { | 1160 | if (err) |
1149 | rtnl_unlock(); | ||
1150 | return err; | 1161 | return err; |
1151 | } | ||
1152 | 1162 | ||
1153 | if (props[TIPC_NLA_PROP_TOL]) | 1163 | if (props[TIPC_NLA_PROP_TOL]) |
1154 | m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1164 | m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | |||
1157 | if (props[TIPC_NLA_PROP_WIN]) | 1167 | if (props[TIPC_NLA_PROP_WIN]) |
1158 | m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); | 1168 | m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); |
1159 | } | 1169 | } |
1160 | rtnl_unlock(); | ||
1161 | 1170 | ||
1162 | return 0; | 1171 | return 0; |
1163 | } | 1172 | } |
1173 | |||
1174 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) | ||
1175 | { | ||
1176 | int err; | ||
1177 | |||
1178 | rtnl_lock(); | ||
1179 | err = __tipc_nl_media_set(skb, info); | ||
1180 | rtnl_unlock(); | ||
1181 | |||
1182 | return err; | ||
1183 | } | ||
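The bearer and media handlers above all follow the same refactoring: the RTNL-free work moves into a double-underscore helper that assumes the caller holds the lock, and the public genl handler becomes a thin wrapper that takes RTNL around it, so the compat path can call the helper under a lock it already holds. A sketch of the convention with hypothetical names:

#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/genetlink.h>

/* Does the real work; the caller must hold RTNL (e.g. the compat layer). */
static int __example_doit(struct sk_buff *skb, struct genl_info *info)
{
	ASSERT_RTNL();
	/* ... modify state protected by RTNL ... */
	return 0;
}

/* Public genl handler: wrap the helper in rtnl_lock()/rtnl_unlock(). */
static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __example_doit(skb, info);
	rtnl_unlock();

	return err;
}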
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 42d6eeeb646d..a53613d95bc9 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info; | |||
188 | #endif | 188 | #endif |
189 | 189 | ||
190 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | 190 | int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); |
191 | int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); | ||
191 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | 192 | int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); |
193 | int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); | ||
192 | int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); | 194 | int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); |
193 | int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); | 195 | int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); |
194 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); | 196 | int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); |
197 | int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); | ||
195 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); | 198 | int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); |
196 | 199 | ||
197 | int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); | 200 | int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); |
198 | int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); | 201 | int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); |
199 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | 202 | int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); |
203 | int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); | ||
200 | 204 | ||
201 | int tipc_media_set_priority(const char *name, u32 new_value); | 205 | int tipc_media_set_priority(const char *name, u32 new_value); |
202 | int tipc_media_set_window(const char *name, u32 new_value); | 206 | int tipc_media_set_window(const char *name, u32 new_value); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 719c5924b638..1a2fde0d6f61 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -200,7 +200,7 @@ out: | |||
200 | return skb->len; | 200 | return skb->len; |
201 | } | 201 | } |
202 | 202 | ||
203 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | 203 | int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) |
204 | { | 204 | { |
205 | struct net *net = sock_net(skb->sk); | 205 | struct net *net = sock_net(skb->sk); |
206 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 206 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
241 | if (!tipc_addr_node_valid(addr)) | 241 | if (!tipc_addr_node_valid(addr)) |
242 | return -EINVAL; | 242 | return -EINVAL; |
243 | 243 | ||
244 | rtnl_lock(); | ||
245 | tipc_net_start(net, addr); | 244 | tipc_net_start(net, addr); |
246 | rtnl_unlock(); | ||
247 | } | 245 | } |
248 | 246 | ||
249 | return 0; | 247 | return 0; |
250 | } | 248 | } |
249 | |||
250 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | ||
251 | { | ||
252 | int err; | ||
253 | |||
254 | rtnl_lock(); | ||
255 | err = __tipc_nl_net_set(skb, info); | ||
256 | rtnl_unlock(); | ||
257 | |||
258 | return err; | ||
259 | } | ||
diff --git a/net/tipc/net.h b/net/tipc/net.h index c7c254902873..c0306aa2374b 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h | |||
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net); | |||
47 | 47 | ||
48 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | 48 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); |
49 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | 49 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); |
50 | int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | ||
50 | 51 | ||
51 | #endif | 52 | #endif |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index e48f0b2c01b9..4492cda45566 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | |||
285 | if (!trans_buf) | 285 | if (!trans_buf) |
286 | return -ENOMEM; | 286 | return -ENOMEM; |
287 | 287 | ||
288 | err = (*cmd->transcode)(cmd, trans_buf, msg); | ||
289 | if (err) | ||
290 | goto trans_out; | ||
291 | |||
292 | attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * | 288 | attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * |
293 | sizeof(struct nlattr *), GFP_KERNEL); | 289 | sizeof(struct nlattr *), GFP_KERNEL); |
294 | if (!attrbuf) { | 290 | if (!attrbuf) { |
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, | |||
296 | goto trans_out; | 292 | goto trans_out; |
297 | } | 293 | } |
298 | 294 | ||
299 | err = nla_parse(attrbuf, tipc_genl_family.maxattr, | ||
300 | (const struct nlattr *)trans_buf->data, | ||
301 | trans_buf->len, NULL, NULL); | ||
302 | if (err) | ||
303 | goto parse_out; | ||
304 | |||
305 | doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 295 | doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
306 | if (!doit_buf) { | 296 | if (!doit_buf) { |
307 | err = -ENOMEM; | 297 | err = -ENOMEM; |
308 | goto parse_out; | 298 | goto attrbuf_out; |
309 | } | 299 | } |
310 | 300 | ||
311 | doit_buf->sk = msg->dst_sk; | ||
312 | |||
313 | memset(&info, 0, sizeof(info)); | 301 | memset(&info, 0, sizeof(info)); |
314 | info.attrs = attrbuf; | 302 | info.attrs = attrbuf; |
315 | 303 | ||
304 | rtnl_lock(); | ||
305 | err = (*cmd->transcode)(cmd, trans_buf, msg); | ||
306 | if (err) | ||
307 | goto doit_out; | ||
308 | |||
309 | err = nla_parse(attrbuf, tipc_genl_family.maxattr, | ||
310 | (const struct nlattr *)trans_buf->data, | ||
311 | trans_buf->len, NULL, NULL); | ||
312 | if (err) | ||
313 | goto doit_out; | ||
314 | |||
315 | doit_buf->sk = msg->dst_sk; | ||
316 | |||
316 | err = (*cmd->doit)(doit_buf, &info); | 317 | err = (*cmd->doit)(doit_buf, &info); |
318 | doit_out: | ||
319 | rtnl_unlock(); | ||
317 | 320 | ||
318 | kfree_skb(doit_buf); | 321 | kfree_skb(doit_buf); |
319 | parse_out: | 322 | attrbuf_out: |
320 | kfree(attrbuf); | 323 | kfree(attrbuf); |
321 | trans_out: | 324 | trans_out: |
322 | kfree_skb(trans_buf); | 325 | kfree_skb(trans_buf); |
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
722 | 725 | ||
723 | media = tipc_media_find(lc->name); | 726 | media = tipc_media_find(lc->name); |
724 | if (media) { | 727 | if (media) { |
725 | cmd->doit = &tipc_nl_media_set; | 728 | cmd->doit = &__tipc_nl_media_set; |
726 | return tipc_nl_compat_media_set(skb, msg); | 729 | return tipc_nl_compat_media_set(skb, msg); |
727 | } | 730 | } |
728 | 731 | ||
729 | bearer = tipc_bearer_find(msg->net, lc->name); | 732 | bearer = tipc_bearer_find(msg->net, lc->name); |
730 | if (bearer) { | 733 | if (bearer) { |
731 | cmd->doit = &tipc_nl_bearer_set; | 734 | cmd->doit = &__tipc_nl_bearer_set; |
732 | return tipc_nl_compat_bearer_set(skb, msg); | 735 | return tipc_nl_compat_bearer_set(skb, msg); |
733 | } | 736 | } |
734 | 737 | ||
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) | |||
1089 | return tipc_nl_compat_dumpit(&dump, msg); | 1092 | return tipc_nl_compat_dumpit(&dump, msg); |
1090 | case TIPC_CMD_ENABLE_BEARER: | 1093 | case TIPC_CMD_ENABLE_BEARER: |
1091 | msg->req_type = TIPC_TLV_BEARER_CONFIG; | 1094 | msg->req_type = TIPC_TLV_BEARER_CONFIG; |
1092 | doit.doit = tipc_nl_bearer_enable; | 1095 | doit.doit = __tipc_nl_bearer_enable; |
1093 | doit.transcode = tipc_nl_compat_bearer_enable; | 1096 | doit.transcode = tipc_nl_compat_bearer_enable; |
1094 | return tipc_nl_compat_doit(&doit, msg); | 1097 | return tipc_nl_compat_doit(&doit, msg); |
1095 | case TIPC_CMD_DISABLE_BEARER: | 1098 | case TIPC_CMD_DISABLE_BEARER: |
1096 | msg->req_type = TIPC_TLV_BEARER_NAME; | 1099 | msg->req_type = TIPC_TLV_BEARER_NAME; |
1097 | doit.doit = tipc_nl_bearer_disable; | 1100 | doit.doit = __tipc_nl_bearer_disable; |
1098 | doit.transcode = tipc_nl_compat_bearer_disable; | 1101 | doit.transcode = tipc_nl_compat_bearer_disable; |
1099 | return tipc_nl_compat_doit(&doit, msg); | 1102 | return tipc_nl_compat_doit(&doit, msg); |
1100 | case TIPC_CMD_SHOW_LINK_STATS: | 1103 | case TIPC_CMD_SHOW_LINK_STATS: |
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) | |||
1148 | return tipc_nl_compat_dumpit(&dump, msg); | 1151 | return tipc_nl_compat_dumpit(&dump, msg); |
1149 | case TIPC_CMD_SET_NODE_ADDR: | 1152 | case TIPC_CMD_SET_NODE_ADDR: |
1150 | msg->req_type = TIPC_TLV_NET_ADDR; | 1153 | msg->req_type = TIPC_TLV_NET_ADDR; |
1151 | doit.doit = tipc_nl_net_set; | 1154 | doit.doit = __tipc_nl_net_set; |
1152 | doit.transcode = tipc_nl_compat_net_set; | 1155 | doit.transcode = tipc_nl_compat_net_set; |
1153 | return tipc_nl_compat_doit(&doit, msg); | 1156 | return tipc_nl_compat_doit(&doit, msg); |
1154 | case TIPC_CMD_SET_NETID: | 1157 | case TIPC_CMD_SET_NETID: |
1155 | msg->req_type = TIPC_TLV_UNSIGNED; | 1158 | msg->req_type = TIPC_TLV_UNSIGNED; |
1156 | doit.doit = tipc_nl_net_set; | 1159 | doit.doit = __tipc_nl_net_set; |
1157 | doit.transcode = tipc_nl_compat_net_set; | 1160 | doit.transcode = tipc_nl_compat_net_set; |
1158 | return tipc_nl_compat_doit(&doit, msg); | 1161 | return tipc_nl_compat_doit(&doit, msg); |
1159 | case TIPC_CMD_GET_NETID: | 1162 | case TIPC_CMD_GET_NETID: |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index b0d5fcea47e7..e9b4b53ab53e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -308,8 +308,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, | |||
308 | goto out; | 308 | goto out; |
309 | } | 309 | } |
310 | lock_sock(sk); | 310 | lock_sock(sk); |
311 | memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, | 311 | memcpy(crypto_info_aes_gcm_128->iv, |
312 | ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, | ||
312 | TLS_CIPHER_AES_GCM_128_IV_SIZE); | 313 | TLS_CIPHER_AES_GCM_128_IV_SIZE); |
314 | memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq, | ||
315 | TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); | ||
313 | release_sock(sk); | 316 | release_sock(sk); |
314 | if (copy_to_user(optval, | 317 | if (copy_to_user(optval, |
315 | crypto_info_aes_gcm_128, | 318 | crypto_info_aes_gcm_128, |
@@ -375,7 +378,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, | |||
375 | rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); | 378 | rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); |
376 | if (rc) { | 379 | if (rc) { |
377 | rc = -EFAULT; | 380 | rc = -EFAULT; |
378 | goto out; | 381 | goto err_crypto_info; |
379 | } | 382 | } |
380 | 383 | ||
381 | /* check version */ | 384 | /* check version */ |
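The getsockopt fix above works because, as the copy offsets show, the TX context stores the 4-byte salt immediately followed by the 8-byte explicit IV in ctx->iv, and only the explicit part (plus the record sequence number) belongs in the structure returned to userspace. A small sketch of that slice, assuming the same salt-then-IV layout and using the uapi constants:

#include <linux/string.h>
#include <linux/tls.h>
#include <linux/types.h>

/* @ctx_iv holds salt || explicit IV; report only the explicit IV. */
static void example_export_gcm128_iv(u8 *dst, const u8 *ctx_iv)
{
	memcpy(dst, ctx_iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
}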
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d545e1d0dea2..2d465bdeccbc 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1825,7 +1825,7 @@ out: | |||
1825 | } | 1825 | } |
1826 | 1826 | ||
1827 | /* We use paged skbs for stream sockets, and limit occupancy to 32768 | 1827 | /* We use paged skbs for stream sockets, and limit occupancy to 32768 |
1828 | * bytes, and a minimun of a full page. | 1828 | * bytes, and a minimum of a full page. |
1829 | */ | 1829 | */ |
1830 | #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) | 1830 | #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) |
1831 | 1831 | ||
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 51aa55618ef7..b12da6ef3c12 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c | |||
@@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
170 | enum nl80211_bss_scan_width scan_width; | 170 | enum nl80211_bss_scan_width scan_width; |
171 | struct ieee80211_supported_band *sband = | 171 | struct ieee80211_supported_band *sband = |
172 | rdev->wiphy.bands[setup->chandef.chan->band]; | 172 | rdev->wiphy.bands[setup->chandef.chan->band]; |
173 | scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); | 173 | |
174 | setup->basic_rates = ieee80211_mandatory_rates(sband, | 174 | if (setup->chandef.chan->band == NL80211_BAND_2GHZ) { |
175 | scan_width); | 175 | int i; |
176 | |||
177 | /* | ||
178 | * Older versions selected the mandatory rates for | ||
179 | * 2.4 GHz as well, but were broken in that only | ||
180 | * 1 Mbps was regarded as a mandatory rate. Keep | ||
181 | * using just 1 Mbps as the default basic rate for | ||
182 | * mesh to be interoperable with older versions. | ||
183 | */ | ||
184 | for (i = 0; i < sband->n_bitrates; i++) { | ||
185 | if (sband->bitrates[i].bitrate == 10) { | ||
186 | setup->basic_rates = BIT(i); | ||
187 | break; | ||
188 | } | ||
189 | } | ||
190 | } else { | ||
191 | scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); | ||
192 | setup->basic_rates = ieee80211_mandatory_rates(sband, | ||
193 | scan_width); | ||
194 | } | ||
176 | } | 195 | } |
177 | 196 | ||
178 | err = cfg80211_chandef_dfs_required(&rdev->wiphy, | 197 | err = cfg80211_chandef_dfs_required(&rdev->wiphy, |
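The comparison against 10 in the 2.4 GHz branch works because struct ieee80211_rate expresses bitrate in units of 100 kbps, so 10 is 1 Mbps. A hedged helper showing the same selection for an arbitrary rate in Mbps:

#include <net/cfg80211.h>

/* Return a basic_rates bitmap containing only the rate equal to @mbps,
 * or 0 if the band does not advertise it (bitrate is in 100 kbps units).
 */
static u32 example_single_rate(const struct ieee80211_supported_band *sband,
			       unsigned int mbps)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == mbps * 10)
			return BIT(i);
	return 0;
}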
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index fdb3646274a5..701cfd7acc1b 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -1032,6 +1032,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
1032 | wdev->current_bss = NULL; | 1032 | wdev->current_bss = NULL; |
1033 | wdev->ssid_len = 0; | 1033 | wdev->ssid_len = 0; |
1034 | wdev->conn_owner_nlportid = 0; | 1034 | wdev->conn_owner_nlportid = 0; |
1035 | kzfree(wdev->connect_keys); | ||
1036 | wdev->connect_keys = NULL; | ||
1035 | 1037 | ||
1036 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); | 1038 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); |
1037 | 1039 | ||
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index 0e349b80686e..ba942e3ead89 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | ifndef CROSS_COMPILE | ||
2 | hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct | 3 | hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct |
3 | 4 | ||
4 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include | 5 | HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include |
@@ -16,7 +17,6 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include | |||
16 | bpf-direct-objs := bpf-direct.o | 17 | bpf-direct-objs := bpf-direct.o |
17 | 18 | ||
18 | # Try to match the kernel target. | 19 | # Try to match the kernel target. |
19 | ifndef CROSS_COMPILE | ||
20 | ifndef CONFIG_64BIT | 20 | ifndef CONFIG_64BIT |
21 | 21 | ||
22 | # s390 has -m31 flag to build 31 bit binaries | 22 | # s390 has -m31 flag to build 31 bit binaries |
@@ -35,12 +35,4 @@ HOSTLOADLIBES_bpf-fancy += $(MFLAG) | |||
35 | HOSTLOADLIBES_dropper += $(MFLAG) | 35 | HOSTLOADLIBES_dropper += $(MFLAG) |
36 | endif | 36 | endif |
37 | always := $(hostprogs-m) | 37 | always := $(hostprogs-m) |
38 | else | ||
39 | # MIPS system calls are defined based on the -mabi that is passed | ||
40 | # to the toolchain which may or may not be a valid option | ||
41 | # for the host toolchain. So disable tests if target architecture | ||
42 | # is MIPS but the host isn't. | ||
43 | ifndef CONFIG_MIPS | ||
44 | always := $(hostprogs-m) | ||
45 | endif | ||
46 | endif | 38 | endif |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 47cddf32aeba..4f2b25d43ec9 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool | |||
256 | 256 | ||
257 | objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check) | 257 | objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check) |
258 | 258 | ||
259 | objtool_args += $(if $(part-of-module), --module,) | ||
260 | |||
259 | ifndef CONFIG_FRAME_POINTER | 261 | ifndef CONFIG_FRAME_POINTER |
260 | objtool_args += --no-fp | 262 | objtool_args += --no-fp |
261 | endif | 263 | endif |
@@ -264,6 +266,12 @@ objtool_args += --no-unreachable | |||
264 | else | 266 | else |
265 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | 267 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) |
266 | endif | 268 | endif |
269 | ifdef CONFIG_RETPOLINE | ||
270 | ifneq ($(RETPOLINE_CFLAGS),) | ||
271 | objtool_args += --retpoline | ||
272 | endif | ||
273 | endif | ||
274 | |||
267 | 275 | ||
268 | ifdef CONFIG_MODVERSIONS | 276 | ifdef CONFIG_MODVERSIONS |
269 | objtool_o = $(@D)/.tmp_$(@F) | 277 | objtool_o = $(@D)/.tmp_$(@F) |
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 6f9e4ce568cd..9bb0a7f2863e 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/cred.h> | 18 | #include <linux/cred.h> |
19 | #include <linux/key-type.h> | 19 | #include <linux/key-type.h> |
20 | #include <linux/digsig.h> | 20 | #include <linux/digsig.h> |
21 | #include <linux/vmalloc.h> | ||
21 | #include <crypto/public_key.h> | 22 | #include <crypto/public_key.h> |
22 | #include <keys/system_keyring.h> | 23 | #include <keys/system_keyring.h> |
23 | 24 | ||
diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 929e14978c42..fa728f662a6f 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c | |||
@@ -22,6 +22,13 @@ | |||
22 | #include <keys/big_key-type.h> | 22 | #include <keys/big_key-type.h> |
23 | #include <crypto/aead.h> | 23 | #include <crypto/aead.h> |
24 | 24 | ||
25 | struct big_key_buf { | ||
26 | unsigned int nr_pages; | ||
27 | void *virt; | ||
28 | struct scatterlist *sg; | ||
29 | struct page *pages[]; | ||
30 | }; | ||
31 | |||
25 | /* | 32 | /* |
26 | * Layout of key payload words. | 33 | * Layout of key payload words. |
27 | */ | 34 | */ |
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock); | |||
91 | /* | 98 | /* |
92 | * Encrypt/decrypt big_key data | 99 | * Encrypt/decrypt big_key data |
93 | */ | 100 | */ |
94 | static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) | 101 | static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key) |
95 | { | 102 | { |
96 | int ret; | 103 | int ret; |
97 | struct scatterlist sgio; | ||
98 | struct aead_request *aead_req; | 104 | struct aead_request *aead_req; |
99 | /* We always use a zero nonce. The reason we can get away with this is | 105 | /* We always use a zero nonce. The reason we can get away with this is |
100 | * because we're using a different randomly generated key for every | 106 | * because we're using a different randomly generated key for every |
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) | |||
109 | return -ENOMEM; | 115 | return -ENOMEM; |
110 | 116 | ||
111 | memset(zero_nonce, 0, sizeof(zero_nonce)); | 117 | memset(zero_nonce, 0, sizeof(zero_nonce)); |
112 | sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0)); | 118 | aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce); |
113 | aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce); | ||
114 | aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | 119 | aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); |
115 | aead_request_set_ad(aead_req, 0); | 120 | aead_request_set_ad(aead_req, 0); |
116 | 121 | ||
@@ -130,21 +135,81 @@ error: | |||
130 | } | 135 | } |
131 | 136 | ||
132 | /* | 137 | /* |
138 | * Free up the buffer. | ||
139 | */ | ||
140 | static void big_key_free_buffer(struct big_key_buf *buf) | ||
141 | { | ||
142 | unsigned int i; | ||
143 | |||
144 | if (buf->virt) { | ||
145 | memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE); | ||
146 | vunmap(buf->virt); | ||
147 | } | ||
148 | |||
149 | for (i = 0; i < buf->nr_pages; i++) | ||
150 | if (buf->pages[i]) | ||
151 | __free_page(buf->pages[i]); | ||
152 | |||
153 | kfree(buf); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Allocate a buffer consisting of a set of pages with a virtual mapping | ||
158 | * applied over them. | ||
159 | */ | ||
160 | static void *big_key_alloc_buffer(size_t len) | ||
161 | { | ||
162 | struct big_key_buf *buf; | ||
163 | unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
164 | unsigned int i, l; | ||
165 | |||
166 | buf = kzalloc(sizeof(struct big_key_buf) + | ||
167 | sizeof(struct page) * npg + | ||
168 | sizeof(struct scatterlist) * npg, | ||
169 | GFP_KERNEL); | ||
170 | if (!buf) | ||
171 | return NULL; | ||
172 | |||
173 | buf->nr_pages = npg; | ||
174 | buf->sg = (void *)(buf->pages + npg); | ||
175 | sg_init_table(buf->sg, npg); | ||
176 | |||
177 | for (i = 0; i < buf->nr_pages; i++) { | ||
178 | buf->pages[i] = alloc_page(GFP_KERNEL); | ||
179 | if (!buf->pages[i]) | ||
180 | goto nomem; | ||
181 | |||
182 | l = min_t(size_t, len, PAGE_SIZE); | ||
183 | sg_set_page(&buf->sg[i], buf->pages[i], l, 0); | ||
184 | len -= l; | ||
185 | } | ||
186 | |||
187 | buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL); | ||
188 | if (!buf->virt) | ||
189 | goto nomem; | ||
190 | |||
191 | return buf; | ||
192 | |||
193 | nomem: | ||
194 | big_key_free_buffer(buf); | ||
195 | return NULL; | ||
196 | } | ||
197 | |||
198 | /* | ||
133 | * Preparse a big key | 199 | * Preparse a big key |
134 | */ | 200 | */ |
135 | int big_key_preparse(struct key_preparsed_payload *prep) | 201 | int big_key_preparse(struct key_preparsed_payload *prep) |
136 | { | 202 | { |
203 | struct big_key_buf *buf; | ||
137 | struct path *path = (struct path *)&prep->payload.data[big_key_path]; | 204 | struct path *path = (struct path *)&prep->payload.data[big_key_path]; |
138 | struct file *file; | 205 | struct file *file; |
139 | u8 *enckey; | 206 | u8 *enckey; |
140 | u8 *data = NULL; | ||
141 | ssize_t written; | 207 | ssize_t written; |
142 | size_t datalen = prep->datalen; | 208 | size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE; |
143 | int ret; | 209 | int ret; |
144 | 210 | ||
145 | ret = -EINVAL; | ||
146 | if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) | 211 | if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) |
147 | goto error; | 212 | return -EINVAL; |
148 | 213 | ||
149 | /* Set an arbitrary quota */ | 214 | /* Set an arbitrary quota */ |
150 | prep->quotalen = 16; | 215 | prep->quotalen = 16; |
@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
157 | * | 222 | * |
158 | * File content is stored encrypted with randomly generated key. | 223 | * File content is stored encrypted with randomly generated key. |
159 | */ | 224 | */ |
160 | size_t enclen = datalen + ENC_AUTHTAG_SIZE; | ||
161 | loff_t pos = 0; | 225 | loff_t pos = 0; |
162 | 226 | ||
163 | data = kmalloc(enclen, GFP_KERNEL); | 227 | buf = big_key_alloc_buffer(enclen); |
164 | if (!data) | 228 | if (!buf) |
165 | return -ENOMEM; | 229 | return -ENOMEM; |
166 | memcpy(data, prep->data, datalen); | 230 | memcpy(buf->virt, prep->data, datalen); |
167 | 231 | ||
168 | /* generate random key */ | 232 | /* generate random key */ |
169 | enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); | 233 | enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); |
@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
176 | goto err_enckey; | 240 | goto err_enckey; |
177 | 241 | ||
178 | /* encrypt aligned data */ | 242 | /* encrypt aligned data */ |
179 | ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey); | 243 | ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey); |
180 | if (ret) | 244 | if (ret) |
181 | goto err_enckey; | 245 | goto err_enckey; |
182 | 246 | ||
@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
187 | goto err_enckey; | 251 | goto err_enckey; |
188 | } | 252 | } |
189 | 253 | ||
190 | written = kernel_write(file, data, enclen, &pos); | 254 | written = kernel_write(file, buf->virt, enclen, &pos); |
191 | if (written != enclen) { | 255 | if (written != enclen) { |
192 | ret = written; | 256 | ret = written; |
193 | if (written >= 0) | 257 | if (written >= 0) |
@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
202 | *path = file->f_path; | 266 | *path = file->f_path; |
203 | path_get(path); | 267 | path_get(path); |
204 | fput(file); | 268 | fput(file); |
205 | kzfree(data); | 269 | big_key_free_buffer(buf); |
206 | } else { | 270 | } else { |
207 | /* Just store the data in a buffer */ | 271 | /* Just store the data in a buffer */ |
208 | void *data = kmalloc(datalen, GFP_KERNEL); | 272 | void *data = kmalloc(datalen, GFP_KERNEL); |
@@ -220,7 +284,7 @@ err_fput: | |||
220 | err_enckey: | 284 | err_enckey: |
221 | kzfree(enckey); | 285 | kzfree(enckey); |
222 | error: | 286 | error: |
223 | kzfree(data); | 287 | big_key_free_buffer(buf); |
224 | return ret; | 288 | return ret; |
225 | } | 289 | } |
226 | 290 | ||
@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) | |||
298 | return datalen; | 362 | return datalen; |
299 | 363 | ||
300 | if (datalen > BIG_KEY_FILE_THRESHOLD) { | 364 | if (datalen > BIG_KEY_FILE_THRESHOLD) { |
365 | struct big_key_buf *buf; | ||
301 | struct path *path = (struct path *)&key->payload.data[big_key_path]; | 366 | struct path *path = (struct path *)&key->payload.data[big_key_path]; |
302 | struct file *file; | 367 | struct file *file; |
303 | u8 *data; | ||
304 | u8 *enckey = (u8 *)key->payload.data[big_key_data]; | 368 | u8 *enckey = (u8 *)key->payload.data[big_key_data]; |
305 | size_t enclen = datalen + ENC_AUTHTAG_SIZE; | 369 | size_t enclen = datalen + ENC_AUTHTAG_SIZE; |
306 | loff_t pos = 0; | 370 | loff_t pos = 0; |
307 | 371 | ||
308 | data = kmalloc(enclen, GFP_KERNEL); | 372 | buf = big_key_alloc_buffer(enclen); |
309 | if (!data) | 373 | if (!buf) |
310 | return -ENOMEM; | 374 | return -ENOMEM; |
311 | 375 | ||
312 | file = dentry_open(path, O_RDONLY, current_cred()); | 376 | file = dentry_open(path, O_RDONLY, current_cred()); |
@@ -316,26 +380,26 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) | |||
316 | } | 380 | } |
317 | 381 | ||
318 | /* read file to kernel and decrypt */ | 382 | /* read file to kernel and decrypt */ |
319 | ret = kernel_read(file, data, enclen, &pos); | 383 | ret = kernel_read(file, buf->virt, enclen, &pos); |
320 | if (ret >= 0 && ret != enclen) { | 384 | if (ret >= 0 && ret != enclen) { |
321 | ret = -EIO; | 385 | ret = -EIO; |
322 | goto err_fput; | 386 | goto err_fput; |
323 | } | 387 | } |
324 | 388 | ||
325 | ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey); | 389 | ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey); |
326 | if (ret) | 390 | if (ret) |
327 | goto err_fput; | 391 | goto err_fput; |
328 | 392 | ||
329 | ret = datalen; | 393 | ret = datalen; |
330 | 394 | ||
331 | /* copy decrypted data to user */ | 395 | /* copy decrypted data to user */ |
332 | if (copy_to_user(buffer, data, datalen) != 0) | 396 | if (copy_to_user(buffer, buf->virt, datalen) != 0) |
333 | ret = -EFAULT; | 397 | ret = -EFAULT; |
334 | 398 | ||
335 | err_fput: | 399 | err_fput: |
336 | fput(file); | 400 | fput(file); |
337 | error: | 401 | error: |
338 | kzfree(data); | 402 | big_key_free_buffer(buf); |
339 | } else { | 403 | } else { |
340 | ret = datalen; | 404 | ret = datalen; |
341 | if (copy_to_user(buffer, key->payload.data[big_key_data], | 405 | if (copy_to_user(buffer, key->payload.data[big_key_data], |
diff --git a/sound/core/control.c b/sound/core/control.c index 0b3026d937b1..8a77620a3854 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -889,7 +889,7 @@ static int snd_ctl_elem_read(struct snd_card *card, | |||
889 | 889 | ||
890 | index_offset = snd_ctl_get_ioff(kctl, &control->id); | 890 | index_offset = snd_ctl_get_ioff(kctl, &control->id); |
891 | vd = &kctl->vd[index_offset]; | 891 | vd = &kctl->vd[index_offset]; |
892 | if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL) | 892 | if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) |
893 | return -EPERM; | 893 | return -EPERM; |
894 | 894 | ||
895 | snd_ctl_build_ioff(&control->id, kctl, index_offset); | 895 | snd_ctl_build_ioff(&control->id, kctl, index_offset); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c71dcacea807..96143df19b21 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = { | |||
181 | }; | 181 | }; |
182 | #define param_check_xint param_check_int | 182 | #define param_check_xint param_check_int |
183 | 183 | ||
184 | static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; | 184 | static int power_save = -1; |
185 | module_param(power_save, xint, 0644); | 185 | module_param(power_save, xint, 0644); |
186 | MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " | 186 | MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " |
187 | "(in second, 0 = disable)."); | 187 | "(in second, 0 = disable)."); |
@@ -2186,6 +2186,24 @@ out_free: | |||
2186 | return err; | 2186 | return err; |
2187 | } | 2187 | } |
2188 | 2188 | ||
2189 | #ifdef CONFIG_PM | ||
2190 | /* On some boards setting power_save to a non 0 value leads to clicking / | ||
2191 | * popping sounds when ever we enter/leave powersaving mode. Ideally we would | ||
2192 | * figure out how to avoid these sounds, but that is not always feasible. | ||
2193 | * So we keep a list of devices where we disable powersaving as its known | ||
2194 | * to causes problems on these devices. | ||
2195 | */ | ||
2196 | static struct snd_pci_quirk power_save_blacklist[] = { | ||
2197 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | ||
2198 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), | ||
2199 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | ||
2200 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), | ||
2201 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ | ||
2202 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), | ||
2203 | {} | ||
2204 | }; | ||
2205 | #endif /* CONFIG_PM */ | ||
2206 | |||
2189 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ | 2207 | /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ |
2190 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { | 2208 | static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { |
2191 | [AZX_DRIVER_NVIDIA] = 8, | 2209 | [AZX_DRIVER_NVIDIA] = 8, |
@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip) | |||
2198 | struct hdac_bus *bus = azx_bus(chip); | 2216 | struct hdac_bus *bus = azx_bus(chip); |
2199 | struct pci_dev *pci = chip->pci; | 2217 | struct pci_dev *pci = chip->pci; |
2200 | int dev = chip->dev_index; | 2218 | int dev = chip->dev_index; |
2219 | int val; | ||
2201 | int err; | 2220 | int err; |
2202 | 2221 | ||
2203 | hda->probe_continued = 1; | 2222 | hda->probe_continued = 1; |
@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip) | |||
2278 | 2297 | ||
2279 | chip->running = 1; | 2298 | chip->running = 1; |
2280 | azx_add_card_list(chip); | 2299 | azx_add_card_list(chip); |
2281 | snd_hda_set_power_save(&chip->bus, power_save * 1000); | 2300 | |
2301 | val = power_save; | ||
2302 | #ifdef CONFIG_PM | ||
2303 | if (val == -1) { | ||
2304 | const struct snd_pci_quirk *q; | ||
2305 | |||
2306 | val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; | ||
2307 | q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); | ||
2308 | if (q && val) { | ||
2309 | dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", | ||
2310 | q->subvendor, q->subdevice); | ||
2311 | val = 0; | ||
2312 | } | ||
2313 | } | ||
2314 | #endif /* CONFIG_PM */ | ||
2315 | snd_hda_set_power_save(&chip->bus, val * 1000); | ||
2282 | if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) | 2316 | if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) |
2283 | pm_runtime_put_autosuspend(&pci->dev); | 2317 | pm_runtime_put_autosuspend(&pci->dev); |
2284 | 2318 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ce28f7ce64e6..b9c93fa0a51c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4997,13 +4997,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, | |||
4997 | 4997 | ||
4998 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | 4998 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
4999 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; | 4999 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
5000 | snd_hda_apply_pincfgs(codec, pincfgs); | ||
5001 | } else if (action == HDA_FIXUP_ACT_INIT) { | ||
5000 | /* Enable DOCK device */ | 5002 | /* Enable DOCK device */ |
5001 | snd_hda_codec_write(codec, 0x17, 0, | 5003 | snd_hda_codec_write(codec, 0x17, 0, |
5002 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); | 5004 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
5003 | /* Enable DOCK device */ | 5005 | /* Enable DOCK device */ |
5004 | snd_hda_codec_write(codec, 0x19, 0, | 5006 | snd_hda_codec_write(codec, 0x19, 0, |
5005 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); | 5007 | AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
5006 | snd_hda_apply_pincfgs(codec, pincfgs); | ||
5007 | } | 5008 | } |
5008 | } | 5009 | } |
5009 | 5010 | ||
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 50252046b01d..754e632a27bd 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -3325,4 +3325,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), | |||
3325 | } | 3325 | } |
3326 | }, | 3326 | }, |
3327 | 3327 | ||
3328 | { | ||
3329 | /* | ||
3330 | * Bowers & Wilkins PX headphones only support the 48 kHz sample rate | ||
3331 | * even though they advertise more. The capture interface doesn't work | ||
3332 | * even on Windows. | ||
3333 | */ | ||
3334 | USB_DEVICE(0x19b5, 0x0021), | ||
3335 | .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { | ||
3336 | .ifnum = QUIRK_ANY_INTERFACE, | ||
3337 | .type = QUIRK_COMPOSITE, | ||
3338 | .data = (const struct snd_usb_audio_quirk[]) { | ||
3339 | { | ||
3340 | .ifnum = 0, | ||
3341 | .type = QUIRK_AUDIO_STANDARD_MIXER, | ||
3342 | }, | ||
3343 | /* Capture */ | ||
3344 | { | ||
3345 | .ifnum = 1, | ||
3346 | .type = QUIRK_IGNORE_INTERFACE, | ||
3347 | }, | ||
3348 | /* Playback */ | ||
3349 | { | ||
3350 | .ifnum = 2, | ||
3351 | .type = QUIRK_AUDIO_FIXED_ENDPOINT, | ||
3352 | .data = &(const struct audioformat) { | ||
3353 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | ||
3354 | .channels = 2, | ||
3355 | .iface = 2, | ||
3356 | .altsetting = 1, | ||
3357 | .altset_idx = 1, | ||
3358 | .attributes = UAC_EP_CS_ATTR_FILL_MAX | | ||
3359 | UAC_EP_CS_ATTR_SAMPLE_RATE, | ||
3360 | .endpoint = 0x03, | ||
3361 | .ep_attr = USB_ENDPOINT_XFER_ISOC, | ||
3362 | .rates = SNDRV_PCM_RATE_48000, | ||
3363 | .rate_min = 48000, | ||
3364 | .rate_max = 48000, | ||
3365 | .nr_rates = 1, | ||
3366 | .rate_table = (unsigned int[]) { | ||
3367 | 48000 | ||
3368 | } | ||
3369 | } | ||
3370 | }, | ||
3371 | } | ||
3372 | } | ||
3373 | }, | ||
3374 | |||
3328 | #undef USB_DEVICE_VENDOR_SPEC | 3375 | #undef USB_DEVICE_VENDOR_SPEC |
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index a0951505c7f5..4ed9d0c41843 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c | |||
@@ -50,6 +50,7 @@ | |||
50 | /*standard module options for ALSA. This module supports only one card*/ | 50 | /*standard module options for ALSA. This module supports only one card*/ |
51 | static int hdmi_card_index = SNDRV_DEFAULT_IDX1; | 51 | static int hdmi_card_index = SNDRV_DEFAULT_IDX1; |
52 | static char *hdmi_card_id = SNDRV_DEFAULT_STR1; | 52 | static char *hdmi_card_id = SNDRV_DEFAULT_STR1; |
53 | static bool single_port; | ||
53 | 54 | ||
54 | module_param_named(index, hdmi_card_index, int, 0444); | 55 | module_param_named(index, hdmi_card_index, int, 0444); |
55 | MODULE_PARM_DESC(index, | 56 | MODULE_PARM_DESC(index, |
@@ -57,6 +58,9 @@ MODULE_PARM_DESC(index, | |||
57 | module_param_named(id, hdmi_card_id, charp, 0444); | 58 | module_param_named(id, hdmi_card_id, charp, 0444); |
58 | MODULE_PARM_DESC(id, | 59 | MODULE_PARM_DESC(id, |
59 | "ID string for INTEL Intel HDMI Audio controller."); | 60 | "ID string for INTEL Intel HDMI Audio controller."); |
61 | module_param(single_port, bool, 0444); | ||
62 | MODULE_PARM_DESC(single_port, | ||
63 | "Single-port mode (for compatibility)"); | ||
60 | 64 | ||
61 | /* | 65 | /* |
62 | * ELD SA bits in the CEA Speaker Allocation data block | 66 | * ELD SA bits in the CEA Speaker Allocation data block |
@@ -1579,7 +1583,11 @@ static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id) | |||
1579 | static void notify_audio_lpe(struct platform_device *pdev, int port) | 1583 | static void notify_audio_lpe(struct platform_device *pdev, int port) |
1580 | { | 1584 | { |
1581 | struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev); | 1585 | struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev); |
1582 | struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port]; | 1586 | struct snd_intelhad *ctx; |
1587 | |||
1588 | ctx = &card_ctx->pcm_ctx[single_port ? 0 : port]; | ||
1589 | if (single_port) | ||
1590 | ctx->port = port; | ||
1583 | 1591 | ||
1584 | schedule_work(&ctx->hdmi_audio_wq); | 1592 | schedule_work(&ctx->hdmi_audio_wq); |
1585 | } | 1593 | } |
@@ -1743,6 +1751,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1743 | { | 1751 | { |
1744 | struct snd_card *card; | 1752 | struct snd_card *card; |
1745 | struct snd_intelhad_card *card_ctx; | 1753 | struct snd_intelhad_card *card_ctx; |
1754 | struct snd_intelhad *ctx; | ||
1746 | struct snd_pcm *pcm; | 1755 | struct snd_pcm *pcm; |
1747 | struct intel_hdmi_lpe_audio_pdata *pdata; | 1756 | struct intel_hdmi_lpe_audio_pdata *pdata; |
1748 | int irq; | 1757 | int irq; |
@@ -1787,6 +1796,21 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1787 | 1796 | ||
1788 | platform_set_drvdata(pdev, card_ctx); | 1797 | platform_set_drvdata(pdev, card_ctx); |
1789 | 1798 | ||
1799 | card_ctx->num_pipes = pdata->num_pipes; | ||
1800 | card_ctx->num_ports = single_port ? 1 : pdata->num_ports; | ||
1801 | |||
1802 | for_each_port(card_ctx, port) { | ||
1803 | ctx = &card_ctx->pcm_ctx[port]; | ||
1804 | ctx->card_ctx = card_ctx; | ||
1805 | ctx->dev = card_ctx->dev; | ||
1806 | ctx->port = single_port ? -1 : port; | ||
1807 | ctx->pipe = -1; | ||
1808 | |||
1809 | spin_lock_init(&ctx->had_spinlock); | ||
1810 | mutex_init(&ctx->mutex); | ||
1811 | INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq); | ||
1812 | } | ||
1813 | |||
1790 | dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n", | 1814 | dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n", |
1791 | __func__, (unsigned int)res_mmio->start, | 1815 | __func__, (unsigned int)res_mmio->start, |
1792 | (unsigned int)res_mmio->end); | 1816 | (unsigned int)res_mmio->end); |
@@ -1816,19 +1840,12 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev) | |||
1816 | init_channel_allocations(); | 1840 | init_channel_allocations(); |
1817 | 1841 | ||
1818 | card_ctx->num_pipes = pdata->num_pipes; | 1842 | card_ctx->num_pipes = pdata->num_pipes; |
1819 | card_ctx->num_ports = pdata->num_ports; | 1843 | card_ctx->num_ports = single_port ? 1 : pdata->num_ports; |
1820 | 1844 | ||
1821 | for_each_port(card_ctx, port) { | 1845 | for_each_port(card_ctx, port) { |
1822 | struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port]; | ||
1823 | int i; | 1846 | int i; |
1824 | 1847 | ||
1825 | ctx->card_ctx = card_ctx; | 1848 | ctx = &card_ctx->pcm_ctx[port]; |
1826 | ctx->dev = card_ctx->dev; | ||
1827 | ctx->port = port; | ||
1828 | ctx->pipe = -1; | ||
1829 | |||
1830 | INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq); | ||
1831 | |||
1832 | ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS, | 1849 | ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS, |
1833 | MAX_CAP_STREAMS, &pcm); | 1850 | MAX_CAP_STREAMS, &pcm); |
1834 | if (ret) | 1851 | if (ret) |
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 3a0396d87c42..185acfa229b5 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c | |||
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv) | |||
244 | } | 244 | } |
245 | 245 | ||
246 | if (errno && errno != ENOENT) { | 246 | if (errno && errno != ENOENT) { |
247 | perror("reading batch file failed"); | 247 | p_err("reading batch file failed: %s", strerror(errno)); |
248 | err = -1; | 248 | err = -1; |
249 | } else { | 249 | } else { |
250 | p_info("processed %d lines", lines); | 250 | p_info("processed %d lines", lines); |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e8e2baaf93c2..e549e329be82 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv) | |||
774 | n < 0 ? strerror(errno) : "short write"); | 774 | n < 0 ? strerror(errno) : "short write"); |
775 | goto err_free; | 775 | goto err_free; |
776 | } | 776 | } |
777 | |||
778 | if (json_output) | ||
779 | jsonw_null(json_wtr); | ||
777 | } else { | 780 | } else { |
778 | if (member_len == &info.jited_prog_len) { | 781 | if (member_len == &info.jited_prog_len) { |
779 | const char *name = NULL; | 782 | const char *name = NULL; |
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile index 860fa151640a..ffca068e4a76 100644 --- a/tools/cgroup/Makefile +++ b/tools/cgroup/Makefile | |||
@@ -1,7 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for cgroup tools | 2 | # Makefile for cgroup tools |
3 | 3 | ||
4 | CC = $(CROSS_COMPILE)gcc | ||
5 | CFLAGS = -Wall -Wextra | 4 | CFLAGS = -Wall -Wextra |
6 | 5 | ||
7 | all: cgroup_event_listener | 6 | all: cgroup_event_listener |
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 805a2c0cf4cd..240eda014b37 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile | |||
@@ -12,8 +12,6 @@ endif | |||
12 | # (this improves performance and avoids hard-to-debug behaviour); | 12 | # (this improves performance and avoids hard-to-debug behaviour); |
13 | MAKEFLAGS += -r | 13 | MAKEFLAGS += -r |
14 | 14 | ||
15 | CC = $(CROSS_COMPILE)gcc | ||
16 | LD = $(CROSS_COMPILE)ld | ||
17 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include | 15 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include |
18 | 16 | ||
19 | ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon | 17 | ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon |
diff --git a/tools/hv/Makefile b/tools/hv/Makefile index 1139d71fa0cf..5db5e62cebda 100644 --- a/tools/hv/Makefile +++ b/tools/hv/Makefile | |||
@@ -1,7 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for Hyper-V tools | 2 | # Makefile for Hyper-V tools |
3 | 3 | ||
4 | CC = $(CROSS_COMPILE)gcc | ||
5 | WARNINGS = -Wall -Wextra | 4 | WARNINGS = -Wall -Wextra |
6 | CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS) | 5 | CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS) |
7 | 6 | ||
diff --git a/tools/iio/Makefile b/tools/iio/Makefile index a08e7a47d6a3..332ed2f6c2c2 100644 --- a/tools/iio/Makefile +++ b/tools/iio/Makefile | |||
@@ -12,8 +12,6 @@ endif | |||
12 | # (this improves performance and avoids hard-to-debug behaviour); | 12 | # (this improves performance and avoids hard-to-debug behaviour); |
13 | MAKEFLAGS += -r | 13 | MAKEFLAGS += -r |
14 | 14 | ||
15 | CC = $(CROSS_COMPILE)gcc | ||
16 | LD = $(CROSS_COMPILE)ld | ||
17 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include | 15 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include |
18 | 16 | ||
19 | ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer | 17 | ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer |
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index a5684d0968b4..5898c22ba310 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat | |||
@@ -33,7 +33,7 @@ import resource | |||
33 | import struct | 33 | import struct |
34 | import re | 34 | import re |
35 | import subprocess | 35 | import subprocess |
36 | from collections import defaultdict | 36 | from collections import defaultdict, namedtuple |
37 | 37 | ||
38 | VMX_EXIT_REASONS = { | 38 | VMX_EXIT_REASONS = { |
39 | 'EXCEPTION_NMI': 0, | 39 | 'EXCEPTION_NMI': 0, |
@@ -228,6 +228,7 @@ IOCTL_NUMBERS = { | |||
228 | } | 228 | } |
229 | 229 | ||
230 | ENCODING = locale.getpreferredencoding(False) | 230 | ENCODING = locale.getpreferredencoding(False) |
231 | TRACE_FILTER = re.compile(r'^[^\(]*$') | ||
231 | 232 | ||
232 | 233 | ||
233 | class Arch(object): | 234 | class Arch(object): |
@@ -260,6 +261,11 @@ class Arch(object): | |||
260 | return ArchX86(SVM_EXIT_REASONS) | 261 | return ArchX86(SVM_EXIT_REASONS) |
261 | return | 262 | return |
262 | 263 | ||
264 | def tracepoint_is_child(self, field): | ||
265 | if (TRACE_FILTER.match(field)): | ||
266 | return None | ||
267 | return field.split('(', 1)[0] | ||
268 | |||
263 | 269 | ||
264 | class ArchX86(Arch): | 270 | class ArchX86(Arch): |
265 | def __init__(self, exit_reasons): | 271 | def __init__(self, exit_reasons): |
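A stand-alone sketch of how the new TRACE_FILTER regex is meant to behave (in the script it is a method on the Arch class; the free function and sample event names below are illustrative): names without a parenthesis are parent events, anything of the form parent(detail) is treated as a child of that parent.

    import re

    TRACE_FILTER = re.compile(r'^[^\(]*$')    # matches names without '('

    def tracepoint_is_child(field):
        if TRACE_FILTER.match(field):
            return None                       # plain (parent) event
        return field.split('(', 1)[0]         # child: report its parent

    print(tracepoint_is_child('kvm_exit'))                      # None
    print(tracepoint_is_child('kvm_exit(EXTERNAL_INTERRUPT)'))  # kvm_exit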
@@ -267,6 +273,10 @@ class ArchX86(Arch): | |||
267 | self.ioctl_numbers = IOCTL_NUMBERS | 273 | self.ioctl_numbers = IOCTL_NUMBERS |
268 | self.exit_reasons = exit_reasons | 274 | self.exit_reasons = exit_reasons |
269 | 275 | ||
276 | def debugfs_is_child(self, field): | ||
277 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
278 | return None | ||
279 | |||
270 | 280 | ||
271 | class ArchPPC(Arch): | 281 | class ArchPPC(Arch): |
272 | def __init__(self): | 282 | def __init__(self): |
@@ -282,6 +292,10 @@ class ArchPPC(Arch): | |||
282 | self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 | 292 | self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 |
283 | self.exit_reasons = {} | 293 | self.exit_reasons = {} |
284 | 294 | ||
295 | def debugfs_is_child(self, field): | ||
296 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
297 | return None | ||
298 | |||
285 | 299 | ||
286 | class ArchA64(Arch): | 300 | class ArchA64(Arch): |
287 | def __init__(self): | 301 | def __init__(self): |
@@ -289,6 +303,10 @@ class ArchA64(Arch): | |||
289 | self.ioctl_numbers = IOCTL_NUMBERS | 303 | self.ioctl_numbers = IOCTL_NUMBERS |
290 | self.exit_reasons = AARCH64_EXIT_REASONS | 304 | self.exit_reasons = AARCH64_EXIT_REASONS |
291 | 305 | ||
306 | def debugfs_is_child(self, field): | ||
307 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
308 | return None | ||
309 | |||
292 | 310 | ||
293 | class ArchS390(Arch): | 311 | class ArchS390(Arch): |
294 | def __init__(self): | 312 | def __init__(self): |
@@ -296,6 +314,12 @@ class ArchS390(Arch): | |||
296 | self.ioctl_numbers = IOCTL_NUMBERS | 314 | self.ioctl_numbers = IOCTL_NUMBERS |
297 | self.exit_reasons = None | 315 | self.exit_reasons = None |
298 | 316 | ||
317 | def debugfs_is_child(self, field): | ||
318 | """ Returns name of parent if 'field' is a child, None otherwise """ | ||
319 | if field.startswith('instruction_'): | ||
320 | return 'exit_instruction' | ||
321 | |||
322 | |||
299 | ARCH = Arch.get_arch() | 323 | ARCH = Arch.get_arch() |
300 | 324 | ||
301 | 325 | ||
@@ -331,9 +355,6 @@ class perf_event_attr(ctypes.Structure): | |||
331 | PERF_TYPE_TRACEPOINT = 2 | 355 | PERF_TYPE_TRACEPOINT = 2 |
332 | PERF_FORMAT_GROUP = 1 << 3 | 356 | PERF_FORMAT_GROUP = 1 << 3 |
333 | 357 | ||
334 | PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing' | ||
335 | PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm' | ||
336 | |||
337 | 358 | ||
338 | class Group(object): | 359 | class Group(object): |
339 | """Represents a perf event group.""" | 360 | """Represents a perf event group.""" |
@@ -376,8 +397,8 @@ class Event(object): | |||
376 | self.syscall = self.libc.syscall | 397 | self.syscall = self.libc.syscall |
377 | self.name = name | 398 | self.name = name |
378 | self.fd = None | 399 | self.fd = None |
379 | self.setup_event(group, trace_cpu, trace_pid, trace_point, | 400 | self._setup_event(group, trace_cpu, trace_pid, trace_point, |
380 | trace_filter, trace_set) | 401 | trace_filter, trace_set) |
381 | 402 | ||
382 | def __del__(self): | 403 | def __del__(self): |
383 | """Closes the event's file descriptor. | 404 | """Closes the event's file descriptor. |
@@ -390,7 +411,7 @@ class Event(object): | |||
390 | if self.fd: | 411 | if self.fd: |
391 | os.close(self.fd) | 412 | os.close(self.fd) |
392 | 413 | ||
393 | def perf_event_open(self, attr, pid, cpu, group_fd, flags): | 414 | def _perf_event_open(self, attr, pid, cpu, group_fd, flags): |
394 | """Wrapper for the sys_perf_evt_open() syscall. | 415 | """Wrapper for the sys_perf_evt_open() syscall. |
395 | 416 | ||
396 | Used to set up performance events, returns a file descriptor or -1 | 417 | Used to set up performance events, returns a file descriptor or -1 |
@@ -409,7 +430,7 @@ class Event(object): | |||
409 | ctypes.c_int(pid), ctypes.c_int(cpu), | 430 | ctypes.c_int(pid), ctypes.c_int(cpu), |
410 | ctypes.c_int(group_fd), ctypes.c_long(flags)) | 431 | ctypes.c_int(group_fd), ctypes.c_long(flags)) |
411 | 432 | ||
412 | def setup_event_attribute(self, trace_set, trace_point): | 433 | def _setup_event_attribute(self, trace_set, trace_point): |
413 | """Returns an initialized ctype perf_event_attr struct.""" | 434 | """Returns an initialized ctype perf_event_attr struct.""" |
414 | 435 | ||
415 | id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set, | 436 | id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set, |
@@ -419,8 +440,8 @@ class Event(object): | |||
419 | event_attr.config = int(open(id_path).read()) | 440 | event_attr.config = int(open(id_path).read()) |
420 | return event_attr | 441 | return event_attr |
421 | 442 | ||
422 | def setup_event(self, group, trace_cpu, trace_pid, trace_point, | 443 | def _setup_event(self, group, trace_cpu, trace_pid, trace_point, |
423 | trace_filter, trace_set): | 444 | trace_filter, trace_set): |
424 | """Sets up the perf event in Linux. | 445 | """Sets up the perf event in Linux. |
425 | 446 | ||
426 | Issues the syscall to register the event in the kernel and | 447 | Issues the syscall to register the event in the kernel and |
@@ -428,7 +449,7 @@ class Event(object): | |||
428 | 449 | ||
429 | """ | 450 | """ |
430 | 451 | ||
431 | event_attr = self.setup_event_attribute(trace_set, trace_point) | 452 | event_attr = self._setup_event_attribute(trace_set, trace_point) |
432 | 453 | ||
433 | # First event will be group leader. | 454 | # First event will be group leader. |
434 | group_leader = -1 | 455 | group_leader = -1 |
@@ -437,8 +458,8 @@ class Event(object): | |||
437 | if group.events: | 458 | if group.events: |
438 | group_leader = group.events[0].fd | 459 | group_leader = group.events[0].fd |
439 | 460 | ||
440 | fd = self.perf_event_open(event_attr, trace_pid, | 461 | fd = self._perf_event_open(event_attr, trace_pid, |
441 | trace_cpu, group_leader, 0) | 462 | trace_cpu, group_leader, 0) |
442 | if fd == -1: | 463 | if fd == -1: |
443 | err = ctypes.get_errno() | 464 | err = ctypes.get_errno() |
444 | raise OSError(err, os.strerror(err), | 465 | raise OSError(err, os.strerror(err), |
@@ -475,6 +496,10 @@ class Event(object): | |||
475 | 496 | ||
476 | class Provider(object): | 497 | class Provider(object): |
477 | """Encapsulates functionalities used by all providers.""" | 498 | """Encapsulates functionalities used by all providers.""" |
499 | def __init__(self, pid): | ||
500 | self.child_events = False | ||
501 | self.pid = pid | ||
502 | |||
478 | @staticmethod | 503 | @staticmethod |
479 | def is_field_wanted(fields_filter, field): | 504 | def is_field_wanted(fields_filter, field): |
480 | """Indicate whether field is valid according to fields_filter.""" | 505 | """Indicate whether field is valid according to fields_filter.""" |
@@ -500,12 +525,12 @@ class TracepointProvider(Provider): | |||
500 | """ | 525 | """ |
501 | def __init__(self, pid, fields_filter): | 526 | def __init__(self, pid, fields_filter): |
502 | self.group_leaders = [] | 527 | self.group_leaders = [] |
503 | self.filters = self.get_filters() | 528 | self.filters = self._get_filters() |
504 | self.update_fields(fields_filter) | 529 | self.update_fields(fields_filter) |
505 | self.pid = pid | 530 | super(TracepointProvider, self).__init__(pid) |
506 | 531 | ||
507 | @staticmethod | 532 | @staticmethod |
508 | def get_filters(): | 533 | def _get_filters(): |
509 | """Returns a dict of trace events, their filter ids and | 534 | """Returns a dict of trace events, their filter ids and |
510 | the values that can be filtered. | 535 | the values that can be filtered. |
511 | 536 | ||
@@ -521,8 +546,8 @@ class TracepointProvider(Provider): | |||
521 | filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons) | 546 | filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons) |
522 | return filters | 547 | return filters |
523 | 548 | ||
524 | def get_available_fields(self): | 549 | def _get_available_fields(self): |
525 | """Returns a list of available event's of format 'event name(filter | 550 | """Returns a list of available events of format 'event name(filter |
526 | name)'. | 551 | name)'. |
527 | 552 | ||
528 | All available events have directories under | 553 | All available events have directories under |
@@ -549,11 +574,12 @@ class TracepointProvider(Provider): | |||
549 | 574 | ||
550 | def update_fields(self, fields_filter): | 575 | def update_fields(self, fields_filter): |
551 | """Refresh fields, applying fields_filter""" | 576 | """Refresh fields, applying fields_filter""" |
552 | self.fields = [field for field in self.get_available_fields() | 577 | self.fields = [field for field in self._get_available_fields() |
553 | if self.is_field_wanted(fields_filter, field)] | 578 | if self.is_field_wanted(fields_filter, field) or |
579 | ARCH.tracepoint_is_child(field)] | ||
554 | 580 | ||
555 | @staticmethod | 581 | @staticmethod |
556 | def get_online_cpus(): | 582 | def _get_online_cpus(): |
557 | """Returns a list of cpu id integers.""" | 583 | """Returns a list of cpu id integers.""" |
558 | def parse_int_list(list_string): | 584 | def parse_int_list(list_string): |
559 | """Returns an int list from a string of comma separated integers and | 585 | """Returns an int list from a string of comma separated integers and |
@@ -575,17 +601,17 @@ class TracepointProvider(Provider): | |||
575 | cpu_string = cpu_list.readline() | 601 | cpu_string = cpu_list.readline() |
576 | return parse_int_list(cpu_string) | 602 | return parse_int_list(cpu_string) |
577 | 603 | ||
578 | def setup_traces(self): | 604 | def _setup_traces(self): |
579 | """Creates all event and group objects needed to be able to retrieve | 605 | """Creates all event and group objects needed to be able to retrieve |
580 | data.""" | 606 | data.""" |
581 | fields = self.get_available_fields() | 607 | fields = self._get_available_fields() |
582 | if self._pid > 0: | 608 | if self._pid > 0: |
583 | # Fetch list of all threads of the monitored pid, as qemu | 609 | # Fetch list of all threads of the monitored pid, as qemu |
584 | # starts a thread for each vcpu. | 610 | # starts a thread for each vcpu. |
585 | path = os.path.join('/proc', str(self._pid), 'task') | 611 | path = os.path.join('/proc', str(self._pid), 'task') |
586 | groupids = self.walkdir(path)[1] | 612 | groupids = self.walkdir(path)[1] |
587 | else: | 613 | else: |
588 | groupids = self.get_online_cpus() | 614 | groupids = self._get_online_cpus() |
589 | 615 | ||
590 | # The constant is needed as a buffer for python libs, std | 616 | # The constant is needed as a buffer for python libs, std |
591 | # streams and other files that the script opens. | 617 | # streams and other files that the script opens. |
@@ -663,7 +689,7 @@ class TracepointProvider(Provider): | |||
663 | # The garbage collector will get rid of all Event/Group | 689 | # The garbage collector will get rid of all Event/Group |
664 | # objects and open files after removing the references. | 690 | # objects and open files after removing the references. |
665 | self.group_leaders = [] | 691 | self.group_leaders = [] |
666 | self.setup_traces() | 692 | self._setup_traces() |
667 | self.fields = self._fields | 693 | self.fields = self._fields |
668 | 694 | ||
669 | def read(self, by_guest=0): | 695 | def read(self, by_guest=0): |
@@ -671,8 +697,12 @@ class TracepointProvider(Provider): | |||
671 | ret = defaultdict(int) | 697 | ret = defaultdict(int) |
672 | for group in self.group_leaders: | 698 | for group in self.group_leaders: |
673 | for name, val in group.read().items(): | 699 | for name, val in group.read().items(): |
674 | if name in self._fields: | 700 | if name not in self._fields: |
675 | ret[name] += val | 701 | continue |
702 | parent = ARCH.tracepoint_is_child(name) | ||
703 | if parent: | ||
704 | name += ' ' + parent | ||
705 | ret[name] += val | ||
676 | return ret | 706 | return ret |
677 | 707 | ||
678 | def reset(self): | 708 | def reset(self): |
@@ -690,11 +720,11 @@ class DebugfsProvider(Provider): | |||
690 | self._baseline = {} | 720 | self._baseline = {} |
691 | self.do_read = True | 721 | self.do_read = True |
692 | self.paths = [] | 722 | self.paths = [] |
693 | self.pid = pid | 723 | super(DebugfsProvider, self).__init__(pid) |
694 | if include_past: | 724 | if include_past: |
695 | self.restore() | 725 | self._restore() |
696 | 726 | ||
697 | def get_available_fields(self): | 727 | def _get_available_fields(self): |
698 | """"Returns a list of available fields. | 728 | """"Returns a list of available fields. |
699 | 729 | ||
700 | The fields are all available KVM debugfs files | 730 | The fields are all available KVM debugfs files |
@@ -704,8 +734,9 @@ class DebugfsProvider(Provider): | |||
704 | 734 | ||
705 | def update_fields(self, fields_filter): | 735 | def update_fields(self, fields_filter): |
706 | """Refresh fields, applying fields_filter""" | 736 | """Refresh fields, applying fields_filter""" |
707 | self._fields = [field for field in self.get_available_fields() | 737 | self._fields = [field for field in self._get_available_fields() |
708 | if self.is_field_wanted(fields_filter, field)] | 738 | if self.is_field_wanted(fields_filter, field) or |
739 | ARCH.debugfs_is_child(field)] | ||
709 | 740 | ||
710 | @property | 741 | @property |
711 | def fields(self): | 742 | def fields(self): |
@@ -758,7 +789,7 @@ class DebugfsProvider(Provider): | |||
758 | paths.append(dir) | 789 | paths.append(dir) |
759 | for path in paths: | 790 | for path in paths: |
760 | for field in self._fields: | 791 | for field in self._fields: |
761 | value = self.read_field(field, path) | 792 | value = self._read_field(field, path) |
762 | key = path + field | 793 | key = path + field |
763 | if reset == 1: | 794 | if reset == 1: |
764 | self._baseline[key] = value | 795 | self._baseline[key] = value |
@@ -766,20 +797,21 @@ class DebugfsProvider(Provider): | |||
766 | self._baseline[key] = 0 | 797 | self._baseline[key] = 0 |
767 | if self._baseline.get(key, -1) == -1: | 798 | if self._baseline.get(key, -1) == -1: |
768 | self._baseline[key] = value | 799 | self._baseline[key] = value |
769 | increment = (results.get(field, 0) + value - | 800 | parent = ARCH.debugfs_is_child(field) |
770 | self._baseline.get(key, 0)) | 801 | if parent: |
771 | if by_guest: | 802 | field = field + ' ' + parent |
772 | pid = key.split('-')[0] | 803 | else: |
773 | if pid in results: | 804 | if by_guest: |
774 | results[pid] += increment | 805 | field = key.split('-')[0] # set 'field' to 'pid' |
775 | else: | 806 | increment = value - self._baseline.get(key, 0) |
776 | results[pid] = increment | 807 | if field in results: |
808 | results[field] += increment | ||
777 | else: | 809 | else: |
778 | results[field] = increment | 810 | results[field] = increment |
779 | 811 | ||
780 | return results | 812 | return results |
781 | 813 | ||
782 | def read_field(self, field, path): | 814 | def _read_field(self, field, path): |
783 | """Returns the value of a single field from a specific VM.""" | 815 | """Returns the value of a single field from a specific VM.""" |
784 | try: | 816 | try: |
785 | return int(open(os.path.join(PATH_DEBUGFS_KVM, | 817 | return int(open(os.path.join(PATH_DEBUGFS_KVM, |
@@ -794,12 +826,15 @@ class DebugfsProvider(Provider): | |||
794 | self._baseline = {} | 826 | self._baseline = {} |
795 | self.read(1) | 827 | self.read(1) |
796 | 828 | ||
797 | def restore(self): | 829 | def _restore(self): |
798 | """Reset field counters""" | 830 | """Reset field counters""" |
799 | self._baseline = {} | 831 | self._baseline = {} |
800 | self.read(2) | 832 | self.read(2) |
801 | 833 | ||
802 | 834 | ||
835 | EventStat = namedtuple('EventStat', ['value', 'delta']) | ||
836 | |||
837 | |||
803 | class Stats(object): | 838 | class Stats(object): |
804 | """Manages the data providers and the data they provide. | 839 | """Manages the data providers and the data they provide. |
805 | 840 | ||
@@ -808,13 +843,13 @@ class Stats(object): | |||
808 | 843 | ||
809 | """ | 844 | """ |
810 | def __init__(self, options): | 845 | def __init__(self, options): |
811 | self.providers = self.get_providers(options) | 846 | self.providers = self._get_providers(options) |
812 | self._pid_filter = options.pid | 847 | self._pid_filter = options.pid |
813 | self._fields_filter = options.fields | 848 | self._fields_filter = options.fields |
814 | self.values = {} | 849 | self.values = {} |
850 | self._child_events = False | ||
815 | 851 | ||
816 | @staticmethod | 852 | def _get_providers(self, options): |
817 | def get_providers(options): | ||
818 | """Returns a list of data providers depending on the passed options.""" | 853 | """Returns a list of data providers depending on the passed options.""" |
819 | providers = [] | 854 | providers = [] |
820 | 855 | ||
@@ -826,7 +861,7 @@ class Stats(object): | |||
826 | 861 | ||
827 | return providers | 862 | return providers |
828 | 863 | ||
829 | def update_provider_filters(self): | 864 | def _update_provider_filters(self): |
830 | """Propagates fields filters to providers.""" | 865 | """Propagates fields filters to providers.""" |
831 | # As we reset the counters when updating the fields we can | 866 | # As we reset the counters when updating the fields we can |
832 | # also clear the cache of old values. | 867 | # also clear the cache of old values. |
@@ -847,7 +882,7 @@ class Stats(object): | |||
847 | def fields_filter(self, fields_filter): | 882 | def fields_filter(self, fields_filter): |
848 | if fields_filter != self._fields_filter: | 883 | if fields_filter != self._fields_filter: |
849 | self._fields_filter = fields_filter | 884 | self._fields_filter = fields_filter |
850 | self.update_provider_filters() | 885 | self._update_provider_filters() |
851 | 886 | ||
852 | @property | 887 | @property |
853 | def pid_filter(self): | 888 | def pid_filter(self): |
@@ -861,16 +896,33 @@ class Stats(object): | |||
861 | for provider in self.providers: | 896 | for provider in self.providers: |
862 | provider.pid = self._pid_filter | 897 | provider.pid = self._pid_filter |
863 | 898 | ||
899 | @property | ||
900 | def child_events(self): | ||
901 | return self._child_events | ||
902 | |||
903 | @child_events.setter | ||
904 | def child_events(self, val): | ||
905 | self._child_events = val | ||
906 | for provider in self.providers: | ||
907 | provider.child_events = val | ||
908 | |||
864 | def get(self, by_guest=0): | 909 | def get(self, by_guest=0): |
865 | """Returns a dict with field -> (value, delta to last value) of all | 910 | """Returns a dict with field -> (value, delta to last value) of all |
866 | provider data.""" | 911 | provider data. |
912 | Key formats: | ||
913 | * plain: 'key' is event name | ||
914 | * child-parent: 'key' is in format '<child> <parent>' | ||
915 | * pid: 'key' is the pid of the guest, and the record contains the | ||
916 | aggregated event data | ||
917 | These formats are generated by the providers, and handled in class TUI. | ||
918 | """ | ||
867 | for provider in self.providers: | 919 | for provider in self.providers: |
868 | new = provider.read(by_guest=by_guest) | 920 | new = provider.read(by_guest=by_guest) |
869 | for key in new if by_guest else provider.fields: | 921 | for key in new: |
870 | oldval = self.values.get(key, (0, 0))[0] | 922 | oldval = self.values.get(key, EventStat(0, 0)).value |
871 | newval = new.get(key, 0) | 923 | newval = new.get(key, 0) |
872 | newdelta = newval - oldval | 924 | newdelta = newval - oldval |
873 | self.values[key] = (newval, newdelta) | 925 | self.values[key] = EventStat(newval, newdelta) |
874 | return self.values | 926 | return self.values |
875 | 927 | ||
876 | def toggle_display_guests(self, to_pid): | 928 | def toggle_display_guests(self, to_pid): |
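The bookkeeping Stats.get() now performs with the EventStat namedtuple can be exercised on its own; the update() helper and the sample numbers below are illustrative, the point being that the latest value and its delta are addressable by name rather than by tuple index.

    from collections import namedtuple

    EventStat = namedtuple('EventStat', ['value', 'delta'])

    def update(values, key, newval):
        oldval = values.get(key, EventStat(0, 0)).value
        values[key] = EventStat(newval, newval - oldval)
        return values[key]

    values = {}
    print(update(values, 'kvm_exit', 100))  # EventStat(value=100, delta=100)
    print(update(values, 'kvm_exit', 160))  # EventStat(value=160, delta=60)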
@@ -899,10 +951,10 @@ class Stats(object): | |||
899 | self.get(to_pid) | 951 | self.get(to_pid) |
900 | return 0 | 952 | return 0 |
901 | 953 | ||
954 | |||
902 | DELAY_DEFAULT = 3.0 | 955 | DELAY_DEFAULT = 3.0 |
903 | MAX_GUEST_NAME_LEN = 48 | 956 | MAX_GUEST_NAME_LEN = 48 |
904 | MAX_REGEX_LEN = 44 | 957 | MAX_REGEX_LEN = 44 |
905 | DEFAULT_REGEX = r'^[^\(]*$' | ||
906 | SORT_DEFAULT = 0 | 958 | SORT_DEFAULT = 0 |
907 | 959 | ||
908 | 960 | ||
@@ -969,7 +1021,7 @@ class Tui(object): | |||
969 | 1021 | ||
970 | return res | 1022 | return res |
971 | 1023 | ||
972 | def print_all_gnames(self, row): | 1024 | def _print_all_gnames(self, row): |
973 | """Print a list of all running guests along with their pids.""" | 1025 | """Print a list of all running guests along with their pids.""" |
974 | self.screen.addstr(row, 2, '%8s %-60s' % | 1026 | self.screen.addstr(row, 2, '%8s %-60s' % |
975 | ('Pid', 'Guest Name (fuzzy list, might be ' | 1027 | ('Pid', 'Guest Name (fuzzy list, might be ' |
@@ -1032,19 +1084,13 @@ class Tui(object): | |||
1032 | 1084 | ||
1033 | return name | 1085 | return name |
1034 | 1086 | ||
1035 | def update_drilldown(self): | 1087 | def _update_pid(self, pid): |
1036 | """Sets or removes a filter that only allows fields without braces.""" | ||
1037 | if not self.stats.fields_filter: | ||
1038 | self.stats.fields_filter = DEFAULT_REGEX | ||
1039 | |||
1040 | elif self.stats.fields_filter == DEFAULT_REGEX: | ||
1041 | self.stats.fields_filter = None | ||
1042 | |||
1043 | def update_pid(self, pid): | ||
1044 | """Propagates pid selection to stats object.""" | 1088 | """Propagates pid selection to stats object.""" |
1089 | self.screen.addstr(4, 1, 'Updating pid filter...') | ||
1090 | self.screen.refresh() | ||
1045 | self.stats.pid_filter = pid | 1091 | self.stats.pid_filter = pid |
1046 | 1092 | ||
1047 | def refresh_header(self, pid=None): | 1093 | def _refresh_header(self, pid=None): |
1048 | """Refreshes the header.""" | 1094 | """Refreshes the header.""" |
1049 | if pid is None: | 1095 | if pid is None: |
1050 | pid = self.stats.pid_filter | 1096 | pid = self.stats.pid_filter |
@@ -1059,8 +1105,7 @@ class Tui(object): | |||
1059 | .format(pid, gname), curses.A_BOLD) | 1105 | .format(pid, gname), curses.A_BOLD) |
1060 | else: | 1106 | else: |
1061 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) | 1107 | self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) |
1062 | if self.stats.fields_filter and self.stats.fields_filter \ | 1108 | if self.stats.fields_filter: |
1063 | != DEFAULT_REGEX: | ||
1064 | regex = self.stats.fields_filter | 1109 | regex = self.stats.fields_filter |
1065 | if len(regex) > MAX_REGEX_LEN: | 1110 | if len(regex) > MAX_REGEX_LEN: |
1066 | regex = regex[:MAX_REGEX_LEN] + '...' | 1111 | regex = regex[:MAX_REGEX_LEN] + '...' |
@@ -1075,56 +1120,99 @@ class Tui(object): | |||
1075 | self.screen.addstr(4, 1, 'Collecting data...') | 1120 | self.screen.addstr(4, 1, 'Collecting data...') |
1076 | self.screen.refresh() | 1121 | self.screen.refresh() |
1077 | 1122 | ||
1078 | def refresh_body(self, sleeptime): | 1123 | def _refresh_body(self, sleeptime): |
1124 | def is_child_field(field): | ||
1125 | return field.find('(') != -1 | ||
1126 | |||
1127 | def insert_child(sorted_items, child, values, parent): | ||
1128 | num = len(sorted_items) | ||
1129 | for i in range(0, num): | ||
1130 | # only add child if parent is present | ||
1131 | if parent.startswith(sorted_items[i][0]): | ||
1132 | sorted_items.insert(i + 1, (' ' + child, values)) | ||
1133 | |||
1134 | def get_sorted_events(self, stats): | ||
1135 | """ separate parent and child events """ | ||
1136 | if self._sorting == SORT_DEFAULT: | ||
1137 | def sortkey((_k, v)): | ||
1138 | # sort by (delta value, overall value) | ||
1139 | return (v.delta, v.value) | ||
1140 | else: | ||
1141 | def sortkey((_k, v)): | ||
1142 | # sort by overall value | ||
1143 | return v.value | ||
1144 | |||
1145 | childs = [] | ||
1146 | sorted_items = [] | ||
1147 | # we can't rule out child events to appear prior to parents even | ||
1148 | # when sorted - separate out all children first, and add in later | ||
1149 | for key, values in sorted(stats.items(), key=sortkey, | ||
1150 | reverse=True): | ||
1151 | if values == (0, 0): | ||
1152 | continue | ||
1153 | if key.find(' ') != -1: | ||
1154 | if not self.stats.child_events: | ||
1155 | continue | ||
1156 | childs.insert(0, (key, values)) | ||
1157 | else: | ||
1158 | sorted_items.append((key, values)) | ||
1159 | if self.stats.child_events: | ||
1160 | for key, values in childs: | ||
1161 | (child, parent) = key.split(' ') | ||
1162 | insert_child(sorted_items, child, values, parent) | ||
1163 | |||
1164 | return sorted_items | ||
1165 | |||
1079 | row = 3 | 1166 | row = 3 |
1080 | self.screen.move(row, 0) | 1167 | self.screen.move(row, 0) |
1081 | self.screen.clrtobot() | 1168 | self.screen.clrtobot() |
1082 | stats = self.stats.get(self._display_guests) | 1169 | stats = self.stats.get(self._display_guests) |
1083 | 1170 | total = 0. | |
1084 | def sortCurAvg(x): | 1171 | ctotal = 0. |
1085 | # sort by current events if available | 1172 | for key, values in stats.items(): |
1086 | if stats[x][1]: | 1173 | if self._display_guests: |
1087 | return (-stats[x][1], -stats[x][0]) | 1174 | if self.get_gname_from_pid(key): |
1175 | total += values.value | ||
1176 | continue | ||
1177 | if not key.find(' ') != -1: | ||
1178 | total += values.value | ||
1088 | else: | 1179 | else: |
1089 | return (0, -stats[x][0]) | 1180 | ctotal += values.value |
1181 | if total == 0.: | ||
1182 | # we don't have any fields, or all non-child events are filtered | ||
1183 | total = ctotal | ||
1090 | 1184 | ||
1091 | def sortTotal(x): | 1185 | # print events |
1092 | # sort by totals | ||
1093 | return (0, -stats[x][0]) | ||
1094 | total = 0. | ||
1095 | for key in stats.keys(): | ||
1096 | if key.find('(') is -1: | ||
1097 | total += stats[key][0] | ||
1098 | if self._sorting == SORT_DEFAULT: | ||
1099 | sortkey = sortCurAvg | ||
1100 | else: | ||
1101 | sortkey = sortTotal | ||
1102 | tavg = 0 | 1186 | tavg = 0 |
1103 | for key in sorted(stats.keys(), key=sortkey): | 1187 | tcur = 0 |
1104 | if row >= self.screen.getmaxyx()[0] - 1: | 1188 | for key, values in get_sorted_events(self, stats): |
1105 | break | 1189 | if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): |
1106 | values = stats[key] | ||
1107 | if not values[0] and not values[1]: | ||
1108 | break | 1190 | break |
1109 | if values[0] is not None: | 1191 | if self._display_guests: |
1110 | cur = int(round(values[1] / sleeptime)) if values[1] else '' | 1192 | key = self.get_gname_from_pid(key) |
1111 | if self._display_guests: | 1193 | if not key: |
1112 | key = self.get_gname_from_pid(key) | 1194 | continue |
1113 | self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % | 1195 | cur = int(round(values.delta / sleeptime)) if values.delta else '' |
1114 | (key, values[0], values[0] * 100 / total, | 1196 | if key[0] != ' ': |
1115 | cur)) | 1197 | if values.delta: |
1116 | if cur is not '' and key.find('(') is -1: | 1198 | tcur += values.delta |
1117 | tavg += cur | 1199 | ptotal = values.value |
1200 | ltotal = total | ||
1201 | else: | ||
1202 | ltotal = ptotal | ||
1203 | self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key, | ||
1204 | values.value, | ||
1205 | values.value * 100 / float(ltotal), cur)) | ||
1118 | row += 1 | 1206 | row += 1 |
1119 | if row == 3: | 1207 | if row == 3: |
1120 | self.screen.addstr(4, 1, 'No matching events reported yet') | 1208 | self.screen.addstr(4, 1, 'No matching events reported yet') |
1121 | else: | 1209 | if row > 4: |
1210 | tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' | ||
1122 | self.screen.addstr(row, 1, '%-40s %10d %8s' % | 1211 | self.screen.addstr(row, 1, '%-40s %10d %8s' % |
1123 | ('Total', total, tavg if tavg else ''), | 1212 | ('Total', total, tavg), curses.A_BOLD) |
1124 | curses.A_BOLD) | ||
1125 | self.screen.refresh() | 1213 | self.screen.refresh() |
1126 | 1214 | ||
1127 | def show_msg(self, text): | 1215 | def _show_msg(self, text): |
1128 | """Display message centered text and exit on key press""" | 1216 | """Display message centered text and exit on key press""" |
1129 | hint = 'Press any key to continue' | 1217 | hint = 'Press any key to continue' |
1130 | curses.cbreak() | 1218 | curses.cbreak() |
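A simplified sketch of the child-row insertion done in _refresh_body(): child keys arrive in the format '<child> <parent>' and each child row is slotted in directly below its parent. The helper name insert_children, the break after the first match, and the sample data are illustrative simplifications.

    def insert_children(rows, child_rows):
        # rows: [(event, value), ...]; child_rows use '<child> <parent>' keys
        for key, values in child_rows:
            child, parent = key.split(' ')
            for i, (name, _val) in enumerate(list(rows)):
                if parent.startswith(name):
                    rows.insert(i + 1, ('  ' + child, values))
                    break
        return rows

    rows = [('kvm_exit', 10), ('kvm_entry', 9)]
    insert_children(rows, [('kvm_exit(HLT) kvm_exit', 4)])
    # -> [('kvm_exit', 10), ('  kvm_exit(HLT)', 4), ('kvm_entry', 9)]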
@@ -1139,16 +1227,16 @@ class Tui(object): | |||
1139 | curses.A_STANDOUT) | 1227 | curses.A_STANDOUT) |
1140 | self.screen.getkey() | 1228 | self.screen.getkey() |
1141 | 1229 | ||
1142 | def show_help_interactive(self): | 1230 | def _show_help_interactive(self): |
1143 | """Display help with list of interactive commands""" | 1231 | """Display help with list of interactive commands""" |
1144 | msg = (' b toggle events by guests (debugfs only, honors' | 1232 | msg = (' b toggle events by guests (debugfs only, honors' |
1145 | ' filters)', | 1233 | ' filters)', |
1146 | ' c clear filter', | 1234 | ' c clear filter', |
1147 | ' f filter by regular expression', | 1235 | ' f filter by regular expression', |
1148 | ' g filter by guest name', | 1236 | ' g filter by guest name/PID', |
1149 | ' h display interactive commands reference', | 1237 | ' h display interactive commands reference', |
1150 | ' o toggle sorting order (Total vs CurAvg/s)', | 1238 | ' o toggle sorting order (Total vs CurAvg/s)', |
1151 | ' p filter by PID', | 1239 | ' p filter by guest name/PID', |
1152 | ' q quit', | 1240 | ' q quit', |
1153 | ' r reset stats', | 1241 | ' r reset stats', |
1154 | ' s set update interval', | 1242 | ' s set update interval', |
@@ -1165,14 +1253,15 @@ class Tui(object): | |||
1165 | self.screen.addstr(row, 0, line) | 1253 | self.screen.addstr(row, 0, line) |
1166 | row += 1 | 1254 | row += 1 |
1167 | self.screen.getkey() | 1255 | self.screen.getkey() |
1168 | self.refresh_header() | 1256 | self._refresh_header() |
1169 | 1257 | ||
1170 | def show_filter_selection(self): | 1258 | def _show_filter_selection(self): |
1171 | """Draws filter selection mask. | 1259 | """Draws filter selection mask. |
1172 | 1260 | ||
1173 | Asks for a valid regex and sets the fields filter accordingly. | 1261 | Asks for a valid regex and sets the fields filter accordingly. |
1174 | 1262 | ||
1175 | """ | 1263 | """ |
1264 | msg = '' | ||
1176 | while True: | 1265 | while True: |
1177 | self.screen.erase() | 1266 | self.screen.erase() |
1178 | self.screen.addstr(0, 0, | 1267 | self.screen.addstr(0, 0, |
@@ -1181,61 +1270,25 @@ class Tui(object): | |||
1181 | self.screen.addstr(2, 0, | 1270 | self.screen.addstr(2, 0, |
1182 | "Current regex: {0}" | 1271 | "Current regex: {0}" |
1183 | .format(self.stats.fields_filter)) | 1272 | .format(self.stats.fields_filter)) |
1273 | self.screen.addstr(5, 0, msg) | ||
1184 | self.screen.addstr(3, 0, "New regex: ") | 1274 | self.screen.addstr(3, 0, "New regex: ") |
1185 | curses.echo() | 1275 | curses.echo() |
1186 | regex = self.screen.getstr().decode(ENCODING) | 1276 | regex = self.screen.getstr().decode(ENCODING) |
1187 | curses.noecho() | 1277 | curses.noecho() |
1188 | if len(regex) == 0: | 1278 | if len(regex) == 0: |
1189 | self.stats.fields_filter = DEFAULT_REGEX | 1279 | self.stats.fields_filter = '' |
1190 | self.refresh_header() | 1280 | self._refresh_header() |
1191 | return | 1281 | return |
1192 | try: | 1282 | try: |
1193 | re.compile(regex) | 1283 | re.compile(regex) |
1194 | self.stats.fields_filter = regex | 1284 | self.stats.fields_filter = regex |
1195 | self.refresh_header() | 1285 | self._refresh_header() |
1196 | return | 1286 | return |
1197 | except re.error: | 1287 | except re.error: |
1288 | msg = '"' + regex + '": Not a valid regular expression' | ||
1198 | continue | 1289 | continue |
1199 | 1290 | ||
1200 | def show_vm_selection_by_pid(self): | 1291 | def _show_set_update_interval(self): |
1201 | """Draws PID selection mask. | ||
1202 | |||
1203 | Asks for a pid until a valid pid or 0 has been entered. | ||
1204 | |||
1205 | """ | ||
1206 | msg = '' | ||
1207 | while True: | ||
1208 | self.screen.erase() | ||
1209 | self.screen.addstr(0, 0, | ||
1210 | 'Show statistics for specific pid.', | ||
1211 | curses.A_BOLD) | ||
1212 | self.screen.addstr(1, 0, | ||
1213 | 'This might limit the shown data to the trace ' | ||
1214 | 'statistics.') | ||
1215 | self.screen.addstr(5, 0, msg) | ||
1216 | self.print_all_gnames(7) | ||
1217 | |||
1218 | curses.echo() | ||
1219 | self.screen.addstr(3, 0, "Pid [0 or pid]: ") | ||
1220 | pid = self.screen.getstr().decode(ENCODING) | ||
1221 | curses.noecho() | ||
1222 | |||
1223 | try: | ||
1224 | if len(pid) > 0: | ||
1225 | pid = int(pid) | ||
1226 | if pid != 0 and not os.path.isdir(os.path.join('/proc/', | ||
1227 | str(pid))): | ||
1228 | msg = '"' + str(pid) + '": Not a running process' | ||
1229 | continue | ||
1230 | else: | ||
1231 | pid = 0 | ||
1232 | self.refresh_header(pid) | ||
1233 | self.update_pid(pid) | ||
1234 | break | ||
1235 | except ValueError: | ||
1236 | msg = '"' + str(pid) + '": Not a valid pid' | ||
1237 | |||
1238 | def show_set_update_interval(self): | ||
1239 | """Draws update interval selection mask.""" | 1292 | """Draws update interval selection mask.""" |
1240 | msg = '' | 1293 | msg = '' |
1241 | while True: | 1294 | while True: |
@@ -1265,60 +1318,67 @@ class Tui(object): | |||
1265 | 1318 | ||
1266 | except ValueError: | 1319 | except ValueError: |
1267 | msg = '"' + str(val) + '": Invalid value' | 1320 | msg = '"' + str(val) + '": Invalid value' |
1268 | self.refresh_header() | 1321 | self._refresh_header() |
1269 | 1322 | ||
1270 | def show_vm_selection_by_guest_name(self): | 1323 | def _show_vm_selection_by_guest(self): |
1271 | """Draws guest selection mask. | 1324 | """Draws guest selection mask. |
1272 | 1325 | ||
1273 | Asks for a guest name until a valid guest name or '' is entered. | 1326 | Asks for a guest name or pid until a valid guest name or '' is entered. |
1274 | 1327 | ||
1275 | """ | 1328 | """ |
1276 | msg = '' | 1329 | msg = '' |
1277 | while True: | 1330 | while True: |
1278 | self.screen.erase() | 1331 | self.screen.erase() |
1279 | self.screen.addstr(0, 0, | 1332 | self.screen.addstr(0, 0, |
1280 | 'Show statistics for specific guest.', | 1333 | 'Show statistics for specific guest or pid.', |
1281 | curses.A_BOLD) | 1334 | curses.A_BOLD) |
1282 | self.screen.addstr(1, 0, | 1335 | self.screen.addstr(1, 0, |
1283 | 'This might limit the shown data to the trace ' | 1336 | 'This might limit the shown data to the trace ' |
1284 | 'statistics.') | 1337 | 'statistics.') |
1285 | self.screen.addstr(5, 0, msg) | 1338 | self.screen.addstr(5, 0, msg) |
1286 | self.print_all_gnames(7) | 1339 | self._print_all_gnames(7) |
1287 | curses.echo() | 1340 | curses.echo() |
1288 | self.screen.addstr(3, 0, "Guest [ENTER or guest]: ") | 1341 | curses.curs_set(1) |
1289 | gname = self.screen.getstr().decode(ENCODING) | 1342 | self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ") |
1343 | guest = self.screen.getstr().decode(ENCODING) | ||
1290 | curses.noecho() | 1344 | curses.noecho() |
1291 | 1345 | ||
1292 | if not gname: | 1346 | pid = 0 |
1293 | self.refresh_header(0) | 1347 | if not guest or guest == '0': |
1294 | self.update_pid(0) | ||
1295 | break | 1348 | break |
1296 | else: | 1349 | if guest.isdigit(): |
1297 | pids = [] | 1350 | if not os.path.isdir(os.path.join('/proc/', guest)): |
1298 | try: | 1351 | msg = '"' + guest + '": Not a running process' |
1299 | pids = self.get_pid_from_gname(gname) | ||
1300 | except: | ||
1301 | msg = '"' + gname + '": Internal error while searching, ' \ | ||
1302 | 'use pid filter instead' | ||
1303 | continue | ||
1304 | if len(pids) == 0: | ||
1305 | msg = '"' + gname + '": Not an active guest' | ||
1306 | continue | 1352 | continue |
1307 | if len(pids) > 1: | 1353 | pid = int(guest) |
1308 | msg = '"' + gname + '": Multiple matches found, use pid ' \ | ||
1309 | 'filter instead' | ||
1310 | continue | ||
1311 | self.refresh_header(pids[0]) | ||
1312 | self.update_pid(pids[0]) | ||
1313 | break | 1354 | break |
1355 | pids = [] | ||
1356 | try: | ||
1357 | pids = self.get_pid_from_gname(guest) | ||
1358 | except: | ||
1359 | msg = '"' + guest + '": Internal error while searching, ' \ | ||
1360 | 'use pid filter instead' | ||
1361 | continue | ||
1362 | if len(pids) == 0: | ||
1363 | msg = '"' + guest + '": Not an active guest' | ||
1364 | continue | ||
1365 | if len(pids) > 1: | ||
1366 | msg = '"' + guest + '": Multiple matches found, use pid ' \ | ||
1367 | 'filter instead' | ||
1368 | continue | ||
1369 | pid = pids[0] | ||
1370 | break | ||
1371 | curses.curs_set(0) | ||
1372 | self._refresh_header(pid) | ||
1373 | self._update_pid(pid) | ||
1314 | 1374 | ||
1315 | def show_stats(self): | 1375 | def show_stats(self): |
1316 | """Refreshes the screen and processes user input.""" | 1376 | """Refreshes the screen and processes user input.""" |
1317 | sleeptime = self._delay_initial | 1377 | sleeptime = self._delay_initial |
1318 | self.refresh_header() | 1378 | self._refresh_header() |
1319 | start = 0.0 # result based on init value never appears on screen | 1379 | start = 0.0 # result based on init value never appears on screen |
1320 | while True: | 1380 | while True: |
1321 | self.refresh_body(time.time() - start) | 1381 | self._refresh_body(time.time() - start) |
1322 | curses.halfdelay(int(sleeptime * 10)) | 1382 | curses.halfdelay(int(sleeptime * 10)) |
1323 | start = time.time() | 1383 | start = time.time() |
1324 | sleeptime = self._delay_regular | 1384 | sleeptime = self._delay_regular |
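The merged guest/PID prompt boils down to the validation sketched below: numeric input is treated as a pid and checked against /proc, anything else is resolved as a guest name. This stand-alone version raises instead of re-prompting, and resolve_selection() is an illustrative name; get_pid_from_gname is passed in as a callable.

    import os

    def resolve_selection(guest, get_pid_from_gname):
        if not guest or guest == '0':
            return 0                                  # clear the filter
        if guest.isdigit():                           # numeric input is a pid
            if not os.path.isdir(os.path.join('/proc/', guest)):
                raise ValueError('"%s": Not a running process' % guest)
            return int(guest)
        pids = get_pid_from_gname(guest)              # otherwise a guest name
        if len(pids) != 1:
            raise ValueError('"%s": not active or ambiguous, use pid' % guest)
        return pids[0]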
@@ -1327,47 +1387,39 @@ class Tui(object): | |||
1327 | if char == 'b': | 1387 | if char == 'b': |
1328 | self._display_guests = not self._display_guests | 1388 | self._display_guests = not self._display_guests |
1329 | if self.stats.toggle_display_guests(self._display_guests): | 1389 | if self.stats.toggle_display_guests(self._display_guests): |
1330 | self.show_msg(['Command not available with tracepoints' | 1390 | self._show_msg(['Command not available with ' |
1331 | ' enabled', 'Restart with debugfs only ' | 1391 | 'tracepoints enabled', 'Restart with ' |
1332 | '(see option \'-d\') and try again!']) | 1392 | 'debugfs only (see option \'-d\') and ' |
1393 | 'try again!']) | ||
1333 | self._display_guests = not self._display_guests | 1394 | self._display_guests = not self._display_guests |
1334 | self.refresh_header() | 1395 | self._refresh_header() |
1335 | if char == 'c': | 1396 | if char == 'c': |
1336 | self.stats.fields_filter = DEFAULT_REGEX | 1397 | self.stats.fields_filter = '' |
1337 | self.refresh_header(0) | 1398 | self._refresh_header(0) |
1338 | self.update_pid(0) | 1399 | self._update_pid(0) |
1339 | if char == 'f': | 1400 | if char == 'f': |
1340 | curses.curs_set(1) | 1401 | curses.curs_set(1) |
1341 | self.show_filter_selection() | 1402 | self._show_filter_selection() |
1342 | curses.curs_set(0) | 1403 | curses.curs_set(0) |
1343 | sleeptime = self._delay_initial | 1404 | sleeptime = self._delay_initial |
1344 | if char == 'g': | 1405 | if char == 'g' or char == 'p': |
1345 | curses.curs_set(1) | 1406 | self._show_vm_selection_by_guest() |
1346 | self.show_vm_selection_by_guest_name() | ||
1347 | curses.curs_set(0) | ||
1348 | sleeptime = self._delay_initial | 1407 | sleeptime = self._delay_initial |
1349 | if char == 'h': | 1408 | if char == 'h': |
1350 | self.show_help_interactive() | 1409 | self._show_help_interactive() |
1351 | if char == 'o': | 1410 | if char == 'o': |
1352 | self._sorting = not self._sorting | 1411 | self._sorting = not self._sorting |
1353 | if char == 'p': | ||
1354 | curses.curs_set(1) | ||
1355 | self.show_vm_selection_by_pid() | ||
1356 | curses.curs_set(0) | ||
1357 | sleeptime = self._delay_initial | ||
1358 | if char == 'q': | 1412 | if char == 'q': |
1359 | break | 1413 | break |
1360 | if char == 'r': | 1414 | if char == 'r': |
1361 | self.stats.reset() | 1415 | self.stats.reset() |
1362 | if char == 's': | 1416 | if char == 's': |
1363 | curses.curs_set(1) | 1417 | curses.curs_set(1) |
1364 | self.show_set_update_interval() | 1418 | self._show_set_update_interval() |
1365 | curses.curs_set(0) | 1419 | curses.curs_set(0) |
1366 | sleeptime = self._delay_initial | 1420 | sleeptime = self._delay_initial |
1367 | if char == 'x': | 1421 | if char == 'x': |
1368 | self.update_drilldown() | 1422 | self.stats.child_events = not self.stats.child_events |
1369 | # prevents display of current values on next refresh | ||
1370 | self.stats.get(self._display_guests) | ||
1371 | except KeyboardInterrupt: | 1423 | except KeyboardInterrupt: |
1372 | break | 1424 | break |
1373 | except curses.error: | 1425 | except curses.error: |
@@ -1380,9 +1432,9 @@ def batch(stats): | |||
1380 | s = stats.get() | 1432 | s = stats.get() |
1381 | time.sleep(1) | 1433 | time.sleep(1) |
1382 | s = stats.get() | 1434 | s = stats.get() |
1383 | for key in sorted(s.keys()): | 1435 | for key, values in sorted(s.items()): |
1384 | values = s[key] | 1436 | print('%-42s%10d%10d' % (key.split(' ')[0], values.value, |
1385 | print('%-42s%10d%10d' % (key, values[0], values[1])) | 1437 | values.delta)) |
1386 | except KeyboardInterrupt: | 1438 | except KeyboardInterrupt: |
1387 | pass | 1439 | pass |
1388 | 1440 | ||
@@ -1392,14 +1444,14 @@ def log(stats): | |||
1392 | keys = sorted(stats.get().keys()) | 1444 | keys = sorted(stats.get().keys()) |
1393 | 1445 | ||
1394 | def banner(): | 1446 | def banner(): |
1395 | for k in keys: | 1447 | for key in keys: |
1396 | print(k, end=' ') | 1448 | print(key.split(' ')[0], end=' ') |
1397 | print() | 1449 | print() |
1398 | 1450 | ||
1399 | def statline(): | 1451 | def statline(): |
1400 | s = stats.get() | 1452 | s = stats.get() |
1401 | for k in keys: | 1453 | for key in keys: |
1402 | print(' %9d' % s[k][1], end=' ') | 1454 | print(' %9d' % s[key].delta, end=' ') |
1403 | print() | 1455 | print() |
1404 | line = 0 | 1456 | line = 0 |
1405 | banner_repeat = 20 | 1457 | banner_repeat = 20 |
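Batch and log output now print only the event's own name, so for child keys of the form '<child> <parent>' the parent suffix is trimmed before formatting; a minimal illustration with made-up numbers:

    key = 'kvm_exit(HLT) kvm_exit'
    print('%-42s%10d%10d' % (key.split(' ')[0], 1234, 56))
    # 'kvm_exit(HLT)' padded to 42 columns, followed by value and delta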
@@ -1504,7 +1556,7 @@ Press any other key to refresh statistics immediately. | |||
1504 | ) | 1556 | ) |
1505 | optparser.add_option('-f', '--fields', | 1557 | optparser.add_option('-f', '--fields', |
1506 | action='store', | 1558 | action='store', |
1507 | default=DEFAULT_REGEX, | 1559 | default='', |
1508 | dest='fields', | 1560 | dest='fields', |
1509 | help='''fields to display (regex) | 1561 | help='''fields to display (regex) |
1510 | "-f help" for a list of available events''', | 1562 | "-f help" for a list of available events''', |
@@ -1539,17 +1591,6 @@ Press any other key to refresh statistics immediately. | |||
1539 | 1591 | ||
1540 | def check_access(options): | 1592 | def check_access(options): |
1541 | """Exits if the current user can't access all needed directories.""" | 1593 | """Exits if the current user can't access all needed directories.""" |
1542 | if not os.path.exists('/sys/kernel/debug'): | ||
1543 | sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.') | ||
1544 | sys.exit(1) | ||
1545 | |||
1546 | if not os.path.exists(PATH_DEBUGFS_KVM): | ||
1547 | sys.stderr.write("Please make sure, that debugfs is mounted and " | ||
1548 | "readable by the current user:\n" | ||
1549 | "('mount -t debugfs debugfs /sys/kernel/debug')\n" | ||
1550 | "Also ensure, that the kvm modules are loaded.\n") | ||
1551 | sys.exit(1) | ||
1552 | |||
1553 | if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or | 1594 | if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or |
1554 | not options.debugfs): | 1595 | not options.debugfs): |
1555 | sys.stderr.write("Please enable CONFIG_TRACING in your kernel " | 1596 | sys.stderr.write("Please enable CONFIG_TRACING in your kernel " |
@@ -1567,7 +1608,33 @@ def check_access(options): | |||
1567 | return options | 1608 | return options |
1568 | 1609 | ||
1569 | 1610 | ||
1611 | def assign_globals(): | ||
1612 | global PATH_DEBUGFS_KVM | ||
1613 | global PATH_DEBUGFS_TRACING | ||
1614 | |||
1615 | debugfs = '' | ||
1616 | for line in file('/proc/mounts'): | ||
1617 | if line.split(' ')[0] == 'debugfs': | ||
1618 | debugfs = line.split(' ')[1] | ||
1619 | break | ||
1620 | if debugfs == '': | ||
1621 | sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in " | ||
1622 | "your kernel, mounted and\nreadable by the current " | ||
1623 | "user:\n" | ||
1624 | "('mount -t debugfs debugfs /sys/kernel/debug')\n") | ||
1625 | sys.exit(1) | ||
1626 | |||
1627 | PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm') | ||
1628 | PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing') | ||
1629 | |||
1630 | if not os.path.exists(PATH_DEBUGFS_KVM): | ||
1631 | sys.stderr.write("Please make sure that CONFIG_KVM is enabled in " | ||
1632 | "your kernel and that the modules are loaded.\n") | ||
1633 | sys.exit(1) | ||
1634 | |||
1635 | |||
1570 | def main(): | 1636 | def main(): |
1637 | assign_globals() | ||
1571 | options = get_options() | 1638 | options = get_options() |
1572 | options = check_access(options) | 1639 | options = check_access(options) |
1573 | 1640 | ||
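assign_globals() replaces the hard-coded /sys/kernel/debug paths with a lookup of the actual debugfs mount point in /proc/mounts. A Python 3 friendly sketch of the same discovery (using open() rather than the script's Python 2 file() builtin; find_debugfs() is an illustrative name):

    import os
    import sys

    def find_debugfs():
        with open('/proc/mounts') as mounts:
            for line in mounts:
                fields = line.split(' ')
                if fields[0] == 'debugfs':
                    return fields[1]
        return ''

    debugfs = find_debugfs()
    if not debugfs:
        sys.exit("debugfs not mounted "
                 "('mount -t debugfs debugfs /sys/kernel/debug')")
    PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm')
    PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing')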
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt index b5b3810c9e94..0811d860fe75 100644 --- a/tools/kvm/kvm_stat/kvm_stat.txt +++ b/tools/kvm/kvm_stat/kvm_stat.txt | |||
@@ -35,13 +35,13 @@ INTERACTIVE COMMANDS | |||
35 | 35 | ||
36 | *f*:: filter by regular expression | 36 | *f*:: filter by regular expression |
37 | 37 | ||
38 | *g*:: filter by guest name | 38 | *g*:: filter by guest name/PID |
39 | 39 | ||
40 | *h*:: display interactive commands reference | 40 | *h*:: display interactive commands reference |
41 | 41 | ||
42 | *o*:: toggle sorting order (Total vs CurAvg/s) | 42 | *o*:: toggle sorting order (Total vs CurAvg/s) |
43 | 43 | ||
44 | *p*:: filter by PID | 44 | *p*:: filter by guest name/PID |
45 | 45 | ||
46 | *q*:: quit | 46 | *q*:: quit |
47 | 47 | ||
diff --git a/tools/laptop/freefall/Makefile b/tools/laptop/freefall/Makefile index 5f758c489a20..b572d94255f6 100644 --- a/tools/laptop/freefall/Makefile +++ b/tools/laptop/freefall/Makefile | |||
@@ -2,7 +2,6 @@ | |||
2 | PREFIX ?= /usr | 2 | PREFIX ?= /usr |
3 | SBINDIR ?= sbin | 3 | SBINDIR ?= sbin |
4 | INSTALL ?= install | 4 | INSTALL ?= install |
5 | CC = $(CROSS_COMPILE)gcc | ||
6 | 5 | ||
7 | TARGET = freefall | 6 | TARGET = freefall |
8 | 7 | ||
diff --git a/tools/leds/Makefile b/tools/leds/Makefile index c379af003807..7b6bed13daaa 100644 --- a/tools/leds/Makefile +++ b/tools/leds/Makefile | |||
@@ -1,7 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for LEDs tools | 2 | # Makefile for LEDs tools |
3 | 3 | ||
4 | CC = $(CROSS_COMPILE)gcc | ||
5 | CFLAGS = -Wall -Wextra -g -I../../include/uapi | 4 | CFLAGS = -Wall -Wextra -g -I../../include/uapi |
6 | 5 | ||
7 | all: uledmon led_hw_brightness_mon | 6 | all: uledmon led_hw_brightness_mon |
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 97073d649c1a..5bbbf285af74 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c | |||
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, | |||
1060 | prog->insns = new_insn; | 1060 | prog->insns = new_insn; |
1061 | prog->main_prog_cnt = prog->insns_cnt; | 1061 | prog->main_prog_cnt = prog->insns_cnt; |
1062 | prog->insns_cnt = new_cnt; | 1062 | prog->insns_cnt = new_cnt; |
1063 | pr_debug("added %zd insn from %s to prog %s\n", | ||
1064 | text->insns_cnt, text->section_name, | ||
1065 | prog->section_name); | ||
1063 | } | 1066 | } |
1064 | insn = &prog->insns[relo->insn_idx]; | 1067 | insn = &prog->insns[relo->insn_idx]; |
1065 | insn->imm += prog->main_prog_cnt - relo->insn_idx; | 1068 | insn->imm += prog->main_prog_cnt - relo->insn_idx; |
1066 | pr_debug("added %zd insn from %s to prog %s\n", | ||
1067 | text->insns_cnt, text->section_name, prog->section_name); | ||
1068 | return 0; | 1069 | return 0; |
1069 | } | 1070 | } |
1070 | 1071 | ||
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 57254f5b2779..694abc628e9b 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include "builtin.h" | 29 | #include "builtin.h" |
30 | #include "check.h" | 30 | #include "check.h" |
31 | 31 | ||
32 | bool no_fp, no_unreachable; | 32 | bool no_fp, no_unreachable, retpoline, module; |
33 | 33 | ||
34 | static const char * const check_usage[] = { | 34 | static const char * const check_usage[] = { |
35 | "objtool check [<options>] file.o", | 35 | "objtool check [<options>] file.o", |
@@ -39,6 +39,8 @@ static const char * const check_usage[] = { | |||
39 | const struct option check_options[] = { | 39 | const struct option check_options[] = { |
40 | OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"), | 40 | OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"), |
41 | OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), | 41 | OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), |
42 | OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"), | ||
43 | OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"), | ||
42 | OPT_END(), | 44 | OPT_END(), |
43 | }; | 45 | }; |
44 | 46 | ||
@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv) | |||
53 | 55 | ||
54 | objname = argv[0]; | 56 | objname = argv[0]; |
55 | 57 | ||
56 | return check(objname, no_fp, no_unreachable, false); | 58 | return check(objname, false); |
57 | } | 59 | } |
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 91e8e19ff5e0..77ea2b97117d 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c | |||
@@ -25,7 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <string.h> | 27 | #include <string.h> |
28 | #include <subcmd/parse-options.h> | ||
29 | #include "builtin.h" | 28 | #include "builtin.h" |
30 | #include "check.h" | 29 | #include "check.h" |
31 | 30 | ||
@@ -36,9 +35,6 @@ static const char *orc_usage[] = { | |||
36 | NULL, | 35 | NULL, |
37 | }; | 36 | }; |
38 | 37 | ||
39 | extern const struct option check_options[]; | ||
40 | extern bool no_fp, no_unreachable; | ||
41 | |||
42 | int cmd_orc(int argc, const char **argv) | 38 | int cmd_orc(int argc, const char **argv) |
43 | { | 39 | { |
44 | const char *objname; | 40 | const char *objname; |
@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv) | |||
54 | 50 | ||
55 | objname = argv[0]; | 51 | objname = argv[0]; |
56 | 52 | ||
57 | return check(objname, no_fp, no_unreachable, true); | 53 | return check(objname, true); |
58 | } | 54 | } |
59 | 55 | ||
60 | if (!strcmp(argv[0], "dump")) { | 56 | if (!strcmp(argv[0], "dump")) { |
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h index dd526067fed5..28ff40e19a14 100644 --- a/tools/objtool/builtin.h +++ b/tools/objtool/builtin.h | |||
@@ -17,6 +17,11 @@ | |||
17 | #ifndef _BUILTIN_H | 17 | #ifndef _BUILTIN_H |
18 | #define _BUILTIN_H | 18 | #define _BUILTIN_H |
19 | 19 | ||
20 | #include <subcmd/parse-options.h> | ||
21 | |||
22 | extern const struct option check_options[]; | ||
23 | extern bool no_fp, no_unreachable, retpoline, module; | ||
24 | |||
20 | extern int cmd_check(int argc, const char **argv); | 25 | extern int cmd_check(int argc, const char **argv); |
21 | extern int cmd_orc(int argc, const char **argv); | 26 | extern int cmd_orc(int argc, const char **argv); |
22 | 27 | ||
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index a8cb69a26576..472e64e95891 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <string.h> | 18 | #include <string.h> |
19 | #include <stdlib.h> | 19 | #include <stdlib.h> |
20 | 20 | ||
21 | #include "builtin.h" | ||
21 | #include "check.h" | 22 | #include "check.h" |
22 | #include "elf.h" | 23 | #include "elf.h" |
23 | #include "special.h" | 24 | #include "special.h" |
@@ -33,7 +34,6 @@ struct alternative { | |||
33 | }; | 34 | }; |
34 | 35 | ||
35 | const char *objname; | 36 | const char *objname; |
36 | static bool no_fp; | ||
37 | struct cfi_state initial_func_cfi; | 37 | struct cfi_state initial_func_cfi; |
38 | 38 | ||
39 | struct instruction *find_insn(struct objtool_file *file, | 39 | struct instruction *find_insn(struct objtool_file *file, |
@@ -497,6 +497,7 @@ static int add_jump_destinations(struct objtool_file *file) | |||
497 | * disguise, so convert them accordingly. | 497 | * disguise, so convert them accordingly. |
498 | */ | 498 | */ |
499 | insn->type = INSN_JUMP_DYNAMIC; | 499 | insn->type = INSN_JUMP_DYNAMIC; |
500 | insn->retpoline_safe = true; | ||
500 | continue; | 501 | continue; |
501 | } else { | 502 | } else { |
502 | /* sibling call */ | 503 | /* sibling call */ |
@@ -548,7 +549,8 @@ static int add_call_destinations(struct objtool_file *file) | |||
548 | if (!insn->call_dest && !insn->ignore) { | 549 | if (!insn->call_dest && !insn->ignore) { |
549 | WARN_FUNC("unsupported intra-function call", | 550 | WARN_FUNC("unsupported intra-function call", |
550 | insn->sec, insn->offset); | 551 | insn->sec, insn->offset); |
551 | WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE."); | 552 | if (retpoline) |
553 | WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE."); | ||
552 | return -1; | 554 | return -1; |
553 | } | 555 | } |
554 | 556 | ||
@@ -1108,6 +1110,54 @@ static int read_unwind_hints(struct objtool_file *file) | |||
1108 | return 0; | 1110 | return 0; |
1109 | } | 1111 | } |
1110 | 1112 | ||
1113 | static int read_retpoline_hints(struct objtool_file *file) | ||
1114 | { | ||
1115 | struct section *sec, *relasec; | ||
1116 | struct instruction *insn; | ||
1117 | struct rela *rela; | ||
1118 | int i; | ||
1119 | |||
1120 | sec = find_section_by_name(file->elf, ".discard.retpoline_safe"); | ||
1121 | if (!sec) | ||
1122 | return 0; | ||
1123 | |||
1124 | relasec = sec->rela; | ||
1125 | if (!relasec) { | ||
1126 | WARN("missing .rela.discard.retpoline_safe section"); | ||
1127 | return -1; | ||
1128 | } | ||
1129 | |||
1130 | if (sec->len % sizeof(unsigned long)) { | ||
1131 | WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long)); | ||
1132 | return -1; | ||
1133 | } | ||
1134 | |||
1135 | for (i = 0; i < sec->len / sizeof(unsigned long); i++) { | ||
1136 | rela = find_rela_by_dest(sec, i * sizeof(unsigned long)); | ||
1137 | if (!rela) { | ||
1138 | WARN("can't find rela for retpoline_safe[%d]", i); | ||
1139 | return -1; | ||
1140 | } | ||
1141 | |||
1142 | insn = find_insn(file, rela->sym->sec, rela->addend); | ||
1143 | if (!insn) { | ||
1144 | WARN("can't find insn for retpoline_safe[%d]", i); | ||
1145 | return -1; | ||
1146 | } | ||
1147 | |||
1148 | if (insn->type != INSN_JUMP_DYNAMIC && | ||
1149 | insn->type != INSN_CALL_DYNAMIC) { | ||
1150 | WARN_FUNC("retpoline_safe hint not an indirect jump/call", | ||
1151 | insn->sec, insn->offset); | ||
1152 | return -1; | ||
1153 | } | ||
1154 | |||
1155 | insn->retpoline_safe = true; | ||
1156 | } | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1111 | static int decode_sections(struct objtool_file *file) | 1161 | static int decode_sections(struct objtool_file *file) |
1112 | { | 1162 | { |
1113 | int ret; | 1163 | int ret; |
@@ -1146,6 +1196,10 @@ static int decode_sections(struct objtool_file *file) | |||
1146 | if (ret) | 1196 | if (ret) |
1147 | return ret; | 1197 | return ret; |
1148 | 1198 | ||
1199 | ret = read_retpoline_hints(file); | ||
1200 | if (ret) | ||
1201 | return ret; | ||
1202 | |||
1149 | return 0; | 1203 | return 0; |
1150 | } | 1204 | } |
1151 | 1205 | ||
@@ -1891,6 +1945,38 @@ static int validate_unwind_hints(struct objtool_file *file) | |||
1891 | return warnings; | 1945 | return warnings; |
1892 | } | 1946 | } |
1893 | 1947 | ||
1948 | static int validate_retpoline(struct objtool_file *file) | ||
1949 | { | ||
1950 | struct instruction *insn; | ||
1951 | int warnings = 0; | ||
1952 | |||
1953 | for_each_insn(file, insn) { | ||
1954 | if (insn->type != INSN_JUMP_DYNAMIC && | ||
1955 | insn->type != INSN_CALL_DYNAMIC) | ||
1956 | continue; | ||
1957 | |||
1958 | if (insn->retpoline_safe) | ||
1959 | continue; | ||
1960 | |||
1961 | /* | ||
1962 | * .init.text code is run before userspace and thus doesn't | ||
1963 | * strictly need retpolines, except for modules: these are | ||
1964 | * loaded late, so they very much do need retpolines in their | ||
1965 | * .init.text | ||
1966 | */ | ||
1967 | if (!strcmp(insn->sec->name, ".init.text") && !module) | ||
1968 | continue; | ||
1969 | |||
1970 | WARN_FUNC("indirect %s found in RETPOLINE build", | ||
1971 | insn->sec, insn->offset, | ||
1972 | insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); | ||
1973 | |||
1974 | warnings++; | ||
1975 | } | ||
1976 | |||
1977 | return warnings; | ||
1978 | } | ||
1979 | |||
1894 | static bool is_kasan_insn(struct instruction *insn) | 1980 | static bool is_kasan_insn(struct instruction *insn) |
1895 | { | 1981 | { |
1896 | return (insn->type == INSN_CALL && | 1982 | return (insn->type == INSN_CALL && |
@@ -2022,13 +2108,12 @@ static void cleanup(struct objtool_file *file) | |||
2022 | elf_close(file->elf); | 2108 | elf_close(file->elf); |
2023 | } | 2109 | } |
2024 | 2110 | ||
2025 | int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc) | 2111 | int check(const char *_objname, bool orc) |
2026 | { | 2112 | { |
2027 | struct objtool_file file; | 2113 | struct objtool_file file; |
2028 | int ret, warnings = 0; | 2114 | int ret, warnings = 0; |
2029 | 2115 | ||
2030 | objname = _objname; | 2116 | objname = _objname; |
2031 | no_fp = _no_fp; | ||
2032 | 2117 | ||
2033 | file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY); | 2118 | file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY); |
2034 | if (!file.elf) | 2119 | if (!file.elf) |
@@ -2052,6 +2137,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc) | |||
2052 | if (list_empty(&file.insn_list)) | 2137 | if (list_empty(&file.insn_list)) |
2053 | goto out; | 2138 | goto out; |
2054 | 2139 | ||
2140 | if (retpoline) { | ||
2141 | ret = validate_retpoline(&file); | ||
2142 | if (ret < 0) | ||
2143 | return ret; | ||
2144 | warnings += ret; | ||
2145 | } | ||
2146 | |||
2055 | ret = validate_functions(&file); | 2147 | ret = validate_functions(&file); |
2056 | if (ret < 0) | 2148 | if (ret < 0) |
2057 | goto out; | 2149 | goto out; |
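The new --retpoline mode walks every decoded instruction and warns about any indirect jump or call that has not been marked retpoline_safe, either implicitly (jumps converted to the compiler's retpoline thunks) or explicitly via the .discard.retpoline_safe entries read in read_retpoline_hints(). As a hedged illustration (plain C, not part of the patch), ordinary C indirect calls already pass this check because the compiler routes them through thunks:

	/*
	 * Illustrative sketch only.  Under CONFIG_RETPOLINE the kernel is
	 * built with -mindirect-branch=thunk-extern, so the indirect call
	 * below becomes "call __x86_indirect_thunk_rax" rather than a raw
	 * "call *%rax", and validate_retpoline() has nothing to warn about.
	 * Hand-written assembly that intentionally keeps a bare indirect
	 * branch must carry the ANNOTATE_RETPOLINE_SAFE marker, which is
	 * what populates .discard.retpoline_safe.
	 */
	struct ops {
		int (*probe)(void);
	};

	static int call_probe(const struct ops *ops)
	{
		return ops->probe();	/* indirect call, thunked by the compiler */
	}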
diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 23a1d065cae1..c6b68fcb926f 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h | |||
@@ -45,6 +45,7 @@ struct instruction { | |||
45 | unsigned char type; | 45 | unsigned char type; |
46 | unsigned long immediate; | 46 | unsigned long immediate; |
47 | bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; | 47 | bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; |
48 | bool retpoline_safe; | ||
48 | struct symbol *call_dest; | 49 | struct symbol *call_dest; |
49 | struct instruction *jump_dest; | 50 | struct instruction *jump_dest; |
50 | struct instruction *first_jump_src; | 51 | struct instruction *first_jump_src; |
@@ -63,7 +64,7 @@ struct objtool_file { | |||
63 | bool ignore_unreachables, c_file, hints; | 64 | bool ignore_unreachables, c_file, hints; |
64 | }; | 65 | }; |
65 | 66 | ||
66 | int check(const char *objname, bool no_fp, bool no_unreachable, bool orc); | 67 | int check(const char *objname, bool orc); |
67 | 68 | ||
68 | struct instruction *find_insn(struct objtool_file *file, | 69 | struct instruction *find_insn(struct objtool_file *file, |
69 | struct section *sec, unsigned long offset); | 70 | struct section *sec, unsigned long offset); |
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 9b0351d3ce34..012328038594 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf | |||
@@ -146,12 +146,6 @@ define allow-override | |||
146 | $(eval $(1) = $(2))) | 146 | $(eval $(1) = $(2))) |
147 | endef | 147 | endef |
148 | 148 | ||
149 | # Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix. | ||
150 | $(call allow-override,CC,$(CROSS_COMPILE)gcc) | ||
151 | $(call allow-override,AR,$(CROSS_COMPILE)ar) | ||
152 | $(call allow-override,LD,$(CROSS_COMPILE)ld) | ||
153 | $(call allow-override,CXX,$(CROSS_COMPILE)g++) | ||
154 | |||
155 | LD += $(EXTRA_LDFLAGS) | 149 | LD += $(EXTRA_LDFLAGS) |
156 | 150 | ||
157 | HOSTCC ?= gcc | 151 | HOSTCC ?= gcc |
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index a1883bbb0144..2cccbba64418 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config | |||
@@ -56,9 +56,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM} | |||
56 | # to compile vs uClibc, that can be done here as well. | 56 | # to compile vs uClibc, that can be done here as well. |
57 | CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- | 57 | CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- |
58 | CROSS_COMPILE ?= $(CROSS) | 58 | CROSS_COMPILE ?= $(CROSS) |
59 | CC = $(CROSS_COMPILE)gcc | ||
60 | LD = $(CROSS_COMPILE)gcc | ||
61 | STRIP = $(CROSS_COMPILE)strip | ||
62 | HOSTCC = gcc | 59 | HOSTCC = gcc |
63 | 60 | ||
64 | # check if compiler option is supported | 61 | # check if compiler option is supported |
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index fcb3ed0be5f8..dd614463d4d6 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include | |||
@@ -42,6 +42,24 @@ EXTRA_WARNINGS += -Wformat | |||
42 | 42 | ||
43 | CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?) | 43 | CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?) |
44 | 44 | ||
45 | # Makefiles suck: This macro sets a default value of $(2) for the | ||
46 | # variable named by $(1), unless the variable has been set by | ||
47 | # environment or command line. This is necessary for CC and AR | ||
48 | # because make sets default values, so the simpler ?= approach | ||
49 | # won't work as expected. | ||
50 | define allow-override | ||
51 | $(if $(or $(findstring environment,$(origin $(1))),\ | ||
52 | $(findstring command line,$(origin $(1)))),,\ | ||
53 | $(eval $(1) = $(2))) | ||
54 | endef | ||
55 | |||
56 | # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix. | ||
57 | $(call allow-override,CC,$(CROSS_COMPILE)gcc) | ||
58 | $(call allow-override,AR,$(CROSS_COMPILE)ar) | ||
59 | $(call allow-override,LD,$(CROSS_COMPILE)ld) | ||
60 | $(call allow-override,CXX,$(CROSS_COMPILE)g++) | ||
61 | $(call allow-override,STRIP,$(CROSS_COMPILE)strip) | ||
62 | |||
45 | ifeq ($(CC_NO_CLANG), 1) | 63 | ifeq ($(CC_NO_CLANG), 1) |
46 | EXTRA_WARNINGS += -Wstrict-aliasing=3 | 64 | EXTRA_WARNINGS += -Wstrict-aliasing=3 |
47 | endif | 65 | endif |
diff --git a/tools/spi/Makefile b/tools/spi/Makefile index 90615e10c79a..815d15589177 100644 --- a/tools/spi/Makefile +++ b/tools/spi/Makefile | |||
@@ -11,8 +11,6 @@ endif | |||
11 | # (this improves performance and avoids hard-to-debug behaviour); | 11 | # (this improves performance and avoids hard-to-debug behaviour); |
12 | MAKEFLAGS += -r | 12 | MAKEFLAGS += -r |
13 | 13 | ||
14 | CC = $(CROSS_COMPILE)gcc | ||
15 | LD = $(CROSS_COMPILE)ld | ||
16 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include | 14 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include |
17 | 15 | ||
18 | ALL_TARGETS := spidev_test spidev_fdx | 16 | ALL_TARGETS := spidev_test spidev_fdx |
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 44ef9eba5a7a..6c645eb77d42 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
@@ -178,6 +178,55 @@ void idr_get_next_test(int base) | |||
178 | idr_destroy(&idr); | 178 | idr_destroy(&idr); |
179 | } | 179 | } |
180 | 180 | ||
181 | int idr_u32_cb(int id, void *ptr, void *data) | ||
182 | { | ||
183 | BUG_ON(id < 0); | ||
184 | BUG_ON(ptr != DUMMY_PTR); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | void idr_u32_test1(struct idr *idr, u32 handle) | ||
189 | { | ||
190 | static bool warned = false; | ||
191 | u32 id = handle; | ||
192 | int sid = 0; | ||
193 | void *ptr; | ||
194 | |||
195 | BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL)); | ||
196 | BUG_ON(id != handle); | ||
197 | BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC); | ||
198 | BUG_ON(id != handle); | ||
199 | if (!warned && id > INT_MAX) | ||
200 | printk("vvv Ignore these warnings\n"); | ||
201 | ptr = idr_get_next(idr, &sid); | ||
202 | if (id > INT_MAX) { | ||
203 | BUG_ON(ptr != NULL); | ||
204 | BUG_ON(sid != 0); | ||
205 | } else { | ||
206 | BUG_ON(ptr != DUMMY_PTR); | ||
207 | BUG_ON(sid != id); | ||
208 | } | ||
209 | idr_for_each(idr, idr_u32_cb, NULL); | ||
210 | if (!warned && id > INT_MAX) { | ||
211 | printk("^^^ Warnings over\n"); | ||
212 | warned = true; | ||
213 | } | ||
214 | BUG_ON(idr_remove(idr, id) != DUMMY_PTR); | ||
215 | BUG_ON(!idr_is_empty(idr)); | ||
216 | } | ||
217 | |||
218 | void idr_u32_test(int base) | ||
219 | { | ||
220 | DEFINE_IDR(idr); | ||
221 | idr_init_base(&idr, base); | ||
222 | idr_u32_test1(&idr, 10); | ||
223 | idr_u32_test1(&idr, 0x7fffffff); | ||
224 | idr_u32_test1(&idr, 0x80000000); | ||
225 | idr_u32_test1(&idr, 0x80000001); | ||
226 | idr_u32_test1(&idr, 0xffe00000); | ||
227 | idr_u32_test1(&idr, 0xffffffff); | ||
228 | } | ||
229 | |||
181 | void idr_checks(void) | 230 | void idr_checks(void) |
182 | { | 231 | { |
183 | unsigned long i; | 232 | unsigned long i; |
@@ -248,6 +297,9 @@ void idr_checks(void) | |||
248 | idr_get_next_test(0); | 297 | idr_get_next_test(0); |
249 | idr_get_next_test(1); | 298 | idr_get_next_test(1); |
250 | idr_get_next_test(4); | 299 | idr_get_next_test(4); |
300 | idr_u32_test(4); | ||
301 | idr_u32_test(1); | ||
302 | idr_u32_test(0); | ||
251 | } | 303 | } |
252 | 304 | ||
253 | /* | 305 | /* |
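The new idr_u32_test*() cases cover handles on both sides of INT_MAX. A hedged kernel-side sketch of the pattern under test (illustrative names, not from the patch):

	/*
	 * Sketch: store an object under a caller-chosen u32 handle, which
	 * may exceed INT_MAX.  idr_alloc_u32() and idr_remove() accept the
	 * full u32 range, while the int-based idr_get_next() cursor cannot
	 * report such ids -- the asymmetry idr_u32_test1() verifies.
	 */
	static int stash_object(struct idr *idr, void *obj, u32 handle)
	{
		u32 id = handle;
		int err;

		err = idr_alloc_u32(idr, obj, &id, handle, GFP_KERNEL);
		if (err)
			return err;	/* -ENOSPC if that exact handle is taken */
		WARN_ON(id != handle);
		return 0;
	}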
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index 6903ccf35595..44a0d1ad4408 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c | |||
@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) | |||
29 | { | 29 | { |
30 | struct radix_tree_node *node; | 30 | struct radix_tree_node *node; |
31 | 31 | ||
32 | if (flags & __GFP_NOWARN) | 32 | if (!(flags & __GFP_DIRECT_RECLAIM)) |
33 | return NULL; | 33 | return NULL; |
34 | 34 | ||
35 | pthread_mutex_lock(&cachep->lock); | 35 | pthread_mutex_lock(&cachep->lock); |
@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |||
73 | 73 | ||
74 | void *kmalloc(size_t size, gfp_t gfp) | 74 | void *kmalloc(size_t size, gfp_t gfp) |
75 | { | 75 | { |
76 | void *ret = malloc(size); | 76 | void *ret; |
77 | |||
78 | if (!(gfp & __GFP_DIRECT_RECLAIM)) | ||
79 | return NULL; | ||
80 | |||
81 | ret = malloc(size); | ||
77 | uatomic_inc(&nr_allocated); | 82 | uatomic_inc(&nr_allocated); |
78 | if (kmalloc_verbose) | 83 | if (kmalloc_verbose) |
79 | printf("Allocating %p from malloc\n", ret); | 84 | printf("Allocating %p from malloc\n", ret); |
85 | if (gfp & __GFP_ZERO) | ||
86 | memset(ret, 0, size); | ||
80 | return ret; | 87 | return ret; |
81 | } | 88 | } |
82 | 89 | ||
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/tools/testing/radix-tree/linux/compiler_types.h | |||
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index e9fff59dfd8a..e3201ccf54c3 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define __GFP_IO 0x40u | 11 | #define __GFP_IO 0x40u |
12 | #define __GFP_FS 0x80u | 12 | #define __GFP_FS 0x80u |
13 | #define __GFP_NOWARN 0x200u | 13 | #define __GFP_NOWARN 0x200u |
14 | #define __GFP_ZERO 0x8000u | ||
14 | #define __GFP_ATOMIC 0x80000u | 15 | #define __GFP_ATOMIC 0x80000u |
15 | #define __GFP_ACCOUNT 0x100000u | 16 | #define __GFP_ACCOUNT 0x100000u |
16 | #define __GFP_DIRECT_RECLAIM 0x400000u | 17 | #define __GFP_DIRECT_RECLAIM 0x400000u |
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h index 979baeec7e70..a037def0dec6 100644 --- a/tools/testing/radix-tree/linux/slab.h +++ b/tools/testing/radix-tree/linux/slab.h | |||
@@ -3,6 +3,7 @@ | |||
3 | #define SLAB_H | 3 | #define SLAB_H |
4 | 4 | ||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/gfp.h> | ||
6 | 7 | ||
7 | #define SLAB_HWCACHE_ALIGN 1 | 8 | #define SLAB_HWCACHE_ALIGN 1 |
8 | #define SLAB_PANIC 2 | 9 | #define SLAB_PANIC 2 |
@@ -11,6 +12,11 @@ | |||
11 | void *kmalloc(size_t size, gfp_t); | 12 | void *kmalloc(size_t size, gfp_t); |
12 | void kfree(void *); | 13 | void kfree(void *); |
13 | 14 | ||
15 | static inline void *kzalloc(size_t size, gfp_t gfp) | ||
16 | { | ||
17 | return kmalloc(size, gfp | __GFP_ZERO); | ||
18 | } | ||
19 | |||
14 | void *kmem_cache_alloc(struct kmem_cache *cachep, int flags); | 20 | void *kmem_cache_alloc(struct kmem_cache *cachep, int flags); |
15 | void kmem_cache_free(struct kmem_cache *cachep, void *objp); | 21 | void kmem_cache_free(struct kmem_cache *cachep, void *objp); |
16 | 22 | ||
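Together, the __GFP_DIRECT_RECLAIM gate in the harness allocators, the new __GFP_ZERO handling and the kzalloc() wrapper let the userspace test build model kernel allocation behaviour. A hedged sketch of what test code can now rely on (assumes the shim headers above):

	#include <assert.h>
	/* plus the harness's linux/slab.h and linux/gfp.h shown above */

	/* Illustrative only: semantics provided by the modified shim. */
	static void gfp_semantics_demo(void)
	{
		void *a = kmalloc(64, __GFP_DIRECT_RECLAIM);	/* succeeds */
		void *b = kmalloc(64, 0);			/* NULL: no direct reclaim allowed */
		void *c = kzalloc(64, __GFP_DIRECT_RECLAIM);	/* succeeds, zero-filled */

		assert(a && !b && c);
		kfree(a);
		kfree(c);
	}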
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile index 1a7492268993..f6304d2be90c 100644 --- a/tools/testing/selftests/android/Makefile +++ b/tools/testing/selftests/android/Makefile | |||
@@ -11,11 +11,11 @@ all: | |||
11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
12 | mkdir $$BUILD_TARGET -p; \ | 12 | mkdir $$BUILD_TARGET -p; \ |
13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
14 | #SUBDIR test prog name should be in the form: SUBDIR_test.sh | 14 | #SUBDIR test prog name should be in the form: SUBDIR_test.sh \ |
15 | TEST=$$DIR"_test.sh"; \ | 15 | TEST=$$DIR"_test.sh"; \ |
16 | if [ -e $$DIR/$$TEST ]; then | 16 | if [ -e $$DIR/$$TEST ]; then \ |
17 | rsync -a $$DIR/$$TEST $$BUILD_TARGET/; | 17 | rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \ |
18 | fi | 18 | fi \ |
19 | done | 19 | done |
20 | 20 | ||
21 | override define RUN_TESTS | 21 | override define RUN_TESTS |
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index cc15af2e54fe..9cf83f895d98 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore | |||
@@ -11,3 +11,4 @@ test_progs | |||
11 | test_tcpbpf_user | 11 | test_tcpbpf_user |
12 | test_verifier_log | 12 | test_verifier_log |
13 | feature | 13 | feature |
14 | test_libbpf_open | ||
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 436c4c72414f..9e03a4c356a4 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data) | |||
126 | fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, | 126 | fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, |
127 | 2, map_flags); | 127 | 2, map_flags); |
128 | if (fd < 0) { | 128 | if (fd < 0) { |
129 | if (errno == ENOMEM) | ||
130 | return; | ||
129 | printf("Failed to create hashmap key=%d value=%d '%s'\n", | 131 | printf("Failed to create hashmap key=%d value=%d '%s'\n", |
130 | i, j, strerror(errno)); | 132 | i, j, strerror(errno)); |
131 | exit(1); | 133 | exit(1); |
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c index 57119ad57a3f..3e645ee41ed5 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/if_ether.h> | 5 | #include <linux/if_ether.h> |
6 | #include <linux/if_packet.h> | 6 | #include <linux/if_packet.h> |
7 | #include <linux/ip.h> | 7 | #include <linux/ip.h> |
8 | #include <linux/in6.h> | ||
9 | #include <linux/types.h> | 8 | #include <linux/types.h> |
10 | #include <linux/socket.h> | 9 | #include <linux/socket.h> |
11 | #include <linux/tcp.h> | 10 | #include <linux/tcp.h> |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c0f16e93f9bd..c73592fa3d41 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -2587,6 +2587,32 @@ static struct bpf_test tests[] = { | |||
2587 | .result = ACCEPT, | 2587 | .result = ACCEPT, |
2588 | }, | 2588 | }, |
2589 | { | 2589 | { |
2590 | "runtime/jit: pass negative index to tail_call", | ||
2591 | .insns = { | ||
2592 | BPF_MOV64_IMM(BPF_REG_3, -1), | ||
2593 | BPF_LD_MAP_FD(BPF_REG_2, 0), | ||
2594 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
2595 | BPF_FUNC_tail_call), | ||
2596 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
2597 | BPF_EXIT_INSN(), | ||
2598 | }, | ||
2599 | .fixup_prog = { 1 }, | ||
2600 | .result = ACCEPT, | ||
2601 | }, | ||
2602 | { | ||
2603 | "runtime/jit: pass > 32bit index to tail_call", | ||
2604 | .insns = { | ||
2605 | BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), | ||
2606 | BPF_LD_MAP_FD(BPF_REG_2, 0), | ||
2607 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
2608 | BPF_FUNC_tail_call), | ||
2609 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
2610 | BPF_EXIT_INSN(), | ||
2611 | }, | ||
2612 | .fixup_prog = { 2 }, | ||
2613 | .result = ACCEPT, | ||
2614 | }, | ||
2615 | { | ||
2590 | "stack pointer arithmetic", | 2616 | "stack pointer arithmetic", |
2591 | .insns = { | 2617 | .insns = { |
2592 | BPF_MOV64_IMM(BPF_REG_1, 4), | 2618 | BPF_MOV64_IMM(BPF_REG_1, 4), |
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index cea4adcd42b8..a63e8453984d 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile | |||
@@ -12,9 +12,9 @@ all: | |||
12 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 12 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
13 | mkdir $$BUILD_TARGET -p; \ | 13 | mkdir $$BUILD_TARGET -p; \ |
14 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 14 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
15 | if [ -e $$DIR/$(TEST_PROGS) ]; then | 15 | if [ -e $$DIR/$(TEST_PROGS) ]; then \ |
16 | rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; | 16 | rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ |
17 | fi | 17 | fi \ |
18 | done | 18 | done |
19 | 19 | ||
20 | override define RUN_TESTS | 20 | override define RUN_TESTS |
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile index a5276a91dfbf..0862e6f47a38 100644 --- a/tools/testing/selftests/memfd/Makefile +++ b/tools/testing/selftests/memfd/Makefile | |||
@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/ | |||
5 | CFLAGS += -I../../../../usr/include/ | 5 | CFLAGS += -I../../../../usr/include/ |
6 | 6 | ||
7 | TEST_PROGS := run_tests.sh | 7 | TEST_PROGS := run_tests.sh |
8 | TEST_FILES := run_fuse_test.sh | ||
8 | TEST_GEN_FILES := memfd_test fuse_mnt fuse_test | 9 | TEST_GEN_FILES := memfd_test fuse_mnt fuse_test |
9 | 10 | ||
10 | fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) | 11 | fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) |
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config new file mode 100644 index 000000000000..835c7f4dadcd --- /dev/null +++ b/tools/testing/selftests/memfd/config | |||
@@ -0,0 +1 @@ | |||
CONFIG_FUSE_FS=m | |||
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile index 86636d207adf..183b46883875 100644 --- a/tools/testing/selftests/memory-hotplug/Makefile +++ b/tools/testing/selftests/memory-hotplug/Makefile | |||
@@ -4,7 +4,7 @@ all: | |||
4 | include ../lib.mk | 4 | include ../lib.mk |
5 | 5 | ||
6 | TEST_PROGS := mem-on-off-test.sh | 6 | TEST_PROGS := mem-on-off-test.sh |
7 | override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]" | 7 | override RUN_TESTS := @./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]" |
8 | override EMIT_TESTS := echo "$(RUN_TESTS)" | 8 | override EMIT_TESTS := echo "$(RUN_TESTS)" |
9 | 9 | ||
10 | run_full_test: | 10 | run_full_test: |
diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config index 6a8e5a9bfc10..d148f9f89fb6 100644 --- a/tools/testing/selftests/pstore/config +++ b/tools/testing/selftests/pstore/config | |||
@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y | |||
2 | CONFIG_PSTORE=y | 2 | CONFIG_PSTORE=y |
3 | CONFIG_PSTORE_PMSG=y | 3 | CONFIG_PSTORE_PMSG=y |
4 | CONFIG_PSTORE_CONSOLE=y | 4 | CONFIG_PSTORE_CONSOLE=y |
5 | CONFIG_PSTORE_RAM=m | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 0b457e8e0f0c..5df609950a66 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -141,6 +141,15 @@ struct seccomp_data { | |||
141 | #define SECCOMP_FILTER_FLAG_LOG 2 | 141 | #define SECCOMP_FILTER_FLAG_LOG 2 |
142 | #endif | 142 | #endif |
143 | 143 | ||
144 | #ifndef PTRACE_SECCOMP_GET_METADATA | ||
145 | #define PTRACE_SECCOMP_GET_METADATA 0x420d | ||
146 | |||
147 | struct seccomp_metadata { | ||
148 | __u64 filter_off; /* Input: which filter */ | ||
149 | __u64 flags; /* Output: filter's flags */ | ||
150 | }; | ||
151 | #endif | ||
152 | |||
144 | #ifndef seccomp | 153 | #ifndef seccomp |
145 | int seccomp(unsigned int op, unsigned int flags, void *args) | 154 | int seccomp(unsigned int op, unsigned int flags, void *args) |
146 | { | 155 | { |
@@ -2845,6 +2854,58 @@ TEST(get_action_avail) | |||
2845 | EXPECT_EQ(errno, EOPNOTSUPP); | 2854 | EXPECT_EQ(errno, EOPNOTSUPP); |
2846 | } | 2855 | } |
2847 | 2856 | ||
2857 | TEST(get_metadata) | ||
2858 | { | ||
2859 | pid_t pid; | ||
2860 | int pipefd[2]; | ||
2861 | char buf; | ||
2862 | struct seccomp_metadata md; | ||
2863 | |||
2864 | ASSERT_EQ(0, pipe(pipefd)); | ||
2865 | |||
2866 | pid = fork(); | ||
2867 | ASSERT_GE(pid, 0); | ||
2868 | if (pid == 0) { | ||
2869 | struct sock_filter filter[] = { | ||
2870 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
2871 | }; | ||
2872 | struct sock_fprog prog = { | ||
2873 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
2874 | .filter = filter, | ||
2875 | }; | ||
2876 | |||
2877 | /* one with log, one without */ | ||
2878 | ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, | ||
2879 | SECCOMP_FILTER_FLAG_LOG, &prog)); | ||
2880 | ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); | ||
2881 | |||
2882 | ASSERT_EQ(0, close(pipefd[0])); | ||
2883 | ASSERT_EQ(1, write(pipefd[1], "1", 1)); | ||
2884 | ASSERT_EQ(0, close(pipefd[1])); | ||
2885 | |||
2886 | while (1) | ||
2887 | sleep(100); | ||
2888 | } | ||
2889 | |||
2890 | ASSERT_EQ(0, close(pipefd[1])); | ||
2891 | ASSERT_EQ(1, read(pipefd[0], &buf, 1)); | ||
2892 | |||
2893 | ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); | ||
2894 | ASSERT_EQ(pid, waitpid(pid, NULL, 0)); | ||
2895 | |||
2896 | md.filter_off = 0; | ||
2897 | ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md)); | ||
2898 | EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); | ||
2899 | EXPECT_EQ(md.filter_off, 0); | ||
2900 | |||
2901 | md.filter_off = 1; | ||
2902 | ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md)); | ||
2903 | EXPECT_EQ(md.flags, 0); | ||
2904 | EXPECT_EQ(md.filter_off, 1); | ||
2905 | |||
2906 | ASSERT_EQ(0, kill(pid, SIGKILL)); | ||
2907 | } | ||
2908 | |||
2848 | /* | 2909 | /* |
2849 | * TODO: | 2910 | * TODO: |
2850 | * - add microbenchmarks | 2911 | * - add microbenchmarks |
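For reference, the ptrace interface exercised by TEST(get_metadata) can be used on its own roughly as follows (fragment; assumes the same fallback definitions as the test and a tracee that is already attached and stopped):

	/*
	 * Query the flags of a tracee's seccomp filters.  Per the test's
	 * expectations, filter_off counts from the first-installed filter.
	 */
	struct seccomp_metadata md = { .filter_off = 0 };

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) == sizeof(md) &&
	    (md.flags & SECCOMP_FILTER_FLAG_LOG))
		printf("filter 0 was installed with SECCOMP_FILTER_FLAG_LOG\n");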
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile index b3c8ba3cb668..d0121a8a3523 100644 --- a/tools/testing/selftests/sync/Makefile +++ b/tools/testing/selftests/sync/Makefile | |||
@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS) | |||
30 | $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) | 30 | $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) |
31 | 31 | ||
32 | $(OBJS): $(OUTPUT)/%.o: %.c | 32 | $(OBJS): $(OUTPUT)/%.o: %.c |
33 | $(CC) -c $^ -o $@ | 33 | $(CC) -c $^ -o $@ $(CFLAGS) |
34 | 34 | ||
35 | $(TESTS): $(OUTPUT)/%.o: %.c | 35 | $(TESTS): $(OUTPUT)/%.o: %.c |
36 | $(CC) -c $^ -o $@ | 36 | $(CC) -c $^ -o $@ |
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index 3d5a62ff7d31..f5d7a7851e21 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile | |||
@@ -1,4 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | include ../lib.mk | ||
3 | |||
2 | ifndef CROSS_COMPILE | 4 | ifndef CROSS_COMPILE |
3 | CFLAGS := -std=gnu99 | 5 | CFLAGS := -std=gnu99 |
4 | CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector | 6 | CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector |
@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y) | |||
6 | LDLIBS += -lgcc_s | 8 | LDLIBS += -lgcc_s |
7 | endif | 9 | endif |
8 | 10 | ||
9 | TEST_PROGS := vdso_test vdso_standalone_test_x86 | 11 | TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86 |
10 | 12 | ||
11 | all: $(TEST_PROGS) | 13 | all: $(TEST_PROGS) |
12 | vdso_test: parse_vdso.c vdso_test.c | 14 | $(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c |
13 | vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c | 15 | $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c |
14 | $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ | 16 | $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ |
15 | vdso_standalone_test_x86.c parse_vdso.c \ | 17 | vdso_standalone_test_x86.c parse_vdso.c \ |
16 | -o vdso_standalone_test_x86 | 18 | -o $@ |
17 | 19 | ||
18 | include ../lib.mk | 20 | EXTRA_CLEAN := $(TEST_PROGS) |
19 | clean: | ||
20 | rm -fr $(TEST_PROGS) | ||
21 | endif | 21 | endif |
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore index 63c94d776e89..342c7bc9dc8c 100644 --- a/tools/testing/selftests/vm/.gitignore +++ b/tools/testing/selftests/vm/.gitignore | |||
@@ -11,3 +11,4 @@ mlock-intersect-test | |||
11 | mlock-random-test | 11 | mlock-random-test |
12 | virtual_address_range | 12 | virtual_address_range |
13 | gup_benchmark | 13 | gup_benchmark |
14 | va_128TBswitch | ||
diff --git a/tools/usb/Makefile b/tools/usb/Makefile index 4e6506078494..01d758d73b6d 100644 --- a/tools/usb/Makefile +++ b/tools/usb/Makefile | |||
@@ -1,7 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for USB tools | 2 | # Makefile for USB tools |
3 | 3 | ||
4 | CC = $(CROSS_COMPILE)gcc | ||
5 | PTHREAD_LIBS = -lpthread | 4 | PTHREAD_LIBS = -lpthread |
6 | WARNINGS = -Wall -Wextra | 5 | WARNINGS = -Wall -Wextra |
7 | CFLAGS = $(WARNINGS) -g -I../include | 6 | CFLAGS = $(WARNINGS) -g -I../include |
diff --git a/tools/vm/Makefile b/tools/vm/Makefile index be320b905ea7..20f6cf04377f 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile | |||
@@ -6,7 +6,6 @@ TARGETS=page-types slabinfo page_owner_sort | |||
6 | LIB_DIR = ../lib/api | 6 | LIB_DIR = ../lib/api |
7 | LIBS = $(LIB_DIR)/libapi.a | 7 | LIBS = $(LIB_DIR)/libapi.a |
8 | 8 | ||
9 | CC = $(CROSS_COMPILE)gcc | ||
10 | CFLAGS = -Wall -Wextra -I../lib/ | 9 | CFLAGS = -Wall -Wextra -I../lib/ |
11 | LDFLAGS = $(LIBS) | 10 | LDFLAGS = $(LIBS) |
12 | 11 | ||
diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile index e664f1167388..e0e87239126b 100644 --- a/tools/wmi/Makefile +++ b/tools/wmi/Makefile | |||
@@ -2,7 +2,6 @@ PREFIX ?= /usr | |||
2 | SBINDIR ?= sbin | 2 | SBINDIR ?= sbin |
3 | INSTALL ?= install | 3 | INSTALL ?= install |
4 | CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include | 4 | CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include |
5 | CC = $(CROSS_COMPILE)gcc | ||
6 | 5 | ||
7 | TARGET = dell-smbios-example | 6 | TARGET = dell-smbios-example |
8 | 7 | ||
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 70268c0bec79..70f4c30918eb 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -36,6 +36,8 @@ static struct timecounter *timecounter; | |||
36 | static unsigned int host_vtimer_irq; | 36 | static unsigned int host_vtimer_irq; |
37 | static u32 host_vtimer_irq_flags; | 37 | static u32 host_vtimer_irq_flags; |
38 | 38 | ||
39 | static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); | ||
40 | |||
39 | static const struct kvm_irq_level default_ptimer_irq = { | 41 | static const struct kvm_irq_level default_ptimer_irq = { |
40 | .irq = 30, | 42 | .irq = 30, |
41 | .level = 1, | 43 | .level = 1, |
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void) | |||
56 | return timecounter->cc->read(timecounter->cc); | 58 | return timecounter->cc->read(timecounter->cc); |
57 | } | 59 | } |
58 | 60 | ||
61 | static inline bool userspace_irqchip(struct kvm *kvm) | ||
62 | { | ||
63 | return static_branch_unlikely(&userspace_irqchip_in_use) && | ||
64 | unlikely(!irqchip_in_kernel(kvm)); | ||
65 | } | ||
66 | |||
59 | static void soft_timer_start(struct hrtimer *hrt, u64 ns) | 67 | static void soft_timer_start(struct hrtimer *hrt, u64 ns) |
60 | { | 68 | { |
61 | hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), | 69 | hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), |
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) | |||
69 | cancel_work_sync(work); | 77 | cancel_work_sync(work); |
70 | } | 78 | } |
71 | 79 | ||
72 | static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu) | ||
73 | { | ||
74 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
75 | |||
76 | /* | ||
77 | * When using a userspace irqchip with the architected timers, we must | ||
78 | * prevent continuously exiting from the guest, and therefore mask the | ||
79 | * physical interrupt by disabling it on the host interrupt controller | ||
80 | * when the virtual level is high, such that the guest can make | ||
81 | * forward progress. Once we detect the output level being | ||
82 | * de-asserted, we unmask the interrupt again so that we exit from the | ||
83 | * guest when the timer fires. | ||
84 | */ | ||
85 | if (vtimer->irq.level) | ||
86 | disable_percpu_irq(host_vtimer_irq); | ||
87 | else | ||
88 | enable_percpu_irq(host_vtimer_irq, 0); | ||
89 | } | ||
90 | |||
91 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | 80 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) |
92 | { | 81 | { |
93 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; | 82 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; |
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | |||
106 | if (kvm_timer_should_fire(vtimer)) | 95 | if (kvm_timer_should_fire(vtimer)) |
107 | kvm_timer_update_irq(vcpu, true, vtimer); | 96 | kvm_timer_update_irq(vcpu, true, vtimer); |
108 | 97 | ||
109 | if (static_branch_unlikely(&userspace_irqchip_in_use) && | 98 | if (userspace_irqchip(vcpu->kvm) && |
110 | unlikely(!irqchip_in_kernel(vcpu->kvm))) | 99 | !static_branch_unlikely(&has_gic_active_state)) |
111 | kvm_vtimer_update_mask_user(vcpu); | 100 | disable_percpu_irq(host_vtimer_irq); |
112 | 101 | ||
113 | return IRQ_HANDLED; | 102 | return IRQ_HANDLED; |
114 | } | 103 | } |
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, | |||
290 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, | 279 | trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, |
291 | timer_ctx->irq.level); | 280 | timer_ctx->irq.level); |
292 | 281 | ||
293 | if (!static_branch_unlikely(&userspace_irqchip_in_use) || | 282 | if (!userspace_irqchip(vcpu->kvm)) { |
294 | likely(irqchip_in_kernel(vcpu->kvm))) { | ||
295 | ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, | 283 | ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, |
296 | timer_ctx->irq.irq, | 284 | timer_ctx->irq.irq, |
297 | timer_ctx->irq.level, | 285 | timer_ctx->irq.level, |
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) | |||
350 | phys_timer_emulate(vcpu); | 338 | phys_timer_emulate(vcpu); |
351 | } | 339 | } |
352 | 340 | ||
353 | static void __timer_snapshot_state(struct arch_timer_context *timer) | ||
354 | { | ||
355 | timer->cnt_ctl = read_sysreg_el0(cntv_ctl); | ||
356 | timer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
357 | } | ||
358 | |||
359 | static void vtimer_save_state(struct kvm_vcpu *vcpu) | 341 | static void vtimer_save_state(struct kvm_vcpu *vcpu) |
360 | { | 342 | { |
361 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | 343 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu) | |||
367 | if (!vtimer->loaded) | 349 | if (!vtimer->loaded) |
368 | goto out; | 350 | goto out; |
369 | 351 | ||
370 | if (timer->enabled) | 352 | if (timer->enabled) { |
371 | __timer_snapshot_state(vtimer); | 353 | vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); |
354 | vtimer->cnt_cval = read_sysreg_el0(cntv_cval); | ||
355 | } | ||
372 | 356 | ||
373 | /* Disable the virtual timer */ | 357 | /* Disable the virtual timer */ |
374 | write_sysreg_el0(0, cntv_ctl); | 358 | write_sysreg_el0(0, cntv_ctl); |
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff) | |||
460 | kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); | 444 | kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); |
461 | } | 445 | } |
462 | 446 | ||
463 | static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu) | 447 | static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active) |
448 | { | ||
449 | int r; | ||
450 | r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active); | ||
451 | WARN_ON(r); | ||
452 | } | ||
453 | |||
454 | static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu) | ||
464 | { | 455 | { |
465 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 456 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
466 | bool phys_active; | 457 | bool phys_active; |
467 | int ret; | ||
468 | 458 | ||
469 | phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); | 459 | if (irqchip_in_kernel(vcpu->kvm)) |
470 | 460 | phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); | |
471 | ret = irq_set_irqchip_state(host_vtimer_irq, | 461 | else |
472 | IRQCHIP_STATE_ACTIVE, | 462 | phys_active = vtimer->irq.level; |
473 | phys_active); | 463 | set_vtimer_irq_phys_active(vcpu, phys_active); |
474 | WARN_ON(ret); | ||
475 | } | 464 | } |
476 | 465 | ||
477 | static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu) | 466 | static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) |
478 | { | 467 | { |
479 | kvm_vtimer_update_mask_user(vcpu); | 468 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
469 | |||
470 | /* | ||
471 | * When using a userspace irqchip with the architected timers and a | ||
472 | * host interrupt controller that doesn't support an active state, we | ||
473 | * must still prevent continuously exiting from the guest, and | ||
474 | * therefore mask the physical interrupt by disabling it on the host | ||
475 | * interrupt controller when the virtual level is high, such that the | ||
476 | * guest can make forward progress. Once we detect the output level | ||
477 | * being de-asserted, we unmask the interrupt again so that we exit | ||
478 | * from the guest when the timer fires. | ||
479 | */ | ||
480 | if (vtimer->irq.level) | ||
481 | disable_percpu_irq(host_vtimer_irq); | ||
482 | else | ||
483 | enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); | ||
480 | } | 484 | } |
481 | 485 | ||
482 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | 486 | void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) |
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) | |||
487 | if (unlikely(!timer->enabled)) | 491 | if (unlikely(!timer->enabled)) |
488 | return; | 492 | return; |
489 | 493 | ||
490 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | 494 | if (static_branch_likely(&has_gic_active_state)) |
491 | kvm_timer_vcpu_load_user(vcpu); | 495 | kvm_timer_vcpu_load_gic(vcpu); |
492 | else | 496 | else |
493 | kvm_timer_vcpu_load_vgic(vcpu); | 497 | kvm_timer_vcpu_load_nogic(vcpu); |
494 | 498 | ||
495 | set_cntvoff(vtimer->cntvoff); | 499 | set_cntvoff(vtimer->cntvoff); |
496 | 500 | ||
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu) | |||
555 | { | 559 | { |
556 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | 560 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); |
557 | 561 | ||
558 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { | 562 | if (!kvm_timer_should_fire(vtimer)) { |
559 | __timer_snapshot_state(vtimer); | 563 | kvm_timer_update_irq(vcpu, false, vtimer); |
560 | if (!kvm_timer_should_fire(vtimer)) { | 564 | if (static_branch_likely(&has_gic_active_state)) |
561 | kvm_timer_update_irq(vcpu, false, vtimer); | 565 | set_vtimer_irq_phys_active(vcpu, false); |
562 | kvm_vtimer_update_mask_user(vcpu); | 566 | else |
563 | } | 567 | enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); |
564 | } | 568 | } |
565 | } | 569 | } |
566 | 570 | ||
567 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | 571 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) |
568 | { | 572 | { |
569 | unmask_vtimer_irq_user(vcpu); | 573 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; |
574 | |||
575 | if (unlikely(!timer->enabled)) | ||
576 | return; | ||
577 | |||
578 | if (unlikely(!irqchip_in_kernel(vcpu->kvm))) | ||
579 | unmask_vtimer_irq_user(vcpu); | ||
570 | } | 580 | } |
571 | 581 | ||
572 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | 582 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) |
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic) | |||
753 | kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); | 763 | kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); |
754 | goto out_free_irq; | 764 | goto out_free_irq; |
755 | } | 765 | } |
766 | |||
767 | static_branch_enable(&has_gic_active_state); | ||
756 | } | 768 | } |
757 | 769 | ||
758 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | 770 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4501e658e8d6..65dea3ffef68 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
969 | /* Check for overlaps */ | 969 | /* Check for overlaps */ |
970 | r = -EEXIST; | 970 | r = -EEXIST; |
971 | kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { | 971 | kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { |
972 | if ((slot->id >= KVM_USER_MEM_SLOTS) || | 972 | if (slot->id == id) |
973 | (slot->id == id)) | ||
974 | continue; | 973 | continue; |
975 | if (!((base_gfn + npages <= slot->base_gfn) || | 974 | if (!((base_gfn + npages <= slot->base_gfn) || |
976 | (base_gfn >= slot->base_gfn + slot->npages))) | 975 | (base_gfn >= slot->base_gfn + slot->npages))) |
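The overlap test that remains after dropping the KVM_USER_MEM_SLOTS special case is the usual half-open-interval check; isolated here as a hedged helper (illustrative, kernel-style types assumed, not part of the patch):

	/*
	 * [a, a + na) and [b, b + nb) overlap iff neither range ends before
	 * the other begins -- the condition applied against every other
	 * slot in the loop above.
	 */
	static bool gfn_ranges_overlap(u64 a, u64 na, u64 b, u64 nb)
	{
		return !(a + na <= b || a >= b + nb);
	}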