873 files changed, 9130 insertions, 4751 deletions
diff --git a/.clang-format b/.clang-format
index f49620f506f1..f3923a1f9858 100644
--- a/.clang-format
+++ b/.clang-format
@@ -78,6 +78,8 @@ ForEachMacros:
 - 'ata_qc_for_each_with_internal'
 - 'ax25_for_each'
 - 'ax25_uid_for_each'
+- '__bio_for_each_bvec'
+- 'bio_for_each_bvec'
 - 'bio_for_each_integrity_vec'
 - '__bio_for_each_segment'
 - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
 - 'drm_for_each_legacy_plane'
 - 'drm_for_each_plane'
 - 'drm_for_each_plane_mask'
+- 'drm_for_each_privobj'
 - 'drm_mm_for_each_hole'
 - 'drm_mm_for_each_node'
 - 'drm_mm_for_each_node_in_range'
 - 'drm_mm_for_each_node_safe'
+- 'flow_action_for_each'
 - 'for_each_active_drhd_unit'
 - 'for_each_active_iommu'
 - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
 - 'for_each_dss_dev'
 - 'for_each_efi_memory_desc'
 - 'for_each_efi_memory_desc_in_map'
+- 'for_each_element'
+- 'for_each_element_extid'
+- 'for_each_element_id'
 - 'for_each_endpoint_of_node'
 - 'for_each_evictable_lru'
 - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
 - 'for_each_net_rcu'
 - 'for_each_new_connector_in_state'
 - 'for_each_new_crtc_in_state'
+- 'for_each_new_mst_mgr_in_state'
 - 'for_each_new_plane_in_state'
 - 'for_each_new_private_obj_in_state'
 - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
 - 'for_each_of_pci_range'
 - 'for_each_old_connector_in_state'
 - 'for_each_old_crtc_in_state'
+- 'for_each_old_mst_mgr_in_state'
 - 'for_each_oldnew_connector_in_state'
 - 'for_each_oldnew_crtc_in_state'
+- 'for_each_oldnew_mst_mgr_in_state'
 - 'for_each_oldnew_plane_in_state'
 - 'for_each_oldnew_plane_in_state_reverse'
 - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
 - 'for_each_sg_dma_page'
 - 'for_each_sg_page'
 - 'for_each_sibling_event'
+- 'for_each_subelement'
+- 'for_each_subelement_extid'
+- 'for_each_subelement_id'
 - '__for_each_thread'
 - 'for_each_thread'
 - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
 - 'fwnode_for_each_child_node'
 - 'fwnode_graph_for_each_endpoint'
 - 'gadget_for_each_ep'
+- 'genradix_for_each'
+- 'genradix_for_each_from'
 - 'hash_for_each'
 - 'hash_for_each_possible'
 - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
 - 'key_for_each'
 - 'key_for_each_safe'
 - 'klp_for_each_func'
+- 'klp_for_each_func_safe'
+- 'klp_for_each_func_static'
 - 'klp_for_each_object'
+- 'klp_for_each_object_safe'
+- 'klp_for_each_object_static'
 - 'kvm_for_each_memslot'
 - 'kvm_for_each_vcpu'
 - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
 - 'media_device_for_each_intf'
 - 'media_device_for_each_link'
 - 'media_device_for_each_pad'
+- 'mp_bvec_for_each_page'
+- 'mp_bvec_for_each_segment'
 - 'nanddev_io_for_each_page'
 - 'netdev_for_each_lower_dev'
 - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
 - 'rht_for_each_rcu'
 - 'rht_for_each_rcu_continue'
 - '__rq_for_each_bio'
+- 'rq_for_each_bvec'
 - 'rq_for_each_segment'
 - 'scsi_for_each_prot_sg'
 - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
 - 'v4l2_m2m_for_each_src_buf_safe'
 - 'virtio_device_for_each_vq'
 - 'xa_for_each'
+- 'xa_for_each_marked'
+- 'xa_for_each_start'
 - 'xas_for_each'
 - 'xas_for_each_conflict'
 - 'xas_for_each_marked'
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b60215..7e71c9c1d8e9 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================
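
The psi.txt hunk above pins down the units of the exported numbers (averages in percent, totals in microseconds). As a purely illustrative aid, a minimal user-space reader of one of the pressure files is sketched below; the /proc/pressure/memory path and the some/full line format are the interface described elsewhere in psi.txt, while the program itself is not part of the patch:

  #include <stdio.h>

  /* Each line looks like:
   *   some avg10=0.00 avg60=0.00 avg300=0.00 total=0
   * with avg10/avg60/avg300 in %, and total in us. */
  int main(void)
  {
          char line[256];
          FILE *f = fopen("/proc/pressure/memory", "r");

          if (!f)
                  return 1;
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }
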
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9a60a5d60e38..7313d354f20e 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+* btf member bit offset 100 from the start of the structure,
+* btf member pointing to an int type,
+* the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
 * btf member bit offset 102,
 * btf member pointing to an int type,
 * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
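
To make the arithmetic in the btf.rst hunk above concrete, the illustrative (and entirely made-up) C layout below has a member occupying bits 102..105 of its structure, which is what both BTF encodings in the text describe; the comments assume a conventional ABI where bit-fields are packed in declaration order:

  struct btf_bitfield_example {
          unsigned int a;         /* bits   0..31  */
          unsigned int b;         /* bits  32..63  */
          unsigned int c;         /* bits  64..95  */
          unsigned int pad : 6;   /* bits  96..101 */
          unsigned int f   : 4;   /* bits 102..105: offset 100 + BTF_INT_OFFSET() 2,
                                   * or offset 102 + BTF_INT_OFFSET() 0 */
  };
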
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 365dcf384d73..82dd7582e945 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -228,7 +228,7 @@ patternProperties:
 - renesas,r9a06g032-smp
 - rockchip,rk3036-smp
 - rockchip,rk3066-smp
-- socionext,milbeaut-m10v-smp
+- socionext,milbeaut-m10v-smp
 - ste,dbx500-smp
 
 cpu-release-addr:
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
index 08bab0e94d25..d0ae46d7bac3 100644
--- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt
+++ b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
-- ti,mode: Operation mode (see above).
+- ti,mode: Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
 adc128d818@1d {
 compatible = "ti,adc128d818";
 reg = <0x1d>;
-ti,mode = <2>;
+ti,mode = /bits/ 8 <2>;
 };
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 24c5cdaba8d2..ca83dcc84fb8 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -20,6 +20,8 @@ Required properties:
 Optional properties:
 - phy-handle: See ethernet.txt file in the same directory.
 If absent, davinci_emac driver defaults to 100/FULL.
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
 - ti,davinci-rmii-en: 1 byte, 1 means use RMII
 - ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index cfc376bc977a..a68621580584 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -10,15 +10,14 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
 the boot program; should be used in cases where the MAC address assigned to
 the device by the boot program is different from the "local-mac-address"
 property;
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address;
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used;
 - max-speed: number, specifies maximum speed in Mbit/s supported by the device;
 - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
 the maximum frame size (there's contradiction in the Devicetree
 Specification).
 - phy-mode: string, operation mode of the PHY interface. This is now a de-facto
 standard property; supported values are:
-* "internal"
+* "internal" (Internal means there is not a standard bus between the MAC and
+the PHY, something proprietary is being used to embed the PHY in the MAC.)
 * "mii"
 * "gmii"
 * "sgmii"
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 174f292d8a3e..8b80515729d7 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -26,6 +26,10 @@ Required properties:
 Optional elements: 'tsu_clk'
 - clocks: Phandles to input clocks.
 
+Optional properties:
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
+
 Optional properties for PHY child node:
 - reset-gpios : Should specify the gpio for phy reset
 - magic-packet : If present, indicates that the hardware supports waking
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index f79934225d8d..ca983328976b 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -102,9 +102,11 @@ Byte sequences
 dictionary which is empty, and that it will always be
 invalid at this place.
 
-17 : bitstream version. If the first byte is 17, the next byte
-gives the bitstream version (version 1 only). If the first byte
-is not 17, the bitstream version is 0.
+17 : bitstream version. If the first byte is 17, and compressed
+stream length is at least 5 bytes (length of shortest possible
+versioned bitstream), the next byte gives the bitstream version
+(version 1 only).
+Otherwise, the bitstream version is 0.
 
 18..21 : copy 0..3 literals
 state = (byte - 17) = 0..3 [ copy <state> literals ]
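
The lzo.txt change above tightens the rule for recognizing a versioned bitstream (first byte 17 and a total length of at least 5 bytes). A minimal sketch of that rule follows; the helper name is invented for illustration and this is not the kernel's decompressor:

  #include <stddef.h>

  /* Versioned stream: starts with byte 17 and is at least 5 bytes long,
   * in which case the second byte carries the version (currently 1);
   * anything else is treated as version 0. */
  static int lzo_bitstream_version(const unsigned char *in, size_t in_len)
  {
          if (in_len >= 5 && in[0] == 17)
                  return in[1];
          return 0;
  }
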
diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst
index f460031d8531..177ac44fa0fa 100644
--- a/Documentation/media/uapi/rc/rc-tables.rst
+++ b/Documentation/media/uapi/rc/rc-tables.rst
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
 
 - .. row 78
 
-- ``KEY_SCREEN``
+- ``KEY_ASPECT_RATIO``
 
 - Select screen aspect ratio
 
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
 
 - .. row 79
 
-- ``KEY_ZOOM``
+- ``KEY_FULL_SCREEN``
 
 - Put device into zoom/full screen mode
 
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644
index 000000000000..b375ae2ec2c4
--- /dev/null
+++ b/Documentation/networking/bpf_flow_dissector.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+Flow dissector is a routine that parses metadata out of the packets. It's
+used in the various places in the networking subsystem (RFS, flow hash, etc).
+
+BPF flow dissector is an attempt to reimplement C-based flow dissector logic
+in BPF to gain all the benefits of BPF verifier (namely, limits on the
+number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only the
+limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+* ``nhoff`` - initial offset of the networking header
+* ``thoff`` - initial offset of the transport header, initialized to nhoff
+* ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+Flow dissector BPF program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. Input arguments ``nhoff/thoff/n_proto`` should be
+also adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate parsing error.
+
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+  +------+------+------------+-----------+
+  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+  +------+------+------------+-----------+
+  ^
+  |
+  +-- flow dissector starts here
+
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In case of VLAN, flow dissector can be called with the two different states.
+
+Pre-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+  ^
+  |
+  +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point the to first byte of TCI
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = TPID
+
+Please note that TPID can be 802.1AD and, hence, BPF program would
+have to parse VLAN information twice for double tagged packets.
+
+
+Post-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+  ^
+  |
+  +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point the to first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: BPF flow dissector program can be called with
+the optional VLAN header and should gracefully handle both cases: when single
+or double VLAN is present and when it is not present. The same program
+can be called for both cases and would have to be written carefully to
+handle both cases.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load BPF flow dissector program as well.
+
+The reference implementation is organized as follows:
+* ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+* ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and
+does ``bpf_tail_call`` to the appropriate L3 handler
+
+Since BPF at this point doesn't support looping (or any jumping back),
+jmp_table is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
+
+
+Current Limitations
+===================
+BPF flow dissector doesn't support exporting all the metadata that in-kernel
+C-based implementation can export. Notable example is single VLAN (802.1Q)
+and double VLAN (802.1AD) tags. Please refer to the ``struct bpf_flow_keys``
+for a set of information that's currently can be exported from the BPF context.
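
As a companion to the new document above, here is a deliberately minimal flow dissector program for the API it describes; it is not the reference bpf_flow.c. Header paths and the SEC()/bpf_htons() helpers follow the selftests layout and may differ in other build environments; only plain IPv4 is parsed and everything else is rejected with BPF_DROP:

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/ip.h>
  #include "bpf_helpers.h"        /* SEC() */
  #include "bpf_endian.h"         /* bpf_htons() */

  SEC("flow_dissector")
  int toy_dissector(struct __sk_buff *skb)
  {
          struct bpf_flow_keys *keys = skb->flow_keys;
          void *data = (void *)(long)skb->data;
          void *data_end = (void *)(long)skb->data_end;
          struct iphdr *iph;

          if (keys->n_proto != bpf_htons(ETH_P_IP))
                  return BPF_DROP;

          iph = data + keys->nhoff;
          if ((void *)(iph + 1) > data_end)
                  return BPF_DROP;

          /* Fill out the output fields and advance thoff past the IP header. */
          keys->addr_proto = ETH_P_IP;
          keys->ipv4_src = iph->saddr;
          keys->ipv4_dst = iph->daddr;
          keys->ip_proto = iph->protocol;
          keys->thoff += iph->ihl * 4;

          return BPF_OK;
  }

  char _license[] SEC("license") = "GPL";
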
diff --git a/Documentation/networking/decnet.txt b/Documentation/networking/decnet.txt
index e12a4900cf72..d192f8b9948b 100644
--- a/Documentation/networking/decnet.txt
+++ b/Documentation/networking/decnet.txt
@@ -22,8 +22,6 @@ you'll need the following options as well...
 CONFIG_DECNET_ROUTER (to be able to add/delete routes)
 CONFIG_NETFILTER (will be required for the DECnet routing daemon)
 
-CONFIG_DECNET_ROUTE_FWMARK is optional
-
 Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
 that you need it, in general you won't and it can cause ifconfig to
 malfunction.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 5449149be496..984e68f9e026 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
 netdev-FAQ
 af_xdp
 batman-adv
+bpf_flow_dissector
 can
 can_ucan_protocol
 device_drivers/freescale/dpaa2/index
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index acdfb5d2bcaa..e2142fe40cda 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
 minimum RTT when it is moved to a longer path (e.g., due to traffic
 engineering). A longer window makes the filter more resistant to RTT
 inflations such as transient congestion. The unit is seconds.
+Possible values: 0 - 86400 (1 day)
 Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 2df5894353d6..cd7303d7fa25 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
 (*) Check call still alive.
 
-u32 rxrpc_kernel_check_life(struct socket *sock,
-struct rxrpc_call *call);
+bool rxrpc_kernel_check_life(struct socket *sock,
+struct rxrpc_call *call,
+u32 *_life);
 void rxrpc_kernel_probe_life(struct socket *sock,
 struct rxrpc_call *call);
 
-The first function returns a number that is updated when ACKs are received
-from the peer (notably including PING RESPONSE ACKs which we can elicit by
-sending PING ACKs to see if the call still exists on the server). The
-caller should compare the numbers of two calls to see if the call is still
-alive after waiting for a suitable interval.
+The first function passes back in *_life a number that is updated when
+ACKs are received from the peer (notably including PING RESPONSE ACKs
+which we can elicit by sending PING ACKs to see if the call still exists
+on the server). The caller should compare the numbers of two calls to see
+if the call is still alive after waiting for a suitable interval. It also
+returns true as long as the call hasn't yet reached the completed state.
 
 This allows the caller to work out if the server is still contactable and
 if the call is still alive on the server while waiting for the server to
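
A short usage sketch of the reworked interface documented above (illustrative only; sock and call are assumed to come from rxrpc_kernel_begin_call() elsewhere, and the helper name is made up):

  #include <net/af_rxrpc.h>

  static bool example_call_progressing(struct socket *sock,
                                       struct rxrpc_call *call, u32 *last_life)
  {
          u32 life;
          bool alive = rxrpc_kernel_check_life(sock, call, &life);

          if (!alive)
                  return false;           /* call has reached the completed state */
          if (life != *last_life) {
                  *last_life = life;      /* ACK activity since the last poll */
                  return true;
          }
          /* No movement yet: prod the peer so a later poll can see a change. */
          rxrpc_kernel_probe_life(sock, call);
          return true;
  }
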
diff --git a/MAINTAINERS b/MAINTAINERS
index 43b36dbed48e..5c38f21aee78 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1893,14 +1893,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 ARM/NUVOTON NPCM ARCHITECTURE
 M: Avi Fishman <avifishman70@gmail.com>
 M: Tomer Maimon <tmaimon77@gmail.com>
+M: Tali Perry <tali.perry1@gmail.com>
 R: Patrick Venture <venture@google.com>
 R: Nancy Yuen <yuenn@google.com>
-R: Brendan Higgins <brendanhiggins@google.com>
+R: Benjamin Fair <benjaminfair@google.com>
 L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S: Supported
 F: arch/arm/mach-npcm/
 F: arch/arm/boot/dts/nuvoton-npcm*
-F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F: drivers/*/*npcm*
 F: Documentation/devicetree/bindings/*/*npcm*
 F: Documentation/devicetree/bindings/*/*/*npcm*
@@ -3120,6 +3121,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
 BROADCOM BMIPS MIPS ARCHITECTURE
 M: Kevin Cernekee <cernekee@gmail.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
 L: linux-mips@vger.kernel.org
 T: git git://github.com/broadcom/stblinux.git
 S: Maintained
@@ -4129,7 +4131,7 @@ F: drivers/cpuidle/*
 F: include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M: Nicolas Pitre <nico@linaro.org>
+M: Nicolas Pitre <nico@fluxnic.net>
 S: Maintained
 F: Documentation/filesystems/cramfs.txt
 F: fs/cramfs/
@@ -5833,7 +5835,7 @@ L: netdev@vger.kernel.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-bus-mdio
 F: Documentation/devicetree/bindings/net/mdio*
-F: Documentation/networking/phy.txt
+F: Documentation/networking/phy.rst
 F: drivers/net/phy/
 F: drivers/of/of_mdio.c
 F: drivers/of/of_net.c
@@ -7332,7 +7334,6 @@ F: Documentation/devicetree/bindings/i3c/
 F: Documentation/driver-api/i3c
 F: drivers/i3c/
 F: include/linux/i3c/
-F: include/dt-bindings/i3c/
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M: Vitor Soares <vitor.soares@synopsys.com>
@@ -7515,7 +7516,7 @@ F: include/net/mac802154.h
 F: include/net/af_ieee802154.h
 F: include/net/cfg802154.h
 F: include/net/ieee802154_netdev.h
-F: Documentation/networking/ieee802154.txt
+F: Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M: Yotam Gigi <yotam.gi@gmail.com>
@@ -8707,6 +8708,7 @@ F: scripts/leaking_addresses.pl
 LED SUBSYSTEM
 M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M: Pavel Machek <pavel@ucw.cz>
+R: Dan Murphy <dmurphy@ti.com>
 L: linux-leds@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S: Maintained
@@ -10144,7 +10146,7 @@ F: drivers/spi/spi-at91-usart.c
 F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M: Woojung Huh <Woojung.Huh@microchip.com>
+M: Woojung Huh <woojung.huh@microchip.com>
 M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L: netdev@vger.kernel.org
 S: Maintained
@@ -13981,7 +13983,7 @@ F: drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M: Edward Cree <ecree@solarflare.com>
-M: Bert Kenward <bkenward@solarflare.com>
+M: Martin Habets <mhabets@solarflare.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/sfc/
@@ -16508,7 +16510,7 @@ F: drivers/char/virtio_console.c
 F: include/linux/virtio_console.h
 F: include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M: "Michael S. Tsirkin" <mst@redhat.com>
 M: Jason Wang <jasowang@redhat.com>
 L: virtualization@lists.linux-foundation.org
@@ -16523,6 +16525,19 @@ F: include/uapi/linux/virtio_*.h
 F: drivers/crypto/virtio/
 F: mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
+R: Paolo Bonzini <pbonzini@redhat.com>
+R: Stefan Hajnoczi <stefanha@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/block/virtio_blk.c
+F: drivers/scsi/virtio_scsi.c
+F: include/uapi/linux/virtio_blk.h
+F: include/uapi/linux/virtio_scsi.h
+F: drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M: Gonglei <arei.gonglei@huawei.com>
 L: virtualization@lists.linux-foundation.org
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 63ed39cbd3bd..165f268beafc 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -463,3 +463,7 @@
 532 common getppid sys_getppid
 # all other architectures have common numbers for new syscall, alpha
 # is the exception.
+534 common pidfd_send_signal sys_pidfd_send_signal
+535 common io_uring_setup sys_io_uring_setup
+536 common io_uring_enter sys_io_uring_enter
+537 common io_uring_register sys_io_uring_register
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 29de09804306..c7a4201ed62b 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-unsigned int i, unsigned int n, unsigned long *args)
+unsigned long *args)
 {
 unsigned long *inside_ptregs = &(regs->r0);
-inside_ptregs -= i;
-
-BUG_ON((i + n) > 6);
+unsigned int n = 6;
+unsigned int i = 0;
 
 while (n--) {
 args[i++] = (*inside_ptregs);
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index dce5be5df97b..edcff79879e7 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -57,6 +57,24 @@
 enable-active-high;
 };
 
+/* TPS79501 */
+v1_8d_reg: fixedregulator-v1_8d {
+compatible = "regulator-fixed";
+regulator-name = "v1_8d";
+vin-supply = <&vbat>;
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
+};
+
+/* TPS79501 */
+v3_3d_reg: fixedregulator-v3_3d {
+compatible = "regulator-fixed";
+regulator-name = "v3_3d";
+vin-supply = <&vbat>;
+regulator-min-microvolt = <3300000>;
+regulator-max-microvolt = <3300000>;
+};
+
 matrix_keypad: matrix_keypad0 {
 compatible = "gpio-matrix-keypad";
 debounce-delay-ms = <5>;
@@ -499,10 +517,10 @@
 status = "okay";
 
 /* Regulators */
-AVDD-supply = <&vaux2_reg>;
-IOVDD-supply = <&vaux2_reg>;
-DRVDD-supply = <&vaux2_reg>;
-DVDD-supply = <&vbat>;
+AVDD-supply = <&v3_3d_reg>;
+IOVDD-supply = <&v3_3d_reg>;
+DRVDD-supply = <&v3_3d_reg>;
+DVDD-supply = <&v1_8d_reg>;
 };
 };
 
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index b128998097ce..2c2d8b5b8cf5 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -73,6 +73,24 @@
 enable-active-high;
 };
 
+/* TPS79518 */
+v1_8d_reg: fixedregulator-v1_8d {
+compatible = "regulator-fixed";
+regulator-name = "v1_8d";
+vin-supply = <&vbat>;
+regulator-min-microvolt = <1800000>;
+regulator-max-microvolt = <1800000>;
+};
+
+/* TPS78633 */
+v3_3d_reg: fixedregulator-v3_3d {
+compatible = "regulator-fixed";
+regulator-name = "v3_3d";
+vin-supply = <&vbat>;
+regulator-min-microvolt = <3300000>;
+regulator-max-microvolt = <3300000>;
+};
+
 leds {
 pinctrl-names = "default";
 pinctrl-0 = <&user_leds_s0>;
@@ -501,10 +519,10 @@
 status = "okay";
 
 /* Regulators */
-AVDD-supply = <&vaux2_reg>;
-IOVDD-supply = <&vaux2_reg>;
-DRVDD-supply = <&vaux2_reg>;
-DVDD-supply = <&vbat>;
+AVDD-supply = <&v3_3d_reg>;
+IOVDD-supply = <&v3_3d_reg>;
+DRVDD-supply = <&v3_3d_reg>;
+DVDD-supply = <&v1_8d_reg>;
 };
 };
 
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index f459ec316a22..ca6d9f02a800 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -1762,7 +1762,7 @@
 reg = <0xcc000 0x4>;
 reg-names = "rev";
 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
 clock-names = "fck";
 #address-cells = <1>;
 #size-cells = <1>;
@@ -1785,7 +1785,7 @@
 reg = <0xd0000 0x4>;
 reg-names = "rev";
 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
 clock-names = "fck";
 #address-cells = <1>;
 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index aa107ee41b8b..ef653c3209bc 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -254,6 +254,7 @@
 };
 
 vccio_sd: LDO_REG5 {
+regulator-boot-on;
 regulator-min-microvolt = <1800000>;
 regulator-max-microvolt = <3300000>;
 regulator-name = "vccio_sd";
@@ -430,7 +431,7 @@
 bus-width = <4>;
 cap-mmc-highspeed;
 cap-sd-highspeed;
-card-detect-delay = <200>;
+broken-cd;
 disable-wp; /* wp not hooked up */
 pinctrl-names = "default";
 pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 0bc2409f6903..192dbc089ade 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -25,8 +25,6 @@
 
 gpio_keys: gpio-keys {
 compatible = "gpio-keys";
-#address-cells = <1>;
-#size-cells = <0>;
 
 pinctrl-names = "default";
 pinctrl-0 = <&pwr_key_l>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index ca7d52daa8fb..a024d1e7e74c 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -70,7 +70,7 @@
 compatible = "arm,cortex-a12";
 reg = <0x501>;
 resets = <&cru SRST_CORE1>;
-operating-points = <&cpu_opp_table>;
+operating-points-v2 = <&cpu_opp_table>;
 #cooling-cells = <2>; /* min followed by max */
 clock-latency = <40000>;
 clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@
 compatible = "arm,cortex-a12";
 reg = <0x502>;
 resets = <&cru SRST_CORE2>;
-operating-points = <&cpu_opp_table>;
+operating-points-v2 = <&cpu_opp_table>;
 #cooling-cells = <2>; /* min followed by max */
 clock-latency = <40000>;
 clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@
 compatible = "arm,cortex-a12";
 reg = <0x503>;
 resets = <&cru SRST_CORE3>;
-operating-points = <&cpu_opp_table>;
+operating-points-v2 = <&cpu_opp_table>;
 #cooling-cells = <2>; /* min followed by max */
 clock-latency = <40000>;
 clocks = <&cru ARMCLK>;
@@ -1119,8 +1119,6 @@
 clock-names = "ref", "pclk";
 power-domains = <&power RK3288_PD_VIO>;
 rockchip,grf = <&grf>;
-#address-cells = <1>;
-#size-cells = <0>;
 status = "disabled";
 
 ports {
@@ -1282,27 +1280,27 @@
 gpu_opp_table: gpu-opp-table {
 compatible = "operating-points-v2";
 
-opp@100000000 {
+opp-100000000 {
 opp-hz = /bits/ 64 <100000000>;
 opp-microvolt = <950000>;
 };
-opp@200000000 {
+opp-200000000 {
 opp-hz = /bits/ 64 <200000000>;
 opp-microvolt = <950000>;
 };
-opp@300000000 {
+opp-300000000 {
 opp-hz = /bits/ 64 <300000000>;
 opp-microvolt = <1000000>;
 };
-opp@400000000 {
+opp-400000000 {
 opp-hz = /bits/ 64 <400000000>;
 opp-microvolt = <1100000>;
 };
-opp@500000000 {
+opp-500000000 {
 opp-hz = /bits/ 64 <500000000>;
 opp-microvolt = <1200000>;
 };
-opp@600000000 {
+opp-600000000 {
 opp-hz = /bits/ 64 <600000000>;
 opp-microvolt = <1250000>;
 };
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1c01a6f843d8..28a2e45752fe 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -518,7 +518,7 @@
 #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10 74
 #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index f2f6558a00f1..04066f9cb8a3 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -213,13 +213,12 @@
 gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
 gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
 /*
-* This chipselect is active high. Just setting the flags
-* to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
-* it will be ignored, only the special "spi-cs-high" flag
-* really counts.
+* It's not actually active high, but the frameworks assume
+* the polarity of the passed-in GPIO is "normal" (active
+* high) then actively drives the line low to select the
+* chip.
 */
 cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
-spi-cs-high;
 num-chipselects = <1>;
 
 /*
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 06dea6bce293..080ce70cab12 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
 struct pt_regs *regs,
-unsigned int i, unsigned int n,
 unsigned long *args)
 {
-if (n == 0)
-return;
+args[0] = regs->ARM_ORIG_r0;
+args++;
 
-if (i + n > SYSCALL_MAX_ARGS) {
-unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-pr_warn("%s called with max args %d, handling only %d\n",
-__func__, i + n, SYSCALL_MAX_ARGS);
-memset(args_bad, 0, n_bad * sizeof(args[0]));
-n = SYSCALL_MAX_ARGS - i;
-}
-
-if (i == 0) {
-args[0] = regs->ARM_ORIG_r0;
-args++;
-i++;
-n--;
-}
-
-memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
 struct pt_regs *regs,
-unsigned int i, unsigned int n,
 const unsigned long *args)
 {
-if (n == 0)
-return;
+regs->ARM_ORIG_r0 = args[0];
+args++;
 
-if (i + n > SYSCALL_MAX_ARGS) {
-pr_warn("%s called with max args %d, handling only %d\n",
-__func__, i + n, SYSCALL_MAX_ARGS);
-n = SYSCALL_MAX_ARGS - i;
-}
-
-if (i == 0) {
-regs->ARM_ORIG_r0 = args[0];
-args++;
-i++;
-n--;
-}
-
-memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
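
Both syscall_get_arguments() conversions above drop the index/count parameters; a sketch of what a generic caller looks like after this change (illustrative only, with an invented function name):

  #include <linux/sched.h>
  #include <linux/printk.h>
  #include <asm/syscall.h>

  static void example_dump_syscall_args(struct task_struct *task,
                                        struct pt_regs *regs)
  {
          unsigned long args[6];
          int i;

          /* Always fetches all six arguments; on ARM, args[0] is ARM_ORIG_r0
           * and args[1]..args[5] come from r1..r5. */
          syscall_get_arguments(task, regs, args);
          for (i = 0; i < 6; i++)
                  pr_info("arg%d = %lx\n", i, args[i]);
  }
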
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 51e808adb00c..2a757dcaa1a5 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
 
 np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
 if (!np)
-goto securam_fail;
+goto securam_fail_no_ref_dev;
 
 pdev = of_find_device_by_node(np);
 of_node_put(np);
 if (!pdev) {
 pr_warn("%s: failed to find securam device!\n", __func__);
-goto securam_fail;
+goto securam_fail_no_ref_dev;
 }
 
 sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
 return 0;
 
 securam_fail:
+put_device(&pdev->dev);
+securam_fail_no_ref_dev:
 iounmap(pm_data.sfrbu);
 pm_data.sfrbu = NULL;
 return ret;
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 }
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 .hw_id = 0,
 .pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
 .resource = iop13xx_adma_0_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_BIT_MASK(64),
+.coherent_dma_mask = DMA_BIT_MASK(32),
 .platform_data = (void *) &iop13xx_adma_0_data,
 },
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
 .resource = iop13xx_adma_1_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_BIT_MASK(64),
+.coherent_dma_mask = DMA_BIT_MASK(32),
 .platform_data = (void *) &iop13xx_adma_1_data,
 },
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
 .resource = iop13xx_adma_2_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_BIT_MASK(64),
+.coherent_dma_mask = DMA_BIT_MASK(32),
 .platform_data = (void *) &iop13xx_adma_2_data,
 },
 };
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c index db511ec2b1df..116feb6b261e 100644 --- a/arch/arm/mach-iop13xx/tpmi.c +++ b/arch/arm/mach-iop13xx/tpmi.c | |||
| @@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = { | |||
| 152 | } | 152 | } |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64); | 155 | u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32); |
| 156 | static struct platform_device iop13xx_tpmi_0_device = { | 156 | static struct platform_device iop13xx_tpmi_0_device = { |
| 157 | .name = "iop-tpmi", | 157 | .name = "iop-tpmi", |
| 158 | .id = 0, | 158 | .id = 0, |
| @@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = { | |||
| 160 | .resource = iop13xx_tpmi_0_resources, | 160 | .resource = iop13xx_tpmi_0_resources, |
| 161 | .dev = { | 161 | .dev = { |
| 162 | .dma_mask = &iop13xx_tpmi_mask, | 162 | .dma_mask = &iop13xx_tpmi_mask, |
| 163 | .coherent_dma_mask = DMA_BIT_MASK(64), | 163 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 164 | }, | 164 | }, |
| 165 | }; | 165 | }; |
| 166 | 166 | ||
| @@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = { | |||
| 171 | .resource = iop13xx_tpmi_1_resources, | 171 | .resource = iop13xx_tpmi_1_resources, |
| 172 | .dev = { | 172 | .dev = { |
| 173 | .dma_mask = &iop13xx_tpmi_mask, | 173 | .dma_mask = &iop13xx_tpmi_mask, |
| 174 | .coherent_dma_mask = DMA_BIT_MASK(64), | 174 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 175 | }, | 175 | }, |
| 176 | }; | 176 | }; |
| 177 | 177 | ||
| @@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = { | |||
| 182 | .resource = iop13xx_tpmi_2_resources, | 182 | .resource = iop13xx_tpmi_2_resources, |
| 183 | .dev = { | 183 | .dev = { |
| 184 | .dma_mask = &iop13xx_tpmi_mask, | 184 | .dma_mask = &iop13xx_tpmi_mask, |
| 185 | .coherent_dma_mask = DMA_BIT_MASK(64), | 185 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 186 | }, | 186 | }, |
| 187 | }; | 187 | }; |
| 188 | 188 | ||
| @@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = { | |||
| 193 | .resource = iop13xx_tpmi_3_resources, | 193 | .resource = iop13xx_tpmi_3_resources, |
| 194 | .dev = { | 194 | .dev = { |
| 195 | .dma_mask = &iop13xx_tpmi_mask, | 195 | .dma_mask = &iop13xx_tpmi_mask, |
| 196 | .coherent_dma_mask = DMA_BIT_MASK(64), | 196 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 197 | }, | 197 | }, |
| 198 | }; | 198 | }; |
| 199 | 199 | ||
diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c index 591543c81399..3ea880f5fcb7 100644 --- a/arch/arm/mach-milbeaut/platsmp.c +++ b/arch/arm/mach-milbeaut/platsmp.c | |||
| @@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus) | |||
| 65 | writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); | 65 | writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 68 | static void m10v_cpu_die(unsigned int l_cpu) | 69 | static void m10v_cpu_die(unsigned int l_cpu) |
| 69 | { | 70 | { |
| 70 | gic_cpu_if_down(0); | 71 | gic_cpu_if_down(0); |
| @@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu) | |||
| 83 | 84 | ||
| 84 | return 1; | 85 | return 1; |
| 85 | } | 86 | } |
| 87 | #endif | ||
| 86 | 88 | ||
| 87 | static struct smp_operations m10v_smp_ops __initdata = { | 89 | static struct smp_operations m10v_smp_ops __initdata = { |
| 88 | .smp_prepare_cpus = m10v_smp_init, | 90 | .smp_prepare_cpus = m10v_smp_init, |
| 89 | .smp_boot_secondary = m10v_boot_secondary, | 91 | .smp_boot_secondary = m10v_boot_secondary, |
| 92 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 90 | .cpu_die = m10v_cpu_die, | 93 | .cpu_die = m10v_cpu_die, |
| 91 | .cpu_kill = m10v_cpu_kill, | 94 | .cpu_kill = m10v_cpu_kill, |
| 95 | #endif | ||
| 92 | }; | 96 | }; |
| 93 | CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); | 97 | CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); |
| 94 | 98 | ||
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index be30c3c061b4..1b15d593837e 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c | |||
| @@ -182,6 +182,7 @@ static struct resource latch1_resources[] = { | |||
| 182 | 182 | ||
| 183 | static struct bgpio_pdata latch1_pdata = { | 183 | static struct bgpio_pdata latch1_pdata = { |
| 184 | .label = LATCH1_LABEL, | 184 | .label = LATCH1_LABEL, |
| 185 | .base = -1, | ||
| 185 | .ngpio = LATCH1_NGPIO, | 186 | .ngpio = LATCH1_NGPIO, |
| 186 | }; | 187 | }; |
| 187 | 188 | ||
| @@ -219,6 +220,7 @@ static struct resource latch2_resources[] = { | |||
| 219 | 220 | ||
| 220 | static struct bgpio_pdata latch2_pdata = { | 221 | static struct bgpio_pdata latch2_pdata = { |
| 221 | .label = LATCH2_LABEL, | 222 | .label = LATCH2_LABEL, |
| 223 | .base = -1, | ||
| 222 | .ngpio = LATCH2_NGPIO, | 224 | .ngpio = LATCH2_NGPIO, |
| 223 | }; | 225 | }; |
| 224 | 226 | ||
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 1444b4b4bd9f..439e143cad7b 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
| @@ -250,8 +250,10 @@ static int __init omapdss_init_of(void) | |||
| 250 | if (!node) | 250 | if (!node) |
| 251 | return 0; | 251 | return 0; |
| 252 | 252 | ||
| 253 | if (!of_device_is_available(node)) | 253 | if (!of_device_is_available(node)) { |
| 254 | of_node_put(node); | ||
| 254 | return 0; | 255 | return 0; |
| 256 | } | ||
| 255 | 257 | ||
| 256 | pdev = of_find_device_by_node(node); | 258 | pdev = of_find_device_by_node(node); |
| 257 | 259 | ||
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index a4d1f8de3b5b..d9612221e484 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c | |||
| @@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = { | |||
| 143 | .resource = iop3xx_dma_0_resources, | 143 | .resource = iop3xx_dma_0_resources, |
| 144 | .dev = { | 144 | .dev = { |
| 145 | .dma_mask = &iop3xx_adma_dmamask, | 145 | .dma_mask = &iop3xx_adma_dmamask, |
| 146 | .coherent_dma_mask = DMA_BIT_MASK(64), | 146 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 147 | .platform_data = (void *) &iop3xx_dma_0_data, | 147 | .platform_data = (void *) &iop3xx_dma_0_data, |
| 148 | }, | 148 | }, |
| 149 | }; | 149 | }; |
| @@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = { | |||
| 155 | .resource = iop3xx_dma_1_resources, | 155 | .resource = iop3xx_dma_1_resources, |
| 156 | .dev = { | 156 | .dev = { |
| 157 | .dma_mask = &iop3xx_adma_dmamask, | 157 | .dma_mask = &iop3xx_adma_dmamask, |
| 158 | .coherent_dma_mask = DMA_BIT_MASK(64), | 158 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 159 | .platform_data = (void *) &iop3xx_dma_1_data, | 159 | .platform_data = (void *) &iop3xx_dma_1_data, |
| 160 | }, | 160 | }, |
| 161 | }; | 161 | }; |
| @@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = { | |||
| 167 | .resource = iop3xx_aau_resources, | 167 | .resource = iop3xx_aau_resources, |
| 168 | .dev = { | 168 | .dev = { |
| 169 | .dma_mask = &iop3xx_adma_dmamask, | 169 | .dma_mask = &iop3xx_adma_dmamask, |
| 170 | .coherent_dma_mask = DMA_BIT_MASK(64), | 170 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 171 | .platform_data = (void *) &iop3xx_aau_data, | 171 | .platform_data = (void *) &iop3xx_aau_data, |
| 172 | }, | 172 | }, |
| 173 | }; | 173 | }; |
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a6c81ce00f52..8647cb80a93b 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
| @@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = { | |||
| 622 | .resource = orion_xor0_shared_resources, | 622 | .resource = orion_xor0_shared_resources, |
| 623 | .dev = { | 623 | .dev = { |
| 624 | .dma_mask = &orion_xor_dmamask, | 624 | .dma_mask = &orion_xor_dmamask, |
| 625 | .coherent_dma_mask = DMA_BIT_MASK(64), | 625 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 626 | .platform_data = &orion_xor0_pdata, | 626 | .platform_data = &orion_xor0_pdata, |
| 627 | }, | 627 | }, |
| 628 | }; | 628 | }; |
| @@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = { | |||
| 683 | .resource = orion_xor1_shared_resources, | 683 | .resource = orion_xor1_shared_resources, |
| 684 | .dev = { | 684 | .dev = { |
| 685 | .dma_mask = &orion_xor_dmamask, | 685 | .dma_mask = &orion_xor_dmamask, |
| 686 | .coherent_dma_mask = DMA_BIT_MASK(64), | 686 | .coherent_dma_mask = DMA_BIT_MASK(32), |
| 687 | .platform_data = &orion_xor1_pdata, | 687 | .platform_data = &orion_xor1_pdata, |
| 688 | }, | 688 | }, |
| 689 | }; | 689 | }; |
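For context on the mask values swapped throughout the iop13xx, iop3xx and orion hunks above, DMA_BIT_MASK() is the generic helper from include/linux/dma-mapping.h; its mainline definition is quoted here for reference (my addition, not part of the diff):

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	/* DMA_BIT_MASK(64) == 0xffffffffffffffffULL */
	/* DMA_BIT_MASK(32) == 0x00000000ffffffffULL */

So these platform devices now advertise a 32-bit streaming and coherent DMA mask rather than a full 64-bit one.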
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 9016f4081bb9..0393917eaa57 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl | |||
| @@ -437,3 +437,7 @@ | |||
| 437 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait | 437 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait |
| 438 | 422 common futex_time64 sys_futex | 438 | 422 common futex_time64 sys_futex |
| 439 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval | 439 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 440 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 441 | 425 common io_uring_setup sys_io_uring_setup | ||
| 442 | 426 common io_uring_enter sys_io_uring_enter | ||
| 443 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 7c649f6b14cb..cd7c76e58b09 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | |||
| @@ -162,6 +162,7 @@ | |||
| 162 | rx-fifo-depth = <16384>; | 162 | rx-fifo-depth = <16384>; |
| 163 | snps,multicast-filter-bins = <256>; | 163 | snps,multicast-filter-bins = <256>; |
| 164 | iommus = <&smmu 1>; | 164 | iommus = <&smmu 1>; |
| 165 | altr,sysmgr-syscon = <&sysmgr 0x44 0>; | ||
| 165 | status = "disabled"; | 166 | status = "disabled"; |
| 166 | }; | 167 | }; |
| 167 | 168 | ||
| @@ -179,6 +180,7 @@ | |||
| 179 | rx-fifo-depth = <16384>; | 180 | rx-fifo-depth = <16384>; |
| 180 | snps,multicast-filter-bins = <256>; | 181 | snps,multicast-filter-bins = <256>; |
| 181 | iommus = <&smmu 2>; | 182 | iommus = <&smmu 2>; |
| 183 | altr,sysmgr-syscon = <&sysmgr 0x48 0>; | ||
| 182 | status = "disabled"; | 184 | status = "disabled"; |
| 183 | }; | 185 | }; |
| 184 | 186 | ||
| @@ -196,6 +198,7 @@ | |||
| 196 | rx-fifo-depth = <16384>; | 198 | rx-fifo-depth = <16384>; |
| 197 | snps,multicast-filter-bins = <256>; | 199 | snps,multicast-filter-bins = <256>; |
| 198 | iommus = <&smmu 3>; | 200 | iommus = <&smmu 3>; |
| 201 | altr,sysmgr-syscon = <&sysmgr 0x4c 0>; | ||
| 199 | status = "disabled"; | 202 | status = "disabled"; |
| 200 | }; | 203 | }; |
| 201 | 204 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 33c44e857247..0e34354b2092 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | |||
| @@ -108,8 +108,8 @@ | |||
| 108 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; | 108 | snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; |
| 109 | snps,reset-active-low; | 109 | snps,reset-active-low; |
| 110 | snps,reset-delays-us = <0 10000 50000>; | 110 | snps,reset-delays-us = <0 10000 50000>; |
| 111 | tx_delay = <0x25>; | 111 | tx_delay = <0x24>; |
| 112 | rx_delay = <0x11>; | 112 | rx_delay = <0x18>; |
| 113 | status = "okay"; | 113 | status = "okay"; |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 2157a528276b..79b4d1d4b5d6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
| @@ -46,8 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { | 47 | vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { |
| 48 | compatible = "regulator-fixed"; | 48 | compatible = "regulator-fixed"; |
| 49 | enable-active-high; | 49 | gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; |
| 50 | gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>; | ||
| 51 | pinctrl-names = "default"; | 50 | pinctrl-names = "default"; |
| 52 | pinctrl-0 = <&usb20_host_drv>; | 51 | pinctrl-0 = <&usb20_host_drv>; |
| 53 | regulator-name = "vcc_host1_5v"; | 52 | regulator-name = "vcc_host1_5v"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 84f14b132e8f..dabef1a21649 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi | |||
| @@ -1445,11 +1445,11 @@ | |||
| 1445 | 1445 | ||
| 1446 | sdmmc0 { | 1446 | sdmmc0 { |
| 1447 | sdmmc0_clk: sdmmc0-clk { | 1447 | sdmmc0_clk: sdmmc0-clk { |
| 1448 | rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>; | 1448 | rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>; |
| 1449 | }; | 1449 | }; |
| 1450 | 1450 | ||
| 1451 | sdmmc0_cmd: sdmmc0-cmd { | 1451 | sdmmc0_cmd: sdmmc0-cmd { |
| 1452 | rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>; | 1452 | rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>; |
| 1453 | }; | 1453 | }; |
| 1454 | 1454 | ||
| 1455 | sdmmc0_dectn: sdmmc0-dectn { | 1455 | sdmmc0_dectn: sdmmc0-dectn { |
| @@ -1461,14 +1461,14 @@ | |||
| 1461 | }; | 1461 | }; |
| 1462 | 1462 | ||
| 1463 | sdmmc0_bus1: sdmmc0-bus1 { | 1463 | sdmmc0_bus1: sdmmc0-bus1 { |
| 1464 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>; | 1464 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>; |
| 1465 | }; | 1465 | }; |
| 1466 | 1466 | ||
| 1467 | sdmmc0_bus4: sdmmc0-bus4 { | 1467 | sdmmc0_bus4: sdmmc0-bus4 { |
| 1468 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>, | 1468 | rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>, |
| 1469 | <1 RK_PA1 1 &pcfg_pull_up_4ma>, | 1469 | <1 RK_PA1 1 &pcfg_pull_up_8ma>, |
| 1470 | <1 RK_PA2 1 &pcfg_pull_up_4ma>, | 1470 | <1 RK_PA2 1 &pcfg_pull_up_8ma>, |
| 1471 | <1 RK_PA3 1 &pcfg_pull_up_4ma>; | 1471 | <1 RK_PA3 1 &pcfg_pull_up_8ma>; |
| 1472 | }; | 1472 | }; |
| 1473 | 1473 | ||
| 1474 | sdmmc0_gpio: sdmmc0-gpio { | 1474 | sdmmc0_gpio: sdmmc0-gpio { |
| @@ -1642,50 +1642,50 @@ | |||
| 1642 | rgmiim1_pins: rgmiim1-pins { | 1642 | rgmiim1_pins: rgmiim1-pins { |
| 1643 | rockchip,pins = | 1643 | rockchip,pins = |
| 1644 | /* mac_txclk */ | 1644 | /* mac_txclk */ |
| 1645 | <1 RK_PB4 2 &pcfg_pull_none_12ma>, | 1645 | <1 RK_PB4 2 &pcfg_pull_none_8ma>, |
| 1646 | /* mac_rxclk */ | 1646 | /* mac_rxclk */ |
| 1647 | <1 RK_PB5 2 &pcfg_pull_none_2ma>, | 1647 | <1 RK_PB5 2 &pcfg_pull_none_4ma>, |
| 1648 | /* mac_mdio */ | 1648 | /* mac_mdio */ |
| 1649 | <1 RK_PC3 2 &pcfg_pull_none_2ma>, | 1649 | <1 RK_PC3 2 &pcfg_pull_none_4ma>, |
| 1650 | /* mac_txen */ | 1650 | /* mac_txen */ |
| 1651 | <1 RK_PD1 2 &pcfg_pull_none_12ma>, | 1651 | <1 RK_PD1 2 &pcfg_pull_none_8ma>, |
| 1652 | /* mac_clk */ | 1652 | /* mac_clk */ |
| 1653 | <1 RK_PC5 2 &pcfg_pull_none_2ma>, | 1653 | <1 RK_PC5 2 &pcfg_pull_none_4ma>, |
| 1654 | /* mac_rxdv */ | 1654 | /* mac_rxdv */ |
| 1655 | <1 RK_PC6 2 &pcfg_pull_none_2ma>, | 1655 | <1 RK_PC6 2 &pcfg_pull_none_4ma>, |
| 1656 | /* mac_mdc */ | 1656 | /* mac_mdc */ |
| 1657 | <1 RK_PC7 2 &pcfg_pull_none_2ma>, | 1657 | <1 RK_PC7 2 &pcfg_pull_none_4ma>, |
| 1658 | /* mac_rxd1 */ | 1658 | /* mac_rxd1 */ |
| 1659 | <1 RK_PB2 2 &pcfg_pull_none_2ma>, | 1659 | <1 RK_PB2 2 &pcfg_pull_none_4ma>, |
| 1660 | /* mac_rxd0 */ | 1660 | /* mac_rxd0 */ |
| 1661 | <1 RK_PB3 2 &pcfg_pull_none_2ma>, | 1661 | <1 RK_PB3 2 &pcfg_pull_none_4ma>, |
| 1662 | /* mac_txd1 */ | 1662 | /* mac_txd1 */ |
| 1663 | <1 RK_PB0 2 &pcfg_pull_none_12ma>, | 1663 | <1 RK_PB0 2 &pcfg_pull_none_8ma>, |
| 1664 | /* mac_txd0 */ | 1664 | /* mac_txd0 */ |
| 1665 | <1 RK_PB1 2 &pcfg_pull_none_12ma>, | 1665 | <1 RK_PB1 2 &pcfg_pull_none_8ma>, |
| 1666 | /* mac_rxd3 */ | 1666 | /* mac_rxd3 */ |
| 1667 | <1 RK_PB6 2 &pcfg_pull_none_2ma>, | 1667 | <1 RK_PB6 2 &pcfg_pull_none_4ma>, |
| 1668 | /* mac_rxd2 */ | 1668 | /* mac_rxd2 */ |
| 1669 | <1 RK_PB7 2 &pcfg_pull_none_2ma>, | 1669 | <1 RK_PB7 2 &pcfg_pull_none_4ma>, |
| 1670 | /* mac_txd3 */ | 1670 | /* mac_txd3 */ |
| 1671 | <1 RK_PC0 2 &pcfg_pull_none_12ma>, | 1671 | <1 RK_PC0 2 &pcfg_pull_none_8ma>, |
| 1672 | /* mac_txd2 */ | 1672 | /* mac_txd2 */ |
| 1673 | <1 RK_PC1 2 &pcfg_pull_none_12ma>, | 1673 | <1 RK_PC1 2 &pcfg_pull_none_8ma>, |
| 1674 | 1674 | ||
| 1675 | /* mac_txclk */ | 1675 | /* mac_txclk */ |
| 1676 | <0 RK_PB0 1 &pcfg_pull_none>, | 1676 | <0 RK_PB0 1 &pcfg_pull_none_8ma>, |
| 1677 | /* mac_txen */ | 1677 | /* mac_txen */ |
| 1678 | <0 RK_PB4 1 &pcfg_pull_none>, | 1678 | <0 RK_PB4 1 &pcfg_pull_none_8ma>, |
| 1679 | /* mac_clk */ | 1679 | /* mac_clk */ |
| 1680 | <0 RK_PD0 1 &pcfg_pull_none>, | 1680 | <0 RK_PD0 1 &pcfg_pull_none_4ma>, |
| 1681 | /* mac_txd1 */ | 1681 | /* mac_txd1 */ |
| 1682 | <0 RK_PC0 1 &pcfg_pull_none>, | 1682 | <0 RK_PC0 1 &pcfg_pull_none_8ma>, |
| 1683 | /* mac_txd0 */ | 1683 | /* mac_txd0 */ |
| 1684 | <0 RK_PC1 1 &pcfg_pull_none>, | 1684 | <0 RK_PC1 1 &pcfg_pull_none_8ma>, |
| 1685 | /* mac_txd3 */ | 1685 | /* mac_txd3 */ |
| 1686 | <0 RK_PC7 1 &pcfg_pull_none>, | 1686 | <0 RK_PC7 1 &pcfg_pull_none_8ma>, |
| 1687 | /* mac_txd2 */ | 1687 | /* mac_txd2 */ |
| 1688 | <0 RK_PC6 1 &pcfg_pull_none>; | 1688 | <0 RK_PC6 1 &pcfg_pull_none_8ma>; |
| 1689 | }; | 1689 | }; |
| 1690 | 1690 | ||
| 1691 | rmiim1_pins: rmiim1-pins { | 1691 | rmiim1_pins: rmiim1-pins { |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts index 4a543f2117d4..844eac939a97 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts | |||
| @@ -158,6 +158,7 @@ | |||
| 158 | }; | 158 | }; |
| 159 | 159 | ||
| 160 | &hdmi { | 160 | &hdmi { |
| 161 | ddc-i2c-bus = <&i2c3>; | ||
| 161 | pinctrl-names = "default"; | 162 | pinctrl-names = "default"; |
| 162 | pinctrl-0 = <&hdmi_cec>; | 163 | pinctrl-0 = <&hdmi_cec>; |
| 163 | status = "okay"; | 164 | status = "okay"; |
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index cccb83ad7fa8..c7e1a7837706 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
| @@ -30,8 +30,8 @@ do { \ | |||
| 30 | " prfm pstl1strm, %2\n" \ | 30 | " prfm pstl1strm, %2\n" \ |
| 31 | "1: ldxr %w1, %2\n" \ | 31 | "1: ldxr %w1, %2\n" \ |
| 32 | insn "\n" \ | 32 | insn "\n" \ |
| 33 | "2: stlxr %w3, %w0, %2\n" \ | 33 | "2: stlxr %w0, %w3, %2\n" \ |
| 34 | " cbnz %w3, 1b\n" \ | 34 | " cbnz %w0, 1b\n" \ |
| 35 | " dmb ish\n" \ | 35 | " dmb ish\n" \ |
| 36 | "3:\n" \ | 36 | "3:\n" \ |
| 37 | " .pushsection .fixup,\"ax\"\n" \ | 37 | " .pushsection .fixup,\"ax\"\n" \ |
| @@ -57,23 +57,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) | |||
| 57 | 57 | ||
| 58 | switch (op) { | 58 | switch (op) { |
| 59 | case FUTEX_OP_SET: | 59 | case FUTEX_OP_SET: |
| 60 | __futex_atomic_op("mov %w0, %w4", | 60 | __futex_atomic_op("mov %w3, %w4", |
| 61 | ret, oldval, uaddr, tmp, oparg); | 61 | ret, oldval, uaddr, tmp, oparg); |
| 62 | break; | 62 | break; |
| 63 | case FUTEX_OP_ADD: | 63 | case FUTEX_OP_ADD: |
| 64 | __futex_atomic_op("add %w0, %w1, %w4", | 64 | __futex_atomic_op("add %w3, %w1, %w4", |
| 65 | ret, oldval, uaddr, tmp, oparg); | 65 | ret, oldval, uaddr, tmp, oparg); |
| 66 | break; | 66 | break; |
| 67 | case FUTEX_OP_OR: | 67 | case FUTEX_OP_OR: |
| 68 | __futex_atomic_op("orr %w0, %w1, %w4", | 68 | __futex_atomic_op("orr %w3, %w1, %w4", |
| 69 | ret, oldval, uaddr, tmp, oparg); | 69 | ret, oldval, uaddr, tmp, oparg); |
| 70 | break; | 70 | break; |
| 71 | case FUTEX_OP_ANDN: | 71 | case FUTEX_OP_ANDN: |
| 72 | __futex_atomic_op("and %w0, %w1, %w4", | 72 | __futex_atomic_op("and %w3, %w1, %w4", |
| 73 | ret, oldval, uaddr, tmp, ~oparg); | 73 | ret, oldval, uaddr, tmp, ~oparg); |
| 74 | break; | 74 | break; |
| 75 | case FUTEX_OP_XOR: | 75 | case FUTEX_OP_XOR: |
| 76 | __futex_atomic_op("eor %w0, %w1, %w4", | 76 | __futex_atomic_op("eor %w3, %w1, %w4", |
| 77 | ret, oldval, uaddr, tmp, oparg); | 77 | ret, oldval, uaddr, tmp, oparg); |
| 78 | break; | 78 | break; |
| 79 | default: | 79 | default: |
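A gloss on the operand renumbering above (my reading, assuming the operand order ret, oldval, *uaddr, tmp, oparg visible in the callers): the computed value now travels through tmp and the store-exclusive status through ret, so ret reads 0 once the store succeeds instead of holding the result of the arithmetic op.

	/*
	 * Sketch of the binding, with %w0 = ret, %w1 = oldval, %2 = *uaddr,
	 * %w3 = tmp, %w4 = oparg:
	 *
	 *   old: insn writes %w0 (ret);  stlxr %w3, %w0 -> status in tmp;  ret keeps the op result
	 *   new: insn writes %w3 (tmp);  stlxr %w0, %w3 -> status in ret;  ret is 0 on success
	 */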
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 905e1bb0e7bd..cd9f4e9d04d3 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h | |||
| @@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place) | |||
| 73 | struct plt_entry get_plt_entry(u64 dst, void *pc); | 73 | struct plt_entry get_plt_entry(u64 dst, void *pc); |
| 74 | bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); | 74 | bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); |
| 75 | 75 | ||
| 76 | static inline bool plt_entry_is_initialized(const struct plt_entry *e) | ||
| 77 | { | ||
| 78 | return e->adrp || e->add || e->br; | ||
| 79 | } | ||
| 80 | |||
| 76 | #endif /* __ASM_MODULE_H */ | 81 | #endif /* __ASM_MODULE_H */ |
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index ad8be16a39c9..a179df3674a1 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h | |||
| @@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 65 | 65 | ||
| 66 | static inline void syscall_get_arguments(struct task_struct *task, | 66 | static inline void syscall_get_arguments(struct task_struct *task, |
| 67 | struct pt_regs *regs, | 67 | struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, | ||
| 69 | unsigned long *args) | 68 | unsigned long *args) |
| 70 | { | 69 | { |
| 71 | if (n == 0) | 70 | args[0] = regs->orig_x0; |
| 72 | return; | 71 | args++; |
| 73 | 72 | ||
| 74 | if (i + n > SYSCALL_MAX_ARGS) { | 73 | memcpy(args, &regs->regs[1], 5 * sizeof(args[0])); |
| 75 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 76 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 77 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 78 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 79 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 80 | } | ||
| 81 | |||
| 82 | if (i == 0) { | ||
| 83 | args[0] = regs->orig_x0; | ||
| 84 | args++; | ||
| 85 | i++; | ||
| 86 | n--; | ||
| 87 | } | ||
| 88 | |||
| 89 | memcpy(args, &regs->regs[i], n * sizeof(args[0])); | ||
| 90 | } | 74 | } |
| 91 | 75 | ||
| 92 | static inline void syscall_set_arguments(struct task_struct *task, | 76 | static inline void syscall_set_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 77 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | const unsigned long *args) | 78 | const unsigned long *args) |
| 96 | { | 79 | { |
| 97 | if (n == 0) | 80 | regs->orig_x0 = args[0]; |
| 98 | return; | 81 | args++; |
| 99 | 82 | ||
| 100 | if (i + n > SYSCALL_MAX_ARGS) { | 83 | memcpy(&regs->regs[1], args, 5 * sizeof(args[0])); |
| 101 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 102 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 103 | n = SYSCALL_MAX_ARGS - i; | ||
| 104 | } | ||
| 105 | |||
| 106 | if (i == 0) { | ||
| 107 | regs->orig_x0 = args[0]; | ||
| 108 | args++; | ||
| 109 | i++; | ||
| 110 | n--; | ||
| 111 | } | ||
| 112 | |||
| 113 | memcpy(&regs->regs[i], args, n * sizeof(args[0])); | ||
| 114 | } | 84 | } |
| 115 | 85 | ||
| 116 | /* | 86 | /* |
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index d1dd93436e1e..f2a83ff6b73c 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) | 44 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) |
| 45 | #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) | 45 | #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) |
| 46 | 46 | ||
| 47 | #define __NR_compat_syscalls 424 | 47 | #define __NR_compat_syscalls 428 |
| 48 | #endif | 48 | #endif |
| 49 | 49 | ||
| 50 | #define __ARCH_WANT_SYS_CLONE | 50 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 5590f2623690..23f1a44acada 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
| @@ -866,6 +866,14 @@ __SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64) | |||
| 866 | __SYSCALL(__NR_futex_time64, sys_futex) | 866 | __SYSCALL(__NR_futex_time64, sys_futex) |
| 867 | #define __NR_sched_rr_get_interval_time64 423 | 867 | #define __NR_sched_rr_get_interval_time64 423 |
| 868 | __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) | 868 | __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) |
| 869 | #define __NR_pidfd_send_signal 424 | ||
| 870 | __SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal) | ||
| 871 | #define __NR_io_uring_setup 425 | ||
| 872 | __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup) | ||
| 873 | #define __NR_io_uring_enter 426 | ||
| 874 | __SYSCALL(__NR_io_uring_enter, sys_io_uring_enter) | ||
| 875 | #define __NR_io_uring_register 427 | ||
| 876 | __SYSCALL(__NR_io_uring_register, sys_io_uring_register) | ||
| 869 | 877 | ||
| 870 | /* | 878 | /* |
| 871 | * Please add new compat syscalls above this comment and update | 879 | * Please add new compat syscalls above this comment and update |
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 8e4431a8821f..07b298120182 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c | |||
| @@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
| 107 | trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); | 107 | trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); |
| 108 | if (!plt_entries_equal(mod->arch.ftrace_trampoline, | 108 | if (!plt_entries_equal(mod->arch.ftrace_trampoline, |
| 109 | &trampoline)) { | 109 | &trampoline)) { |
| 110 | if (!plt_entries_equal(mod->arch.ftrace_trampoline, | 110 | if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { |
| 111 | &(struct plt_entry){})) { | ||
| 112 | pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); | 111 | pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); |
| 113 | return -EINVAL; | 112 | return -EINVAL; |
| 114 | } | 113 | } |
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 5ba4465e44f0..ea94cf8f9dc6 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c | |||
| @@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) | |||
| 94 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); | 94 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); |
| 95 | unsigned long high = low + SDEI_STACK_SIZE; | 95 | unsigned long high = low + SDEI_STACK_SIZE; |
| 96 | 96 | ||
| 97 | if (!low) | ||
| 98 | return false; | ||
| 99 | |||
| 97 | if (sp < low || sp >= high) | 100 | if (sp < low || sp >= high) |
| 98 | return false; | 101 | return false; |
| 99 | 102 | ||
| @@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) | |||
| 111 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); | 114 | unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); |
| 112 | unsigned long high = low + SDEI_STACK_SIZE; | 115 | unsigned long high = low + SDEI_STACK_SIZE; |
| 113 | 116 | ||
| 117 | if (!low) | ||
| 118 | return false; | ||
| 119 | |||
| 114 | if (sp < low || sp >= high) | 120 | if (sp < low || sp >= high) |
| 115 | return false; | 121 | return false; |
| 116 | 122 | ||
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8ad119c3f665..29755989f616 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
| @@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) | |||
| 102 | void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | 102 | void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) |
| 103 | { | 103 | { |
| 104 | struct stackframe frame; | 104 | struct stackframe frame; |
| 105 | int skip; | 105 | int skip = 0; |
| 106 | 106 | ||
| 107 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); | 107 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); |
| 108 | 108 | ||
| 109 | if (regs) { | ||
| 110 | if (user_mode(regs)) | ||
| 111 | return; | ||
| 112 | skip = 1; | ||
| 113 | } | ||
| 114 | |||
| 109 | if (!tsk) | 115 | if (!tsk) |
| 110 | tsk = current; | 116 | tsk = current; |
| 111 | 117 | ||
| @@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
| 126 | frame.graph = 0; | 132 | frame.graph = 0; |
| 127 | #endif | 133 | #endif |
| 128 | 134 | ||
| 129 | skip = !!regs; | ||
| 130 | printk("Call trace:\n"); | 135 | printk("Call trace:\n"); |
| 131 | do { | 136 | do { |
| 132 | /* skip until specified stack frame */ | 137 | /* skip until specified stack frame */ |
| @@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs) | |||
| 176 | return ret; | 181 | return ret; |
| 177 | 182 | ||
| 178 | print_modules(); | 183 | print_modules(); |
| 179 | __show_regs(regs); | ||
| 180 | pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", | 184 | pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", |
| 181 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), | 185 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), |
| 182 | end_of_stack(tsk)); | 186 | end_of_stack(tsk)); |
| 187 | show_regs(regs); | ||
| 183 | 188 | ||
| 184 | if (!user_mode(regs)) { | 189 | if (!user_mode(regs)) |
| 185 | dump_backtrace(regs, tsk); | ||
| 186 | dump_instr(KERN_EMERG, regs); | 190 | dump_instr(KERN_EMERG, regs); |
| 187 | } | ||
| 188 | 191 | ||
| 189 | return ret; | 192 | return ret; |
| 190 | } | 193 | } |
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h index ae2be315ee9c..15ba8599858e 100644 --- a/arch/c6x/include/asm/syscall.h +++ b/arch/c6x/include/asm/syscall.h | |||
| @@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static inline void syscall_get_arguments(struct task_struct *task, | 48 | static inline void syscall_get_arguments(struct task_struct *task, |
| 49 | struct pt_regs *regs, unsigned int i, | 49 | struct pt_regs *regs, |
| 50 | unsigned int n, unsigned long *args) | 50 | unsigned long *args) |
| 51 | { | 51 | { |
| 52 | switch (i) { | 52 | *args++ = regs->a4; |
| 53 | case 0: | 53 | *args++ = regs->b4; |
| 54 | if (!n--) | 54 | *args++ = regs->a6; |
| 55 | break; | 55 | *args++ = regs->b6; |
| 56 | *args++ = regs->a4; | 56 | *args++ = regs->a8; |
| 57 | case 1: | 57 | *args = regs->b8; |
| 58 | if (!n--) | ||
| 59 | break; | ||
| 60 | *args++ = regs->b4; | ||
| 61 | case 2: | ||
| 62 | if (!n--) | ||
| 63 | break; | ||
| 64 | *args++ = regs->a6; | ||
| 65 | case 3: | ||
| 66 | if (!n--) | ||
| 67 | break; | ||
| 68 | *args++ = regs->b6; | ||
| 69 | case 4: | ||
| 70 | if (!n--) | ||
| 71 | break; | ||
| 72 | *args++ = regs->a8; | ||
| 73 | case 5: | ||
| 74 | if (!n--) | ||
| 75 | break; | ||
| 76 | *args++ = regs->b8; | ||
| 77 | case 6: | ||
| 78 | if (!n--) | ||
| 79 | break; | ||
| 80 | default: | ||
| 81 | BUG(); | ||
| 82 | } | ||
| 83 | } | 58 | } |
| 84 | 59 | ||
| 85 | static inline void syscall_set_arguments(struct task_struct *task, | 60 | static inline void syscall_set_arguments(struct task_struct *task, |
| 86 | struct pt_regs *regs, | 61 | struct pt_regs *regs, |
| 87 | unsigned int i, unsigned int n, | ||
| 88 | const unsigned long *args) | 62 | const unsigned long *args) |
| 89 | { | 63 | { |
| 90 | switch (i) { | 64 | regs->a4 = *args++; |
| 91 | case 0: | 65 | regs->b4 = *args++; |
| 92 | if (!n--) | 66 | regs->a6 = *args++; |
| 93 | break; | 67 | regs->b6 = *args++; |
| 94 | regs->a4 = *args++; | 68 | regs->a8 = *args++; |
| 95 | case 1: | 69 | regs->a9 = *args; |
| 96 | if (!n--) | ||
| 97 | break; | ||
| 98 | regs->b4 = *args++; | ||
| 99 | case 2: | ||
| 100 | if (!n--) | ||
| 101 | break; | ||
| 102 | regs->a6 = *args++; | ||
| 103 | case 3: | ||
| 104 | if (!n--) | ||
| 105 | break; | ||
| 106 | regs->b6 = *args++; | ||
| 107 | case 4: | ||
| 108 | if (!n--) | ||
| 109 | break; | ||
| 110 | regs->a8 = *args++; | ||
| 111 | case 5: | ||
| 112 | if (!n--) | ||
| 113 | break; | ||
| 114 | regs->a9 = *args++; | ||
| 115 | case 6: | ||
| 116 | if (!n) | ||
| 117 | break; | ||
| 118 | default: | ||
| 119 | BUG(); | ||
| 120 | } | ||
| 121 | } | 70 | } |
| 122 | 71 | ||
| 123 | #endif /* __ASM_C6X_SYSCALLS_H */ | 72 | #endif /* __ASM_C6X_SYSCALLS_H */ |
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h index d637445737b7..bda0a446c63e 100644 --- a/arch/csky/include/asm/syscall.h +++ b/arch/csky/include/asm/syscall.h | |||
| @@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 43 | 43 | ||
| 44 | static inline void | 44 | static inline void |
| 45 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 45 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 46 | unsigned int i, unsigned int n, unsigned long *args) | 46 | unsigned long *args) |
| 47 | { | 47 | { |
| 48 | BUG_ON(i + n > 6); | 48 | args[0] = regs->orig_a0; |
| 49 | if (i == 0) { | 49 | args++; |
| 50 | args[0] = regs->orig_a0; | 50 | memcpy(args, &regs->a1, 5 * sizeof(args[0])); |
| 51 | args++; | ||
| 52 | i++; | ||
| 53 | n--; | ||
| 54 | } | ||
| 55 | memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); | ||
| 56 | } | 51 | } |
| 57 | 52 | ||
| 58 | static inline void | 53 | static inline void |
| 59 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 54 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 60 | unsigned int i, unsigned int n, const unsigned long *args) | 55 | const unsigned long *args) |
| 61 | { | 56 | { |
| 62 | BUG_ON(i + n > 6); | 57 | regs->orig_a0 = args[0]; |
| 63 | if (i == 0) { | 58 | args++; |
| 64 | regs->orig_a0 = args[0]; | 59 | memcpy(&regs->a1, args, 5 * sizeof(regs->a1)); |
| 65 | args++; | ||
| 66 | i++; | ||
| 67 | n--; | ||
| 68 | } | ||
| 69 | memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); | ||
| 70 | } | 60 | } |
| 71 | 61 | ||
| 72 | static inline int | 62 | static inline int |
diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h index 924990401237..ddd483c6ca95 100644 --- a/arch/h8300/include/asm/syscall.h +++ b/arch/h8300/include/asm/syscall.h | |||
| @@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | |||
| 17 | 17 | ||
| 18 | static inline void | 18 | static inline void |
| 19 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 19 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 20 | unsigned int i, unsigned int n, unsigned long *args) | 20 | unsigned long *args) |
| 21 | { | 21 | { |
| 22 | BUG_ON(i + n > 6); | 22 | *args++ = regs->er1; |
| 23 | 23 | *args++ = regs->er2; | |
| 24 | while (n > 0) { | 24 | *args++ = regs->er3; |
| 25 | switch (i) { | 25 | *args++ = regs->er4; |
| 26 | case 0: | 26 | *args++ = regs->er5; |
| 27 | *args++ = regs->er1; | 27 | *args = regs->er6; |
| 28 | break; | ||
| 29 | case 1: | ||
| 30 | *args++ = regs->er2; | ||
| 31 | break; | ||
| 32 | case 2: | ||
| 33 | *args++ = regs->er3; | ||
| 34 | break; | ||
| 35 | case 3: | ||
| 36 | *args++ = regs->er4; | ||
| 37 | break; | ||
| 38 | case 4: | ||
| 39 | *args++ = regs->er5; | ||
| 40 | break; | ||
| 41 | case 5: | ||
| 42 | *args++ = regs->er6; | ||
| 43 | break; | ||
| 44 | } | ||
| 45 | i++; | ||
| 46 | n--; | ||
| 47 | } | ||
| 48 | } | 28 | } |
| 49 | 29 | ||
| 50 | 30 | ||
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h index 4af9c7b6f13a..ae3a1e24fabd 100644 --- a/arch/hexagon/include/asm/syscall.h +++ b/arch/hexagon/include/asm/syscall.h | |||
| @@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task, | |||
| 37 | 37 | ||
| 38 | static inline void syscall_get_arguments(struct task_struct *task, | 38 | static inline void syscall_get_arguments(struct task_struct *task, |
| 39 | struct pt_regs *regs, | 39 | struct pt_regs *regs, |
| 40 | unsigned int i, unsigned int n, | ||
| 41 | unsigned long *args) | 40 | unsigned long *args) |
| 42 | { | 41 | { |
| 43 | BUG_ON(i + n > 6); | 42 | memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0])); |
| 44 | memcpy(args, &(&regs->r00)[i], n * sizeof(args[0])); | ||
| 45 | } | 43 | } |
| 46 | #endif | 44 | #endif |
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 1d0b875fec44..0d9e7fab4a79 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h | |||
| @@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | extern void ia64_syscall_get_set_arguments(struct task_struct *task, | 61 | extern void ia64_syscall_get_set_arguments(struct task_struct *task, |
| 62 | struct pt_regs *regs, unsigned int i, unsigned int n, | 62 | struct pt_regs *regs, unsigned long *args, int rw); |
| 63 | unsigned long *args, int rw); | ||
| 64 | static inline void syscall_get_arguments(struct task_struct *task, | 63 | static inline void syscall_get_arguments(struct task_struct *task, |
| 65 | struct pt_regs *regs, | 64 | struct pt_regs *regs, |
| 66 | unsigned int i, unsigned int n, | ||
| 67 | unsigned long *args) | 65 | unsigned long *args) |
| 68 | { | 66 | { |
| 69 | BUG_ON(i + n > 6); | 67 | ia64_syscall_get_set_arguments(task, regs, args, 0); |
| 70 | |||
| 71 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); | ||
| 72 | } | 68 | } |
| 73 | 69 | ||
| 74 | static inline void syscall_set_arguments(struct task_struct *task, | 70 | static inline void syscall_set_arguments(struct task_struct *task, |
| 75 | struct pt_regs *regs, | 71 | struct pt_regs *regs, |
| 76 | unsigned int i, unsigned int n, | ||
| 77 | unsigned long *args) | 72 | unsigned long *args) |
| 78 | { | 73 | { |
| 79 | BUG_ON(i + n > 6); | 74 | ia64_syscall_get_set_arguments(task, regs, args, 1); |
| 80 | |||
| 81 | ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); | ||
| 82 | } | 75 | } |
| 83 | 76 | ||
| 84 | static inline int syscall_get_arch(void) | 77 | static inline int syscall_get_arch(void) |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6d50ede0ed69..bf9c24d9ce84 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
| @@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) | |||
| 2179 | } | 2179 | } |
| 2180 | 2180 | ||
| 2181 | void ia64_syscall_get_set_arguments(struct task_struct *task, | 2181 | void ia64_syscall_get_set_arguments(struct task_struct *task, |
| 2182 | struct pt_regs *regs, unsigned int i, unsigned int n, | 2182 | struct pt_regs *regs, unsigned long *args, int rw) |
| 2183 | unsigned long *args, int rw) | ||
| 2184 | { | 2183 | { |
| 2185 | struct syscall_get_set_args data = { | 2184 | struct syscall_get_set_args data = { |
| 2186 | .i = i, | 2185 | .i = 0, |
| 2187 | .n = n, | 2186 | .n = 6, |
| 2188 | .args = args, | 2187 | .args = args, |
| 2189 | .regs = regs, | 2188 | .regs = regs, |
| 2190 | .rw = rw, | 2189 | .rw = rw, |
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index ab9cda5f6136..56e3d0b685e1 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl | |||
| @@ -344,3 +344,7 @@ | |||
| 344 | 332 common pkey_free sys_pkey_free | 344 | 332 common pkey_free sys_pkey_free |
| 345 | 333 common rseq sys_rseq | 345 | 333 common rseq sys_rseq |
| 346 | # 334 through 423 are reserved to sync up with other architectures | 346 | # 334 through 423 are reserved to sync up with other architectures |
| 347 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 348 | 425 common io_uring_setup sys_io_uring_setup | ||
| 349 | 426 common io_uring_enter sys_io_uring_enter | ||
| 350 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 125c14178979..df4ec3ec71d1 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl | |||
| @@ -423,3 +423,7 @@ | |||
| 423 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait | 423 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait |
| 424 | 422 common futex_time64 sys_futex | 424 | 422 common futex_time64 sys_futex |
| 425 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval | 425 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 426 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 427 | 425 common io_uring_setup sys_io_uring_setup | ||
| 428 | 426 common io_uring_enter sys_io_uring_enter | ||
| 429 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h index 220decd605a4..833d3a53dab3 100644 --- a/arch/microblaze/include/asm/syscall.h +++ b/arch/microblaze/include/asm/syscall.h | |||
| @@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs, | |||
| 82 | 82 | ||
| 83 | static inline void syscall_get_arguments(struct task_struct *task, | 83 | static inline void syscall_get_arguments(struct task_struct *task, |
| 84 | struct pt_regs *regs, | 84 | struct pt_regs *regs, |
| 85 | unsigned int i, unsigned int n, | ||
| 86 | unsigned long *args) | 85 | unsigned long *args) |
| 87 | { | 86 | { |
| 87 | unsigned int i = 0; | ||
| 88 | unsigned int n = 6; | ||
| 89 | |||
| 88 | while (n--) | 90 | while (n--) |
| 89 | *args++ = microblaze_get_syscall_arg(regs, i++); | 91 | *args++ = microblaze_get_syscall_arg(regs, i++); |
| 90 | } | 92 | } |
| 91 | 93 | ||
| 92 | static inline void syscall_set_arguments(struct task_struct *task, | 94 | static inline void syscall_set_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 95 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | const unsigned long *args) | 96 | const unsigned long *args) |
| 96 | { | 97 | { |
| 98 | unsigned int i = 0; | ||
| 99 | unsigned int n = 6; | ||
| 100 | |||
| 97 | while (n--) | 101 | while (n--) |
| 98 | microblaze_set_syscall_arg(regs, i++, *args++); | 102 | microblaze_set_syscall_arg(regs, i++, *args++); |
| 99 | } | 103 | } |
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index 8ee3a8c18498..4964947732af 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl | |||
| @@ -429,3 +429,7 @@ | |||
| 429 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait | 429 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait |
| 430 | 422 common futex_time64 sys_futex | 430 | 422 common futex_time64 sys_futex |
| 431 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval | 431 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 432 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 433 | 425 common io_uring_setup sys_io_uring_setup | ||
| 434 | 426 common io_uring_enter sys_io_uring_enter | ||
| 435 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 4a70c5de8c92..25a57895a3a3 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c | |||
| @@ -210,12 +210,6 @@ const char *get_system_type(void) | |||
| 210 | return ath79_sys_type; | 210 | return ath79_sys_type; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | int get_c0_perfcount_int(void) | ||
| 214 | { | ||
| 215 | return ATH79_MISC_IRQ(5); | ||
| 216 | } | ||
| 217 | EXPORT_SYMBOL_GPL(get_c0_perfcount_int); | ||
| 218 | |||
| 219 | unsigned int get_c0_compare_int(void) | 213 | unsigned int get_c0_compare_int(void) |
| 220 | { | 214 | { |
| 221 | return CP0_LEGACY_COMPARE_IRQ; | 215 | return CP0_LEGACY_COMPARE_IRQ; |
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config index f607888d2483..184eb65a6ba7 100644 --- a/arch/mips/configs/generic/board-ocelot.config +++ b/arch/mips/configs/generic/board-ocelot.config | |||
| @@ -1,6 +1,10 @@ | |||
| 1 | # require CONFIG_CPU_MIPS32_R2=y | 1 | # require CONFIG_CPU_MIPS32_R2=y |
| 2 | 2 | ||
| 3 | CONFIG_LEGACY_BOARD_OCELOT=y | 3 | CONFIG_LEGACY_BOARD_OCELOT=y |
| 4 | CONFIG_FIT_IMAGE_FDT_OCELOT=y | ||
| 5 | |||
| 6 | CONFIG_BRIDGE=y | ||
| 7 | CONFIG_GENERIC_PHY=y | ||
| 4 | 8 | ||
| 5 | CONFIG_MTD=y | 9 | CONFIG_MTD=y |
| 6 | CONFIG_MTD_CMDLINE_PARTS=y | 10 | CONFIG_MTD_CMDLINE_PARTS=y |
| @@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y | |||
| 19 | CONFIG_SERIAL_OF_PLATFORM=y | 23 | CONFIG_SERIAL_OF_PLATFORM=y |
| 20 | 24 | ||
| 21 | CONFIG_NETDEVICES=y | 25 | CONFIG_NETDEVICES=y |
| 26 | CONFIG_NET_SWITCHDEV=y | ||
| 27 | CONFIG_NET_DSA=y | ||
| 22 | CONFIG_MSCC_OCELOT_SWITCH=y | 28 | CONFIG_MSCC_OCELOT_SWITCH=y |
| 23 | CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y | 29 | CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y |
| 24 | CONFIG_MDIO_MSCC_MIIM=y | 30 | CONFIG_MDIO_MSCC_MIIM=y |
| @@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y | |||
| 35 | CONFIG_SPI_DW_MMIO=y | 41 | CONFIG_SPI_DW_MMIO=y |
| 36 | CONFIG_SPI_SPIDEV=y | 42 | CONFIG_SPI_SPIDEV=y |
| 37 | 43 | ||
| 44 | CONFIG_PINCTRL_OCELOT=y | ||
| 45 | |||
| 38 | CONFIG_GPIO_SYSFS=y | 46 | CONFIG_GPIO_SYSFS=y |
| 39 | 47 | ||
| 40 | CONFIG_POWER_RESET=y | 48 | CONFIG_POWER_RESET=y |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 6cf8ffb5367e..a2b4748655df 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
| @@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 116 | 116 | ||
| 117 | static inline void syscall_get_arguments(struct task_struct *task, | 117 | static inline void syscall_get_arguments(struct task_struct *task, |
| 118 | struct pt_regs *regs, | 118 | struct pt_regs *regs, |
| 119 | unsigned int i, unsigned int n, | ||
| 120 | unsigned long *args) | 119 | unsigned long *args) |
| 121 | { | 120 | { |
| 121 | unsigned int i = 0; | ||
| 122 | unsigned int n = 6; | ||
| 122 | int ret; | 123 | int ret; |
| 123 | 124 | ||
| 124 | /* O32 ABI syscall() */ | 125 | /* O32 ABI syscall() */ |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 6e574c02e4c3..ea781b29f7f1 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <asm/processor.h> | 33 | #include <asm/processor.h> |
| 34 | #include <asm/sigcontext.h> | 34 | #include <asm/sigcontext.h> |
| 35 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
| 36 | #include <asm/irq_regs.h> | ||
| 36 | 37 | ||
| 37 | static struct hard_trap_info { | 38 | static struct hard_trap_info { |
| 38 | unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ | 39 | unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ |
| @@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored) | |||
| 214 | old_fs = get_fs(); | 215 | old_fs = get_fs(); |
| 215 | set_fs(KERNEL_DS); | 216 | set_fs(KERNEL_DS); |
| 216 | 217 | ||
| 217 | kgdb_nmicallback(raw_smp_processor_id(), NULL); | 218 | kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); |
| 218 | 219 | ||
| 219 | set_fs(old_fs); | 220 | set_fs(old_fs); |
| 220 | } | 221 | } |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0057c910bc2f..3a62f80958e1 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
| @@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) | |||
| 1419 | 1419 | ||
| 1420 | sd.nr = syscall; | 1420 | sd.nr = syscall; |
| 1421 | sd.arch = syscall_get_arch(); | 1421 | sd.arch = syscall_get_arch(); |
| 1422 | syscall_get_arguments(current, regs, 0, 6, args); | 1422 | syscall_get_arguments(current, regs, args); |
| 1423 | for (i = 0; i < 6; i++) | 1423 | for (i = 0; i < 6; i++) |
| 1424 | sd.args[i] = args[i]; | 1424 | sd.args[i] = args[i]; |
| 1425 | sd.instruction_pointer = KSTK_EIP(current); | 1425 | sd.instruction_pointer = KSTK_EIP(current); |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f158c5894a9a..feb2653490df 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
| @@ -125,7 +125,7 @@ trace_a_syscall: | |||
| 125 | subu t1, v0, __NR_O32_Linux | 125 | subu t1, v0, __NR_O32_Linux |
| 126 | move a1, v0 | 126 | move a1, v0 |
| 127 | bnez t1, 1f /* __NR_syscall at offset 0 */ | 127 | bnez t1, 1f /* __NR_syscall at offset 0 */ |
| 128 | lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ | 128 | ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ |
| 129 | .set pop | 129 | .set pop |
| 130 | 130 | ||
| 131 | 1: jal syscall_trace_enter | 131 | 1: jal syscall_trace_enter |
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 15f4117900ee..9392dfe33f97 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl | |||
| @@ -362,3 +362,7 @@ | |||
| 362 | 421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64 | 362 | 421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64 |
| 363 | 422 n32 futex_time64 sys_futex | 363 | 422 n32 futex_time64 sys_futex |
| 364 | 423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval | 364 | 423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 365 | 424 n32 pidfd_send_signal sys_pidfd_send_signal | ||
| 366 | 425 n32 io_uring_setup sys_io_uring_setup | ||
| 367 | 426 n32 io_uring_enter sys_io_uring_enter | ||
| 368 | 427 n32 io_uring_register sys_io_uring_register | ||
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index c85502e67b44..cd0c8aa21fba 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl | |||
| @@ -338,3 +338,7 @@ | |||
| 338 | 327 n64 rseq sys_rseq | 338 | 327 n64 rseq sys_rseq |
| 339 | 328 n64 io_pgetevents sys_io_pgetevents | 339 | 328 n64 io_pgetevents sys_io_pgetevents |
| 340 | # 329 through 423 are reserved to sync up with other architectures | 340 | # 329 through 423 are reserved to sync up with other architectures |
| 341 | 424 n64 pidfd_send_signal sys_pidfd_send_signal | ||
| 342 | 425 n64 io_uring_setup sys_io_uring_setup | ||
| 343 | 426 n64 io_uring_enter sys_io_uring_enter | ||
| 344 | 427 n64 io_uring_register sys_io_uring_register | ||
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 2e063d0f837e..e849e8ffe4a2 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl | |||
| @@ -411,3 +411,7 @@ | |||
| 411 | 421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 | 411 | 421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 |
| 412 | 422 o32 futex_time64 sys_futex sys_futex | 412 | 422 o32 futex_time64 sys_futex sys_futex |
| 413 | 423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval | 413 | 423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval |
| 414 | 424 o32 pidfd_send_signal sys_pidfd_send_signal | ||
| 415 | 425 o32 io_uring_setup sys_io_uring_setup | ||
| 416 | 426 o32 io_uring_enter sys_io_uring_enter | ||
| 417 | 427 o32 io_uring_register sys_io_uring_register | ||
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 710a59764b01..a32f843cdbe0 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c | |||
| @@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d) | |||
| 118 | { | 118 | { |
| 119 | struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); | 119 | struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); |
| 120 | struct bridge_controller *bc; | 120 | struct bridge_controller *bc; |
| 121 | int pin = hd->pin; | ||
| 122 | 121 | ||
| 123 | if (!hd) | 122 | if (!hd) |
| 124 | return; | 123 | return; |
| @@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d) | |||
| 126 | disable_hub_irq(d); | 125 | disable_hub_irq(d); |
| 127 | 126 | ||
| 128 | bc = hd->bc; | 127 | bc = hd->bc; |
| 129 | bridge_clr(bc, b_int_enable, (1 << pin)); | 128 | bridge_clr(bc, b_int_enable, (1 << hd->pin)); |
| 130 | bridge_read(bc, b_wid_tflush); | 129 | bridge_read(bc, b_wid_tflush); |
| 131 | } | 130 | } |
| 132 | 131 | ||
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index f7e5e86765fe..671ebd357496 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h | |||
| @@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 108 | * syscall_get_arguments - extract system call parameter values | 108 | * syscall_get_arguments - extract system call parameter values |
| 109 | * @task: task of interest, must be blocked | 109 | * @task: task of interest, must be blocked |
| 110 | * @regs: task_pt_regs() of @task | 110 | * @regs: task_pt_regs() of @task |
| 111 | * @i: argument index [0,5] | ||
| 112 | * @n: number of arguments; n+i must be [1,6]. | ||
| 113 | * @args: array filled with argument values | 111 | * @args: array filled with argument values |
| 114 | * | 112 | * |
| 115 | * Fetches @n arguments to the system call starting with the @i'th argument | 113 | * Fetches 6 arguments to the system call (from 0 through 5). The first |
| 116 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | 114 | * argument is stored in @args[0], and so on. |
| 117 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 118 | * | 115 | * |
| 119 | * It's only valid to call this when @task is stopped for tracing on | 116 | * It's only valid to call this when @task is stopped for tracing on |
| 120 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 117 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 121 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 122 | * taking up to 6 arguments. | ||
| 123 | */ | 118 | */ |
| 124 | #define SYSCALL_MAX_ARGS 6 | 119 | #define SYSCALL_MAX_ARGS 6 |
| 125 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 120 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 126 | unsigned int i, unsigned int n, unsigned long *args) | 121 | unsigned long *args) |
| 127 | { | 122 | { |
| 128 | if (n == 0) | 123 | args[0] = regs->orig_r0; |
| 129 | return; | 124 | args++; |
| 130 | if (i + n > SYSCALL_MAX_ARGS) { | 125 | memcpy(args, ®s->uregs[0] + 1, 5 * sizeof(args[0])); |
| 131 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; | ||
| 132 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; | ||
| 133 | pr_warning("%s called with max args %d, handling only %d\n", | ||
| 134 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 135 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 136 | memset(args_bad, 0, n_bad * sizeof(args[0])); | ||
| 137 | } | ||
| 138 | |||
| 139 | if (i == 0) { | ||
| 140 | args[0] = regs->orig_r0; | ||
| 141 | args++; | ||
| 142 | i++; | ||
| 143 | n--; | ||
| 144 | } | ||
| 145 | |||
| 146 | memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0])); | ||
| 147 | } | 126 | } |
| 148 | 127 | ||
| 149 | /** | 128 | /** |
| 150 | * syscall_set_arguments - change system call parameter value | 129 | * syscall_set_arguments - change system call parameter value |
| 151 | * @task: task of interest, must be in system call entry tracing | 130 | * @task: task of interest, must be in system call entry tracing |
| 152 | * @regs: task_pt_regs() of @task | 131 | * @regs: task_pt_regs() of @task |
| 153 | * @i: argument index [0,5] | ||
| 154 | * @n: number of arguments; n+i must be [1,6]. | ||
| 155 | * @args: array of argument values to store | 132 | * @args: array of argument values to store |
| 156 | * | 133 | * |
| 157 | * Changes @n arguments to the system call starting with the @i'th argument. | 134 | * Changes 6 arguments to the system call. The first argument gets value |
| 158 | * Argument @i gets value @args[0], and so on. | 135 | * @args[0], and so on. |
| 159 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 160 | * | 136 | * |
| 161 | * It's only valid to call this when @task is stopped for tracing on | 137 | * It's only valid to call this when @task is stopped for tracing on |
| 162 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 138 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 163 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 164 | * taking up to 6 arguments. | ||
| 165 | */ | 139 | */ |
| 166 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 140 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 167 | unsigned int i, unsigned int n, | ||
| 168 | const unsigned long *args) | 141 | const unsigned long *args) |
| 169 | { | 142 | { |
| 170 | if (n == 0) | 143 | regs->orig_r0 = args[0]; |
| 171 | return; | 144 | args++; |
| 172 | |||
| 173 | if (i + n > SYSCALL_MAX_ARGS) { | ||
| 174 | pr_warn("%s called with max args %d, handling only %d\n", | ||
| 175 | __func__, i + n, SYSCALL_MAX_ARGS); | ||
| 176 | n = SYSCALL_MAX_ARGS - i; | ||
| 177 | } | ||
| 178 | |||
| 179 | if (i == 0) { | ||
| 180 | regs->orig_r0 = args[0]; | ||
| 181 | args++; | ||
| 182 | i++; | ||
| 183 | n--; | ||
| 184 | } | ||
| 185 | 145 | ||
| 186 | memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0])); | 146 | memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0])); |
| 187 | } | 147 | } |
| 188 | #endif /* _ASM_NDS32_SYSCALL_H */ | 148 | #endif /* _ASM_NDS32_SYSCALL_H */ |
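
The nds32 hunk above follows the conversion applied across architectures in this series: syscall_get_arguments()/syscall_set_arguments() lose the (i, n) range parameters and always move all six arguments, with argument 0 taken from orig_r0 because r0 is clobbered by the return value. A hedged sketch of how a caller now uses the simplified interface (kernel-context sketch; trace_syscall_entry is an illustrative name, not a kernel API):

	void trace_syscall_entry(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];	/* callers must always provide room for 6 */

		syscall_get_arguments(task, regs, args);
		/* args[0]..args[5] now hold the six syscall arguments */
	}
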
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h index 9de220854c4a..d7624ed06efb 100644 --- a/arch/nios2/include/asm/syscall.h +++ b/arch/nios2/include/asm/syscall.h | |||
| @@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | static inline void syscall_get_arguments(struct task_struct *task, | 60 | static inline void syscall_get_arguments(struct task_struct *task, |
| 61 | struct pt_regs *regs, unsigned int i, unsigned int n, | 61 | struct pt_regs *regs, unsigned long *args) |
| 62 | unsigned long *args) | ||
| 63 | { | 62 | { |
| 64 | BUG_ON(i + n > 6); | 63 | *args++ = regs->r4; |
| 65 | 64 | *args++ = regs->r5; | |
| 66 | switch (i) { | 65 | *args++ = regs->r6; |
| 67 | case 0: | 66 | *args++ = regs->r7; |
| 68 | if (!n--) | 67 | *args++ = regs->r8; |
| 69 | break; | 68 | *args = regs->r9; |
| 70 | *args++ = regs->r4; | ||
| 71 | case 1: | ||
| 72 | if (!n--) | ||
| 73 | break; | ||
| 74 | *args++ = regs->r5; | ||
| 75 | case 2: | ||
| 76 | if (!n--) | ||
| 77 | break; | ||
| 78 | *args++ = regs->r6; | ||
| 79 | case 3: | ||
| 80 | if (!n--) | ||
| 81 | break; | ||
| 82 | *args++ = regs->r7; | ||
| 83 | case 4: | ||
| 84 | if (!n--) | ||
| 85 | break; | ||
| 86 | *args++ = regs->r8; | ||
| 87 | case 5: | ||
| 88 | if (!n--) | ||
| 89 | break; | ||
| 90 | *args++ = regs->r9; | ||
| 91 | case 6: | ||
| 92 | if (!n--) | ||
| 93 | break; | ||
| 94 | default: | ||
| 95 | BUG(); | ||
| 96 | } | ||
| 97 | } | 69 | } |
| 98 | 70 | ||
| 99 | static inline void syscall_set_arguments(struct task_struct *task, | 71 | static inline void syscall_set_arguments(struct task_struct *task, |
| 100 | struct pt_regs *regs, unsigned int i, unsigned int n, | 72 | struct pt_regs *regs, const unsigned long *args) |
| 101 | const unsigned long *args) | ||
| 102 | { | 73 | { |
| 103 | BUG_ON(i + n > 6); | 74 | regs->r4 = *args++; |
| 104 | 75 | regs->r5 = *args++; | |
| 105 | switch (i) { | 76 | regs->r6 = *args++; |
| 106 | case 0: | 77 | regs->r7 = *args++; |
| 107 | if (!n--) | 78 | regs->r8 = *args++; |
| 108 | break; | 79 | regs->r9 = *args; |
| 109 | regs->r4 = *args++; | ||
| 110 | case 1: | ||
| 111 | if (!n--) | ||
| 112 | break; | ||
| 113 | regs->r5 = *args++; | ||
| 114 | case 2: | ||
| 115 | if (!n--) | ||
| 116 | break; | ||
| 117 | regs->r6 = *args++; | ||
| 118 | case 3: | ||
| 119 | if (!n--) | ||
| 120 | break; | ||
| 121 | regs->r7 = *args++; | ||
| 122 | case 4: | ||
| 123 | if (!n--) | ||
| 124 | break; | ||
| 125 | regs->r8 = *args++; | ||
| 126 | case 5: | ||
| 127 | if (!n--) | ||
| 128 | break; | ||
| 129 | regs->r9 = *args++; | ||
| 130 | case 6: | ||
| 131 | if (!n) | ||
| 132 | break; | ||
| 133 | default: | ||
| 134 | BUG(); | ||
| 135 | } | ||
| 136 | } | 80 | } |
| 137 | 81 | ||
| 138 | #endif | 82 | #endif |
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h index 2db9f1cf0694..b4ff07c1baed 100644 --- a/arch/openrisc/include/asm/syscall.h +++ b/arch/openrisc/include/asm/syscall.h | |||
| @@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 56 | 56 | ||
| 57 | static inline void | 57 | static inline void |
| 58 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 58 | syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, unsigned long *args) | 59 | unsigned long *args) |
| 60 | { | 60 | { |
| 61 | BUG_ON(i + n > 6); | 61 | memcpy(args, &regs->gpr[3], 6 * sizeof(args[0])); |
| 62 | |||
| 63 | memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0])); | ||
| 64 | } | 62 | } |
| 65 | 63 | ||
| 66 | static inline void | 64 | static inline void |
| 67 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 65 | syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, const unsigned long *args) | 66 | const unsigned long *args) |
| 69 | { | 67 | { |
| 70 | BUG_ON(i + n > 6); | 68 | memcpy(&regs->gpr[3], args, 6 * sizeof(args[0])); |
| 71 | |||
| 72 | memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0])); | ||
| 73 | } | 69 | } |
| 74 | 70 | ||
| 75 | static inline int syscall_get_arch(void) | 71 | static inline int syscall_get_arch(void) |
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 2a27b275ab09..9ff033d261ab 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h | |||
| @@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *); | |||
| 22 | 22 | ||
| 23 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 23 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
| 24 | { | 24 | { |
| 25 | return regs->gr[20]; | 25 | return regs->gr[28]; |
| 26 | } | 26 | } |
| 27 | 27 | ||
| 28 | static inline void instruction_pointer_set(struct pt_regs *regs, | 28 | static inline void instruction_pointer_set(struct pt_regs *regs, |
| 29 | unsigned long val) | 29 | unsigned long val) |
| 30 | { | 30 | { |
| 31 | regs->iaoq[0] = val; | 31 | regs->iaoq[0] = val; |
| 32 | regs->iaoq[1] = val + 4; | ||
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | /* Query offset/name of register from its name/offset */ | 35 | /* Query offset/name of register from its name/offset */ |
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h index 8bff1a58c97f..62a6d477fae0 100644 --- a/arch/parisc/include/asm/syscall.h +++ b/arch/parisc/include/asm/syscall.h | |||
| @@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk, | |||
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | static inline void syscall_get_arguments(struct task_struct *tsk, | 20 | static inline void syscall_get_arguments(struct task_struct *tsk, |
| 21 | struct pt_regs *regs, unsigned int i, | 21 | struct pt_regs *regs, |
| 22 | unsigned int n, unsigned long *args) | 22 | unsigned long *args) |
| 23 | { | 23 | { |
| 24 | BUG_ON(i); | 24 | args[5] = regs->gr[21]; |
| 25 | 25 | args[4] = regs->gr[22]; | |
| 26 | switch (n) { | 26 | args[3] = regs->gr[23]; |
| 27 | case 6: | 27 | args[2] = regs->gr[24]; |
| 28 | args[5] = regs->gr[21]; | 28 | args[1] = regs->gr[25]; |
| 29 | case 5: | 29 | args[0] = regs->gr[26]; |
| 30 | args[4] = regs->gr[22]; | ||
| 31 | case 4: | ||
| 32 | args[3] = regs->gr[23]; | ||
| 33 | case 3: | ||
| 34 | args[2] = regs->gr[24]; | ||
| 35 | case 2: | ||
| 36 | args[1] = regs->gr[25]; | ||
| 37 | case 1: | ||
| 38 | args[0] = regs->gr[26]; | ||
| 39 | case 0: | ||
| 40 | break; | ||
| 41 | default: | ||
| 42 | BUG(); | ||
| 43 | } | ||
| 44 | } | 30 | } |
| 45 | 31 | ||
| 46 | static inline long syscall_get_return_value(struct task_struct *task, | 32 | static inline long syscall_get_return_value(struct task_struct *task, |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index eb39e7e380d7..841db71958cd 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
| @@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void) | |||
| 210 | 210 | ||
| 211 | static int __init parisc_idle_init(void) | 211 | static int __init parisc_idle_init(void) |
| 212 | { | 212 | { |
| 213 | const char *marker; | ||
| 214 | |||
| 215 | /* check QEMU/SeaBIOS marker in PAGE0 */ | ||
| 216 | marker = (char *) &PAGE0->pad0; | ||
| 217 | running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); | ||
| 218 | |||
| 219 | if (!running_on_qemu) | 213 | if (!running_on_qemu) |
| 220 | cpu_idle_poll_ctrl(1); | 214 | cpu_idle_poll_ctrl(1); |
| 221 | 215 | ||
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 15dd9e21be7e..d908058d05c1 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
| @@ -397,6 +397,9 @@ void __init start_parisc(void) | |||
| 397 | int ret, cpunum; | 397 | int ret, cpunum; |
| 398 | struct pdc_coproc_cfg coproc_cfg; | 398 | struct pdc_coproc_cfg coproc_cfg; |
| 399 | 399 | ||
| 400 | /* check QEMU/SeaBIOS marker in PAGE0 */ | ||
| 401 | running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); | ||
| 402 | |||
| 400 | cpunum = smp_processor_id(); | 403 | cpunum = smp_processor_id(); |
| 401 | 404 | ||
| 402 | init_cpu_topology(); | 405 | init_cpu_topology(); |
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index b26766c6647d..fe8ca623add8 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl | |||
| @@ -420,3 +420,7 @@ | |||
| 420 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 | 420 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 |
| 421 | 422 32 futex_time64 sys_futex sys_futex | 421 | 422 32 futex_time64 sys_futex sys_futex |
| 422 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval | 422 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval |
| 423 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 424 | 425 common io_uring_setup sys_io_uring_setup | ||
| 425 | 426 common io_uring_enter sys_io_uring_enter | ||
| 426 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 598cdcdd1355..8ddd4a91bdc1 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h | |||
| @@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void) | |||
| 352 | #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ | 352 | #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ |
| 353 | defined (CONFIG_PPC_64K_PAGES) | 353 | defined (CONFIG_PPC_64K_PAGES) |
| 354 | #define MAX_PHYSMEM_BITS 51 | 354 | #define MAX_PHYSMEM_BITS 51 |
| 355 | #elif defined(CONFIG_SPARSEMEM) | 355 | #elif defined(CONFIG_PPC64) |
| 356 | #define MAX_PHYSMEM_BITS 46 | 356 | #define MAX_PHYSMEM_BITS 46 |
| 357 | #endif | 357 | #endif |
| 358 | 358 | ||
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 1a0e7a8b1c81..1243045bad2d 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h | |||
| @@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 65 | 65 | ||
| 66 | static inline void syscall_get_arguments(struct task_struct *task, | 66 | static inline void syscall_get_arguments(struct task_struct *task, |
| 67 | struct pt_regs *regs, | 67 | struct pt_regs *regs, |
| 68 | unsigned int i, unsigned int n, | ||
| 69 | unsigned long *args) | 68 | unsigned long *args) |
| 70 | { | 69 | { |
| 71 | unsigned long val, mask = -1UL; | 70 | unsigned long val, mask = -1UL; |
| 72 | 71 | unsigned int n = 6; | |
| 73 | BUG_ON(i + n > 6); | ||
| 74 | 72 | ||
| 75 | #ifdef CONFIG_COMPAT | 73 | #ifdef CONFIG_COMPAT |
| 76 | if (test_tsk_thread_flag(task, TIF_32BIT)) | 74 | if (test_tsk_thread_flag(task, TIF_32BIT)) |
| 77 | mask = 0xffffffff; | 75 | mask = 0xffffffff; |
| 78 | #endif | 76 | #endif |
| 79 | while (n--) { | 77 | while (n--) { |
| 80 | if (n == 0 && i == 0) | 78 | if (n == 0) |
| 81 | val = regs->orig_gpr3; | 79 | val = regs->orig_gpr3; |
| 82 | else | 80 | else |
| 83 | val = regs->gpr[3 + i + n]; | 81 | val = regs->gpr[3 + n]; |
| 84 | 82 | ||
| 85 | args[n] = val & mask; | 83 | args[n] = val & mask; |
| 86 | } | 84 | } |
| @@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 88 | 86 | ||
| 89 | static inline void syscall_set_arguments(struct task_struct *task, | 87 | static inline void syscall_set_arguments(struct task_struct *task, |
| 90 | struct pt_regs *regs, | 88 | struct pt_regs *regs, |
| 91 | unsigned int i, unsigned int n, | ||
| 92 | const unsigned long *args) | 89 | const unsigned long *args) |
| 93 | { | 90 | { |
| 94 | BUG_ON(i + n > 6); | 91 | memcpy(&regs->gpr[3], args, 6 * sizeof(args[0])); |
| 95 | memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0])); | ||
| 96 | 92 | ||
| 97 | /* Also copy the first argument into orig_gpr3 */ | 93 | /* Also copy the first argument into orig_gpr3 */ |
| 98 | if (i == 0 && n > 0) | 94 | regs->orig_gpr3 = args[0]; |
| 99 | regs->orig_gpr3 = args[0]; | ||
| 100 | } | 95 | } |
| 101 | 96 | ||
| 102 | static inline int syscall_get_arch(void) | 97 | static inline int syscall_get_arch(void) |
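
The powerpc fetch loop keeps one subtlety worth spelling out: argument 0 comes from orig_gpr3 (gpr[3] is clobbered by the return value), and compat (TIF_32BIT) tasks get their arguments truncated to 32 bits. A rough kernel-context sketch of the same logic written as a forward loop (get_args_sketch is illustrative, not a kernel function):

	static void get_args_sketch(struct pt_regs *regs, unsigned long *args,
				    int compat)
	{
		unsigned long mask = compat ? 0xffffffffUL : -1UL;
		int n;

		for (n = 0; n < 6; n++) {
			/* arg 0 lives in orig_gpr3, the rest in gpr[4..8] */
			unsigned long val = (n == 0) ? regs->orig_gpr3
						     : regs->gpr[3 + n];

			args[n] = val & mask;
		}
	}
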
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a5b8fbae56a0..9481a117e242 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
| @@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common) | |||
| 656 | ld r4,PACA_EXSLB+EX_DAR(r13) | 656 | ld r4,PACA_EXSLB+EX_DAR(r13) |
| 657 | std r4,_DAR(r1) | 657 | std r4,_DAR(r1) |
| 658 | addi r3,r1,STACK_FRAME_OVERHEAD | 658 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 659 | BEGIN_MMU_FTR_SECTION | ||
| 660 | /* HPT case, do SLB fault */ | ||
| 659 | bl do_slb_fault | 661 | bl do_slb_fault |
| 660 | cmpdi r3,0 | 662 | cmpdi r3,0 |
| 661 | bne- 1f | 663 | bne- 1f |
| 662 | b fast_exception_return | 664 | b fast_exception_return |
| 663 | 1: /* Error case */ | 665 | 1: /* Error case */ |
| 666 | MMU_FTR_SECTION_ELSE | ||
| 667 | /* Radix case, access is outside page table range */ | ||
| 668 | li r3,-EFAULT | ||
| 669 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) | ||
| 664 | std r3,RESULT(r1) | 670 | std r3,RESULT(r1) |
| 665 | bl save_nvgprs | 671 | bl save_nvgprs |
| 666 | RECONCILE_IRQ_STATE(r10, r11) | 672 | RECONCILE_IRQ_STATE(r10, r11) |
| @@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common) | |||
| 705 | EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) | 711 | EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) |
| 706 | ld r4,_NIP(r1) | 712 | ld r4,_NIP(r1) |
| 707 | addi r3,r1,STACK_FRAME_OVERHEAD | 713 | addi r3,r1,STACK_FRAME_OVERHEAD |
| 714 | BEGIN_MMU_FTR_SECTION | ||
| 715 | /* HPT case, do SLB fault */ | ||
| 708 | bl do_slb_fault | 716 | bl do_slb_fault |
| 709 | cmpdi r3,0 | 717 | cmpdi r3,0 |
| 710 | bne- 1f | 718 | bne- 1f |
| 711 | b fast_exception_return | 719 | b fast_exception_return |
| 712 | 1: /* Error case */ | 720 | 1: /* Error case */ |
| 721 | MMU_FTR_SECTION_ELSE | ||
| 722 | /* Radix case, access is outside page table range */ | ||
| 723 | li r3,-EFAULT | ||
| 724 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) | ||
| 713 | std r3,RESULT(r1) | 725 | std r3,RESULT(r1) |
| 714 | bl save_nvgprs | 726 | bl save_nvgprs |
| 715 | RECONCILE_IRQ_STATE(r10, r11) | 727 | RECONCILE_IRQ_STATE(r10, r11) |
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 48051c8977c5..e25b615e9f9e 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
| @@ -851,10 +851,6 @@ __secondary_start: | |||
| 851 | tophys(r4,r2) | 851 | tophys(r4,r2) |
| 852 | addi r4,r4,THREAD /* phys address of our thread_struct */ | 852 | addi r4,r4,THREAD /* phys address of our thread_struct */ |
| 853 | mtspr SPRN_SPRG_THREAD,r4 | 853 | mtspr SPRN_SPRG_THREAD,r4 |
| 854 | #ifdef CONFIG_PPC_RTAS | ||
| 855 | li r3,0 | ||
| 856 | stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ | ||
| 857 | #endif | ||
| 858 | lis r4, (swapper_pg_dir - PAGE_OFFSET)@h | 854 | lis r4, (swapper_pg_dir - PAGE_OFFSET)@h |
| 859 | ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l | 855 | ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l |
| 860 | mtspr SPRN_SPRG_PGDIR, r4 | 856 | mtspr SPRN_SPRG_PGDIR, r4 |
| @@ -941,10 +937,6 @@ start_here: | |||
| 941 | tophys(r4,r2) | 937 | tophys(r4,r2) |
| 942 | addi r4,r4,THREAD /* init task's THREAD */ | 938 | addi r4,r4,THREAD /* init task's THREAD */ |
| 943 | mtspr SPRN_SPRG_THREAD,r4 | 939 | mtspr SPRN_SPRG_THREAD,r4 |
| 944 | #ifdef CONFIG_PPC_RTAS | ||
| 945 | li r3,0 | ||
| 946 | stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ | ||
| 947 | #endif | ||
| 948 | lis r4, (swapper_pg_dir - PAGE_OFFSET)@h | 940 | lis r4, (swapper_pg_dir - PAGE_OFFSET)@h |
| 949 | ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l | 941 | ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l |
| 950 | mtspr SPRN_SPRG_PGDIR, r4 | 942 | mtspr SPRN_SPRG_PGDIR, r4 |
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 683b5b3805bd..cd381e2291df 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/export.h> | 24 | #include <linux/export.h> |
| 25 | #include <linux/kmemleak.h> | ||
| 25 | #include <linux/kvm_para.h> | 26 | #include <linux/kvm_para.h> |
| 26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 27 | #include <linux/of.h> | 28 | #include <linux/of.h> |
| @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void) | |||
| 712 | 713 | ||
| 713 | static __init void kvm_free_tmp(void) | 714 | static __init void kvm_free_tmp(void) |
| 714 | { | 715 | { |
| 716 | /* | ||
| 717 | * Inform kmemleak about the hole in the .bss section since the | ||
| 718 | * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. | ||
| 719 | */ | ||
| 720 | kmemleak_free_part(&kvm_tmp[kvm_tmp_index], | ||
| 721 | ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); | ||
| 715 | free_reserved_area(&kvm_tmp[kvm_tmp_index], | 722 | free_reserved_area(&kvm_tmp[kvm_tmp_index], |
| 716 | &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); | 723 | &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); |
| 717 | } | 724 | } |
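
kmemleak_free_part() here tells kmemleak to stop tracking the tail of kvm_tmp before free_reserved_area() hands those pages back to the page allocator; otherwise the scanner could touch pages that DEBUG_PAGEALLOC has since unmapped. A hedged sketch of the same pattern on an illustrative static buffer (kernel context; scratch, used and release_unused_tail are made-up names):

	#include <linux/kmemleak.h>
	#include <linux/mm.h>

	static char scratch[4 * PAGE_SIZE];	/* illustrative .bss buffer */
	static size_t used;			/* bytes actually kept */

	static void release_unused_tail(void)
	{
		/* stop kmemleak from scanning the region about to be freed */
		kmemleak_free_part(&scratch[used], sizeof(scratch) - used);
		/* return the unused pages to the page allocator */
		free_reserved_area(&scratch[used], &scratch[sizeof(scratch)],
				   -1, NULL);
	}
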
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index b18abb0c3dae..00f5a63c8d9a 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl | |||
| @@ -505,3 +505,7 @@ | |||
| 505 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 | 505 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 |
| 506 | 422 32 futex_time64 sys_futex sys_futex | 506 | 422 32 futex_time64 sys_futex sys_futex |
| 507 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval | 507 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval |
| 508 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 509 | 425 common io_uring_setup sys_io_uring_setup | ||
| 510 | 426 common io_uring_enter sys_io_uring_enter | ||
| 511 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 1e0bc5955a40..afd516b572f8 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S | |||
| @@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) | |||
| 98 | * can be used, r7 contains NSEC_PER_SEC. | 98 | * can be used, r7 contains NSEC_PER_SEC. |
| 99 | */ | 99 | */ |
| 100 | 100 | ||
| 101 | lwz r5,WTOM_CLOCK_SEC(r9) | 101 | lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9) |
| 102 | lwz r6,WTOM_CLOCK_NSEC(r9) | 102 | lwz r6,WTOM_CLOCK_NSEC(r9) |
| 103 | 103 | ||
| 104 | /* We now have our offset in r5,r6. We create a fake dependency | 104 | /* We now have our offset in r5,r6. We create a fake dependency |
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig new file mode 100644 index 000000000000..1a911ed8e772 --- /dev/null +++ b/arch/riscv/configs/rv32_defconfig | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | CONFIG_SYSVIPC=y | ||
| 2 | CONFIG_POSIX_MQUEUE=y | ||
| 3 | CONFIG_IKCONFIG=y | ||
| 4 | CONFIG_IKCONFIG_PROC=y | ||
| 5 | CONFIG_CGROUPS=y | ||
| 6 | CONFIG_CGROUP_SCHED=y | ||
| 7 | CONFIG_CFS_BANDWIDTH=y | ||
| 8 | CONFIG_CGROUP_BPF=y | ||
| 9 | CONFIG_NAMESPACES=y | ||
| 10 | CONFIG_USER_NS=y | ||
| 11 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 12 | CONFIG_BLK_DEV_INITRD=y | ||
| 13 | CONFIG_EXPERT=y | ||
| 14 | CONFIG_BPF_SYSCALL=y | ||
| 15 | CONFIG_ARCH_RV32I=y | ||
| 16 | CONFIG_SMP=y | ||
| 17 | CONFIG_MODULES=y | ||
| 18 | CONFIG_MODULE_UNLOAD=y | ||
| 19 | CONFIG_NET=y | ||
| 20 | CONFIG_PACKET=y | ||
| 21 | CONFIG_UNIX=y | ||
| 22 | CONFIG_INET=y | ||
| 23 | CONFIG_IP_MULTICAST=y | ||
| 24 | CONFIG_IP_ADVANCED_ROUTER=y | ||
| 25 | CONFIG_IP_PNP=y | ||
| 26 | CONFIG_IP_PNP_DHCP=y | ||
| 27 | CONFIG_IP_PNP_BOOTP=y | ||
| 28 | CONFIG_IP_PNP_RARP=y | ||
| 29 | CONFIG_NETLINK_DIAG=y | ||
| 30 | CONFIG_PCI=y | ||
| 31 | CONFIG_PCIEPORTBUS=y | ||
| 32 | CONFIG_PCI_HOST_GENERIC=y | ||
| 33 | CONFIG_PCIE_XILINX=y | ||
| 34 | CONFIG_DEVTMPFS=y | ||
| 35 | CONFIG_BLK_DEV_LOOP=y | ||
| 36 | CONFIG_VIRTIO_BLK=y | ||
| 37 | CONFIG_BLK_DEV_SD=y | ||
| 38 | CONFIG_BLK_DEV_SR=y | ||
| 39 | CONFIG_ATA=y | ||
| 40 | CONFIG_SATA_AHCI=y | ||
| 41 | CONFIG_SATA_AHCI_PLATFORM=y | ||
| 42 | CONFIG_NETDEVICES=y | ||
| 43 | CONFIG_VIRTIO_NET=y | ||
| 44 | CONFIG_MACB=y | ||
| 45 | CONFIG_E1000E=y | ||
| 46 | CONFIG_R8169=y | ||
| 47 | CONFIG_MICROSEMI_PHY=y | ||
| 48 | CONFIG_INPUT_MOUSEDEV=y | ||
| 49 | CONFIG_SERIAL_8250=y | ||
| 50 | CONFIG_SERIAL_8250_CONSOLE=y | ||
| 51 | CONFIG_SERIAL_OF_PLATFORM=y | ||
| 52 | CONFIG_SERIAL_EARLYCON_RISCV_SBI=y | ||
| 53 | CONFIG_HVC_RISCV_SBI=y | ||
| 54 | # CONFIG_PTP_1588_CLOCK is not set | ||
| 55 | CONFIG_DRM=y | ||
| 56 | CONFIG_DRM_RADEON=y | ||
| 57 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
| 58 | CONFIG_USB=y | ||
| 59 | CONFIG_USB_XHCI_HCD=y | ||
| 60 | CONFIG_USB_XHCI_PLATFORM=y | ||
| 61 | CONFIG_USB_EHCI_HCD=y | ||
| 62 | CONFIG_USB_EHCI_HCD_PLATFORM=y | ||
| 63 | CONFIG_USB_OHCI_HCD=y | ||
| 64 | CONFIG_USB_OHCI_HCD_PLATFORM=y | ||
| 65 | CONFIG_USB_STORAGE=y | ||
| 66 | CONFIG_USB_UAS=y | ||
| 67 | CONFIG_VIRTIO_MMIO=y | ||
| 68 | CONFIG_SIFIVE_PLIC=y | ||
| 69 | CONFIG_EXT4_FS=y | ||
| 70 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
| 71 | CONFIG_AUTOFS4_FS=y | ||
| 72 | CONFIG_MSDOS_FS=y | ||
| 73 | CONFIG_VFAT_FS=y | ||
| 74 | CONFIG_TMPFS=y | ||
| 75 | CONFIG_TMPFS_POSIX_ACL=y | ||
| 76 | CONFIG_NFS_FS=y | ||
| 77 | CONFIG_NFS_V4=y | ||
| 78 | CONFIG_NFS_V4_1=y | ||
| 79 | CONFIG_NFS_V4_2=y | ||
| 80 | CONFIG_ROOT_NFS=y | ||
| 81 | CONFIG_CRYPTO_USER_API_HASH=y | ||
| 82 | CONFIG_CRYPTO_DEV_VIRTIO=y | ||
| 83 | CONFIG_PRINTK_TIME=y | ||
| 84 | # CONFIG_RCU_TRACE is not set | ||
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h index 57afe604b495..c207f6634b91 100644 --- a/arch/riscv/include/asm/fixmap.h +++ b/arch/riscv/include/asm/fixmap.h | |||
| @@ -26,7 +26,7 @@ enum fixed_addresses { | |||
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) | 28 | #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) |
| 29 | #define FIXADDR_TOP (PAGE_OFFSET) | 29 | #define FIXADDR_TOP (VMALLOC_START) |
| 30 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) | 30 | #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) |
| 31 | 31 | ||
| 32 | #define FIXMAP_PAGE_IO PAGE_KERNEL | 32 | #define FIXMAP_PAGE_IO PAGE_KERNEL |
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index bba3da6ef157..a3d5273ded7c 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h | |||
| @@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 72 | 72 | ||
| 73 | static inline void syscall_get_arguments(struct task_struct *task, | 73 | static inline void syscall_get_arguments(struct task_struct *task, |
| 74 | struct pt_regs *regs, | 74 | struct pt_regs *regs, |
| 75 | unsigned int i, unsigned int n, | ||
| 76 | unsigned long *args) | 75 | unsigned long *args) |
| 77 | { | 76 | { |
| 78 | BUG_ON(i + n > 6); | 77 | args[0] = regs->orig_a0; |
| 79 | if (i == 0) { | 78 | args++; |
| 80 | args[0] = regs->orig_a0; | 79 | memcpy(args, &regs->a1, 5 * sizeof(args[0])); |
| 81 | args++; | ||
| 82 | i++; | ||
| 83 | n--; | ||
| 84 | } | ||
| 85 | memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); | ||
| 86 | } | 80 | } |
| 87 | 81 | ||
| 88 | static inline void syscall_set_arguments(struct task_struct *task, | 82 | static inline void syscall_set_arguments(struct task_struct *task, |
| 89 | struct pt_regs *regs, | 83 | struct pt_regs *regs, |
| 90 | unsigned int i, unsigned int n, | ||
| 91 | const unsigned long *args) | 84 | const unsigned long *args) |
| 92 | { | 85 | { |
| 93 | BUG_ON(i + n > 6); | 86 | regs->orig_a0 = args[0]; |
| 94 | if (i == 0) { | 87 | args++; |
| 95 | regs->orig_a0 = args[0]; | 88 | memcpy(&regs->a1, args, 5 * sizeof(regs->a1)); |
| 96 | args++; | ||
| 97 | i++; | ||
| 98 | n--; | ||
| 99 | } | ||
| 100 | memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); | ||
| 101 | } | 89 | } |
| 102 | 90 | ||
| 103 | static inline int syscall_get_arch(void) | 91 | static inline int syscall_get_arch(void) |
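
Besides dropping (i, n), the riscv hunk removes a latent pointer-arithmetic bug: the old expression &regs->a1 + i * sizeof(regs->a1) scales the offset twice, since adding an integer to an unsigned long pointer already advances by whole registers. A small stand-alone illustration (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long regs[32] = { 0 };
		unsigned long *a1 = &regs[1];
		unsigned int i = 2;

		/* pointer arithmetic already scales by the element size ... */
		printf("a1 + i             -> regs[%td]\n", (a1 + i) - regs);
		/* ... so multiplying by sizeof() on top scales twice: regs[17] */
		printf("a1 + i*sizeof(*a1) -> regs[%td]\n",
		       (a1 + i * sizeof(*a1)) - regs);
		return 0;
	}
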
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index a00168b980d2..fb53a8089e76 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h | |||
| @@ -300,7 +300,7 @@ do { \ | |||
| 300 | " .balign 4\n" \ | 300 | " .balign 4\n" \ |
| 301 | "4:\n" \ | 301 | "4:\n" \ |
| 302 | " li %0, %6\n" \ | 302 | " li %0, %6\n" \ |
| 303 | " jump 2b, %1\n" \ | 303 | " jump 3b, %1\n" \ |
| 304 | " .previous\n" \ | 304 | " .previous\n" \ |
| 305 | " .section __ex_table,\"a\"\n" \ | 305 | " .section __ex_table,\"a\"\n" \ |
| 306 | " .balign " RISCV_SZPTR "\n" \ | 306 | " .balign " RISCV_SZPTR "\n" \ |
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index f13f7f276639..598568168d35 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | ifdef CONFIG_FTRACE | 5 | ifdef CONFIG_FTRACE |
| 6 | CFLAGS_REMOVE_ftrace.o = -pg | 6 | CFLAGS_REMOVE_ftrace.o = -pg |
| 7 | CFLAGS_REMOVE_setup.o = -pg | ||
| 8 | endif | 7 | endif |
| 9 | 8 | ||
| 10 | extra-y += head.o | 9 | extra-y += head.o |
| @@ -29,8 +28,6 @@ obj-y += vdso.o | |||
| 29 | obj-y += cacheinfo.o | 28 | obj-y += cacheinfo.o |
| 30 | obj-y += vdso/ | 29 | obj-y += vdso/ |
| 31 | 30 | ||
| 32 | CFLAGS_setup.o := -mcmodel=medany | ||
| 33 | |||
| 34 | obj-$(CONFIG_FPU) += fpu.o | 31 | obj-$(CONFIG_FPU) += fpu.o |
| 35 | obj-$(CONFIG_SMP) += smpboot.o | 32 | obj-$(CONFIG_SMP) += smpboot.o |
| 36 | obj-$(CONFIG_SMP) += smp.o | 33 | obj-$(CONFIG_SMP) += smp.o |
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 7dd308129b40..2872edce894d 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c | |||
| @@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, | |||
| 141 | { | 141 | { |
| 142 | s32 hi20; | 142 | s32 hi20; |
| 143 | 143 | ||
| 144 | if (IS_ENABLED(CMODEL_MEDLOW)) { | 144 | if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { |
| 145 | pr_err( | 145 | pr_err( |
| 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 147 | me->name, (long long)v, location); | 147 | me->name, (long long)v, location); |
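
The one-token fix in module.c is easy to miss because IS_ENABLED() never complains about unknown symbols: it works by token pasting, so a name without the CONFIG_ prefix simply expands to 0 and the branch is silently dead. Illustrative sketch using the real <linux/kconfig.h> helper (kernel context; cmodel_check_sketch is a made-up name):

	#include <linux/kconfig.h>

	void cmodel_check_sketch(void)
	{
		if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
			/* taken when the kernel is built with CONFIG_CMODEL_MEDLOW=y */
		}

		if (IS_ENABLED(CMODEL_MEDLOW)) {
			/* never taken: CMODEL_MEDLOW is not a Kconfig symbol,
			 * so this compiles to if (0) without any warning */
		}
	}
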
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index ecb654f6a79e..540a331d1376 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
| @@ -48,14 +48,6 @@ struct screen_info screen_info = { | |||
| 48 | }; | 48 | }; |
| 49 | #endif | 49 | #endif |
| 50 | 50 | ||
| 51 | unsigned long va_pa_offset; | ||
| 52 | EXPORT_SYMBOL(va_pa_offset); | ||
| 53 | unsigned long pfn_base; | ||
| 54 | EXPORT_SYMBOL(pfn_base); | ||
| 55 | |||
| 56 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; | ||
| 57 | EXPORT_SYMBOL(empty_zero_page); | ||
| 58 | |||
| 59 | /* The lucky hart to first increment this variable will boot the other cores */ | 51 | /* The lucky hart to first increment this variable will boot the other cores */ |
| 60 | atomic_t hart_lottery; | 52 | atomic_t hart_lottery; |
| 61 | unsigned long boot_cpu_hartid; | 53 | unsigned long boot_cpu_hartid; |
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index eb22ab49b3e0..b68aac701803 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile | |||
| @@ -1,3 +1,9 @@ | |||
| 1 | |||
| 2 | CFLAGS_init.o := -mcmodel=medany | ||
| 3 | ifdef CONFIG_FTRACE | ||
| 4 | CFLAGS_REMOVE_init.o = -pg | ||
| 5 | endif | ||
| 6 | |||
| 1 | obj-y += init.o | 7 | obj-y += init.o |
| 2 | obj-y += fault.o | 8 | obj-y += fault.o |
| 3 | obj-y += extable.o | 9 | obj-y += extable.o |
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index b379a75ac6a6..bc7b77e34d09 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c | |||
| @@ -25,6 +25,10 @@ | |||
| 25 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
| 26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 27 | 27 | ||
| 28 | unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] | ||
| 29 | __page_aligned_bss; | ||
| 30 | EXPORT_SYMBOL(empty_zero_page); | ||
| 31 | |||
| 28 | static void __init zone_sizes_init(void) | 32 | static void __init zone_sizes_init(void) |
| 29 | { | 33 | { |
| 30 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; | 34 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; |
| @@ -117,6 +121,14 @@ void __init setup_bootmem(void) | |||
| 117 | */ | 121 | */ |
| 118 | memblock_reserve(reg->base, vmlinux_end - reg->base); | 122 | memblock_reserve(reg->base, vmlinux_end - reg->base); |
| 119 | mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); | 123 | mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); |
| 124 | |||
| 125 | /* | ||
| 126 | * Remove memblock from the end of usable area to the | ||
| 127 | * end of region | ||
| 128 | */ | ||
| 129 | if (reg->base + mem_size < end) | ||
| 130 | memblock_remove(reg->base + mem_size, | ||
| 131 | end - reg->base - mem_size); | ||
| 120 | } | 132 | } |
| 121 | } | 133 | } |
| 122 | BUG_ON(mem_size == 0); | 134 | BUG_ON(mem_size == 0); |
| @@ -143,6 +155,11 @@ void __init setup_bootmem(void) | |||
| 143 | } | 155 | } |
| 144 | } | 156 | } |
| 145 | 157 | ||
| 158 | unsigned long va_pa_offset; | ||
| 159 | EXPORT_SYMBOL(va_pa_offset); | ||
| 160 | unsigned long pfn_base; | ||
| 161 | EXPORT_SYMBOL(pfn_base); | ||
| 162 | |||
| 146 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; | 163 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; |
| 147 | pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); | 164 | pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); |
| 148 | 165 | ||
| @@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) | |||
| 172 | } | 189 | } |
| 173 | } | 190 | } |
| 174 | 191 | ||
| 192 | /* | ||
| 193 | * setup_vm() is called from head.S with MMU-off. | ||
| 194 | * | ||
| 195 | * Following requirements should be honoured for setup_vm() to work | ||
| 196 | * correctly: | ||
| 197 | * 1) It should use PC-relative addressing for accessing kernel symbols. | ||
| 198 | * To achieve this we always use GCC cmodel=medany. | ||
| 199 | * 2) The compiler instrumentation for FTRACE will not work for setup_vm() | ||
| 200 | * so disable compiler instrumentation when FTRACE is enabled. | ||
| 201 | * | ||
| 202 | * Currently, the above requirements are honoured by using custom CFLAGS | ||
| 203 | * for init.o in mm/Makefile. | ||
| 204 | */ | ||
| 205 | |||
| 206 | #ifndef __riscv_cmodel_medany | ||
| 207 | #error "setup_vm() is called from head.S before relocate so it should " | ||
| 208 | "not use absolute addressing." | ||
| 209 | #endif | ||
| 210 | |||
| 175 | asmlinkage void __init setup_vm(void) | 211 | asmlinkage void __init setup_vm(void) |
| 176 | { | 212 | { |
| 177 | extern char _start; | 213 | extern char _start; |
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c index 4cb771ba13fa..5d316fe40480 100644 --- a/arch/s390/boot/mem_detect.c +++ b/arch/s390/boot/mem_detect.c | |||
| @@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void) | |||
| 25 | { | 25 | { |
| 26 | unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64)); | 26 | unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64)); |
| 27 | 27 | ||
| 28 | if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && | 28 | if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && |
| 29 | INITRD_START < offset + ENTRIES_EXTENDED_MAX) | 29 | INITRD_START < offset + ENTRIES_EXTENDED_MAX) |
| 30 | offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64)); | 30 | offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64)); |
| 31 | 31 | ||
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 96f9a9151fde..59c3e91f2cdb 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h | |||
| @@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 56 | 56 | ||
| 57 | static inline void syscall_get_arguments(struct task_struct *task, | 57 | static inline void syscall_get_arguments(struct task_struct *task, |
| 58 | struct pt_regs *regs, | 58 | struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, | ||
| 60 | unsigned long *args) | 59 | unsigned long *args) |
| 61 | { | 60 | { |
| 62 | unsigned long mask = -1UL; | 61 | unsigned long mask = -1UL; |
| 62 | unsigned int n = 6; | ||
| 63 | 63 | ||
| 64 | /* | ||
| 65 | * No arguments for this syscall, there's nothing to do. | ||
| 66 | */ | ||
| 67 | if (!n) | ||
| 68 | return; | ||
| 69 | |||
| 70 | BUG_ON(i + n > 6); | ||
| 71 | #ifdef CONFIG_COMPAT | 64 | #ifdef CONFIG_COMPAT |
| 72 | if (test_tsk_thread_flag(task, TIF_31BIT)) | 65 | if (test_tsk_thread_flag(task, TIF_31BIT)) |
| 73 | mask = 0xffffffff; | 66 | mask = 0xffffffff; |
| 74 | #endif | 67 | #endif |
| 75 | while (n-- > 0) | 68 | while (n-- > 0) |
| 76 | if (i + n > 0) | 69 | if (n > 0) |
| 77 | args[n] = regs->gprs[2 + i + n] & mask; | 70 | args[n] = regs->gprs[2 + n] & mask; |
| 78 | if (i == 0) | 71 | |
| 79 | args[0] = regs->orig_gpr2 & mask; | 72 | args[0] = regs->orig_gpr2 & mask; |
| 80 | } | 73 | } |
| 81 | 74 | ||
| 82 | static inline void syscall_set_arguments(struct task_struct *task, | 75 | static inline void syscall_set_arguments(struct task_struct *task, |
| 83 | struct pt_regs *regs, | 76 | struct pt_regs *regs, |
| 84 | unsigned int i, unsigned int n, | ||
| 85 | const unsigned long *args) | 77 | const unsigned long *args) |
| 86 | { | 78 | { |
| 87 | BUG_ON(i + n > 6); | 79 | unsigned int n = 6; |
| 80 | |||
| 88 | while (n-- > 0) | 81 | while (n-- > 0) |
| 89 | if (i + n > 0) | 82 | if (n > 0) |
| 90 | regs->gprs[2 + i + n] = args[n]; | 83 | regs->gprs[2 + n] = args[n]; |
| 91 | if (i == 0) | 84 | regs->orig_gpr2 = args[0]; |
| 92 | regs->orig_gpr2 = args[0]; | ||
| 93 | } | 85 | } |
| 94 | 86 | ||
| 95 | static inline int syscall_get_arch(void) | 87 | static inline int syscall_get_arch(void) |
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c index 594464f2129d..0da378e2eb25 100644 --- a/arch/s390/kernel/fpu.c +++ b/arch/s390/kernel/fpu.c | |||
| @@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags) | |||
| 23 | 23 | ||
| 24 | if (flags & KERNEL_FPC) | 24 | if (flags & KERNEL_FPC) |
| 25 | /* Save floating point control */ | 25 | /* Save floating point control */ |
| 26 | asm volatile("stfpc %0" : "=m" (state->fpc)); | 26 | asm volatile("stfpc %0" : "=Q" (state->fpc)); |
| 27 | 27 | ||
| 28 | if (!MACHINE_HAS_VX) { | 28 | if (!MACHINE_HAS_VX) { |
| 29 | if (flags & KERNEL_VXR_V0V7) { | 29 | if (flags & KERNEL_VXR_V0V7) { |
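
The constraint change in fpu.c (and in vtime.c further down) is about what the instruction can actually encode: "m" lets the compiler pick any addressing mode, while the s390 "Q" constraint restricts the operand to a base register plus short displacement, which is all STFPC and STPT accept. A minimal sketch of the fixed form (s390 inline assembly shown only to illustrate the constraint, not a drop-in kernel helper):

	static inline void store_fp_control(unsigned int *fpc)
	{
		/* "=Q": memory operand without index register, short displacement */
		asm volatile("stfpc %0" : "=Q" (*fpc));
	}
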
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 02579f95f391..061418f787c3 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl | |||
| @@ -426,3 +426,7 @@ | |||
| 426 | 421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64 | 426 | 421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64 |
| 427 | 422 32 futex_time64 - sys_futex | 427 | 422 32 futex_time64 - sys_futex |
| 428 | 423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval | 428 | 423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval |
| 429 | 424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal | ||
| 430 | 425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup | ||
| 431 | 426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter | ||
| 432 | 427 common io_uring_register sys_io_uring_register sys_io_uring_register | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index a69a0911ed0e..c475ca49cfc6 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
| @@ -37,7 +37,7 @@ static inline u64 get_vtimer(void) | |||
| 37 | { | 37 | { |
| 38 | u64 timer; | 38 | u64 timer; |
| 39 | 39 | ||
| 40 | asm volatile("stpt %0" : "=m" (timer)); | 40 | asm volatile("stpt %0" : "=Q" (timer)); |
| 41 | return timer; | 41 | return timer; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| @@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires) | |||
| 48 | asm volatile( | 48 | asm volatile( |
| 49 | " stpt %0\n" /* Store current cpu timer value */ | 49 | " stpt %0\n" /* Store current cpu timer value */ |
| 50 | " spt %1" /* Set new value imm. afterwards */ | 50 | " spt %1" /* Set new value imm. afterwards */ |
| 51 | : "=m" (timer) : "m" (expires)); | 51 | : "=Q" (timer) : "Q" (expires)); |
| 52 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; | 52 | S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; |
| 53 | S390_lowcore.last_update_timer = expires; | 53 | S390_lowcore.last_update_timer = expires; |
| 54 | } | 54 | } |
| @@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk) | |||
| 135 | #else | 135 | #else |
| 136 | " stck %1" /* Store current tod clock value */ | 136 | " stck %1" /* Store current tod clock value */ |
| 137 | #endif | 137 | #endif |
| 138 | : "=m" (S390_lowcore.last_update_timer), | 138 | : "=Q" (S390_lowcore.last_update_timer), |
| 139 | "=m" (S390_lowcore.last_update_clock)); | 139 | "=Q" (S390_lowcore.last_update_clock)); |
| 140 | clock = S390_lowcore.last_update_clock - clock; | 140 | clock = S390_lowcore.last_update_clock - clock; |
| 141 | timer -= S390_lowcore.last_update_timer; | 141 | timer -= S390_lowcore.last_update_timer; |
| 142 | 142 | ||
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 958f46da3a79..d91065e81a4e 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c | |||
| @@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = { | |||
| 164 | 164 | ||
| 165 | struct sh_clk_ops; | 165 | struct sh_clk_ops; |
| 166 | 166 | ||
| 167 | void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) | 167 | void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx) |
| 168 | { | 168 | { |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | void __init plat_irq_setup(void) | 171 | void __init __weak plat_irq_setup(void) |
| 172 | { | 172 | { |
| 173 | } | 173 | } |
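
Marking the generic stubs __weak lets a board file provide real implementations without touching this file: the linker keeps the strong definition and discards the weak default. Hedged sketch of the pattern, with the two definitions living in separate object files (kernel context; board_foo.c is an illustrative file name):

	/* of-generic.c: weak default, used when no board file defines the symbol */
	void __init __weak plat_irq_setup(void)
	{
		/* nothing to do on a purely DT-driven board */
	}

	/* board_foo.c (separate object file): strong definition wins at link time */
	void __init plat_irq_setup(void)
	{
		/* board-specific interrupt controller setup would go here */
	}
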
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6e118799831c..8c9d7e5e5dcc 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h | |||
| @@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 48 | 48 | ||
| 49 | static inline void syscall_get_arguments(struct task_struct *task, | 49 | static inline void syscall_get_arguments(struct task_struct *task, |
| 50 | struct pt_regs *regs, | 50 | struct pt_regs *regs, |
| 51 | unsigned int i, unsigned int n, | ||
| 52 | unsigned long *args) | 51 | unsigned long *args) |
| 53 | { | 52 | { |
| 54 | /* | ||
| 55 | * Do this simply for now. If we need to start supporting | ||
| 56 | * fetching arguments from arbitrary indices, this will need some | ||
| 57 | * extra logic. Presently there are no in-tree users that depend | ||
| 58 | * on this behaviour. | ||
| 59 | */ | ||
| 60 | BUG_ON(i); | ||
| 61 | 53 | ||
| 62 | /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ | 54 | /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ |
| 63 | switch (n) { | 55 | args[5] = regs->regs[1]; |
| 64 | case 6: args[5] = regs->regs[1]; | 56 | args[4] = regs->regs[0]; |
| 65 | case 5: args[4] = regs->regs[0]; | 57 | args[3] = regs->regs[7]; |
| 66 | case 4: args[3] = regs->regs[7]; | 58 | args[2] = regs->regs[6]; |
| 67 | case 3: args[2] = regs->regs[6]; | 59 | args[1] = regs->regs[5]; |
| 68 | case 2: args[1] = regs->regs[5]; | 60 | args[0] = regs->regs[4]; |
| 69 | case 1: args[0] = regs->regs[4]; | ||
| 70 | case 0: | ||
| 71 | break; | ||
| 72 | default: | ||
| 73 | BUG(); | ||
| 74 | } | ||
| 75 | } | 61 | } |
| 76 | 62 | ||
| 77 | static inline void syscall_set_arguments(struct task_struct *task, | 63 | static inline void syscall_set_arguments(struct task_struct *task, |
| 78 | struct pt_regs *regs, | 64 | struct pt_regs *regs, |
| 79 | unsigned int i, unsigned int n, | ||
| 80 | const unsigned long *args) | 65 | const unsigned long *args) |
| 81 | { | 66 | { |
| 82 | /* Same note as above applies */ | 67 | regs->regs[1] = args[5]; |
| 83 | BUG_ON(i); | 68 | regs->regs[0] = args[4]; |
| 84 | 69 | regs->regs[7] = args[3]; | |
| 85 | switch (n) { | 70 | regs->regs[6] = args[2]; |
| 86 | case 6: regs->regs[1] = args[5]; | 71 | regs->regs[5] = args[1]; |
| 87 | case 5: regs->regs[0] = args[4]; | 72 | regs->regs[4] = args[0]; |
| 88 | case 4: regs->regs[7] = args[3]; | ||
| 89 | case 3: regs->regs[6] = args[2]; | ||
| 90 | case 2: regs->regs[5] = args[1]; | ||
| 91 | case 1: regs->regs[4] = args[0]; | ||
| 92 | break; | ||
| 93 | default: | ||
| 94 | BUG(); | ||
| 95 | } | ||
| 96 | } | 73 | } |
| 97 | 74 | ||
| 98 | static inline int syscall_get_arch(void) | 75 | static inline int syscall_get_arch(void) |
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index 43882580c7f9..22fad97da066 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h | |||
| @@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 47 | 47 | ||
| 48 | static inline void syscall_get_arguments(struct task_struct *task, | 48 | static inline void syscall_get_arguments(struct task_struct *task, |
| 49 | struct pt_regs *regs, | 49 | struct pt_regs *regs, |
| 50 | unsigned int i, unsigned int n, | ||
| 51 | unsigned long *args) | 50 | unsigned long *args) |
| 52 | { | 51 | { |
| 53 | BUG_ON(i + n > 6); | 52 | memcpy(args, &regs->regs[2], 6 * sizeof(args[0])); |
| 54 | memcpy(args, &regs->regs[2 + i], n * sizeof(args[0])); | ||
| 55 | } | 53 | } |
| 56 | 54 | ||
| 57 | static inline void syscall_set_arguments(struct task_struct *task, | 55 | static inline void syscall_set_arguments(struct task_struct *task, |
| 58 | struct pt_regs *regs, | 56 | struct pt_regs *regs, |
| 59 | unsigned int i, unsigned int n, | ||
| 60 | const unsigned long *args) | 57 | const unsigned long *args) |
| 61 | { | 58 | { |
| 62 | BUG_ON(i + n > 6); | 59 | memcpy(&regs->regs[2], args, 6 * sizeof(args[0])); |
| 63 | memcpy(&regs->regs[2 + i], args, n * sizeof(args[0])); | ||
| 64 | } | 60 | } |
| 65 | 61 | ||
| 66 | static inline int syscall_get_arch(void) | 62 | static inline int syscall_get_arch(void) |
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index bfda678576e4..480b057556ee 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl | |||
| @@ -426,3 +426,7 @@ | |||
| 426 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait | 426 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait |
| 427 | 422 common futex_time64 sys_futex | 427 | 422 common futex_time64 sys_futex |
| 428 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval | 428 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 429 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 430 | 425 common io_uring_setup sys_io_uring_setup | ||
| 431 | 426 common io_uring_enter sys_io_uring_enter | ||
| 432 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 053989e3f6a6..4d075434e816 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h | |||
| @@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 96 | 96 | ||
| 97 | static inline void syscall_get_arguments(struct task_struct *task, | 97 | static inline void syscall_get_arguments(struct task_struct *task, |
| 98 | struct pt_regs *regs, | 98 | struct pt_regs *regs, |
| 99 | unsigned int i, unsigned int n, | ||
| 100 | unsigned long *args) | 99 | unsigned long *args) |
| 101 | { | 100 | { |
| 102 | int zero_extend = 0; | 101 | int zero_extend = 0; |
| 103 | unsigned int j; | 102 | unsigned int j; |
| 103 | unsigned int n = 6; | ||
| 104 | 104 | ||
| 105 | #ifdef CONFIG_SPARC64 | 105 | #ifdef CONFIG_SPARC64 |
| 106 | if (test_tsk_thread_flag(task, TIF_32BIT)) | 106 | if (test_tsk_thread_flag(task, TIF_32BIT)) |
| @@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 108 | #endif | 108 | #endif |
| 109 | 109 | ||
| 110 | for (j = 0; j < n; j++) { | 110 | for (j = 0; j < n; j++) { |
| 111 | unsigned long val = regs->u_regs[UREG_I0 + i + j]; | 111 | unsigned long val = regs->u_regs[UREG_I0 + j]; |
| 112 | 112 | ||
| 113 | if (zero_extend) | 113 | if (zero_extend) |
| 114 | args[j] = (u32) val; | 114 | args[j] = (u32) val; |
| @@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
| 119 | 119 | ||
| 120 | static inline void syscall_set_arguments(struct task_struct *task, | 120 | static inline void syscall_set_arguments(struct task_struct *task, |
| 121 | struct pt_regs *regs, | 121 | struct pt_regs *regs, |
| 122 | unsigned int i, unsigned int n, | ||
| 123 | const unsigned long *args) | 122 | const unsigned long *args) |
| 124 | { | 123 | { |
| 125 | unsigned int j; | 124 | unsigned int i; |
| 126 | 125 | ||
| 127 | for (j = 0; j < n; j++) | 126 | for (i = 0; i < 6; i++) |
| 128 | regs->u_regs[UREG_I0 + i + j] = args[j]; | 127 | regs->u_regs[UREG_I0 + i] = args[i]; |
| 129 | } | 128 | } |
| 130 | 129 | ||
| 131 | static inline int syscall_get_arch(void) | 130 | static inline int syscall_get_arch(void) |
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index a8af6023c126..14b93c5564e3 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
| @@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns | |||
| 73 | p->npages = 0; | 73 | p->npages = 0; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static inline bool iommu_use_atu(struct iommu *iommu, u64 mask) | ||
| 77 | { | ||
| 78 | return iommu->atu && mask > DMA_BIT_MASK(32); | ||
| 79 | } | ||
| 80 | |||
| 76 | /* Interrupts must be disabled. */ | 81 | /* Interrupts must be disabled. */ |
| 77 | static long iommu_batch_flush(struct iommu_batch *p, u64 mask) | 82 | static long iommu_batch_flush(struct iommu_batch *p, u64 mask) |
| 78 | { | 83 | { |
| @@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask) | |||
| 92 | prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); | 97 | prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); |
| 93 | 98 | ||
| 94 | while (npages != 0) { | 99 | while (npages != 0) { |
| 95 | if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { | 100 | if (!iommu_use_atu(pbm->iommu, mask)) { |
| 96 | num = pci_sun4v_iommu_map(devhandle, | 101 | num = pci_sun4v_iommu_map(devhandle, |
| 97 | HV_PCI_TSBID(0, entry), | 102 | HV_PCI_TSBID(0, entry), |
| 98 | npages, | 103 | npages, |
| @@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
| 179 | unsigned long flags, order, first_page, npages, n; | 184 | unsigned long flags, order, first_page, npages, n; |
| 180 | unsigned long prot = 0; | 185 | unsigned long prot = 0; |
| 181 | struct iommu *iommu; | 186 | struct iommu *iommu; |
| 182 | struct atu *atu; | ||
| 183 | struct iommu_map_table *tbl; | 187 | struct iommu_map_table *tbl; |
| 184 | struct page *page; | 188 | struct page *page; |
| 185 | void *ret; | 189 | void *ret; |
| @@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
| 205 | memset((char *)first_page, 0, PAGE_SIZE << order); | 209 | memset((char *)first_page, 0, PAGE_SIZE << order); |
| 206 | 210 | ||
| 207 | iommu = dev->archdata.iommu; | 211 | iommu = dev->archdata.iommu; |
| 208 | atu = iommu->atu; | ||
| 209 | |||
| 210 | mask = dev->coherent_dma_mask; | 212 | mask = dev->coherent_dma_mask; |
| 211 | if (mask <= DMA_BIT_MASK(32) || !atu) | 213 | if (!iommu_use_atu(iommu, mask)) |
| 212 | tbl = &iommu->tbl; | 214 | tbl = &iommu->tbl; |
| 213 | else | 215 | else |
| 214 | tbl = &atu->tbl; | 216 | tbl = &iommu->atu->tbl; |
| 215 | 217 | ||
| 216 | entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, | 218 | entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, |
| 217 | (unsigned long)(-1), 0); | 219 | (unsigned long)(-1), 0); |
| @@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
| 333 | atu = iommu->atu; | 335 | atu = iommu->atu; |
| 334 | devhandle = pbm->devhandle; | 336 | devhandle = pbm->devhandle; |
| 335 | 337 | ||
| 336 | if (dvma <= DMA_BIT_MASK(32)) { | 338 | if (!iommu_use_atu(iommu, dvma)) { |
| 337 | tbl = &iommu->tbl; | 339 | tbl = &iommu->tbl; |
| 338 | iotsb_num = 0; /* we don't care for legacy iommu */ | 340 | iotsb_num = 0; /* we don't care for legacy iommu */ |
| 339 | } else { | 341 | } else { |
| @@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | |||
| 374 | npages >>= IO_PAGE_SHIFT; | 376 | npages >>= IO_PAGE_SHIFT; |
| 375 | 377 | ||
| 376 | mask = *dev->dma_mask; | 378 | mask = *dev->dma_mask; |
| 377 | if (mask <= DMA_BIT_MASK(32)) | 379 | if (!iommu_use_atu(iommu, mask)) |
| 378 | tbl = &iommu->tbl; | 380 | tbl = &iommu->tbl; |
| 379 | else | 381 | else |
| 380 | tbl = &atu->tbl; | 382 | tbl = &atu->tbl; |
| @@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
| 510 | IO_PAGE_SIZE) >> IO_PAGE_SHIFT; | 512 | IO_PAGE_SIZE) >> IO_PAGE_SHIFT; |
| 511 | 513 | ||
| 512 | mask = *dev->dma_mask; | 514 | mask = *dev->dma_mask; |
| 513 | if (mask <= DMA_BIT_MASK(32)) | 515 | if (!iommu_use_atu(iommu, mask)) |
| 514 | tbl = &iommu->tbl; | 516 | tbl = &iommu->tbl; |
| 515 | else | 517 | else |
| 516 | tbl = &atu->tbl; | 518 | tbl = &atu->tbl; |
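
Factoring the repeated "ATU present and mask wider than 32 bit" test into iommu_use_atu() routes the alloc, map and free paths through one predicate, which previously keyed only off the DMA address in the free path. With the helper, every table choice reduces to the same one-liner (kernel-context sketch; pick_tbl is an illustrative name, the fields match the diff above):

	static struct iommu_map_table *pick_tbl(struct iommu *iommu, u64 mask)
	{
		/* ATU handles only >32-bit DMA masks, and only when it exists */
		return iommu_use_atu(iommu, mask) ? &iommu->atu->tbl : &iommu->tbl;
	}
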
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index b9a5a04b2d2c..a1dd24307b00 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl | |||
| @@ -469,3 +469,7 @@ | |||
| 469 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 | 469 | 421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 |
| 470 | 422 32 futex_time64 sys_futex sys_futex | 470 | 422 32 futex_time64 sys_futex sys_futex |
| 471 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval | 471 | 423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval |
| 472 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 473 | 425 common io_uring_setup sys_io_uring_setup | ||
| 474 | 426 common io_uring_enter sys_io_uring_enter | ||
| 475 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h index 9fb9cf8cd39a..98e50c50c12e 100644 --- a/arch/um/include/asm/syscall-generic.h +++ b/arch/um/include/asm/syscall-generic.h | |||
| @@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 53 | 53 | ||
| 54 | static inline void syscall_get_arguments(struct task_struct *task, | 54 | static inline void syscall_get_arguments(struct task_struct *task, |
| 55 | struct pt_regs *regs, | 55 | struct pt_regs *regs, |
| 56 | unsigned int i, unsigned int n, | ||
| 57 | unsigned long *args) | 56 | unsigned long *args) |
| 58 | { | 57 | { |
| 60 | const struct uml_pt_regs *r = &regs->regs; | 59 | const struct uml_pt_regs *r = &regs->regs; |
| 60 | 59 | ||
| 61 | switch (i) { | 60 | *args++ = UPT_SYSCALL_ARG1(r); |
| 62 | case 0: | 61 | *args++ = UPT_SYSCALL_ARG2(r); |
| 63 | if (!n--) | 62 | *args++ = UPT_SYSCALL_ARG3(r); |
| 64 | break; | 63 | *args++ = UPT_SYSCALL_ARG4(r); |
| 65 | *args++ = UPT_SYSCALL_ARG1(r); | 64 | *args++ = UPT_SYSCALL_ARG5(r); |
| 66 | case 1: | 65 | *args = UPT_SYSCALL_ARG6(r); |
| 67 | if (!n--) | ||
| 68 | break; | ||
| 69 | *args++ = UPT_SYSCALL_ARG2(r); | ||
| 70 | case 2: | ||
| 71 | if (!n--) | ||
| 72 | break; | ||
| 73 | *args++ = UPT_SYSCALL_ARG3(r); | ||
| 74 | case 3: | ||
| 75 | if (!n--) | ||
| 76 | break; | ||
| 77 | *args++ = UPT_SYSCALL_ARG4(r); | ||
| 78 | case 4: | ||
| 79 | if (!n--) | ||
| 80 | break; | ||
| 81 | *args++ = UPT_SYSCALL_ARG5(r); | ||
| 82 | case 5: | ||
| 83 | if (!n--) | ||
| 84 | break; | ||
| 85 | *args++ = UPT_SYSCALL_ARG6(r); | ||
| 86 | case 6: | ||
| 87 | if (!n--) | ||
| 88 | break; | ||
| 89 | default: | ||
| 90 | BUG(); | ||
| 91 | break; | ||
| 92 | } | ||
| 93 | } | 66 | } |
| 94 | 67 | ||
| 95 | static inline void syscall_set_arguments(struct task_struct *task, | 68 | static inline void syscall_set_arguments(struct task_struct *task, |
| 96 | struct pt_regs *regs, | 69 | struct pt_regs *regs, |
| 97 | unsigned int i, unsigned int n, | ||
| 98 | const unsigned long *args) | 70 | const unsigned long *args) |
| 99 | { | 71 | { |
| 100 | struct uml_pt_regs *r = ®s->regs; | 72 | struct uml_pt_regs *r = ®s->regs; |
| 101 | 73 | ||
| 102 | switch (i) { | 74 | UPT_SYSCALL_ARG1(r) = *args++; |
| 103 | case 0: | 75 | UPT_SYSCALL_ARG2(r) = *args++; |
| 104 | if (!n--) | 76 | UPT_SYSCALL_ARG3(r) = *args++; |
| 105 | break; | 77 | UPT_SYSCALL_ARG4(r) = *args++; |
| 106 | UPT_SYSCALL_ARG1(r) = *args++; | 78 | UPT_SYSCALL_ARG5(r) = *args++; |
| 107 | case 1: | 79 | UPT_SYSCALL_ARG6(r) = *args; |
| 108 | if (!n--) | ||
| 109 | break; | ||
| 110 | UPT_SYSCALL_ARG2(r) = *args++; | ||
| 111 | case 2: | ||
| 112 | if (!n--) | ||
| 113 | break; | ||
| 114 | UPT_SYSCALL_ARG3(r) = *args++; | ||
| 115 | case 3: | ||
| 116 | if (!n--) | ||
| 117 | break; | ||
| 118 | UPT_SYSCALL_ARG4(r) = *args++; | ||
| 119 | case 4: | ||
| 120 | if (!n--) | ||
| 121 | break; | ||
| 122 | UPT_SYSCALL_ARG5(r) = *args++; | ||
| 123 | case 5: | ||
| 124 | if (!n--) | ||
| 125 | break; | ||
| 126 | UPT_SYSCALL_ARG6(r) = *args++; | ||
| 127 | case 6: | ||
| 128 | if (!n--) | ||
| 129 | break; | ||
| 130 | default: | ||
| 131 | BUG(); | ||
| 132 | break; | ||
| 133 | } | ||
| 134 | } | 80 | } |
| 135 | 81 | ||
| 136 | /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ | 82 | /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ |
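The hunk above drops the (i, n) window parameters so syscall_get_arguments() and syscall_set_arguments() always copy all six syscall arguments. As a rough sketch of what a caller looks like after this change — the tracer function, its name, and the pr_debug() output are made up for illustration; only the new three-argument signature comes from the patch:

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <asm/syscall.h>

	/* Hypothetical caller: dump the six syscall arguments of a traced task. */
	static void dump_syscall_args(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];	/* the helper now always fills all six slots */
		int i;

		syscall_get_arguments(task, regs, args);	/* old form: (task, regs, 0, 6, args) */
		for (i = 0; i < 6; i++)
			pr_debug("arg%d = %#lx\n", i, args[i]);
	}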
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5ad92419be19..62fc3fda1a05 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -1499,7 +1499,7 @@ config X86_CPA_STATISTICS | |||
| 1499 | depends on DEBUG_FS | 1499 | depends on DEBUG_FS |
| 1500 | ---help--- | 1500 | ---help--- |
| 1501 | Expose statistics about the Change Page Attribute mechanism, which | 1501 | Expose statistics about the Change Page Attribute mechanism, which |
| 1502 | helps to determine the effectivness of preserving large and huge | 1502 | helps to determine the effectiveness of preserving large and huge |
| 1503 | page mappings when mapping protections are changed. | 1503 | page mappings when mapping protections are changed. |
| 1504 | 1504 | ||
| 1505 | config ARCH_HAS_MEM_ENCRYPT | 1505 | config ARCH_HAS_MEM_ENCRYPT |
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index 3b6e70d085da..8457cdd47f75 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S | |||
| @@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2) | |||
| 323 | vpaddq t2,t1,t1 | 323 | vpaddq t2,t1,t1 |
| 324 | vmovq t1x,d4 | 324 | vmovq t1x,d4 |
| 325 | 325 | ||
| 326 | # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> | ||
| 327 | # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small | ||
| 328 | # amount. Careful: we must not assume the carry bits 'd0 >> 26', | ||
| 329 | # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit | ||
| 330 | # integers. It's true in a single-block implementation, but not here. | ||
| 331 | |||
| 326 | # d1 += d0 >> 26 | 332 | # d1 += d0 >> 26 |
| 327 | mov d0,%rax | 333 | mov d0,%rax |
| 328 | shr $26,%rax | 334 | shr $26,%rax |
| @@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2) | |||
| 361 | # h0 += (d4 >> 26) * 5 | 367 | # h0 += (d4 >> 26) * 5 |
| 362 | mov d4,%rax | 368 | mov d4,%rax |
| 363 | shr $26,%rax | 369 | shr $26,%rax |
| 364 | lea (%eax,%eax,4),%eax | 370 | lea (%rax,%rax,4),%rax |
| 365 | add %eax,%ebx | 371 | add %rax,%rbx |
| 366 | # h4 = d4 & 0x3ffffff | 372 | # h4 = d4 & 0x3ffffff |
| 367 | mov d4,%rax | 373 | mov d4,%rax |
| 368 | and $0x3ffffff,%eax | 374 | and $0x3ffffff,%eax |
| 369 | mov %eax,h4 | 375 | mov %eax,h4 |
| 370 | 376 | ||
| 371 | # h1 += h0 >> 26 | 377 | # h1 += h0 >> 26 |
| 372 | mov %ebx,%eax | 378 | mov %rbx,%rax |
| 373 | shr $26,%eax | 379 | shr $26,%rax |
| 374 | add %eax,h1 | 380 | add %eax,h1 |
| 375 | # h0 = h0 & 0x3ffffff | 381 | # h0 = h0 & 0x3ffffff |
| 376 | andl $0x3ffffff,%ebx | 382 | andl $0x3ffffff,%ebx |
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index e6add74d78a5..6f0be7a86964 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S | |||
| @@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2) | |||
| 253 | # h0 += (d4 >> 26) * 5 | 253 | # h0 += (d4 >> 26) * 5 |
| 254 | mov d4,%rax | 254 | mov d4,%rax |
| 255 | shr $26,%rax | 255 | shr $26,%rax |
| 256 | lea (%eax,%eax,4),%eax | 256 | lea (%rax,%rax,4),%rax |
| 257 | add %eax,%ebx | 257 | add %rax,%rbx |
| 258 | # h4 = d4 & 0x3ffffff | 258 | # h4 = d4 & 0x3ffffff |
| 259 | mov d4,%rax | 259 | mov d4,%rax |
| 260 | and $0x3ffffff,%eax | 260 | and $0x3ffffff,%eax |
| 261 | mov %eax,h4 | 261 | mov %eax,h4 |
| 262 | 262 | ||
| 263 | # h1 += h0 >> 26 | 263 | # h1 += h0 >> 26 |
| 264 | mov %ebx,%eax | 264 | mov %rbx,%rax |
| 265 | shr $26,%eax | 265 | shr $26,%rax |
| 266 | add %eax,h1 | 266 | add %eax,h1 |
| 267 | # h0 = h0 & 0x3ffffff | 267 | # h0 = h0 & 0x3ffffff |
| 268 | andl $0x3ffffff,%ebx | 268 | andl $0x3ffffff,%ebx |
| @@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2) | |||
| 524 | paddq t2,t1 | 524 | paddq t2,t1 |
| 525 | movq t1,d4 | 525 | movq t1,d4 |
| 526 | 526 | ||
| 527 | # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> | ||
| 528 | # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small | ||
| 529 | # amount. Careful: we must not assume the carry bits 'd0 >> 26', | ||
| 530 | # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit | ||
| 531 | # integers. It's true in a single-block implementation, but not here. | ||
| 532 | |||
| 527 | # d1 += d0 >> 26 | 533 | # d1 += d0 >> 26 |
| 528 | mov d0,%rax | 534 | mov d0,%rax |
| 529 | shr $26,%rax | 535 | shr $26,%rax |
| @@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2) | |||
| 562 | # h0 += (d4 >> 26) * 5 | 568 | # h0 += (d4 >> 26) * 5 |
| 563 | mov d4,%rax | 569 | mov d4,%rax |
| 564 | shr $26,%rax | 570 | shr $26,%rax |
| 565 | lea (%eax,%eax,4),%eax | 571 | lea (%rax,%rax,4),%rax |
| 566 | add %eax,%ebx | 572 | add %rax,%rbx |
| 567 | # h4 = d4 & 0x3ffffff | 573 | # h4 = d4 & 0x3ffffff |
| 568 | mov d4,%rax | 574 | mov d4,%rax |
| 569 | and $0x3ffffff,%eax | 575 | and $0x3ffffff,%eax |
| 570 | mov %eax,h4 | 576 | mov %eax,h4 |
| 571 | 577 | ||
| 572 | # h1 += h0 >> 26 | 578 | # h1 += h0 >> 26 |
| 573 | mov %ebx,%eax | 579 | mov %rbx,%rax |
| 574 | shr $26,%eax | 580 | shr $26,%rax |
| 575 | add %eax,h1 | 581 | add %eax,h1 |
| 576 | # h0 = h0 & 0x3ffffff | 582 | # h0 = h0 & 0x3ffffff |
| 577 | andl $0x3ffffff,%ebx | 583 | andl $0x3ffffff,%ebx |
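The new comments in both poly1305 files describe the same partial reduction. The following C sketch spells out that carry chain with 64-bit temporaries; it is illustrative only (not part of the patch, and the function name is invented), but it shows why the fixed assembly keeps the h0 accumulator and the (d4 >> 26) * 5 term in %rbx/%rax rather than their 32-bit halves:

	#include <linux/types.h>

	/* d[] holds the unreduced 64-bit limb sums, h[] the 26-bit output limbs. */
	static void poly1305_partial_reduce(const u64 d[5], u32 h[5])
	{
		u64 d1, d2, d3, d4, h0;

		/* carry d0 -> d1 -> d2 -> d3 -> d4, keeping 26 bits per limb */
		d1 = d[1] + (d[0] >> 26);
		h0 = d[0] & 0x3ffffff;
		d2 = d[2] + (d1 >> 26);
		h[1] = d1 & 0x3ffffff;
		d3 = d[3] + (d2 >> 26);
		h[2] = d2 & 0x3ffffff;
		d4 = d[4] + (d3 >> 26);
		h[3] = d3 & 0x3ffffff;

		/* fold the top carry back in: 2^130 == 5 (mod 2^130 - 5) */
		h0 += (d4 >> 26) * 5;	/* this term, and thus h0, can exceed 32 bits */
		h[4] = d4 & 0x3ffffff;
		h[1] += h0 >> 26;	/* leaves h1 < 2^26 plus a small amount */
		h[0] = h0 & 0x3ffffff;
	}

With up to four blocks accumulated before the reduction, the intermediate carries no longer fit in 32 bits, which is exactly the case the register-width fix addresses.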
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 7d2d7c801dba..d45f3fbd232e 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
| @@ -3,10 +3,14 @@ | |||
| 3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
| 4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
| 5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 6 | #include <linux/delay.h> | ||
| 6 | #include <asm/apicdef.h> | 7 | #include <asm/apicdef.h> |
| 8 | #include <asm/nmi.h> | ||
| 7 | 9 | ||
| 8 | #include "../perf_event.h" | 10 | #include "../perf_event.h" |
| 9 | 11 | ||
| 12 | static DEFINE_PER_CPU(unsigned int, perf_nmi_counter); | ||
| 13 | |||
| 10 | static __initconst const u64 amd_hw_cache_event_ids | 14 | static __initconst const u64 amd_hw_cache_event_ids |
| 11 | [PERF_COUNT_HW_CACHE_MAX] | 15 | [PERF_COUNT_HW_CACHE_MAX] |
| 12 | [PERF_COUNT_HW_CACHE_OP_MAX] | 16 | [PERF_COUNT_HW_CACHE_OP_MAX] |
| @@ -113,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
| 113 | }; | 117 | }; |
| 114 | 118 | ||
| 115 | /* | 119 | /* |
| 116 | * AMD Performance Monitor K7 and later. | 120 | * AMD Performance Monitor K7 and later, up to and including Family 16h: |
| 117 | */ | 121 | */ |
| 118 | static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = | 122 | static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = |
| 119 | { | 123 | { |
| 120 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | 124 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, |
| 121 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | 125 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, |
| 122 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, | 126 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, |
| 123 | [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, | 127 | [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, |
| 124 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, | 128 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, |
| 125 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, | 129 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, |
| 126 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ | 130 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ |
| 127 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ | 131 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ |
| 132 | }; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * AMD Performance Monitor Family 17h and later: | ||
| 136 | */ | ||
| 137 | static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] = | ||
| 138 | { | ||
| 139 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | ||
| 140 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
| 141 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, | ||
| 142 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, | ||
| 143 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, | ||
| 144 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287, | ||
| 145 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187, | ||
| 128 | }; | 146 | }; |
| 129 | 147 | ||
| 130 | static u64 amd_pmu_event_map(int hw_event) | 148 | static u64 amd_pmu_event_map(int hw_event) |
| 131 | { | 149 | { |
| 150 | if (boot_cpu_data.x86 >= 0x17) | ||
| 151 | return amd_f17h_perfmon_event_map[hw_event]; | ||
| 152 | |||
| 132 | return amd_perfmon_event_map[hw_event]; | 153 | return amd_perfmon_event_map[hw_event]; |
| 133 | } | 154 | } |
| 134 | 155 | ||
| @@ -429,6 +450,132 @@ static void amd_pmu_cpu_dead(int cpu) | |||
| 429 | } | 450 | } |
| 430 | } | 451 | } |
| 431 | 452 | ||
| 453 | /* | ||
| 454 | * When a PMC counter overflows, an NMI is used to process the event and | ||
| 455 | * reset the counter. NMI latency can result in the counter being updated | ||
| 456 | * before the NMI can run, which can result in what appear to be spurious | ||
| 457 | * NMIs. This function is intended to wait for the NMI to run and reset | ||
| 458 | * the counter to avoid possible unhandled NMI messages. | ||
| 459 | */ | ||
| 460 | #define OVERFLOW_WAIT_COUNT 50 | ||
| 461 | |||
| 462 | static void amd_pmu_wait_on_overflow(int idx) | ||
| 463 | { | ||
| 464 | unsigned int i; | ||
| 465 | u64 counter; | ||
| 466 | |||
| 467 | /* | ||
| 468 | * Wait for the counter to be reset if it has overflowed. This loop | ||
| 469 | * should exit very, very quickly, but just in case, don't wait | ||
| 470 | * forever... | ||
| 471 | */ | ||
| 472 | for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { | ||
| 473 | rdmsrl(x86_pmu_event_addr(idx), counter); | ||
| 474 | if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) | ||
| 475 | break; | ||
| 476 | |||
| 477 | /* Might be in IRQ context, so can't sleep */ | ||
| 478 | udelay(1); | ||
| 479 | } | ||
| 480 | } | ||
| 481 | |||
| 482 | static void amd_pmu_disable_all(void) | ||
| 483 | { | ||
| 484 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
| 485 | int idx; | ||
| 486 | |||
| 487 | x86_pmu_disable_all(); | ||
| 488 | |||
| 489 | /* | ||
| 490 | * This shouldn't be called from NMI context, but add a safeguard here | ||
| 491 | * to return, since if we're in NMI context we can't wait for an NMI | ||
| 492 | * to reset an overflowed counter value. | ||
| 493 | */ | ||
| 494 | if (in_nmi()) | ||
| 495 | return; | ||
| 496 | |||
| 497 | /* | ||
| 498 | * Check each counter for overflow and wait for it to be reset by the | ||
| 499 | * NMI if it has overflowed. This relies on the fact that all active | ||
| 500 | * counters are always enabled when this function is called and | ||
| 501 | * ARCH_PERFMON_EVENTSEL_INT is always set. | ||
| 502 | */ | ||
| 503 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
| 504 | if (!test_bit(idx, cpuc->active_mask)) | ||
| 505 | continue; | ||
| 506 | |||
| 507 | amd_pmu_wait_on_overflow(idx); | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
| 511 | static void amd_pmu_disable_event(struct perf_event *event) | ||
| 512 | { | ||
| 513 | x86_pmu_disable_event(event); | ||
| 514 | |||
| 515 | /* | ||
| 516 | * This can be called from NMI context (via x86_pmu_stop). The counter | ||
| 517 | * may have overflowed, but either way, we'll never see it get reset | ||
| 518 | * by the NMI if we're already in the NMI. And the NMI latency support | ||
| 519 | * below will take care of any pending NMI that might have been | ||
| 520 | * generated by the overflow. | ||
| 521 | */ | ||
| 522 | if (in_nmi()) | ||
| 523 | return; | ||
| 524 | |||
| 525 | amd_pmu_wait_on_overflow(event->hw.idx); | ||
| 526 | } | ||
| 527 | |||
| 528 | /* | ||
| 529 | * Because of NMI latency, if multiple PMC counters are active or other sources | ||
| 530 | * of NMIs are received, the perf NMI handler can handle one or more overflowed | ||
| 531 | * PMC counters outside of the NMI associated with the PMC overflow. If the NMI | ||
| 532 | * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel | ||
| 533 | * back-to-back NMI support won't be active. This PMC handler needs to take into | ||
| 534 | * account that this can occur, otherwise this could result in unknown NMI | ||
| 535 | * messages being issued. Examples of this are PMC overflow while in the NMI | ||
| 536 | * handler when multiple PMCs are active or PMC overflow while handling some | ||
| 537 | * other source of an NMI. | ||
| 538 | * | ||
| 539 | * Attempt to mitigate this by using the number of active PMCs to determine | ||
| 540 | * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset | ||
| 541 | * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the | ||
| 542 | * number of active PMCs or 2. The value of 2 is used in case an NMI does not | ||
| 543 | * arrive at the LAPIC in time to be collapsed into an already pending NMI. | ||
| 544 | */ | ||
| 545 | static int amd_pmu_handle_irq(struct pt_regs *regs) | ||
| 546 | { | ||
| 547 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
| 548 | int active, handled; | ||
| 549 | |||
| 550 | /* | ||
| 551 | * Obtain the active count before calling x86_pmu_handle_irq() since | ||
| 552 | * it is possible that x86_pmu_handle_irq() may make a counter | ||
| 553 | * inactive (through x86_pmu_stop). | ||
| 554 | */ | ||
| 555 | active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX); | ||
| 556 | |||
| 557 | /* Process any counter overflows */ | ||
| 558 | handled = x86_pmu_handle_irq(regs); | ||
| 559 | |||
| 560 | /* | ||
| 561 | * If a counter was handled, record the number of possible remaining | ||
| 562 | * NMIs that can occur. | ||
| 563 | */ | ||
| 564 | if (handled) { | ||
| 565 | this_cpu_write(perf_nmi_counter, | ||
| 566 | min_t(unsigned int, 2, active)); | ||
| 567 | |||
| 568 | return handled; | ||
| 569 | } | ||
| 570 | |||
| 571 | if (!this_cpu_read(perf_nmi_counter)) | ||
| 572 | return NMI_DONE; | ||
| 573 | |||
| 574 | this_cpu_dec(perf_nmi_counter); | ||
| 575 | |||
| 576 | return NMI_HANDLED; | ||
| 577 | } | ||
| 578 | |||
| 432 | static struct event_constraint * | 579 | static struct event_constraint * |
| 433 | amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, | 580 | amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
| 434 | struct perf_event *event) | 581 | struct perf_event *event) |
| @@ -621,11 +768,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config) | |||
| 621 | 768 | ||
| 622 | static __initconst const struct x86_pmu amd_pmu = { | 769 | static __initconst const struct x86_pmu amd_pmu = { |
| 623 | .name = "AMD", | 770 | .name = "AMD", |
| 624 | .handle_irq = x86_pmu_handle_irq, | 771 | .handle_irq = amd_pmu_handle_irq, |
| 625 | .disable_all = x86_pmu_disable_all, | 772 | .disable_all = amd_pmu_disable_all, |
| 626 | .enable_all = x86_pmu_enable_all, | 773 | .enable_all = x86_pmu_enable_all, |
| 627 | .enable = x86_pmu_enable_event, | 774 | .enable = x86_pmu_enable_event, |
| 628 | .disable = x86_pmu_disable_event, | 775 | .disable = amd_pmu_disable_event, |
| 629 | .hw_config = amd_pmu_hw_config, | 776 | .hw_config = amd_pmu_hw_config, |
| 630 | .schedule_events = x86_schedule_events, | 777 | .schedule_events = x86_schedule_events, |
| 631 | .eventsel = MSR_K7_EVNTSEL0, | 778 | .eventsel = MSR_K7_EVNTSEL0, |
| @@ -732,7 +879,7 @@ void amd_pmu_enable_virt(void) | |||
| 732 | cpuc->perf_ctr_virt_mask = 0; | 879 | cpuc->perf_ctr_virt_mask = 0; |
| 733 | 880 | ||
| 734 | /* Reload all events */ | 881 | /* Reload all events */ |
| 735 | x86_pmu_disable_all(); | 882 | amd_pmu_disable_all(); |
| 736 | x86_pmu_enable_all(0); | 883 | x86_pmu_enable_all(0); |
| 737 | } | 884 | } |
| 738 | EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); | 885 | EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); |
| @@ -750,7 +897,7 @@ void amd_pmu_disable_virt(void) | |||
| 750 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; | 897 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
| 751 | 898 | ||
| 752 | /* Reload all events */ | 899 | /* Reload all events */ |
| 753 | x86_pmu_disable_all(); | 900 | amd_pmu_disable_all(); |
| 754 | x86_pmu_enable_all(0); | 901 | x86_pmu_enable_all(0); |
| 755 | } | 902 | } |
| 756 | EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); | 903 | EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); |
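The comment block before amd_pmu_handle_irq() packs the reasoning into prose; the stand-alone sketch below restates the same accounting. The function, its name, and the plain static counter are illustrative stand-ins (the real patch uses the per-CPU perf_nmi_counter); only the NMI_HANDLED/NMI_DONE semantics and the "budget of at most two" rule come from the change:

	#include <linux/kernel.h>
	#include <linux/nmi.h>

	static unsigned int nmi_budget;	/* stand-in for the per-CPU perf_nmi_counter */

	static int example_nmi_accounting(int overflows_handled, int active_pmcs)
	{
		if (overflows_handled) {
			/* allow up to two late NMIs for the counters just serviced */
			nmi_budget = min(2, active_pmcs);
			return NMI_HANDLED;
		}
		if (!nmi_budget)
			return NMI_DONE;	/* nothing pending: a genuinely unknown NMI */
		nmi_budget--;			/* absorb one late-arriving PMC NMI */
		return NMI_HANDLED;
	}

A handled overflow arms the budget; an otherwise-unclaimed NMI is swallowed only while that budget lasts, so true unknown NMIs are still reported once it runs out.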
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e2b1447192a8..81911e11a15d 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
| @@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags) | |||
| 1349 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 1349 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
| 1350 | struct hw_perf_event *hwc = &event->hw; | 1350 | struct hw_perf_event *hwc = &event->hw; |
| 1351 | 1351 | ||
| 1352 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { | 1352 | if (test_bit(hwc->idx, cpuc->active_mask)) { |
| 1353 | x86_pmu.disable(event); | 1353 | x86_pmu.disable(event); |
| 1354 | __clear_bit(hwc->idx, cpuc->active_mask); | ||
| 1354 | cpuc->events[hwc->idx] = NULL; | 1355 | cpuc->events[hwc->idx] = NULL; |
| 1355 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | 1356 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); |
| 1356 | hwc->state |= PERF_HES_STOPPED; | 1357 | hwc->state |= PERF_HES_STOPPED; |
| @@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs) | |||
| 1447 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1448 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 1448 | 1449 | ||
| 1449 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 1450 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
| 1450 | if (!test_bit(idx, cpuc->active_mask)) { | 1451 | if (!test_bit(idx, cpuc->active_mask)) |
| 1451 | /* | ||
| 1452 | * Though we deactivated the counter some cpus | ||
| 1453 | * might still deliver spurious interrupts still | ||
| 1454 | * in flight. Catch them: | ||
| 1455 | */ | ||
| 1456 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
| 1457 | handled++; | ||
| 1458 | continue; | 1452 | continue; |
| 1459 | } | ||
| 1460 | 1453 | ||
| 1461 | event = cpuc->events[idx]; | 1454 | event = cpuc->events[idx]; |
| 1462 | 1455 | ||
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 8baa441d8000..f9451566cd9b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
| @@ -3131,7 +3131,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) | |||
| 3131 | flags &= ~PERF_SAMPLE_TIME; | 3131 | flags &= ~PERF_SAMPLE_TIME; |
| 3132 | if (!event->attr.exclude_kernel) | 3132 | if (!event->attr.exclude_kernel) |
| 3133 | flags &= ~PERF_SAMPLE_REGS_USER; | 3133 | flags &= ~PERF_SAMPLE_REGS_USER; |
| 3134 | if (event->attr.sample_regs_user & ~PEBS_REGS) | 3134 | if (event->attr.sample_regs_user & ~PEBS_GP_REGS) |
| 3135 | flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); | 3135 | flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); |
| 3136 | return flags; | 3136 | return flags; |
| 3137 | } | 3137 | } |
| @@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
| 3185 | return ret; | 3185 | return ret; |
| 3186 | 3186 | ||
| 3187 | if (event->attr.precise_ip) { | 3187 | if (event->attr.precise_ip) { |
| 3188 | if (!event->attr.freq) { | 3188 | if (!(event->attr.freq || event->attr.wakeup_events)) { |
| 3189 | event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; | 3189 | event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; |
| 3190 | if (!(event->attr.sample_type & | 3190 | if (!(event->attr.sample_type & |
| 3191 | ~intel_pmu_large_pebs_flags(event))) | 3191 | ~intel_pmu_large_pebs_flags(event))) |
| @@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu) | |||
| 3575 | 3575 | ||
| 3576 | cpuc->lbr_sel = NULL; | 3576 | cpuc->lbr_sel = NULL; |
| 3577 | 3577 | ||
| 3578 | if (x86_pmu.flags & PMU_FL_TFA) { | ||
| 3579 | WARN_ON_ONCE(cpuc->tfa_shadow); | ||
| 3580 | cpuc->tfa_shadow = ~0ULL; | ||
| 3581 | intel_set_tfa(cpuc, false); | ||
| 3582 | } | ||
| 3583 | |||
| 3578 | if (x86_pmu.version > 1) | 3584 | if (x86_pmu.version > 1) |
| 3579 | flip_smm_bit(&x86_pmu.attr_freeze_on_smi); | 3585 | flip_smm_bit(&x86_pmu.attr_freeze_on_smi); |
| 3580 | 3586 | ||
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index a75955741c50..1e98a42b560a 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
| @@ -96,25 +96,25 @@ struct amd_nb { | |||
| 96 | PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \ | 96 | PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \ |
| 97 | PERF_SAMPLE_PERIOD) | 97 | PERF_SAMPLE_PERIOD) |
| 98 | 98 | ||
| 99 | #define PEBS_REGS \ | 99 | #define PEBS_GP_REGS \ |
| 100 | (PERF_REG_X86_AX | \ | 100 | ((1ULL << PERF_REG_X86_AX) | \ |
| 101 | PERF_REG_X86_BX | \ | 101 | (1ULL << PERF_REG_X86_BX) | \ |
| 102 | PERF_REG_X86_CX | \ | 102 | (1ULL << PERF_REG_X86_CX) | \ |
| 103 | PERF_REG_X86_DX | \ | 103 | (1ULL << PERF_REG_X86_DX) | \ |
| 104 | PERF_REG_X86_DI | \ | 104 | (1ULL << PERF_REG_X86_DI) | \ |
| 105 | PERF_REG_X86_SI | \ | 105 | (1ULL << PERF_REG_X86_SI) | \ |
| 106 | PERF_REG_X86_SP | \ | 106 | (1ULL << PERF_REG_X86_SP) | \ |
| 107 | PERF_REG_X86_BP | \ | 107 | (1ULL << PERF_REG_X86_BP) | \ |
| 108 | PERF_REG_X86_IP | \ | 108 | (1ULL << PERF_REG_X86_IP) | \ |
| 109 | PERF_REG_X86_FLAGS | \ | 109 | (1ULL << PERF_REG_X86_FLAGS) | \ |
| 110 | PERF_REG_X86_R8 | \ | 110 | (1ULL << PERF_REG_X86_R8) | \ |
| 111 | PERF_REG_X86_R9 | \ | 111 | (1ULL << PERF_REG_X86_R9) | \ |
| 112 | PERF_REG_X86_R10 | \ | 112 | (1ULL << PERF_REG_X86_R10) | \ |
| 113 | PERF_REG_X86_R11 | \ | 113 | (1ULL << PERF_REG_X86_R11) | \ |
| 114 | PERF_REG_X86_R12 | \ | 114 | (1ULL << PERF_REG_X86_R12) | \ |
| 115 | PERF_REG_X86_R13 | \ | 115 | (1ULL << PERF_REG_X86_R13) | \ |
| 116 | PERF_REG_X86_R14 | \ | 116 | (1ULL << PERF_REG_X86_R14) | \ |
| 117 | PERF_REG_X86_R15) | 117 | (1ULL << PERF_REG_X86_R15)) |
| 118 | 118 | ||
| 119 | /* | 119 | /* |
| 120 | * Per register state. | 120 | * Per register state. |
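The rename from PEBS_REGS to PEBS_GP_REGS also changes how the mask is built: attr.sample_regs_user carries one bit per PERF_REG_X86_* index, so the allowed set must be built from (1ULL << reg); OR-ing the enum values themselves, as the old macro did, does not produce such a mask. A minimal illustrative check (not from the patch, mirroring the intel_pmu_large_pebs_flags() test earlier in this series):

	#include <linux/types.h>

	/* Assumes PEBS_GP_REGS from the header above is in scope. */
	static bool pebs_regs_ok(u64 sample_regs_user)
	{
		/* true only if every requested register is a PEBS-capable GP register */
		return !(sample_regs_user & ~PEBS_GP_REGS);
	}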
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index d153d570bb04..8e790ec219a5 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
| @@ -36,16 +36,17 @@ | |||
| 36 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 36 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 37 | */ | 37 | */ |
| 38 | 38 | ||
| 39 | #define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) | 39 | #define RLONG_ADDR(x) "m" (*(volatile long *) (x)) |
| 40 | #define WBYTE_ADDR(x) "+m" (*(volatile char *) (x)) | ||
| 40 | 41 | ||
| 41 | #define ADDR BITOP_ADDR(addr) | 42 | #define ADDR RLONG_ADDR(addr) |
| 42 | 43 | ||
| 43 | /* | 44 | /* |
| 44 | * We do the locked ops that don't return the old value as | 45 | * We do the locked ops that don't return the old value as |
| 45 | * a mask operation on a byte. | 46 | * a mask operation on a byte. |
| 46 | */ | 47 | */ |
| 47 | #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) | 48 | #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) |
| 48 | #define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) | 49 | #define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) |
| 49 | #define CONST_MASK(nr) (1 << ((nr) & 7)) | 50 | #define CONST_MASK(nr) (1 << ((nr) & 7)) |
| 50 | 51 | ||
| 51 | /** | 52 | /** |
| @@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr) | |||
| 73 | : "memory"); | 74 | : "memory"); |
| 74 | } else { | 75 | } else { |
| 75 | asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" | 76 | asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" |
| 76 | : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); | 77 | : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); |
| 77 | } | 78 | } |
| 78 | } | 79 | } |
| 79 | 80 | ||
| @@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr) | |||
| 88 | */ | 89 | */ |
| 89 | static __always_inline void __set_bit(long nr, volatile unsigned long *addr) | 90 | static __always_inline void __set_bit(long nr, volatile unsigned long *addr) |
| 90 | { | 91 | { |
| 91 | asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); | 92 | asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); |
| 92 | } | 93 | } |
| 93 | 94 | ||
| 94 | /** | 95 | /** |
| @@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr) | |||
| 110 | : "iq" ((u8)~CONST_MASK(nr))); | 111 | : "iq" ((u8)~CONST_MASK(nr))); |
| 111 | } else { | 112 | } else { |
| 112 | asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" | 113 | asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" |
| 113 | : BITOP_ADDR(addr) | 114 | : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); |
| 114 | : "Ir" (nr)); | ||
| 115 | } | 115 | } |
| 116 | } | 116 | } |
| 117 | 117 | ||
| @@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad | |||
| 131 | 131 | ||
| 132 | static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) | 132 | static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) |
| 133 | { | 133 | { |
| 134 | asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); | 134 | asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) | 137 | static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) |
| @@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile | |||
| 139 | bool negative; | 139 | bool negative; |
| 140 | asm volatile(LOCK_PREFIX "andb %2,%1" | 140 | asm volatile(LOCK_PREFIX "andb %2,%1" |
| 141 | CC_SET(s) | 141 | CC_SET(s) |
| 142 | : CC_OUT(s) (negative), ADDR | 142 | : CC_OUT(s) (negative), WBYTE_ADDR(addr) |
| 143 | : "ir" ((char) ~(1 << nr)) : "memory"); | 143 | : "ir" ((char) ~(1 << nr)) : "memory"); |
| 144 | return negative; | 144 | return negative; |
| 145 | } | 145 | } |
| @@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile | |||
| 155 | * __clear_bit() is non-atomic and implies release semantics before the memory | 155 | * __clear_bit() is non-atomic and implies release semantics before the memory |
| 156 | * operation. It can be used for an unlock if no other CPUs can concurrently | 156 | * operation. It can be used for an unlock if no other CPUs can concurrently |
| 157 | * modify other bits in the word. | 157 | * modify other bits in the word. |
| 158 | * | ||
| 159 | * No memory barrier is required here, because x86 cannot reorder stores past | ||
| 160 | * older loads. Same principle as spin_unlock. | ||
| 161 | */ | 158 | */ |
| 162 | static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) | 159 | static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) |
| 163 | { | 160 | { |
| 164 | barrier(); | ||
| 165 | __clear_bit(nr, addr); | 161 | __clear_bit(nr, addr); |
| 166 | } | 162 | } |
| 167 | 163 | ||
| @@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * | |||
| 176 | */ | 172 | */ |
| 177 | static __always_inline void __change_bit(long nr, volatile unsigned long *addr) | 173 | static __always_inline void __change_bit(long nr, volatile unsigned long *addr) |
| 178 | { | 174 | { |
| 179 | asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); | 175 | asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); |
| 180 | } | 176 | } |
| 181 | 177 | ||
| 182 | /** | 178 | /** |
| @@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) | |||
| 196 | : "iq" ((u8)CONST_MASK(nr))); | 192 | : "iq" ((u8)CONST_MASK(nr))); |
| 197 | } else { | 193 | } else { |
| 198 | asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" | 194 | asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" |
| 199 | : BITOP_ADDR(addr) | 195 | : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); |
| 200 | : "Ir" (nr)); | ||
| 201 | } | 196 | } |
| 202 | } | 197 | } |
| 203 | 198 | ||
| @@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * | |||
| 242 | 237 | ||
| 243 | asm(__ASM_SIZE(bts) " %2,%1" | 238 | asm(__ASM_SIZE(bts) " %2,%1" |
| 244 | CC_SET(c) | 239 | CC_SET(c) |
| 245 | : CC_OUT(c) (oldbit), ADDR | 240 | : CC_OUT(c) (oldbit) |
| 246 | : "Ir" (nr)); | 241 | : ADDR, "Ir" (nr) : "memory"); |
| 247 | return oldbit; | 242 | return oldbit; |
| 248 | } | 243 | } |
| 249 | 244 | ||
| @@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long | |||
| 282 | 277 | ||
| 283 | asm volatile(__ASM_SIZE(btr) " %2,%1" | 278 | asm volatile(__ASM_SIZE(btr) " %2,%1" |
| 284 | CC_SET(c) | 279 | CC_SET(c) |
| 285 | : CC_OUT(c) (oldbit), ADDR | 280 | : CC_OUT(c) (oldbit) |
| 286 | : "Ir" (nr)); | 281 | : ADDR, "Ir" (nr) : "memory"); |
| 287 | return oldbit; | 282 | return oldbit; |
| 288 | } | 283 | } |
| 289 | 284 | ||
| @@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon | |||
| 294 | 289 | ||
| 295 | asm volatile(__ASM_SIZE(btc) " %2,%1" | 290 | asm volatile(__ASM_SIZE(btc) " %2,%1" |
| 296 | CC_SET(c) | 291 | CC_SET(c) |
| 297 | : CC_OUT(c) (oldbit), ADDR | 292 | : CC_OUT(c) (oldbit) |
| 298 | : "Ir" (nr) : "memory"); | 293 | : ADDR, "Ir" (nr) : "memory"); |
| 299 | 294 | ||
| 300 | return oldbit; | 295 | return oldbit; |
| 301 | } | 296 | } |
| @@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l | |||
| 326 | asm volatile(__ASM_SIZE(bt) " %2,%1" | 321 | asm volatile(__ASM_SIZE(bt) " %2,%1" |
| 327 | CC_SET(c) | 322 | CC_SET(c) |
| 328 | : CC_OUT(c) (oldbit) | 323 | : CC_OUT(c) (oldbit) |
| 329 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | 324 | : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); |
| 330 | 325 | ||
| 331 | return oldbit; | 326 | return oldbit; |
| 332 | } | 327 | } |
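The bitops rework replaces the "+m" output on a single long with an "m" input plus an explicit "memory" clobber. A made-up caller (not from the patch, and assuming 64-bit longs) shows the case the clobber covers: when nr points past the first word, the old constraint only told the compiler that the first long at addr changed, so nothing forced it to discard cached reads of the rest of the bitmap:

	#include <linux/bitops.h>

	static unsigned long snapshot_after_set(unsigned long *map)
	{
		unsigned long before = map[2];	/* plain load the compiler may keep in a register */

		__set_bit(130, map);		/* with 64-bit longs, this modifies map[2] */
		return map[2] - before;		/* the "memory" clobber forces a reload of map[2] */
	}

Under the old constraint the compiler could legally reuse 'before' here and return 0; the added clobber makes it treat the whole object as modified, which is also what allows dropping the explicit barrier() in __clear_bit_unlock().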
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 93c4bf598fb0..feab24cac610 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
| @@ -226,7 +226,9 @@ struct x86_emulate_ops { | |||
| 226 | 226 | ||
| 227 | unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); | 227 | unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); |
| 228 | void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); | 228 | void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); |
| 229 | int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase); | 229 | int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, |
| 230 | const char *smstate); | ||
| 231 | void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt); | ||
| 230 | 232 | ||
| 231 | }; | 233 | }; |
| 232 | 234 | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 159b5988292f..a9d03af34030 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | #define KVM_PERMILLE_MMU_PAGES 20 | 128 | #define KVM_PERMILLE_MMU_PAGES 20 |
| 129 | #define KVM_MIN_ALLOC_MMU_PAGES 64 | 129 | #define KVM_MIN_ALLOC_MMU_PAGES 64UL |
| 130 | #define KVM_MMU_HASH_SHIFT 12 | 130 | #define KVM_MMU_HASH_SHIFT 12 |
| 131 | #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) | 131 | #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) |
| 132 | #define KVM_MIN_FREE_MMU_PAGES 5 | 132 | #define KVM_MIN_FREE_MMU_PAGES 5 |
| @@ -844,9 +844,9 @@ enum kvm_irqchip_mode { | |||
| 844 | }; | 844 | }; |
| 845 | 845 | ||
| 846 | struct kvm_arch { | 846 | struct kvm_arch { |
| 847 | unsigned int n_used_mmu_pages; | 847 | unsigned long n_used_mmu_pages; |
| 848 | unsigned int n_requested_mmu_pages; | 848 | unsigned long n_requested_mmu_pages; |
| 849 | unsigned int n_max_mmu_pages; | 849 | unsigned long n_max_mmu_pages; |
| 850 | unsigned int indirect_shadow_pages; | 850 | unsigned int indirect_shadow_pages; |
| 851 | struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; | 851 | struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; |
| 852 | /* | 852 | /* |
| @@ -1182,7 +1182,7 @@ struct kvm_x86_ops { | |||
| 1182 | 1182 | ||
| 1183 | int (*smi_allowed)(struct kvm_vcpu *vcpu); | 1183 | int (*smi_allowed)(struct kvm_vcpu *vcpu); |
| 1184 | int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate); | 1184 | int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate); |
| 1185 | int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase); | 1185 | int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate); |
| 1186 | int (*enable_smi_window)(struct kvm_vcpu *vcpu); | 1186 | int (*enable_smi_window)(struct kvm_vcpu *vcpu); |
| 1187 | 1187 | ||
| 1188 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); | 1188 | int (*mem_enc_op)(struct kvm *kvm, void __user *argp); |
| @@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, | |||
| 1256 | gfn_t gfn_offset, unsigned long mask); | 1256 | gfn_t gfn_offset, unsigned long mask); |
| 1257 | void kvm_mmu_zap_all(struct kvm *kvm); | 1257 | void kvm_mmu_zap_all(struct kvm *kvm); |
| 1258 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); | 1258 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); |
| 1259 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); | 1259 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); |
| 1260 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); | 1260 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); |
| 1261 | 1261 | ||
| 1262 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); | 1262 | int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); |
| 1263 | bool pdptrs_changed(struct kvm_vcpu *vcpu); | 1263 | bool pdptrs_changed(struct kvm_vcpu *vcpu); |
| @@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) | |||
| 1592 | #define put_smstate(type, buf, offset, val) \ | 1592 | #define put_smstate(type, buf, offset, val) \ |
| 1593 | *(type *)((buf) + (offset) - 0x7e00) = val | 1593 | *(type *)((buf) + (offset) - 0x7e00) = val |
| 1594 | 1594 | ||
| 1595 | #define GET_SMSTATE(type, buf, offset) \ | ||
| 1596 | (*(type *)((buf) + (offset) - 0x7e00)) | ||
| 1597 | |||
| 1595 | #endif /* _ASM_X86_KVM_HOST_H */ | 1598 | #endif /* _ASM_X86_KVM_HOST_H */ |
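GET_SMSTATE() is the read-side counterpart of the existing put_smstate(): both address the SMM state-save area relative to 0x7e00. The snippet below is a hedged illustration only — the 0x7ff8 offset, the buffer, and the function are placeholders, not real SMRAM layout:

	#include <linux/types.h>
	#include <asm/kvm_host.h>

	static u64 smstate_roundtrip(char *smstate)
	{
		/* offsets in the 0x7e00..0x7fff window index into a 512-byte buffer */
		put_smstate(u64, smstate, 0x7ff8, 0x1234);	/* written on SMM entry  */
		return GET_SMSTATE(u64, smstate, 0x7ff8);	/* read back on RSM exit */
	}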
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index d653139857af..4c305471ec33 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
| @@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 91 | 91 | ||
| 92 | static inline void syscall_get_arguments(struct task_struct *task, | 92 | static inline void syscall_get_arguments(struct task_struct *task, |
| 93 | struct pt_regs *regs, | 93 | struct pt_regs *regs, |
| 94 | unsigned int i, unsigned int n, | ||
| 95 | unsigned long *args) | 94 | unsigned long *args) |
| 96 | { | 95 | { |
| 97 | BUG_ON(i + n > 6); | 96 | memcpy(args, ®s->bx, 6 * sizeof(args[0])); |
| 98 | memcpy(args, ®s->bx + i, n * sizeof(args[0])); | ||
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | static inline void syscall_set_arguments(struct task_struct *task, | 99 | static inline void syscall_set_arguments(struct task_struct *task, |
| @@ -116,124 +114,50 @@ static inline int syscall_get_arch(void) | |||
| 116 | 114 | ||
| 117 | static inline void syscall_get_arguments(struct task_struct *task, | 115 | static inline void syscall_get_arguments(struct task_struct *task, |
| 118 | struct pt_regs *regs, | 116 | struct pt_regs *regs, |
| 119 | unsigned int i, unsigned int n, | ||
| 120 | unsigned long *args) | 117 | unsigned long *args) |
| 121 | { | 118 | { |
| 122 | # ifdef CONFIG_IA32_EMULATION | 119 | # ifdef CONFIG_IA32_EMULATION |
| 123 | if (task->thread_info.status & TS_COMPAT) | 120 | if (task->thread_info.status & TS_COMPAT) { |
| 124 | switch (i) { | 121 | *args++ = regs->bx; |
| 125 | case 0: | 122 | *args++ = regs->cx; |
| 126 | if (!n--) break; | 123 | *args++ = regs->dx; |
| 127 | *args++ = regs->bx; | 124 | *args++ = regs->si; |
| 128 | case 1: | 125 | *args++ = regs->di; |
| 129 | if (!n--) break; | 126 | *args = regs->bp; |
| 130 | *args++ = regs->cx; | 127 | } else |
| 131 | case 2: | ||
| 132 | if (!n--) break; | ||
| 133 | *args++ = regs->dx; | ||
| 134 | case 3: | ||
| 135 | if (!n--) break; | ||
| 136 | *args++ = regs->si; | ||
| 137 | case 4: | ||
| 138 | if (!n--) break; | ||
| 139 | *args++ = regs->di; | ||
| 140 | case 5: | ||
| 141 | if (!n--) break; | ||
| 142 | *args++ = regs->bp; | ||
| 143 | case 6: | ||
| 144 | if (!n--) break; | ||
| 145 | default: | ||
| 146 | BUG(); | ||
| 147 | break; | ||
| 148 | } | ||
| 149 | else | ||
| 150 | # endif | 128 | # endif |
| 151 | switch (i) { | 129 | { |
| 152 | case 0: | 130 | *args++ = regs->di; |
| 153 | if (!n--) break; | 131 | *args++ = regs->si; |
| 154 | *args++ = regs->di; | 132 | *args++ = regs->dx; |
| 155 | case 1: | 133 | *args++ = regs->r10; |
| 156 | if (!n--) break; | 134 | *args++ = regs->r8; |
| 157 | *args++ = regs->si; | 135 | *args = regs->r9; |
| 158 | case 2: | 136 | } |
| 159 | if (!n--) break; | ||
| 160 | *args++ = regs->dx; | ||
| 161 | case 3: | ||
| 162 | if (!n--) break; | ||
| 163 | *args++ = regs->r10; | ||
| 164 | case 4: | ||
| 165 | if (!n--) break; | ||
| 166 | *args++ = regs->r8; | ||
| 167 | case 5: | ||
| 168 | if (!n--) break; | ||
| 169 | *args++ = regs->r9; | ||
| 170 | case 6: | ||
| 171 | if (!n--) break; | ||
| 172 | default: | ||
| 173 | BUG(); | ||
| 174 | break; | ||
| 175 | } | ||
| 176 | } | 137 | } |
| 177 | 138 | ||
| 178 | static inline void syscall_set_arguments(struct task_struct *task, | 139 | static inline void syscall_set_arguments(struct task_struct *task, |
| 179 | struct pt_regs *regs, | 140 | struct pt_regs *regs, |
| 180 | unsigned int i, unsigned int n, | ||
| 181 | const unsigned long *args) | 141 | const unsigned long *args) |
| 182 | { | 142 | { |
| 183 | # ifdef CONFIG_IA32_EMULATION | 143 | # ifdef CONFIG_IA32_EMULATION |
| 184 | if (task->thread_info.status & TS_COMPAT) | 144 | if (task->thread_info.status & TS_COMPAT) { |
| 185 | switch (i) { | 145 | regs->bx = *args++; |
| 186 | case 0: | 146 | regs->cx = *args++; |
| 187 | if (!n--) break; | 147 | regs->dx = *args++; |
| 188 | regs->bx = *args++; | 148 | regs->si = *args++; |
| 189 | case 1: | 149 | regs->di = *args++; |
| 190 | if (!n--) break; | 150 | regs->bp = *args; |
| 191 | regs->cx = *args++; | 151 | } else |
| 192 | case 2: | ||
| 193 | if (!n--) break; | ||
| 194 | regs->dx = *args++; | ||
| 195 | case 3: | ||
| 196 | if (!n--) break; | ||
| 197 | regs->si = *args++; | ||
| 198 | case 4: | ||
| 199 | if (!n--) break; | ||
| 200 | regs->di = *args++; | ||
| 201 | case 5: | ||
| 202 | if (!n--) break; | ||
| 203 | regs->bp = *args++; | ||
| 204 | case 6: | ||
| 205 | if (!n--) break; | ||
| 206 | default: | ||
| 207 | BUG(); | ||
| 208 | break; | ||
| 209 | } | ||
| 210 | else | ||
| 211 | # endif | 152 | # endif |
| 212 | switch (i) { | 153 | { |
| 213 | case 0: | 154 | regs->di = *args++; |
| 214 | if (!n--) break; | 155 | regs->si = *args++; |
| 215 | regs->di = *args++; | 156 | regs->dx = *args++; |
| 216 | case 1: | 157 | regs->r10 = *args++; |
| 217 | if (!n--) break; | 158 | regs->r8 = *args++; |
| 218 | regs->si = *args++; | 159 | regs->r9 = *args; |
| 219 | case 2: | 160 | } |
| 220 | if (!n--) break; | ||
| 221 | regs->dx = *args++; | ||
| 222 | case 3: | ||
| 223 | if (!n--) break; | ||
| 224 | regs->r10 = *args++; | ||
| 225 | case 4: | ||
| 226 | if (!n--) break; | ||
| 227 | regs->r8 = *args++; | ||
| 228 | case 5: | ||
| 229 | if (!n--) break; | ||
| 230 | regs->r9 = *args++; | ||
| 231 | case 6: | ||
| 232 | if (!n--) break; | ||
| 233 | default: | ||
| 234 | BUG(); | ||
| 235 | break; | ||
| 236 | } | ||
| 237 | } | 161 | } |
| 238 | 162 | ||
| 239 | static inline int syscall_get_arch(void) | 163 | static inline int syscall_get_arch(void) |
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index de6f0d59a24f..2863c2026655 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
| @@ -206,6 +206,9 @@ xen_single_call(unsigned int call, | |||
| 206 | __HYPERCALL_DECLS; | 206 | __HYPERCALL_DECLS; |
| 207 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); | 207 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); |
| 208 | 208 | ||
| 209 | if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) | ||
| 210 | return -EINVAL; | ||
| 211 | |||
| 209 | asm volatile(CALL_NOSPEC | 212 | asm volatile(CALL_NOSPEC |
| 210 | : __HYPERCALL_5PARAM | 213 | : __HYPERCALL_5PARAM |
| 211 | : [thunk_target] "a" (&hypercall_page[call]) | 214 | : [thunk_target] "a" (&hypercall_page[call]) |
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index f0b0c90dd398..d213ec5c3766 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h | |||
| @@ -146,6 +146,7 @@ | |||
| 146 | 146 | ||
| 147 | #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 | 147 | #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 |
| 148 | #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 | 148 | #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 |
| 149 | #define VMX_ABORT_VMCS_CORRUPTED 3 | ||
| 149 | #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 | 150 | #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 |
| 150 | 151 | ||
| 151 | #endif /* _UAPIVMX_H */ | 152 | #endif /* _UAPIVMX_H */ |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 2da82eff0eb4..b91b3bfa5cfb 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -275,7 +275,7 @@ static const struct { | |||
| 275 | const char *option; | 275 | const char *option; |
| 276 | enum spectre_v2_user_cmd cmd; | 276 | enum spectre_v2_user_cmd cmd; |
| 277 | bool secure; | 277 | bool secure; |
| 278 | } v2_user_options[] __initdata = { | 278 | } v2_user_options[] __initconst = { |
| 279 | { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, | 279 | { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, |
| 280 | { "off", SPECTRE_V2_USER_CMD_NONE, false }, | 280 | { "off", SPECTRE_V2_USER_CMD_NONE, false }, |
| 281 | { "on", SPECTRE_V2_USER_CMD_FORCE, true }, | 281 | { "on", SPECTRE_V2_USER_CMD_FORCE, true }, |
| @@ -419,7 +419,7 @@ static const struct { | |||
| 419 | const char *option; | 419 | const char *option; |
| 420 | enum spectre_v2_mitigation_cmd cmd; | 420 | enum spectre_v2_mitigation_cmd cmd; |
| 421 | bool secure; | 421 | bool secure; |
| 422 | } mitigation_options[] __initdata = { | 422 | } mitigation_options[] __initconst = { |
| 423 | { "off", SPECTRE_V2_CMD_NONE, false }, | 423 | { "off", SPECTRE_V2_CMD_NONE, false }, |
| 424 | { "on", SPECTRE_V2_CMD_FORCE, true }, | 424 | { "on", SPECTRE_V2_CMD_FORCE, true }, |
| 425 | { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, | 425 | { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, |
| @@ -658,7 +658,7 @@ static const char * const ssb_strings[] = { | |||
| 658 | static const struct { | 658 | static const struct { |
| 659 | const char *option; | 659 | const char *option; |
| 660 | enum ssb_mitigation_cmd cmd; | 660 | enum ssb_mitigation_cmd cmd; |
| 661 | } ssb_mitigation_options[] __initdata = { | 661 | } ssb_mitigation_options[] __initconst = { |
| 662 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ | 662 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ |
| 663 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ | 663 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ |
| 664 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ | 664 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fc3c07fe7df5..3142fd7a9b32 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -611,8 +611,8 @@ static void init_intel_energy_perf(struct cpuinfo_x86 *c) | |||
| 611 | if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) | 611 | if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) |
| 612 | return; | 612 | return; |
| 613 | 613 | ||
| 614 | pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); | 614 | pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); |
| 615 | pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); | 615 | pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); |
| 616 | epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; | 616 | epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; |
| 617 | wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); | 617 | wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); |
| 618 | } | 618 | } |
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 399601eda8e4..85212a32b54d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c | |||
| @@ -2039,14 +2039,14 @@ out: | |||
| 2039 | enum rdt_param { | 2039 | enum rdt_param { |
| 2040 | Opt_cdp, | 2040 | Opt_cdp, |
| 2041 | Opt_cdpl2, | 2041 | Opt_cdpl2, |
| 2042 | Opt_mba_mpbs, | 2042 | Opt_mba_mbps, |
| 2043 | nr__rdt_params | 2043 | nr__rdt_params |
| 2044 | }; | 2044 | }; |
| 2045 | 2045 | ||
| 2046 | static const struct fs_parameter_spec rdt_param_specs[] = { | 2046 | static const struct fs_parameter_spec rdt_param_specs[] = { |
| 2047 | fsparam_flag("cdp", Opt_cdp), | 2047 | fsparam_flag("cdp", Opt_cdp), |
| 2048 | fsparam_flag("cdpl2", Opt_cdpl2), | 2048 | fsparam_flag("cdpl2", Opt_cdpl2), |
| 2049 | fsparam_flag("mba_mpbs", Opt_mba_mpbs), | 2049 | fsparam_flag("mba_MBps", Opt_mba_mbps), |
| 2050 | {} | 2050 | {} |
| 2051 | }; | 2051 | }; |
| 2052 | 2052 | ||
| @@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) | |||
| 2072 | case Opt_cdpl2: | 2072 | case Opt_cdpl2: |
| 2073 | ctx->enable_cdpl2 = true; | 2073 | ctx->enable_cdpl2 = true; |
| 2074 | return 0; | 2074 | return 0; |
| 2075 | case Opt_mba_mpbs: | 2075 | case Opt_mba_mbps: |
| 2076 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | 2076 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
| 2077 | return -EINVAL; | 2077 | return -EINVAL; |
| 2078 | ctx->enable_mba_mbps = true; | 2078 | ctx->enable_mba_mbps = true; |
| @@ -2610,9 +2610,10 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
| 2610 | rdt_last_cmd_puts("Failed to initialize allocations\n"); | 2610 | rdt_last_cmd_puts("Failed to initialize allocations\n"); |
| 2611 | return ret; | 2611 | return ret; |
| 2612 | } | 2612 | } |
| 2613 | rdtgrp->mode = RDT_MODE_SHAREABLE; | ||
| 2614 | } | 2613 | } |
| 2615 | 2614 | ||
| 2615 | rdtgrp->mode = RDT_MODE_SHAREABLE; | ||
| 2616 | |||
| 2616 | return 0; | 2617 | return 0; |
| 2617 | } | 2618 | } |
| 2618 | 2619 | ||
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index a034cb808e7e..fed46ddb1eef 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
| @@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) | |||
| 569 | unsigned long *sara = stack_addr(regs); | 569 | unsigned long *sara = stack_addr(regs); |
| 570 | 570 | ||
| 571 | ri->ret_addr = (kprobe_opcode_t *) *sara; | 571 | ri->ret_addr = (kprobe_opcode_t *) *sara; |
| 572 | ri->fp = sara; | ||
| 572 | 573 | ||
| 573 | /* Replace the return addr with trampoline addr */ | 574 | /* Replace the return addr with trampoline addr */ |
| 574 | *sara = (unsigned long) &kretprobe_trampoline; | 575 | *sara = (unsigned long) &kretprobe_trampoline; |
| @@ -748,26 +749,48 @@ asm( | |||
| 748 | NOKPROBE_SYMBOL(kretprobe_trampoline); | 749 | NOKPROBE_SYMBOL(kretprobe_trampoline); |
| 749 | STACK_FRAME_NON_STANDARD(kretprobe_trampoline); | 750 | STACK_FRAME_NON_STANDARD(kretprobe_trampoline); |
| 750 | 751 | ||
| 752 | static struct kprobe kretprobe_kprobe = { | ||
| 753 | .addr = (void *)kretprobe_trampoline, | ||
| 754 | }; | ||
| 755 | |||
| 751 | /* | 756 | /* |
| 752 | * Called from kretprobe_trampoline | 757 | * Called from kretprobe_trampoline |
| 753 | */ | 758 | */ |
| 754 | static __used void *trampoline_handler(struct pt_regs *regs) | 759 | static __used void *trampoline_handler(struct pt_regs *regs) |
| 755 | { | 760 | { |
| 761 | struct kprobe_ctlblk *kcb; | ||
| 756 | struct kretprobe_instance *ri = NULL; | 762 | struct kretprobe_instance *ri = NULL; |
| 757 | struct hlist_head *head, empty_rp; | 763 | struct hlist_head *head, empty_rp; |
| 758 | struct hlist_node *tmp; | 764 | struct hlist_node *tmp; |
| 759 | unsigned long flags, orig_ret_address = 0; | 765 | unsigned long flags, orig_ret_address = 0; |
| 760 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; | 766 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
| 761 | kprobe_opcode_t *correct_ret_addr = NULL; | 767 | kprobe_opcode_t *correct_ret_addr = NULL; |
| 768 | void *frame_pointer; | ||
| 769 | bool skipped = false; | ||
| 770 | |||
| 771 | preempt_disable(); | ||
| 772 | |||
| 773 | /* | ||
| 774 | * Set a dummy kprobe for avoiding kretprobe recursion. | ||
| 775 | * Since a kretprobe never runs in a kprobe handler, a kprobe must not | ||
| 776 | * be running at this point. | ||
| 777 | */ | ||
| 778 | kcb = get_kprobe_ctlblk(); | ||
| 779 | __this_cpu_write(current_kprobe, &kretprobe_kprobe); | ||
| 780 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
| 762 | 781 | ||
| 763 | INIT_HLIST_HEAD(&empty_rp); | 782 | INIT_HLIST_HEAD(&empty_rp); |
| 764 | kretprobe_hash_lock(current, &head, &flags); | 783 | kretprobe_hash_lock(current, &head, &flags); |
| 765 | /* fixup registers */ | 784 | /* fixup registers */ |
| 766 | #ifdef CONFIG_X86_64 | 785 | #ifdef CONFIG_X86_64 |
| 767 | regs->cs = __KERNEL_CS; | 786 | regs->cs = __KERNEL_CS; |
| 787 | /* On x86-64, we use pt_regs->sp for return address holder. */ | ||
| 788 | frame_pointer = ®s->sp; | ||
| 768 | #else | 789 | #else |
| 769 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | 790 | regs->cs = __KERNEL_CS | get_kernel_rpl(); |
| 770 | regs->gs = 0; | 791 | regs->gs = 0; |
| 792 | /* On x86-32, we use pt_regs->flags for return address holder. */ | ||
| 793 | frame_pointer = ®s->flags; | ||
| 771 | #endif | 794 | #endif |
| 772 | regs->ip = trampoline_address; | 795 | regs->ip = trampoline_address; |
| 773 | regs->orig_ax = ~0UL; | 796 | regs->orig_ax = ~0UL; |
| @@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs) | |||
| 789 | if (ri->task != current) | 812 | if (ri->task != current) |
| 790 | /* another task is sharing our hash bucket */ | 813 | /* another task is sharing our hash bucket */ |
| 791 | continue; | 814 | continue; |
| 815 | /* | ||
| 816 | * Return probes must be pushed on this hash list correct | ||
| 817 | * order (same as return order) so that it can be poped | ||
| 818 | * correctly. However, if we find it is pushed it incorrect | ||
| 819 | * order, this means we find a function which should not be | ||
| 820 | * probed, because the wrong order entry is pushed on the | ||
| 821 | * path of processing other kretprobe itself. | ||
| 822 | */ | ||
| 823 | if (ri->fp != frame_pointer) { | ||
| 824 | if (!skipped) | ||
| 825 | pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n"); | ||
| 826 | skipped = true; | ||
| 827 | continue; | ||
| 828 | } | ||
| 792 | 829 | ||
| 793 | orig_ret_address = (unsigned long)ri->ret_addr; | 830 | orig_ret_address = (unsigned long)ri->ret_addr; |
| 831 | if (skipped) | ||
| 832 | pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", | ||
| 833 | ri->rp->kp.addr); | ||
| 794 | 834 | ||
| 795 | if (orig_ret_address != trampoline_address) | 835 | if (orig_ret_address != trampoline_address) |
| 796 | /* | 836 | /* |
| @@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs) | |||
| 808 | if (ri->task != current) | 848 | if (ri->task != current) |
| 809 | /* another task is sharing our hash bucket */ | 849 | /* another task is sharing our hash bucket */ |
| 810 | continue; | 850 | continue; |
| 851 | if (ri->fp != frame_pointer) | ||
| 852 | continue; | ||
| 811 | 853 | ||
| 812 | orig_ret_address = (unsigned long)ri->ret_addr; | 854 | orig_ret_address = (unsigned long)ri->ret_addr; |
| 813 | if (ri->rp && ri->rp->handler) { | 855 | if (ri->rp && ri->rp->handler) { |
| 814 | __this_cpu_write(current_kprobe, &ri->rp->kp); | 856 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
| 815 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | ||
| 816 | ri->ret_addr = correct_ret_addr; | 857 | ri->ret_addr = correct_ret_addr; |
| 817 | ri->rp->handler(ri, regs); | 858 | ri->rp->handler(ri, regs); |
| 818 | __this_cpu_write(current_kprobe, NULL); | 859 | __this_cpu_write(current_kprobe, &kretprobe_kprobe); |
| 819 | } | 860 | } |
| 820 | 861 | ||
| 821 | recycle_rp_inst(ri, &empty_rp); | 862 | recycle_rp_inst(ri, &empty_rp); |
| @@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs) | |||
| 831 | 872 | ||
| 832 | kretprobe_hash_unlock(current, &flags); | 873 | kretprobe_hash_unlock(current, &flags); |
| 833 | 874 | ||
| 875 | __this_cpu_write(current_kprobe, NULL); | ||
| 876 | preempt_enable(); | ||
| 877 | |||
| 834 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { | 878 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
| 835 | hlist_del(&ri->hlist); | 879 | hlist_del(&ri->hlist); |
| 836 | kfree(ri); | 880 | kfree(ri); |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 58ac7be52c7a..957eae13b370 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, | |||
| 426 | u64 msr = x86_spec_ctrl_base; | 426 | u64 msr = x86_spec_ctrl_base; |
| 427 | bool updmsr = false; | 427 | bool updmsr = false; |
| 428 | 428 | ||
| 429 | lockdep_assert_irqs_disabled(); | ||
| 430 | |||
| 429 | /* | 431 | /* |
| 430 | * If TIF_SSBD is different, select the proper mitigation | 432 | * If TIF_SSBD is different, select the proper mitigation |
| 431 | * method. Note that if SSBD mitigation is disabled or permanently | 433 | * method. Note that if SSBD mitigation is disabled or permanently |
| @@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) | |||
| 477 | 479 | ||
| 478 | void speculation_ctrl_update(unsigned long tif) | 480 | void speculation_ctrl_update(unsigned long tif) |
| 479 | { | 481 | { |
| 482 | unsigned long flags; | ||
| 483 | |||
| 480 | /* Forced update. Make sure all relevant TIF flags are different */ | 484 | /* Forced update. Make sure all relevant TIF flags are different */ |
| 481 | preempt_disable(); | 485 | local_irq_save(flags); |
| 482 | __speculation_ctrl_update(~tif, tif); | 486 | __speculation_ctrl_update(~tif, tif); |
| 483 | preempt_enable(); | 487 | local_irq_restore(flags); |
| 484 | } | 488 | } |
| 485 | 489 | ||
| 486 | /* Called from seccomp/prctl update */ | 490 | /* Called from seccomp/prctl update */ |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 725624b6c0c0..8fd3cedd9acc 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) | |||
| 81 | return 0; | 81 | return 0; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | /* | ||
| 85 | * Some machines don't handle the default ACPI reboot method and | ||
| 86 | * require the EFI reboot method: | ||
| 87 | */ | ||
| 88 | static int __init set_efi_reboot(const struct dmi_system_id *d) | ||
| 89 | { | ||
| 90 | if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) { | ||
| 91 | reboot_type = BOOT_EFI; | ||
| 92 | pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident); | ||
| 93 | } | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | |||
| 84 | void __noreturn machine_real_restart(unsigned int type) | 97 | void __noreturn machine_real_restart(unsigned int type) |
| 85 | { | 98 | { |
| 86 | local_irq_disable(); | 99 | local_irq_disable(); |
| @@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { | |||
| 166 | DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), | 179 | DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), |
| 167 | }, | 180 | }, |
| 168 | }, | 181 | }, |
| 182 | { /* Handle reboot issue on Acer TravelMate X514-51T */ | ||
| 183 | .callback = set_efi_reboot, | ||
| 184 | .ident = "Acer TravelMate X514-51T", | ||
| 185 | .matches = { | ||
| 186 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
| 187 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"), | ||
| 188 | }, | ||
| 189 | }, | ||
| 169 | 190 | ||
| 170 | /* Apple */ | 191 | /* Apple */ |
| 171 | { /* Handle problems with rebooting on Apple MacBook5 */ | 192 | { /* Handle problems with rebooting on Apple MacBook5 */ |
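The new quirk follows the usual reboot_dmi_table pattern: DMI matches that identify the board plus a callback that switches reboot_type. For reference, another entry would look like the fragment below; this is not standalone code (it reuses set_efi_reboot and DMI_MATCH() from the hunk above), and the vendor and product strings are invented examples.

```c
	{ /* Hypothetical example: another board that needs the EFI method */
		.callback = set_efi_reboot,
		.ident = "Example ExampleBook 15",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook 15"),
		},
	},
```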
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bad8c51fee6e..a5127b2c195f 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
| @@ -362,7 +362,7 @@ SECTIONS | |||
| 362 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { | 362 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { |
| 363 | __bss_start = .; | 363 | __bss_start = .; |
| 364 | *(.bss..page_aligned) | 364 | *(.bss..page_aligned) |
| 365 | *(.bss) | 365 | *(BSS_MAIN) |
| 366 | BSS_DECRYPTED | 366 | BSS_DECRYPTED |
| 367 | . = ALIGN(PAGE_SIZE); | 367 | . = ALIGN(PAGE_SIZE); |
| 368 | __bss_stop = .; | 368 | __bss_stop = .; |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index c338984c850d..d0d5dd44b4f4 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
| @@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt) | |||
| 2331 | 2331 | ||
| 2332 | static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) | 2332 | static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) |
| 2333 | { | 2333 | { |
| 2334 | #ifdef CONFIG_X86_64 | ||
| 2334 | u32 eax, ebx, ecx, edx; | 2335 | u32 eax, ebx, ecx, edx; |
| 2335 | 2336 | ||
| 2336 | eax = 0x80000001; | 2337 | eax = 0x80000001; |
| 2337 | ecx = 0; | 2338 | ecx = 0; |
| 2338 | ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); | 2339 | ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); |
| 2339 | return edx & bit(X86_FEATURE_LM); | 2340 | return edx & bit(X86_FEATURE_LM); |
| 2341 | #else | ||
| 2342 | return false; | ||
| 2343 | #endif | ||
| 2340 | } | 2344 | } |
| 2341 | 2345 | ||
| 2342 | #define GET_SMSTATE(type, smbase, offset) \ | ||
| 2343 | ({ \ | ||
| 2344 | type __val; \ | ||
| 2345 | int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \ | ||
| 2346 | sizeof(__val)); \ | ||
| 2347 | if (r != X86EMUL_CONTINUE) \ | ||
| 2348 | return X86EMUL_UNHANDLEABLE; \ | ||
| 2349 | __val; \ | ||
| 2350 | }) | ||
| 2351 | |||
| 2352 | static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) | 2346 | static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) |
| 2353 | { | 2347 | { |
| 2354 | desc->g = (flags >> 23) & 1; | 2348 | desc->g = (flags >> 23) & 1; |
| @@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) | |||
| 2361 | desc->type = (flags >> 8) & 15; | 2355 | desc->type = (flags >> 8) & 15; |
| 2362 | } | 2356 | } |
| 2363 | 2357 | ||
| 2364 | static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) | 2358 | static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, |
| 2359 | int n) | ||
| 2365 | { | 2360 | { |
| 2366 | struct desc_struct desc; | 2361 | struct desc_struct desc; |
| 2367 | int offset; | 2362 | int offset; |
| 2368 | u16 selector; | 2363 | u16 selector; |
| 2369 | 2364 | ||
| 2370 | selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4); | 2365 | selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4); |
| 2371 | 2366 | ||
| 2372 | if (n < 3) | 2367 | if (n < 3) |
| 2373 | offset = 0x7f84 + n * 12; | 2368 | offset = 0x7f84 + n * 12; |
| 2374 | else | 2369 | else |
| 2375 | offset = 0x7f2c + (n - 3) * 12; | 2370 | offset = 0x7f2c + (n - 3) * 12; |
| 2376 | 2371 | ||
| 2377 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); | 2372 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); |
| 2378 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); | 2373 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); |
| 2379 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset)); | 2374 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset)); |
| 2380 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); | 2375 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); |
| 2381 | return X86EMUL_CONTINUE; | 2376 | return X86EMUL_CONTINUE; |
| 2382 | } | 2377 | } |
| 2383 | 2378 | ||
| 2384 | static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) | 2379 | #ifdef CONFIG_X86_64 |
| 2380 | static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, | ||
| 2381 | int n) | ||
| 2385 | { | 2382 | { |
| 2386 | struct desc_struct desc; | 2383 | struct desc_struct desc; |
| 2387 | int offset; | 2384 | int offset; |
| @@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) | |||
| 2390 | 2387 | ||
| 2391 | offset = 0x7e00 + n * 16; | 2388 | offset = 0x7e00 + n * 16; |
| 2392 | 2389 | ||
| 2393 | selector = GET_SMSTATE(u16, smbase, offset); | 2390 | selector = GET_SMSTATE(u16, smstate, offset); |
| 2394 | rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8); | 2391 | rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8); |
| 2395 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4)); | 2392 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4)); |
| 2396 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8)); | 2393 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8)); |
| 2397 | base3 = GET_SMSTATE(u32, smbase, offset + 12); | 2394 | base3 = GET_SMSTATE(u32, smstate, offset + 12); |
| 2398 | 2395 | ||
| 2399 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); | 2396 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); |
| 2400 | return X86EMUL_CONTINUE; | 2397 | return X86EMUL_CONTINUE; |
| 2401 | } | 2398 | } |
| 2399 | #endif | ||
| 2402 | 2400 | ||
| 2403 | static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, | 2401 | static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, |
| 2404 | u64 cr0, u64 cr3, u64 cr4) | 2402 | u64 cr0, u64 cr3, u64 cr4) |
| @@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, | |||
| 2445 | return X86EMUL_CONTINUE; | 2443 | return X86EMUL_CONTINUE; |
| 2446 | } | 2444 | } |
| 2447 | 2445 | ||
| 2448 | static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) | 2446 | static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, |
| 2447 | const char *smstate) | ||
| 2449 | { | 2448 | { |
| 2450 | struct desc_struct desc; | 2449 | struct desc_struct desc; |
| 2451 | struct desc_ptr dt; | 2450 | struct desc_ptr dt; |
| @@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
| 2453 | u32 val, cr0, cr3, cr4; | 2452 | u32 val, cr0, cr3, cr4; |
| 2454 | int i; | 2453 | int i; |
| 2455 | 2454 | ||
| 2456 | cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); | 2455 | cr0 = GET_SMSTATE(u32, smstate, 0x7ffc); |
| 2457 | cr3 = GET_SMSTATE(u32, smbase, 0x7ff8); | 2456 | cr3 = GET_SMSTATE(u32, smstate, 0x7ff8); |
| 2458 | ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; | 2457 | ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED; |
| 2459 | ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); | 2458 | ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0); |
| 2460 | 2459 | ||
| 2461 | for (i = 0; i < 8; i++) | 2460 | for (i = 0; i < 8; i++) |
| 2462 | *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4); | 2461 | *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); |
| 2463 | 2462 | ||
| 2464 | val = GET_SMSTATE(u32, smbase, 0x7fcc); | 2463 | val = GET_SMSTATE(u32, smstate, 0x7fcc); |
| 2465 | ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); | 2464 | ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); |
| 2466 | val = GET_SMSTATE(u32, smbase, 0x7fc8); | 2465 | val = GET_SMSTATE(u32, smstate, 0x7fc8); |
| 2467 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); | 2466 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); |
| 2468 | 2467 | ||
| 2469 | selector = GET_SMSTATE(u32, smbase, 0x7fc4); | 2468 | selector = GET_SMSTATE(u32, smstate, 0x7fc4); |
| 2470 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64)); | 2469 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64)); |
| 2471 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60)); | 2470 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60)); |
| 2472 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c)); | 2471 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c)); |
| 2473 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); | 2472 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); |
| 2474 | 2473 | ||
| 2475 | selector = GET_SMSTATE(u32, smbase, 0x7fc0); | 2474 | selector = GET_SMSTATE(u32, smstate, 0x7fc0); |
| 2476 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80)); | 2475 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80)); |
| 2477 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c)); | 2476 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c)); |
| 2478 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78)); | 2477 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78)); |
| 2479 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); | 2478 | ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); |
| 2480 | 2479 | ||
| 2481 | dt.address = GET_SMSTATE(u32, smbase, 0x7f74); | 2480 | dt.address = GET_SMSTATE(u32, smstate, 0x7f74); |
| 2482 | dt.size = GET_SMSTATE(u32, smbase, 0x7f70); | 2481 | dt.size = GET_SMSTATE(u32, smstate, 0x7f70); |
| 2483 | ctxt->ops->set_gdt(ctxt, &dt); | 2482 | ctxt->ops->set_gdt(ctxt, &dt); |
| 2484 | 2483 | ||
| 2485 | dt.address = GET_SMSTATE(u32, smbase, 0x7f58); | 2484 | dt.address = GET_SMSTATE(u32, smstate, 0x7f58); |
| 2486 | dt.size = GET_SMSTATE(u32, smbase, 0x7f54); | 2485 | dt.size = GET_SMSTATE(u32, smstate, 0x7f54); |
| 2487 | ctxt->ops->set_idt(ctxt, &dt); | 2486 | ctxt->ops->set_idt(ctxt, &dt); |
| 2488 | 2487 | ||
| 2489 | for (i = 0; i < 6; i++) { | 2488 | for (i = 0; i < 6; i++) { |
| 2490 | int r = rsm_load_seg_32(ctxt, smbase, i); | 2489 | int r = rsm_load_seg_32(ctxt, smstate, i); |
| 2491 | if (r != X86EMUL_CONTINUE) | 2490 | if (r != X86EMUL_CONTINUE) |
| 2492 | return r; | 2491 | return r; |
| 2493 | } | 2492 | } |
| 2494 | 2493 | ||
| 2495 | cr4 = GET_SMSTATE(u32, smbase, 0x7f14); | 2494 | cr4 = GET_SMSTATE(u32, smstate, 0x7f14); |
| 2496 | 2495 | ||
| 2497 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); | 2496 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8)); |
| 2498 | 2497 | ||
| 2499 | return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); | 2498 | return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); |
| 2500 | } | 2499 | } |
| 2501 | 2500 | ||
| 2502 | static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | 2501 | #ifdef CONFIG_X86_64 |
| 2502 | static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, | ||
| 2503 | const char *smstate) | ||
| 2503 | { | 2504 | { |
| 2504 | struct desc_struct desc; | 2505 | struct desc_struct desc; |
| 2505 | struct desc_ptr dt; | 2506 | struct desc_ptr dt; |
| @@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
| 2509 | int i, r; | 2510 | int i, r; |
| 2510 | 2511 | ||
| 2511 | for (i = 0; i < 16; i++) | 2512 | for (i = 0; i < 16; i++) |
| 2512 | *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); | 2513 | *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8); |
| 2513 | 2514 | ||
| 2514 | ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78); | 2515 | ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); |
| 2515 | ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED; | 2516 | ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; |
| 2516 | 2517 | ||
| 2517 | val = GET_SMSTATE(u32, smbase, 0x7f68); | 2518 | val = GET_SMSTATE(u32, smstate, 0x7f68); |
| 2518 | ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); | 2519 | ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); |
| 2519 | val = GET_SMSTATE(u32, smbase, 0x7f60); | 2520 | val = GET_SMSTATE(u32, smstate, 0x7f60); |
| 2520 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); | 2521 | ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); |
| 2521 | 2522 | ||
| 2522 | cr0 = GET_SMSTATE(u64, smbase, 0x7f58); | 2523 | cr0 = GET_SMSTATE(u64, smstate, 0x7f58); |
| 2523 | cr3 = GET_SMSTATE(u64, smbase, 0x7f50); | 2524 | cr3 = GET_SMSTATE(u64, smstate, 0x7f50); |
| 2524 | cr4 = GET_SMSTATE(u64, smbase, 0x7f48); | 2525 | cr4 = GET_SMSTATE(u64, smstate, 0x7f48); |
| 2525 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); | 2526 | ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00)); |
| 2526 | val = GET_SMSTATE(u64, smbase, 0x7ed0); | 2527 | val = GET_SMSTATE(u64, smstate, 0x7ed0); |
| 2527 | ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); | 2528 | ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); |
| 2528 | 2529 | ||
| 2529 | selector = GET_SMSTATE(u32, smbase, 0x7e90); | 2530 | selector = GET_SMSTATE(u32, smstate, 0x7e90); |
| 2530 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8); | 2531 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8); |
| 2531 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94)); | 2532 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94)); |
| 2532 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98)); | 2533 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98)); |
| 2533 | base3 = GET_SMSTATE(u32, smbase, 0x7e9c); | 2534 | base3 = GET_SMSTATE(u32, smstate, 0x7e9c); |
| 2534 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); | 2535 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); |
| 2535 | 2536 | ||
| 2536 | dt.size = GET_SMSTATE(u32, smbase, 0x7e84); | 2537 | dt.size = GET_SMSTATE(u32, smstate, 0x7e84); |
| 2537 | dt.address = GET_SMSTATE(u64, smbase, 0x7e88); | 2538 | dt.address = GET_SMSTATE(u64, smstate, 0x7e88); |
| 2538 | ctxt->ops->set_idt(ctxt, &dt); | 2539 | ctxt->ops->set_idt(ctxt, &dt); |
| 2539 | 2540 | ||
| 2540 | selector = GET_SMSTATE(u32, smbase, 0x7e70); | 2541 | selector = GET_SMSTATE(u32, smstate, 0x7e70); |
| 2541 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8); | 2542 | rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8); |
| 2542 | set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74)); | 2543 | set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74)); |
| 2543 | set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78)); | 2544 | set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78)); |
| 2544 | base3 = GET_SMSTATE(u32, smbase, 0x7e7c); | 2545 | base3 = GET_SMSTATE(u32, smstate, 0x7e7c); |
| 2545 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); | 2546 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); |
| 2546 | 2547 | ||
| 2547 | dt.size = GET_SMSTATE(u32, smbase, 0x7e64); | 2548 | dt.size = GET_SMSTATE(u32, smstate, 0x7e64); |
| 2548 | dt.address = GET_SMSTATE(u64, smbase, 0x7e68); | 2549 | dt.address = GET_SMSTATE(u64, smstate, 0x7e68); |
| 2549 | ctxt->ops->set_gdt(ctxt, &dt); | 2550 | ctxt->ops->set_gdt(ctxt, &dt); |
| 2550 | 2551 | ||
| 2551 | r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); | 2552 | r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); |
| @@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) | |||
| 2553 | return r; | 2554 | return r; |
| 2554 | 2555 | ||
| 2555 | for (i = 0; i < 6; i++) { | 2556 | for (i = 0; i < 6; i++) { |
| 2556 | r = rsm_load_seg_64(ctxt, smbase, i); | 2557 | r = rsm_load_seg_64(ctxt, smstate, i); |
| 2557 | if (r != X86EMUL_CONTINUE) | 2558 | if (r != X86EMUL_CONTINUE) |
| 2558 | return r; | 2559 | return r; |
| 2559 | } | 2560 | } |
| 2560 | 2561 | ||
| 2561 | return X86EMUL_CONTINUE; | 2562 | return X86EMUL_CONTINUE; |
| 2562 | } | 2563 | } |
| 2564 | #endif | ||
| 2563 | 2565 | ||
| 2564 | static int em_rsm(struct x86_emulate_ctxt *ctxt) | 2566 | static int em_rsm(struct x86_emulate_ctxt *ctxt) |
| 2565 | { | 2567 | { |
| 2566 | unsigned long cr0, cr4, efer; | 2568 | unsigned long cr0, cr4, efer; |
| 2569 | char buf[512]; | ||
| 2567 | u64 smbase; | 2570 | u64 smbase; |
| 2568 | int ret; | 2571 | int ret; |
| 2569 | 2572 | ||
| 2570 | if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) | 2573 | if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) |
| 2571 | return emulate_ud(ctxt); | 2574 | return emulate_ud(ctxt); |
| 2572 | 2575 | ||
| 2576 | smbase = ctxt->ops->get_smbase(ctxt); | ||
| 2577 | |||
| 2578 | ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); | ||
| 2579 | if (ret != X86EMUL_CONTINUE) | ||
| 2580 | return X86EMUL_UNHANDLEABLE; | ||
| 2581 | |||
| 2582 | if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) | ||
| 2583 | ctxt->ops->set_nmi_mask(ctxt, false); | ||
| 2584 | |||
| 2585 | ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & | ||
| 2586 | ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); | ||
| 2587 | |||
| 2573 | /* | 2588 | /* |
| 2574 | * Get back to real mode, to prepare a safe state in which to load | 2589 | * Get back to real mode, to prepare a safe state in which to load |
| 2575 | * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU | 2590 | * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU |
| 2576 | * supports long mode. | 2591 | * supports long mode. |
| 2577 | */ | 2592 | */ |
| 2578 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
| 2579 | if (emulator_has_longmode(ctxt)) { | 2593 | if (emulator_has_longmode(ctxt)) { |
| 2580 | struct desc_struct cs_desc; | 2594 | struct desc_struct cs_desc; |
| 2581 | 2595 | ||
| 2582 | /* Zero CR4.PCIDE before CR0.PG. */ | 2596 | /* Zero CR4.PCIDE before CR0.PG. */ |
| 2583 | if (cr4 & X86_CR4_PCIDE) { | 2597 | cr4 = ctxt->ops->get_cr(ctxt, 4); |
| 2598 | if (cr4 & X86_CR4_PCIDE) | ||
| 2584 | ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); | 2599 | ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); |
| 2585 | cr4 &= ~X86_CR4_PCIDE; | ||
| 2586 | } | ||
| 2587 | 2600 | ||
| 2588 | /* A 32-bit code segment is required to clear EFER.LMA. */ | 2601 | /* A 32-bit code segment is required to clear EFER.LMA. */ |
| 2589 | memset(&cs_desc, 0, sizeof(cs_desc)); | 2602 | memset(&cs_desc, 0, sizeof(cs_desc)); |
| @@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) | |||
| 2597 | if (cr0 & X86_CR0_PE) | 2610 | if (cr0 & X86_CR0_PE) |
| 2598 | ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); | 2611 | ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); |
| 2599 | 2612 | ||
| 2600 | /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */ | 2613 | if (emulator_has_longmode(ctxt)) { |
| 2601 | if (cr4 & X86_CR4_PAE) | 2614 | /* Clear CR4.PAE before clearing EFER.LME. */ |
| 2602 | ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); | 2615 | cr4 = ctxt->ops->get_cr(ctxt, 4); |
| 2603 | 2616 | if (cr4 & X86_CR4_PAE) | |
| 2604 | /* And finally go back to 32-bit mode. */ | 2617 | ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); |
| 2605 | efer = 0; | ||
| 2606 | ctxt->ops->set_msr(ctxt, MSR_EFER, efer); | ||
| 2607 | 2618 | ||
| 2608 | smbase = ctxt->ops->get_smbase(ctxt); | 2619 | /* And finally go back to 32-bit mode. */ |
| 2620 | efer = 0; | ||
| 2621 | ctxt->ops->set_msr(ctxt, MSR_EFER, efer); | ||
| 2622 | } | ||
| 2609 | 2623 | ||
| 2610 | /* | 2624 | /* |
| 2611 | * Give pre_leave_smm() a chance to make ISA-specific changes to the | 2625 | * Give pre_leave_smm() a chance to make ISA-specific changes to the |
| 2612 | * vCPU state (e.g. enter guest mode) before loading state from the SMM | 2626 | * vCPU state (e.g. enter guest mode) before loading state from the SMM |
| 2613 | * state-save area. | 2627 | * state-save area. |
| 2614 | */ | 2628 | */ |
| 2615 | if (ctxt->ops->pre_leave_smm(ctxt, smbase)) | 2629 | if (ctxt->ops->pre_leave_smm(ctxt, buf)) |
| 2616 | return X86EMUL_UNHANDLEABLE; | 2630 | return X86EMUL_UNHANDLEABLE; |
| 2617 | 2631 | ||
| 2632 | #ifdef CONFIG_X86_64 | ||
| 2618 | if (emulator_has_longmode(ctxt)) | 2633 | if (emulator_has_longmode(ctxt)) |
| 2619 | ret = rsm_load_state_64(ctxt, smbase + 0x8000); | 2634 | ret = rsm_load_state_64(ctxt, buf); |
| 2620 | else | 2635 | else |
| 2621 | ret = rsm_load_state_32(ctxt, smbase + 0x8000); | 2636 | #endif |
| 2637 | ret = rsm_load_state_32(ctxt, buf); | ||
| 2622 | 2638 | ||
| 2623 | if (ret != X86EMUL_CONTINUE) { | 2639 | if (ret != X86EMUL_CONTINUE) { |
| 2624 | /* FIXME: should triple fault */ | 2640 | /* FIXME: should triple fault */ |
| 2625 | return X86EMUL_UNHANDLEABLE; | 2641 | return X86EMUL_UNHANDLEABLE; |
| 2626 | } | 2642 | } |
| 2627 | 2643 | ||
| 2628 | if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) | 2644 | ctxt->ops->post_leave_smm(ctxt); |
| 2629 | ctxt->ops->set_nmi_mask(ctxt, false); | ||
| 2630 | 2645 | ||
| 2631 | ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & | ||
| 2632 | ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); | ||
| 2633 | return X86EMUL_CONTINUE; | 2646 | return X86EMUL_CONTINUE; |
| 2634 | } | 2647 | } |
| 2635 | 2648 | ||
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 991fdf7fc17f..9bf70cf84564 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -138,6 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, | |||
| 138 | if (offset <= max_apic_id) { | 138 | if (offset <= max_apic_id) { |
| 139 | u8 cluster_size = min(max_apic_id - offset + 1, 16U); | 139 | u8 cluster_size = min(max_apic_id - offset + 1, 16U); |
| 140 | 140 | ||
| 141 | offset = array_index_nospec(offset, map->max_apic_id + 1); | ||
| 141 | *cluster = &map->phys_map[offset]; | 142 | *cluster = &map->phys_map[offset]; |
| 142 | *mask = dest_id & (0xffff >> (16 - cluster_size)); | 143 | *mask = dest_id & (0xffff >> (16 - cluster_size)); |
| 143 | } else { | 144 | } else { |
| @@ -901,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, | |||
| 901 | if (irq->dest_id > map->max_apic_id) { | 902 | if (irq->dest_id > map->max_apic_id) { |
| 902 | *bitmap = 0; | 903 | *bitmap = 0; |
| 903 | } else { | 904 | } else { |
| 904 | *dst = &map->phys_map[irq->dest_id]; | 905 | u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1); |
| 906 | *dst = &map->phys_map[dest_id]; | ||
| 905 | *bitmap = 1; | 907 | *bitmap = 1; |
| 906 | } | 908 | } |
| 907 | return true; | 909 | return true; |
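Both hunks above follow the same Spectre-v1 hardening pattern: bounds-check the guest-controlled index, then clamp it with array_index_nospec() before dereferencing phys_map[], so a mispredicted branch cannot speculatively read past the table. A self-contained sketch of the idea (simplified: the kernel helper in <linux/nospec.h> builds the mask without a comparison so the compiler cannot reintroduce a branch):

```c
#include <stddef.h>

/*
 * Simplified stand-in for array_index_nospec(): yields the index when it is
 * below 'size' and 0 otherwise, via a branchless mask. The real helper
 * avoids the comparison entirely; this only sketches the effect.
 */
static inline size_t index_nospec_sketch(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);   /* ~0 if in range */

	return index & mask;
}

/* Usage mirroring the hunks above: check, clamp, then dereference. */
static void *lookup_clamped(void **table, size_t table_max, size_t idx)
{
	if (idx > table_max)
		return NULL;

	idx = index_nospec_sketch(idx, table_max + 1);
	return table[idx];
}
```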
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index eee455a8a612..e10962dfc203 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt) | |||
| 2007 | * aggregate version in order to make the slab shrinker | 2007 | * aggregate version in order to make the slab shrinker |
| 2008 | * faster | 2008 | * faster |
| 2009 | */ | 2009 | */ |
| 2010 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) | 2010 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) |
| 2011 | { | 2011 | { |
| 2012 | kvm->arch.n_used_mmu_pages += nr; | 2012 | kvm->arch.n_used_mmu_pages += nr; |
| 2013 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); | 2013 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| @@ -2238,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, | |||
| 2238 | struct list_head *invalid_list, | 2238 | struct list_head *invalid_list, |
| 2239 | bool remote_flush) | 2239 | bool remote_flush) |
| 2240 | { | 2240 | { |
| 2241 | if (!remote_flush && !list_empty(invalid_list)) | 2241 | if (!remote_flush && list_empty(invalid_list)) |
| 2242 | return false; | 2242 | return false; |
| 2243 | 2243 | ||
| 2244 | if (!list_empty(invalid_list)) | 2244 | if (!list_empty(invalid_list)) |
| @@ -2763,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, | |||
| 2763 | * Changing the number of mmu pages allocated to the vm | 2763 | * Changing the number of mmu pages allocated to the vm |
| 2764 | * Note: if goal_nr_mmu_pages is too small, you will get dead lock | 2764 | * Note: if goal_nr_mmu_pages is too small, you will get dead lock |
| 2765 | */ | 2765 | */ |
| 2766 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) | 2766 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) |
| 2767 | { | 2767 | { |
| 2768 | LIST_HEAD(invalid_list); | 2768 | LIST_HEAD(invalid_list); |
| 2769 | 2769 | ||
| @@ -6031,10 +6031,10 @@ out: | |||
| 6031 | /* | 6031 | /* |
| 6032 | * Calculate mmu pages needed for kvm. | 6032 | * Calculate mmu pages needed for kvm. |
| 6033 | */ | 6033 | */ |
| 6034 | unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) | 6034 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
| 6035 | { | 6035 | { |
| 6036 | unsigned int nr_mmu_pages; | 6036 | unsigned long nr_mmu_pages; |
| 6037 | unsigned int nr_pages = 0; | 6037 | unsigned long nr_pages = 0; |
| 6038 | struct kvm_memslots *slots; | 6038 | struct kvm_memslots *slots; |
| 6039 | struct kvm_memory_slot *memslot; | 6039 | struct kvm_memory_slot *memslot; |
| 6040 | int i; | 6040 | int i; |
| @@ -6047,8 +6047,7 @@ unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) | |||
| 6047 | } | 6047 | } |
| 6048 | 6048 | ||
| 6049 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; | 6049 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
| 6050 | nr_mmu_pages = max(nr_mmu_pages, | 6050 | nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); |
| 6051 | (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); | ||
| 6052 | 6051 | ||
| 6053 | return nr_mmu_pages; | 6052 | return nr_mmu_pages; |
| 6054 | } | 6053 | } |
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index bbdc60f2fae8..54c2a377795b 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h | |||
| @@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); | |||
| 64 | int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, | 64 | int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, |
| 65 | u64 fault_address, char *insn, int insn_len); | 65 | u64 fault_address, char *insn, int insn_len); |
| 66 | 66 | ||
| 67 | static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) | 67 | static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) |
| 68 | { | 68 | { |
| 69 | if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) | 69 | if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) |
| 70 | return kvm->arch.n_max_mmu_pages - | 70 | return kvm->arch.n_max_mmu_pages - |
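The counters are widened because the intermediate product in kvm_mmu_calculate_default_mmu_pages() can overflow a 32-bit unsigned int on large guests before the division by 1000 scales it back down; assuming KVM_PERMILLE_MMU_PAGES is 20, the wrap starts at roughly 820 GiB of guest memory. A small self-contained demonstration of the arithmetic (values are illustrative, LP64 assumed):

```c
#include <stdio.h>

int main(void)
{
	/* ~1 TiB of guest memory expressed in 4 KiB pages. */
	unsigned int  nr_pages32 = 250000000U;
	unsigned long nr_pages64 = 250000000UL;

	/* KVM_PERMILLE_MMU_PAGES is assumed to be 20 here. */
	unsigned int  narrow = nr_pages32 * 20U / 1000U;   /* product wraps first */
	unsigned long wide   = nr_pages64 * 20UL / 1000UL; /* 64-bit on LP64 */

	printf("32-bit result: %u pages, 64-bit result: %lu pages\n",
	       narrow, wide);
	return 0;
}
```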
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 58ead7db71a3..e39741997893 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
| @@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) | |||
| 281 | int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) | 281 | int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) |
| 282 | { | 282 | { |
| 283 | bool fast_mode = idx & (1u << 31); | 283 | bool fast_mode = idx & (1u << 31); |
| 284 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); | ||
| 284 | struct kvm_pmc *pmc; | 285 | struct kvm_pmc *pmc; |
| 285 | u64 ctr_val; | 286 | u64 ctr_val; |
| 286 | 287 | ||
| 288 | if (!pmu->version) | ||
| 289 | return 1; | ||
| 290 | |||
| 287 | if (is_vmware_backdoor_pmc(idx)) | 291 | if (is_vmware_backdoor_pmc(idx)) |
| 288 | return kvm_pmu_rdpmc_vmware(vcpu, idx, data); | 292 | return kvm_pmu_rdpmc_vmware(vcpu, idx, data); |
| 289 | 293 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 426039285fd1..406b558abfef 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -262,6 +262,7 @@ struct amd_svm_iommu_ir { | |||
| 262 | }; | 262 | }; |
| 263 | 263 | ||
| 264 | #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF) | 264 | #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF) |
| 265 | #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31 | ||
| 265 | #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) | 266 | #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) |
| 266 | 267 | ||
| 267 | #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL) | 268 | #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL) |
| @@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm) | |||
| 2692 | static int db_interception(struct vcpu_svm *svm) | 2693 | static int db_interception(struct vcpu_svm *svm) |
| 2693 | { | 2694 | { |
| 2694 | struct kvm_run *kvm_run = svm->vcpu.run; | 2695 | struct kvm_run *kvm_run = svm->vcpu.run; |
| 2696 | struct kvm_vcpu *vcpu = &svm->vcpu; | ||
| 2695 | 2697 | ||
| 2696 | if (!(svm->vcpu.guest_debug & | 2698 | if (!(svm->vcpu.guest_debug & |
| 2697 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && | 2699 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
| @@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm) | |||
| 2702 | 2704 | ||
| 2703 | if (svm->nmi_singlestep) { | 2705 | if (svm->nmi_singlestep) { |
| 2704 | disable_nmi_singlestep(svm); | 2706 | disable_nmi_singlestep(svm); |
| 2707 | /* Make sure we check for pending NMIs upon entry */ | ||
| 2708 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
| 2705 | } | 2709 | } |
| 2706 | 2710 | ||
| 2707 | if (svm->vcpu.guest_debug & | 2711 | if (svm->vcpu.guest_debug & |
| @@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) | |||
| 4517 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); | 4521 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); |
| 4518 | break; | 4522 | break; |
| 4519 | case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { | 4523 | case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { |
| 4524 | int i; | ||
| 4525 | struct kvm_vcpu *vcpu; | ||
| 4526 | struct kvm *kvm = svm->vcpu.kvm; | ||
| 4520 | struct kvm_lapic *apic = svm->vcpu.arch.apic; | 4527 | struct kvm_lapic *apic = svm->vcpu.arch.apic; |
| 4521 | 4528 | ||
| 4522 | /* | 4529 | /* |
| 4523 | * Update ICR high and low, then emulate sending IPI, | 4530 | * At this point, we expect that the AVIC HW has already |
| 4524 | * which is handled when writing APIC_ICR. | 4531 | * set the appropriate IRR bits on the valid target |
| 4532 | * vcpus. So, we just need to kick the appropriate vcpu. | ||
| 4525 | */ | 4533 | */ |
| 4526 | kvm_lapic_reg_write(apic, APIC_ICR2, icrh); | 4534 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 4527 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); | 4535 | bool m = kvm_apic_match_dest(vcpu, apic, |
| 4536 | icrl & KVM_APIC_SHORT_MASK, | ||
| 4537 | GET_APIC_DEST_FIELD(icrh), | ||
| 4538 | icrl & KVM_APIC_DEST_MASK); | ||
| 4539 | |||
| 4540 | if (m && !avic_vcpu_is_running(vcpu)) | ||
| 4541 | kvm_vcpu_wake_up(vcpu); | ||
| 4542 | } | ||
| 4528 | break; | 4543 | break; |
| 4529 | } | 4544 | } |
| 4530 | case AVIC_IPI_FAILURE_INVALID_TARGET: | 4545 | case AVIC_IPI_FAILURE_INVALID_TARGET: |
| @@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu) | |||
| 4596 | u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); | 4611 | u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); |
| 4597 | 4612 | ||
| 4598 | if (entry) | 4613 | if (entry) |
| 4599 | WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK); | 4614 | clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry); |
| 4600 | } | 4615 | } |
| 4601 | 4616 | ||
| 4602 | static int avic_handle_ldr_update(struct kvm_vcpu *vcpu) | 4617 | static int avic_handle_ldr_update(struct kvm_vcpu *vcpu) |
| @@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 5621 | svm->vmcb->save.cr2 = vcpu->arch.cr2; | 5636 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
| 5622 | 5637 | ||
| 5623 | clgi(); | 5638 | clgi(); |
| 5639 | kvm_load_guest_xcr0(vcpu); | ||
| 5624 | 5640 | ||
| 5625 | /* | 5641 | /* |
| 5626 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if | 5642 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
| @@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 5766 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) | 5782 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
| 5767 | kvm_before_interrupt(&svm->vcpu); | 5783 | kvm_before_interrupt(&svm->vcpu); |
| 5768 | 5784 | ||
| 5785 | kvm_put_guest_xcr0(vcpu); | ||
| 5769 | stgi(); | 5786 | stgi(); |
| 5770 | 5787 | ||
| 5771 | /* Any pending NMI will happen here */ | 5788 | /* Any pending NMI will happen here */ |
| @@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) | |||
| 6215 | return 0; | 6232 | return 0; |
| 6216 | } | 6233 | } |
| 6217 | 6234 | ||
| 6218 | static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) | 6235 | static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
| 6219 | { | 6236 | { |
| 6220 | struct vcpu_svm *svm = to_svm(vcpu); | 6237 | struct vcpu_svm *svm = to_svm(vcpu); |
| 6221 | struct vmcb *nested_vmcb; | 6238 | struct vmcb *nested_vmcb; |
| 6222 | struct page *page; | 6239 | struct page *page; |
| 6223 | struct { | 6240 | u64 guest; |
| 6224 | u64 guest; | 6241 | u64 vmcb; |
| 6225 | u64 vmcb; | ||
| 6226 | } svm_state_save; | ||
| 6227 | int ret; | ||
| 6228 | 6242 | ||
| 6229 | ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save, | 6243 | guest = GET_SMSTATE(u64, smstate, 0x7ed8); |
| 6230 | sizeof(svm_state_save)); | 6244 | vmcb = GET_SMSTATE(u64, smstate, 0x7ee0); |
| 6231 | if (ret) | ||
| 6232 | return ret; | ||
| 6233 | 6245 | ||
| 6234 | if (svm_state_save.guest) { | 6246 | if (guest) { |
| 6235 | vcpu->arch.hflags &= ~HF_SMM_MASK; | 6247 | nested_vmcb = nested_svm_map(svm, vmcb, &page); |
| 6236 | nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page); | 6248 | if (!nested_vmcb) |
| 6237 | if (nested_vmcb) | 6249 | return 1; |
| 6238 | enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page); | 6250 | enter_svm_guest_mode(svm, vmcb, nested_vmcb, page); |
| 6239 | else | ||
| 6240 | ret = 1; | ||
| 6241 | vcpu->arch.hflags |= HF_SMM_MASK; | ||
| 6242 | } | 6251 | } |
| 6243 | return ret; | 6252 | return 0; |
| 6244 | } | 6253 | } |
| 6245 | 6254 | ||
| 6246 | static int enable_smi_window(struct kvm_vcpu *vcpu) | 6255 | static int enable_smi_window(struct kvm_vcpu *vcpu) |
| @@ -6422,11 +6431,11 @@ e_free: | |||
| 6422 | return ret; | 6431 | return ret; |
| 6423 | } | 6432 | } |
| 6424 | 6433 | ||
| 6425 | static int get_num_contig_pages(int idx, struct page **inpages, | 6434 | static unsigned long get_num_contig_pages(unsigned long idx, |
| 6426 | unsigned long npages) | 6435 | struct page **inpages, unsigned long npages) |
| 6427 | { | 6436 | { |
| 6428 | unsigned long paddr, next_paddr; | 6437 | unsigned long paddr, next_paddr; |
| 6429 | int i = idx + 1, pages = 1; | 6438 | unsigned long i = idx + 1, pages = 1; |
| 6430 | 6439 | ||
| 6431 | /* find the number of contiguous pages starting from idx */ | 6440 | /* find the number of contiguous pages starting from idx */ |
| 6432 | paddr = __sme_page_pa(inpages[idx]); | 6441 | paddr = __sme_page_pa(inpages[idx]); |
| @@ -6445,12 +6454,12 @@ static int get_num_contig_pages(int idx, struct page **inpages, | |||
| 6445 | 6454 | ||
| 6446 | static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) | 6455 | static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 6447 | { | 6456 | { |
| 6448 | unsigned long vaddr, vaddr_end, next_vaddr, npages, size; | 6457 | unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; |
| 6449 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; | 6458 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
| 6450 | struct kvm_sev_launch_update_data params; | 6459 | struct kvm_sev_launch_update_data params; |
| 6451 | struct sev_data_launch_update_data *data; | 6460 | struct sev_data_launch_update_data *data; |
| 6452 | struct page **inpages; | 6461 | struct page **inpages; |
| 6453 | int i, ret, pages; | 6462 | int ret; |
| 6454 | 6463 | ||
| 6455 | if (!sev_guest(kvm)) | 6464 | if (!sev_guest(kvm)) |
| 6456 | return -ENOTTY; | 6465 | return -ENOTTY; |
| @@ -6799,7 +6808,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6799 | struct page **src_p, **dst_p; | 6808 | struct page **src_p, **dst_p; |
| 6800 | struct kvm_sev_dbg debug; | 6809 | struct kvm_sev_dbg debug; |
| 6801 | unsigned long n; | 6810 | unsigned long n; |
| 6802 | int ret, size; | 6811 | unsigned int size; |
| 6812 | int ret; | ||
| 6803 | 6813 | ||
| 6804 | if (!sev_guest(kvm)) | 6814 | if (!sev_guest(kvm)) |
| 6805 | return -ENOTTY; | 6815 | return -ENOTTY; |
| @@ -6807,6 +6817,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6807 | if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) | 6817 | if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) |
| 6808 | return -EFAULT; | 6818 | return -EFAULT; |
| 6809 | 6819 | ||
| 6820 | if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) | ||
| 6821 | return -EINVAL; | ||
| 6822 | if (!debug.dst_uaddr) | ||
| 6823 | return -EINVAL; | ||
| 6824 | |||
| 6810 | vaddr = debug.src_uaddr; | 6825 | vaddr = debug.src_uaddr; |
| 6811 | size = debug.len; | 6826 | size = debug.len; |
| 6812 | vaddr_end = vaddr + size; | 6827 | vaddr_end = vaddr + size; |
| @@ -6857,8 +6872,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) | |||
| 6857 | dst_vaddr, | 6872 | dst_vaddr, |
| 6858 | len, &argp->error); | 6873 | len, &argp->error); |
| 6859 | 6874 | ||
| 6860 | sev_unpin_memory(kvm, src_p, 1); | 6875 | sev_unpin_memory(kvm, src_p, n); |
| 6861 | sev_unpin_memory(kvm, dst_p, 1); | 6876 | sev_unpin_memory(kvm, dst_p, n); |
| 6862 | 6877 | ||
| 6863 | if (ret) | 6878 | if (ret) |
| 6864 | goto err; | 6879 | goto err; |
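The new sev_dbg_crypt() checks reject an empty range, a missing destination, and a src_uaddr + len that wraps around the top of the address space, which would otherwise confuse the page-walking loop that follows. The same validation in a self-contained form (parameter names are stand-ins for the kvm_sev_dbg members used above):

```c
#include <stdbool.h>
#include <stdint.h>

/* Reject empty ranges, wrapped ranges, and a missing destination address. */
static bool dbg_range_is_valid(uint64_t src_uaddr, uint64_t dst_uaddr,
			       uint64_t len)
{
	if (!len)
		return false;
	if (src_uaddr + len < src_uaddr)     /* unsigned wraparound */
		return false;
	if (!dst_uaddr)
		return false;
	return true;
}
```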
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 6432d08c7de7..4d47a2631d1f 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
| @@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi, | |||
| 438 | ); | 438 | ); |
| 439 | 439 | ||
| 440 | TRACE_EVENT(kvm_apic_accept_irq, | 440 | TRACE_EVENT(kvm_apic_accept_irq, |
| 441 | TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec), | 441 | TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), |
| 442 | TP_ARGS(apicid, dm, tm, vec), | 442 | TP_ARGS(apicid, dm, tm, vec), |
| 443 | 443 | ||
| 444 | TP_STRUCT__entry( | 444 | TP_STRUCT__entry( |
| 445 | __field( __u32, apicid ) | 445 | __field( __u32, apicid ) |
| 446 | __field( __u16, dm ) | 446 | __field( __u16, dm ) |
| 447 | __field( __u8, tm ) | 447 | __field( __u16, tm ) |
| 448 | __field( __u8, vec ) | 448 | __field( __u8, vec ) |
| 449 | ), | 449 | ), |
| 450 | 450 | ||
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 153e539c29c9..6401eb7ef19c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
| @@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, | |||
| 500 | } | 500 | } |
| 501 | } | 501 | } |
| 502 | 502 | ||
| 503 | static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) { | ||
| 504 | int msr; | ||
| 505 | |||
| 506 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 507 | unsigned word = msr / BITS_PER_LONG; | ||
| 508 | |||
| 509 | msr_bitmap[word] = ~0; | ||
| 510 | msr_bitmap[word + (0x800 / sizeof(long))] = ~0; | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
| 503 | /* | 514 | /* |
| 504 | * Merge L0's and L1's MSR bitmap, return false to indicate that | 515 | * Merge L0's and L1's MSR bitmap, return false to indicate that |
| 505 | * we do not use the hardware. | 516 | * we do not use the hardware. |
| @@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, | |||
| 541 | return false; | 552 | return false; |
| 542 | 553 | ||
| 543 | msr_bitmap_l1 = (unsigned long *)kmap(page); | 554 | msr_bitmap_l1 = (unsigned long *)kmap(page); |
| 544 | if (nested_cpu_has_apic_reg_virt(vmcs12)) { | ||
| 545 | /* | ||
| 546 | * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it | ||
| 547 | * just lets the processor take the value from the virtual-APIC page; | ||
| 548 | * take those 256 bits directly from the L1 bitmap. | ||
| 549 | */ | ||
| 550 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 551 | unsigned word = msr / BITS_PER_LONG; | ||
| 552 | msr_bitmap_l0[word] = msr_bitmap_l1[word]; | ||
| 553 | msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; | ||
| 554 | } | ||
| 555 | } else { | ||
| 556 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 557 | unsigned word = msr / BITS_PER_LONG; | ||
| 558 | msr_bitmap_l0[word] = ~0; | ||
| 559 | msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; | ||
| 560 | } | ||
| 561 | } | ||
| 562 | 555 | ||
| 563 | nested_vmx_disable_intercept_for_msr( | 556 | /* |
| 564 | msr_bitmap_l1, msr_bitmap_l0, | 557 | * To keep the control flow simple, pay eight 8-byte writes (sixteen |
| 565 | X2APIC_MSR(APIC_TASKPRI), | 558 | * 4-byte writes on 32-bit systems) up front to enable intercepts for |
| 566 | MSR_TYPE_W); | 559 | * the x2APIC MSR range and selectively disable them below. |
| 560 | */ | ||
| 561 | enable_x2apic_msr_intercepts(msr_bitmap_l0); | ||
| 562 | |||
| 563 | if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { | ||
| 564 | if (nested_cpu_has_apic_reg_virt(vmcs12)) { | ||
| 565 | /* | ||
| 566 | * L0 need not intercept reads for MSRs between 0x800 | ||
| 567 | * and 0x8ff, it just lets the processor take the value | ||
| 568 | * from the virtual-APIC page; take those 256 bits | ||
| 569 | * directly from the L1 bitmap. | ||
| 570 | */ | ||
| 571 | for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { | ||
| 572 | unsigned word = msr / BITS_PER_LONG; | ||
| 573 | |||
| 574 | msr_bitmap_l0[word] = msr_bitmap_l1[word]; | ||
| 575 | } | ||
| 576 | } | ||
| 567 | 577 | ||
| 568 | if (nested_cpu_has_vid(vmcs12)) { | ||
| 569 | nested_vmx_disable_intercept_for_msr( | ||
| 570 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 571 | X2APIC_MSR(APIC_EOI), | ||
| 572 | MSR_TYPE_W); | ||
| 573 | nested_vmx_disable_intercept_for_msr( | 578 | nested_vmx_disable_intercept_for_msr( |
| 574 | msr_bitmap_l1, msr_bitmap_l0, | 579 | msr_bitmap_l1, msr_bitmap_l0, |
| 575 | X2APIC_MSR(APIC_SELF_IPI), | 580 | X2APIC_MSR(APIC_TASKPRI), |
| 576 | MSR_TYPE_W); | 581 | MSR_TYPE_R | MSR_TYPE_W); |
| 582 | |||
| 583 | if (nested_cpu_has_vid(vmcs12)) { | ||
| 584 | nested_vmx_disable_intercept_for_msr( | ||
| 585 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 586 | X2APIC_MSR(APIC_EOI), | ||
| 587 | MSR_TYPE_W); | ||
| 588 | nested_vmx_disable_intercept_for_msr( | ||
| 589 | msr_bitmap_l1, msr_bitmap_l0, | ||
| 590 | X2APIC_MSR(APIC_SELF_IPI), | ||
| 591 | MSR_TYPE_W); | ||
| 592 | } | ||
| 577 | } | 593 | } |
| 578 | 594 | ||
| 579 | if (spec_ctrl) | 595 | if (spec_ctrl) |
| @@ -2857,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) | |||
| 2857 | /* | 2873 | /* |
| 2858 | * If translation failed, VM entry will fail because | 2874 | * If translation failed, VM entry will fail because |
| 2859 | * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. | 2875 | * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. |
| 2860 | * Failing the vm entry is _not_ what the processor | ||
| 2861 | * does but it's basically the only possibility we | ||
| 2862 | * have. We could still enter the guest if CR8 load | ||
| 2863 | * exits are enabled, CR8 store exits are enabled, and | ||
| 2864 | * virtualize APIC access is disabled; in this case | ||
| 2865 | * the processor would never use the TPR shadow and we | ||
| 2866 | * could simply clear the bit from the execution | ||
| 2867 | * control. But such a configuration is useless, so | ||
| 2868 | * let's keep the code simple. | ||
| 2869 | */ | 2876 | */ |
| 2870 | if (!is_error_page(page)) { | 2877 | if (!is_error_page(page)) { |
| 2871 | vmx->nested.virtual_apic_page = page; | 2878 | vmx->nested.virtual_apic_page = page; |
| 2872 | hpa = page_to_phys(vmx->nested.virtual_apic_page); | 2879 | hpa = page_to_phys(vmx->nested.virtual_apic_page); |
| 2873 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); | 2880 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); |
| 2881 | } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && | ||
| 2882 | nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && | ||
| 2883 | !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { | ||
| 2884 | /* | ||
| 2885 | * The processor will never use the TPR shadow, so simply | ||
| 2886 | * clear the bit from the execution control. Such a | ||
| 2887 | * configuration is useless, but it happens in tests. | ||
| 2888 | * For any other configuration, failing the vm entry is | ||
| 2889 | * _not_ what the processor does but it's basically the | ||
| 2890 | * only possibility we have. | ||
| 2891 | */ | ||
| 2892 | vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, | ||
| 2893 | CPU_BASED_TPR_SHADOW); | ||
| 2894 | } else { | ||
| 2895 | printk("bad virtual-APIC page address\n"); | ||
| 2896 | dump_vmcs(); | ||
| 2874 | } | 2897 | } |
| 2875 | } | 2898 | } |
| 2876 | 2899 | ||
| @@ -3773,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) | |||
| 3773 | vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); | 3796 | vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); |
| 3774 | 3797 | ||
| 3775 | nested_ept_uninit_mmu_context(vcpu); | 3798 | nested_ept_uninit_mmu_context(vcpu); |
| 3776 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | 3799 | |
| 3777 | __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); | 3800 | /* |
| 3801 | * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3 | ||
| 3802 | * points to shadow pages! Fortunately we only get here after a WARN_ON | ||
| 3803 | * if EPT is disabled, so a VMabort is perfectly fine. | ||
| 3804 | */ | ||
| 3805 | if (enable_ept) { | ||
| 3806 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); | ||
| 3807 | __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); | ||
| 3808 | } else { | ||
| 3809 | nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED); | ||
| 3810 | } | ||
| 3778 | 3811 | ||
| 3779 | /* | 3812 | /* |
| 3780 | * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs | 3813 | * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs |
| @@ -5722,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) | |||
| 5722 | { | 5755 | { |
| 5723 | int i; | 5756 | int i; |
| 5724 | 5757 | ||
| 5758 | /* | ||
| 5759 | * Without EPT it is not possible to restore L1's CR3 and PDPTR on | ||
| 5760 | * VMfail, because they are not available in vmcs01. Just always | ||
| 5761 | * use hardware checks. | ||
| 5762 | */ | ||
| 5763 | if (!enable_ept) | ||
| 5764 | nested_early_check = 1; | ||
| 5765 | |||
| 5725 | if (!cpu_has_vmx_shadow_vmcs()) | 5766 | if (!cpu_has_vmx_shadow_vmcs()) |
| 5726 | enable_shadow_vmcs = 0; | 5767 | enable_shadow_vmcs = 0; |
| 5727 | if (enable_shadow_vmcs) { | 5768 | if (enable_shadow_vmcs) { |
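enable_x2apic_msr_intercepts() touches both halves of the VMX MSR bitmap for the 0x800-0x8ff range: the read-intercept bits sit at the start of the page and the write-intercept bits begin 0x800 bytes in, which is why the second store is offset by 0x800 / sizeof(long) words. A small self-contained sketch of that index arithmetic (layout taken from the hunk above, simplified):

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		/* Word holding the read-intercept bits for this MSR block. */
		unsigned long read_word  = msr / BITS_PER_LONG;
		/* Write-intercept bits start 0x800 bytes into the page. */
		unsigned long write_word = read_word + 0x800 / sizeof(unsigned long);

		printf("msr 0x%03x: read word %lu, write word %lu\n",
		       msr, read_word, write_word);
	}
	return 0;
}
```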
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ab432a930ae8..b4e7d645275a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
| @@ -5603,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit) | |||
| 5603 | vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); | 5603 | vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); |
| 5604 | } | 5604 | } |
| 5605 | 5605 | ||
| 5606 | static void dump_vmcs(void) | 5606 | void dump_vmcs(void) |
| 5607 | { | 5607 | { |
| 5608 | u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); | 5608 | u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); |
| 5609 | u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); | 5609 | u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); |
| @@ -6410,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6410 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | 6410 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) |
| 6411 | vmx_set_interrupt_shadow(vcpu, 0); | 6411 | vmx_set_interrupt_shadow(vcpu, 0); |
| 6412 | 6412 | ||
| 6413 | kvm_load_guest_xcr0(vcpu); | ||
| 6414 | |||
| 6413 | if (static_cpu_has(X86_FEATURE_PKU) && | 6415 | if (static_cpu_has(X86_FEATURE_PKU) && |
| 6414 | kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && | 6416 | kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && |
| 6415 | vcpu->arch.pkru != vmx->host_pkru) | 6417 | vcpu->arch.pkru != vmx->host_pkru) |
| @@ -6506,6 +6508,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6506 | __write_pkru(vmx->host_pkru); | 6508 | __write_pkru(vmx->host_pkru); |
| 6507 | } | 6509 | } |
| 6508 | 6510 | ||
| 6511 | kvm_put_guest_xcr0(vcpu); | ||
| 6512 | |||
| 6509 | vmx->nested.nested_run_pending = 0; | 6513 | vmx->nested.nested_run_pending = 0; |
| 6510 | vmx->idt_vectoring_info = 0; | 6514 | vmx->idt_vectoring_info = 0; |
| 6511 | 6515 | ||
| @@ -6852,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu) | |||
| 6852 | } | 6856 | } |
| 6853 | } | 6857 | } |
| 6854 | 6858 | ||
| 6859 | static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu) | ||
| 6860 | { | ||
| 6861 | struct kvm_cpuid_entry2 *entry; | ||
| 6862 | union cpuid10_eax eax; | ||
| 6863 | |||
| 6864 | entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); | ||
| 6865 | if (!entry) | ||
| 6866 | return false; | ||
| 6867 | |||
| 6868 | eax.full = entry->eax; | ||
| 6869 | return (eax.split.version_id > 0); | ||
| 6870 | } | ||
| 6871 | |||
| 6872 | static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu) | ||
| 6873 | { | ||
| 6874 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
| 6875 | bool pmu_enabled = guest_cpuid_has_pmu(vcpu); | ||
| 6876 | |||
| 6877 | if (pmu_enabled) | ||
| 6878 | vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING; | ||
| 6879 | else | ||
| 6880 | vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING; | ||
| 6881 | } | ||
| 6882 | |||
| 6855 | static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) | 6883 | static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) |
| 6856 | { | 6884 | { |
| 6857 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 6885 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| @@ -6940,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) | |||
| 6940 | if (nested_vmx_allowed(vcpu)) { | 6968 | if (nested_vmx_allowed(vcpu)) { |
| 6941 | nested_vmx_cr_fixed1_bits_update(vcpu); | 6969 | nested_vmx_cr_fixed1_bits_update(vcpu); |
| 6942 | nested_vmx_entry_exit_ctls_update(vcpu); | 6970 | nested_vmx_entry_exit_ctls_update(vcpu); |
| 6971 | nested_vmx_procbased_ctls_update(vcpu); | ||
| 6943 | } | 6972 | } |
| 6944 | 6973 | ||
| 6945 | if (boot_cpu_has(X86_FEATURE_INTEL_PT) && | 6974 | if (boot_cpu_has(X86_FEATURE_INTEL_PT) && |
| @@ -7369,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) | |||
| 7369 | return 0; | 7398 | return 0; |
| 7370 | } | 7399 | } |
| 7371 | 7400 | ||
| 7372 | static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) | 7401 | static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
| 7373 | { | 7402 | { |
| 7374 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 7403 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 7375 | int ret; | 7404 | int ret; |
| @@ -7380,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) | |||
| 7380 | } | 7409 | } |
| 7381 | 7410 | ||
| 7382 | if (vmx->nested.smm.guest_mode) { | 7411 | if (vmx->nested.smm.guest_mode) { |
| 7383 | vcpu->arch.hflags &= ~HF_SMM_MASK; | ||
| 7384 | ret = nested_vmx_enter_non_root_mode(vcpu, false); | 7412 | ret = nested_vmx_enter_non_root_mode(vcpu, false); |
| 7385 | vcpu->arch.hflags |= HF_SMM_MASK; | ||
| 7386 | if (ret) | 7413 | if (ret) |
| 7387 | return ret; | 7414 | return ret; |
| 7388 | 7415 | ||
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index a1e00d0a2482..f879529906b4 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
| @@ -517,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx) | |||
| 517 | vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); | 517 | vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); |
| 518 | } | 518 | } |
| 519 | 519 | ||
| 520 | void dump_vmcs(void); | ||
| 521 | |||
| 520 | #endif /* __KVM_X86_VMX_H */ | 522 | #endif /* __KVM_X86_VMX_H */ |
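guest_cpuid_has_pmu() above infers PMU presence from CPUID leaf 0xA: bits 7:0 of EAX carry the architectural PMU version, and zero means no PMU, in which case RDPMC-exiting is stripped from the nested controls. A self-contained sketch of that decode (the bitfield layout is assumed to mirror union cpuid10_eax as defined by the SDM):

```c
#include <stdbool.h>
#include <stdint.h>

/* CPUID.0xA:EAX, as described in the SDM; mirrors union cpuid10_eax. */
union cpuid10_eax_sketch {
	struct {
		uint32_t version_id:8;        /* 0 means no architectural PMU */
		uint32_t num_counters:8;
		uint32_t bit_width:8;
		uint32_t mask_length:8;
	} split;
	uint32_t full;
};

static bool has_arch_pmu(uint32_t cpuid_0xa_eax)
{
	union cpuid10_eax_sketch eax = { .full = cpuid_0xa_eax };

	return eax.split.version_id > 0;
}
```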
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 099b851dabaf..a0d1fc80ac5a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) | |||
| 800 | } | 800 | } |
| 801 | EXPORT_SYMBOL_GPL(kvm_lmsw); | 801 | EXPORT_SYMBOL_GPL(kvm_lmsw); |
| 802 | 802 | ||
| 803 | static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) | 803 | void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) |
| 804 | { | 804 | { |
| 805 | if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && | 805 | if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && |
| 806 | !vcpu->guest_xcr0_loaded) { | 806 | !vcpu->guest_xcr0_loaded) { |
| @@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) | |||
| 810 | vcpu->guest_xcr0_loaded = 1; | 810 | vcpu->guest_xcr0_loaded = 1; |
| 811 | } | 811 | } |
| 812 | } | 812 | } |
| 813 | EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); | ||
| 813 | 814 | ||
| 814 | static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) | 815 | void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) |
| 815 | { | 816 | { |
| 816 | if (vcpu->guest_xcr0_loaded) { | 817 | if (vcpu->guest_xcr0_loaded) { |
| 817 | if (vcpu->arch.xcr0 != host_xcr0) | 818 | if (vcpu->arch.xcr0 != host_xcr0) |
| @@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) | |||
| 819 | vcpu->guest_xcr0_loaded = 0; | 820 | vcpu->guest_xcr0_loaded = 0; |
| 820 | } | 821 | } |
| 821 | } | 822 | } |
| 823 | EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0); | ||
| 822 | 824 | ||
| 823 | static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | 825 | static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
| 824 | { | 826 | { |
| @@ -3093,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
| 3093 | break; | 3095 | break; |
| 3094 | case KVM_CAP_NESTED_STATE: | 3096 | case KVM_CAP_NESTED_STATE: |
| 3095 | r = kvm_x86_ops->get_nested_state ? | 3097 | r = kvm_x86_ops->get_nested_state ? |
| 3096 | kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0; | 3098 | kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0; |
| 3097 | break; | 3099 | break; |
| 3098 | default: | 3100 | default: |
| 3099 | break; | 3101 | break; |
| @@ -3528,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
| 3528 | memset(&events->reserved, 0, sizeof(events->reserved)); | 3530 | memset(&events->reserved, 0, sizeof(events->reserved)); |
| 3529 | } | 3531 | } |
| 3530 | 3532 | ||
| 3531 | static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags); | 3533 | static void kvm_smm_changed(struct kvm_vcpu *vcpu); |
| 3532 | 3534 | ||
| 3533 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | 3535 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, |
| 3534 | struct kvm_vcpu_events *events) | 3536 | struct kvm_vcpu_events *events) |
| @@ -3588,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | |||
| 3588 | vcpu->arch.apic->sipi_vector = events->sipi_vector; | 3590 | vcpu->arch.apic->sipi_vector = events->sipi_vector; |
| 3589 | 3591 | ||
| 3590 | if (events->flags & KVM_VCPUEVENT_VALID_SMM) { | 3592 | if (events->flags & KVM_VCPUEVENT_VALID_SMM) { |
| 3591 | u32 hflags = vcpu->arch.hflags; | 3593 | if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { |
| 3592 | if (events->smi.smm) | 3594 | if (events->smi.smm) |
| 3593 | hflags |= HF_SMM_MASK; | 3595 | vcpu->arch.hflags |= HF_SMM_MASK; |
| 3594 | else | 3596 | else |
| 3595 | hflags &= ~HF_SMM_MASK; | 3597 | vcpu->arch.hflags &= ~HF_SMM_MASK; |
| 3596 | kvm_set_hflags(vcpu, hflags); | 3598 | kvm_smm_changed(vcpu); |
| 3599 | } | ||
| 3597 | 3600 | ||
| 3598 | vcpu->arch.smi_pending = events->smi.pending; | 3601 | vcpu->arch.smi_pending = events->smi.pending; |
| 3599 | 3602 | ||
| @@ -4270,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, | |||
| 4270 | } | 4273 | } |
| 4271 | 4274 | ||
| 4272 | static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, | 4275 | static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, |
| 4273 | u32 kvm_nr_mmu_pages) | 4276 | unsigned long kvm_nr_mmu_pages) |
| 4274 | { | 4277 | { |
| 4275 | if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) | 4278 | if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) |
| 4276 | return -EINVAL; | 4279 | return -EINVAL; |
| @@ -4284,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, | |||
| 4284 | return 0; | 4287 | return 0; |
| 4285 | } | 4288 | } |
| 4286 | 4289 | ||
| 4287 | static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) | 4290 | static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) |
| 4288 | { | 4291 | { |
| 4289 | return kvm->arch.n_max_mmu_pages; | 4292 | return kvm->arch.n_max_mmu_pages; |
| 4290 | } | 4293 | } |
| @@ -5958,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) | |||
| 5958 | 5961 | ||
| 5959 | static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) | 5962 | static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) |
| 5960 | { | 5963 | { |
| 5961 | kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); | 5964 | emul_to_vcpu(ctxt)->arch.hflags = emul_flags; |
| 5965 | } | ||
| 5966 | |||
| 5967 | static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, | ||
| 5968 | const char *smstate) | ||
| 5969 | { | ||
| 5970 | return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate); | ||
| 5962 | } | 5971 | } |
| 5963 | 5972 | ||
| 5964 | static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase) | 5973 | static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) |
| 5965 | { | 5974 | { |
| 5966 | return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase); | 5975 | kvm_smm_changed(emul_to_vcpu(ctxt)); |
| 5967 | } | 5976 | } |
| 5968 | 5977 | ||
| 5969 | static const struct x86_emulate_ops emulate_ops = { | 5978 | static const struct x86_emulate_ops emulate_ops = { |
| @@ -6006,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = { | |||
| 6006 | .get_hflags = emulator_get_hflags, | 6015 | .get_hflags = emulator_get_hflags, |
| 6007 | .set_hflags = emulator_set_hflags, | 6016 | .set_hflags = emulator_set_hflags, |
| 6008 | .pre_leave_smm = emulator_pre_leave_smm, | 6017 | .pre_leave_smm = emulator_pre_leave_smm, |
| 6018 | .post_leave_smm = emulator_post_leave_smm, | ||
| 6009 | }; | 6019 | }; |
| 6010 | 6020 | ||
| 6011 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) | 6021 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) |
| @@ -6247,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu) | |||
| 6247 | kvm_mmu_reset_context(vcpu); | 6257 | kvm_mmu_reset_context(vcpu); |
| 6248 | } | 6258 | } |
| 6249 | 6259 | ||
| 6250 | static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags) | ||
| 6251 | { | ||
| 6252 | unsigned changed = vcpu->arch.hflags ^ emul_flags; | ||
| 6253 | |||
| 6254 | vcpu->arch.hflags = emul_flags; | ||
| 6255 | |||
| 6256 | if (changed & HF_SMM_MASK) | ||
| 6257 | kvm_smm_changed(vcpu); | ||
| 6258 | } | ||
| 6259 | |||
| 6260 | static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, | 6260 | static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, |
| 6261 | unsigned long *db) | 6261 | unsigned long *db) |
| 6262 | { | 6262 | { |
| @@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) | |||
| 7441 | put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); | 7441 | put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); |
| 7442 | } | 7442 | } |
| 7443 | 7443 | ||
| 7444 | #ifdef CONFIG_X86_64 | ||
| 7444 | static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) | 7445 | static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) |
| 7445 | { | 7446 | { |
| 7446 | #ifdef CONFIG_X86_64 | ||
| 7447 | struct desc_ptr dt; | 7447 | struct desc_ptr dt; |
| 7448 | struct kvm_segment seg; | 7448 | struct kvm_segment seg; |
| 7449 | unsigned long val; | 7449 | unsigned long val; |
| @@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) | |||
| 7493 | 7493 | ||
| 7494 | for (i = 0; i < 6; i++) | 7494 | for (i = 0; i < 6; i++) |
| 7495 | enter_smm_save_seg_64(vcpu, buf, i); | 7495 | enter_smm_save_seg_64(vcpu, buf, i); |
| 7496 | #else | ||
| 7497 | WARN_ON_ONCE(1); | ||
| 7498 | #endif | ||
| 7499 | } | 7496 | } |
| 7497 | #endif | ||
| 7500 | 7498 | ||
| 7501 | static void enter_smm(struct kvm_vcpu *vcpu) | 7499 | static void enter_smm(struct kvm_vcpu *vcpu) |
| 7502 | { | 7500 | { |
| @@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu) | |||
| 7507 | 7505 | ||
| 7508 | trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); | 7506 | trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); |
| 7509 | memset(buf, 0, 512); | 7507 | memset(buf, 0, 512); |
| 7508 | #ifdef CONFIG_X86_64 | ||
| 7510 | if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) | 7509 | if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) |
| 7511 | enter_smm_save_state_64(vcpu, buf); | 7510 | enter_smm_save_state_64(vcpu, buf); |
| 7512 | else | 7511 | else |
| 7512 | #endif | ||
| 7513 | enter_smm_save_state_32(vcpu, buf); | 7513 | enter_smm_save_state_32(vcpu, buf); |
| 7514 | 7514 | ||
| 7515 | /* | 7515 | /* |
| @@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu) | |||
| 7567 | kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); | 7567 | kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); |
| 7568 | kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); | 7568 | kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); |
| 7569 | 7569 | ||
| 7570 | #ifdef CONFIG_X86_64 | ||
| 7570 | if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) | 7571 | if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) |
| 7571 | kvm_x86_ops->set_efer(vcpu, 0); | 7572 | kvm_x86_ops->set_efer(vcpu, 0); |
| 7573 | #endif | ||
| 7572 | 7574 | ||
| 7573 | kvm_update_cpuid(vcpu); | 7575 | kvm_update_cpuid(vcpu); |
| 7574 | kvm_mmu_reset_context(vcpu); | 7576 | kvm_mmu_reset_context(vcpu); |
| @@ -7865,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
| 7865 | goto cancel_injection; | 7867 | goto cancel_injection; |
| 7866 | } | 7868 | } |
| 7867 | 7869 | ||
| 7868 | kvm_load_guest_xcr0(vcpu); | ||
| 7869 | |||
| 7870 | if (req_immediate_exit) { | 7870 | if (req_immediate_exit) { |
| 7871 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 7871 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 7872 | kvm_x86_ops->request_immediate_exit(vcpu); | 7872 | kvm_x86_ops->request_immediate_exit(vcpu); |
| @@ -7919,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
| 7919 | vcpu->mode = OUTSIDE_GUEST_MODE; | 7919 | vcpu->mode = OUTSIDE_GUEST_MODE; |
| 7920 | smp_wmb(); | 7920 | smp_wmb(); |
| 7921 | 7921 | ||
| 7922 | kvm_put_guest_xcr0(vcpu); | ||
| 7923 | |||
| 7924 | kvm_before_interrupt(vcpu); | 7922 | kvm_before_interrupt(vcpu); |
| 7925 | kvm_x86_ops->handle_external_intr(vcpu); | 7923 | kvm_x86_ops->handle_external_intr(vcpu); |
| 7926 | kvm_after_interrupt(vcpu); | 7924 | kvm_after_interrupt(vcpu); |
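
The KVM_VCPUEVENT_VALID_SMM hunk above replaces the old kvm_set_hflags() diff-and-notify helper with an explicit check of the SMM bit followed by a direct kvm_smm_changed() call. A minimal userspace sketch of that pattern (the flag layout and names here are illustrative, not the KVM code):

	#include <stdbool.h>
	#include <stdio.h>

	#define F_SMM	(1u << 0)

	static unsigned int hflags;

	static void smm_changed(void)
	{
		printf("SMM state toggled\n");	/* stands in for kvm_smm_changed() */
	}

	/* Caller owns both the comparison and the notification. */
	static void set_smm(bool smm)
	{
		if (!!(hflags & F_SMM) != smm) {
			if (smm)
				hflags |= F_SMM;
			else
				hflags &= ~F_SMM;
			smm_changed();
		}
	}

	int main(void)
	{
		set_smm(true);	/* fires smm_changed() once */
		set_smm(true);	/* no change, no notification */
		set_smm(false);	/* fires again */
		return 0;
	}

The point of the change is visible in the sketch: only a real transition of the one bit the caller cares about triggers the mode-change work, and no generic setter has to guess which bits matter.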
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 28406aa1136d..aedc5d0d4989 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
| @@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu) | |||
| 347 | __this_cpu_write(current_vcpu, NULL); | 347 | __this_cpu_write(current_vcpu, NULL); |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); | ||
| 351 | void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); | ||
| 350 | #endif | 352 | #endif |
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index ee8f8ab46941..c0309ea9abee 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
| @@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st) | |||
| 259 | #endif | 259 | #endif |
| 260 | /* Account the WX pages */ | 260 | /* Account the WX pages */ |
| 261 | st->wx_pages += npages; | 261 | st->wx_pages += npages; |
| 262 | WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n", | 262 | WARN_ONCE(__supported_pte_mask & _PAGE_NX, |
| 263 | "x86/mm: Found insecure W+X mapping at address %pS\n", | ||
| 263 | (void *)st->start_address); | 264 | (void *)st->start_address); |
| 264 | } | 265 | } |
| 265 | 266 | ||
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0029604af8a4..dd73d5d74393 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
| @@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx, | |||
| 825 | pte = early_ioremap_pte(addr); | 825 | pte = early_ioremap_pte(addr); |
| 826 | 826 | ||
| 827 | /* Sanitize 'prot' against any unsupported bits: */ | 827 | /* Sanitize 'prot' against any unsupported bits: */ |
| 828 | pgprot_val(flags) &= __default_kernel_pte_mask; | 828 | pgprot_val(flags) &= __supported_pte_mask; |
| 829 | 829 | ||
| 830 | if (pgprot_val(flags)) | 830 | if (pgprot_val(flags)) |
| 831 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); | 831 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); |
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 3f452ffed7e9..d669c5e797e0 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c | |||
| @@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void) | |||
| 94 | if (!kaslr_memory_enabled()) | 94 | if (!kaslr_memory_enabled()) |
| 95 | return; | 95 | return; |
| 96 | 96 | ||
| 97 | kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT); | 97 | kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT); |
| 98 | kaslr_regions[1].size_tb = VMALLOC_SIZE_TB; | 98 | kaslr_regions[1].size_tb = VMALLOC_SIZE_TB; |
| 99 | 99 | ||
| 100 | /* | 100 | /* |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index bc4bc7b2f075..487b8474c01c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
| @@ -728,7 +728,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | |||
| 728 | { | 728 | { |
| 729 | int cpu; | 729 | int cpu; |
| 730 | 730 | ||
| 731 | struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = { | 731 | struct flush_tlb_info info = { |
| 732 | .mm = mm, | 732 | .mm = mm, |
| 733 | .stride_shift = stride_shift, | 733 | .stride_shift = stride_shift, |
| 734 | .freed_tables = freed_tables, | 734 | .freed_tables = freed_tables, |
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index f7dd895b2353..0c14018d1c26 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
| @@ -187,15 +187,18 @@ struct thread_struct { | |||
| 187 | 187 | ||
| 188 | /* Clearing a0 terminates the backtrace. */ | 188 | /* Clearing a0 terminates the backtrace. */ |
| 189 | #define start_thread(regs, new_pc, new_sp) \ | 189 | #define start_thread(regs, new_pc, new_sp) \ |
| 190 | memset(regs, 0, sizeof(*regs)); \ | 190 | do { \ |
| 191 | regs->pc = new_pc; \ | 191 | memset((regs), 0, sizeof(*(regs))); \ |
| 192 | regs->ps = USER_PS_VALUE; \ | 192 | (regs)->pc = (new_pc); \ |
| 193 | regs->areg[1] = new_sp; \ | 193 | (regs)->ps = USER_PS_VALUE; \ |
| 194 | regs->areg[0] = 0; \ | 194 | (regs)->areg[1] = (new_sp); \ |
| 195 | regs->wmask = 1; \ | 195 | (regs)->areg[0] = 0; \ |
| 196 | regs->depc = 0; \ | 196 | (regs)->wmask = 1; \ |
| 197 | regs->windowbase = 0; \ | 197 | (regs)->depc = 0; \ |
| 198 | regs->windowstart = 1; | 198 | (regs)->windowbase = 0; \ |
| 199 | (regs)->windowstart = 1; \ | ||
| 200 | (regs)->syscall = NO_SYSCALL; \ | ||
| 201 | } while (0) | ||
| 199 | 202 | ||
| 200 | /* Forward declaration */ | 203 | /* Forward declaration */ |
| 201 | struct task_struct; | 204 | struct task_struct; |
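
The start_thread() change above wraps the multi-statement macro body in do { ... } while (0) and parenthesizes its arguments. A small illustrative C program (not kernel code) showing why the unguarded form misbehaves under a conditional:

	#include <stdio.h>

	#define RESET_PAIR_BAD(a, b)	(a) = 0; (b) = 0
	#define RESET_PAIR_GOOD(a, b)	do { (a) = 0; (b) = 0; } while (0)

	int main(void)
	{
		int x = 1, y = 1, cond = 0;

		if (cond)
			RESET_PAIR_BAD(x, y);	/* only "(a) = 0;" is guarded by the if */
		printf("bad:  x=%d y=%d\n", x, y);	/* x=1 y=0: y was cleared anyway */

		x = 1;
		y = 1;
		if (cond)
			RESET_PAIR_GOOD(x, y);	/* whole body is a single statement */
		printf("good: x=%d y=%d\n", x, y);	/* x=1 y=1 */

		return 0;
	}

With the do/while form, an `if (...) start_thread(...); else ...` caller also compiles and behaves as expected, which the old expansion could not guarantee.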
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index a168bf81c7f4..91dc06d58060 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h | |||
| @@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task, | |||
| 59 | 59 | ||
| 60 | static inline void syscall_get_arguments(struct task_struct *task, | 60 | static inline void syscall_get_arguments(struct task_struct *task, |
| 61 | struct pt_regs *regs, | 61 | struct pt_regs *regs, |
| 62 | unsigned int i, unsigned int n, | ||
| 63 | unsigned long *args) | 62 | unsigned long *args) |
| 64 | { | 63 | { |
| 65 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; | 64 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; |
| 66 | unsigned int j; | 65 | unsigned int i; |
| 67 | 66 | ||
| 68 | if (n == 0) | 67 | for (i = 0; i < 6; ++i) |
| 69 | return; | 68 | args[i] = regs->areg[reg[i]]; |
| 70 | |||
| 71 | WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS); | ||
| 72 | |||
| 73 | for (j = 0; j < n; ++j) { | ||
| 74 | if (i + j < SYSCALL_MAX_ARGS) | ||
| 75 | args[j] = regs->areg[reg[i + j]]; | ||
| 76 | else | ||
| 77 | args[j] = 0; | ||
| 78 | } | ||
| 79 | } | 69 | } |
| 80 | 70 | ||
| 81 | static inline void syscall_set_arguments(struct task_struct *task, | 71 | static inline void syscall_set_arguments(struct task_struct *task, |
| 82 | struct pt_regs *regs, | 72 | struct pt_regs *regs, |
| 83 | unsigned int i, unsigned int n, | ||
| 84 | const unsigned long *args) | 73 | const unsigned long *args) |
| 85 | { | 74 | { |
| 86 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; | 75 | static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; |
| 87 | unsigned int j; | 76 | unsigned int i; |
| 88 | |||
| 89 | if (n == 0) | ||
| 90 | return; | ||
| 91 | |||
| 92 | if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) { | ||
| 93 | if (i < SYSCALL_MAX_ARGS) | ||
| 94 | n = SYSCALL_MAX_ARGS - i; | ||
| 95 | else | ||
| 96 | return; | ||
| 97 | } | ||
| 98 | 77 | ||
| 99 | for (j = 0; j < n; ++j) | 78 | for (i = 0; i < 6; ++i) |
| 100 | regs->areg[reg[i + j]] = args[j]; | 79 | regs->areg[reg[i]] = args[i]; |
| 101 | } | 80 | } |
| 102 | 81 | ||
| 103 | asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); | 82 | asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); |
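
syscall_get_arguments()/syscall_set_arguments() above drop the (i, n) range parameters and always copy the full six-argument block. A rough sketch of the resulting helper, with a made-up register map standing in for XTENSA_SYSCALL_ARGUMENT_REGS:

	#include <stdio.h>

	struct fake_regs {
		unsigned long areg[16];
	};

	/* Made-up argument-register map, purely for illustration. */
	static const unsigned int arg_reg[6] = { 6, 3, 4, 5, 8, 9 };

	/* Always copies all six arguments, like the patched helper. */
	static void get_args(const struct fake_regs *regs, unsigned long *args)
	{
		unsigned int i;

		for (i = 0; i < 6; ++i)
			args[i] = regs->areg[arg_reg[i]];
	}

	int main(void)
	{
		struct fake_regs regs = { .areg = { [6] = 11, [3] = 22, [4] = 33,
						    [5] = 44, [8] = 55, [9] = 66 } };
		unsigned long args[6];
		int i;

		get_args(&regs, args);
		for (i = 0; i < 6; ++i)
			printf("arg%d = %lu\n", i, args[i]);
		return 0;
	}

Dropping the range parameters removes the bounds-checking special cases entirely; callers that only need a subset simply ignore the extra slots.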
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index e50f5124dc6f..e54af8b7e0f8 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S | |||
| @@ -1860,6 +1860,8 @@ ENTRY(system_call) | |||
| 1860 | l32i a7, a2, PT_SYSCALL | 1860 | l32i a7, a2, PT_SYSCALL |
| 1861 | 1861 | ||
| 1862 | 1: | 1862 | 1: |
| 1863 | s32i a7, a1, 4 | ||
| 1864 | |||
| 1863 | /* syscall = sys_call_table[syscall_nr] */ | 1865 | /* syscall = sys_call_table[syscall_nr] */ |
| 1864 | 1866 | ||
| 1865 | movi a4, sys_call_table | 1867 | movi a4, sys_call_table |
| @@ -1893,8 +1895,12 @@ ENTRY(system_call) | |||
| 1893 | retw | 1895 | retw |
| 1894 | 1896 | ||
| 1895 | 1: | 1897 | 1: |
| 1898 | l32i a4, a1, 4 | ||
| 1899 | l32i a3, a2, PT_SYSCALL | ||
| 1900 | s32i a4, a2, PT_SYSCALL | ||
| 1896 | mov a6, a2 | 1901 | mov a6, a2 |
| 1897 | call4 do_syscall_trace_leave | 1902 | call4 do_syscall_trace_leave |
| 1903 | s32i a3, a2, PT_SYSCALL | ||
| 1898 | retw | 1904 | retw |
| 1899 | 1905 | ||
| 1900 | ENDPROC(system_call) | 1906 | ENDPROC(system_call) |
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 174c11f13bba..b9f82510c650 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c | |||
| @@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data) | |||
| 253 | return 1; | 253 | return 1; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | /* | ||
| 257 | * level == 0 is for the return address from the caller of this function, | ||
| 258 | * not from this function itself. | ||
| 259 | */ | ||
| 256 | unsigned long return_address(unsigned level) | 260 | unsigned long return_address(unsigned level) |
| 257 | { | 261 | { |
| 258 | struct return_addr_data r = { | 262 | struct return_addr_data r = { |
| 259 | .skip = level + 1, | 263 | .skip = level, |
| 260 | }; | 264 | }; |
| 261 | walk_stackframe(stack_pointer(NULL), return_address_cb, &r); | 265 | walk_stackframe(stack_pointer(NULL), return_address_cb, &r); |
| 262 | return r.addr; | 266 | return r.addr; |
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 6af49929de85..30084eaf8422 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl | |||
| @@ -394,3 +394,7 @@ | |||
| 394 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait | 394 | 421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait |
| 395 | 422 common futex_time64 sys_futex | 395 | 422 common futex_time64 sys_futex |
| 396 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval | 396 | 423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval |
| 397 | 424 common pidfd_send_signal sys_pidfd_send_signal | ||
| 398 | 425 common io_uring_setup sys_io_uring_setup | ||
| 399 | 426 common io_uring_enter sys_io_uring_enter | ||
| 400 | 427 common io_uring_register sys_io_uring_register | ||
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 2fb7d1172228..03678c4afc39 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c | |||
| @@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) | |||
| 33 | 33 | ||
| 34 | pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); | 34 | pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); |
| 35 | if (!pte) | 35 | if (!pte) |
| 36 | panic("%s: Failed to allocate %zu bytes align=%lx\n", | 36 | panic("%s: Failed to allocate %lu bytes align=%lx\n", |
| 37 | __func__, n_pages * sizeof(pte_t), PAGE_SIZE); | 37 | __func__, n_pages * sizeof(pte_t), PAGE_SIZE); |
| 38 | 38 | ||
| 39 | for (i = 0; i < n_pages; ++i) | 39 | for (i = 0; i < n_pages; ++i) |
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 4c592496a16a..5ba1e0d841b4 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c | |||
| @@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) | |||
| 674 | * at least two nodes. | 674 | * at least two nodes. |
| 675 | */ | 675 | */ |
| 676 | return !(varied_queue_weights || multiple_classes_busy | 676 | return !(varied_queue_weights || multiple_classes_busy |
| 677 | #ifdef BFQ_GROUP_IOSCHED_ENABLED | 677 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 678 | || bfqd->num_groups_with_pending_reqs > 0 | 678 | || bfqd->num_groups_with_pending_reqs > 0 |
| 679 | #endif | 679 | #endif |
| 680 | ); | 680 | ); |
| @@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) | |||
| 2822 | bfq_remove_request(q, rq); | 2822 | bfq_remove_request(q, rq); |
| 2823 | } | 2823 | } |
| 2824 | 2824 | ||
| 2825 | static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 2825 | static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 2826 | { | 2826 | { |
| 2827 | /* | 2827 | /* |
| 2828 | * If this bfqq is shared between multiple processes, check | 2828 | * If this bfqq is shared between multiple processes, check |
| @@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) | |||
| 2855 | /* | 2855 | /* |
| 2856 | * All in-service entities must have been properly deactivated | 2856 | * All in-service entities must have been properly deactivated |
| 2857 | * or requeued before executing the next function, which | 2857 | * or requeued before executing the next function, which |
| 2858 | * resets all in-service entites as no more in service. | 2858 | * resets all in-service entities as no more in service. This |
| 2859 | * may cause bfqq to be freed. If this happens, the next | ||
| 2860 | * function returns true. | ||
| 2859 | */ | 2861 | */ |
| 2860 | __bfq_bfqd_reset_in_service(bfqd); | 2862 | return __bfq_bfqd_reset_in_service(bfqd); |
| 2861 | } | 2863 | } |
| 2862 | 2864 | ||
| 2863 | /** | 2865 | /** |
| @@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, | |||
| 3262 | bool slow; | 3264 | bool slow; |
| 3263 | unsigned long delta = 0; | 3265 | unsigned long delta = 0; |
| 3264 | struct bfq_entity *entity = &bfqq->entity; | 3266 | struct bfq_entity *entity = &bfqq->entity; |
| 3265 | int ref; | ||
| 3266 | 3267 | ||
| 3267 | /* | 3268 | /* |
| 3268 | * Check whether the process is slow (see bfq_bfqq_is_slow). | 3269 | * Check whether the process is slow (see bfq_bfqq_is_slow). |
| @@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, | |||
| 3347 | * reason. | 3348 | * reason. |
| 3348 | */ | 3349 | */ |
| 3349 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); | 3350 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); |
| 3350 | ref = bfqq->ref; | 3351 | if (__bfq_bfqq_expire(bfqd, bfqq)) |
| 3351 | __bfq_bfqq_expire(bfqd, bfqq); | 3352 | /* bfqq is gone, no more actions on it */ |
| 3352 | |||
| 3353 | if (ref == 1) /* bfqq is gone, no more actions on it */ | ||
| 3354 | return; | 3353 | return; |
| 3355 | 3354 | ||
| 3356 | bfqq->injected_service = 0; | 3355 | bfqq->injected_service = 0; |
| @@ -5397,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd, | |||
| 5397 | return min_shallow; | 5396 | return min_shallow; |
| 5398 | } | 5397 | } |
| 5399 | 5398 | ||
| 5400 | static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) | 5399 | static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) |
| 5401 | { | 5400 | { |
| 5402 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | 5401 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
| 5403 | struct blk_mq_tags *tags = hctx->sched_tags; | 5402 | struct blk_mq_tags *tags = hctx->sched_tags; |
| @@ -5405,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) | |||
| 5405 | 5404 | ||
| 5406 | min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); | 5405 | min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); |
| 5407 | sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); | 5406 | sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); |
| 5407 | } | ||
| 5408 | |||
| 5409 | static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) | ||
| 5410 | { | ||
| 5411 | bfq_depth_updated(hctx); | ||
| 5408 | return 0; | 5412 | return 0; |
| 5409 | } | 5413 | } |
| 5410 | 5414 | ||
| @@ -5827,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = { | |||
| 5827 | .requests_merged = bfq_requests_merged, | 5831 | .requests_merged = bfq_requests_merged, |
| 5828 | .request_merged = bfq_request_merged, | 5832 | .request_merged = bfq_request_merged, |
| 5829 | .has_work = bfq_has_work, | 5833 | .has_work = bfq_has_work, |
| 5834 | .depth_updated = bfq_depth_updated, | ||
| 5830 | .init_hctx = bfq_init_hctx, | 5835 | .init_hctx = bfq_init_hctx, |
| 5831 | .init_sched = bfq_init_queue, | 5836 | .init_sched = bfq_init_queue, |
| 5832 | .exit_sched = bfq_exit_queue, | 5837 | .exit_sched = bfq_exit_queue, |
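
bfq now exposes its shallow-depth recalculation through a new ->depth_updated() elevator hook, which bfq_init_hctx() also calls. An illustrative sketch (not the real elevator_type API) of the optional-hook pattern that the blk_mq_update_nr_requests() hunk further down relies on:

	#include <stdio.h>

	struct sched_ops {
		void (*depth_updated)(int nr);	/* optional */
	};

	static void bfq_like_depth_updated(int nr)
	{
		printf("recompute min_shallow for %d tags\n", nr);
	}

	static const struct sched_ops with_hook = { .depth_updated = bfq_like_depth_updated };
	static const struct sched_ops without_hook = { .depth_updated = NULL };

	static void update_nr_requests(const struct sched_ops *ops, int nr)
	{
		/* ...resize the tag bitmaps here... */
		if (ops->depth_updated)
			ops->depth_updated(nr);	/* only schedulers that care provide it */
	}

	int main(void)
	{
		update_nr_requests(&with_hook, 64);
		update_nr_requests(&without_hook, 64);
		return 0;
	}

Schedulers that do not track queue depth simply leave the hook NULL and are unaffected.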
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 062e1c4787f4..86394e503ca9 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h | |||
| @@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, | |||
| 995 | bool ins_into_idle_tree); | 995 | bool ins_into_idle_tree); |
| 996 | bool next_queue_may_preempt(struct bfq_data *bfqd); | 996 | bool next_queue_may_preempt(struct bfq_data *bfqd); |
| 997 | struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd); | 997 | struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd); |
| 998 | void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd); | 998 | bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd); |
| 999 | void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 999 | void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1000 | bool ins_into_idle_tree, bool expiration); | 1000 | bool ins_into_idle_tree, bool expiration); |
| 1001 | void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); | 1001 | void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); |
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63311d1ff1ed..ae4d000ac0af 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
| @@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity, | |||
| 1012 | entity->on_st = true; | 1012 | entity->on_st = true; |
| 1013 | } | 1013 | } |
| 1014 | 1014 | ||
| 1015 | #ifdef BFQ_GROUP_IOSCHED_ENABLED | 1015 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 1016 | if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ | 1016 | if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ |
| 1017 | struct bfq_group *bfqg = | 1017 | struct bfq_group *bfqg = |
| 1018 | container_of(entity, struct bfq_group, entity); | 1018 | container_of(entity, struct bfq_group, entity); |
| @@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) | |||
| 1605 | return bfqq; | 1605 | return bfqq; |
| 1606 | } | 1606 | } |
| 1607 | 1607 | ||
| 1608 | void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) | 1608 | /* returns true if the in-service queue gets freed */ |
| 1609 | bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) | ||
| 1609 | { | 1610 | { |
| 1610 | struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; | 1611 | struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; |
| 1611 | struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; | 1612 | struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; |
| @@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) | |||
| 1629 | * service tree either, then release the service reference to | 1630 | * service tree either, then release the service reference to |
| 1630 | * the queue it represents (taken with bfq_get_entity). | 1631 | * the queue it represents (taken with bfq_get_entity). |
| 1631 | */ | 1632 | */ |
| 1632 | if (!in_serv_entity->on_st) | 1633 | if (!in_serv_entity->on_st) { |
| 1634 | /* | ||
| 1635 | * If no process is referencing in_serv_bfqq any | ||
| 1636 | * longer, then the service reference may be the only | ||
| 1637 | * reference to the queue. If this is the case, then | ||
| 1638 | * bfqq gets freed here. | ||
| 1639 | */ | ||
| 1640 | int ref = in_serv_bfqq->ref; | ||
| 1633 | bfq_put_queue(in_serv_bfqq); | 1641 | bfq_put_queue(in_serv_bfqq); |
| 1642 | if (ref == 1) | ||
| 1643 | return true; | ||
| 1644 | } | ||
| 1645 | |||
| 1646 | return false; | ||
| 1634 | } | 1647 | } |
| 1635 | 1648 | ||
| 1636 | void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 1649 | void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
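
__bfq_bfqd_reset_in_service() above now reports whether dropping the service reference freed the in-service queue, so bfq_bfqq_expire() can stop touching it. A minimal userspace analogue of that contract (simplified, with a plain integer refcount):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int ref;
	};

	static void obj_put(struct obj *o)
	{
		if (--o->ref == 0)
			free(o);
	}

	/* Returns true if dropping the service reference freed the object. */
	static bool reset_in_service(struct obj *o)
	{
		int ref = o->ref;	/* sample first: o may be gone after the put */

		obj_put(o);
		return ref == 1;
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		o->ref = 1;		/* only the service reference remains */
		if (reset_in_service(o))
			printf("queue freed, caller must not touch it again\n");
		return 0;
	}

Moving the refcount sample inside the function that does the put keeps the use-after-free check next to the operation that can free the object, instead of relying on every caller to remember it.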
diff --git a/block/bio.c b/block/bio.c index b64cedc7f87c..716510ecd7ff 100644 --- a/block/bio.c +++ b/block/bio.c | |||
| @@ -1298,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
| 1298 | } | 1298 | } |
| 1299 | } | 1299 | } |
| 1300 | 1300 | ||
| 1301 | if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) | 1301 | if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) { |
| 1302 | if (!map_data) | ||
| 1303 | __free_page(page); | ||
| 1302 | break; | 1304 | break; |
| 1305 | } | ||
| 1303 | 1306 | ||
| 1304 | len -= bytes; | 1307 | len -= bytes; |
| 1305 | offset = 0; | 1308 | offset = 0; |
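
The bio_copy_user_iov() fix above frees the page on a failed bio_add_pc_page() only when the function allocated the page itself, i.e. when no map_data was supplied. A simplified sketch of that ownership rule (stand-in names, not the block-layer API):

	#include <stdbool.h>
	#include <stdlib.h>

	/* Stand-in for bio_add_pc_page(); pretend the page cannot be added. */
	static bool add_page(void *page)
	{
		(void)page;
		return false;
	}

	/* Stand-in for bio_copy_user_iov(): caller_page plays the role of map_data. */
	static int copy_iov(void *caller_page)
	{
		bool own_page = (caller_page == NULL);
		void *page = own_page ? malloc(4096) : caller_page;

		if (!page)
			return -1;

		if (!add_page(page)) {
			if (own_page)	/* free only what this function allocated */
				free(page);
			return -1;	/* caller-supplied pages stay with the caller */
		}
		return 0;
	}

	int main(void)
	{
		return copy_iov(NULL) == -1 ? 0 : 1;
	}

Without the ownership check, a failure on a caller-supplied page would double-free it; without the free, a locally allocated page would leak on every partial add.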
diff --git a/block/blk-core.c b/block/blk-core.c index 4673ebe42255..a55389ba8779 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, | |||
| 1245 | */ | 1245 | */ |
| 1246 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 1246 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
| 1247 | { | 1247 | { |
| 1248 | blk_qc_t unused; | ||
| 1249 | |||
| 1250 | if (blk_cloned_rq_check_limits(q, rq)) | 1248 | if (blk_cloned_rq_check_limits(q, rq)) |
| 1251 | return BLK_STS_IOERR; | 1249 | return BLK_STS_IOERR; |
| 1252 | 1250 | ||
| @@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * | |||
| 1262 | * bypass a potential scheduler on the bottom device for | 1260 | * bypass a potential scheduler on the bottom device for |
| 1263 | * insert. | 1261 | * insert. |
| 1264 | */ | 1262 | */ |
| 1265 | return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true); | 1263 | return blk_mq_request_issue_directly(rq, true); |
| 1266 | } | 1264 | } |
| 1267 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 1265 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); |
| 1268 | 1266 | ||
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 40905539afed..aa6bc5c02643 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c | |||
| @@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, | |||
| 423 | * busy in case of 'none' scheduler, and this way may save | 423 | * busy in case of 'none' scheduler, and this way may save |
| 424 | * us one extra enqueue & dequeue to sw queue. | 424 | * us one extra enqueue & dequeue to sw queue. |
| 425 | */ | 425 | */ |
| 426 | if (!hctx->dispatch_busy && !e && !run_queue_async) | 426 | if (!hctx->dispatch_busy && !e && !run_queue_async) { |
| 427 | blk_mq_try_issue_list_directly(hctx, list); | 427 | blk_mq_try_issue_list_directly(hctx, list); |
| 428 | else | 428 | if (list_empty(list)) |
| 429 | blk_mq_insert_requests(hctx, ctx, list); | 429 | return; |
| 430 | } | ||
| 431 | blk_mq_insert_requests(hctx, ctx, list); | ||
| 430 | } | 432 | } |
| 431 | 433 | ||
| 432 | blk_mq_run_hw_queue(hctx, run_queue_async); | 434 | blk_mq_run_hw_queue(hctx, run_queue_async); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ff3d7b49969..fc60ed7e940e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -654,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq) | |||
| 654 | } | 654 | } |
| 655 | EXPORT_SYMBOL(blk_mq_complete_request); | 655 | EXPORT_SYMBOL(blk_mq_complete_request); |
| 656 | 656 | ||
| 657 | void blk_mq_complete_request_sync(struct request *rq) | ||
| 658 | { | ||
| 659 | WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); | ||
| 660 | rq->q->mq_ops->complete(rq); | ||
| 661 | } | ||
| 662 | EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync); | ||
| 663 | |||
| 657 | int blk_mq_request_started(struct request *rq) | 664 | int blk_mq_request_started(struct request *rq) |
| 658 | { | 665 | { |
| 659 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; | 666 | return blk_mq_rq_state(rq) != MQ_RQ_IDLE; |
| @@ -1711,11 +1718,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
| 1711 | unsigned int depth; | 1718 | unsigned int depth; |
| 1712 | 1719 | ||
| 1713 | list_splice_init(&plug->mq_list, &list); | 1720 | list_splice_init(&plug->mq_list, &list); |
| 1714 | plug->rq_count = 0; | ||
| 1715 | 1721 | ||
| 1716 | if (plug->rq_count > 2 && plug->multiple_queues) | 1722 | if (plug->rq_count > 2 && plug->multiple_queues) |
| 1717 | list_sort(NULL, &list, plug_rq_cmp); | 1723 | list_sort(NULL, &list, plug_rq_cmp); |
| 1718 | 1724 | ||
| 1725 | plug->rq_count = 0; | ||
| 1726 | |||
| 1719 | this_q = NULL; | 1727 | this_q = NULL; |
| 1720 | this_hctx = NULL; | 1728 | this_hctx = NULL; |
| 1721 | this_ctx = NULL; | 1729 | this_ctx = NULL; |
| @@ -1800,74 +1808,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1800 | return ret; | 1808 | return ret; |
| 1801 | } | 1809 | } |
| 1802 | 1810 | ||
| 1803 | blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | 1811 | static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, |
| 1804 | struct request *rq, | 1812 | struct request *rq, |
| 1805 | blk_qc_t *cookie, | 1813 | blk_qc_t *cookie, |
| 1806 | bool bypass, bool last) | 1814 | bool bypass_insert, bool last) |
| 1807 | { | 1815 | { |
| 1808 | struct request_queue *q = rq->q; | 1816 | struct request_queue *q = rq->q; |
| 1809 | bool run_queue = true; | 1817 | bool run_queue = true; |
| 1810 | blk_status_t ret = BLK_STS_RESOURCE; | ||
| 1811 | int srcu_idx; | ||
| 1812 | bool force = false; | ||
| 1813 | 1818 | ||
| 1814 | hctx_lock(hctx, &srcu_idx); | ||
| 1815 | /* | 1819 | /* |
| 1816 | * hctx_lock is needed before checking quiesced flag. | 1820 | * RCU or SRCU read lock is needed before checking quiesced flag. |
| 1817 | * | 1821 | * |
| 1818 | * When queue is stopped or quiesced, ignore 'bypass', insert | 1822 | * When queue is stopped or quiesced, ignore 'bypass_insert' from |
| 1819 | * and return BLK_STS_OK to caller, and avoid driver to try to | 1823 | * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, |
| 1820 | * dispatch again. | 1824 | * and avoid driver to try to dispatch again. |
| 1821 | */ | 1825 | */ |
| 1822 | if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) { | 1826 | if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { |
| 1823 | run_queue = false; | 1827 | run_queue = false; |
| 1824 | bypass = false; | 1828 | bypass_insert = false; |
| 1825 | goto out_unlock; | 1829 | goto insert; |
| 1826 | } | 1830 | } |
| 1827 | 1831 | ||
| 1828 | if (unlikely(q->elevator && !bypass)) | 1832 | if (q->elevator && !bypass_insert) |
| 1829 | goto out_unlock; | 1833 | goto insert; |
| 1830 | 1834 | ||
| 1831 | if (!blk_mq_get_dispatch_budget(hctx)) | 1835 | if (!blk_mq_get_dispatch_budget(hctx)) |
| 1832 | goto out_unlock; | 1836 | goto insert; |
| 1833 | 1837 | ||
| 1834 | if (!blk_mq_get_driver_tag(rq)) { | 1838 | if (!blk_mq_get_driver_tag(rq)) { |
| 1835 | blk_mq_put_dispatch_budget(hctx); | 1839 | blk_mq_put_dispatch_budget(hctx); |
| 1836 | goto out_unlock; | 1840 | goto insert; |
| 1837 | } | 1841 | } |
| 1838 | 1842 | ||
| 1839 | /* | 1843 | return __blk_mq_issue_directly(hctx, rq, cookie, last); |
| 1840 | * Always add a request that has been through | 1844 | insert: |
| 1841 | *.queue_rq() to the hardware dispatch list. | 1845 | if (bypass_insert) |
| 1842 | */ | 1846 | return BLK_STS_RESOURCE; |
| 1843 | force = true; | 1847 | |
| 1844 | ret = __blk_mq_issue_directly(hctx, rq, cookie, last); | 1848 | blk_mq_request_bypass_insert(rq, run_queue); |
| 1845 | out_unlock: | 1849 | return BLK_STS_OK; |
| 1850 | } | ||
| 1851 | |||
| 1852 | static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | ||
| 1853 | struct request *rq, blk_qc_t *cookie) | ||
| 1854 | { | ||
| 1855 | blk_status_t ret; | ||
| 1856 | int srcu_idx; | ||
| 1857 | |||
| 1858 | might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); | ||
| 1859 | |||
| 1860 | hctx_lock(hctx, &srcu_idx); | ||
| 1861 | |||
| 1862 | ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); | ||
| 1863 | if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) | ||
| 1864 | blk_mq_request_bypass_insert(rq, true); | ||
| 1865 | else if (ret != BLK_STS_OK) | ||
| 1866 | blk_mq_end_request(rq, ret); | ||
| 1867 | |||
| 1868 | hctx_unlock(hctx, srcu_idx); | ||
| 1869 | } | ||
| 1870 | |||
| 1871 | blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) | ||
| 1872 | { | ||
| 1873 | blk_status_t ret; | ||
| 1874 | int srcu_idx; | ||
| 1875 | blk_qc_t unused_cookie; | ||
| 1876 | struct blk_mq_hw_ctx *hctx = rq->mq_hctx; | ||
| 1877 | |||
| 1878 | hctx_lock(hctx, &srcu_idx); | ||
| 1879 | ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); | ||
| 1846 | hctx_unlock(hctx, srcu_idx); | 1880 | hctx_unlock(hctx, srcu_idx); |
| 1847 | switch (ret) { | ||
| 1848 | case BLK_STS_OK: | ||
| 1849 | break; | ||
| 1850 | case BLK_STS_DEV_RESOURCE: | ||
| 1851 | case BLK_STS_RESOURCE: | ||
| 1852 | if (force) { | ||
| 1853 | blk_mq_request_bypass_insert(rq, run_queue); | ||
| 1854 | /* | ||
| 1855 | * We have to return BLK_STS_OK for the DM | ||
| 1856 | * to avoid livelock. Otherwise, we return | ||
| 1857 | * the real result to indicate whether the | ||
| 1858 | * request is direct-issued successfully. | ||
| 1859 | */ | ||
| 1860 | ret = bypass ? BLK_STS_OK : ret; | ||
| 1861 | } else if (!bypass) { | ||
| 1862 | blk_mq_sched_insert_request(rq, false, | ||
| 1863 | run_queue, false); | ||
| 1864 | } | ||
| 1865 | break; | ||
| 1866 | default: | ||
| 1867 | if (!bypass) | ||
| 1868 | blk_mq_end_request(rq, ret); | ||
| 1869 | break; | ||
| 1870 | } | ||
| 1871 | 1881 | ||
| 1872 | return ret; | 1882 | return ret; |
| 1873 | } | 1883 | } |
| @@ -1875,20 +1885,22 @@ out_unlock: | |||
| 1875 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | 1885 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, |
| 1876 | struct list_head *list) | 1886 | struct list_head *list) |
| 1877 | { | 1887 | { |
| 1878 | blk_qc_t unused; | ||
| 1879 | blk_status_t ret = BLK_STS_OK; | ||
| 1880 | |||
| 1881 | while (!list_empty(list)) { | 1888 | while (!list_empty(list)) { |
| 1889 | blk_status_t ret; | ||
| 1882 | struct request *rq = list_first_entry(list, struct request, | 1890 | struct request *rq = list_first_entry(list, struct request, |
| 1883 | queuelist); | 1891 | queuelist); |
| 1884 | 1892 | ||
| 1885 | list_del_init(&rq->queuelist); | 1893 | list_del_init(&rq->queuelist); |
| 1886 | if (ret == BLK_STS_OK) | 1894 | ret = blk_mq_request_issue_directly(rq, list_empty(list)); |
| 1887 | ret = blk_mq_try_issue_directly(hctx, rq, &unused, | 1895 | if (ret != BLK_STS_OK) { |
| 1888 | false, | 1896 | if (ret == BLK_STS_RESOURCE || |
| 1897 | ret == BLK_STS_DEV_RESOURCE) { | ||
| 1898 | blk_mq_request_bypass_insert(rq, | ||
| 1889 | list_empty(list)); | 1899 | list_empty(list)); |
| 1890 | else | 1900 | break; |
| 1891 | blk_mq_sched_insert_request(rq, false, true, false); | 1901 | } |
| 1902 | blk_mq_end_request(rq, ret); | ||
| 1903 | } | ||
| 1892 | } | 1904 | } |
| 1893 | 1905 | ||
| 1894 | /* | 1906 | /* |
| @@ -1896,7 +1908,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | |||
| 1896 | * the driver there was more coming, but that turned out to | 1908 | * the driver there was more coming, but that turned out to |
| 1897 | * be a lie. | 1909 | * be a lie. |
| 1898 | */ | 1910 | */ |
| 1899 | if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs) | 1911 | if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) |
| 1900 | hctx->queue->mq_ops->commit_rqs(hctx); | 1912 | hctx->queue->mq_ops->commit_rqs(hctx); |
| 1901 | } | 1913 | } |
| 1902 | 1914 | ||
| @@ -2003,19 +2015,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 2003 | plug->rq_count--; | 2015 | plug->rq_count--; |
| 2004 | } | 2016 | } |
| 2005 | blk_add_rq_to_plug(plug, rq); | 2017 | blk_add_rq_to_plug(plug, rq); |
| 2018 | trace_block_plug(q); | ||
| 2006 | 2019 | ||
| 2007 | blk_mq_put_ctx(data.ctx); | 2020 | blk_mq_put_ctx(data.ctx); |
| 2008 | 2021 | ||
| 2009 | if (same_queue_rq) { | 2022 | if (same_queue_rq) { |
| 2010 | data.hctx = same_queue_rq->mq_hctx; | 2023 | data.hctx = same_queue_rq->mq_hctx; |
| 2024 | trace_block_unplug(q, 1, true); | ||
| 2011 | blk_mq_try_issue_directly(data.hctx, same_queue_rq, | 2025 | blk_mq_try_issue_directly(data.hctx, same_queue_rq, |
| 2012 | &cookie, false, true); | 2026 | &cookie); |
| 2013 | } | 2027 | } |
| 2014 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && | 2028 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && |
| 2015 | !data.hctx->dispatch_busy)) { | 2029 | !data.hctx->dispatch_busy)) { |
| 2016 | blk_mq_put_ctx(data.ctx); | 2030 | blk_mq_put_ctx(data.ctx); |
| 2017 | blk_mq_bio_to_request(rq, bio); | 2031 | blk_mq_bio_to_request(rq, bio); |
| 2018 | blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true); | 2032 | blk_mq_try_issue_directly(data.hctx, rq, &cookie); |
| 2019 | } else { | 2033 | } else { |
| 2020 | blk_mq_put_ctx(data.ctx); | 2034 | blk_mq_put_ctx(data.ctx); |
| 2021 | blk_mq_bio_to_request(rq, bio); | 2035 | blk_mq_bio_to_request(rq, bio); |
| @@ -2332,7 +2346,7 @@ static int blk_mq_init_hctx(struct request_queue *q, | |||
| 2332 | return 0; | 2346 | return 0; |
| 2333 | 2347 | ||
| 2334 | free_fq: | 2348 | free_fq: |
| 2335 | kfree(hctx->fq); | 2349 | blk_free_flush_queue(hctx->fq); |
| 2336 | exit_hctx: | 2350 | exit_hctx: |
| 2337 | if (set->ops->exit_hctx) | 2351 | if (set->ops->exit_hctx) |
| 2338 | set->ops->exit_hctx(hctx, hctx_idx); | 2352 | set->ops->exit_hctx(hctx, hctx_idx); |
| @@ -3121,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) | |||
| 3121 | } | 3135 | } |
| 3122 | if (ret) | 3136 | if (ret) |
| 3123 | break; | 3137 | break; |
| 3138 | if (q->elevator && q->elevator->type->ops.depth_updated) | ||
| 3139 | q->elevator->type->ops.depth_updated(hctx); | ||
| 3124 | } | 3140 | } |
| 3125 | 3141 | ||
| 3126 | if (!ret) | 3142 | if (!ret) |
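
The blk-mq rework above splits direct issue into a core __blk_mq_try_issue_directly() plus two wrappers: the internal submission path falls back to inserting the request, while blk_mq_request_issue_directly() (used by blk_insert_cloned_request()) returns the resource status so a stacked driver can requeue. A heavily simplified, illustrative sketch of that shape (not the kernel API):

	#include <stdio.h>

	enum sts { STS_OK, STS_RESOURCE, STS_ERROR };

	static void insert_for_later(int rq)
	{
		printf("rq %d queued for the scheduler\n", rq);
	}

	static void end_request(int rq)
	{
		printf("rq %d failed\n", rq);
	}

	/* Core helper: try to hand the request to the driver, report the outcome. */
	static enum sts core_issue(int rq)
	{
		return (rq & 1) ? STS_RESOURCE : STS_OK;	/* pretend odd rqs hit a limit */
	}

	/* Internal submission path: absorb resource pressure by queueing. */
	static void try_issue_directly(int rq)
	{
		enum sts ret = core_issue(rq);

		if (ret == STS_RESOURCE)
			insert_for_later(rq);
		else if (ret != STS_OK)
			end_request(rq);
	}

	/* Stacked-device path: return the status so the upper driver can requeue. */
	static enum sts request_issue_directly(int rq)
	{
		return core_issue(rq);
	}

	int main(void)
	{
		try_issue_directly(1);
		printf("cloned rq 3 -> %d\n", request_issue_directly(3));
		return 0;
	}

Keeping one core helper means both paths share the quiesce, budget and tag checks, and only the failure policy differs between the wrappers.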
diff --git a/block/blk-mq.h b/block/blk-mq.h index d704fc7766f4..423ea88ab6fb 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
| @@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); | |||
| 70 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, | 70 | void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, |
| 71 | struct list_head *list); | 71 | struct list_head *list); |
| 72 | 72 | ||
| 73 | blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, | 73 | /* Used by blk_insert_cloned_request() to issue request directly */ |
| 74 | struct request *rq, | 74 | blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last); |
| 75 | blk_qc_t *cookie, | ||
| 76 | bool bypass, bool last); | ||
| 77 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, | 75 | void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, |
| 78 | struct list_head *list); | 76 | struct list_head *list); |
| 79 | 77 | ||
diff --git a/crypto/lrw.c b/crypto/lrw.c index 0430ccd08728..08a0e458bc3e 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c | |||
| @@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err) | |||
| 212 | { | 212 | { |
| 213 | struct skcipher_request *req = areq->data; | 213 | struct skcipher_request *req = areq->data; |
| 214 | 214 | ||
| 215 | if (!err) | 215 | if (!err) { |
| 216 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 217 | |||
| 218 | rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 216 | err = xor_tweak_post(req); | 219 | err = xor_tweak_post(req); |
| 220 | } | ||
| 217 | 221 | ||
| 218 | skcipher_request_complete(req, err); | 222 | skcipher_request_complete(req, err); |
| 219 | } | 223 | } |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index f267633cf13a..d18a37629f05 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
| @@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = { | |||
| 5634 | .psize = 80, | 5634 | .psize = 80, |
| 5635 | .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" | 5635 | .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" |
| 5636 | "\x00\x00\x00\x00\x00\x00\x00\x00", | 5636 | "\x00\x00\x00\x00\x00\x00\x00\x00", |
| 5637 | }, | 5637 | }, { /* Regression test for overflow in AVX2 implementation */ |
| 5638 | .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5639 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5640 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5641 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5642 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5643 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5644 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5645 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5646 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5647 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5648 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5649 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5650 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5651 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5652 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5653 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5654 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5655 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5656 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5657 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5658 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5659 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5660 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5661 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5662 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5663 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5664 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5665 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5666 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5667 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5668 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5669 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5670 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5671 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5672 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5673 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5674 | "\xff\xff\xff\xff\xff\xff\xff\xff" | ||
| 5675 | "\xff\xff\xff\xff", | ||
| 5676 | .psize = 300, | ||
| 5677 | .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" | ||
| 5678 | "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", | ||
| 5679 | } | ||
| 5638 | }; | 5680 | }; |
| 5639 | 5681 | ||
| 5640 | /* NHPoly1305 test vectors from https://github.com/google/adiantum */ | 5682 | /* NHPoly1305 test vectors from https://github.com/google/adiantum */ |
diff --git a/crypto/xts.c b/crypto/xts.c index 847f54f76789..2f948328cabb 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
| @@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err) | |||
| 137 | { | 137 | { |
| 138 | struct skcipher_request *req = areq->data; | 138 | struct skcipher_request *req = areq->data; |
| 139 | 139 | ||
| 140 | if (!err) | 140 | if (!err) { |
| 141 | struct rctx *rctx = skcipher_request_ctx(req); | ||
| 142 | |||
| 143 | rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
| 141 | err = xor_tweak_post(req); | 144 | err = xor_tweak_post(req); |
| 145 | } | ||
| 142 | 146 | ||
| 143 | skcipher_request_complete(req, err); | 147 | skcipher_request_complete(req, err); |
| 144 | } | 148 | } |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 62d3aa74277b..5e9d7348c16f 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
| 81 | 81 | ||
| 82 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | 82 | ACPI_FUNCTION_TRACE(ev_enable_gpe); |
| 83 | 83 | ||
| 84 | /* Enable the requested GPE */ | 84 | /* Clear the GPE status */ |
| 85 | status = acpi_hw_clear_gpe(gpe_event_info); | ||
| 86 | if (ACPI_FAILURE(status)) | ||
| 87 | return_ACPI_STATUS(status); | ||
| 85 | 88 | ||
| 89 | /* Enable the requested GPE */ | ||
| 86 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | 90 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); |
| 87 | return_ACPI_STATUS(status); | 91 | return_ACPI_STATUS(status); |
| 88 | } | 92 | } |
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 8638f43cfc3d..79d86da1c892 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c | |||
| @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) | |||
| 186 | } | 186 | } |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | if (obj_desc->common.type == ACPI_TYPE_REGION) { | ||
| 190 | acpi_ut_remove_address_range(obj_desc->region.space_id, node); | ||
| 191 | } | ||
| 192 | |||
| 189 | /* Clear the Node entry in all cases */ | 193 | /* Clear the Node entry in all cases */ |
| 190 | 194 | ||
| 191 | node->object = NULL; | 195 | node->object = NULL; |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 5a389a4f4f65..f1ed0befe303 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
| @@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
| 567 | goto out; | 567 | goto out; |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, | ||
| 571 | cmd_name, out_obj->buffer.length); | ||
| 572 | print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, | ||
| 573 | out_obj->buffer.pointer, | ||
| 574 | min_t(u32, 128, out_obj->buffer.length), true); | ||
| 575 | |||
| 570 | if (call_pkg) { | 576 | if (call_pkg) { |
| 571 | call_pkg->nd_fw_size = out_obj->buffer.length; | 577 | call_pkg->nd_fw_size = out_obj->buffer.length; |
| 572 | memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, | 578 | memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, |
| @@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
| 585 | return 0; | 591 | return 0; |
| 586 | } | 592 | } |
| 587 | 593 | ||
| 588 | dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, | ||
| 589 | cmd_name, out_obj->buffer.length); | ||
| 590 | print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, | ||
| 591 | out_obj->buffer.pointer, | ||
| 592 | min_t(u32, 128, out_obj->buffer.length), true); | ||
| 593 | |||
| 594 | for (i = 0, offset = 0; i < desc->out_num; i++) { | 594 | for (i = 0, offset = 0; i < desc->out_num; i++) { |
| 595 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, | 595 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, |
| 596 | (u32 *) out_obj->buffer.pointer, | 596 | (u32 *) out_obj->buffer.pointer, |
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index f70de71f79d6..cddd0fcf622c 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c | |||
| @@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm, | |||
| 122 | if (!test_bit(cmd, &nfit_mem->dsm_mask)) | 122 | if (!test_bit(cmd, &nfit_mem->dsm_mask)) |
| 123 | return -ENOTTY; | 123 | return -ENOTTY; |
| 124 | 124 | ||
| 125 | if (old_data) | 125 | memcpy(nd_cmd.cmd.old_pass, old_data->data, |
| 126 | memcpy(nd_cmd.cmd.old_pass, old_data->data, | 126 | sizeof(nd_cmd.cmd.old_pass)); |
| 127 | sizeof(nd_cmd.cmd.old_pass)); | ||
| 128 | memcpy(nd_cmd.cmd.new_pass, new_data->data, | 127 | memcpy(nd_cmd.cmd.new_pass, new_data->data, |
| 129 | sizeof(nd_cmd.cmd.new_pass)); | 128 | sizeof(nd_cmd.cmd.new_pass)); |
| 130 | rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); | 129 | rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); |
| @@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, | |||
| 336 | 335 | ||
| 337 | /* flush all cache before we erase DIMM */ | 336 | /* flush all cache before we erase DIMM */ |
| 338 | nvdimm_invalidate_cache(); | 337 | nvdimm_invalidate_cache(); |
| 339 | if (nkey) | 338 | memcpy(nd_cmd.cmd.passphrase, nkey->data, |
| 340 | memcpy(nd_cmd.cmd.passphrase, nkey->data, | 339 | sizeof(nd_cmd.cmd.passphrase)); |
| 341 | sizeof(nd_cmd.cmd.passphrase)); | ||
| 342 | rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); | 340 | rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); |
| 343 | if (rc < 0) | 341 | if (rc < 0) |
| 344 | return rc; | 342 | return rc; |
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 11e1663bdc4d..b2c06da4f62e 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c | |||
| @@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id) | |||
| 1646 | } | 1646 | } |
| 1647 | 1647 | ||
| 1648 | if (status & ISR_TBRQ_W) { | 1648 | if (status & ISR_TBRQ_W) { |
| 1649 | fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n"); | 1649 | fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n"); |
| 1650 | process_txdone_queue (dev, &dev->tx_relq); | 1650 | process_txdone_queue (dev, &dev->tx_relq); |
| 1651 | } | 1651 | } |
| 1652 | 1652 | ||
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index cb8347500ce2..e49028a60429 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr, | |||
| 506 | 506 | ||
| 507 | ret = lock_device_hotplug_sysfs(); | 507 | ret = lock_device_hotplug_sysfs(); |
| 508 | if (ret) | 508 | if (ret) |
| 509 | goto out; | 509 | return ret; |
| 510 | 510 | ||
| 511 | nid = memory_add_physaddr_to_nid(phys_addr); | 511 | nid = memory_add_physaddr_to_nid(phys_addr); |
| 512 | ret = __add_memory(nid, phys_addr, | 512 | ret = __add_memory(nid, phys_addr, |
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 417a9f15c116..d7ac09c092f2 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c | |||
| @@ -1748,6 +1748,11 @@ static int __init null_init(void) | |||
| 1748 | return -EINVAL; | 1748 | return -EINVAL; |
| 1749 | } | 1749 | } |
| 1750 | 1750 | ||
| 1751 | if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { | ||
| 1752 | pr_err("null_blk: invalid home_node value\n"); | ||
| 1753 | g_home_node = NUMA_NO_NODE; | ||
| 1754 | } | ||
| 1755 | |||
| 1751 | if (g_queue_mode == NULL_Q_RQ) { | 1756 | if (g_queue_mode == NULL_Q_RQ) { |
| 1752 | pr_err("null_blk: legacy IO path no longer available\n"); | 1757 | pr_err("null_blk: legacy IO path no longer available\n"); |
| 1753 | return -EINVAL; | 1758 | return -EINVAL; |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 377a694dc228..6d415b20fb70 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
| @@ -314,6 +314,7 @@ static void pcd_init_units(void) | |||
| 314 | disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, | 314 | disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, |
| 315 | 1, BLK_MQ_F_SHOULD_MERGE); | 315 | 1, BLK_MQ_F_SHOULD_MERGE); |
| 316 | if (IS_ERR(disk->queue)) { | 316 | if (IS_ERR(disk->queue)) { |
| 317 | put_disk(disk); | ||
| 317 | disk->queue = NULL; | 318 | disk->queue = NULL; |
| 318 | continue; | 319 | continue; |
| 319 | } | 320 | } |
| @@ -750,6 +751,8 @@ static int pcd_detect(void) | |||
| 750 | 751 | ||
| 751 | printk("%s: No CD-ROM drive found\n", name); | 752 | printk("%s: No CD-ROM drive found\n", name); |
| 752 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { | 753 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 754 | if (!cd->disk) | ||
| 755 | continue; | ||
| 753 | blk_cleanup_queue(cd->disk->queue); | 756 | blk_cleanup_queue(cd->disk->queue); |
| 754 | cd->disk->queue = NULL; | 757 | cd->disk->queue = NULL; |
| 755 | blk_mq_free_tag_set(&cd->tag_set); | 758 | blk_mq_free_tag_set(&cd->tag_set); |
| @@ -1010,8 +1013,14 @@ static int __init pcd_init(void) | |||
| 1010 | pcd_probe_capabilities(); | 1013 | pcd_probe_capabilities(); |
| 1011 | 1014 | ||
| 1012 | if (register_blkdev(major, name)) { | 1015 | if (register_blkdev(major, name)) { |
| 1013 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) | 1016 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 1017 | if (!cd->disk) | ||
| 1018 | continue; | ||
| 1019 | |||
| 1020 | blk_cleanup_queue(cd->disk->queue); | ||
| 1021 | blk_mq_free_tag_set(&cd->tag_set); | ||
| 1014 | put_disk(cd->disk); | 1022 | put_disk(cd->disk); |
| 1023 | } | ||
| 1015 | return -EBUSY; | 1024 | return -EBUSY; |
| 1016 | } | 1025 | } |
| 1017 | 1026 | ||
| @@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void) | |||
| 1032 | int unit; | 1041 | int unit; |
| 1033 | 1042 | ||
| 1034 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { | 1043 | for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { |
| 1044 | if (!cd->disk) | ||
| 1045 | continue; | ||
| 1046 | |||
| 1035 | if (cd->present) { | 1047 | if (cd->present) { |
| 1036 | del_gendisk(cd->disk); | 1048 | del_gendisk(cd->disk); |
| 1037 | pi_release(cd->pi); | 1049 | pi_release(cd->pi); |
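The pcd hunks above (and the pf hunks that follow) share one pattern: every teardown loop first skips units whose gendisk was never allocated, so partially initialised entries are neither cleaned up twice nor dereferenced through a NULL pointer. A small standalone sketch of that guard; struct unit, its fields and cleanup_units() are illustrative names, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUNITS 4

struct unit {
	char *disk;   /* NULL when this unit was never set up */
};

/* Tear down only the units that were actually initialised. */
static void cleanup_units(struct unit *units)
{
	for (int i = 0; i < NUNITS; i++) {
		if (!units[i].disk)
			continue;   /* skip half-initialised entries */
		printf("releasing unit %d\n", i);
		free(units[i].disk);
		units[i].disk = NULL;
	}
}

int main(void)
{
	struct unit units[NUNITS] = { { NULL } };

	units[1].disk = strdup("pcd1");   /* only unit 1 probed successfully */
	cleanup_units(units);
	return 0;
}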
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 103b617cdc31..35e6e271b219 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -762,6 +762,8 @@ static int pf_detect(void) | |||
| 762 | 762 | ||
| 763 | printk("%s: No ATAPI disk detected\n", name); | 763 | printk("%s: No ATAPI disk detected\n", name); |
| 764 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 764 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 765 | if (!pf->disk) | ||
| 766 | continue; | ||
| 765 | blk_cleanup_queue(pf->disk->queue); | 767 | blk_cleanup_queue(pf->disk->queue); |
| 766 | pf->disk->queue = NULL; | 768 | pf->disk->queue = NULL; |
| 767 | blk_mq_free_tag_set(&pf->tag_set); | 769 | blk_mq_free_tag_set(&pf->tag_set); |
| @@ -1029,8 +1031,13 @@ static int __init pf_init(void) | |||
| 1029 | pf_busy = 0; | 1031 | pf_busy = 0; |
| 1030 | 1032 | ||
| 1031 | if (register_blkdev(major, name)) { | 1033 | if (register_blkdev(major, name)) { |
| 1032 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) | 1034 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 1035 | if (!pf->disk) | ||
| 1036 | continue; | ||
| 1037 | blk_cleanup_queue(pf->disk->queue); | ||
| 1038 | blk_mq_free_tag_set(&pf->tag_set); | ||
| 1033 | put_disk(pf->disk); | 1039 | put_disk(pf->disk); |
| 1040 | } | ||
| 1034 | return -EBUSY; | 1041 | return -EBUSY; |
| 1035 | } | 1042 | } |
| 1036 | 1043 | ||
| @@ -1051,6 +1058,9 @@ static void __exit pf_exit(void) | |||
| 1051 | int unit; | 1058 | int unit; |
| 1052 | unregister_blkdev(major, name); | 1059 | unregister_blkdev(major, name); |
| 1053 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 1060 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 1061 | if (!pf->disk) | ||
| 1062 | continue; | ||
| 1063 | |||
| 1054 | if (pf->present) | 1064 | if (pf->present) |
| 1055 | del_gendisk(pf->disk); | 1065 | del_gendisk(pf->disk); |
| 1056 | 1066 | ||
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4bc083b7c9b5..2a7ca4a1e6f7 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk) | |||
| 513 | if (err) | 513 | if (err) |
| 514 | num_vqs = 1; | 514 | num_vqs = 1; |
| 515 | 515 | ||
| 516 | num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs); | ||
| 517 | |||
| 516 | vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); | 518 | vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); |
| 517 | if (!vblk->vqs) | 519 | if (!vblk->vqs) |
| 518 | return -ENOMEM; | 520 | return -ENOMEM; |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 87ccef4bd69e..32a21b8d1d85 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
| @@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace) | |||
| 1090 | return 0; | 1090 | return 0; |
| 1091 | 1091 | ||
| 1092 | err_read: | 1092 | err_read: |
| 1093 | /* prevent double queue cleanup */ | ||
| 1094 | ace->gd->queue = NULL; | ||
| 1093 | put_disk(ace->gd); | 1095 | put_disk(ace->gd); |
| 1094 | err_alloc_disk: | 1096 | err_alloc_disk: |
| 1095 | blk_cleanup_queue(ace->queue); | 1097 | blk_cleanup_queue(ace->queue); |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ded198328f21..7db48ae65cd2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
| @@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) | |||
| 2942 | return 0; | 2942 | return 0; |
| 2943 | } | 2943 | } |
| 2944 | 2944 | ||
| 2945 | irq_set_status_flags(irq, IRQ_NOAUTOEN); | ||
| 2945 | ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, | 2946 | ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, |
| 2946 | 0, "OOB Wake-on-BT", data); | 2947 | 0, "OOB Wake-on-BT", data); |
| 2947 | if (ret) { | 2948 | if (ret) { |
| @@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) | |||
| 2956 | } | 2957 | } |
| 2957 | 2958 | ||
| 2958 | data->oob_wake_irq = irq; | 2959 | data->oob_wake_irq = irq; |
| 2959 | disable_irq(irq); | ||
| 2960 | bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); | 2960 | bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); |
| 2961 | return 0; | 2961 | return 0; |
| 2962 | } | 2962 | } |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 72866a004f07..466ebd84ad17 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
| @@ -348,7 +348,7 @@ config XILINX_HWICAP | |||
| 348 | 348 | ||
| 349 | config R3964 | 349 | config R3964 |
| 350 | tristate "Siemens R3964 line discipline" | 350 | tristate "Siemens R3964 line discipline" |
| 351 | depends on TTY | 351 | depends on TTY && BROKEN |
| 352 | ---help--- | 352 | ---help--- |
| 353 | This driver allows synchronous communication with devices using the | 353 | This driver allows synchronous communication with devices using the |
| 354 | Siemens R3964 packet protocol. Unless you are dealing with special | 354 | Siemens R3964 packet protocol. Unless you are dealing with special |
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index ff0b199be472..f2411468f33f 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c | |||
| @@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, | |||
| 66 | return; | 66 | return; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | memset(&p, 0, sizeof(p)); | ||
| 70 | p.addr = base_addr; | 69 | p.addr = base_addr; |
| 71 | p.space = space; | 70 | p.space = space; |
| 72 | p.regspacing = offset; | 71 | p.regspacing = offset; |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e8ba67834746..00bf4b17edbf 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -214,6 +214,9 @@ struct ipmi_user { | |||
| 214 | 214 | ||
| 215 | /* Does this interface receive IPMI events? */ | 215 | /* Does this interface receive IPMI events? */ |
| 216 | bool gets_events; | 216 | bool gets_events; |
| 217 | |||
| 218 | /* Free must run in process context for RCU cleanup. */ | ||
| 219 | struct work_struct remove_work; | ||
| 217 | }; | 220 | }; |
| 218 | 221 | ||
| 219 | static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) | 222 | static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) |
| @@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf, | |||
| 1157 | return rv; | 1160 | return rv; |
| 1158 | } | 1161 | } |
| 1159 | 1162 | ||
| 1163 | static void free_user_work(struct work_struct *work) | ||
| 1164 | { | ||
| 1165 | struct ipmi_user *user = container_of(work, struct ipmi_user, | ||
| 1166 | remove_work); | ||
| 1167 | |||
| 1168 | cleanup_srcu_struct(&user->release_barrier); | ||
| 1169 | kfree(user); | ||
| 1170 | } | ||
| 1171 | |||
| 1160 | int ipmi_create_user(unsigned int if_num, | 1172 | int ipmi_create_user(unsigned int if_num, |
| 1161 | const struct ipmi_user_hndl *handler, | 1173 | const struct ipmi_user_hndl *handler, |
| 1162 | void *handler_data, | 1174 | void *handler_data, |
| @@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num, | |||
| 1200 | goto out_kfree; | 1212 | goto out_kfree; |
| 1201 | 1213 | ||
| 1202 | found: | 1214 | found: |
| 1215 | INIT_WORK(&new_user->remove_work, free_user_work); | ||
| 1216 | |||
| 1203 | rv = init_srcu_struct(&new_user->release_barrier); | 1217 | rv = init_srcu_struct(&new_user->release_barrier); |
| 1204 | if (rv) | 1218 | if (rv) |
| 1205 | goto out_kfree; | 1219 | goto out_kfree; |
| @@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info); | |||
| 1260 | static void free_user(struct kref *ref) | 1274 | static void free_user(struct kref *ref) |
| 1261 | { | 1275 | { |
| 1262 | struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); | 1276 | struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); |
| 1263 | cleanup_srcu_struct(&user->release_barrier); | 1277 | |
| 1264 | kfree(user); | 1278 | /* SRCU cleanup must happen in task context. */ |
| 1279 | schedule_work(&user->remove_work); | ||
| 1265 | } | 1280 | } |
| 1266 | 1281 | ||
| 1267 | static void _ipmi_destroy_user(struct ipmi_user *user) | 1282 | static void _ipmi_destroy_user(struct ipmi_user *user) |
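The ipmi_msghandler hunks move the heavy part of the kref release out of the release callback: free_user() now only queues a work item, and the work handler performs cleanup_srcu_struct() and kfree() from process context. Below is a compile-oriented sketch of that "defer the sleeping part of a refcount release to a workqueue" shape, written against the usual kref/workqueue APIs; struct my_user and the function names are invented for illustration and are not the IPMI structures:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_user {
	struct kref refcount;
	struct work_struct remove_work;
	/* ... state whose teardown may sleep ... */
};

/* Runs from the workqueue, i.e. in process context. */
static void my_user_free_work(struct work_struct *work)
{
	struct my_user *user = container_of(work, struct my_user, remove_work);

	/* sleeping cleanup would go here, e.g. cleanup_srcu_struct() */
	kfree(user);
}

/* kref release callback: may run in atomic context, so only queue the work. */
static void my_user_release(struct kref *ref)
{
	struct my_user *user = container_of(ref, struct my_user, refcount);

	schedule_work(&user->remove_work);
}

static struct my_user *my_user_alloc(void)
{
	struct my_user *user = kzalloc(sizeof(*user), GFP_KERNEL);

	if (!user)
		return NULL;
	kref_init(&user->refcount);
	INIT_WORK(&user->remove_work, my_user_free_work);
	return user;
}

/* Holders drop their reference with kref_put(&user->refcount, my_user_release). */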
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 01946cad3d13..682221eebd66 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c | |||
| @@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void) | |||
| 118 | char *str; | 118 | char *str; |
| 119 | char *si_type[SI_MAX_PARMS]; | 119 | char *si_type[SI_MAX_PARMS]; |
| 120 | 120 | ||
| 121 | memset(si_type, 0, sizeof(si_type)); | ||
| 122 | |||
| 121 | /* Parse out the si_type string into its components. */ | 123 | /* Parse out the si_type string into its components. */ |
| 122 | str = si_type_str; | 124 | str = si_type_str; |
| 123 | if (*str != '\0') { | 125 | if (*str != '\0') { |
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index d8b77133a83a..f824563fc28d 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c | |||
| @@ -37,8 +37,8 @@ | |||
| 37 | * | 37 | * |
| 38 | * Returns size of the event. If it is an invalid event, returns 0. | 38 | * Returns size of the event. If it is an invalid event, returns 0. |
| 39 | */ | 39 | */ |
| 40 | static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event, | 40 | static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, |
| 41 | struct tcg_pcr_event *event_header) | 41 | struct tcg_pcr_event *event_header) |
| 42 | { | 42 | { |
| 43 | struct tcg_efi_specid_event_head *efispecid; | 43 | struct tcg_efi_specid_event_head *efispecid; |
| 44 | struct tcg_event_field *event_field; | 44 | struct tcg_event_field *event_field; |
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 8856cce5a23b..817ae09a369e 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c | |||
| @@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) | |||
| 233 | __poll_t mask = 0; | 233 | __poll_t mask = 0; |
| 234 | 234 | ||
| 235 | poll_wait(file, &priv->async_wait, wait); | 235 | poll_wait(file, &priv->async_wait, wait); |
| 236 | mutex_lock(&priv->buffer_mutex); | ||
| 236 | 237 | ||
| 237 | if (!priv->response_read || priv->response_length) | 238 | /* |
| 239 | * The response_length indicates if there is still response | ||
| 240 | * (or part of it) to be consumed. Partial reads decrease it | ||
| 241 | * by the number of bytes read, and write resets it to zero. | ||
| 242 | */ | ||
| 243 | if (priv->response_length) | ||
| 238 | mask = EPOLLIN | EPOLLRDNORM; | 244 | mask = EPOLLIN | EPOLLRDNORM; |
| 239 | else | 245 | else |
| 240 | mask = EPOLLOUT | EPOLLWRNORM; | 246 | mask = EPOLLOUT | EPOLLWRNORM; |
| 241 | 247 | ||
| 248 | mutex_unlock(&priv->buffer_mutex); | ||
| 242 | return mask; | 249 | return mask; |
| 243 | } | 250 | } |
| 244 | 251 | ||
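Read as one piece, the patched tpm_common_poll() reports readability only while unread response bytes remain, and it takes the same buffer mutex as read() and write() so the test cannot race with them. A consolidated sketch of that shape (chip_priv and chip_poll are illustrative names standing in for the TPM structures):

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct chip_priv {
	struct mutex buffer_mutex;      /* also taken by read() and write() */
	wait_queue_head_t async_wait;
	size_t response_length;         /* response bytes not yet consumed */
};

static __poll_t chip_poll(struct file *file, poll_table *wait)
{
	struct chip_priv *priv = file->private_data;
	__poll_t mask;

	poll_wait(file, &priv->async_wait, wait);

	mutex_lock(&priv->buffer_mutex);
	if (priv->response_length)
		mask = EPOLLIN | EPOLLRDNORM;   /* data still pending */
	else
		mask = EPOLLOUT | EPOLLWRNORM;  /* ready for a new command */
	mutex_unlock(&priv->buffer_mutex);

	return mask;
}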
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 83ece5639f86..ae1030c9b086 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
| @@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev) | |||
| 402 | if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) | 402 | if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) |
| 403 | return 0; | 403 | return 0; |
| 404 | 404 | ||
| 405 | if (chip->flags & TPM_CHIP_FLAG_TPM2) { | 405 | if (!tpm_chip_start(chip)) { |
| 406 | mutex_lock(&chip->tpm_mutex); | 406 | if (chip->flags & TPM_CHIP_FLAG_TPM2) |
| 407 | if (!tpm_chip_start(chip)) { | ||
| 408 | tpm2_shutdown(chip, TPM2_SU_STATE); | 407 | tpm2_shutdown(chip, TPM2_SU_STATE); |
| 409 | tpm_chip_stop(chip); | 408 | else |
| 410 | } | 409 | rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); |
| 411 | mutex_unlock(&chip->tpm_mutex); | 410 | |
| 412 | } else { | 411 | tpm_chip_stop(chip); |
| 413 | rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); | ||
| 414 | } | 412 | } |
| 415 | 413 | ||
| 416 | return rc; | 414 | return rc; |
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 89d6f3736dbf..f8edbb65eda3 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c | |||
| @@ -20,8 +20,7 @@ | |||
| 20 | #define PROG_ID_MAX 7 | 20 | #define PROG_ID_MAX 7 |
| 21 | 21 | ||
| 22 | #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) | 22 | #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) |
| 23 | #define PROG_PRES_MASK 0x7 | 23 | #define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & layout->pres_mask) |
| 24 | #define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) | ||
| 25 | #define PROG_MAX_RM9200_CSS 3 | 24 | #define PROG_MAX_RM9200_CSS 3 |
| 26 | 25 | ||
| 27 | struct clk_programmable { | 26 | struct clk_programmable { |
| @@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, | |||
| 37 | unsigned long parent_rate) | 36 | unsigned long parent_rate) |
| 38 | { | 37 | { |
| 39 | struct clk_programmable *prog = to_clk_programmable(hw); | 38 | struct clk_programmable *prog = to_clk_programmable(hw); |
| 39 | const struct clk_programmable_layout *layout = prog->layout; | ||
| 40 | unsigned int pckr; | 40 | unsigned int pckr; |
| 41 | unsigned long rate; | ||
| 41 | 42 | ||
| 42 | regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); | 43 | regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); |
| 43 | 44 | ||
| 44 | return parent_rate >> PROG_PRES(prog->layout, pckr); | 45 | if (layout->is_pres_direct) |
| 46 | rate = parent_rate / (PROG_PRES(layout, pckr) + 1); | ||
| 47 | else | ||
| 48 | rate = parent_rate >> PROG_PRES(layout, pckr); | ||
| 49 | |||
| 50 | return rate; | ||
| 45 | } | 51 | } |
| 46 | 52 | ||
| 47 | static int clk_programmable_determine_rate(struct clk_hw *hw, | 53 | static int clk_programmable_determine_rate(struct clk_hw *hw, |
| 48 | struct clk_rate_request *req) | 54 | struct clk_rate_request *req) |
| 49 | { | 55 | { |
| 56 | struct clk_programmable *prog = to_clk_programmable(hw); | ||
| 57 | const struct clk_programmable_layout *layout = prog->layout; | ||
| 50 | struct clk_hw *parent; | 58 | struct clk_hw *parent; |
| 51 | long best_rate = -EINVAL; | 59 | long best_rate = -EINVAL; |
| 52 | unsigned long parent_rate; | 60 | unsigned long parent_rate; |
| 53 | unsigned long tmp_rate; | 61 | unsigned long tmp_rate = 0; |
| 54 | int shift; | 62 | int shift; |
| 55 | int i; | 63 | int i; |
| 56 | 64 | ||
| @@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw, | |||
| 60 | continue; | 68 | continue; |
| 61 | 69 | ||
| 62 | parent_rate = clk_hw_get_rate(parent); | 70 | parent_rate = clk_hw_get_rate(parent); |
| 63 | for (shift = 0; shift < PROG_PRES_MASK; shift++) { | 71 | if (layout->is_pres_direct) { |
| 64 | tmp_rate = parent_rate >> shift; | 72 | for (shift = 0; shift <= layout->pres_mask; shift++) { |
| 65 | if (tmp_rate <= req->rate) | 73 | tmp_rate = parent_rate / (shift + 1); |
| 66 | break; | 74 | if (tmp_rate <= req->rate) |
| 75 | break; | ||
| 76 | } | ||
| 77 | } else { | ||
| 78 | for (shift = 0; shift < layout->pres_mask; shift++) { | ||
| 79 | tmp_rate = parent_rate >> shift; | ||
| 80 | if (tmp_rate <= req->rate) | ||
| 81 | break; | ||
| 82 | } | ||
| 67 | } | 83 | } |
| 68 | 84 | ||
| 69 | if (tmp_rate > req->rate) | 85 | if (tmp_rate > req->rate) |
| @@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 137 | if (!div) | 153 | if (!div) |
| 138 | return -EINVAL; | 154 | return -EINVAL; |
| 139 | 155 | ||
| 140 | shift = fls(div) - 1; | 156 | if (layout->is_pres_direct) { |
| 157 | shift = div - 1; | ||
| 141 | 158 | ||
| 142 | if (div != (1 << shift)) | 159 | if (shift > layout->pres_mask) |
| 143 | return -EINVAL; | 160 | return -EINVAL; |
| 161 | } else { | ||
| 162 | shift = fls(div) - 1; | ||
| 144 | 163 | ||
| 145 | if (shift >= PROG_PRES_MASK) | 164 | if (div != (1 << shift)) |
| 146 | return -EINVAL; | 165 | return -EINVAL; |
| 166 | |||
| 167 | if (shift >= layout->pres_mask) | ||
| 168 | return -EINVAL; | ||
| 169 | } | ||
| 147 | 170 | ||
| 148 | regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), | 171 | regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), |
| 149 | PROG_PRES_MASK << layout->pres_shift, | 172 | layout->pres_mask << layout->pres_shift, |
| 150 | shift << layout->pres_shift); | 173 | shift << layout->pres_shift); |
| 151 | 174 | ||
| 152 | return 0; | 175 | return 0; |
| @@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap, | |||
| 202 | } | 225 | } |
| 203 | 226 | ||
| 204 | const struct clk_programmable_layout at91rm9200_programmable_layout = { | 227 | const struct clk_programmable_layout at91rm9200_programmable_layout = { |
| 228 | .pres_mask = 0x7, | ||
| 205 | .pres_shift = 2, | 229 | .pres_shift = 2, |
| 206 | .css_mask = 0x3, | 230 | .css_mask = 0x3, |
| 207 | .have_slck_mck = 0, | 231 | .have_slck_mck = 0, |
| 232 | .is_pres_direct = 0, | ||
| 208 | }; | 233 | }; |
| 209 | 234 | ||
| 210 | const struct clk_programmable_layout at91sam9g45_programmable_layout = { | 235 | const struct clk_programmable_layout at91sam9g45_programmable_layout = { |
| 236 | .pres_mask = 0x7, | ||
| 211 | .pres_shift = 2, | 237 | .pres_shift = 2, |
| 212 | .css_mask = 0x3, | 238 | .css_mask = 0x3, |
| 213 | .have_slck_mck = 1, | 239 | .have_slck_mck = 1, |
| 240 | .is_pres_direct = 0, | ||
| 214 | }; | 241 | }; |
| 215 | 242 | ||
| 216 | const struct clk_programmable_layout at91sam9x5_programmable_layout = { | 243 | const struct clk_programmable_layout at91sam9x5_programmable_layout = { |
| 244 | .pres_mask = 0x7, | ||
| 217 | .pres_shift = 4, | 245 | .pres_shift = 4, |
| 218 | .css_mask = 0x7, | 246 | .css_mask = 0x7, |
| 219 | .have_slck_mck = 0, | 247 | .have_slck_mck = 0, |
| 248 | .is_pres_direct = 0, | ||
| 220 | }; | 249 | }; |
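The clk-programmable rework supports two prescaler encodings, selected per layout by is_pres_direct: the classic power-of-two field, where rate = parent >> pres, and sama5d2's direct divider, where rate = parent / (pres + 1), with pres bounded by the layout's pres_mask. A standalone worked example of the two recalculation rules and the "largest rate not above the request" searches from the patch; the field names mirror the patch, the sample rates are invented, and the search loop is unified for brevity (the driver's power-of-two search stops one step earlier, at pres_mask - 1):

#include <stdio.h>

struct layout {
	unsigned int pres_mask;
	int is_pres_direct;
};

/* Recalculate the output rate from the programmed prescaler value. */
static unsigned long recalc_rate(const struct layout *l,
				 unsigned long parent_rate, unsigned int pres)
{
	if (l->is_pres_direct)
		return parent_rate / (pres + 1);   /* divider is pres + 1 */
	return parent_rate >> pres;                /* divider is 2^pres */
}

/* Pick the smallest prescaler whose rate does not exceed the request. */
static unsigned int find_pres(const struct layout *l,
			      unsigned long parent_rate, unsigned long req)
{
	unsigned int pres;

	for (pres = 0; pres <= l->pres_mask; pres++)
		if (recalc_rate(l, parent_rate, pres) <= req)
			break;
	return pres;
}

int main(void)
{
	const struct layout at91sam9x5 = { .pres_mask = 0x7,  .is_pres_direct = 0 };
	const struct layout sama5d2    = { .pres_mask = 0xff, .is_pres_direct = 1 };
	unsigned long parent = 132000000UL, req = 20000000UL;
	unsigned int p1 = find_pres(&at91sam9x5, parent, req);
	unsigned int p2 = find_pres(&sama5d2, parent, req);

	/* power-of-two lands on 132 MHz >> 3 = 16.5 MHz; direct on 132/7 ~= 18.86 MHz */
	printf("power-of-two: pres=%u -> %lu Hz\n", p1, recalc_rate(&at91sam9x5, parent, p1));
	printf("direct:       pres=%u -> %lu Hz\n", p2, recalc_rate(&sama5d2, parent, p2));
	return 0;
}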
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 672a79bda88c..a0e5ce9c9b9e 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h | |||
| @@ -71,9 +71,11 @@ struct clk_pll_characteristics { | |||
| 71 | }; | 71 | }; |
| 72 | 72 | ||
| 73 | struct clk_programmable_layout { | 73 | struct clk_programmable_layout { |
| 74 | u8 pres_mask; | ||
| 74 | u8 pres_shift; | 75 | u8 pres_shift; |
| 75 | u8 css_mask; | 76 | u8 css_mask; |
| 76 | u8 have_slck_mck; | 77 | u8 have_slck_mck; |
| 78 | u8 is_pres_direct; | ||
| 77 | }; | 79 | }; |
| 78 | 80 | ||
| 79 | extern const struct clk_programmable_layout at91rm9200_programmable_layout; | 81 | extern const struct clk_programmable_layout at91rm9200_programmable_layout; |
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 1f70cb164b06..81943fac4537 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c | |||
| @@ -125,6 +125,14 @@ static const struct { | |||
| 125 | .pll = true }, | 125 | .pll = true }, |
| 126 | }; | 126 | }; |
| 127 | 127 | ||
| 128 | static const struct clk_programmable_layout sama5d2_programmable_layout = { | ||
| 129 | .pres_mask = 0xff, | ||
| 130 | .pres_shift = 4, | ||
| 131 | .css_mask = 0x7, | ||
| 132 | .have_slck_mck = 0, | ||
| 133 | .is_pres_direct = 1, | ||
| 134 | }; | ||
| 135 | |||
| 128 | static void __init sama5d2_pmc_setup(struct device_node *np) | 136 | static void __init sama5d2_pmc_setup(struct device_node *np) |
| 129 | { | 137 | { |
| 130 | struct clk_range range = CLK_RANGE(0, 0); | 138 | struct clk_range range = CLK_RANGE(0, 0); |
| @@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) | |||
| 249 | 257 | ||
| 250 | hw = at91_clk_register_programmable(regmap, name, | 258 | hw = at91_clk_register_programmable(regmap, name, |
| 251 | parent_names, 6, i, | 259 | parent_names, 6, i, |
| 252 | &at91sam9x5_programmable_layout); | 260 | &sama5d2_programmable_layout); |
| 253 | if (IS_ERR(hw)) | 261 | if (IS_ERR(hw)) |
| 254 | goto err_free; | 262 | goto err_free; |
| 255 | } | 263 | } |
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 1acfa3e3cfb4..113d71042199 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c | |||
| @@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, | |||
| 362 | 362 | ||
| 363 | switch (pll_clk->type) { | 363 | switch (pll_clk->type) { |
| 364 | case PLL_1416X: | 364 | case PLL_1416X: |
| 365 | if (!pll->rate_table) | 365 | if (!pll_clk->rate_table) |
| 366 | init.ops = &clk_pll1416x_min_ops; | 366 | init.ops = &clk_pll1416x_min_ops; |
| 367 | else | 367 | else |
| 368 | init.ops = &clk_pll1416x_ops; | 368 | init.ops = &clk_pll1416x_ops; |
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index 9628d4e7690b..85daf826619a 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c | |||
| @@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate( | |||
| 169 | return ERR_PTR(-ENOMEM); | 169 | return ERR_PTR(-ENOMEM); |
| 170 | 170 | ||
| 171 | init.name = name; | 171 | init.name = name; |
| 172 | init.flags = CLK_SET_RATE_PARENT; | 172 | init.flags = flags | CLK_SET_RATE_PARENT; |
| 173 | init.parent_names = parent_name ? &parent_name : NULL; | 173 | init.parent_names = parent_name ? &parent_name : NULL; |
| 174 | init.num_parents = parent_name ? 1 : 0; | 174 | init.num_parents = parent_name ? 1 : 0; |
| 175 | init.ops = ops; | 175 | init.ops = ops; |
| 176 | init.flags = flags; | ||
| 177 | 176 | ||
| 178 | cg->regmap = regmap; | 177 | cg->regmap = regmap; |
| 179 | cg->set_ofs = set_ofs; | 178 | cg->set_ofs = set_ofs; |
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 41e16dd7272a..7a14ac9b2fec 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c | |||
| @@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate, | |||
| 120 | return true; | 120 | return true; |
| 121 | } else { | 121 | } else { |
| 122 | /* Round down */ | 122 | /* Round down */ |
| 123 | if (now < rate && best < now) | 123 | if (now <= rate && best < now) |
| 124 | return true; | 124 | return true; |
| 125 | } | 125 | } |
| 126 | 126 | ||
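The one-character change in meson_clk_pll_is_better matters exactly when a candidate rate equals the requested rate: with '<' the exact match was rejected in round-down mode, so a strictly lower candidate could win. A tiny standalone illustration; is_better_round_down() and the sample rates are made up for the example:

#include <stdbool.h>
#include <stdio.h>

/* Round-down policy: prefer the largest candidate not above the target. */
static bool is_better_round_down(unsigned long target, unsigned long now,
				 unsigned long best)
{
	return now <= target && best < now;   /* '<' here would reject now == target */
}

int main(void)
{
	unsigned long target = 48000000, best = 0;
	unsigned long candidates[] = { 47000000, 48000000, 50000000 };

	for (int i = 0; i < 3; i++)
		if (is_better_round_down(target, candidates[i], best))
			best = candidates[i];

	printf("chosen rate: %lu\n", best);   /* 48000000; '<' would give 47000000 */
	return 0;
}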
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 0e1ce8c03259..f7b11e1eeebe 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c | |||
| @@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = { | |||
| 960 | /* VPU Clock */ | 960 | /* VPU Clock */ |
| 961 | 961 | ||
| 962 | static const char * const g12a_vpu_parent_names[] = { | 962 | static const char * const g12a_vpu_parent_names[] = { |
| 963 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", | 963 | "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7", |
| 964 | "mpll1", "vid_pll", "hifi_pll", "gp0_pll", | 964 | "mpll1", "vid_pll", "hifi_pll", "gp0_pll", |
| 965 | }; | 965 | }; |
| 966 | 966 | ||
| 967 | static struct clk_regmap g12a_vpu_0_sel = { | 967 | static struct clk_regmap g12a_vpu_0_sel = { |
| 968 | .data = &(struct clk_regmap_mux_data){ | 968 | .data = &(struct clk_regmap_mux_data){ |
| 969 | .offset = HHI_VPU_CLK_CNTL, | 969 | .offset = HHI_VPU_CLK_CNTL, |
| 970 | .mask = 0x3, | 970 | .mask = 0x7, |
| 971 | .shift = 9, | 971 | .shift = 9, |
| 972 | }, | 972 | }, |
| 973 | .hw.init = &(struct clk_init_data){ | 973 | .hw.init = &(struct clk_init_data){ |
| @@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = { | |||
| 1011 | static struct clk_regmap g12a_vpu_1_sel = { | 1011 | static struct clk_regmap g12a_vpu_1_sel = { |
| 1012 | .data = &(struct clk_regmap_mux_data){ | 1012 | .data = &(struct clk_regmap_mux_data){ |
| 1013 | .offset = HHI_VPU_CLK_CNTL, | 1013 | .offset = HHI_VPU_CLK_CNTL, |
| 1014 | .mask = 0x3, | 1014 | .mask = 0x7, |
| 1015 | .shift = 25, | 1015 | .shift = 25, |
| 1016 | }, | 1016 | }, |
| 1017 | .hw.init = &(struct clk_init_data){ | 1017 | .hw.init = &(struct clk_init_data){ |
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 04df2e208ed6..29ffb4fde714 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c | |||
| @@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = { | |||
| 2216 | .offset = HHI_VDEC_CLK_CNTL, | 2216 | .offset = HHI_VDEC_CLK_CNTL, |
| 2217 | .shift = 0, | 2217 | .shift = 0, |
| 2218 | .width = 7, | 2218 | .width = 7, |
| 2219 | .flags = CLK_DIVIDER_ROUND_CLOSEST, | ||
| 2219 | }, | 2220 | }, |
| 2220 | .hw.init = &(struct clk_init_data){ | 2221 | .hw.init = &(struct clk_init_data){ |
| 2221 | .name = "vdec_1_div", | 2222 | .name = "vdec_1_div", |
| @@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = { | |||
| 2261 | .offset = HHI_VDEC2_CLK_CNTL, | 2262 | .offset = HHI_VDEC2_CLK_CNTL, |
| 2262 | .shift = 16, | 2263 | .shift = 16, |
| 2263 | .width = 7, | 2264 | .width = 7, |
| 2265 | .flags = CLK_DIVIDER_ROUND_CLOSEST, | ||
| 2264 | }, | 2266 | }, |
| 2265 | .hw.init = &(struct clk_init_data){ | 2267 | .hw.init = &(struct clk_init_data){ |
| 2266 | .name = "vdec_hevc_div", | 2268 | .name = "vdec_hevc_div", |
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index 08bcc01c0923..daff235bc763 100644 --- a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c | |||
| @@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw, | |||
| 82 | div = _get_table_val(meson_parm_read(clk->map, &pll_div->val), | 82 | div = _get_table_val(meson_parm_read(clk->map, &pll_div->val), |
| 83 | meson_parm_read(clk->map, &pll_div->sel)); | 83 | meson_parm_read(clk->map, &pll_div->sel)); |
| 84 | if (!div || !div->divider) { | 84 | if (!div || !div->divider) { |
| 85 | pr_info("%s: Invalid config value for vid_pll_div\n", __func__); | 85 | pr_debug("%s: Invalid config value for vid_pll_div\n", __func__); |
| 86 | return parent_rate; | 86 | return 0; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider); | 89 | return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider); |
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index d977193842df..19174835693b 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c | |||
| @@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = { | |||
| 165 | }; | 165 | }; |
| 166 | 166 | ||
| 167 | static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, | 167 | static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, |
| 168 | void __iomem *base, | 168 | const struct pmc_clk_data *pmc_data, |
| 169 | const char **parent_names, | 169 | const char **parent_names, |
| 170 | int num_parents) | 170 | int num_parents) |
| 171 | { | 171 | { |
| @@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, | |||
| 184 | init.num_parents = num_parents; | 184 | init.num_parents = num_parents; |
| 185 | 185 | ||
| 186 | pclk->hw.init = &init; | 186 | pclk->hw.init = &init; |
| 187 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; | 187 | pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; |
| 188 | spin_lock_init(&pclk->lock); | 188 | spin_lock_init(&pclk->lock); |
| 189 | 189 | ||
| 190 | /* | ||
| 191 | * On some systems, the pmc_plt_clocks already enabled by the | ||
| 192 | * firmware are being marked as critical to avoid them being | ||
| 193 | * gated by the clock framework. | ||
| 194 | */ | ||
| 195 | if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw)) | ||
| 196 | init.flags |= CLK_IS_CRITICAL; | ||
| 197 | |||
| 190 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); | 198 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); |
| 191 | if (ret) { | 199 | if (ret) { |
| 192 | pclk = ERR_PTR(ret); | 200 | pclk = ERR_PTR(ret); |
| @@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev) | |||
| 332 | return PTR_ERR(parent_names); | 340 | return PTR_ERR(parent_names); |
| 333 | 341 | ||
| 334 | for (i = 0; i < PMC_CLK_NUM; i++) { | 342 | for (i = 0; i < PMC_CLK_NUM; i++) { |
| 335 | data->clks[i] = plt_clk_register(pdev, i, pmc_data->base, | 343 | data->clks[i] = plt_clk_register(pdev, i, pmc_data, |
| 336 | parent_names, data->nparents); | 344 | parent_names, data->nparents); |
| 337 | if (IS_ERR(data->clks[i])) { | 345 | if (IS_ERR(data->clks[i])) { |
| 338 | err = PTR_ERR(data->clks[i]); | 346 | err = PTR_ERR(data->clks[i]); |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 171502a356aa..4b3d143f0f8a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -145,6 +145,7 @@ config VT8500_TIMER | |||
| 145 | config NPCM7XX_TIMER | 145 | config NPCM7XX_TIMER |
| 146 | bool "NPCM7xx timer driver" if COMPILE_TEST | 146 | bool "NPCM7xx timer driver" if COMPILE_TEST |
| 147 | depends on HAS_IOMEM | 147 | depends on HAS_IOMEM |
| 148 | select TIMER_OF | ||
| 148 | select CLKSRC_MMIO | 149 | select CLKSRC_MMIO |
| 149 | help | 150 | help |
| 150 | Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, | 151 | Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aa4ec53281ce..ea373cfbcecb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #define pr_fmt(fmt) "arm_arch_timer: " fmt | 12 | #define pr_fmt(fmt) "arch_timer: " fmt |
| 13 | 13 | ||
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| @@ -33,9 +33,6 @@ | |||
| 33 | 33 | ||
| 34 | #include <clocksource/arm_arch_timer.h> | 34 | #include <clocksource/arm_arch_timer.h> |
| 35 | 35 | ||
| 36 | #undef pr_fmt | ||
| 37 | #define pr_fmt(fmt) "arch_timer: " fmt | ||
| 38 | |||
| 39 | #define CNTTIDR 0x08 | 36 | #define CNTTIDR 0x08 |
| 40 | #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) | 37 | #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) |
| 41 | 38 | ||
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c index eed6feff8b5f..30c6f4ce672b 100644 --- a/drivers/clocksource/timer-oxnas-rps.c +++ b/drivers/clocksource/timer-oxnas-rps.c | |||
| @@ -296,4 +296,4 @@ err_alloc: | |||
| 296 | TIMER_OF_DECLARE(ox810se_rps, | 296 | TIMER_OF_DECLARE(ox810se_rps, |
| 297 | "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); | 297 | "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); |
| 298 | TIMER_OF_DECLARE(ox820_rps, | 298 | TIMER_OF_DECLARE(ox820_rps, |
| 299 | "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init); | 299 | "oxsemi,ox820-rps-timer", oxnas_rps_timer_init); |
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 3352da6ed61f..ee8ec5a8cb16 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c | |||
| @@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, | |||
| 585 | return 0; | 585 | return 0; |
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | /* Optimized set_load which removes costly spin wait in timer_start */ | ||
| 589 | static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, | ||
| 590 | int autoreload, unsigned int load) | ||
| 591 | { | ||
| 592 | u32 l; | ||
| 593 | |||
| 594 | if (unlikely(!timer)) | ||
| 595 | return -EINVAL; | ||
| 596 | |||
| 597 | omap_dm_timer_enable(timer); | ||
| 598 | |||
| 599 | l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); | ||
| 600 | if (autoreload) { | ||
| 601 | l |= OMAP_TIMER_CTRL_AR; | ||
| 602 | omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load); | ||
| 603 | } else { | ||
| 604 | l &= ~OMAP_TIMER_CTRL_AR; | ||
| 605 | } | ||
| 606 | l |= OMAP_TIMER_CTRL_ST; | ||
| 607 | |||
| 608 | __omap_dm_timer_load_start(timer, l, load, timer->posted); | ||
| 609 | |||
| 610 | /* Save the context */ | ||
| 611 | timer->context.tclr = l; | ||
| 612 | timer->context.tldr = load; | ||
| 613 | timer->context.tcrr = load; | ||
| 614 | return 0; | ||
| 615 | } | ||
| 616 | static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, | 588 | static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, |
| 617 | unsigned int match) | 589 | unsigned int match) |
| 618 | { | 590 | { |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b599c7318aab..2986119dd31f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void) | |||
| 2596 | const struct x86_cpu_id *id; | 2596 | const struct x86_cpu_id *id; |
| 2597 | int rc; | 2597 | int rc; |
| 2598 | 2598 | ||
| 2599 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
| 2600 | return -ENODEV; | ||
| 2601 | |||
| 2599 | if (no_load) | 2602 | if (no_load) |
| 2600 | return -ENODEV; | 2603 | return -ENODEV; |
| 2601 | 2604 | ||
| @@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void) | |||
| 2611 | } else { | 2614 | } else { |
| 2612 | id = x86_match_cpu(intel_pstate_cpu_ids); | 2615 | id = x86_match_cpu(intel_pstate_cpu_ids); |
| 2613 | if (!id) { | 2616 | if (!id) { |
| 2614 | pr_info("CPU ID not supported\n"); | 2617 | pr_info("CPU model not supported\n"); |
| 2615 | return -ENODEV; | 2618 | return -ENODEV; |
| 2616 | } | 2619 | } |
| 2617 | 2620 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index b1eadc6652b5..7205d9f4029e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
| 865 | if (ret) | 865 | if (ret) |
| 866 | goto unmap_ctx; | 866 | goto unmap_ctx; |
| 867 | 867 | ||
| 868 | if (mapped_nents) { | 868 | if (mapped_nents) |
| 869 | sg_to_sec4_sg_last(req->src, mapped_nents, | 869 | sg_to_sec4_sg_last(req->src, mapped_nents, |
| 870 | edesc->sec4_sg + sec4_sg_src_index, | 870 | edesc->sec4_sg + sec4_sg_src_index, |
| 871 | 0); | 871 | 0); |
| 872 | if (*next_buflen) | 872 | else |
| 873 | scatterwalk_map_and_copy(next_buf, req->src, | ||
| 874 | to_hash - *buflen, | ||
| 875 | *next_buflen, 0); | ||
| 876 | } else { | ||
| 877 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - | 873 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - |
| 878 | 1); | 874 | 1); |
| 879 | } | ||
| 880 | 875 | ||
| 876 | if (*next_buflen) | ||
| 877 | scatterwalk_map_and_copy(next_buf, req->src, | ||
| 878 | to_hash - *buflen, | ||
| 879 | *next_buflen, 0); | ||
| 881 | desc = edesc->hw_desc; | 880 | desc = edesc->hw_desc; |
| 882 | 881 | ||
| 883 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 882 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 8e17149655f0..540e8cd16ee6 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig | |||
| @@ -116,7 +116,7 @@ config EXTCON_PALMAS | |||
| 116 | 116 | ||
| 117 | config EXTCON_PTN5150 | 117 | config EXTCON_PTN5150 |
| 118 | tristate "NXP PTN5150 CC LOGIC USB EXTCON support" | 118 | tristate "NXP PTN5150 CC LOGIC USB EXTCON support" |
| 119 | depends on I2C && GPIOLIB || COMPILE_TEST | 119 | depends on I2C && (GPIOLIB || COMPILE_TEST) |
| 120 | select REGMAP_I2C | 120 | select REGMAP_I2C |
| 121 | help | 121 | help |
| 122 | Say Y here to enable support for USB peripheral and USB host | 122 | Say Y here to enable support for USB peripheral and USB host |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4f8fb4ecde34..79fb302fb954 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) | |||
| 3165 | 3165 | ||
| 3166 | /* No need to recover an evicted BO */ | 3166 | /* No need to recover an evicted BO */ |
| 3167 | if (shadow->tbo.mem.mem_type != TTM_PL_TT || | 3167 | if (shadow->tbo.mem.mem_type != TTM_PL_TT || |
| 3168 | shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || | ||
| 3168 | shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) | 3169 | shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) |
| 3169 | continue; | 3170 | continue; |
| 3170 | 3171 | ||
| @@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) | |||
| 3173 | break; | 3174 | break; |
| 3174 | 3175 | ||
| 3175 | if (fence) { | 3176 | if (fence) { |
| 3176 | r = dma_fence_wait_timeout(fence, false, tmo); | 3177 | tmo = dma_fence_wait_timeout(fence, false, tmo); |
| 3177 | dma_fence_put(fence); | 3178 | dma_fence_put(fence); |
| 3178 | fence = next; | 3179 | fence = next; |
| 3179 | if (r <= 0) | 3180 | if (tmo == 0) { |
| 3181 | r = -ETIMEDOUT; | ||
| 3180 | break; | 3182 | break; |
| 3183 | } else if (tmo < 0) { | ||
| 3184 | r = tmo; | ||
| 3185 | break; | ||
| 3186 | } | ||
| 3181 | } else { | 3187 | } else { |
| 3182 | fence = next; | 3188 | fence = next; |
| 3183 | } | 3189 | } |
| @@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) | |||
| 3188 | tmo = dma_fence_wait_timeout(fence, false, tmo); | 3194 | tmo = dma_fence_wait_timeout(fence, false, tmo); |
| 3189 | dma_fence_put(fence); | 3195 | dma_fence_put(fence); |
| 3190 | 3196 | ||
| 3191 | if (r <= 0 || tmo <= 0) { | 3197 | if (r < 0 || tmo <= 0) { |
| 3192 | DRM_ERROR("recover vram bo from shadow failed\n"); | 3198 | DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); |
| 3193 | return -EIO; | 3199 | return -EIO; |
| 3194 | } | 3200 | } |
| 3195 | 3201 | ||
| @@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, | |||
| 3625 | struct pci_dev *pdev = adev->pdev; | 3631 | struct pci_dev *pdev = adev->pdev; |
| 3626 | enum pci_bus_speed cur_speed; | 3632 | enum pci_bus_speed cur_speed; |
| 3627 | enum pcie_link_width cur_width; | 3633 | enum pcie_link_width cur_width; |
| 3634 | u32 ret = 1; | ||
| 3628 | 3635 | ||
| 3629 | *speed = PCI_SPEED_UNKNOWN; | 3636 | *speed = PCI_SPEED_UNKNOWN; |
| 3630 | *width = PCIE_LNK_WIDTH_UNKNOWN; | 3637 | *width = PCIE_LNK_WIDTH_UNKNOWN; |
| @@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, | |||
| 3632 | while (pdev) { | 3639 | while (pdev) { |
| 3633 | cur_speed = pcie_get_speed_cap(pdev); | 3640 | cur_speed = pcie_get_speed_cap(pdev); |
| 3634 | cur_width = pcie_get_width_cap(pdev); | 3641 | cur_width = pcie_get_width_cap(pdev); |
| 3642 | ret = pcie_bandwidth_available(adev->pdev, NULL, | ||
| 3643 | NULL, &cur_width); | ||
| 3644 | if (!ret) | ||
| 3645 | cur_width = PCIE_LNK_WIDTH_RESRV; | ||
| 3635 | 3646 | ||
| 3636 | if (cur_speed != PCI_SPEED_UNKNOWN) { | 3647 | if (cur_speed != PCI_SPEED_UNKNOWN) { |
| 3637 | if (*speed == PCI_SPEED_UNKNOWN) | 3648 | if (*speed == PCI_SPEED_UNKNOWN) |
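The amdgpu_device_recover_vram() fix stops treating the return of dma_fence_wait_timeout() as a plain error code: a positive value is the remaining time budget to carry into the next wait, zero means the timeout expired, and only a negative value is a real error. A standalone sketch of handling that convention; wait_remaining() is a hypothetical stand-in that mimics the same return semantics, and the fence values are invented:

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical wait primitive with dma_fence_wait_timeout()-like semantics:
 *  > 0  signalled, value is the time budget left,
 * == 0  the timeout expired,
 *  < 0  an error occurred while waiting.
 */
static long wait_remaining(int completes_after, long budget)
{
	if (completes_after < 0)
		return -EINTR;                 /* simulated error */
	if (completes_after > budget)
		return 0;                      /* simulated timeout */
	return budget - completes_after;       /* time left over */
}

static int wait_for_all(const int *fences, int n, long budget)
{
	long tmo = budget;

	for (int i = 0; i < n; i++) {
		tmo = wait_remaining(fences[i], tmo);
		if (tmo == 0)
			return -ETIMEDOUT;     /* budget exhausted */
		if (tmo < 0)
			return (int)tmo;       /* propagate the real error */
	}
	return 0;
}

int main(void)
{
	int fences[] = { 10, 20, 1000 };   /* the third never completes in time */

	printf("result: %d\n", wait_for_all(fences, 3, 100));
	return 0;
}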
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0b8ef2d27d6b..fe393a46f881 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include "amdgpu_trace.h" | 35 | #include "amdgpu_trace.h" |
| 36 | 36 | ||
| 37 | #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) | 37 | #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) |
| 38 | #define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000) | ||
| 38 | 39 | ||
| 39 | /* | 40 | /* |
| 40 | * IB | 41 | * IB |
| @@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) | |||
| 344 | * cost waiting for it coming back under RUNTIME only | 345 | * cost waiting for it coming back under RUNTIME only |
| 345 | */ | 346 | */ |
| 346 | tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; | 347 | tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; |
| 348 | } else if (adev->gmc.xgmi.hive_id) { | ||
| 349 | tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; | ||
| 347 | } | 350 | } |
| 348 | 351 | ||
| 349 | for (i = 0; i < adev->num_rings; ++i) { | 352 | for (i = 0; i < adev->num_rings; ++i) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d0309e8c9d12..a11db2b1a63f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
| @@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) | |||
| 2405 | /* disable CG */ | 2405 | /* disable CG */ |
| 2406 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); | 2406 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); |
| 2407 | 2407 | ||
| 2408 | adev->gfx.rlc.funcs->reset(adev); | ||
| 2409 | |||
| 2410 | gfx_v9_0_init_pg(adev); | 2408 | gfx_v9_0_init_pg(adev); |
| 2411 | 2409 | ||
| 2412 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | 2410 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index d0d966d6080a..1696644ec022 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | |||
| @@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) | |||
| 182 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, | 182 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, |
| 183 | L2_CACHE_BIGK_FRAGMENT_SIZE, 6); | 183 | L2_CACHE_BIGK_FRAGMENT_SIZE, 6); |
| 184 | } | 184 | } |
| 185 | WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); | ||
| 185 | 186 | ||
| 186 | tmp = mmVM_L2_CNTL4_DEFAULT; | 187 | tmp = mmVM_L2_CNTL4_DEFAULT; |
| 187 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); | 188 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8be9677c0c07..cf9a49f49d3a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = { | |||
| 320 | { 0x9876, &carrizo_device_info }, /* Carrizo */ | 320 | { 0x9876, &carrizo_device_info }, /* Carrizo */ |
| 321 | { 0x9877, &carrizo_device_info }, /* Carrizo */ | 321 | { 0x9877, &carrizo_device_info }, /* Carrizo */ |
| 322 | { 0x15DD, &raven_device_info }, /* Raven */ | 322 | { 0x15DD, &raven_device_info }, /* Raven */ |
| 323 | { 0x15D8, &raven_device_info }, /* Raven */ | ||
| 323 | #endif | 324 | #endif |
| 324 | { 0x67A0, &hawaii_device_info }, /* Hawaii */ | 325 | { 0x67A0, &hawaii_device_info }, /* Hawaii */ |
| 325 | { 0x67A1, &hawaii_device_info }, /* Hawaii */ | 326 | { 0x67A1, &hawaii_device_info }, /* Hawaii */ |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 81127f7d6ed1..3082b55b1e77 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane, | |||
| 4533 | amdgpu_crtc->cursor_width = plane->state->crtc_w; | 4533 | amdgpu_crtc->cursor_width = plane->state->crtc_w; |
| 4534 | amdgpu_crtc->cursor_height = plane->state->crtc_h; | 4534 | amdgpu_crtc->cursor_height = plane->state->crtc_h; |
| 4535 | 4535 | ||
| 4536 | memset(&attributes, 0, sizeof(attributes)); | ||
| 4536 | attributes.address.high_part = upper_32_bits(address); | 4537 | attributes.address.high_part = upper_32_bits(address); |
| 4537 | attributes.address.low_part = lower_32_bits(address); | 4538 | attributes.address.low_part = lower_32_bits(address); |
| 4538 | attributes.width = plane->state->crtc_w; | 4539 | attributes.width = plane->state->crtc_w; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c68fbd55db3c..a6cda201c964 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
| @@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc, | |||
| 1377 | return UPDATE_TYPE_FULL; | 1377 | return UPDATE_TYPE_FULL; |
| 1378 | } | 1378 | } |
| 1379 | 1379 | ||
| 1380 | if (u->surface->force_full_update) { | ||
| 1381 | update_flags->bits.full_update = 1; | ||
| 1382 | return UPDATE_TYPE_FULL; | ||
| 1383 | } | ||
| 1384 | |||
| 1380 | type = get_plane_info_update_type(u); | 1385 | type = get_plane_info_update_type(u); |
| 1381 | elevate_update_type(&overall_type, type); | 1386 | elevate_update_type(&overall_type, type); |
| 1382 | 1387 | ||
| @@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc, | |||
| 1802 | } | 1807 | } |
| 1803 | 1808 | ||
| 1804 | dc_resource_state_copy_construct(state, context); | 1809 | dc_resource_state_copy_construct(state, context); |
| 1810 | |||
| 1811 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 1812 | struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; | ||
| 1813 | struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; | ||
| 1814 | |||
| 1815 | if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) | ||
| 1816 | new_pipe->plane_state->force_full_update = true; | ||
| 1817 | } | ||
| 1805 | } | 1818 | } |
| 1806 | 1819 | ||
| 1807 | 1820 | ||
| @@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc, | |||
| 1838 | dc->current_state = context; | 1851 | dc->current_state = context; |
| 1839 | dc_release_state(old); | 1852 | dc_release_state(old); |
| 1840 | 1853 | ||
| 1854 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 1855 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 1856 | |||
| 1857 | if (pipe_ctx->plane_state && pipe_ctx->stream == stream) | ||
| 1858 | pipe_ctx->plane_state->force_full_update = false; | ||
| 1859 | } | ||
| 1841 | } | 1860 | } |
| 1842 | /*let's use current_state to update watermark etc*/ | 1861 | /*let's use current_state to update watermark etc*/ |
| 1843 | if (update_type >= UPDATE_TYPE_FULL) | 1862 | if (update_type >= UPDATE_TYPE_FULL) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4eba3c4800b6..ea18e9c2d8ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -2660,12 +2660,18 @@ void core_link_enable_stream( | |||
| 2660 | void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) | 2660 | void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) |
| 2661 | { | 2661 | { |
| 2662 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; | 2662 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; |
| 2663 | struct dc_stream_state *stream = pipe_ctx->stream; | ||
| 2663 | 2664 | ||
| 2664 | core_dc->hwss.blank_stream(pipe_ctx); | 2665 | core_dc->hwss.blank_stream(pipe_ctx); |
| 2665 | 2666 | ||
| 2666 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) | 2667 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) |
| 2667 | deallocate_mst_payload(pipe_ctx); | 2668 | deallocate_mst_payload(pipe_ctx); |
| 2668 | 2669 | ||
| 2670 | if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) | ||
| 2671 | dal_ddc_service_write_scdc_data( | ||
| 2672 | stream->link->ddc, 0, | ||
| 2673 | stream->timing.flags.LTE_340MCSC_SCRAMBLE); | ||
| 2674 | |||
| 2669 | core_dc->hwss.disable_stream(pipe_ctx, option); | 2675 | core_dc->hwss.disable_stream(pipe_ctx, option); |
| 2670 | 2676 | ||
| 2671 | disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); | 2677 | disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1a7fd6aa77eb..0515095574e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
| @@ -503,6 +503,9 @@ struct dc_plane_state { | |||
| 503 | struct dc_plane_status status; | 503 | struct dc_plane_status status; |
| 504 | struct dc_context *ctx; | 504 | struct dc_context *ctx; |
| 505 | 505 | ||
| 506 | /* HACK: Workaround for forcing full reprogramming under some conditions */ | ||
| 507 | bool force_full_update; | ||
| 508 | |||
| 506 | /* private to dc_surface.c */ | 509 | /* private to dc_surface.c */ |
| 507 | enum dc_irq_source irq_source; | 510 | enum dc_irq_source irq_source; |
| 508 | struct kref refcount; | 511 | struct kref refcount; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 4febf4ef7240..4fe3664fb495 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | |||
| @@ -190,6 +190,12 @@ static void submit_channel_request( | |||
| 190 | 1, | 190 | 1, |
| 191 | 0); | 191 | 0); |
| 192 | } | 192 | } |
| 193 | |||
| 194 | REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); | ||
| 195 | |||
| 196 | REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, | ||
| 197 | 10, aux110->timeout_period/10); | ||
| 198 | |||
| 193 | /* set the delay and the number of bytes to write */ | 199 | /* set the delay and the number of bytes to write */ |
| 194 | 200 | ||
| 195 | /* The length include | 201 | /* The length include |
| @@ -242,9 +248,6 @@ static void submit_channel_request( | |||
| 242 | } | 248 | } |
| 243 | } | 249 | } |
| 244 | 250 | ||
| 245 | REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); | ||
| 246 | REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, | ||
| 247 | 10, aux110->timeout_period/10); | ||
| 248 | REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); | 251 | REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); |
| 249 | } | 252 | } |
| 250 | 253 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index d27f22c05e4b..e28ed6a00ff4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | |||
| @@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a, | |||
| 71 | * at most within ~240usec. That means, | 71 | * at most within ~240usec. That means, |
| 72 | * increasing this timeout will not affect normal operation, | 72 | * increasing this timeout will not affect normal operation, |
| 73 | * and we'll timeout after | 73 | * and we'll timeout after |
| 74 | * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. | 74 | * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec. |
| 75 | * This timeout is especially important for | 75 | * This timeout is especially important for |
| 76 | * resume from S3 and CTS. | 76 | * converters, resume from S3, and CTS. |
| 77 | */ | 77 | */ |
| 78 | SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 | 78 | SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6 |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | struct dce_aux { | 81 | struct dce_aux { |
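Spelling out the arithmetic behind the comment change in dce_aux.h: the old text's 1600 µs at a multiplier of 4 implies an AUX_TIMEOUT_PERIOD of 400 µs, so raising SW_AUX_TIMEOUT_PERIOD_MULTIPLIER to 6 stretches the software AUX timeout to 6 × 400 µs = 2400 µs, giving slow DP converters, S3 resume and CTS more headroom. The 400 µs figure is inferred from the two quoted products, not stated directly in this hunk.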
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 683829466a44..0ba68d41b9c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
| @@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position( | |||
| 1150 | REG_UPDATE(CURSOR_CONTROL, | 1150 | REG_UPDATE(CURSOR_CONTROL, |
| 1151 | CURSOR_ENABLE, cur_en); | 1151 | CURSOR_ENABLE, cur_en); |
| 1152 | 1152 | ||
| 1153 | //account for cases where we see negative offset relative to overlay plane | 1153 | REG_SET_2(CURSOR_POSITION, 0, |
| 1154 | if (src_x_offset < 0 && src_y_offset < 0) { | ||
| 1155 | REG_SET_2(CURSOR_POSITION, 0, | ||
| 1156 | CURSOR_X_POSITION, 0, | ||
| 1157 | CURSOR_Y_POSITION, 0); | ||
| 1158 | x_hotspot -= src_x_offset; | ||
| 1159 | y_hotspot -= src_y_offset; | ||
| 1160 | } else if (src_x_offset < 0) { | ||
| 1161 | REG_SET_2(CURSOR_POSITION, 0, | ||
| 1162 | CURSOR_X_POSITION, 0, | ||
| 1163 | CURSOR_Y_POSITION, pos->y); | ||
| 1164 | x_hotspot -= src_x_offset; | ||
| 1165 | } else if (src_y_offset < 0) { | ||
| 1166 | REG_SET_2(CURSOR_POSITION, 0, | ||
| 1167 | CURSOR_X_POSITION, pos->x, | 1154 | CURSOR_X_POSITION, pos->x, |
| 1168 | CURSOR_Y_POSITION, 0); | 1155 | CURSOR_Y_POSITION, pos->y); |
| 1169 | y_hotspot -= src_y_offset; | ||
| 1170 | } else { | ||
| 1171 | REG_SET_2(CURSOR_POSITION, 0, | ||
| 1172 | CURSOR_X_POSITION, pos->x, | ||
| 1173 | CURSOR_Y_POSITION, pos->y); | ||
| 1174 | } | ||
| 1175 | 1156 | ||
| 1176 | REG_SET_2(CURSOR_HOT_SPOT, 0, | 1157 | REG_SET_2(CURSOR_HOT_SPOT, 0, |
| 1177 | CURSOR_HOT_SPOT_X, x_hotspot, | 1158 | CURSOR_HOT_SPOT_X, x_hotspot, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 9aa7bec1b5fe..23b5b94a4939 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
| @@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
| 91 | * MP0CLK DS | 91 | * MP0CLK DS |
| 92 | */ | 92 | */ |
| 93 | data->registry_data.disallowed_features = 0xE0041C00; | 93 | data->registry_data.disallowed_features = 0xE0041C00; |
| 94 | /* ECC feature should be disabled on old SMUs */ | ||
| 95 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); | ||
| 96 | hwmgr->smu_version = smum_get_argument(hwmgr); | ||
| 97 | if (hwmgr->smu_version < 0x282100) | ||
| 98 | data->registry_data.disallowed_features |= FEATURE_ECC_MASK; | ||
| 99 | |||
| 94 | data->registry_data.od_state_in_dc_support = 0; | 100 | data->registry_data.od_state_in_dc_support = 0; |
| 95 | data->registry_data.thermal_support = 1; | 101 | data->registry_data.thermal_support = 1; |
| 96 | data->registry_data.skip_baco_hardware = 0; | 102 | data->registry_data.skip_baco_hardware = 0; |
| @@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
| 357 | data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; | 363 | data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; |
| 358 | data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; | 364 | data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; |
| 359 | data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; | 365 | data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; |
| 366 | data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; | ||
| 360 | 367 | ||
| 361 | for (i = 0; i < GNLD_FEATURES_MAX; i++) { | 368 | for (i = 0; i < GNLD_FEATURES_MAX; i++) { |
| 362 | data->smu_features[i].smu_feature_bitmap = | 369 | data->smu_features[i].smu_feature_bitmap = |
| @@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) | |||
| 3020 | "FCLK_DS", | 3027 | "FCLK_DS", |
| 3021 | "MP1CLK_DS", | 3028 | "MP1CLK_DS", |
| 3022 | "MP0CLK_DS", | 3029 | "MP0CLK_DS", |
| 3023 | "XGMI"}; | 3030 | "XGMI", |
| 3031 | "ECC"}; | ||
| 3024 | static const char *output_title[] = { | 3032 | static const char *output_title[] = { |
| 3025 | "FEATURES", | 3033 | "FEATURES", |
| 3026 | "BITMASK", | 3034 | "BITMASK", |
| @@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) | |||
| 3462 | struct vega20_single_dpm_table *dpm_table; | 3470 | struct vega20_single_dpm_table *dpm_table; |
| 3463 | bool vblank_too_short = false; | 3471 | bool vblank_too_short = false; |
| 3464 | bool disable_mclk_switching; | 3472 | bool disable_mclk_switching; |
| 3473 | bool disable_fclk_switching; | ||
| 3465 | uint32_t i, latency; | 3474 | uint32_t i, latency; |
| 3466 | 3475 | ||
| 3467 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && | 3476 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && |
| @@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) | |||
| 3537 | if (hwmgr->display_config->nb_pstate_switch_disable) | 3546 | if (hwmgr->display_config->nb_pstate_switch_disable) |
| 3538 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; | 3547 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; |
| 3539 | 3548 | ||
| 3549 | if ((disable_mclk_switching && | ||
| 3550 | (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || | ||
| 3551 | hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) | ||
| 3552 | disable_fclk_switching = true; | ||
| 3553 | else | ||
| 3554 | disable_fclk_switching = false; | ||
| 3555 | |||
| 3540 | /* fclk */ | 3556 | /* fclk */ |
| 3541 | dpm_table = &(data->dpm_table.fclk_table); | 3557 | dpm_table = &(data->dpm_table.fclk_table); |
| 3542 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; | 3558 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; |
| 3543 | dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; | 3559 | dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; |
| 3544 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; | 3560 | dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; |
| 3545 | dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; | 3561 | dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; |
| 3546 | if (hwmgr->display_config->nb_pstate_switch_disable) | 3562 | if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) |
| 3547 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; | 3563 | dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; |
| 3548 | 3564 | ||
| 3549 | /* vclk */ | 3565 | /* vclk */ |
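The two functional changes in this file — masking off the ECC feature when the SMU firmware is older than 0x282100, and pinning the fclk soft minimum whenever mclk switching is effectively disabled — both reduce to simple threshold checks on values reported by the SMU. Below is a minimal userspace sketch of the version-gated feature mask, reusing the constants visible in this hunk; it illustrates the logic only and is not the powerplay code (the ECC mask value itself comes from the smu11 header changed further down).

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative constants; the real ones live in the powerplay headers
     * touched later in this patch (FEATURE_ECC_MASK at bit 33). */
    #define ECC_MASK        (1ULL << 33)
    #define MIN_ECC_SMU_FW  0x282100u

    static uint64_t build_disallowed_features(uint32_t smu_version)
    {
        uint64_t disallowed = 0xE0041C00ULL;    /* baseline from the hunk */

        /* SMU firmware older than the threshold cannot drive ECC, so the
         * feature is force-disabled, mirroring what
         * vega20_set_default_registry_data() now does after querying
         * PPSMC_MSG_GetSmuVersion. */
        if (smu_version < MIN_ECC_SMU_FW)
            disallowed |= ECC_MASK;

        return disallowed;
    }

    int main(void)
    {
        printf("old fw: 0x%016llx\n",
               (unsigned long long)build_disallowed_features(0x281200));
        printf("new fw: 0x%016llx\n",
               (unsigned long long)build_disallowed_features(0x282200));
        return 0;
    }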
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index a5bc758ae097..ac2a3118a0ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | |||
| @@ -80,6 +80,7 @@ enum { | |||
| 80 | GNLD_DS_MP1CLK, | 80 | GNLD_DS_MP1CLK, |
| 81 | GNLD_DS_MP0CLK, | 81 | GNLD_DS_MP0CLK, |
| 82 | GNLD_XGMI, | 82 | GNLD_XGMI, |
| 83 | GNLD_ECC, | ||
| 83 | 84 | ||
| 84 | GNLD_FEATURES_MAX | 85 | GNLD_FEATURES_MAX |
| 85 | }; | 86 | }; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 63d5cf691549..195c4ae67058 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | |||
| @@ -99,7 +99,7 @@ | |||
| 99 | #define FEATURE_DS_MP1CLK_BIT 30 | 99 | #define FEATURE_DS_MP1CLK_BIT 30 |
| 100 | #define FEATURE_DS_MP0CLK_BIT 31 | 100 | #define FEATURE_DS_MP0CLK_BIT 31 |
| 101 | #define FEATURE_XGMI_BIT 32 | 101 | #define FEATURE_XGMI_BIT 32 |
| 102 | #define FEATURE_SPARE_33_BIT 33 | 102 | #define FEATURE_ECC_BIT 33 |
| 103 | #define FEATURE_SPARE_34_BIT 34 | 103 | #define FEATURE_SPARE_34_BIT 34 |
| 104 | #define FEATURE_SPARE_35_BIT 35 | 104 | #define FEATURE_SPARE_35_BIT 35 |
| 105 | #define FEATURE_SPARE_36_BIT 36 | 105 | #define FEATURE_SPARE_36_BIT 36 |
| @@ -165,7 +165,8 @@ | |||
| 165 | #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) | 165 | #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) |
| 166 | #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) | 166 | #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) |
| 167 | #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) | 167 | #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) |
| 168 | #define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) | 168 | #define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) |
| 169 | #define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) | ||
| 169 | 170 | ||
| 170 | #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 | 171 | #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 |
| 171 | #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 | 172 | #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 |
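The switch from `1 <<` to `1ULL <<` matters because XGMI and ECC sit at bit positions 32 and 33: shifting a plain int past its 32-bit width is undefined behaviour, and the resulting mask would be lost in a 64-bit feature word. A tiny sketch of the fixed form, with an illustrative bit position rather than the real SMU defines:

    #include <stdio.h>
    #include <stdint.h>

    /* Feature bits above position 31 must be built with a 64-bit shift.
     * (1 << 33) is evaluated in int and is undefined behaviour;
     * (1ULL << 33) yields the intended mask in a 64-bit bitmap. */
    #define FEATURE_HIGH_BIT   33
    #define FEATURE_HIGH_MASK  (1ULL << FEATURE_HIGH_BIT)

    int main(void)
    {
        uint64_t features = 0;

        features |= FEATURE_HIGH_MASK;
        printf("mask = 0x%016llx\n", (unsigned long long)features);
        printf("fits in 32 bits? %s\n",
               FEATURE_HIGH_MASK > 0xffffffffULL ? "no" : "yes");
        return 0;
    }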
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index a63e5f0dae56..ab7968c8f6a2 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | |||
| @@ -1037,6 +1037,35 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, | |||
| 1037 | } | 1037 | } |
| 1038 | EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); | 1038 | EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); |
| 1039 | 1039 | ||
| 1040 | /* Filter out invalid setups to avoid configuring SCDC and scrambling */ | ||
| 1041 | static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) | ||
| 1042 | { | ||
| 1043 | struct drm_display_info *display = &hdmi->connector.display_info; | ||
| 1044 | |||
| 1045 | /* Completely disable SCDC support for older controllers */ | ||
| 1046 | if (hdmi->version < 0x200a) | ||
| 1047 | return false; | ||
| 1048 | |||
| 1049 | /* Disable if no DDC bus */ | ||
| 1050 | if (!hdmi->ddc) | ||
| 1051 | return false; | ||
| 1052 | |||
| 1053 | /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */ | ||
| 1054 | if (!display->hdmi.scdc.supported || | ||
| 1055 | !display->hdmi.scdc.scrambling.supported) | ||
| 1056 | return false; | ||
| 1057 | |||
| 1058 | /* | ||
| 1059 | * Disable if display only support low TMDS rates and scrambling | ||
| 1060 | * for low rates is not supported either | ||
| 1061 | */ | ||
| 1062 | if (!display->hdmi.scdc.scrambling.low_rates && | ||
| 1063 | display->max_tmds_clock <= 340000) | ||
| 1064 | return false; | ||
| 1065 | |||
| 1066 | return true; | ||
| 1067 | } | ||
| 1068 | |||
| 1040 | /* | 1069 | /* |
| 1041 | * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: | 1070 | * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: |
| 1042 | * - The Source shall suspend transmission of the TMDS clock and data | 1071 | * - The Source shall suspend transmission of the TMDS clock and data |
| @@ -1055,7 +1084,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi) | |||
| 1055 | unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; | 1084 | unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; |
| 1056 | 1085 | ||
| 1057 | /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ | 1086 | /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ |
| 1058 | if (hdmi->connector.display_info.hdmi.scdc.supported) { | 1087 | if (dw_hdmi_support_scdc(hdmi)) { |
| 1059 | if (mtmdsclock > HDMI14_MAX_TMDSCLK) | 1088 | if (mtmdsclock > HDMI14_MAX_TMDSCLK) |
| 1060 | drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1); | 1089 | drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1); |
| 1061 | else | 1090 | else |
| @@ -1579,8 +1608,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, | |||
| 1579 | 1608 | ||
| 1580 | /* Set up HDMI_FC_INVIDCONF */ | 1609 | /* Set up HDMI_FC_INVIDCONF */ |
| 1581 | inv_val = (hdmi->hdmi_data.hdcp_enable || | 1610 | inv_val = (hdmi->hdmi_data.hdcp_enable || |
| 1582 | vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || | 1611 | (dw_hdmi_support_scdc(hdmi) && |
| 1583 | hdmi_info->scdc.scrambling.low_rates ? | 1612 | (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || |
| 1613 | hdmi_info->scdc.scrambling.low_rates)) ? | ||
| 1584 | HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : | 1614 | HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : |
| 1585 | HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); | 1615 | HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); |
| 1586 | 1616 | ||
| @@ -1646,7 +1676,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, | |||
| 1646 | } | 1676 | } |
| 1647 | 1677 | ||
| 1648 | /* Scrambling Control */ | 1678 | /* Scrambling Control */ |
| 1649 | if (hdmi_info->scdc.supported) { | 1679 | if (dw_hdmi_support_scdc(hdmi)) { |
| 1650 | if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || | 1680 | if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || |
| 1651 | hdmi_info->scdc.scrambling.low_rates) { | 1681 | hdmi_info->scdc.scrambling.low_rates) { |
| 1652 | /* | 1682 | /* |
| @@ -1658,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, | |||
| 1658 | * Source Devices compliant shall set the | 1688 | * Source Devices compliant shall set the |
| 1659 | * Source Version = 1. | 1689 | * Source Version = 1. |
| 1660 | */ | 1690 | */ |
| 1661 | drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION, | 1691 | drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION, |
| 1662 | &bytes); | 1692 | &bytes); |
| 1663 | drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION, | 1693 | drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION, |
| 1664 | min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION)); | 1694 | min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION)); |
| 1665 | 1695 | ||
| 1666 | /* Enabled Scrambling in the Sink */ | 1696 | /* Enabled Scrambling in the Sink */ |
| 1667 | drm_scdc_set_scrambling(&hdmi->i2c->adap, 1); | 1697 | drm_scdc_set_scrambling(hdmi->ddc, 1); |
| 1668 | 1698 | ||
| 1669 | /* | 1699 | /* |
| 1670 | * To activate the scrambler feature, you must ensure | 1700 | * To activate the scrambler feature, you must ensure |
| @@ -1680,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, | |||
| 1680 | hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL); | 1710 | hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL); |
| 1681 | hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, | 1711 | hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, |
| 1682 | HDMI_MC_SWRSTZ); | 1712 | HDMI_MC_SWRSTZ); |
| 1683 | drm_scdc_set_scrambling(&hdmi->i2c->adap, 0); | 1713 | drm_scdc_set_scrambling(hdmi->ddc, 0); |
| 1684 | } | 1714 | } |
| 1685 | } | 1715 | } |
| 1686 | 1716 | ||
| @@ -1774,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) | |||
| 1774 | * iteration for others. | 1804 | * iteration for others. |
| 1775 | * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing | 1805 | * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing |
| 1776 | * the workaround with a single iteration. | 1806 | * the workaround with a single iteration. |
| 1807 | * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have | ||
| 1808 | * been identified as needing the workaround with a single iteration. | ||
| 1777 | */ | 1809 | */ |
| 1778 | 1810 | ||
| 1779 | switch (hdmi->version) { | 1811 | switch (hdmi->version) { |
| @@ -1782,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) | |||
| 1782 | break; | 1814 | break; |
| 1783 | case 0x131a: | 1815 | case 0x131a: |
| 1784 | case 0x132a: | 1816 | case 0x132a: |
| 1817 | case 0x200a: | ||
| 1785 | case 0x201a: | 1818 | case 0x201a: |
| 1819 | case 0x211a: | ||
| 1786 | case 0x212a: | 1820 | case 0x212a: |
| 1787 | count = 1; | 1821 | count = 1; |
| 1788 | break; | 1822 | break; |
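The new dw_hdmi_support_scdc() folds four independent conditions — controller IP version, presence of a DDC bus, the sink's SCDC/scrambling capability, and the low-TMDS-rate corner case — into one predicate that every SCDC touch point now consults (clock-ratio setup, the HDCP keep-out bit, and scrambling control). A stand-alone sketch of that predicate with simplified field names, not the driver's real structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified mirror of the checks in dw_hdmi_support_scdc(); field and
     * constant names are illustrative. */
    struct sink_caps {
        bool scdc_supported;
        bool scrambling_supported;
        bool scrambling_low_rates;
        int  max_tmds_clock_khz;
    };

    static bool scdc_usable(unsigned int controller_version, bool have_ddc,
                            const struct sink_caps *sink)
    {
        if (controller_version < 0x200a)     /* controller too old for SCDC */
            return false;
        if (!have_ddc)                       /* no bus to reach the sink */
            return false;
        if (!sink->scdc_supported || !sink->scrambling_supported)
            return false;
        /* Sink limited to <= 340 MHz and unable to scramble at low rates. */
        if (!sink->scrambling_low_rates && sink->max_tmds_clock_khz <= 340000)
            return false;
        return true;
    }

    int main(void)
    {
        struct sink_caps hdmi20_sink = { true, true, false, 600000 };
        struct sink_caps hdmi14_sink = { true, true, false, 300000 };

        printf("600 MHz sink: %d\n", scdc_usable(0x211a, true, &hdmi20_sink));
        printf("300 MHz sink: %d\n", scdc_usable(0x211a, true, &hdmi14_sink));
        return 0;
    }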
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 40ac19848034..fbb76332cc9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) | |||
| 1034 | funcs->atomic_disable(crtc, old_crtc_state); | 1034 | funcs->atomic_disable(crtc, old_crtc_state); |
| 1035 | else if (funcs->disable) | 1035 | else if (funcs->disable) |
| 1036 | funcs->disable(crtc); | 1036 | funcs->disable(crtc); |
| 1037 | else | 1037 | else if (funcs->dpms) |
| 1038 | funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | 1038 | funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
| 1039 | 1039 | ||
| 1040 | if (!(dev->irq_enabled && dev->num_crtcs)) | 1040 | if (!(dev->irq_enabled && dev->num_crtcs)) |
| @@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, | |||
| 1277 | if (new_crtc_state->enable) { | 1277 | if (new_crtc_state->enable) { |
| 1278 | DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", | 1278 | DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", |
| 1279 | crtc->base.id, crtc->name); | 1279 | crtc->base.id, crtc->name); |
| 1280 | |||
| 1281 | if (funcs->atomic_enable) | 1280 | if (funcs->atomic_enable) |
| 1282 | funcs->atomic_enable(crtc, old_crtc_state); | 1281 | funcs->atomic_enable(crtc, old_crtc_state); |
| 1283 | else | 1282 | else if (funcs->commit) |
| 1284 | funcs->commit(crtc); | 1283 | funcs->commit(crtc); |
| 1285 | } | 1284 | } |
| 1286 | } | 1285 | } |
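Both hunks in this helper apply the same rule: the legacy dpms/commit CRTC hooks are optional, so they must be checked before being called rather than used as an unconditional fallback. A minimal sketch of that optional-callback guard, with illustrative names rather than the DRM helper API:

    #include <stdio.h>

    /* Try the preferred hooks first and only fall back to the legacy one
     * when the driver actually provides it. */
    struct crtc_funcs {
        void (*atomic_disable)(void);
        void (*disable)(void);
        void (*dpms)(int mode);
    };

    static void disable_crtc(const struct crtc_funcs *funcs)
    {
        if (funcs->atomic_disable)
            funcs->atomic_disable();
        else if (funcs->disable)
            funcs->disable();
        else if (funcs->dpms)            /* previously called unconditionally */
            funcs->dpms(3);              /* 3 == DRM_MODE_DPMS_OFF */
        /* else: nothing to call — a missing hook no longer dereferences NULL */
    }

    static void legacy_dpms(int mode) { printf("dpms(%d)\n", mode); }

    int main(void)
    {
        struct crtc_funcs with_dpms = { .dpms = legacy_dpms };
        struct crtc_funcs bare = { 0 };

        disable_crtc(&with_dpms);
        disable_crtc(&bare);             /* safe with the added guard */
        return 0;
    }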
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 035479e273be..e3f9caa7839f 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
| @@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) | |||
| 448 | /** | 448 | /** |
| 449 | * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU | 449 | * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU |
| 450 | * @vgpu: a vGPU | 450 | * @vgpu: a vGPU |
| 451 | * @conncted: link state | 451 | * @connected: link state |
| 452 | * | 452 | * |
| 453 | * This function is used to trigger hotplug interrupt for vGPU | 453 | * This function is used to trigger hotplug interrupt for vGPU |
| 454 | * | 454 | * |
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..69a9a1b2ea4a 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
| @@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 209 | struct drm_i915_private *dev_priv = to_i915(dev); | 209 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 210 | struct intel_vgpu_primary_plane_format p; | 210 | struct intel_vgpu_primary_plane_format p; |
| 211 | struct intel_vgpu_cursor_plane_format c; | 211 | struct intel_vgpu_cursor_plane_format c; |
| 212 | int ret; | 212 | int ret, tile_height = 1; |
| 213 | 213 | ||
| 214 | if (plane_id == DRM_PLANE_TYPE_PRIMARY) { | 214 | if (plane_id == DRM_PLANE_TYPE_PRIMARY) { |
| 215 | ret = intel_vgpu_decode_primary_plane(vgpu, &p); | 215 | ret = intel_vgpu_decode_primary_plane(vgpu, &p); |
| @@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 228 | break; | 228 | break; |
| 229 | case PLANE_CTL_TILED_X: | 229 | case PLANE_CTL_TILED_X: |
| 230 | info->drm_format_mod = I915_FORMAT_MOD_X_TILED; | 230 | info->drm_format_mod = I915_FORMAT_MOD_X_TILED; |
| 231 | tile_height = 8; | ||
| 231 | break; | 232 | break; |
| 232 | case PLANE_CTL_TILED_Y: | 233 | case PLANE_CTL_TILED_Y: |
| 233 | info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; | 234 | info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; |
| 235 | tile_height = 32; | ||
| 234 | break; | 236 | break; |
| 235 | case PLANE_CTL_TILED_YF: | 237 | case PLANE_CTL_TILED_YF: |
| 236 | info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; | 238 | info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; |
| 239 | tile_height = 32; | ||
| 237 | break; | 240 | break; |
| 238 | default: | 241 | default: |
| 239 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); | 242 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); |
| 240 | } | 243 | } |
| 241 | |||
| 242 | info->size = (((p.stride * p.height * p.bpp) / 8) + | ||
| 243 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
| 244 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { | 244 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { |
| 245 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); | 245 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); |
| 246 | if (ret) | 246 | if (ret) |
| @@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 262 | info->x_hot = UINT_MAX; | 262 | info->x_hot = UINT_MAX; |
| 263 | info->y_hot = UINT_MAX; | 263 | info->y_hot = UINT_MAX; |
| 264 | } | 264 | } |
| 265 | |||
| 266 | info->size = (((info->stride * c.height * c.bpp) / 8) | ||
| 267 | + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||
| 268 | } else { | 265 | } else { |
| 269 | gvt_vgpu_err("invalid plane id:%d\n", plane_id); | 266 | gvt_vgpu_err("invalid plane id:%d\n", plane_id); |
| 270 | return -EINVAL; | 267 | return -EINVAL; |
| 271 | } | 268 | } |
| 272 | 269 | ||
| 270 | info->size = (info->stride * roundup(info->height, tile_height) | ||
| 271 | + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
| 273 | if (info->size == 0) { | 272 | if (info->size == 0) { |
| 274 | gvt_vgpu_err("fb size is zero\n"); | 273 | gvt_vgpu_err("fb size is zero\n"); |
| 275 | return -EINVAL; | 274 | return -EINVAL; |
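The dmabuf change replaces two per-plane size formulas with one shared computation that accounts for tiling: a tiled framebuffer occupies whole tile rows, so the height is rounded up to the tile height (8 for X-tiled, 32 for Y/Yf-tiled) before converting to pages. A small runnable sketch of that arithmetic, with illustrative stride/height values:

    #include <stdio.h>

    #define PAGE_SIZE  4096u
    #define PAGE_SHIFT 12

    /* Same idea as the kernel's roundup(): round x up to a multiple of y. */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    static unsigned int fb_size_pages(unsigned int stride_bytes,
                                      unsigned int height,
                                      unsigned int tile_height)
    {
        return (stride_bytes * ROUNDUP(height, tile_height) + PAGE_SIZE - 1)
               >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* Tile heights as in the hunk: 1 linear, 8 X-tiled, 32 Y/Yf-tiled.
         * An odd height is chosen so the three results differ. */
        printf("linear : %u pages\n", fb_size_pages(7680, 1044, 1));
        printf("X-tiled: %u pages\n", fb_size_pages(7680, 1044, 8));
        printf("Y-tiled: %u pages\n", fb_size_pages(7680, 1044, 32));
        return 0;
    }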
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d7052ab7908c..9814773882ec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) | |||
| 750 | 750 | ||
| 751 | static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) | 751 | static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) |
| 752 | { | 752 | { |
| 753 | struct intel_vgpu_ppgtt_spt *spt; | 753 | struct intel_vgpu_ppgtt_spt *spt, *spn; |
| 754 | struct radix_tree_iter iter; | 754 | struct radix_tree_iter iter; |
| 755 | void **slot; | 755 | LIST_HEAD(all_spt); |
| 756 | void __rcu **slot; | ||
| 756 | 757 | ||
| 758 | rcu_read_lock(); | ||
| 757 | radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { | 759 | radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { |
| 758 | spt = radix_tree_deref_slot(slot); | 760 | spt = radix_tree_deref_slot(slot); |
| 759 | ppgtt_free_spt(spt); | 761 | list_move(&spt->post_shadow_list, &all_spt); |
| 760 | } | 762 | } |
| 763 | rcu_read_unlock(); | ||
| 764 | |||
| 765 | list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list) | ||
| 766 | ppgtt_free_spt(spt); | ||
| 761 | } | 767 | } |
| 762 | 768 | ||
| 763 | static int ppgtt_handle_guest_write_page_table_bytes( | 769 | static int ppgtt_handle_guest_write_page_table_bytes( |
| @@ -1946,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) | |||
| 1946 | */ | 1952 | */ |
| 1947 | void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) | 1953 | void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) |
| 1948 | { | 1954 | { |
| 1949 | atomic_dec(&mm->pincount); | 1955 | atomic_dec_if_positive(&mm->pincount); |
| 1950 | } | 1956 | } |
| 1951 | 1957 | ||
| 1952 | /** | 1958 | /** |
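The ppgtt_free_all_spt() rework is a collect-then-free pattern: entries are first moved onto a private list while the radix tree is walked under rcu_read_lock(), and only then released, so freeing cannot disturb the iteration. (The separate unpin change, atomic_dec_if_positive(), simply keeps the pin count from going negative.) A plain userspace sketch of the two-phase idea, not the radix-tree/RCU machinery itself:

    #include <stdio.h>
    #include <stdlib.h>

    struct spt {
        int id;
        struct spt *next;
    };

    static void free_all(struct spt **tree_head)
    {
        struct spt *pending = NULL, *s, *n;

        /* Phase 1: detach everything (in the driver: under rcu_read_lock). */
        while ((s = *tree_head) != NULL) {
            *tree_head = s->next;
            s->next = pending;
            pending = s;
        }

        /* Phase 2: free at leisure; no live iterator is left to corrupt. */
        for (s = pending; s; s = n) {
            n = s->next;
            printf("freeing spt %d\n", s->id);
            free(s);
        }
    }

    int main(void)
    {
        struct spt *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct spt *s = malloc(sizeof(*s));

            s->id = i;
            s->next = head;
            head = s;
        }
        free_all(&head);
        return 0;
    }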
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d5fcc447d22f..a68addf95c23 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) | |||
| 905 | static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, | 905 | static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, |
| 906 | void *buf, unsigned long count, bool is_write) | 906 | void *buf, unsigned long count, bool is_write) |
| 907 | { | 907 | { |
| 908 | void *aperture_va; | 908 | void __iomem *aperture_va; |
| 909 | 909 | ||
| 910 | if (!intel_vgpu_in_aperture(vgpu, off) || | 910 | if (!intel_vgpu_in_aperture(vgpu, off) || |
| 911 | !intel_vgpu_in_aperture(vgpu, off + count)) { | 911 | !intel_vgpu_in_aperture(vgpu, off + count)) { |
| @@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, | |||
| 920 | return -EIO; | 920 | return -EIO; |
| 921 | 921 | ||
| 922 | if (is_write) | 922 | if (is_write) |
| 923 | memcpy(aperture_va + offset_in_page(off), buf, count); | 923 | memcpy_toio(aperture_va + offset_in_page(off), buf, count); |
| 924 | else | 924 | else |
| 925 | memcpy(buf, aperture_va + offset_in_page(off), count); | 925 | memcpy_fromio(buf, aperture_va + offset_in_page(off), count); |
| 926 | 926 | ||
| 927 | io_mapping_unmap(aperture_va); | 927 | io_mapping_unmap(aperture_va); |
| 928 | 928 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 159192c097cc..05b953793316 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, | |||
| 1486 | intel_runtime_pm_put_unchecked(dev_priv); | 1486 | intel_runtime_pm_put_unchecked(dev_priv); |
| 1487 | } | 1487 | } |
| 1488 | 1488 | ||
| 1489 | if (ret && (vgpu_is_vm_unhealthy(ret))) { | 1489 | if (ret) { |
| 1490 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); | 1490 | if (vgpu_is_vm_unhealthy(ret)) |
| 1491 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); | ||
| 1491 | intel_vgpu_destroy_workload(workload); | 1492 | intel_vgpu_destroy_workload(workload); |
| 1492 | return ERR_PTR(ret); | 1493 | return ERR_PTR(ret); |
| 1493 | } | 1494 | } |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0bd890c04fe4..f6f6e5b78e97 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) | |||
| 4830 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, | 4830 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, |
| 4831 | &ctx); | 4831 | &ctx); |
| 4832 | if (ret) { | 4832 | if (ret) { |
| 4833 | ret = -EINTR; | 4833 | if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { |
| 4834 | try_again = true; | ||
| 4835 | continue; | ||
| 4836 | } | ||
| 4834 | break; | 4837 | break; |
| 4835 | } | 4838 | } |
| 4836 | crtc = connector->state->crtc; | 4839 | crtc = connector->state->crtc; |
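The debugfs hunk switches from bailing out with -EINTR to the usual modeset-lock retry idiom: on -EDEADLK the acquire context is backed off and the whole locking sequence is retried. A minimal sketch of that retry shape, where try_lock()/backoff() merely stand in for drm_modeset_lock()/drm_modeset_backoff():

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int try_lock(int attempt)
    {
        return attempt == 0 ? -EDEADLK : 0;   /* fail once, then succeed */
    }

    static int backoff(void)
    {
        return 0;                             /* 0 == backoff succeeded */
    }

    int main(void)
    {
        int ret, attempt = 0;
        bool try_again;

        do {
            try_again = false;
            ret = try_lock(attempt++);
            if (ret == -EDEADLK && !backoff()) {
                try_again = true;
                continue;
            }
        } while (try_again);

        printf("locked after %d attempt(s), ret = %d\n", attempt, ret);
        return 0;
    }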
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 73a7bee24a66..641e0778fa9c 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c | |||
| @@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) | |||
| 323 | } | 323 | } |
| 324 | } | 324 | } |
| 325 | 325 | ||
| 326 | static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, | ||
| 327 | struct intel_dsi *intel_dsi) | ||
| 328 | { | ||
| 329 | enum port port; | ||
| 330 | |||
| 331 | for_each_dsi_port(port, intel_dsi->ports) { | ||
| 332 | WARN_ON(intel_dsi->io_wakeref[port]); | ||
| 333 | intel_dsi->io_wakeref[port] = | ||
| 334 | intel_display_power_get(dev_priv, | ||
| 335 | port == PORT_A ? | ||
| 336 | POWER_DOMAIN_PORT_DDI_A_IO : | ||
| 337 | POWER_DOMAIN_PORT_DDI_B_IO); | ||
| 338 | } | ||
| 339 | } | ||
| 340 | |||
| 326 | static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) | 341 | static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) |
| 327 | { | 342 | { |
| 328 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 343 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| @@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) | |||
| 336 | I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); | 351 | I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); |
| 337 | } | 352 | } |
| 338 | 353 | ||
| 339 | for_each_dsi_port(port, intel_dsi->ports) { | 354 | get_dsi_io_power_domains(dev_priv, intel_dsi); |
| 340 | intel_dsi->io_wakeref[port] = | ||
| 341 | intel_display_power_get(dev_priv, | ||
| 342 | port == PORT_A ? | ||
| 343 | POWER_DOMAIN_PORT_DDI_A_IO : | ||
| 344 | POWER_DOMAIN_PORT_DDI_B_IO); | ||
| 345 | } | ||
| 346 | } | 355 | } |
| 347 | 356 | ||
| 348 | static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) | 357 | static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) |
| @@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, | |||
| 589 | val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); | 598 | val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); |
| 590 | } | 599 | } |
| 591 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | 600 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); |
| 601 | |||
| 602 | for_each_dsi_port(port, intel_dsi->ports) { | ||
| 603 | val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); | ||
| 604 | } | ||
| 605 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
| 606 | |||
| 592 | POSTING_READ(DPCLKA_CFGCR0_ICL); | 607 | POSTING_READ(DPCLKA_CFGCR0_ICL); |
| 593 | 608 | ||
| 594 | mutex_unlock(&dev_priv->dpll_lock); | 609 | mutex_unlock(&dev_priv->dpll_lock); |
| @@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) | |||
| 1117 | DRM_ERROR("DDI port:%c buffer not idle\n", | 1132 | DRM_ERROR("DDI port:%c buffer not idle\n", |
| 1118 | port_name(port)); | 1133 | port_name(port)); |
| 1119 | } | 1134 | } |
| 1120 | gen11_dsi_ungate_clocks(encoder); | 1135 | gen11_dsi_gate_clocks(encoder); |
| 1121 | } | 1136 | } |
| 1122 | 1137 | ||
| 1123 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) | 1138 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) |
| @@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, | |||
| 1218 | return 0; | 1233 | return 0; |
| 1219 | } | 1234 | } |
| 1220 | 1235 | ||
| 1221 | static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, | 1236 | static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, |
| 1222 | struct intel_crtc_state *crtc_state) | 1237 | struct intel_crtc_state *crtc_state) |
| 1223 | { | 1238 | { |
| 1224 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 1239 | get_dsi_io_power_domains(to_i915(encoder->base.dev), |
| 1225 | u64 domains = 0; | 1240 | enc_to_intel_dsi(&encoder->base)); |
| 1226 | enum port port; | ||
| 1227 | |||
| 1228 | for_each_dsi_port(port, intel_dsi->ports) | ||
| 1229 | if (port == PORT_A) | ||
| 1230 | domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO); | ||
| 1231 | else | ||
| 1232 | domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO); | ||
| 1233 | |||
| 1234 | return domains; | ||
| 1235 | } | 1241 | } |
| 1236 | 1242 | ||
| 1237 | static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, | 1243 | static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, |
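Besides routing the power-domain bookkeeping through a shared get_dsi_io_power_domains() helper, the map-PLL hunk adds a second read-modify-write pass that clears the per-port clock-off bits, so the DDI clock is ungated for the enable sequence (matching the "gated except during enable/disable" policy noted in the intel_ddi change below). A sketch of that register update pattern; the bit layout here is invented purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define DDI_CLK_OFF(port)       (1u << (10 + (port)))
    #define DDI_CLK_SEL(pll, port)  ((uint32_t)(pll) << ((port) * 2))
    #define DDI_CLK_SEL_MASK(port)  DDI_CLK_SEL(3, port)

    static uint32_t cfgcr0 = 0xffffffffu;     /* stand-in for the MMIO register */

    static uint32_t reg_read(void)        { return cfgcr0; }
    static void     reg_write(uint32_t v) { cfgcr0 = v; }

    int main(void)
    {
        const int ports[] = { 0, 1 };         /* PORT_A, PORT_B */
        uint32_t val = reg_read();

        for (int i = 0; i < 2; i++) {
            val &= ~DDI_CLK_SEL_MASK(ports[i]);   /* clear old selection */
            val |= DDI_CLK_SEL(1, ports[i]);      /* pick a PLL */
        }
        reg_write(val);

        for (int i = 0; i < 2; i++)
            val &= ~DDI_CLK_OFF(ports[i]);        /* ungate the port clock */
        reg_write(val);

        printf("CFGCR0 = 0x%08x\n", reg_read());
        return 0;
    }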
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 14d580cdefd3..98cea1f4b3bf 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) | |||
| 2075 | intel_aux_power_domain(dig_port); | 2075 | intel_aux_power_domain(dig_port); |
| 2076 | } | 2076 | } |
| 2077 | 2077 | ||
| 2078 | static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | 2078 | static void intel_ddi_get_power_domains(struct intel_encoder *encoder, |
| 2079 | struct intel_crtc_state *crtc_state) | 2079 | struct intel_crtc_state *crtc_state) |
| 2080 | { | 2080 | { |
| 2081 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2081 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 2082 | struct intel_digital_port *dig_port; | 2082 | struct intel_digital_port *dig_port; |
| 2083 | u64 domains; | ||
| 2084 | 2083 | ||
| 2085 | /* | 2084 | /* |
| 2086 | * TODO: Add support for MST encoders. Atm, the following should never | 2085 | * TODO: Add support for MST encoders. Atm, the following should never |
| @@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | |||
| 2088 | * hook. | 2087 | * hook. |
| 2089 | */ | 2088 | */ |
| 2090 | if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) | 2089 | if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) |
| 2091 | return 0; | 2090 | return; |
| 2092 | 2091 | ||
| 2093 | dig_port = enc_to_dig_port(&encoder->base); | 2092 | dig_port = enc_to_dig_port(&encoder->base); |
| 2094 | domains = BIT_ULL(dig_port->ddi_io_power_domain); | 2093 | intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); |
| 2095 | 2094 | ||
| 2096 | /* | 2095 | /* |
| 2097 | * AUX power is only needed for (e)DP mode, and for HDMI mode on TC | 2096 | * AUX power is only needed for (e)DP mode, and for HDMI mode on TC |
| @@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | |||
| 2099 | */ | 2098 | */ |
| 2100 | if (intel_crtc_has_dp_encoder(crtc_state) || | 2099 | if (intel_crtc_has_dp_encoder(crtc_state) || |
| 2101 | intel_port_is_tc(dev_priv, encoder->port)) | 2100 | intel_port_is_tc(dev_priv, encoder->port)) |
| 2102 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); | 2101 | intel_display_power_get(dev_priv, |
| 2102 | intel_ddi_main_link_aux_domain(dig_port)); | ||
| 2103 | 2103 | ||
| 2104 | /* | 2104 | /* |
| 2105 | * VDSC power is needed when DSC is enabled | 2105 | * VDSC power is needed when DSC is enabled |
| 2106 | */ | 2106 | */ |
| 2107 | if (crtc_state->dsc_params.compression_enable) | 2107 | if (crtc_state->dsc_params.compression_enable) |
| 2108 | domains |= BIT_ULL(intel_dsc_power_domain(crtc_state)); | 2108 | intel_display_power_get(dev_priv, |
| 2109 | 2109 | intel_dsc_power_domain(crtc_state)); | |
| 2110 | return domains; | ||
| 2111 | } | 2110 | } |
| 2112 | 2111 | ||
| 2113 | void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) | 2112 | void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) |
| @@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) | |||
| 2825 | return; | 2824 | return; |
| 2826 | } | 2825 | } |
| 2827 | /* | 2826 | /* |
| 2828 | * DSI ports should have their DDI clock ungated when disabled | 2827 | * For DSI we keep the ddi clocks gated |
| 2829 | * and gated when enabled. | 2828 | * except during enable/disable sequence. |
| 2830 | */ | 2829 | */ |
| 2831 | ddi_clk_needed = !encoder->base.crtc; | 2830 | ddi_clk_needed = false; |
| 2832 | } | 2831 | } |
| 2833 | 2832 | ||
| 2834 | val = I915_READ(DPCLKA_CFGCR0_ICL); | 2833 | val = I915_READ(DPCLKA_CFGCR0_ICL); |
| @@ -3863,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, | |||
| 3863 | ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); | 3862 | ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); |
| 3864 | else | 3863 | else |
| 3865 | ret = intel_dp_compute_config(encoder, pipe_config, conn_state); | 3864 | ret = intel_dp_compute_config(encoder, pipe_config, conn_state); |
| 3865 | if (ret) | ||
| 3866 | return ret; | ||
| 3866 | 3867 | ||
| 3867 | if (IS_GEN9_LP(dev_priv) && ret) | 3868 | if (IS_GEN9_LP(dev_priv)) |
| 3868 | pipe_config->lane_lat_optim_mask = | 3869 | pipe_config->lane_lat_optim_mask = |
| 3869 | bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); | 3870 | bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); |
| 3870 | 3871 | ||
| 3871 | intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); | 3872 | intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); |
| 3872 | 3873 | ||
| 3873 | return ret; | 3874 | return 0; |
| 3874 | 3875 | ||
| 3875 | } | 3876 | } |
| 3876 | 3877 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ccb616351bba..421aac80a838 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) | |||
| 15986 | struct intel_encoder *encoder; | 15986 | struct intel_encoder *encoder; |
| 15987 | 15987 | ||
| 15988 | for_each_intel_encoder(&dev_priv->drm, encoder) { | 15988 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
| 15989 | u64 get_domains; | ||
| 15990 | enum intel_display_power_domain domain; | ||
| 15991 | struct intel_crtc_state *crtc_state; | 15989 | struct intel_crtc_state *crtc_state; |
| 15992 | 15990 | ||
| 15993 | if (!encoder->get_power_domains) | 15991 | if (!encoder->get_power_domains) |
| @@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) | |||
| 16001 | continue; | 15999 | continue; |
| 16002 | 16000 | ||
| 16003 | crtc_state = to_intel_crtc_state(encoder->base.crtc->state); | 16001 | crtc_state = to_intel_crtc_state(encoder->base.crtc->state); |
| 16004 | get_domains = encoder->get_power_domains(encoder, crtc_state); | 16002 | encoder->get_power_domains(encoder, crtc_state); |
| 16005 | for_each_power_domain(domain, get_domains) | ||
| 16006 | intel_display_power_get(dev_priv, domain); | ||
| 16007 | } | 16003 | } |
| 16008 | } | 16004 | } |
| 16009 | 16005 | ||
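With get_power_domains() now taking the power references itself (so per-port wakerefs can be tracked, as in the DSI hunk above), the generic loop that walked a returned u64 mask bit by bit disappears. The removed pattern — visit every set bit of a 64-bit mask and take one reference per bit — is still a useful idiom; a small stand-alone sketch using a GCC/Clang builtin, not the i915 for_each_power_domain macro:

    #include <stdio.h>
    #include <stdint.h>

    static void get_each_domain(uint64_t mask)
    {
        while (mask) {
            unsigned int domain = (unsigned int)__builtin_ctzll(mask);

            printf("intel_display_power_get(domain %u)\n", domain);
            mask &= mask - 1;                 /* clear the lowest set bit */
        }
    }

    int main(void)
    {
        /* Two made-up domain bits, e.g. a DDI IO domain and an AUX domain. */
        get_each_domain((1ULL << 3) | (1ULL << 17));
        return 0;
    }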
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cf709835fb9a..48da4a969a0a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, | |||
| 1859 | return -EINVAL; | 1859 | return -EINVAL; |
| 1860 | } | 1860 | } |
| 1861 | 1861 | ||
| 1862 | /* Optimize link config in order: max bpp, min lanes, min clock */ | ||
| 1863 | static int | ||
| 1864 | intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, | ||
| 1865 | struct intel_crtc_state *pipe_config, | ||
| 1866 | const struct link_config_limits *limits) | ||
| 1867 | { | ||
| 1868 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | ||
| 1869 | int bpp, clock, lane_count; | ||
| 1870 | int mode_rate, link_clock, link_avail; | ||
| 1871 | |||
| 1872 | for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { | ||
| 1873 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, | ||
| 1874 | bpp); | ||
| 1875 | |||
| 1876 | for (lane_count = limits->min_lane_count; | ||
| 1877 | lane_count <= limits->max_lane_count; | ||
| 1878 | lane_count <<= 1) { | ||
| 1879 | for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { | ||
| 1880 | link_clock = intel_dp->common_rates[clock]; | ||
| 1881 | link_avail = intel_dp_max_data_rate(link_clock, | ||
| 1882 | lane_count); | ||
| 1883 | |||
| 1884 | if (mode_rate <= link_avail) { | ||
| 1885 | pipe_config->lane_count = lane_count; | ||
| 1886 | pipe_config->pipe_bpp = bpp; | ||
| 1887 | pipe_config->port_clock = link_clock; | ||
| 1888 | |||
| 1889 | return 0; | ||
| 1890 | } | ||
| 1891 | } | ||
| 1892 | } | ||
| 1893 | } | ||
| 1894 | |||
| 1895 | return -EINVAL; | ||
| 1896 | } | ||
| 1897 | |||
| 1898 | static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) | 1862 | static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) |
| 1899 | { | 1863 | { |
| 1900 | int i, num_bpc; | 1864 | int i, num_bpc; |
| @@ -1922,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, | |||
| 1922 | int pipe_bpp; | 1886 | int pipe_bpp; |
| 1923 | int ret; | 1887 | int ret; |
| 1924 | 1888 | ||
| 1889 | pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && | ||
| 1890 | intel_dp_supports_fec(intel_dp, pipe_config); | ||
| 1891 | |||
| 1925 | if (!intel_dp_supports_dsc(intel_dp, pipe_config)) | 1892 | if (!intel_dp_supports_dsc(intel_dp, pipe_config)) |
| 1926 | return -EINVAL; | 1893 | return -EINVAL; |
| 1927 | 1894 | ||
| @@ -2031,15 +1998,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
| 2031 | limits.min_bpp = 6 * 3; | 1998 | limits.min_bpp = 6 * 3; |
| 2032 | limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); | 1999 | limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); |
| 2033 | 2000 | ||
| 2034 | if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { | 2001 | if (intel_dp_is_edp(intel_dp)) { |
| 2035 | /* | 2002 | /* |
| 2036 | * Use the maximum clock and number of lanes the eDP panel | 2003 | * Use the maximum clock and number of lanes the eDP panel |
| 2037 | * advertizes being capable of. The eDP 1.3 and earlier panels | 2004 | * advertizes being capable of. The panels are generally |
| 2038 | * are generally designed to support only a single clock and | 2005 | * designed to support only a single clock and lane |
| 2039 | * lane configuration, and typically these values correspond to | 2006 | * configuration, and typically these values correspond to the |
| 2040 | * the native resolution of the panel. With eDP 1.4 rate select | 2007 | * native resolution of the panel. |
| 2041 | * and DSC, this is decreasingly the case, and we need to be | ||
| 2042 | * able to select less than maximum link config. | ||
| 2043 | */ | 2008 | */ |
| 2044 | limits.min_lane_count = limits.max_lane_count; | 2009 | limits.min_lane_count = limits.max_lane_count; |
| 2045 | limits.min_clock = limits.max_clock; | 2010 | limits.min_clock = limits.max_clock; |
| @@ -2053,22 +2018,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
| 2053 | intel_dp->common_rates[limits.max_clock], | 2018 | intel_dp->common_rates[limits.max_clock], |
| 2054 | limits.max_bpp, adjusted_mode->crtc_clock); | 2019 | limits.max_bpp, adjusted_mode->crtc_clock); |
| 2055 | 2020 | ||
| 2056 | if (intel_dp_is_edp(intel_dp)) | 2021 | /* |
| 2057 | /* | 2022 | * Optimize for slow and wide. This is the place to add alternative |
| 2058 | * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 | 2023 | * optimization policy. |
| 2059 | * section A.1: "It is recommended that the minimum number of | 2024 | */ |
| 2060 | * lanes be used, using the minimum link rate allowed for that | 2025 | ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); |
| 2061 | * lane configuration." | ||
| 2062 | * | ||
| 2063 | * Note that we use the max clock and lane count for eDP 1.3 and | ||
| 2064 | * earlier, and fast vs. wide is irrelevant. | ||
| 2065 | */ | ||
| 2066 | ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, | ||
| 2067 | &limits); | ||
| 2068 | else | ||
| 2069 | /* Optimize for slow and wide. */ | ||
| 2070 | ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, | ||
| 2071 | &limits); | ||
| 2072 | 2026 | ||
| 2073 | /* enable compression if the mode doesn't fit available BW */ | 2027 | /* enable compression if the mode doesn't fit available BW */ |
| 2074 | DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); | 2028 | DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); |
| @@ -2165,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
| 2165 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) | 2119 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
| 2166 | return -EINVAL; | 2120 | return -EINVAL; |
| 2167 | 2121 | ||
| 2168 | pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && | ||
| 2169 | intel_dp_supports_fec(intel_dp, pipe_config); | ||
| 2170 | |||
| 2171 | ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); | 2122 | ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); |
| 2172 | if (ret < 0) | 2123 | if (ret < 0) |
| 2173 | return ret; | 2124 | return ret; |
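Dropping intel_dp_compute_link_config_fast() means eDP now uses the same "slow and wide" policy as external DP: walk bpp from the maximum down and, for each bpp, take the first (link rate, lane count) pair that carries the mode. A userspace sketch of that search order with standard DP numbers (8b/10b overhead, RBR/HBR/HBR2 rates); the rest of the intel_dp bookkeeping is omitted:

    #include <stdio.h>

    static int link_data_rate(int link_rate_khz, int lanes)
    {
        return (link_rate_khz * lanes * 8) / 10;   /* kbytes/s */
    }

    static int mode_rate(int pixel_clock_khz, int bpp)
    {
        return (pixel_clock_khz * bpp) / 8;        /* kbytes/s */
    }

    int main(void)
    {
        const int rates[] = { 162000, 270000, 540000 };
        const int pixel_clock = 148500;            /* 1080p60 */

        for (int bpp = 30; bpp >= 18; bpp -= 6) {
            for (int clock = 0; clock < 3; clock++) {
                for (int lanes = 1; lanes <= 4; lanes <<= 1) {
                    if (mode_rate(pixel_clock, bpp) <=
                        link_data_rate(rates[clock], lanes)) {
                        printf("picked bpp %d, %d lane(s) @ %d kHz\n",
                               bpp, lanes, rates[clock]);
                        return 0;
                    }
                }
            }
        }
        printf("mode does not fit\n");
        return 0;
    }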
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 15db41394b9e..d5660ac1b0d6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -270,10 +270,12 @@ struct intel_encoder { | |||
| 270 | * be set correctly before calling this function. */ | 270 | * be set correctly before calling this function. */ |
| 271 | void (*get_config)(struct intel_encoder *, | 271 | void (*get_config)(struct intel_encoder *, |
| 272 | struct intel_crtc_state *pipe_config); | 272 | struct intel_crtc_state *pipe_config); |
| 273 | /* Returns a mask of power domains that need to be referenced as part | 273 | /* |
| 274 | * of the hardware state readout code. */ | 274 | * Acquires the power domains needed for an active encoder during |
| 275 | u64 (*get_power_domains)(struct intel_encoder *encoder, | 275 | * hardware state readout. |
| 276 | struct intel_crtc_state *crtc_state); | 276 | */ |
| 277 | void (*get_power_domains)(struct intel_encoder *encoder, | ||
| 278 | struct intel_crtc_state *crtc_state); | ||
| 277 | /* | 279 | /* |
| 278 | * Called during system suspend after all pending requests for the | 280 | * Called during system suspend after all pending requests for the |
| 279 | * encoder are flushed (for example for DP AUX transactions) and | 281 | * encoder are flushed (for example for DP AUX transactions) and |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index e8f694b57b8a..376ffe842e26 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 338 | bool *enabled, int width, int height) | 338 | bool *enabled, int width, int height) |
| 339 | { | 339 | { |
| 340 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | 340 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); |
| 341 | unsigned long conn_configured, conn_seq, mask; | ||
| 341 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | 342 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); |
| 342 | unsigned long conn_configured, conn_seq; | ||
| 343 | int i, j; | 343 | int i, j; |
| 344 | bool *save_enabled; | 344 | bool *save_enabled; |
| 345 | bool fallback = true, ret = true; | 345 | bool fallback = true, ret = true; |
| @@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 357 | drm_modeset_backoff(&ctx); | 357 | drm_modeset_backoff(&ctx); |
| 358 | 358 | ||
| 359 | memcpy(save_enabled, enabled, count); | 359 | memcpy(save_enabled, enabled, count); |
| 360 | conn_seq = GENMASK(count - 1, 0); | 360 | mask = GENMASK(count - 1, 0); |
| 361 | conn_configured = 0; | 361 | conn_configured = 0; |
| 362 | retry: | 362 | retry: |
| 363 | conn_seq = conn_configured; | ||
| 363 | for (i = 0; i < count; i++) { | 364 | for (i = 0; i < count; i++) { |
| 364 | struct drm_fb_helper_connector *fb_conn; | 365 | struct drm_fb_helper_connector *fb_conn; |
| 365 | struct drm_connector *connector; | 366 | struct drm_connector *connector; |
| @@ -372,8 +373,7 @@ retry: | |||
| 372 | if (conn_configured & BIT(i)) | 373 | if (conn_configured & BIT(i)) |
| 373 | continue; | 374 | continue; |
| 374 | 375 | ||
| 375 | /* First pass, only consider tiled connectors */ | 376 | if (conn_seq == 0 && !connector->has_tile) |
| 376 | if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) | ||
| 377 | continue; | 377 | continue; |
| 378 | 378 | ||
| 379 | if (connector->status == connector_status_connected) | 379 | if (connector->status == connector_status_connected) |
| @@ -477,10 +477,8 @@ retry: | |||
| 477 | conn_configured |= BIT(i); | 477 | conn_configured |= BIT(i); |
| 478 | } | 478 | } |
| 479 | 479 | ||
| 480 | if (conn_configured != conn_seq) { /* repeat until no more are found */ | 480 | if ((conn_configured & mask) != mask && conn_configured != conn_seq) |
| 481 | conn_seq = conn_configured; | ||
| 482 | goto retry; | 481 | goto retry; |
| 483 | } | ||
| 484 | 482 | ||
| 485 | /* | 483 | /* |
| 486 | * If the BIOS didn't enable everything it could, fall back to have the | 484 | * If the BIOS didn't enable everything it could, fall back to have the |
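The fbdev hunk reshapes the retry loop: conn_seq now snapshots the configured mask from the previous pass, the first pass (conn_seq == 0) only considers tiled connectors, and the loop repeats while progress is still being made and not every connector is configured. A purely illustrative userspace model of that loop shape:

    #include <stdio.h>

    #define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

    int main(void)
    {
        const unsigned int count = 4;
        unsigned long mask = GENMASK(count - 1, 0);
        unsigned long conn_configured = 0, conn_seq;
        int pass = 0;

    retry:
        conn_seq = conn_configured;
        for (unsigned int i = 0; i < count; i++) {
            if (conn_configured & (1UL << i))
                continue;
            /* First pass: pretend only connector 2 is tiled. */
            if (conn_seq == 0 && i != 2)
                continue;
            conn_configured |= 1UL << i;
        }
        pass++;
        if ((conn_configured & mask) != mask && conn_configured != conn_seq)
            goto retry;

        printf("configured 0x%lx in %d pass(es)\n", conn_configured, pass);
        return 0;
    }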
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 6403728fe778..31c93c3ccd00 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c | |||
| @@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) | |||
| 256 | mutex_unlock(&dev_priv->sb_lock); | 256 | mutex_unlock(&dev_priv->sb_lock); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) | ||
| 260 | { | ||
| 261 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
| 262 | u32 tmp; | ||
| 263 | |||
| 264 | tmp = I915_READ(PIPEMISC(crtc->pipe)); | ||
| 265 | |||
| 266 | switch (tmp & PIPEMISC_DITHER_BPC_MASK) { | ||
| 267 | case PIPEMISC_DITHER_6_BPC: | ||
| 268 | return 18; | ||
| 269 | case PIPEMISC_DITHER_8_BPC: | ||
| 270 | return 24; | ||
| 271 | case PIPEMISC_DITHER_10_BPC: | ||
| 272 | return 30; | ||
| 273 | case PIPEMISC_DITHER_12_BPC: | ||
| 274 | return 36; | ||
| 275 | default: | ||
| 276 | MISSING_CASE(tmp); | ||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | } | ||
| 280 | |||
| 259 | static int intel_dsi_compute_config(struct intel_encoder *encoder, | 281 | static int intel_dsi_compute_config(struct intel_encoder *encoder, |
| 260 | struct intel_crtc_state *pipe_config, | 282 | struct intel_crtc_state *pipe_config, |
| 261 | struct drm_connector_state *conn_state) | 283 | struct drm_connector_state *conn_state) |
| @@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, | |||
| 1071 | bpp = mipi_dsi_pixel_format_to_bpp( | 1093 | bpp = mipi_dsi_pixel_format_to_bpp( |
| 1072 | pixel_format_from_register_bits(fmt)); | 1094 | pixel_format_from_register_bits(fmt)); |
| 1073 | 1095 | ||
| 1096 | pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); | ||
| 1097 | |||
| 1074 | /* Enable Frame time stamo based scanline reporting */ | 1098 | /* Enable Frame time stamo based scanline reporting */ |
| 1075 | adjusted_mode->private_flags |= | 1099 | adjusted_mode->private_flags |= |
| 1076 | I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; | 1100 | I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index ec3602ebbc1c..54011df8c2e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
| @@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, | |||
| 71 | if (disable_partial) | 71 | if (disable_partial) |
| 72 | ipu_plane_disable(ipu_crtc->plane[1], true); | 72 | ipu_plane_disable(ipu_crtc->plane[1], true); |
| 73 | if (disable_full) | 73 | if (disable_full) |
| 74 | ipu_plane_disable(ipu_crtc->plane[0], false); | 74 | ipu_plane_disable(ipu_crtc->plane[0], true); |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, | 77 | static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 22e68a100e7b..5d333138f913 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c | |||
| @@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock) | |||
| 662 | static unsigned int mt2701_calculate_factor(int clock) | 662 | static unsigned int mt2701_calculate_factor(int clock) |
| 663 | { | 663 | { |
| 664 | if (clock <= 64000) | 664 | if (clock <= 64000) |
| 665 | return 16; | ||
| 666 | else if (clock <= 128000) | ||
| 667 | return 8; | ||
| 668 | else if (clock <= 256000) | ||
| 669 | return 4; | 665 | return 4; |
| 670 | else | 666 | else if (clock <= 128000) |
| 671 | return 2; | 667 | return 2; |
| 668 | else | ||
| 669 | return 1; | ||
| 672 | } | 670 | } |
| 673 | 671 | ||
| 674 | static const struct mtk_dpi_conf mt8173_conf = { | 672 | static const struct mtk_dpi_conf mt8173_conf = { |
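The MT2701 change keeps the same clock thresholds but halves each factor (4/2/1 instead of 16/8/4/2). The corrected ladder, wrapped in a tiny driver-free harness for reference; clock values are in kHz as in the driver:

    #include <stdio.h>

    static unsigned int mt2701_factor(int clock_khz)
    {
        if (clock_khz <= 64000)
            return 4;
        else if (clock_khz <= 128000)
            return 2;
        else
            return 1;
    }

    int main(void)
    {
        const int clocks[] = { 27000, 74250, 148500 };

        for (int i = 0; i < 3; i++)
            printf("%d kHz -> factor %u\n", clocks[i], mt2701_factor(clocks[i]));
        return 0;
    }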
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index cf59ea9bccfd..57ce4708ef1b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <drm/drmP.h> | 15 | #include <drm/drmP.h> |
| 16 | #include <drm/drm_atomic.h> | 16 | #include <drm/drm_atomic.h> |
| 17 | #include <drm/drm_atomic_helper.h> | 17 | #include <drm/drm_atomic_helper.h> |
| 18 | #include <drm/drm_fb_helper.h> | ||
| 18 | #include <drm/drm_gem.h> | 19 | #include <drm/drm_gem.h> |
| 19 | #include <drm/drm_gem_cma_helper.h> | 20 | #include <drm/drm_gem_cma_helper.h> |
| 20 | #include <drm/drm_of.h> | 21 | #include <drm/drm_of.h> |
| @@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = { | |||
| 341 | .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, | 342 | .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, |
| 342 | .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, | 343 | .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, |
| 343 | .gem_prime_mmap = mtk_drm_gem_mmap_buf, | 344 | .gem_prime_mmap = mtk_drm_gem_mmap_buf, |
| 345 | .gem_prime_vmap = mtk_drm_gem_prime_vmap, | ||
| 346 | .gem_prime_vunmap = mtk_drm_gem_prime_vunmap, | ||
| 344 | .fops = &mtk_drm_fops, | 347 | .fops = &mtk_drm_fops, |
| 345 | 348 | ||
| 346 | .name = DRIVER_NAME, | 349 | .name = DRIVER_NAME, |
| @@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev) | |||
| 376 | if (ret < 0) | 379 | if (ret < 0) |
| 377 | goto err_deinit; | 380 | goto err_deinit; |
| 378 | 381 | ||
| 382 | ret = drm_fbdev_generic_setup(drm, 32); | ||
| 383 | if (ret) | ||
| 384 | DRM_ERROR("Failed to initialize fbdev: %d\n", ret); | ||
| 385 | |||
| 379 | return 0; | 386 | return 0; |
| 380 | 387 | ||
| 381 | err_deinit: | 388 | err_deinit: |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 259b7b0de1d2..38483e9ee071 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c | |||
| @@ -241,3 +241,49 @@ err_gem_free: | |||
| 241 | kfree(mtk_gem); | 241 | kfree(mtk_gem); |
| 242 | return ERR_PTR(ret); | 242 | return ERR_PTR(ret); |
| 243 | } | 243 | } |
| 244 | |||
| 245 | void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) | ||
| 246 | { | ||
| 247 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); | ||
| 248 | struct sg_table *sgt; | ||
| 249 | struct sg_page_iter iter; | ||
| 250 | unsigned int npages; | ||
| 251 | unsigned int i = 0; | ||
| 252 | |||
| 253 | if (mtk_gem->kvaddr) | ||
| 254 | return mtk_gem->kvaddr; | ||
| 255 | |||
| 256 | sgt = mtk_gem_prime_get_sg_table(obj); | ||
| 257 | if (IS_ERR(sgt)) | ||
| 258 | return NULL; | ||
| 259 | |||
| 260 | npages = obj->size >> PAGE_SHIFT; | ||
| 261 | mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); | ||
| 262 | if (!mtk_gem->pages) | ||
| 263 | goto out; | ||
| 264 | |||
| 265 | for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { | ||
| 266 | mtk_gem->pages[i++] = sg_page_iter_page(&iter); | ||
| 267 | if (i > npages) | ||
| 268 | break; | ||
| 269 | } | ||
| 270 | mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, | ||
| 271 | pgprot_writecombine(PAGE_KERNEL)); | ||
| 272 | |||
| 273 | out: | ||
| 274 | kfree((void *)sgt); | ||
| 275 | |||
| 276 | return mtk_gem->kvaddr; | ||
| 277 | } | ||
| 278 | |||
| 279 | void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | ||
| 280 | { | ||
| 281 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); | ||
| 282 | |||
| 283 | if (!mtk_gem->pages) | ||
| 284 | return; | ||
| 285 | |||
| 286 | vunmap(vaddr); | ||
| 287 | mtk_gem->kvaddr = 0; | ||
| 288 | kfree((void *)mtk_gem->pages); | ||
| 289 | } | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 534639b43a1c..c047a7ef294f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h | |||
| @@ -37,6 +37,7 @@ struct mtk_drm_gem_obj { | |||
| 37 | dma_addr_t dma_addr; | 37 | dma_addr_t dma_addr; |
| 38 | unsigned long dma_attrs; | 38 | unsigned long dma_attrs; |
| 39 | struct sg_table *sg; | 39 | struct sg_table *sg; |
| 40 | struct page **pages; | ||
| 40 | }; | 41 | }; |
| 41 | 42 | ||
| 42 | #define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base) | 43 | #define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base) |
| @@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, | |||
| 52 | struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); | 53 | struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); |
| 53 | struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, | 54 | struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, |
| 54 | struct dma_buf_attachment *attach, struct sg_table *sg); | 55 | struct dma_buf_attachment *attach, struct sg_table *sg); |
| 56 | void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj); | ||
| 57 | void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | ||
| 55 | 58 | ||
| 56 | #endif | 59 | #endif |
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 915cc84621ae..e04e6c293d39 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c | |||
| @@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, | |||
| 1480 | if (IS_ERR(regmap)) | 1480 | if (IS_ERR(regmap)) |
| 1481 | ret = PTR_ERR(regmap); | 1481 | ret = PTR_ERR(regmap); |
| 1482 | if (ret) { | 1482 | if (ret) { |
| 1483 | ret = PTR_ERR(regmap); | ||
| 1484 | dev_err(dev, | 1483 | dev_err(dev, |
| 1485 | "Failed to get system configuration registers: %d\n", | 1484 | "Failed to get system configuration registers: %d\n", |
| 1486 | ret); | 1485 | ret); |
| @@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, | |||
| 1516 | of_node_put(remote); | 1515 | of_node_put(remote); |
| 1517 | 1516 | ||
| 1518 | hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); | 1517 | hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); |
| 1518 | of_node_put(i2c_np); | ||
| 1519 | if (!hdmi->ddc_adpt) { | 1519 | if (!hdmi->ddc_adpt) { |
| 1520 | dev_err(dev, "Failed to get ddc i2c adapter by node\n"); | 1520 | dev_err(dev, "Failed to get ddc i2c adapter by node\n"); |
| 1521 | return -EINVAL; | 1521 | return -EINVAL; |
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c index 4ef9c57ffd44..5223498502c4 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c | |||
| @@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = { | |||
| 15 | .owner = THIS_MODULE, | 15 | .owner = THIS_MODULE, |
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 19 | unsigned long *parent_rate) | ||
| 20 | { | ||
| 21 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | ||
| 22 | |||
| 23 | hdmi_phy->pll_rate = rate; | ||
| 24 | if (rate <= 74250000) | ||
| 25 | *parent_rate = rate; | ||
| 26 | else | ||
| 27 | *parent_rate = rate / 2; | ||
| 28 | |||
| 29 | return rate; | ||
| 30 | } | ||
| 31 | |||
| 32 | unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, | ||
| 33 | unsigned long parent_rate) | ||
| 34 | { | ||
| 35 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | ||
| 36 | |||
| 37 | return hdmi_phy->pll_rate; | ||
| 38 | } | ||
| 39 | |||
| 40 | void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, | 18 | void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, |
| 41 | u32 bits) | 19 | u32 bits) |
| 42 | { | 20 | { |
| @@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) | |||
| 110 | return NULL; | 88 | return NULL; |
| 111 | } | 89 | } |
| 112 | 90 | ||
| 113 | static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy, | 91 | static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, |
| 114 | const struct clk_ops **ops) | 92 | struct clk_init_data *clk_init) |
| 115 | { | 93 | { |
| 116 | if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops) | 94 | clk_init->flags = hdmi_phy->conf->flags; |
| 117 | *ops = hdmi_phy->conf->hdmi_phy_clk_ops; | 95 | clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops; |
| 118 | else | ||
| 119 | dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n"); | ||
| 120 | } | 96 | } |
| 121 | 97 | ||
| 122 | static int mtk_hdmi_phy_probe(struct platform_device *pdev) | 98 | static int mtk_hdmi_phy_probe(struct platform_device *pdev) |
| @@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) | |||
| 129 | struct clk_init_data clk_init = { | 105 | struct clk_init_data clk_init = { |
| 130 | .num_parents = 1, | 106 | .num_parents = 1, |
| 131 | .parent_names = (const char * const *)&ref_clk_name, | 107 | .parent_names = (const char * const *)&ref_clk_name, |
| 132 | .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, | ||
| 133 | }; | 108 | }; |
| 134 | 109 | ||
| 135 | struct phy *phy; | 110 | struct phy *phy; |
| @@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) | |||
| 167 | hdmi_phy->dev = dev; | 142 | hdmi_phy->dev = dev; |
| 168 | hdmi_phy->conf = | 143 | hdmi_phy->conf = |
| 169 | (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev); | 144 | (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev); |
| 170 | mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops); | 145 | mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init); |
| 171 | hdmi_phy->pll_hw.init = &clk_init; | 146 | hdmi_phy->pll_hw.init = &clk_init; |
| 172 | hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); | 147 | hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); |
| 173 | if (IS_ERR(hdmi_phy->pll)) { | 148 | if (IS_ERR(hdmi_phy->pll)) { |
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h index f39b1fc66612..2d8b3182470d 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h | |||
| @@ -21,6 +21,7 @@ struct mtk_hdmi_phy; | |||
| 21 | 21 | ||
| 22 | struct mtk_hdmi_phy_conf { | 22 | struct mtk_hdmi_phy_conf { |
| 23 | bool tz_disabled; | 23 | bool tz_disabled; |
| 24 | unsigned long flags; | ||
| 24 | const struct clk_ops *hdmi_phy_clk_ops; | 25 | const struct clk_ops *hdmi_phy_clk_ops; |
| 25 | void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); | 26 | void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); |
| 26 | void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); | 27 | void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); |
| @@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, | |||
| 48 | void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, | 49 | void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, |
| 49 | u32 val, u32 mask); | 50 | u32 val, u32 mask); |
| 50 | struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); | 51 | struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); |
| 51 | long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 52 | unsigned long *parent_rate); | ||
| 53 | unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, | ||
| 54 | unsigned long parent_rate); | ||
| 55 | 52 | ||
| 56 | extern struct platform_driver mtk_hdmi_phy_driver; | 53 | extern struct platform_driver mtk_hdmi_phy_driver; |
| 57 | extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; | 54 | extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; |
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c index fcc42dc6ea7f..d3cc4022e988 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c | |||
| @@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw) | |||
| 79 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); | 79 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); |
| 80 | usleep_range(80, 100); | 80 | usleep_range(80, 100); |
| 81 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); | 81 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); |
| 82 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); | ||
| 83 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); | 82 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); |
| 84 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); | 83 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); |
| 85 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); | 84 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); |
| @@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) | |||
| 94 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); | 93 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); |
| 95 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); | 94 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); |
| 96 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); | 95 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); |
| 97 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); | ||
| 98 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); | 96 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); |
| 99 | usleep_range(80, 100); | 97 | usleep_range(80, 100); |
| 100 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); | 98 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); |
| @@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) | |||
| 108 | usleep_range(80, 100); | 106 | usleep_range(80, 100); |
| 109 | } | 107 | } |
| 110 | 108 | ||
| 109 | static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 110 | unsigned long *parent_rate) | ||
| 111 | { | ||
| 112 | return rate; | ||
| 113 | } | ||
| 114 | |||
| 111 | static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | 115 | static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, |
| 112 | unsigned long parent_rate) | 116 | unsigned long parent_rate) |
| 113 | { | 117 | { |
| @@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 116 | 120 | ||
| 117 | if (rate <= 64000000) | 121 | if (rate <= 64000000) |
| 118 | pos_div = 3; | 122 | pos_div = 3; |
| 119 | else if (rate <= 12800000) | 123 | else if (rate <= 128000000) |
| 120 | pos_div = 1; | 124 | pos_div = 2; |
| 121 | else | 125 | else |
| 122 | pos_div = 1; | 126 | pos_div = 1; |
| 123 | 127 | ||
| 124 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK); | 128 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK); |
| 125 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); | 129 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); |
| 130 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); | ||
| 126 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC), | 131 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC), |
| 127 | RG_HTPLL_IC_MASK); | 132 | RG_HTPLL_IC_MASK); |
| 128 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR), | 133 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR), |
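For reference, the corrected thresholds above map common TMDS clocks onto the post-divider as follows: 27 MHz (480p/576p) falls in the <= 64 MHz bucket, 74.25 MHz (720p) lands in the restored <= 128 MHz bucket, and 148.5 MHz (1080p60) takes the default. The helper below only restates that selection for illustration; it is not part of the driver.

	/* Illustrative only: mirrors the corrected pos_div selection above. */
	static unsigned int mt2701_pick_pos_div(unsigned long rate)
	{
		if (rate <= 64000000)		/* e.g. 27 MHz (480p/576p) */
			return 3;
		else if (rate <= 128000000)	/* e.g. 74.25 MHz (720p) */
			return 2;
		else				/* e.g. 148.5 MHz (1080p60) */
			return 1;
	}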
| @@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 154 | return 0; | 159 | return 0; |
| 155 | } | 160 | } |
| 156 | 161 | ||
| 162 | static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, | ||
| 163 | unsigned long parent_rate) | ||
| 164 | { | ||
| 165 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | ||
| 166 | unsigned long out_rate, val; | ||
| 167 | |||
| 168 | val = (readl(hdmi_phy->regs + HDMI_CON6) | ||
| 169 | & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV; | ||
| 170 | switch (val) { | ||
| 171 | case 0x00: | ||
| 172 | out_rate = parent_rate; | ||
| 173 | break; | ||
| 174 | case 0x01: | ||
| 175 | out_rate = parent_rate / 2; | ||
| 176 | break; | ||
| 177 | default: | ||
| 178 | out_rate = parent_rate / 4; | ||
| 179 | break; | ||
| 180 | } | ||
| 181 | |||
| 182 | val = (readl(hdmi_phy->regs + HDMI_CON6) | ||
| 183 | & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV; | ||
| 184 | out_rate *= (val + 1) * 2; | ||
| 185 | val = (readl(hdmi_phy->regs + HDMI_CON2) | ||
| 186 | & RG_HDMITX_TX_POSDIV_MASK); | ||
| 187 | out_rate >>= (val >> RG_HDMITX_TX_POSDIV); | ||
| 188 | |||
| 189 | if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV) | ||
| 190 | out_rate /= 5; | ||
| 191 | |||
| 192 | return out_rate; | ||
| 193 | } | ||
| 194 | |||
| 157 | static const struct clk_ops mtk_hdmi_phy_pll_ops = { | 195 | static const struct clk_ops mtk_hdmi_phy_pll_ops = { |
| 158 | .prepare = mtk_hdmi_pll_prepare, | 196 | .prepare = mtk_hdmi_pll_prepare, |
| 159 | .unprepare = mtk_hdmi_pll_unprepare, | 197 | .unprepare = mtk_hdmi_pll_unprepare, |
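The new mtk_hdmi_pll_recalc_rate() above derives the rate purely from the HDMI_CON6/HDMI_CON2 register fields. The sketch below restates that arithmetic with the register reads replaced by plain parameters so the formula is easier to follow; it is an illustration only, and the parameter names are invented for this sketch.

	/* Illustrative restatement of the recalc arithmetic above. */
	static unsigned long mt2701_pll_rate(unsigned long parent_rate,
					     unsigned int prediv,	/* HDMI_CON6 RG_HTPLL_PREDIV field */
					     unsigned int fbkdiv,	/* HDMI_CON6 RG_HTPLL_FBKDIV field */
					     unsigned int tx_posdiv,	/* HDMI_CON2 shift count */
					     bool tx_posdiv_en)		/* HDMI_CON2 RG_HDMITX_EN_TX_POSDIV */
	{
		unsigned long rate = parent_rate;

		if (prediv == 1)
			rate /= 2;
		else if (prediv >= 2)
			rate /= 4;

		rate *= (fbkdiv + 1) * 2;
		rate >>= tx_posdiv;
		if (tx_posdiv_en)
			rate /= 5;

		return rate;
	}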
| @@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) | |||
| 174 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); | 212 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); |
| 175 | usleep_range(80, 100); | 213 | usleep_range(80, 100); |
| 176 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); | 214 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); |
| 177 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); | ||
| 178 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); | 215 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); |
| 179 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); | 216 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); |
| 180 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); | 217 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); |
| @@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) | |||
| 186 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); | 223 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); |
| 187 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); | 224 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); |
| 188 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); | 225 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); |
| 189 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); | ||
| 190 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); | 226 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); |
| 191 | usleep_range(80, 100); | 227 | usleep_range(80, 100); |
| 192 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); | 228 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); |
| @@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) | |||
| 202 | 238 | ||
| 203 | struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { | 239 | struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { |
| 204 | .tz_disabled = true, | 240 | .tz_disabled = true, |
| 241 | .flags = CLK_SET_RATE_GATE, | ||
| 205 | .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, | 242 | .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, |
| 206 | .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, | 243 | .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, |
| 207 | .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, | 244 | .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, |
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index ed5916b27658..47f8a2951682 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | |||
| @@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) | |||
| 199 | usleep_range(100, 150); | 199 | usleep_range(100, 150); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 203 | unsigned long *parent_rate) | ||
| 204 | { | ||
| 205 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | ||
| 206 | |||
| 207 | hdmi_phy->pll_rate = rate; | ||
| 208 | if (rate <= 74250000) | ||
| 209 | *parent_rate = rate; | ||
| 210 | else | ||
| 211 | *parent_rate = rate / 2; | ||
| 212 | |||
| 213 | return rate; | ||
| 214 | } | ||
| 215 | |||
| 202 | static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | 216 | static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, |
| 203 | unsigned long parent_rate) | 217 | unsigned long parent_rate) |
| 204 | { | 218 | { |
| @@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 285 | return 0; | 299 | return 0; |
| 286 | } | 300 | } |
| 287 | 301 | ||
| 302 | static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, | ||
| 303 | unsigned long parent_rate) | ||
| 304 | { | ||
| 305 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | ||
| 306 | |||
| 307 | return hdmi_phy->pll_rate; | ||
| 308 | } | ||
| 309 | |||
| 288 | static const struct clk_ops mtk_hdmi_phy_pll_ops = { | 310 | static const struct clk_ops mtk_hdmi_phy_pll_ops = { |
| 289 | .prepare = mtk_hdmi_pll_prepare, | 311 | .prepare = mtk_hdmi_pll_prepare, |
| 290 | .unprepare = mtk_hdmi_pll_unprepare, | 312 | .unprepare = mtk_hdmi_pll_unprepare, |
| @@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) | |||
| 309 | } | 331 | } |
| 310 | 332 | ||
| 311 | struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = { | 333 | struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = { |
| 334 | .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, | ||
| 312 | .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, | 335 | .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, |
| 313 | .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, | 336 | .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, |
| 314 | .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, | 337 | .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 340383150fb9..ebf9c96d43ee 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | |||
| @@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) | |||
| 175 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3); | 175 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3); |
| 176 | hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE); | 176 | hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE); |
| 177 | hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE); | 177 | hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE); |
| 178 | REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); | ||
| 178 | hdmi4_core_disable(core); | 179 | hdmi4_core_disable(core); |
| 179 | return 0; | 180 | return 0; |
| 180 | } | 181 | } |
| @@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) | |||
| 182 | if (err) | 183 | if (err) |
| 183 | return err; | 184 | return err; |
| 184 | 185 | ||
| 186 | /* | ||
| 187 | * Initialize CEC clock divider: CEC needs 2MHz clock hence | ||
| 188 | * set the divider to 24 to get 48/24=2MHz clock | ||
| 189 | */ | ||
| 190 | REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); | ||
| 191 | |||
| 185 | /* Clear TX FIFO */ | 192 | /* Clear TX FIFO */ |
| 186 | if (!hdmi_cec_clear_tx_fifo(adap)) { | 193 | if (!hdmi_cec_clear_tx_fifo(adap)) { |
| 187 | pr_err("cec-%s: could not clear TX FIFO\n", adap->name); | 194 | pr_err("cec-%s: could not clear TX FIFO\n", adap->name); |
| 188 | return -EIO; | 195 | err = -EIO; |
| 196 | goto err_disable_clk; | ||
| 189 | } | 197 | } |
| 190 | 198 | ||
| 191 | /* Clear RX FIFO */ | 199 | /* Clear RX FIFO */ |
| 192 | if (!hdmi_cec_clear_rx_fifo(adap)) { | 200 | if (!hdmi_cec_clear_rx_fifo(adap)) { |
| 193 | pr_err("cec-%s: could not clear RX FIFO\n", adap->name); | 201 | pr_err("cec-%s: could not clear RX FIFO\n", adap->name); |
| 194 | return -EIO; | 202 | err = -EIO; |
| 203 | goto err_disable_clk; | ||
| 195 | } | 204 | } |
| 196 | 205 | ||
| 197 | /* Clear CEC interrupts */ | 206 | /* Clear CEC interrupts */ |
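The divider programmed above follows directly from the comment: the CEC block needs a 2 MHz clock and the functional clock runs at 48 MHz, so the divider is 48 / 2 = 24 = 0x18. A minimal sketch of that arithmetic (the macro names are hypothetical, not taken from the driver):

	#define CEC_FCLK_HZ	48000000UL	/* functional clock, per the comment above */
	#define CEC_TARGET_HZ	2000000UL	/* clock required by the CEC block */

	/* Illustrative: 48000000 / 2000000 = 24 = 0x18, the value written above. */
	static inline unsigned long cec_clk_divider(void)
	{
		return CEC_FCLK_HZ / CEC_TARGET_HZ;
	}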
| @@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) | |||
| 236 | hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp); | 245 | hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp); |
| 237 | } | 246 | } |
| 238 | return 0; | 247 | return 0; |
| 248 | |||
| 249 | err_disable_clk: | ||
| 250 | REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); | ||
| 251 | hdmi4_core_disable(core); | ||
| 252 | |||
| 253 | return err; | ||
| 239 | } | 254 | } |
| 240 | 255 | ||
| 241 | static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) | 256 | static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) |
| @@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core, | |||
| 333 | return ret; | 348 | return ret; |
| 334 | core->wp = wp; | 349 | core->wp = wp; |
| 335 | 350 | ||
| 336 | /* | 351 | /* Disable clock initially, hdmi_cec_adap_enable() manages it */ |
| 337 | * Initialize CEC clock divider: CEC needs 2MHz clock hence | 352 | REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); |
| 338 | * set the devider to 24 to get 48/24=2MHz clock | ||
| 339 | */ | ||
| 340 | REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); | ||
| 341 | 353 | ||
| 342 | ret = cec_register_adapter(core->adap, &pdev->dev); | 354 | ret = cec_register_adapter(core->adap, &pdev->dev); |
| 343 | if (ret < 0) { | 355 | if (ret < 0) { |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 813ba42f2753..e384b95ad857 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | |||
| @@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, | |||
| 708 | else | 708 | else |
| 709 | acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; | 709 | acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; |
| 710 | /* | 710 | /* |
| 711 | * The I2S input word length is twice the lenght given in the IEC-60958 | 711 | * The I2S input word length is twice the length given in the IEC-60958 |
| 712 | * status word. If the word size is greater than | 712 | * status word. If the word size is greater than |
| 713 | * 20 bits, increment by one. | 713 | * 20 bits, increment by one. |
| 714 | */ | 714 | */ |
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 19fc601c9eeb..a1bec2779e76 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c | |||
| @@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) | |||
| 366 | EXPORT_SYMBOL(drm_sched_increase_karma); | 366 | EXPORT_SYMBOL(drm_sched_increase_karma); |
| 367 | 367 | ||
| 368 | /** | 368 | /** |
| 369 | * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job | 369 | * drm_sched_stop - stop the scheduler |
| 370 | * | 370 | * |
| 371 | * @sched: scheduler instance | 371 | * @sched: scheduler instance |
| 372 | * @bad: bad scheduler job | ||
| 373 | * | 372 | * |
| 374 | */ | 373 | */ |
| 375 | void drm_sched_stop(struct drm_gpu_scheduler *sched) | 374 | void drm_sched_stop(struct drm_gpu_scheduler *sched) |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 3ebd9f5e2719..29258b404e54 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/of_reserved_mem.h> | 16 | #include <linux/of_reserved_mem.h> |
| 17 | 17 | ||
| 18 | #include <drm/drmP.h> | 18 | #include <drm/drmP.h> |
| 19 | #include <drm/drm_atomic_helper.h> | ||
| 19 | #include <drm/drm_fb_cma_helper.h> | 20 | #include <drm/drm_fb_cma_helper.h> |
| 20 | #include <drm/drm_fb_helper.h> | 21 | #include <drm/drm_fb_helper.h> |
| 21 | #include <drm/drm_gem_cma_helper.h> | 22 | #include <drm/drm_gem_cma_helper.h> |
| @@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev) | |||
| 85 | ret = -ENOMEM; | 86 | ret = -ENOMEM; |
| 86 | goto free_drm; | 87 | goto free_drm; |
| 87 | } | 88 | } |
| 89 | |||
| 90 | dev_set_drvdata(dev, drm); | ||
| 88 | drm->dev_private = drv; | 91 | drm->dev_private = drv; |
| 89 | INIT_LIST_HEAD(&drv->frontend_list); | 92 | INIT_LIST_HEAD(&drv->frontend_list); |
| 90 | INIT_LIST_HEAD(&drv->engine_list); | 93 | INIT_LIST_HEAD(&drv->engine_list); |
| @@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev) | |||
| 144 | 147 | ||
| 145 | drm_dev_unregister(drm); | 148 | drm_dev_unregister(drm); |
| 146 | drm_kms_helper_poll_fini(drm); | 149 | drm_kms_helper_poll_fini(drm); |
| 150 | drm_atomic_helper_shutdown(drm); | ||
| 147 | drm_mode_config_cleanup(drm); | 151 | drm_mode_config_cleanup(drm); |
| 152 | |||
| 153 | component_unbind_all(dev, NULL); | ||
| 148 | of_reserved_mem_device_release(dev); | 154 | of_reserved_mem_device_release(dev); |
| 155 | |||
| 149 | drm_dev_put(drm); | 156 | drm_dev_put(drm); |
| 150 | } | 157 | } |
| 151 | 158 | ||
| @@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev) | |||
| 395 | 402 | ||
| 396 | static int sun4i_drv_remove(struct platform_device *pdev) | 403 | static int sun4i_drv_remove(struct platform_device *pdev) |
| 397 | { | 404 | { |
| 405 | component_master_del(&pdev->dev, &sun4i_drv_master_ops); | ||
| 406 | |||
| 398 | return 0; | 407 | return 0; |
| 399 | } | 408 | } |
| 400 | 409 | ||
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index dc47720c99ba..39d8509d96a0 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | |||
| @@ -48,8 +48,13 @@ static enum drm_mode_status | |||
| 48 | sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, | 48 | sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, |
| 49 | const struct drm_display_mode *mode) | 49 | const struct drm_display_mode *mode) |
| 50 | { | 50 | { |
| 51 | /* This is max for HDMI 2.0b (4K@60Hz) */ | 51 | /* |
| 52 | if (mode->clock > 594000) | 52 | * Controller supports a maximum of 594 MHz, which corresponds to |
| 53 | * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than | ||
| 54 | * 340 MHz scrambling has to be enabled. Because scrambling is | ||
| 55 | * not yet implemented, just limit to 340 MHz for now. | ||
| 56 | */ | ||
| 57 | if (mode->clock > 340000) | ||
| 53 | return MODE_CLOCK_HIGH; | 58 | return MODE_CLOCK_HIGH; |
| 54 | 59 | ||
| 55 | return MODE_OK; | 60 | return MODE_OK; |
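With the tighter limit above, common modes sort out as follows: 1080p60 (148.5 MHz) and 4K@30 (297 MHz) stay under 340 MHz and remain MODE_OK, while 4K@60 4:4:4 (594 MHz) is rejected as MODE_CLOCK_HIGH until scrambling support is implemented. The helper below is only an illustrative restatement of the new check (mode->clock is in kHz):

	/* Illustrative only: the 340 MHz cap above, applied to a clock in kHz. */
	static bool h6_clock_within_limit(int clock_khz)
	{
		return clock_khz <= 340000;	/* 148500 and 297000 pass; 594000 fails */
	}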
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index fc36e0c10a37..b1e7c76e9c17 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c | |||
| @@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, | |||
| 227 | 227 | ||
| 228 | err_unregister_gates: | 228 | err_unregister_gates: |
| 229 | for (i = 0; i < CLK_NUM; i++) | 229 | for (i = 0; i < CLK_NUM; i++) |
| 230 | if (clk_data->hws[i]) | 230 | if (!IS_ERR_OR_NULL(clk_data->hws[i])) |
| 231 | clk_hw_unregister_gate(clk_data->hws[i]); | 231 | clk_hw_unregister_gate(clk_data->hws[i]); |
| 232 | clk_disable_unprepare(tcon_top->bus); | 232 | clk_disable_unprepare(tcon_top->bus); |
| 233 | err_assert_reset: | 233 | err_assert_reset: |
| @@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master, | |||
| 245 | 245 | ||
| 246 | of_clk_del_provider(dev->of_node); | 246 | of_clk_del_provider(dev->of_node); |
| 247 | for (i = 0; i < CLK_NUM; i++) | 247 | for (i = 0; i < CLK_NUM; i++) |
| 248 | clk_hw_unregister_gate(clk_data->hws[i]); | 248 | if (clk_data->hws[i]) |
| 249 | clk_hw_unregister_gate(clk_data->hws[i]); | ||
| 249 | 250 | ||
| 250 | clk_disable_unprepare(tcon_top->bus); | 251 | clk_disable_unprepare(tcon_top->bus); |
| 251 | reset_control_assert(tcon_top->rst); | 252 | reset_control_assert(tcon_top->rst); |
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 47c55974756d..d23c4bfde790 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c | |||
| @@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) | |||
| 1260 | 1260 | ||
| 1261 | hdmi->dvi = !tegra_output_is_hdmi(output); | 1261 | hdmi->dvi = !tegra_output_is_hdmi(output); |
| 1262 | if (!hdmi->dvi) { | 1262 | if (!hdmi->dvi) { |
| 1263 | err = tegra_hdmi_setup_audio(hdmi); | 1263 | /* |
| 1264 | if (err < 0) | 1264 | * Make sure that the audio format has been configured before |
| 1265 | hdmi->dvi = true; | 1265 | * enabling audio, otherwise we may try to divide by zero. |
| 1266 | */ | ||
| 1267 | if (hdmi->format.sample_rate > 0) { | ||
| 1268 | err = tegra_hdmi_setup_audio(hdmi); | ||
| 1269 | if (err < 0) | ||
| 1270 | hdmi->dvi = true; | ||
| 1271 | } | ||
| 1266 | } | 1272 | } |
| 1267 | 1273 | ||
| 1268 | if (hdmi->config->has_hda) | 1274 | if (hdmi->config->has_hda) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3f56647cdb35..1a01669b159a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj); | |||
| 49 | * ttm_global_mutex - protecting the global BO state | 49 | * ttm_global_mutex - protecting the global BO state |
| 50 | */ | 50 | */ |
| 51 | DEFINE_MUTEX(ttm_global_mutex); | 51 | DEFINE_MUTEX(ttm_global_mutex); |
| 52 | struct ttm_bo_global ttm_bo_glob = { | 52 | unsigned ttm_bo_glob_use_count; |
| 53 | .use_count = 0 | 53 | struct ttm_bo_global ttm_bo_glob; |
| 54 | }; | ||
| 55 | 54 | ||
| 56 | static struct attribute ttm_bo_count = { | 55 | static struct attribute ttm_bo_count = { |
| 57 | .name = "bo_count", | 56 | .name = "bo_count", |
| @@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, | |||
| 876 | reservation_object_add_shared_fence(bo->resv, fence); | 875 | reservation_object_add_shared_fence(bo->resv, fence); |
| 877 | 876 | ||
| 878 | ret = reservation_object_reserve_shared(bo->resv, 1); | 877 | ret = reservation_object_reserve_shared(bo->resv, 1); |
| 879 | if (unlikely(ret)) | 878 | if (unlikely(ret)) { |
| 879 | dma_fence_put(fence); | ||
| 880 | return ret; | 880 | return ret; |
| 881 | } | ||
| 881 | 882 | ||
| 882 | dma_fence_put(bo->moving); | 883 | dma_fence_put(bo->moving); |
| 883 | bo->moving = fence; | 884 | bo->moving = fence; |
| @@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void) | |||
| 1529 | struct ttm_bo_global *glob = &ttm_bo_glob; | 1530 | struct ttm_bo_global *glob = &ttm_bo_glob; |
| 1530 | 1531 | ||
| 1531 | mutex_lock(&ttm_global_mutex); | 1532 | mutex_lock(&ttm_global_mutex); |
| 1532 | if (--glob->use_count > 0) | 1533 | if (--ttm_bo_glob_use_count > 0) |
| 1533 | goto out; | 1534 | goto out; |
| 1534 | 1535 | ||
| 1535 | kobject_del(&glob->kobj); | 1536 | kobject_del(&glob->kobj); |
| 1536 | kobject_put(&glob->kobj); | 1537 | kobject_put(&glob->kobj); |
| 1537 | ttm_mem_global_release(&ttm_mem_glob); | 1538 | ttm_mem_global_release(&ttm_mem_glob); |
| 1539 | memset(glob, 0, sizeof(*glob)); | ||
| 1538 | out: | 1540 | out: |
| 1539 | mutex_unlock(&ttm_global_mutex); | 1541 | mutex_unlock(&ttm_global_mutex); |
| 1540 | } | 1542 | } |
| @@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void) | |||
| 1546 | unsigned i; | 1548 | unsigned i; |
| 1547 | 1549 | ||
| 1548 | mutex_lock(&ttm_global_mutex); | 1550 | mutex_lock(&ttm_global_mutex); |
| 1549 | if (++glob->use_count > 1) | 1551 | if (++ttm_bo_glob_use_count > 1) |
| 1550 | goto out; | 1552 | goto out; |
| 1551 | 1553 | ||
| 1552 | ret = ttm_mem_global_init(&ttm_mem_glob); | 1554 | ret = ttm_mem_global_init(&ttm_mem_glob); |
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index f1567c353b54..9a0909decb36 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
| @@ -461,8 +461,8 @@ out_no_zone: | |||
| 461 | 461 | ||
| 462 | void ttm_mem_global_release(struct ttm_mem_global *glob) | 462 | void ttm_mem_global_release(struct ttm_mem_global *glob) |
| 463 | { | 463 | { |
| 464 | unsigned int i; | ||
| 465 | struct ttm_mem_zone *zone; | 464 | struct ttm_mem_zone *zone; |
| 465 | unsigned int i; | ||
| 466 | 466 | ||
| 467 | /* let the page allocator first stop the shrink work. */ | 467 | /* let the page allocator first stop the shrink work. */ |
| 468 | ttm_page_alloc_fini(); | 468 | ttm_page_alloc_fini(); |
| @@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) | |||
| 475 | zone = glob->zones[i]; | 475 | zone = glob->zones[i]; |
| 476 | kobject_del(&zone->kobj); | 476 | kobject_del(&zone->kobj); |
| 477 | kobject_put(&zone->kobj); | 477 | kobject_put(&zone->kobj); |
| 478 | } | 478 | } |
| 479 | kobject_del(&glob->kobj); | 479 | kobject_del(&glob->kobj); |
| 480 | kobject_put(&glob->kobj); | 480 | kobject_put(&glob->kobj); |
| 481 | memset(glob, 0, sizeof(*glob)); | ||
| 481 | } | 482 | } |
| 482 | 483 | ||
| 483 | static void ttm_check_swapping(struct ttm_mem_global *glob) | 484 | static void ttm_check_swapping(struct ttm_mem_global *glob) |
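Both release paths now memset() their global object on the final put, which is presumably why the buffer-object use counter moved out of struct ttm_bo_global into the separate ttm_bo_glob_use_count: a counter embedded in the object would be wiped together with it. A minimal sketch of that pattern, with every example_* name invented for illustration:

	/* Hypothetical sketch: keep the use count outside the object that gets zeroed. */
	static DEFINE_MUTEX(example_mutex);
	static unsigned int example_use_count;		/* survives the memset() below */
	static struct example_global {
		int initialized;
	} example_glob;

	static void example_global_release(void)
	{
		mutex_lock(&example_mutex);
		if (--example_use_count == 0)
			memset(&example_glob, 0, sizeof(example_glob));
		mutex_unlock(&example_mutex);
	}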
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f841accc2c00..627f8dc91d0e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |||
| 730 | } | 730 | } |
| 731 | 731 | ||
| 732 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 732 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 733 | if (!(flags & TTM_PAGE_FLAG_DMA32)) { | 733 | if (!(flags & TTM_PAGE_FLAG_DMA32) && |
| 734 | for (j = 0; j < HPAGE_PMD_NR; ++j) | 734 | (npages - i) >= HPAGE_PMD_NR) { |
| 735 | if (p++ != pages[i + j]) | 735 | for (j = 1; j < HPAGE_PMD_NR; ++j) |
| 736 | if (++p != pages[i + j]) | ||
| 736 | break; | 737 | break; |
| 737 | 738 | ||
| 738 | if (j == HPAGE_PMD_NR) | 739 | if (j == HPAGE_PMD_NR) |
| @@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, | |||
| 759 | unsigned max_size, n2free; | 760 | unsigned max_size, n2free; |
| 760 | 761 | ||
| 761 | spin_lock_irqsave(&huge->lock, irq_flags); | 762 | spin_lock_irqsave(&huge->lock, irq_flags); |
| 762 | while (i < npages) { | 763 | while ((npages - i) >= HPAGE_PMD_NR) { |
| 763 | struct page *p = pages[i]; | 764 | struct page *p = pages[i]; |
| 764 | unsigned j; | 765 | unsigned j; |
| 765 | 766 | ||
| 766 | if (!p) | 767 | if (!p) |
| 767 | break; | 768 | break; |
| 768 | 769 | ||
| 769 | for (j = 0; j < HPAGE_PMD_NR; ++j) | 770 | for (j = 1; j < HPAGE_PMD_NR; ++j) |
| 770 | if (p++ != pages[i + j]) | 771 | if (++p != pages[i + j]) |
| 771 | break; | 772 | break; |
| 772 | 773 | ||
| 773 | if (j != HPAGE_PMD_NR) | 774 | if (j != HPAGE_PMD_NR) |
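Both hunks above tighten the transparent-huge-page coalescing path in the same way: the loop only runs while at least HPAGE_PMD_NR entries remain (so it no longer reads past the end of the array), and the comparison now starts at the second entry with a pre-incremented pointer instead of first comparing entry 0 against itself. A standalone sketch of the contiguity test, with a hypothetical helper name:

	/*
	 * Illustrative: true if the next 'count' entries of 'pages', starting at
	 * index 'i', are physically consecutive struct pages and can therefore
	 * be handed back as one huge page.
	 */
	static bool pages_are_contiguous(struct page **pages, unsigned long npages,
					 unsigned long i, unsigned int count)
	{
		struct page *p = pages[i];
		unsigned int j;

		if (npages - i < count)		/* mirrors the new bounds check */
			return false;

		for (j = 1; j < count; ++j)
			if (++p != pages[i + j])
				return false;

		return true;
	}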
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 22cd2d13e272..ff47f890e6ad 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c | |||
| @@ -52,6 +52,7 @@ static struct drm_driver driver = { | |||
| 52 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | 52 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, |
| 53 | .load = udl_driver_load, | 53 | .load = udl_driver_load, |
| 54 | .unload = udl_driver_unload, | 54 | .unload = udl_driver_unload, |
| 55 | .release = udl_driver_release, | ||
| 55 | 56 | ||
| 56 | /* gem hooks */ | 57 | /* gem hooks */ |
| 57 | .gem_free_object_unlocked = udl_gem_free_object, | 58 | .gem_free_object_unlocked = udl_gem_free_object, |
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index e9e9b1ff678e..4ae67d882eae 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h | |||
| @@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb); | |||
| 104 | 104 | ||
| 105 | int udl_driver_load(struct drm_device *dev, unsigned long flags); | 105 | int udl_driver_load(struct drm_device *dev, unsigned long flags); |
| 106 | void udl_driver_unload(struct drm_device *dev); | 106 | void udl_driver_unload(struct drm_device *dev); |
| 107 | void udl_driver_release(struct drm_device *dev); | ||
| 107 | 108 | ||
| 108 | int udl_fbdev_init(struct drm_device *dev); | 109 | int udl_fbdev_init(struct drm_device *dev); |
| 109 | void udl_fbdev_cleanup(struct drm_device *dev); | 110 | void udl_fbdev_cleanup(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 9086d0d1b880..1f8ef34ade24 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c | |||
| @@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev) | |||
| 379 | udl_free_urb_list(dev); | 379 | udl_free_urb_list(dev); |
| 380 | 380 | ||
| 381 | udl_fbdev_cleanup(dev); | 381 | udl_fbdev_cleanup(dev); |
| 382 | udl_modeset_cleanup(dev); | ||
| 383 | kfree(udl); | 382 | kfree(udl); |
| 384 | } | 383 | } |
| 384 | |||
| 385 | void udl_driver_release(struct drm_device *dev) | ||
| 386 | { | ||
| 387 | udl_modeset_cleanup(dev); | ||
| 388 | drm_dev_fini(dev); | ||
| 389 | kfree(dev); | ||
| 390 | } | ||
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 730008d3da76..1baa10e94484 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -1042,7 +1042,7 @@ static void | |||
| 1042 | vc4_crtc_reset(struct drm_crtc *crtc) | 1042 | vc4_crtc_reset(struct drm_crtc *crtc) |
| 1043 | { | 1043 | { |
| 1044 | if (crtc->state) | 1044 | if (crtc->state) |
| 1045 | __drm_atomic_helper_crtc_destroy_state(crtc->state); | 1045 | vc4_crtc_destroy_state(crtc, crtc->state); |
| 1046 | 1046 | ||
| 1047 | crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); | 1047 | crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); |
| 1048 | if (crtc->state) | 1048 | if (crtc->state) |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b996ac1d4fcc..af92964b6889 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c | |||
| @@ -205,10 +205,14 @@ static struct drm_driver driver = { | |||
| 205 | #if defined(CONFIG_DEBUG_FS) | 205 | #if defined(CONFIG_DEBUG_FS) |
| 206 | .debugfs_init = virtio_gpu_debugfs_init, | 206 | .debugfs_init = virtio_gpu_debugfs_init, |
| 207 | #endif | 207 | #endif |
| 208 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 209 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 208 | .gem_prime_export = drm_gem_prime_export, | 210 | .gem_prime_export = drm_gem_prime_export, |
| 209 | .gem_prime_import = drm_gem_prime_import, | 211 | .gem_prime_import = drm_gem_prime_import, |
| 210 | .gem_prime_pin = virtgpu_gem_prime_pin, | 212 | .gem_prime_pin = virtgpu_gem_prime_pin, |
| 211 | .gem_prime_unpin = virtgpu_gem_prime_unpin, | 213 | .gem_prime_unpin = virtgpu_gem_prime_unpin, |
| 214 | .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, | ||
| 215 | .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, | ||
| 212 | .gem_prime_vmap = virtgpu_gem_prime_vmap, | 216 | .gem_prime_vmap = virtgpu_gem_prime_vmap, |
| 213 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, | 217 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, |
| 214 | .gem_prime_mmap = virtgpu_gem_prime_mmap, | 218 | .gem_prime_mmap = virtgpu_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3238fdf58eb4..d577cb76f5ad 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
| @@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); | |||
| 354 | /* virtgpu_prime.c */ | 354 | /* virtgpu_prime.c */ |
| 355 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); | 355 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); |
| 356 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); | 356 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); |
| 357 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
| 358 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 359 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 360 | struct sg_table *sgt); | ||
| 357 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); | 361 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); |
| 358 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 362 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
| 359 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, | 363 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index c59ec34c80a5..eb51a78e1199 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c | |||
| @@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 39 | WARN_ONCE(1, "not implemented"); | 39 | WARN_ONCE(1, "not implemented"); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 43 | { | ||
| 44 | return ERR_PTR(-ENODEV); | ||
| 45 | } | ||
| 46 | |||
| 47 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 48 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 49 | struct sg_table *table) | ||
| 50 | { | ||
| 51 | return ERR_PTR(-ENODEV); | ||
| 52 | } | ||
| 53 | |||
| 42 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) | 54 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) |
| 43 | { | 55 | { |
| 44 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); | 56 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6165fe2c4504..1bfa353d995c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -546,29 +546,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | /** | 548 | /** |
| 549 | * vmw_assume_iommu - Figure out whether coherent dma-remapping might be | ||
| 550 | * taking place. | ||
| 551 | * @dev: Pointer to the struct drm_device. | ||
| 552 | * | ||
| 553 | * Return: true if iommu present, false otherwise. | ||
| 554 | */ | ||
| 555 | static bool vmw_assume_iommu(struct drm_device *dev) | ||
| 556 | { | ||
| 557 | const struct dma_map_ops *ops = get_dma_ops(dev->dev); | ||
| 558 | |||
| 559 | return !dma_is_direct(ops) && ops && | ||
| 560 | ops->map_page != dma_direct_map_page; | ||
| 561 | } | ||
| 562 | |||
| 563 | /** | ||
| 564 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this | 549 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this |
| 565 | * system. | 550 | * system. |
| 566 | * | 551 | * |
| 567 | * @dev_priv: Pointer to a struct vmw_private | 552 | * @dev_priv: Pointer to a struct vmw_private |
| 568 | * | 553 | * |
| 569 | * This functions tries to determine the IOMMU setup and what actions | 554 | * This functions tries to determine what actions need to be taken by the |
| 570 | * need to be taken by the driver to make system pages visible to the | 555 | * driver to make system pages visible to the device. |
| 571 | * device. | ||
| 572 | * If this function decides that DMA is not possible, it returns -EINVAL. | 556 | * If this function decides that DMA is not possible, it returns -EINVAL. |
| 573 | * The driver may then try to disable features of the device that require | 557 | * The driver may then try to disable features of the device that require |
| 574 | * DMA. | 558 | * DMA. |
| @@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) | |||
| 578 | static const char *names[vmw_dma_map_max] = { | 562 | static const char *names[vmw_dma_map_max] = { |
| 579 | [vmw_dma_phys] = "Using physical TTM page addresses.", | 563 | [vmw_dma_phys] = "Using physical TTM page addresses.", |
| 580 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | 564 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", |
| 581 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | 565 | [vmw_dma_map_populate] = "Caching DMA mappings.", |
| 582 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | 566 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; |
| 583 | 567 | ||
| 584 | if (vmw_force_coherent) | 568 | if (vmw_force_coherent) |
| 585 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 569 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
| 586 | else if (vmw_assume_iommu(dev_priv->dev)) | 570 | else if (vmw_restrict_iommu) |
| 587 | dev_priv->map_mode = vmw_dma_map_populate; | 571 | dev_priv->map_mode = vmw_dma_map_bind; |
| 588 | else if (!vmw_force_iommu) | ||
| 589 | dev_priv->map_mode = vmw_dma_phys; | ||
| 590 | else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) | ||
| 591 | dev_priv->map_mode = vmw_dma_alloc_coherent; | ||
| 592 | else | 572 | else |
| 593 | dev_priv->map_mode = vmw_dma_map_populate; | 573 | dev_priv->map_mode = vmw_dma_map_populate; |
| 594 | 574 | ||
| 595 | if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) | ||
| 596 | dev_priv->map_mode = vmw_dma_map_bind; | ||
| 597 | |||
| 598 | /* No TTM coherent page pool? FIXME: Ask TTM instead! */ | 575 | /* No TTM coherent page pool? FIXME: Ask TTM instead! */ |
| 599 | if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && | 576 | if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && |
| 600 | (dev_priv->map_mode == vmw_dma_alloc_coherent)) | 577 | (dev_priv->map_mode == vmw_dma_alloc_coherent)) |
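After this cleanup the mapping-mode decision no longer tries to second-guess the IOMMU setup; it reduces to a three-way choice driven by module parameters, in the order shown above. The sketch below only restates that order with the parameters passed in explicitly; the enumerator names come from the table above, while the enum tag is assumed for illustration.

	/* Illustrative restatement of the simplified selection above. */
	static enum vmw_dma_map_mode vmw_pick_map_mode(bool force_coherent,
						       bool restrict_iommu)
	{
		if (force_coherent)
			return vmw_dma_alloc_coherent;
		if (restrict_iommu)
			return vmw_dma_map_bind;
		return vmw_dma_map_populate;
	}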
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 27101c04a827..0c0eb43abf65 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c | |||
| @@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job) | |||
| 115 | static void host1x_channel_set_streamid(struct host1x_channel *channel) | 115 | static void host1x_channel_set_streamid(struct host1x_channel *channel) |
| 116 | { | 116 | { |
| 117 | #if HOST1X_HW >= 6 | 117 | #if HOST1X_HW >= 6 |
| 118 | u32 sid = 0x7f; | ||
| 119 | #ifdef CONFIG_IOMMU_API | ||
| 118 | struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent); | 120 | struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent); |
| 119 | u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f; | 121 | if (spec) |
| 122 | sid = spec->ids[0] & 0xffff; | ||
| 123 | #endif | ||
| 120 | 124 | ||
| 121 | host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID); | 125 | host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID); |
| 122 | #endif | 126 | #endif |
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c index 9b2b3fa479c4..5e44ff1f2085 100644 --- a/drivers/gpu/ipu-v3/ipu-dp.c +++ b/drivers/gpu/ipu-v3/ipu-dp.c | |||
| @@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp, | |||
| 195 | ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs, | 195 | ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs, |
| 196 | DP_COM_CONF_CSC_DEF_BOTH); | 196 | DP_COM_CONF_CSC_DEF_BOTH); |
| 197 | } else { | 197 | } else { |
| 198 | if (flow->foreground.in_cs == flow->out_cs) | 198 | if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN || |
| 199 | flow->foreground.in_cs == flow->out_cs) | ||
| 199 | /* | 200 | /* |
| 200 | * foreground identical to output, apply color | 201 | * foreground identical to output, apply color |
| 201 | * conversion on background | 202 | * conversion on background |
| @@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) | |||
| 261 | struct ipu_dp_priv *priv = flow->priv; | 262 | struct ipu_dp_priv *priv = flow->priv; |
| 262 | u32 reg, csc; | 263 | u32 reg, csc; |
| 263 | 264 | ||
| 265 | dp->in_cs = IPUV3_COLORSPACE_UNKNOWN; | ||
| 266 | |||
| 264 | if (!dp->foreground) | 267 | if (!dp->foreground) |
| 265 | return; | 268 | return; |
| 266 | 269 | ||
| @@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync) | |||
| 268 | 271 | ||
| 269 | reg = readl(flow->base + DP_COM_CONF); | 272 | reg = readl(flow->base + DP_COM_CONF); |
| 270 | csc = reg & DP_COM_CONF_CSC_DEF_MASK; | 273 | csc = reg & DP_COM_CONF_CSC_DEF_MASK; |
| 271 | if (csc == DP_COM_CONF_CSC_DEF_FG) | 274 | reg &= ~DP_COM_CONF_CSC_DEF_MASK; |
| 272 | reg &= ~DP_COM_CONF_CSC_DEF_MASK; | 275 | if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG) |
| 276 | reg |= DP_COM_CONF_CSC_DEF_BG; | ||
| 273 | 277 | ||
| 274 | reg &= ~DP_COM_CONF_FG_EN; | 278 | reg &= ~DP_COM_CONF_FG_EN; |
| 275 | writel(reg, flow->base + DP_COM_CONF); | 279 | writel(reg, flow->base + DP_COM_CONF); |
| @@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base) | |||
| 347 | mutex_init(&priv->mutex); | 351 | mutex_init(&priv->mutex); |
| 348 | 352 | ||
| 349 | for (i = 0; i < IPUV3_NUM_FLOWS; i++) { | 353 | for (i = 0; i < IPUV3_NUM_FLOWS; i++) { |
| 354 | priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN; | ||
| 355 | priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN; | ||
| 350 | priv->flow[i].foreground.foreground = true; | 356 | priv->flow[i].foreground.foreground = true; |
| 351 | priv->flow[i].base = priv->base + ipu_dp_flow_base[i]; | 357 | priv->flow[i].base = priv->base + ipu_dp_flow_base[i]; |
| 352 | priv->flow[i].priv = priv; | 358 | priv->flow[i].priv = priv; |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 6ca8d322b487..4ca0cdfa6b33 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -150,6 +150,7 @@ config HID_ASUS | |||
| 150 | tristate "Asus" | 150 | tristate "Asus" |
| 151 | depends on LEDS_CLASS | 151 | depends on LEDS_CLASS |
| 152 | depends on ASUS_WMI || ASUS_WMI=n | 152 | depends on ASUS_WMI || ASUS_WMI=n |
| 153 | select POWER_SUPPLY | ||
| 153 | ---help--- | 154 | ---help--- |
| 154 | Support for Asus notebook built-in keyboard and touchpad via i2c, and | 155 | Support for Asus notebook built-in keyboard and touchpad via i2c, and |
| 155 | the Asus Republic of Gamers laptop keyboard special keys. | 156 | the Asus Republic of Gamers laptop keyboard special keys. |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9993b692598f..860e21ec6a49 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n) | |||
| 1301 | u32 hid_field_extract(const struct hid_device *hid, u8 *report, | 1301 | u32 hid_field_extract(const struct hid_device *hid, u8 *report, |
| 1302 | unsigned offset, unsigned n) | 1302 | unsigned offset, unsigned n) |
| 1303 | { | 1303 | { |
| 1304 | if (n > 32) { | 1304 | if (n > 256) { |
| 1305 | hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", | 1305 | hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n", |
| 1306 | n, current->comm); | 1306 | n, current->comm); |
| 1307 | n = 32; | 1307 | n = 256; |
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | return __extract(report, offset, n); | 1310 | return __extract(report, offset, n); |
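The raised cap above bounds how many bits a single hid_field_extract() call may pull out of a report before the kernel clamps the request and warns. The helper below is a generic, self-contained illustration of little-endian bit-field extraction from a report buffer; it is not the kernel's __extract() and, because it returns a u32, it still clamps at 32 bits.

	/* Illustrative only: extract an n-bit little-endian field at bit 'offset'. */
	static u32 extract_bits(const u8 *report, unsigned int offset, unsigned int n)
	{
		u32 value = 0;
		unsigned int i;

		if (n > 32)
			n = 32;

		for (i = 0; i < n; i++) {
			unsigned int bit = offset + i;

			value |= (u32)((report[bit / 8] >> (bit % 8)) & 1) << i;
		}

		return value;
	}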
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index ac9fda1b5a72..1384e57182af 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
| @@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) | |||
| 1060 | seq_printf(f, "\n\n"); | 1060 | seq_printf(f, "\n\n"); |
| 1061 | 1061 | ||
| 1062 | /* dump parsed data and input mappings */ | 1062 | /* dump parsed data and input mappings */ |
| 1063 | if (down_interruptible(&hdev->driver_input_lock)) | ||
| 1064 | return 0; | ||
| 1065 | |||
| 1063 | hid_dump_device(hdev, f); | 1066 | hid_dump_device(hdev, f); |
| 1064 | seq_printf(f, "\n"); | 1067 | seq_printf(f, "\n"); |
| 1065 | hid_dump_input_mapping(hdev, f); | 1068 | hid_dump_input_mapping(hdev, f); |
| 1066 | 1069 | ||
| 1070 | up(&hdev->driver_input_lock); | ||
| 1071 | |||
| 1067 | return 0; | 1072 | return 0; |
| 1068 | } | 1073 | } |
| 1069 | 1074 | ||
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b6d93f4ad037..adce58f24f76 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -1083,6 +1083,7 @@ | |||
| 1083 | #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 | 1083 | #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 |
| 1084 | #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 | 1084 | #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 |
| 1085 | #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 | 1085 | #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 |
| 1086 | #define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e | ||
| 1086 | 1087 | ||
| 1087 | #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 | 1088 | #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 |
| 1088 | #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 | 1089 | #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b10b1922c5bd..b607286a0bc8 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
| @@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 680 | break; | 680 | break; |
| 681 | } | 681 | } |
| 682 | 682 | ||
| 683 | if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ | ||
| 684 | switch (usage->hid & 0xf) { | ||
| 685 | case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; | ||
| 686 | default: goto ignore; | ||
| 687 | } | ||
| 688 | break; | ||
| 689 | } | ||
| 690 | |||
| 683 | /* | 691 | /* |
| 684 | * Some lazy vendors declare 255 usages for System Control, | 692 | * Some lazy vendors declare 255 usages for System Control, |
| 685 | * leading to the creation of ABS_X|Y axis and too many others. | 693 | * leading to the creation of ABS_X|Y axis and too many others. |
| @@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 902 | case 0x06a: map_key_clear(KEY_GREEN); break; | 910 | case 0x06a: map_key_clear(KEY_GREEN); break; |
| 903 | case 0x06b: map_key_clear(KEY_BLUE); break; | 911 | case 0x06b: map_key_clear(KEY_BLUE); break; |
| 904 | case 0x06c: map_key_clear(KEY_YELLOW); break; | 912 | case 0x06c: map_key_clear(KEY_YELLOW); break; |
| 905 | case 0x06d: map_key_clear(KEY_ZOOM); break; | 913 | case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break; |
| 906 | 914 | ||
| 907 | case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; | 915 | case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; |
| 908 | case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; | 916 | case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; |
| @@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 911 | case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; | 919 | case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; |
| 912 | case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; | 920 | case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; |
| 913 | 921 | ||
| 922 | case 0x079: map_key_clear(KEY_KBDILLUMUP); break; | ||
| 923 | case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break; | ||
| 924 | case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break; | ||
| 925 | |||
| 914 | case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; | 926 | case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; |
| 915 | case 0x083: map_key_clear(KEY_LAST); break; | 927 | case 0x083: map_key_clear(KEY_LAST); break; |
| 916 | case 0x084: map_key_clear(KEY_ENTER); break; | 928 | case 0x084: map_key_clear(KEY_ENTER); break; |
| @@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 998 | case 0x1b8: map_key_clear(KEY_VIDEO); break; | 1010 | case 0x1b8: map_key_clear(KEY_VIDEO); break; |
| 999 | case 0x1bc: map_key_clear(KEY_MESSENGER); break; | 1011 | case 0x1bc: map_key_clear(KEY_MESSENGER); break; |
| 1000 | case 0x1bd: map_key_clear(KEY_INFO); break; | 1012 | case 0x1bd: map_key_clear(KEY_INFO); break; |
| 1013 | case 0x1cb: map_key_clear(KEY_ASSISTANT); break; | ||
| 1001 | case 0x201: map_key_clear(KEY_NEW); break; | 1014 | case 0x201: map_key_clear(KEY_NEW); break; |
| 1002 | case 0x202: map_key_clear(KEY_OPEN); break; | 1015 | case 0x202: map_key_clear(KEY_OPEN); break; |
| 1003 | case 0x203: map_key_clear(KEY_CLOSE); break; | 1016 | case 0x203: map_key_clear(KEY_CLOSE); break; |
| @@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 1021 | case 0x22d: map_key_clear(KEY_ZOOMIN); break; | 1034 | case 0x22d: map_key_clear(KEY_ZOOMIN); break; |
| 1022 | case 0x22e: map_key_clear(KEY_ZOOMOUT); break; | 1035 | case 0x22e: map_key_clear(KEY_ZOOMOUT); break; |
| 1023 | case 0x22f: map_key_clear(KEY_ZOOMRESET); break; | 1036 | case 0x22f: map_key_clear(KEY_ZOOMRESET); break; |
| 1037 | case 0x232: map_key_clear(KEY_FULL_SCREEN); break; | ||
| 1024 | case 0x233: map_key_clear(KEY_SCROLLUP); break; | 1038 | case 0x233: map_key_clear(KEY_SCROLLUP); break; |
| 1025 | case 0x234: map_key_clear(KEY_SCROLLDOWN); break; | 1039 | case 0x234: map_key_clear(KEY_SCROLLDOWN); break; |
| 1026 | case 0x238: /* AC Pan */ | 1040 | case 0x238: /* AC Pan */ |
| @@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel | |||
| 1044 | case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; | 1058 | case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; |
| 1045 | case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; | 1059 | case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; |
| 1046 | 1060 | ||
| 1061 | case 0x29f: map_key_clear(KEY_SCALE); break; | ||
| 1062 | |||
| 1047 | default: map_key_clear(KEY_UNKNOWN); | 1063 | default: map_key_clear(KEY_UNKNOWN); |
| 1048 | } | 1064 | } |
| 1049 | break; | 1065 | break; |
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 15ed6177a7a3..199cc256e9d9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c | |||
| @@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) | |||
| 2111 | kfree(data); | 2111 | kfree(data); |
| 2112 | return -ENOMEM; | 2112 | return -ENOMEM; |
| 2113 | } | 2113 | } |
| 2114 | data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); | ||
| 2115 | if (!data->wq) { | ||
| 2116 | kfree(data->effect_ids); | ||
| 2117 | kfree(data); | ||
| 2118 | return -ENOMEM; | ||
| 2119 | } | ||
| 2120 | |||
| 2114 | data->hidpp = hidpp; | 2121 | data->hidpp = hidpp; |
| 2115 | data->feature_index = feature_index; | 2122 | data->feature_index = feature_index; |
| 2116 | data->version = version; | 2123 | data->version = version; |
| @@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) | |||
| 2155 | /* ignore boost value at response.fap.params[2] */ | 2162 | /* ignore boost value at response.fap.params[2] */ |
| 2156 | 2163 | ||
| 2157 | /* init the hardware command queue */ | 2164 | /* init the hardware command queue */ |
| 2158 | data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); | ||
| 2159 | atomic_set(&data->workqueue_size, 0); | 2165 | atomic_set(&data->workqueue_size, 0); |
| 2160 | 2166 | ||
| 2161 | /* initialize with zero autocenter to get wheel in usable state */ | 2167 | /* initialize with zero autocenter to get wheel in usable state */ |
| @@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) | |||
| 2608 | input_report_rel(mydata->input, REL_Y, v); | 2614 | input_report_rel(mydata->input, REL_Y, v); |
| 2609 | 2615 | ||
| 2610 | v = hid_snto32(data[6], 8); | 2616 | v = hid_snto32(data[6], 8); |
| 2611 | hidpp_scroll_counter_handle_scroll( | 2617 | if (v != 0) |
| 2612 | &hidpp->vertical_wheel_counter, v); | 2618 | hidpp_scroll_counter_handle_scroll( |
| 2619 | &hidpp->vertical_wheel_counter, v); | ||
| 2613 | 2620 | ||
| 2614 | input_sync(mydata->input); | 2621 | input_sync(mydata->input); |
| 2615 | } | 2622 | } |
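The hidpp_ff_init() hunks above move the workqueue allocation next to the other allocations so that a failure can unwind everything acquired before it. A minimal user-space sketch of that release-in-reverse-order pattern, with hypothetical names and stand-ins for the kernel allocators:

    #include <stdio.h>
    #include <stdlib.h>

    struct ff_data {
        int *effect_ids;   /* stands in for data->effect_ids */
        FILE *queue;       /* stands in for the single-threaded workqueue */
    };

    static struct ff_data *ff_data_create(void)
    {
        struct ff_data *data = calloc(1, sizeof(*data));
        if (!data)
            return NULL;

        data->effect_ids = calloc(16, sizeof(*data->effect_ids));
        if (!data->effect_ids)
            goto err_free_data;

        data->queue = tmpfile();   /* may fail, like create_singlethread_workqueue() */
        if (!data->queue)
            goto err_free_ids;

        return data;

    err_free_ids:                  /* unwind in reverse order of acquisition */
        free(data->effect_ids);
    err_free_data:
        free(data);
        return NULL;
    }

    int main(void)
    {
        struct ff_data *data = ff_data_create();

        printf("init %s\n", data ? "ok" : "failed");
        if (data) {
            fclose(data->queue);
            free(data->effect_ids);
            free(data);
        }
        return 0;
    }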
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 953908f2267c..77ffba48cc73 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
| @@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
| 715 | { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, | 715 | { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, |
| 716 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, | 716 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, |
| 717 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, | 717 | { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, |
| 718 | { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, | ||
| 719 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, | 718 | { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, |
| 720 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, | 719 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, |
| 721 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, | 720 | { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, |
| @@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
| 855 | { } | 854 | { } |
| 856 | }; | 855 | }; |
| 857 | 856 | ||
| 858 | /** | 857 | /* |
| 859 | * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer | 858 | * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer |
| 860 | * | 859 | * |
| 861 | * There are composite devices for which we want to ignore only a certain | 860 | * There are composite devices for which we want to ignore only a certain |
| @@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev) | |||
| 996 | if (hdev->product == 0x0401 && | 995 | if (hdev->product == 0x0401 && |
| 997 | strncmp(hdev->name, "ELAN0800", 8) != 0) | 996 | strncmp(hdev->name, "ELAN0800", 8) != 0) |
| 998 | return true; | 997 | return true; |
| 998 | /* Same with product id 0x0400 */ | ||
| 999 | if (hdev->product == 0x0400 && | ||
| 1000 | strncmp(hdev->name, "QTEC0001", 8) != 0) | ||
| 1001 | return true; | ||
| 999 | break; | 1002 | break; |
| 1000 | } | 1003 | } |
| 1001 | 1004 | ||
| @@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev) | |||
| 1042 | } | 1045 | } |
| 1043 | 1046 | ||
| 1044 | if (bl_entry != NULL) | 1047 | if (bl_entry != NULL) |
| 1045 | dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", | 1048 | dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n", |
| 1046 | bl_entry->driver_data, bl_entry->vendor, | 1049 | bl_entry->driver_data, bl_entry->vendor, |
| 1047 | bl_entry->product); | 1050 | bl_entry->product); |
| 1048 | 1051 | ||
| @@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev) | |||
| 1209 | quirks |= bl_entry->driver_data; | 1212 | quirks |= bl_entry->driver_data; |
| 1210 | 1213 | ||
| 1211 | if (quirks) | 1214 | if (quirks) |
| 1212 | dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", | 1215 | dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n", |
| 1213 | quirks, hdev->vendor, hdev->product); | 1216 | quirks, hdev->vendor, hdev->product); |
| 1214 | return quirks; | 1217 | return quirks; |
| 1215 | } | 1218 | } |
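The dbg_hid() changes in hid-quirks.c swap the %hx specifiers for %04x, so vendor and product IDs always print as four zero-padded hex digits and the specifier matches the 32-bit fields being passed. A tiny user-space illustration of the formatting difference, with made-up IDs:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vendor = 0x4d9, product = 0x70;   /* arbitrary example IDs */

        /* old style: short format, no padding */
        printf("0x%hx:0x%hx\n", (unsigned short)vendor, (unsigned short)product);  /* 0x4d9:0x70   */
        /* new style: fixed-width, zero padded */
        printf("0x%04x:0x%04x\n", vendor, product);                                /* 0x04d9:0x0070 */
        return 0;
    }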
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8141cadfca0e..8dae0f9b819e 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c | |||
| @@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam) | |||
| 499 | static int steam_register(struct steam_device *steam) | 499 | static int steam_register(struct steam_device *steam) |
| 500 | { | 500 | { |
| 501 | int ret; | 501 | int ret; |
| 502 | bool client_opened; | ||
| 502 | 503 | ||
| 503 | /* | 504 | /* |
| 504 | * This function can be called several times in a row with the | 505 | * This function can be called several times in a row with the |
| @@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam) | |||
| 511 | * Unlikely, but getting the serial could fail, and it is not so | 512 | * Unlikely, but getting the serial could fail, and it is not so |
| 512 | * important, so make up a serial number and go on. | 513 | * important, so make up a serial number and go on. |
| 513 | */ | 514 | */ |
| 515 | mutex_lock(&steam->mutex); | ||
| 514 | if (steam_get_serial(steam) < 0) | 516 | if (steam_get_serial(steam) < 0) |
| 515 | strlcpy(steam->serial_no, "XXXXXXXXXX", | 517 | strlcpy(steam->serial_no, "XXXXXXXXXX", |
| 516 | sizeof(steam->serial_no)); | 518 | sizeof(steam->serial_no)); |
| 519 | mutex_unlock(&steam->mutex); | ||
| 517 | 520 | ||
| 518 | hid_info(steam->hdev, "Steam Controller '%s' connected", | 521 | hid_info(steam->hdev, "Steam Controller '%s' connected", |
| 519 | steam->serial_no); | 522 | steam->serial_no); |
| @@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam) | |||
| 528 | } | 531 | } |
| 529 | 532 | ||
| 530 | mutex_lock(&steam->mutex); | 533 | mutex_lock(&steam->mutex); |
| 531 | if (!steam->client_opened) { | 534 | client_opened = steam->client_opened; |
| 535 | if (!client_opened) | ||
| 532 | steam_set_lizard_mode(steam, lizard_mode); | 536 | steam_set_lizard_mode(steam, lizard_mode); |
| 537 | mutex_unlock(&steam->mutex); | ||
| 538 | |||
| 539 | if (!client_opened) | ||
| 533 | ret = steam_input_register(steam); | 540 | ret = steam_input_register(steam); |
| 534 | } else { | 541 | else |
| 535 | ret = 0; | 542 | ret = 0; |
| 536 | } | ||
| 537 | mutex_unlock(&steam->mutex); | ||
| 538 | 543 | ||
| 539 | return ret; | 544 | return ret; |
| 540 | } | 545 | } |
| @@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev) | |||
| 630 | { | 635 | { |
| 631 | struct steam_device *steam = hdev->driver_data; | 636 | struct steam_device *steam = hdev->driver_data; |
| 632 | 637 | ||
| 638 | unsigned long flags; | ||
| 639 | bool connected; | ||
| 640 | |||
| 641 | spin_lock_irqsave(&steam->lock, flags); | ||
| 642 | connected = steam->connected; | ||
| 643 | spin_unlock_irqrestore(&steam->lock, flags); | ||
| 644 | |||
| 633 | mutex_lock(&steam->mutex); | 645 | mutex_lock(&steam->mutex); |
| 634 | steam->client_opened = false; | 646 | steam->client_opened = false; |
| 647 | if (connected) | ||
| 648 | steam_set_lizard_mode(steam, lizard_mode); | ||
| 635 | mutex_unlock(&steam->mutex); | 649 | mutex_unlock(&steam->mutex); |
| 636 | 650 | ||
| 637 | if (steam->connected) { | 651 | if (connected) |
| 638 | steam_set_lizard_mode(steam, lizard_mode); | ||
| 639 | steam_input_register(steam); | 652 | steam_input_register(steam); |
| 640 | } | ||
| 641 | } | 653 | } |
| 642 | 654 | ||
| 643 | static int steam_client_ll_raw_request(struct hid_device *hdev, | 655 | static int steam_client_ll_raw_request(struct hid_device *hdev, |
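The hid-steam rework reads the flags it needs while the mutex or spinlock is held, drops the lock, and only then does the slower follow-up work such as registering the input device. A generic pthread sketch of that "snapshot under the lock, act on the copy outside it" pattern, with illustrative names only:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool client_opened;           /* protected by lock */

    static void register_input(void)     /* stands in for steam_input_register() */
    {
        puts("registering input device");
    }

    static void handle_register(void)
    {
        bool opened;

        pthread_mutex_lock(&lock);
        opened = client_opened;          /* take a snapshot while holding the lock */
        pthread_mutex_unlock(&lock);

        if (!opened)                     /* heavier work runs without the lock held */
            register_input();
    }

    int main(void)
    {
        handle_register();
        return 0;
    }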
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 7710d9f957da..0187c9f8fc22 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c | |||
| @@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params, | |||
| 735 | goto cleanup; | 735 | goto cleanup; |
| 736 | } | 736 | } |
| 737 | rc = usb_string(udev, 201, ver_ptr, ver_len); | 737 | rc = usb_string(udev, 201, ver_ptr, ver_len); |
| 738 | if (ver_ptr == NULL) { | ||
| 739 | rc = -ENOMEM; | ||
| 740 | goto cleanup; | ||
| 741 | } | ||
| 742 | if (rc == -EPIPE) { | 738 | if (rc == -EPIPE) { |
| 743 | *ver_ptr = '\0'; | 739 | *ver_ptr = '\0'; |
| 744 | } else if (rc < 0) { | 740 | } else if (rc < 0) { |
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 90164fed08d3..4d1f24ee249c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c | |||
| @@ -184,6 +184,8 @@ static const struct i2c_hid_quirks { | |||
| 184 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | 184 | I2C_HID_QUIRK_NO_RUNTIME_PM }, |
| 185 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, | 185 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, |
| 186 | I2C_HID_QUIRK_BOGUS_IRQ }, | 186 | I2C_HID_QUIRK_BOGUS_IRQ }, |
| 187 | { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E, | ||
| 188 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | ||
| 187 | { 0, 0 } | 189 | { 0, 0 } |
| 188 | }; | 190 | }; |
| 189 | 191 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6f929bfa9fcd..d0f1dfe2bcbb 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -1759,6 +1759,7 @@ config SENSORS_VT8231 | |||
| 1759 | config SENSORS_W83773G | 1759 | config SENSORS_W83773G |
| 1760 | tristate "Nuvoton W83773G" | 1760 | tristate "Nuvoton W83773G" |
| 1761 | depends on I2C | 1761 | depends on I2C |
| 1762 | select REGMAP_I2C | ||
| 1762 | help | 1763 | help |
| 1763 | If you say yes here you get support for the Nuvoton W83773G hardware | 1764 | If you say yes here you get support for the Nuvoton W83773G hardware |
| 1764 | monitoring chip. | 1765 | monitoring chip. |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index e4f9f7ce92fa..f9abeeeead9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
| @@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = { | |||
| 640 | }; | 640 | }; |
| 641 | 641 | ||
| 642 | static const u32 ntc_temp_config[] = { | 642 | static const u32 ntc_temp_config[] = { |
| 643 | HWMON_T_INPUT, HWMON_T_TYPE, | 643 | HWMON_T_INPUT | HWMON_T_TYPE, |
| 644 | 0 | 644 | 0 |
| 645 | }; | 645 | }; |
| 646 | 646 | ||
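The ntc_thermistor fix merges HWMON_T_INPUT and HWMON_T_TYPE into one array element: each u32 in a hwmon channel-config array describes one channel as a bitmask of attributes, so a stray comma turns the second attribute into a phantom extra channel. A small stand-alone illustration with made-up flag values:

    #include <stdio.h>

    #define T_INPUT (1u << 0)       /* illustrative values, not the kernel's */
    #define T_TYPE  (1u << 1)

    /* one entry per channel, zero-terminated, each entry a bitmask of attributes */
    static const unsigned int wrong[] = { T_INPUT, T_TYPE, 0 };  /* two channels  */
    static const unsigned int right[] = { T_INPUT | T_TYPE, 0 }; /* one channel   */

    static int count_channels(const unsigned int *cfg)
    {
        int n = 0;

        while (cfg[n])
            n++;
        return n;
    }

    int main(void)
    {
        printf("comma-separated: %d channels\n", count_channels(wrong));
        printf("OR-combined:     %d channel(s)\n", count_channels(right));
        return 0;
    }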
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index b91a80abf724..4679acb4918e 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
| @@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ) | |||
| 890 | s++; | 890 | s++; |
| 891 | } | 891 | } |
| 892 | } | 892 | } |
| 893 | |||
| 894 | s = (sensors->power.num_sensors * 4) + 1; | ||
| 893 | } else { | 895 | } else { |
| 894 | for (i = 0; i < sensors->power.num_sensors; ++i) { | 896 | for (i = 0; i < sensors->power.num_sensors; ++i) { |
| 895 | s = i + 1; | 897 | s = i + 1; |
| @@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ) | |||
| 918 | show_power, NULL, 3, i); | 920 | show_power, NULL, 3, i); |
| 919 | attr++; | 921 | attr++; |
| 920 | } | 922 | } |
| 921 | } | ||
| 922 | 923 | ||
| 923 | if (sensors->caps.num_sensors >= 1) { | ||
| 924 | s = sensors->power.num_sensors + 1; | 924 | s = sensors->power.num_sensors + 1; |
| 925 | } | ||
| 925 | 926 | ||
| 927 | if (sensors->caps.num_sensors >= 1) { | ||
| 926 | snprintf(attr->name, sizeof(attr->name), "power%d_label", s); | 928 | snprintf(attr->name, sizeof(attr->name), "power%d_label", s); |
| 927 | attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, | 929 | attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, |
| 928 | 0, 0); | 930 | 0, 0); |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 42fed40198a0..c0c3043b5d61 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
| 1169 | /* Init DMA config if supported */ | 1169 | /* Init DMA config if supported */ |
| 1170 | ret = i2c_imx_dma_request(i2c_imx, phy_addr); | 1170 | ret = i2c_imx_dma_request(i2c_imx, phy_addr); |
| 1171 | if (ret < 0) | 1171 | if (ret < 0) |
| 1172 | goto clk_notifier_unregister; | 1172 | goto del_adapter; |
| 1173 | 1173 | ||
| 1174 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); | 1174 | dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); |
| 1175 | return 0; /* Return OK */ | 1175 | return 0; /* Return OK */ |
| 1176 | 1176 | ||
| 1177 | del_adapter: | ||
| 1178 | i2c_del_adapter(&i2c_imx->adapter); | ||
| 1177 | clk_notifier_unregister: | 1179 | clk_notifier_unregister: |
| 1178 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); | 1180 | clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); |
| 1179 | rpm_disable: | 1181 | rpm_disable: |
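The i2c-imx change adds a del_adapter label so that a late DMA failure also undoes the adapter registration that succeeded earlier in probe. The goto-ladder idiom it relies on, reduced to a generic form with hypothetical step names:

    #include <stdio.h>

    static int step_ok(const char *name) { printf("do   %s\n", name); return 0; }
    static void undo(const char *name)   { printf("undo %s\n", name); }

    static int probe(int fail_last_step)
    {
        int ret;

        ret = step_ok("register adapter");
        if (ret)
            goto out;

        ret = fail_last_step ? -1 : step_ok("request DMA");
        if (ret)
            goto del_adapter;   /* every later failure unwinds everything before it */

        return 0;

    del_adapter:
        undo("register adapter");
    out:
        return ret;
    }

    int main(void)
    {
        printf("probe returned %d\n", probe(1));   /* exercise the error path */
        return 0;
    }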
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 2dc628d4f1ae..1412abcff010 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c | |||
| @@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, | |||
| 1980 | { | 1980 | { |
| 1981 | struct i3c_dev_boardinfo *boardinfo; | 1981 | struct i3c_dev_boardinfo *boardinfo; |
| 1982 | struct device *dev = &master->dev; | 1982 | struct device *dev = &master->dev; |
| 1983 | struct i3c_device_info info = { }; | ||
| 1984 | enum i3c_addr_slot_status addrstatus; | 1983 | enum i3c_addr_slot_status addrstatus; |
| 1985 | u32 init_dyn_addr = 0; | 1984 | u32 init_dyn_addr = 0; |
| 1986 | 1985 | ||
| @@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, | |||
| 2012 | 2011 | ||
| 2013 | boardinfo->pid = ((u64)reg[1] << 32) | reg[2]; | 2012 | boardinfo->pid = ((u64)reg[1] << 32) | reg[2]; |
| 2014 | 2013 | ||
| 2015 | if ((info.pid & GENMASK_ULL(63, 48)) || | 2014 | if ((boardinfo->pid & GENMASK_ULL(63, 48)) || |
| 2016 | I3C_PID_RND_LOWER_32BITS(info.pid)) | 2015 | I3C_PID_RND_LOWER_32BITS(boardinfo->pid)) |
| 2017 | return -EINVAL; | 2016 | return -EINVAL; |
| 2018 | 2017 | ||
| 2019 | boardinfo->init_dyn_addr = init_dyn_addr; | 2018 | boardinfo->init_dyn_addr = init_dyn_addr; |
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 59279224e07f..10c26ffaa8ef 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c | |||
| @@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master) | |||
| 300 | 300 | ||
| 301 | static void dw_i3c_master_disable(struct dw_i3c_master *master) | 301 | static void dw_i3c_master_disable(struct dw_i3c_master *master) |
| 302 | { | 302 | { |
| 303 | writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE, | 303 | writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE, |
| 304 | master->regs + DEVICE_CTRL); | 304 | master->regs + DEVICE_CTRL); |
| 305 | } | 305 | } |
| 306 | 306 | ||
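The dw-i3c-master fix is a one-character bit operation: val & DEV_CTRL_ENABLE keeps only the enable bit (discarding the rest of the register), while val & ~DEV_CTRL_ENABLE clears the enable bit and preserves everything else. A two-line demonstration with an example bit position:

    #include <stdio.h>

    #define DEV_CTRL_ENABLE (1u << 31)   /* example bit position */

    int main(void)
    {
        unsigned int reg = DEV_CTRL_ENABLE | 0x5au;  /* enable bit plus other config bits */

        printf("buggy: 0x%08x\n", reg & DEV_CTRL_ENABLE);   /* 0x80000000: config lost, enable kept   */
        printf("fixed: 0x%08x\n", reg & ~DEV_CTRL_ENABLE);  /* 0x0000005a: enable cleared, config kept */
        return 0;
    }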
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 7096e577b23f..50f3ff386bea 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c | |||
| @@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev) | |||
| 1437 | 1437 | ||
| 1438 | mutex_lock(&data->mutex); | 1438 | mutex_lock(&data->mutex); |
| 1439 | ret = kxcjk1013_set_mode(data, OPERATION); | 1439 | ret = kxcjk1013_set_mode(data, OPERATION); |
| 1440 | if (ret == 0) | ||
| 1441 | ret = kxcjk1013_set_range(data, data->range); | ||
| 1440 | mutex_unlock(&data->mutex); | 1442 | mutex_unlock(&data->mutex); |
| 1441 | 1443 | ||
| 1442 | return ret; | 1444 | return ret; |
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index ff5f2da2e1b1..54d9978b2740 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c | |||
| @@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, | |||
| 121 | if (sigma_delta->info->has_registers) { | 121 | if (sigma_delta->info->has_registers) { |
| 122 | data[0] = reg << sigma_delta->info->addr_shift; | 122 | data[0] = reg << sigma_delta->info->addr_shift; |
| 123 | data[0] |= sigma_delta->info->read_mask; | 123 | data[0] |= sigma_delta->info->read_mask; |
| 124 | data[0] |= sigma_delta->comm; | ||
| 124 | spi_message_add_tail(&t[0], &m); | 125 | spi_message_add_tail(&t[0], &m); |
| 125 | } | 126 | } |
| 126 | spi_message_add_tail(&t[1], &m); | 127 | spi_message_add_tail(&t[1], &m); |
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 75d2f73582a3..596841a3c4db 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c | |||
| @@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev, | |||
| 704 | ret = wait_event_interruptible_timeout(st->wq_data_avail, | 704 | ret = wait_event_interruptible_timeout(st->wq_data_avail, |
| 705 | st->done, | 705 | st->done, |
| 706 | msecs_to_jiffies(1000)); | 706 | msecs_to_jiffies(1000)); |
| 707 | if (ret == 0) | ||
| 708 | ret = -ETIMEDOUT; | ||
| 709 | if (ret < 0) { | ||
| 710 | mutex_unlock(&st->lock); | ||
| 711 | return ret; | ||
| 712 | } | ||
| 713 | |||
| 714 | *val = st->last_value; | ||
| 715 | 707 | ||
| 708 | /* Disable interrupts, regardless if adc conversion was | ||
| 709 | * successful or not | ||
| 710 | */ | ||
| 716 | at91_adc_writel(st, AT91_ADC_CHDR, | 711 | at91_adc_writel(st, AT91_ADC_CHDR, |
| 717 | AT91_ADC_CH(chan->channel)); | 712 | AT91_ADC_CH(chan->channel)); |
| 718 | at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); | 713 | at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); |
| 719 | 714 | ||
| 720 | st->last_value = 0; | 715 | if (ret > 0) { |
| 721 | st->done = false; | 716 | /* a valid conversion took place */ |
| 717 | *val = st->last_value; | ||
| 718 | st->last_value = 0; | ||
| 719 | st->done = false; | ||
| 720 | ret = IIO_VAL_INT; | ||
| 721 | } else if (ret == 0) { | ||
| 722 | /* conversion timeout */ | ||
| 723 | dev_err(&idev->dev, "ADC Channel %d timeout.\n", | ||
| 724 | chan->channel); | ||
| 725 | ret = -ETIMEDOUT; | ||
| 726 | } | ||
| 727 | |||
| 722 | mutex_unlock(&st->lock); | 728 | mutex_unlock(&st->lock); |
| 723 | return IIO_VAL_INT; | 729 | return ret; |
| 724 | 730 | ||
| 725 | case IIO_CHAN_INFO_SCALE: | 731 | case IIO_CHAN_INFO_SCALE: |
| 726 | *val = st->vref_mv; | 732 | *val = st->vref_mv; |
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index b13c61539d46..6401ca7a9a20 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c | |||
| @@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev) | |||
| 1292 | 1292 | ||
| 1293 | err_free_irq: | 1293 | err_free_irq: |
| 1294 | free_irq(xadc->irq, indio_dev); | 1294 | free_irq(xadc->irq, indio_dev); |
| 1295 | cancel_delayed_work_sync(&xadc->zynq_unmask_work); | ||
| 1295 | err_clk_disable_unprepare: | 1296 | err_clk_disable_unprepare: |
| 1296 | clk_disable_unprepare(xadc->clk); | 1297 | clk_disable_unprepare(xadc->clk); |
| 1297 | err_free_samplerate_trigger: | 1298 | err_free_samplerate_trigger: |
| @@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev) | |||
| 1321 | iio_triggered_buffer_cleanup(indio_dev); | 1322 | iio_triggered_buffer_cleanup(indio_dev); |
| 1322 | } | 1323 | } |
| 1323 | free_irq(xadc->irq, indio_dev); | 1324 | free_irq(xadc->irq, indio_dev); |
| 1325 | cancel_delayed_work_sync(&xadc->zynq_unmask_work); | ||
| 1324 | clk_disable_unprepare(xadc->clk); | 1326 | clk_disable_unprepare(xadc->clk); |
| 1325 | cancel_delayed_work(&xadc->zynq_unmask_work); | ||
| 1326 | kfree(xadc->data); | 1327 | kfree(xadc->data); |
| 1327 | kfree(indio_dev->channels); | 1328 | kfree(indio_dev->channels); |
| 1328 | 1329 | ||
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index d5d146e9e372..92c684d2b67e 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig | |||
| @@ -64,6 +64,7 @@ config IAQCORE | |||
| 64 | config PMS7003 | 64 | config PMS7003 |
| 65 | tristate "Plantower PMS7003 particulate matter sensor" | 65 | tristate "Plantower PMS7003 particulate matter sensor" |
| 66 | depends on SERIAL_DEV_BUS | 66 | depends on SERIAL_DEV_BUS |
| 67 | select IIO_TRIGGERED_BUFFER | ||
| 67 | help | 68 | help |
| 68 | Say Y here to build support for the Plantower PMS7003 particulate | 69 | Say Y here to build support for the Plantower PMS7003 particulate |
| 69 | matter sensor. | 70 | matter sensor. |
| @@ -71,6 +72,19 @@ config PMS7003 | |||
| 71 | To compile this driver as a module, choose M here: the module will | 72 | To compile this driver as a module, choose M here: the module will |
| 72 | be called pms7003. | 73 | be called pms7003. |
| 73 | 74 | ||
| 75 | config SENSIRION_SGP30 | ||
| 76 | tristate "Sensirion SGPxx gas sensors" | ||
| 77 | depends on I2C | ||
| 78 | select CRC8 | ||
| 79 | help | ||
| 80 | Say Y here to build I2C interface support for the following | ||
| 81 | Sensirion SGP gas sensors: | ||
| 82 | * SGP30 gas sensor | ||
| 83 | * SGPC3 low power gas sensor | ||
| 84 | |||
| 85 | To compile this driver as module, choose M here: the | ||
| 86 | module will be called sgp30. | ||
| 87 | |||
| 74 | config SPS30 | 88 | config SPS30 |
| 75 | tristate "SPS30 particulate matter sensor" | 89 | tristate "SPS30 particulate matter sensor" |
| 76 | depends on I2C | 90 | depends on I2C |
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index 0ae89b87e2d6..4edc5d21cb9f 100644 --- a/drivers/iio/chemical/bme680.h +++ b/drivers/iio/chemical/bme680.h | |||
| @@ -2,11 +2,9 @@ | |||
| 2 | #ifndef BME680_H_ | 2 | #ifndef BME680_H_ |
| 3 | #define BME680_H_ | 3 | #define BME680_H_ |
| 4 | 4 | ||
| 5 | #define BME680_REG_CHIP_I2C_ID 0xD0 | 5 | #define BME680_REG_CHIP_ID 0xD0 |
| 6 | #define BME680_REG_CHIP_SPI_ID 0x50 | ||
| 7 | #define BME680_CHIP_ID_VAL 0x61 | 6 | #define BME680_CHIP_ID_VAL 0x61 |
| 8 | #define BME680_REG_SOFT_RESET_I2C 0xE0 | 7 | #define BME680_REG_SOFT_RESET 0xE0 |
| 9 | #define BME680_REG_SOFT_RESET_SPI 0x60 | ||
| 10 | #define BME680_CMD_SOFTRESET 0xB6 | 8 | #define BME680_CMD_SOFTRESET 0xB6 |
| 11 | #define BME680_REG_STATUS 0x73 | 9 | #define BME680_REG_STATUS 0x73 |
| 12 | #define BME680_SPI_MEM_PAGE_BIT BIT(4) | 10 | #define BME680_SPI_MEM_PAGE_BIT BIT(4) |
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 70c1fe4366f4..ccde4c65ff93 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c | |||
| @@ -63,9 +63,23 @@ struct bme680_data { | |||
| 63 | s32 t_fine; | 63 | s32 t_fine; |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | static const struct regmap_range bme680_volatile_ranges[] = { | ||
| 67 | regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB), | ||
| 68 | regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS), | ||
| 69 | regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG), | ||
| 70 | }; | ||
| 71 | |||
| 72 | static const struct regmap_access_table bme680_volatile_table = { | ||
| 73 | .yes_ranges = bme680_volatile_ranges, | ||
| 74 | .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges), | ||
| 75 | }; | ||
| 76 | |||
| 66 | const struct regmap_config bme680_regmap_config = { | 77 | const struct regmap_config bme680_regmap_config = { |
| 67 | .reg_bits = 8, | 78 | .reg_bits = 8, |
| 68 | .val_bits = 8, | 79 | .val_bits = 8, |
| 80 | .max_register = 0xef, | ||
| 81 | .volatile_table = &bme680_volatile_table, | ||
| 82 | .cache_type = REGCACHE_RBTREE, | ||
| 69 | }; | 83 | }; |
| 70 | EXPORT_SYMBOL(bme680_regmap_config); | 84 | EXPORT_SYMBOL(bme680_regmap_config); |
| 71 | 85 | ||
| @@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data, | |||
| 316 | s64 var1, var2, var3; | 330 | s64 var1, var2, var3; |
| 317 | s16 calc_temp; | 331 | s16 calc_temp; |
| 318 | 332 | ||
| 333 | /* If the calibration is invalid, attempt to reload it */ | ||
| 334 | if (!calib->par_t2) | ||
| 335 | bme680_read_calib(data, calib); | ||
| 336 | |||
| 319 | var1 = (adc_temp >> 3) - (calib->par_t1 << 1); | 337 | var1 = (adc_temp >> 3) - (calib->par_t1 << 1); |
| 320 | var2 = (var1 * calib->par_t2) >> 11; | 338 | var2 = (var1 * calib->par_t2) >> 11; |
| 321 | var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; | 339 | var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; |
| @@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data) | |||
| 583 | return ret; | 601 | return ret; |
| 584 | } | 602 | } |
| 585 | 603 | ||
| 586 | static int bme680_read_temp(struct bme680_data *data, | 604 | static int bme680_read_temp(struct bme680_data *data, int *val) |
| 587 | int *val, int *val2) | ||
| 588 | { | 605 | { |
| 589 | struct device *dev = regmap_get_device(data->regmap); | 606 | struct device *dev = regmap_get_device(data->regmap); |
| 590 | int ret; | 607 | int ret; |
| @@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data, | |||
| 617 | * compensate_press/compensate_humid to get compensated | 634 | * compensate_press/compensate_humid to get compensated |
| 618 | * pressure/humidity readings. | 635 | * pressure/humidity readings. |
| 619 | */ | 636 | */ |
| 620 | if (val && val2) { | 637 | if (val) { |
| 621 | *val = comp_temp; | 638 | *val = comp_temp * 10; /* Centidegrees to millidegrees */ |
| 622 | *val2 = 100; | 639 | return IIO_VAL_INT; |
| 623 | return IIO_VAL_FRACTIONAL; | ||
| 624 | } | 640 | } |
| 625 | 641 | ||
| 626 | return ret; | 642 | return ret; |
| @@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data, | |||
| 635 | s32 adc_press; | 651 | s32 adc_press; |
| 636 | 652 | ||
| 637 | /* Read and compensate temperature to get a reading of t_fine */ | 653 | /* Read and compensate temperature to get a reading of t_fine */ |
| 638 | ret = bme680_read_temp(data, NULL, NULL); | 654 | ret = bme680_read_temp(data, NULL); |
| 639 | if (ret < 0) | 655 | if (ret < 0) |
| 640 | return ret; | 656 | return ret; |
| 641 | 657 | ||
| @@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data, | |||
| 668 | u32 comp_humidity; | 684 | u32 comp_humidity; |
| 669 | 685 | ||
| 670 | /* Read and compensate temperature to get a reading of t_fine */ | 686 | /* Read and compensate temperature to get a reading of t_fine */ |
| 671 | ret = bme680_read_temp(data, NULL, NULL); | 687 | ret = bme680_read_temp(data, NULL); |
| 672 | if (ret < 0) | 688 | if (ret < 0) |
| 673 | return ret; | 689 | return ret; |
| 674 | 690 | ||
| @@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev, | |||
| 761 | case IIO_CHAN_INFO_PROCESSED: | 777 | case IIO_CHAN_INFO_PROCESSED: |
| 762 | switch (chan->type) { | 778 | switch (chan->type) { |
| 763 | case IIO_TEMP: | 779 | case IIO_TEMP: |
| 764 | return bme680_read_temp(data, val, val2); | 780 | return bme680_read_temp(data, val); |
| 765 | case IIO_PRESSURE: | 781 | case IIO_PRESSURE: |
| 766 | return bme680_read_press(data, val, val2); | 782 | return bme680_read_press(data, val, val2); |
| 767 | case IIO_HUMIDITYRELATIVE: | 783 | case IIO_HUMIDITYRELATIVE: |
| @@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, | |||
| 867 | { | 883 | { |
| 868 | struct iio_dev *indio_dev; | 884 | struct iio_dev *indio_dev; |
| 869 | struct bme680_data *data; | 885 | struct bme680_data *data; |
| 886 | unsigned int val; | ||
| 870 | int ret; | 887 | int ret; |
| 871 | 888 | ||
| 889 | ret = regmap_write(regmap, BME680_REG_SOFT_RESET, | ||
| 890 | BME680_CMD_SOFTRESET); | ||
| 891 | if (ret < 0) { | ||
| 892 | dev_err(dev, "Failed to reset chip\n"); | ||
| 893 | return ret; | ||
| 894 | } | ||
| 895 | |||
| 896 | ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); | ||
| 897 | if (ret < 0) { | ||
| 898 | dev_err(dev, "Error reading chip ID\n"); | ||
| 899 | return ret; | ||
| 900 | } | ||
| 901 | |||
| 902 | if (val != BME680_CHIP_ID_VAL) { | ||
| 903 | dev_err(dev, "Wrong chip ID, got %x expected %x\n", | ||
| 904 | val, BME680_CHIP_ID_VAL); | ||
| 905 | return -ENODEV; | ||
| 906 | } | ||
| 907 | |||
| 872 | indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); | 908 | indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); |
| 873 | if (!indio_dev) | 909 | if (!indio_dev) |
| 874 | return -ENOMEM; | 910 | return -ENOMEM; |
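The bme680_read_temp() change reports millidegrees directly (IIO_VAL_INT) instead of a centidegree value paired with a divisor of 100 (IIO_VAL_FRACTIONAL); both encodings describe the same temperature, the new one just drops the second field. A quick arithmetic check with a sample compensated reading:

    #include <stdio.h>

    int main(void)
    {
        int comp_temp = 2537;   /* example compensated value in centidegrees C */

        /* old encoding: value = comp_temp / 100 (IIO_VAL_FRACTIONAL) */
        printf("fractional: %d/100   = %.2f degC\n", comp_temp, comp_temp / 100.0);

        /* new encoding: value = comp_temp * 10 millidegrees (IIO_VAL_INT) */
        printf("integer:    %d mdegC = %.2f degC\n", comp_temp * 10, comp_temp * 10 / 1000.0);
        return 0;
    }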
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c index b2f805b6b36a..de9c9e3d23ea 100644 --- a/drivers/iio/chemical/bme680_i2c.c +++ b/drivers/iio/chemical/bme680_i2c.c | |||
| @@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client, | |||
| 23 | { | 23 | { |
| 24 | struct regmap *regmap; | 24 | struct regmap *regmap; |
| 25 | const char *name = NULL; | 25 | const char *name = NULL; |
| 26 | unsigned int val; | ||
| 27 | int ret; | ||
| 28 | 26 | ||
| 29 | regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); | 27 | regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); |
| 30 | if (IS_ERR(regmap)) { | 28 | if (IS_ERR(regmap)) { |
| @@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client, | |||
| 33 | return PTR_ERR(regmap); | 31 | return PTR_ERR(regmap); |
| 34 | } | 32 | } |
| 35 | 33 | ||
| 36 | ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C, | ||
| 37 | BME680_CMD_SOFTRESET); | ||
| 38 | if (ret < 0) { | ||
| 39 | dev_err(&client->dev, "Failed to reset chip\n"); | ||
| 40 | return ret; | ||
| 41 | } | ||
| 42 | |||
| 43 | ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val); | ||
| 44 | if (ret < 0) { | ||
| 45 | dev_err(&client->dev, "Error reading I2C chip ID\n"); | ||
| 46 | return ret; | ||
| 47 | } | ||
| 48 | |||
| 49 | if (val != BME680_CHIP_ID_VAL) { | ||
| 50 | dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n", | ||
| 51 | val, BME680_CHIP_ID_VAL); | ||
| 52 | return -ENODEV; | ||
| 53 | } | ||
| 54 | |||
| 55 | if (id) | 34 | if (id) |
| 56 | name = id->name; | 35 | name = id->name; |
| 57 | 36 | ||
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c index d0b7bdd3f066..3b838068a7e4 100644 --- a/drivers/iio/chemical/bme680_spi.c +++ b/drivers/iio/chemical/bme680_spi.c | |||
| @@ -12,28 +12,93 @@ | |||
| 12 | 12 | ||
| 13 | #include "bme680.h" | 13 | #include "bme680.h" |
| 14 | 14 | ||
| 15 | struct bme680_spi_bus_context { | ||
| 16 | struct spi_device *spi; | ||
| 17 | u8 current_page; | ||
| 18 | }; | ||
| 19 | |||
| 20 | /* | ||
| 21 | * In SPI mode there are only 7 address bits, a "page" register determines | ||
| 22 | * which part of the 8-bit range is active. This function looks at the address | ||
| 23 | * and writes the page selection bit if needed | ||
| 24 | */ | ||
| 25 | static int bme680_regmap_spi_select_page( | ||
| 26 | struct bme680_spi_bus_context *ctx, u8 reg) | ||
| 27 | { | ||
| 28 | struct spi_device *spi = ctx->spi; | ||
| 29 | int ret; | ||
| 30 | u8 buf[2]; | ||
| 31 | u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */ | ||
| 32 | |||
| 33 | if (page == ctx->current_page) | ||
| 34 | return 0; | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Data sheet claims we're only allowed to change bit 4, so we must do | ||
| 38 | * a read-modify-write on each and every page select | ||
| 39 | */ | ||
| 40 | buf[0] = BME680_REG_STATUS; | ||
| 41 | ret = spi_write_then_read(spi, buf, 1, buf + 1, 1); | ||
| 42 | if (ret < 0) { | ||
| 43 | dev_err(&spi->dev, "failed to set page %u\n", page); | ||
| 44 | return ret; | ||
| 45 | } | ||
| 46 | |||
| 47 | buf[0] = BME680_REG_STATUS; | ||
| 48 | if (page) | ||
| 49 | buf[1] |= BME680_SPI_MEM_PAGE_BIT; | ||
| 50 | else | ||
| 51 | buf[1] &= ~BME680_SPI_MEM_PAGE_BIT; | ||
| 52 | |||
| 53 | ret = spi_write(spi, buf, 2); | ||
| 54 | if (ret < 0) { | ||
| 55 | dev_err(&spi->dev, "failed to set page %u\n", page); | ||
| 56 | return ret; | ||
| 57 | } | ||
| 58 | |||
| 59 | ctx->current_page = page; | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 15 | static int bme680_regmap_spi_write(void *context, const void *data, | 64 | static int bme680_regmap_spi_write(void *context, const void *data, |
| 16 | size_t count) | 65 | size_t count) |
| 17 | { | 66 | { |
| 18 | struct spi_device *spi = context; | 67 | struct bme680_spi_bus_context *ctx = context; |
| 68 | struct spi_device *spi = ctx->spi; | ||
| 69 | int ret; | ||
| 19 | u8 buf[2]; | 70 | u8 buf[2]; |
| 20 | 71 | ||
| 21 | memcpy(buf, data, 2); | 72 | memcpy(buf, data, 2); |
| 73 | |||
| 74 | ret = bme680_regmap_spi_select_page(ctx, buf[0]); | ||
| 75 | if (ret) | ||
| 76 | return ret; | ||
| 77 | |||
| 22 | /* | 78 | /* |
| 23 | * The SPI register address (= full register address without bit 7) | 79 | * The SPI register address (= full register address without bit 7) |
| 24 | * and the write command (bit7 = RW = '0') | 80 | * and the write command (bit7 = RW = '0') |
| 25 | */ | 81 | */ |
| 26 | buf[0] &= ~0x80; | 82 | buf[0] &= ~0x80; |
| 27 | 83 | ||
| 28 | return spi_write_then_read(spi, buf, 2, NULL, 0); | 84 | return spi_write(spi, buf, 2); |
| 29 | } | 85 | } |
| 30 | 86 | ||
| 31 | static int bme680_regmap_spi_read(void *context, const void *reg, | 87 | static int bme680_regmap_spi_read(void *context, const void *reg, |
| 32 | size_t reg_size, void *val, size_t val_size) | 88 | size_t reg_size, void *val, size_t val_size) |
| 33 | { | 89 | { |
| 34 | struct spi_device *spi = context; | 90 | struct bme680_spi_bus_context *ctx = context; |
| 91 | struct spi_device *spi = ctx->spi; | ||
| 92 | int ret; | ||
| 93 | u8 addr = *(const u8 *)reg; | ||
| 94 | |||
| 95 | ret = bme680_regmap_spi_select_page(ctx, addr); | ||
| 96 | if (ret) | ||
| 97 | return ret; | ||
| 35 | 98 | ||
| 36 | return spi_write_then_read(spi, reg, reg_size, val, val_size); | 99 | addr |= 0x80; /* bit7 = RW = '1' */ |
| 100 | |||
| 101 | return spi_write_then_read(spi, &addr, 1, val, val_size); | ||
| 37 | } | 102 | } |
| 38 | 103 | ||
| 39 | static struct regmap_bus bme680_regmap_bus = { | 104 | static struct regmap_bus bme680_regmap_bus = { |
| @@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = { | |||
| 46 | static int bme680_spi_probe(struct spi_device *spi) | 111 | static int bme680_spi_probe(struct spi_device *spi) |
| 47 | { | 112 | { |
| 48 | const struct spi_device_id *id = spi_get_device_id(spi); | 113 | const struct spi_device_id *id = spi_get_device_id(spi); |
| 114 | struct bme680_spi_bus_context *bus_context; | ||
| 49 | struct regmap *regmap; | 115 | struct regmap *regmap; |
| 50 | unsigned int val; | ||
| 51 | int ret; | 116 | int ret; |
| 52 | 117 | ||
| 53 | spi->bits_per_word = 8; | 118 | spi->bits_per_word = 8; |
| @@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi) | |||
| 57 | return ret; | 122 | return ret; |
| 58 | } | 123 | } |
| 59 | 124 | ||
| 125 | bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL); | ||
| 126 | if (!bus_context) | ||
| 127 | return -ENOMEM; | ||
| 128 | |||
| 129 | bus_context->spi = spi; | ||
| 130 | bus_context->current_page = 0xff; /* Undefined on warm boot */ | ||
| 131 | |||
| 60 | regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, | 132 | regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, |
| 61 | &spi->dev, &bme680_regmap_config); | 133 | bus_context, &bme680_regmap_config); |
| 62 | if (IS_ERR(regmap)) { | 134 | if (IS_ERR(regmap)) { |
| 63 | dev_err(&spi->dev, "Failed to register spi regmap %d\n", | 135 | dev_err(&spi->dev, "Failed to register spi regmap %d\n", |
| 64 | (int)PTR_ERR(regmap)); | 136 | (int)PTR_ERR(regmap)); |
| 65 | return PTR_ERR(regmap); | 137 | return PTR_ERR(regmap); |
| 66 | } | 138 | } |
| 67 | 139 | ||
| 68 | ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI, | ||
| 69 | BME680_CMD_SOFTRESET); | ||
| 70 | if (ret < 0) { | ||
| 71 | dev_err(&spi->dev, "Failed to reset chip\n"); | ||
| 72 | return ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */ | ||
| 76 | ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val); | ||
| 77 | if (ret < 0) { | ||
| 78 | dev_err(&spi->dev, "Error reading SPI chip ID\n"); | ||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | |||
| 82 | if (val != BME680_CHIP_ID_VAL) { | ||
| 83 | dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n", | ||
| 84 | val, BME680_CHIP_ID_VAL); | ||
| 85 | return -ENODEV; | ||
| 86 | } | ||
| 87 | /* | ||
| 88 | * select Page 1 of spi_mem_page to enable access to | ||
| 89 | * to registers from address 0x00 to 0x7F. | ||
| 90 | */ | ||
| 91 | ret = regmap_write_bits(regmap, BME680_REG_STATUS, | ||
| 92 | BME680_SPI_MEM_PAGE_BIT, | ||
| 93 | BME680_SPI_MEM_PAGE_1_VAL); | ||
| 94 | if (ret < 0) { | ||
| 95 | dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n"); | ||
| 96 | return ret; | ||
| 97 | } | ||
| 98 | |||
| 99 | return bme680_core_probe(&spi->dev, regmap, id->name); | 140 | return bme680_core_probe(&spi->dev, regmap, id->name); |
| 100 | } | 141 | } |
| 101 | 142 | ||
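The new bme680_regmap_spi_select_page() caches the current page and only touches the page-select bit in the status register when an access crosses into the other half of the 8-bit register map: addresses with bit 7 set live on one page, the rest on the other. The cache plus read-modify-write logic can be sketched against a simulated register file (the simulation itself is illustrative, the register number and bit match the header above):

    #include <stdio.h>
    #include <stdint.h>

    #define REG_STATUS   0x73
    #define MEM_PAGE_BIT (1u << 4)

    static uint8_t regs[256];             /* simulated register file */
    static uint8_t current_page = 0xff;   /* unknown after "warm boot" */

    static void select_page(uint8_t reg)
    {
        uint8_t page = (reg & 0x80) ? 0 : 1;   /* high addresses on page 0, low on page 1 */
        uint8_t status;

        if (page == current_page)
            return;                            /* already on the right page, no bus traffic */

        status = regs[REG_STATUS];             /* read-modify-write: only bit 4 may change */
        if (page)
            status |= MEM_PAGE_BIT;
        else
            status &= ~MEM_PAGE_BIT;
        regs[REG_STATUS] = status;

        current_page = page;
        printf("switched to page %u for reg 0x%02x\n", page, reg);
    }

    int main(void)
    {
        select_page(0xd0);   /* chip ID: high half, page 0 */
        select_page(0xe0);   /* soft reset: still page 0, no switch */
        select_page(0x1d);   /* low half, triggers a switch to page 1 */
        return 0;
    }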
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 89cb0066a6e0..8d76afb87d87 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | |||
| @@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, | |||
| 103 | * Do not use IIO_DEGREE_TO_RAD to avoid precision | 103 | * Do not use IIO_DEGREE_TO_RAD to avoid precision |
| 104 | * loss. Round to the nearest integer. | 104 | * loss. Round to the nearest integer. |
| 105 | */ | 105 | */ |
| 106 | *val = div_s64(val64 * 314159 + 9000000ULL, 1000); | 106 | *val = 0; |
| 107 | *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1); | 107 | *val2 = div_s64(val64 * 3141592653ULL, |
| 108 | ret = IIO_VAL_FRACTIONAL; | 108 | 180 << (CROS_EC_SENSOR_BITS - 1)); |
| 109 | ret = IIO_VAL_INT_PLUS_NANO; | ||
| 109 | break; | 110 | break; |
| 110 | case MOTIONSENSE_TYPE_MAG: | 111 | case MOTIONSENSE_TYPE_MAG: |
| 111 | /* | 112 | /* |
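The cros_ec_sensors scale fix expresses the gyro scale in nanoradians (IIO_VAL_INT_PLUS_NANO): multiplying by 3141592653 (pi scaled by 1e9) and dividing by 180 << (bits - 1) folds the degree-to-radian factor and the sensor bit scaling into one integer step instead of the earlier rounded fraction. A user-space check of the arithmetic, assuming a 16-bit sensor word and a made-up value in place of the driver's val64:

    #include <stdio.h>
    #include <stdint.h>

    #define SENSOR_BITS 16                  /* assumption for this example */

    int main(void)
    {
        const double pi = 3.141592653589793;
        int64_t val64 = 2000;               /* made-up value standing in for the driver's val64 */

        int64_t nano  = (val64 * 3141592653LL) / (180LL << (SENSOR_BITS - 1));
        double  exact = (double)val64 * pi / (180.0 * (1 << (SENSOR_BITS - 1)));

        printf("integer math: %lld.%09lld rad\n",
               (long long)(nano / 1000000000), (long long)(nano % 1000000000));
        printf("double check: %.9f rad\n", exact);
        return 0;
    }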
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 6d71fd905e29..c701a45469f6 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c | |||
| @@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, | |||
| 92 | 92 | ||
| 93 | inoutbuf[0] = 0x60; /* write EEPROM */ | 93 | inoutbuf[0] = 0x60; /* write EEPROM */ |
| 94 | inoutbuf[0] |= data->ref_mode << 3; | 94 | inoutbuf[0] |= data->ref_mode << 3; |
| 95 | inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0; | ||
| 95 | inoutbuf[1] = data->dac_value >> 4; | 96 | inoutbuf[1] = data->dac_value >> 4; |
| 96 | inoutbuf[2] = (data->dac_value & 0xf) << 4; | 97 | inoutbuf[2] = (data->dac_value & 0xf) << 4; |
| 97 | 98 | ||
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 63ca31628a93..92c07ab826eb 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c | |||
| @@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, | |||
| 582 | case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: | 582 | case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: |
| 583 | return bmg160_get_filter(data, val); | 583 | return bmg160_get_filter(data, val); |
| 584 | case IIO_CHAN_INFO_SCALE: | 584 | case IIO_CHAN_INFO_SCALE: |
| 585 | *val = 0; | ||
| 586 | switch (chan->type) { | 585 | switch (chan->type) { |
| 587 | case IIO_TEMP: | 586 | case IIO_TEMP: |
| 588 | *val2 = 500000; | 587 | *val = 500; |
| 589 | return IIO_VAL_INT_PLUS_MICRO; | 588 | return IIO_VAL_INT; |
| 590 | case IIO_ANGL_VEL: | 589 | case IIO_ANGL_VEL: |
| 591 | { | 590 | { |
| 592 | int i; | 591 | int i; |
| @@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, | |||
| 594 | for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { | 593 | for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { |
| 595 | if (bmg160_scale_table[i].dps_range == | 594 | if (bmg160_scale_table[i].dps_range == |
| 596 | data->dps_range) { | 595 | data->dps_range) { |
| 596 | *val = 0; | ||
| 597 | *val2 = bmg160_scale_table[i].scale; | 597 | *val2 = bmg160_scale_table[i].scale; |
| 598 | return IIO_VAL_INT_PLUS_MICRO; | 598 | return IIO_VAL_INT_PLUS_MICRO; |
| 599 | } | 599 | } |
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 77fac81a3adc..5ddebede31a6 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c | |||
| @@ -29,7 +29,8 @@ | |||
| 29 | 29 | ||
| 30 | #include "mpu3050.h" | 30 | #include "mpu3050.h" |
| 31 | 31 | ||
| 32 | #define MPU3050_CHIP_ID 0x69 | 32 | #define MPU3050_CHIP_ID 0x68 |
| 33 | #define MPU3050_CHIP_ID_MASK 0x7E | ||
| 33 | 34 | ||
| 34 | /* | 35 | /* |
| 35 | * Register map: anything suffixed *_H is a big-endian high byte and always | 36 | * Register map: anything suffixed *_H is a big-endian high byte and always |
| @@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev, | |||
| 1176 | goto err_power_down; | 1177 | goto err_power_down; |
| 1177 | } | 1178 | } |
| 1178 | 1179 | ||
| 1179 | if (val != MPU3050_CHIP_ID) { | 1180 | if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) { |
| 1180 | dev_err(dev, "unsupported chip id %02x\n", (u8)val); | 1181 | dev_err(dev, "unsupported chip id %02x\n", |
| 1182 | (u8)(val & MPU3050_CHIP_ID_MASK)); | ||
| 1181 | ret = -ENODEV; | 1183 | ret = -ENODEV; |
| 1182 | goto err_power_down; | 1184 | goto err_power_down; |
| 1183 | } | 1185 | } |
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index cd5bfe39591b..dadd921a4a30 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c | |||
| @@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, | |||
| 320 | const unsigned long *mask; | 320 | const unsigned long *mask; |
| 321 | unsigned long *trialmask; | 321 | unsigned long *trialmask; |
| 322 | 322 | ||
| 323 | trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength), | 323 | trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), |
| 324 | sizeof(*trialmask), | 324 | sizeof(*trialmask), GFP_KERNEL); |
| 325 | GFP_KERNEL); | ||
| 326 | if (trialmask == NULL) | 325 | if (trialmask == NULL) |
| 327 | return -ENOMEM; | 326 | return -ENOMEM; |
| 328 | if (!indio_dev->masklength) { | 327 | if (!indio_dev->masklength) { |
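The iio_scan_mask_set() change swaps kmalloc_array() for kcalloc() so the trial bitmap starts out zeroed; with an uninitialized buffer, the bits beyond the copied mask hold whatever was on the heap. The user-space analogue is malloc() versus calloc():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t words = 4;

        unsigned long *dirty = malloc(words * sizeof(*dirty));  /* contents unspecified */
        unsigned long *clean = calloc(words, sizeof(*clean));   /* guaranteed all-zero  */

        if (!dirty || !clean)
            return 1;

        /* only the first word gets an explicit value; the rest must already be 0 */
        clean[0] = 0x3;
        dirty[0] = 0x3;

        printf("clean[3] = %lu (always 0)\n", clean[3]);
        printf("dirty[3] = %lu (whatever was in the heap)\n", dirty[3]);

        free(dirty);
        free(clean);
        return 0;
    }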
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 4700fd5d8c90..9c4d92115504 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
| @@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register); | |||
| 1743 | **/ | 1743 | **/ |
| 1744 | void iio_device_unregister(struct iio_dev *indio_dev) | 1744 | void iio_device_unregister(struct iio_dev *indio_dev) |
| 1745 | { | 1745 | { |
| 1746 | mutex_lock(&indio_dev->info_exist_lock); | ||
| 1747 | |||
| 1748 | cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); | 1746 | cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); |
| 1749 | 1747 | ||
| 1748 | mutex_lock(&indio_dev->info_exist_lock); | ||
| 1749 | |||
| 1750 | iio_device_unregister_debugfs(indio_dev); | 1750 | iio_device_unregister_debugfs(indio_dev); |
| 1751 | 1751 | ||
| 1752 | iio_disable_all_buffers(indio_dev); | 1752 | iio_disable_all_buffers(indio_dev); |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 70b7d80431a9..f2e7ffe6fc54 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -993,6 +993,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) | |||
| 993 | * will only be one mm, so no big deal. | 993 | * will only be one mm, so no big deal. |
| 994 | */ | 994 | */ |
| 995 | down_write(&mm->mmap_sem); | 995 | down_write(&mm->mmap_sem); |
| 996 | if (!mmget_still_valid(mm)) | ||
| 997 | goto skip_mm; | ||
| 996 | mutex_lock(&ufile->umap_lock); | 998 | mutex_lock(&ufile->umap_lock); |
| 997 | list_for_each_entry_safe (priv, next_priv, &ufile->umaps, | 999 | list_for_each_entry_safe (priv, next_priv, &ufile->umaps, |
| 998 | list) { | 1000 | list) { |
| @@ -1007,6 +1009,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) | |||
| 1007 | vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); | 1009 | vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); |
| 1008 | } | 1010 | } |
| 1009 | mutex_unlock(&ufile->umap_lock); | 1011 | mutex_unlock(&ufile->umap_lock); |
| 1012 | skip_mm: | ||
| 1010 | up_write(&mm->mmap_sem); | 1013 | up_write(&mm->mmap_sem); |
| 1011 | mmput(mm); | 1014 | mmput(mm); |
| 1012 | } | 1015 | } |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 612f04190ed8..9784c6c0d2ec 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) | |||
| 13232 | int total_contexts; | 13232 | int total_contexts; |
| 13233 | int ret; | 13233 | int ret; |
| 13234 | unsigned ngroups; | 13234 | unsigned ngroups; |
| 13235 | int qos_rmt_count; | 13235 | int rmt_count; |
| 13236 | int user_rmt_reduced; | 13236 | int user_rmt_reduced; |
| 13237 | u32 n_usr_ctxts; | 13237 | u32 n_usr_ctxts; |
| 13238 | u32 send_contexts = chip_send_contexts(dd); | 13238 | u32 send_contexts = chip_send_contexts(dd); |
| @@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd) | |||
| 13294 | n_usr_ctxts = rcv_contexts - total_contexts; | 13294 | n_usr_ctxts = rcv_contexts - total_contexts; |
| 13295 | } | 13295 | } |
| 13296 | 13296 | ||
| 13297 | /* each user context requires an entry in the RMT */ | 13297 | /* |
| 13298 | qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); | 13298 | * The RMT entries are currently allocated as shown below: |
| 13299 | if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { | 13299 | * 1. QOS (0 to 128 entries); |
| 13300 | user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; | 13300 | * 2. FECN for PSM (num_user_contexts + num_vnic_contexts); |
| 13301 | * 3. VNIC (num_vnic_contexts). | ||
| 13302 | * It should be noted that PSM FECN oversubscribe num_vnic_contexts | ||
| 13303 | * entries of RMT because both VNIC and PSM could allocate any receive | ||
| 13304 | * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, | ||
| 13305 | * and PSM FECN must reserve an RMT entry for each possible PSM receive | ||
| 13306 | * context. | ||
| 13307 | */ | ||
| 13308 | rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); | ||
| 13309 | if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { | ||
| 13310 | user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; | ||
| 13301 | dd_dev_err(dd, | 13311 | dd_dev_err(dd, |
| 13302 | "RMT size is reducing the number of user receive contexts from %u to %d\n", | 13312 | "RMT size is reducing the number of user receive contexts from %u to %d\n", |
| 13303 | n_usr_ctxts, | 13313 | n_usr_ctxts, |
| @@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, | |||
| 14285 | u64 reg; | 14295 | u64 reg; |
| 14286 | int i, idx, regoff, regidx; | 14296 | int i, idx, regoff, regidx; |
| 14287 | u8 offset; | 14297 | u8 offset; |
| 14298 | u32 total_cnt; | ||
| 14288 | 14299 | ||
| 14289 | /* there needs to be enough room in the map table */ | 14300 | /* there needs to be enough room in the map table */ |
| 14290 | if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) { | 14301 | total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; |
| 14302 | if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { | ||
| 14291 | dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); | 14303 | dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); |
| 14292 | return; | 14304 | return; |
| 14293 | } | 14305 | } |
| @@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, | |||
| 14341 | /* add rule 1 */ | 14353 | /* add rule 1 */ |
| 14342 | add_rsm_rule(dd, RSM_INS_FECN, &rrd); | 14354 | add_rsm_rule(dd, RSM_INS_FECN, &rrd); |
| 14343 | 14355 | ||
| 14344 | rmt->used += dd->num_user_contexts; | 14356 | rmt->used += total_cnt; |
| 14345 | } | 14357 | } |
| 14346 | 14358 | ||
| 14347 | /* Initialize RSM for VNIC */ | 14359 | /* Initialize RSM for VNIC */ |
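The set_up_context_variables() comment enumerates the RMT consumers; the code then checks whether the QOS entries plus twice the VNIC contexts plus the user contexts still fit in NUM_MAP_ENTRIES and, if not, shrinks the user-context count. The bookkeeping reduces to a few lines of integer arithmetic; all numbers below are invented for illustration:

    #include <stdio.h>

    #define NUM_MAP_ENTRIES 256          /* illustrative table size */

    int main(void)
    {
        int qos_entries = 128;           /* invented example values */
        int num_vnic_contexts = 8;
        int n_usr_ctxts = 160;

        /*
         * QOS plus the VNIC share of FECN plus VNIC itself; the user FECN
         * entries are accounted for via n_usr_ctxts in the check below.
         */
        int rmt_count = qos_entries + num_vnic_contexts * 2;

        if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
            int reduced = NUM_MAP_ENTRIES - rmt_count;

            printf("reducing user contexts from %d to %d\n", n_usr_ctxts, reduced);
            n_usr_ctxts = reduced;
        }

        printf("final: %d RMT entries reserved, %d user contexts\n",
               rmt_count, n_usr_ctxts);
        return 0;
    }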
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 9b643c2409cf..eba300330a02 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c | |||
| @@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp) | |||
| 898 | if (!list_empty(&priv->s_iowait.list) && | 898 | if (!list_empty(&priv->s_iowait.list) && |
| 899 | !(qp->s_flags & RVT_S_BUSY) && | 899 | !(qp->s_flags & RVT_S_BUSY) && |
| 900 | !(priv->s_flags & RVT_S_BUSY)) { | 900 | !(priv->s_flags & RVT_S_BUSY)) { |
| 901 | qp->s_flags &= ~RVT_S_ANY_WAIT_IO; | 901 | qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; |
| 902 | iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB); | ||
| 903 | iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID); | ||
| 902 | list_del_init(&priv->s_iowait.list); | 904 | list_del_init(&priv->s_iowait.list); |
| 903 | priv->s_iowait.lock = NULL; | 905 | priv->s_iowait.lock = NULL; |
| 904 | rvt_put_qp(qp); | 906 | rvt_put_qp(qp); |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index e6726c1ab866..5991211d72bd 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
| @@ -3088,7 +3088,7 @@ send_last: | |||
| 3088 | update_ack_queue(qp, next); | 3088 | update_ack_queue(qp, next); |
| 3089 | } | 3089 | } |
| 3090 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 3090 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
| 3091 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 3091 | if (e->rdma_sge.mr) { |
| 3092 | rvt_put_mr(e->rdma_sge.mr); | 3092 | rvt_put_mr(e->rdma_sge.mr); |
| 3093 | e->rdma_sge.mr = NULL; | 3093 | e->rdma_sge.mr = NULL; |
| 3094 | } | 3094 | } |
| @@ -3166,7 +3166,7 @@ send_last: | |||
| 3166 | update_ack_queue(qp, next); | 3166 | update_ack_queue(qp, next); |
| 3167 | } | 3167 | } |
| 3168 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 3168 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
| 3169 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 3169 | if (e->rdma_sge.mr) { |
| 3170 | rvt_put_mr(e->rdma_sge.mr); | 3170 | rvt_put_mr(e->rdma_sge.mr); |
| 3171 | e->rdma_sge.mr = NULL; | 3171 | e->rdma_sge.mr = NULL; |
| 3172 | } | 3172 | } |
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index fdda33aca77f..43cbce7a19ea 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c | |||
| @@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
| 5017 | make_tid_rdma_ack(qp, ohdr, ps)) | 5017 | make_tid_rdma_ack(qp, ohdr, ps)) |
| 5018 | return 1; | 5018 | return 1; |
| 5019 | 5019 | ||
| 5020 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { | 5020 | /* |
| 5021 | if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) | 5021 | * Bail out if we can't send data. |
| 5022 | goto bail; | 5022 | * Be reminded that this check must been done after the call to |
| 5023 | /* We are in the error state, flush the work request. */ | 5023 | * make_tid_rdma_ack() because the responding QP could be in |
| 5024 | if (qp->s_last == READ_ONCE(qp->s_head)) | 5024 | * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA. |
| 5025 | goto bail; | 5025 | */ |
| 5026 | /* If DMAs are in progress, we can't flush immediately. */ | 5026 | if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) |
| 5027 | if (iowait_sdma_pending(&priv->s_iowait)) { | 5027 | goto bail; |
| 5028 | qp->s_flags |= RVT_S_WAIT_DMA; | ||
| 5029 | goto bail; | ||
| 5030 | } | ||
| 5031 | clear_ahg(qp); | ||
| 5032 | wqe = rvt_get_swqe_ptr(qp, qp->s_last); | ||
| 5033 | hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? | ||
| 5034 | IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); | ||
| 5035 | /* will get called again */ | ||
| 5036 | goto done_free_tx; | ||
| 5037 | } | ||
| 5038 | 5028 | ||
| 5039 | if (priv->s_flags & RVT_S_WAIT_ACK) | 5029 | if (priv->s_flags & RVT_S_WAIT_ACK) |
| 5040 | goto bail; | 5030 | goto bail; |
| @@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
| 5144 | hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, | 5134 | hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, |
| 5145 | middle, ps); | 5135 | middle, ps); |
| 5146 | return 1; | 5136 | return 1; |
| 5147 | done_free_tx: | ||
| 5148 | hfi1_put_txreq(ps->s_txreq); | ||
| 5149 | ps->s_txreq = NULL; | ||
| 5150 | return 1; | ||
| 5151 | |||
| 5152 | bail: | 5137 | bail: |
| 5153 | hfi1_put_txreq(ps->s_txreq); | 5138 | hfi1_put_txreq(ps->s_txreq); |
| 5154 | bail_no_tx: | 5139 | bail_no_tx: |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f1fec56f3ff4..8e29dbb5b5fb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c | |||
| @@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, | |||
| 792 | idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; | 792 | idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; |
| 793 | dma_offset = offset = idx_offset * table->obj_size; | 793 | dma_offset = offset = idx_offset * table->obj_size; |
| 794 | } else { | 794 | } else { |
| 795 | u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ | ||
| 796 | |||
| 795 | hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); | 797 | hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); |
| 796 | /* mtt mhop */ | 798 | /* mtt mhop */ |
| 797 | i = mhop.l0_idx; | 799 | i = mhop.l0_idx; |
| @@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, | |||
| 803 | hem_idx = i; | 805 | hem_idx = i; |
| 804 | 806 | ||
| 805 | hem = table->hem[hem_idx]; | 807 | hem = table->hem[hem_idx]; |
| 806 | dma_offset = offset = (obj & (table->num_obj - 1)) * | 808 | dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % |
| 807 | table->obj_size % mhop.bt_chunk_size; | 809 | mhop.bt_chunk_size; |
| 808 | if (mhop.hop_num == 2) | 810 | if (mhop.hop_num == 2) |
| 809 | dma_offset = offset = 0; | 811 | dma_offset = offset = 0; |
| 810 | } | 812 | } |
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index b09f1cde2ff5..08be0e4eabcd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c | |||
| @@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, | |||
| 746 | struct hns_roce_hem_table *table; | 746 | struct hns_roce_hem_table *table; |
| 747 | dma_addr_t dma_handle; | 747 | dma_addr_t dma_handle; |
| 748 | __le64 *mtts; | 748 | __le64 *mtts; |
| 749 | u32 s = start_index * sizeof(u64); | ||
| 750 | u32 bt_page_size; | 749 | u32 bt_page_size; |
| 751 | u32 i; | 750 | u32 i; |
| 752 | 751 | ||
| @@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, | |||
| 780 | return -EINVAL; | 779 | return -EINVAL; |
| 781 | 780 | ||
| 782 | mtts = hns_roce_table_find(hr_dev, table, | 781 | mtts = hns_roce_table_find(hr_dev, table, |
| 783 | mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, | 782 | mtt->first_seg + |
| 783 | start_index / HNS_ROCE_MTT_ENTRY_PER_SEG, | ||
| 784 | &dma_handle); | 784 | &dma_handle); |
| 785 | if (!mtts) | 785 | if (!mtts) |
| 786 | return -ENOMEM; | 786 | return -ENOMEM; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 57c76eafef2f..66cdf625534f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c | |||
| @@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |||
| 274 | wait_for_completion(&hr_qp->free); | 274 | wait_for_completion(&hr_qp->free); |
| 275 | 275 | ||
| 276 | if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { | 276 | if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { |
| 277 | if (hr_dev->caps.sccc_entry_sz) | ||
| 278 | hns_roce_table_put(hr_dev, &qp_table->sccc_table, | ||
| 279 | hr_qp->qpn); | ||
| 280 | if (hr_dev->caps.trrl_entry_sz) | 277 | if (hr_dev->caps.trrl_entry_sz) |
| 281 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, | 278 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, |
| 282 | hr_qp->qpn); | 279 | hr_qp->qpn); |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index c20bfc41ecf1..0aa10ebda5d9 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
| @@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, | |||
| 585 | struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); | 585 | struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); |
| 586 | bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; | 586 | bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; |
| 587 | bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; | 587 | bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; |
| 588 | u64 access_mask = ODP_READ_ALLOWED_BIT; | 588 | u64 access_mask; |
| 589 | u64 start_idx, page_mask; | 589 | u64 start_idx, page_mask; |
| 590 | struct ib_umem_odp *odp; | 590 | struct ib_umem_odp *odp; |
| 591 | size_t size; | 591 | size_t size; |
| @@ -607,6 +607,7 @@ next_mr: | |||
| 607 | page_shift = mr->umem->page_shift; | 607 | page_shift = mr->umem->page_shift; |
| 608 | page_mask = ~(BIT(page_shift) - 1); | 608 | page_mask = ~(BIT(page_shift) - 1); |
| 609 | start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; | 609 | start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; |
| 610 | access_mask = ODP_READ_ALLOWED_BIT; | ||
| 610 | 611 | ||
| 611 | if (prefetch && !downgrade && !mr->umem->writable) { | 612 | if (prefetch && !downgrade && !mr->umem->writable) { |
| 612 | /* prefetch with write-access must | 613 | /* prefetch with write-access must |
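The odp.c hunk above moves the access_mask assignment below the next_mr label so the mask is rebuilt on every pass of the page-fault handler instead of once at declaration; otherwise a write bit granted for one MR could leak into the next one. A minimal sketch of that per-pass-reset pattern (plain userspace C; nothing here is mlx5 code and the flag values are invented):

#include <stdio.h>

/* Minimal model of a retry loop with per-pass state (illustrative only,
 * not the mlx5 code): the flags variable must be reset at the top of
 * each pass, otherwise a bit set for one object leaks into the next. */
int main(void)
{
        unsigned int flags;
        int pass = 0;

next_object:
        flags = 0x1;            /* reset per-pass state here, not at declaration */
        if (pass == 0)
                flags |= 0x2;   /* only the first object should get this bit */
        printf("pass %d flags %#x\n", pass, flags);
        if (++pass < 2)
                goto next_object;
        return 0;
}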
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 6d8b3e0de57a..ec41400fec0c 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | |||
| @@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) | |||
| 1131 | pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); | 1131 | pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); |
| 1132 | pvrdma_page_dir_cleanup(dev, &dev->async_pdir); | 1132 | pvrdma_page_dir_cleanup(dev, &dev->async_pdir); |
| 1133 | pvrdma_free_slots(dev); | 1133 | pvrdma_free_slots(dev); |
| 1134 | dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, | ||
| 1135 | dev->dsrbase); | ||
| 1134 | 1136 | ||
| 1135 | iounmap(dev->regs); | 1137 | iounmap(dev->regs); |
| 1136 | kfree(dev->sgid_tbl); | 1138 | kfree(dev->sgid_tbl); |
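The pvrdma hunk above pairs the device shared region allocated in probe with a dma_free_coherent() in the PCI remove path, closing a leak on driver unbind. A hedged sketch of that alloc/free pairing; the struct and field names are hypothetical, only the dma_alloc_coherent()/dma_free_coherent() calls are real kernel API:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Pairing sketch only -- struct and field names are hypothetical, not the
 * pvrdma code. A buffer obtained with dma_alloc_coherent() in probe must be
 * returned with dma_free_coherent() on remove, using the same device, size
 * and DMA handle; skipping it leaks the coherent allocation on every unbind. */
struct my_shared_region {
        u64 driver_version;
};

struct my_dev {
        struct my_shared_region *dsr;   /* CPU address */
        dma_addr_t dsr_dma;             /* matching bus/DMA address */
};

static int my_alloc_dsr(struct pci_dev *pdev, struct my_dev *dev)
{
        dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
                                      &dev->dsr_dma, GFP_KERNEL);
        return dev->dsr ? 0 : -ENOMEM;
}

static void my_free_dsr(struct pci_dev *pdev, struct my_dev *dev)
{
        dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
                          dev->dsr_dma);        /* same size + handle as the alloc */
}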
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index effb63205d3d..4c67cf30a5d9 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c | |||
| @@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) | |||
| 148 | return error; | 148 | return error; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | pdata->input = input; | ||
| 152 | platform_set_drvdata(pdev, pdata); | ||
| 153 | |||
| 151 | error = devm_request_irq(&pdev->dev, pdata->irq, | 154 | error = devm_request_irq(&pdev->dev, pdata->irq, |
| 152 | imx_snvs_pwrkey_interrupt, | 155 | imx_snvs_pwrkey_interrupt, |
| 153 | 0, pdev->name, pdev); | 156 | 0, pdev->name, pdev); |
| @@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) | |||
| 163 | return error; | 166 | return error; |
| 164 | } | 167 | } |
| 165 | 168 | ||
| 166 | pdata->input = input; | ||
| 167 | platform_set_drvdata(pdev, pdata); | ||
| 168 | |||
| 169 | device_init_wakeup(&pdev->dev, pdata->wakeup); | 169 | device_init_wakeup(&pdev->dev, pdata->wakeup); |
| 170 | 170 | ||
| 171 | return 0; | 171 | return 0; |
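The snvs_pwrkey reorder publishes the driver data the interrupt handler dereferences before devm_request_irq() is called, since the IRQ can fire as soon as it is requested. A sketch of that ordering with a hypothetical driver (struct my_priv, my_isr and my_probe are invented; the platform_*, devm_* and IRQ helpers are the real kernel API):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical driver, not the SNVS code: publish the state the handler
 * dereferences before the IRQ is requested, because a shared or
 * already-pending line can fire immediately. */
struct my_priv {
        int irq;
        unsigned int events;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct platform_device *pdev = dev_id;
        struct my_priv *priv = platform_get_drvdata(pdev);  /* must already be set */

        priv->events++;
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
        struct my_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        /* Set driver data first, as the snvs_pwrkey fix does. */
        platform_set_drvdata(pdev, priv);

        return devm_request_irq(&pdev->dev, priv->irq, my_isr, 0,
                                pdev->name, pdev);
}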
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 628ef617bb2f..f9525d6f0bfe 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
| 1339 | { "ELAN0600", 0 }, | 1339 | { "ELAN0600", 0 }, |
| 1340 | { "ELAN0601", 0 }, | 1340 | { "ELAN0601", 0 }, |
| 1341 | { "ELAN0602", 0 }, | 1341 | { "ELAN0602", 0 }, |
| 1342 | { "ELAN0603", 0 }, | ||
| 1343 | { "ELAN0604", 0 }, | ||
| 1342 | { "ELAN0605", 0 }, | 1344 | { "ELAN0605", 0 }, |
| 1345 | { "ELAN0606", 0 }, | ||
| 1346 | { "ELAN0607", 0 }, | ||
| 1343 | { "ELAN0608", 0 }, | 1347 | { "ELAN0608", 0 }, |
| 1344 | { "ELAN0609", 0 }, | 1348 | { "ELAN0609", 0 }, |
| 1345 | { "ELAN060B", 0 }, | 1349 | { "ELAN060B", 0 }, |
| 1346 | { "ELAN060C", 0 }, | 1350 | { "ELAN060C", 0 }, |
| 1351 | { "ELAN060F", 0 }, | ||
| 1352 | { "ELAN0610", 0 }, | ||
| 1347 | { "ELAN0611", 0 }, | 1353 | { "ELAN0611", 0 }, |
| 1348 | { "ELAN0612", 0 }, | 1354 | { "ELAN0612", 0 }, |
| 1355 | { "ELAN0615", 0 }, | ||
| 1356 | { "ELAN0616", 0 }, | ||
| 1349 | { "ELAN0617", 0 }, | 1357 | { "ELAN0617", 0 }, |
| 1350 | { "ELAN0618", 0 }, | 1358 | { "ELAN0618", 0 }, |
| 1359 | { "ELAN0619", 0 }, | ||
| 1360 | { "ELAN061A", 0 }, | ||
| 1361 | { "ELAN061B", 0 }, | ||
| 1351 | { "ELAN061C", 0 }, | 1362 | { "ELAN061C", 0 }, |
| 1352 | { "ELAN061D", 0 }, | 1363 | { "ELAN061D", 0 }, |
| 1353 | { "ELAN061E", 0 }, | 1364 | { "ELAN061E", 0 }, |
| 1365 | { "ELAN061F", 0 }, | ||
| 1354 | { "ELAN0620", 0 }, | 1366 | { "ELAN0620", 0 }, |
| 1355 | { "ELAN0621", 0 }, | 1367 | { "ELAN0621", 0 }, |
| 1356 | { "ELAN0622", 0 }, | 1368 | { "ELAN0622", 0 }, |
| 1369 | { "ELAN0623", 0 }, | ||
| 1370 | { "ELAN0624", 0 }, | ||
| 1371 | { "ELAN0625", 0 }, | ||
| 1372 | { "ELAN0626", 0 }, | ||
| 1373 | { "ELAN0627", 0 }, | ||
| 1374 | { "ELAN0628", 0 }, | ||
| 1375 | { "ELAN0629", 0 }, | ||
| 1376 | { "ELAN062A", 0 }, | ||
| 1377 | { "ELAN062B", 0 }, | ||
| 1378 | { "ELAN062C", 0 }, | ||
| 1379 | { "ELAN062D", 0 }, | ||
| 1380 | { "ELAN0631", 0 }, | ||
| 1381 | { "ELAN0632", 0 }, | ||
| 1357 | { "ELAN1000", 0 }, | 1382 | { "ELAN1000", 0 }, |
| 1358 | { } | 1383 | { } |
| 1359 | }; | 1384 | }; |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 1b1378619fc9..ff40ba758cf3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) | |||
| 359 | static void iommu_set_exclusion_range(struct amd_iommu *iommu) | 359 | static void iommu_set_exclusion_range(struct amd_iommu *iommu) |
| 360 | { | 360 | { |
| 361 | u64 start = iommu->exclusion_start & PAGE_MASK; | 361 | u64 start = iommu->exclusion_start & PAGE_MASK; |
| 362 | u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; | 362 | u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; |
| 363 | u64 entry; | 363 | u64 entry; |
| 364 | 364 | ||
| 365 | if (!iommu->exclusion_start) | 365 | if (!iommu->exclusion_start) |
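The amd_iommu_init.c change accounts for the exclusion limit describing the last address of the range: page-aligning start + length lands one page past a page-aligned range, while start + length - 1 yields the page that actually contains the final byte. A small stand-alone demonstration of the arithmetic (example values only, no IOMMU code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Toy model of the exclusion-range fix: the limit holds the last address
 * covered by the range, so subtract one before page-aligning, otherwise a
 * page-aligned range ends one page too far. */
int main(void)
{
        uint64_t start = 0x100000, length = 0x4000;        /* example values only */

        uint64_t wrong = (start + length) & PAGE_MASK;     /* first page *after* the range */
        uint64_t right = (start + length - 1) & PAGE_MASK; /* page of the last byte */

        printf("start %#llx len %#llx -> wrong limit %#llx, right limit %#llx\n",
               (unsigned long long)start, (unsigned long long)length,
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}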
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c index aa7290784636..0390603170b4 100644 --- a/drivers/irqchip/irq-ath79-misc.c +++ b/drivers/irqchip/irq-ath79-misc.c | |||
| @@ -22,6 +22,15 @@ | |||
| 22 | #define AR71XX_RESET_REG_MISC_INT_ENABLE 4 | 22 | #define AR71XX_RESET_REG_MISC_INT_ENABLE 4 |
| 23 | 23 | ||
| 24 | #define ATH79_MISC_IRQ_COUNT 32 | 24 | #define ATH79_MISC_IRQ_COUNT 32 |
| 25 | #define ATH79_MISC_PERF_IRQ 5 | ||
| 26 | |||
| 27 | static int ath79_perfcount_irq; | ||
| 28 | |||
| 29 | int get_c0_perfcount_int(void) | ||
| 30 | { | ||
| 31 | return ath79_perfcount_irq; | ||
| 32 | } | ||
| 33 | EXPORT_SYMBOL_GPL(get_c0_perfcount_int); | ||
| 25 | 34 | ||
| 26 | static void ath79_misc_irq_handler(struct irq_desc *desc) | 35 | static void ath79_misc_irq_handler(struct irq_desc *desc) |
| 27 | { | 36 | { |
| @@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init( | |||
| 113 | { | 122 | { |
| 114 | void __iomem *base = domain->host_data; | 123 | void __iomem *base = domain->host_data; |
| 115 | 124 | ||
| 125 | ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ); | ||
| 126 | |||
| 116 | /* Disable and clear all interrupts */ | 127 | /* Disable and clear all interrupts */ |
| 117 | __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); | 128 | __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE); |
| 118 | __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); | 129 | __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS); |
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c index 86b72fbd3b45..353111a10413 100644 --- a/drivers/irqchip/irq-ls1x.c +++ b/drivers/irqchip/irq-ls1x.c | |||
| @@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node, | |||
| 130 | NULL); | 130 | NULL); |
| 131 | if (!priv->domain) { | 131 | if (!priv->domain) { |
| 132 | pr_err("ls1x-irq: cannot add IRQ domain\n"); | 132 | pr_err("ls1x-irq: cannot add IRQ domain\n"); |
| 133 | err = -ENOMEM; | ||
| 133 | goto out_iounmap; | 134 | goto out_iounmap; |
| 134 | } | 135 | } |
| 135 | 136 | ||
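The irq-ls1x.c change is the usual error-path rule: a branch that jumps to a cleanup label must set the return code first, or the function can report success after a failure. A toy, compilable version of the pattern (malloc/free stand in for the ioremap and irq_domain calls the real code uses):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Toy version of the rule the ls1x hunk enforces: a branch that jumps to
 * the cleanup label must set the return code first, otherwise the function
 * falls out through "return err" with 0 and the caller believes the setup
 * succeeded. */
static int setup(int fail_domain)
{
        int err = 0;
        void *base, *domain = NULL;

        base = malloc(64);              /* stands in for ioremap() */
        if (!base)
                return -ENOMEM;

        if (!fail_domain)
                domain = malloc(64);    /* stands in for irq_domain_add_*() */
        if (!domain) {
                err = -ENOMEM;          /* the line the patch adds */
                goto out_free;
        }

        free(domain);
        free(base);
        return 0;

out_free:
        free(base);
        return err;
}

int main(void)
{
        printf("ok path: %d, failing path: %d\n", setup(0), setup(1));
        return 0;
}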
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 4ab8b1b6608f..a14e35d40538 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c | |||
| @@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
| 710 | struct sock *sk = sock->sk; | 710 | struct sock *sk = sock->sk; |
| 711 | int err = 0; | 711 | int err = 0; |
| 712 | 712 | ||
| 713 | if (!maddr || maddr->family != AF_ISDN) | 713 | if (addr_len < sizeof(struct sockaddr_mISDN)) |
| 714 | return -EINVAL; | 714 | return -EINVAL; |
| 715 | 715 | ||
| 716 | if (addr_len < sizeof(struct sockaddr_mISDN)) | 716 | if (!maddr || maddr->family != AF_ISDN) |
| 717 | return -EINVAL; | 717 | return -EINVAL; |
| 718 | 718 | ||
| 719 | lock_sock(sk); | 719 | lock_sock(sk); |
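The mISDN bind fix swaps the two sanity checks so the caller-supplied addr_len is validated before any field of the sockaddr is read. A simplified, runnable model of that ordering (toy structure, not the mISDN definitions; 34 stands in for AF_ISDN):

#include <stdio.h>
#include <stddef.h>

/* Simplified model: check the advertised length before touching any member
 * of the address, so a short buffer is rejected without reading past what
 * the caller actually supplied. */
struct toy_sockaddr {
        unsigned short family;
        unsigned char  channel;
};

static int toy_bind(const void *addr, size_t addr_len)
{
        const struct toy_sockaddr *maddr = addr;

        if (addr_len < sizeof(struct toy_sockaddr))     /* length first */
                return -1;
        if (!maddr || maddr->family != 34)              /* 34 stands in for AF_ISDN */
                return -1;
        return 0;
}

int main(void)
{
        struct toy_sockaddr a = { .family = 34, .channel = 1 };

        printf("full: %d, truncated: %d\n",
               toy_bind(&a, sizeof(a)), toy_bind(&a, 1));
        return 0;
}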
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 3789185144da..0b7d5fb4548d 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c | |||
| @@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) | |||
| 231 | struct pblk_sec_meta *meta; | 231 | struct pblk_sec_meta *meta; |
| 232 | struct bio *new_bio = rqd->bio; | 232 | struct bio *new_bio = rqd->bio; |
| 233 | struct bio *bio = pr_ctx->orig_bio; | 233 | struct bio *bio = pr_ctx->orig_bio; |
| 234 | struct bio_vec src_bv, dst_bv; | ||
| 235 | void *meta_list = rqd->meta_list; | 234 | void *meta_list = rqd->meta_list; |
| 236 | int bio_init_idx = pr_ctx->bio_init_idx; | ||
| 237 | unsigned long *read_bitmap = pr_ctx->bitmap; | 235 | unsigned long *read_bitmap = pr_ctx->bitmap; |
| 236 | struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT; | ||
| 237 | struct bvec_iter new_iter = BVEC_ITER_ALL_INIT; | ||
| 238 | int nr_secs = pr_ctx->orig_nr_secs; | 238 | int nr_secs = pr_ctx->orig_nr_secs; |
| 239 | int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); | 239 | int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); |
| 240 | void *src_p, *dst_p; | 240 | void *src_p, *dst_p; |
| 241 | int hole, i; | 241 | int bit, i; |
| 242 | 242 | ||
| 243 | if (unlikely(nr_holes == 1)) { | 243 | if (unlikely(nr_holes == 1)) { |
| 244 | struct ppa_addr ppa; | 244 | struct ppa_addr ppa; |
| @@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) | |||
| 257 | 257 | ||
| 258 | /* Fill the holes in the original bio */ | 258 | /* Fill the holes in the original bio */ |
| 259 | i = 0; | 259 | i = 0; |
| 260 | hole = find_first_zero_bit(read_bitmap, nr_secs); | 260 | for (bit = 0; bit < nr_secs; bit++) { |
| 261 | do { | 261 | if (!test_bit(bit, read_bitmap)) { |
| 262 | struct pblk_line *line; | 262 | struct bio_vec dst_bv, src_bv; |
| 263 | struct pblk_line *line; | ||
| 263 | 264 | ||
| 264 | line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); | 265 | line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); |
| 265 | kref_put(&line->ref, pblk_line_put); | 266 | kref_put(&line->ref, pblk_line_put); |
| 266 | 267 | ||
| 267 | meta = pblk_get_meta(pblk, meta_list, hole); | 268 | meta = pblk_get_meta(pblk, meta_list, bit); |
| 268 | meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); | 269 | meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); |
| 269 | 270 | ||
| 270 | src_bv = new_bio->bi_io_vec[i++]; | 271 | dst_bv = bio_iter_iovec(bio, orig_iter); |
| 271 | dst_bv = bio->bi_io_vec[bio_init_idx + hole]; | 272 | src_bv = bio_iter_iovec(new_bio, new_iter); |
| 272 | 273 | ||
| 273 | src_p = kmap_atomic(src_bv.bv_page); | 274 | src_p = kmap_atomic(src_bv.bv_page); |
| 274 | dst_p = kmap_atomic(dst_bv.bv_page); | 275 | dst_p = kmap_atomic(dst_bv.bv_page); |
| 275 | 276 | ||
| 276 | memcpy(dst_p + dst_bv.bv_offset, | 277 | memcpy(dst_p + dst_bv.bv_offset, |
| 277 | src_p + src_bv.bv_offset, | 278 | src_p + src_bv.bv_offset, |
| 278 | PBLK_EXPOSED_PAGE_SIZE); | 279 | PBLK_EXPOSED_PAGE_SIZE); |
| 279 | 280 | ||
| 280 | kunmap_atomic(src_p); | 281 | kunmap_atomic(src_p); |
| 281 | kunmap_atomic(dst_p); | 282 | kunmap_atomic(dst_p); |
| 282 | 283 | ||
| 283 | mempool_free(src_bv.bv_page, &pblk->page_bio_pool); | 284 | flush_dcache_page(dst_bv.bv_page); |
| 285 | mempool_free(src_bv.bv_page, &pblk->page_bio_pool); | ||
| 284 | 286 | ||
| 285 | hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); | 287 | bio_advance_iter(new_bio, &new_iter, |
| 286 | } while (hole < nr_secs); | 288 | PBLK_EXPOSED_PAGE_SIZE); |
| 289 | i++; | ||
| 290 | } | ||
| 291 | bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE); | ||
| 292 | } | ||
| 287 | 293 | ||
| 288 | bio_put(new_bio); | 294 | bio_put(new_bio); |
| 289 | kfree(pr_ctx); | 295 | kfree(pr_ctx); |
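The pblk rewrite walks both bios with struct bvec_iter instead of indexing bi_io_vec directly, which stops being safe once multi-page bvecs let a single vector cover several pages. A hedged sketch of that iterator pattern; the function and parameter names are assumed and the loop is simplified relative to the real hole-filling logic:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative sketch, not the pblk code: with multi-page bvecs,
 * bi_io_vec[i] no longer corresponds to one page, so the page/offset for
 * each chunk must come from bio_iter_iovec() and the position must be
 * moved with bio_advance_iter(), as the hunk above does. */
static void copy_chunks(struct bio *dst, struct bio *src,
                        int nr_chunks, unsigned int chunk_size)
{
        struct bvec_iter dst_iter = BVEC_ITER_ALL_INIT;
        struct bvec_iter src_iter = BVEC_ITER_ALL_INIT;
        int i;

        for (i = 0; i < nr_chunks; i++) {
                struct bio_vec dst_bv = bio_iter_iovec(dst, dst_iter);
                struct bio_vec src_bv = bio_iter_iovec(src, src_iter);
                void *d = kmap_atomic(dst_bv.bv_page);
                void *s = kmap_atomic(src_bv.bv_page);

                memcpy(d + dst_bv.bv_offset, s + src_bv.bv_offset, chunk_size);

                kunmap_atomic(s);
                kunmap_atomic(d);

                bio_advance_iter(src, &src_iter, chunk_size);
                bio_advance_iter(dst, &dst_iter, chunk_size);
        }
}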
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 95c6d86ab5e8..c4ef1fceead6 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h | |||
| @@ -115,6 +115,7 @@ struct mapped_device { | |||
| 115 | struct srcu_struct io_barrier; | 115 | struct srcu_struct io_barrier; |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | void disable_discard(struct mapped_device *md); | ||
| 118 | void disable_write_same(struct mapped_device *md); | 119 | void disable_write_same(struct mapped_device *md); |
| 119 | void disable_write_zeroes(struct mapped_device *md); | 120 | void disable_write_zeroes(struct mapped_device *md); |
| 120 | 121 | ||
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index b53f30f16b4d..4b76f84424c3 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c | |||
| @@ -36,7 +36,7 @@ struct dm_device { | |||
| 36 | struct list_head list; | 36 | struct list_head list; |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | const char *dm_allowed_targets[] __initconst = { | 39 | const char * const dm_allowed_targets[] __initconst = { |
| 40 | "crypt", | 40 | "crypt", |
| 41 | "delay", | 41 | "delay", |
| 42 | "linear", | 42 | "linear", |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index d57d997a52c8..7c678f50aaa3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
| @@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig | |||
| 913 | static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) | 913 | static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) |
| 914 | { | 914 | { |
| 915 | return range1->logical_sector < range2->logical_sector + range2->n_sectors && | 915 | return range1->logical_sector < range2->logical_sector + range2->n_sectors && |
| 916 | range2->logical_sector + range2->n_sectors > range2->logical_sector; | 916 | range1->logical_sector + range1->n_sectors > range2->logical_sector; |
| 917 | } | 917 | } |
| 918 | 918 | ||
| 919 | static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) | 919 | static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) |
| @@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity | |||
| 959 | struct dm_integrity_range *last_range = | 959 | struct dm_integrity_range *last_range = |
| 960 | list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); | 960 | list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); |
| 961 | struct task_struct *last_range_task; | 961 | struct task_struct *last_range_task; |
| 962 | if (!ranges_overlap(range, last_range)) | ||
| 963 | break; | ||
| 964 | last_range_task = last_range->task; | 962 | last_range_task = last_range->task; |
| 965 | list_del(&last_range->wait_entry); | 963 | list_del(&last_range->wait_entry); |
| 966 | if (!add_new_range(ic, last_range, false)) { | 964 | if (!add_new_range(ic, last_range, false)) { |
| @@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3185 | journal_watermark = val; | 3183 | journal_watermark = val; |
| 3186 | else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) | 3184 | else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) |
| 3187 | sync_msec = val; | 3185 | sync_msec = val; |
| 3188 | else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { | 3186 | else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { |
| 3189 | if (ic->meta_dev) { | 3187 | if (ic->meta_dev) { |
| 3190 | dm_put_device(ti, ic->meta_dev); | 3188 | dm_put_device(ti, ic->meta_dev); |
| 3191 | ic->meta_dev = NULL; | 3189 | ic->meta_dev = NULL; |
| @@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3204 | goto bad; | 3202 | goto bad; |
| 3205 | } | 3203 | } |
| 3206 | ic->sectors_per_block = val >> SECTOR_SHIFT; | 3204 | ic->sectors_per_block = val >> SECTOR_SHIFT; |
| 3207 | } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { | 3205 | } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { |
| 3208 | r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, | 3206 | r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, |
| 3209 | "Invalid internal_hash argument"); | 3207 | "Invalid internal_hash argument"); |
| 3210 | if (r) | 3208 | if (r) |
| 3211 | goto bad; | 3209 | goto bad; |
| 3212 | } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { | 3210 | } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { |
| 3213 | r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, | 3211 | r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, |
| 3214 | "Invalid journal_crypt argument"); | 3212 | "Invalid journal_crypt argument"); |
| 3215 | if (r) | 3213 | if (r) |
| 3216 | goto bad; | 3214 | goto bad; |
| 3217 | } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { | 3215 | } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { |
| 3218 | r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, | 3216 | r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, |
| 3219 | "Invalid journal_mac argument"); | 3217 | "Invalid journal_mac argument"); |
| 3220 | if (r) | 3218 | if (r) |
| @@ -3616,7 +3614,7 @@ static struct target_type integrity_target = { | |||
| 3616 | .io_hints = dm_integrity_io_hints, | 3614 | .io_hints = dm_integrity_io_hints, |
| 3617 | }; | 3615 | }; |
| 3618 | 3616 | ||
| 3619 | int __init dm_integrity_init(void) | 3617 | static int __init dm_integrity_init(void) |
| 3620 | { | 3618 | { |
| 3621 | int r; | 3619 | int r; |
| 3622 | 3620 | ||
| @@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void) | |||
| 3635 | return r; | 3633 | return r; |
| 3636 | } | 3634 | } |
| 3637 | 3635 | ||
| 3638 | void dm_integrity_exit(void) | 3636 | static void __exit dm_integrity_exit(void) |
| 3639 | { | 3637 | { |
| 3640 | dm_unregister_target(&integrity_target); | 3638 | dm_unregister_target(&integrity_target); |
| 3641 | kmem_cache_destroy(journal_io_cache); | 3639 | kmem_cache_destroy(journal_io_cache); |
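Among the dm-integrity fixes, the one-character change turns ranges_overlap() into a correct interval test; the old second clause compared range2 against itself, which holds for any non-empty range. A stand-alone version of the corrected predicate with a quick check (not the dm-integrity types):

#include <stdio.h>
#include <stdbool.h>

/* Stand-alone version of the corrected predicate: two half-open ranges
 * [start, start + n) overlap iff each one starts before the other ends. */
struct range {
        unsigned long long start;
        unsigned long long n;
};

static bool ranges_overlap(const struct range *a, const struct range *b)
{
        return a->start < b->start + b->n &&
               a->start + a->n > b->start;
}

int main(void)
{
        struct range a = { 0, 8 };
        struct range b = { 8, 8 };
        struct range c = { 4, 8 };

        printf("a,b overlap: %d (expect 0)\n", ranges_overlap(&a, &b));
        printf("a,c overlap: %d (expect 1)\n", ranges_overlap(&a, &c));
        return 0;
}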
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 09773636602d..b66745bd08bb 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
| @@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) | |||
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (unlikely(error == BLK_STS_TARGET)) { | 224 | if (unlikely(error == BLK_STS_TARGET)) { |
| 225 | if (req_op(clone) == REQ_OP_WRITE_SAME && | 225 | if (req_op(clone) == REQ_OP_DISCARD && |
| 226 | !clone->q->limits.max_write_same_sectors) | 226 | !clone->q->limits.max_discard_sectors) |
| 227 | disable_discard(tio->md); | ||
| 228 | else if (req_op(clone) == REQ_OP_WRITE_SAME && | ||
| 229 | !clone->q->limits.max_write_same_sectors) | ||
| 227 | disable_write_same(tio->md); | 230 | disable_write_same(tio->md); |
| 228 | if (req_op(clone) == REQ_OP_WRITE_ZEROES && | 231 | else if (req_op(clone) == REQ_OP_WRITE_ZEROES && |
| 229 | !clone->q->limits.max_write_zeroes_sectors) | 232 | !clone->q->limits.max_write_zeroes_sectors) |
| 230 | disable_write_zeroes(tio->md); | 233 | disable_write_zeroes(tio->md); |
| 231 | } | 234 | } |
| 232 | 235 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ba9481f1bf3c..cde3b49b2a91 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
| @@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) | |||
| 1844 | return true; | 1844 | return true; |
| 1845 | } | 1845 | } |
| 1846 | 1846 | ||
| 1847 | static int device_requires_stable_pages(struct dm_target *ti, | ||
| 1848 | struct dm_dev *dev, sector_t start, | ||
| 1849 | sector_t len, void *data) | ||
| 1850 | { | ||
| 1851 | struct request_queue *q = bdev_get_queue(dev->bdev); | ||
| 1852 | |||
| 1853 | return q && bdi_cap_stable_pages_required(q->backing_dev_info); | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | /* | ||
| 1857 | * If any underlying device requires stable pages, a table must require | ||
| 1858 | * them as well. Only targets that support iterate_devices are considered: | ||
| 1859 | * don't want error, zero, etc to require stable pages. | ||
| 1860 | */ | ||
| 1861 | static bool dm_table_requires_stable_pages(struct dm_table *t) | ||
| 1862 | { | ||
| 1863 | struct dm_target *ti; | ||
| 1864 | unsigned i; | ||
| 1865 | |||
| 1866 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | ||
| 1867 | ti = dm_table_get_target(t, i); | ||
| 1868 | |||
| 1869 | if (ti->type->iterate_devices && | ||
| 1870 | ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) | ||
| 1871 | return true; | ||
| 1872 | } | ||
| 1873 | |||
| 1874 | return false; | ||
| 1875 | } | ||
| 1876 | |||
| 1847 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1877 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
| 1848 | struct queue_limits *limits) | 1878 | struct queue_limits *limits) |
| 1849 | { | 1879 | { |
| @@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
| 1897 | dm_table_verify_integrity(t); | 1927 | dm_table_verify_integrity(t); |
| 1898 | 1928 | ||
| 1899 | /* | 1929 | /* |
| 1930 | * Some devices don't use blk_integrity but still want stable pages | ||
| 1931 | * because they do their own checksumming. | ||
| 1932 | */ | ||
| 1933 | if (dm_table_requires_stable_pages(t)) | ||
| 1934 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | ||
| 1935 | else | ||
| 1936 | q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; | ||
| 1937 | |||
| 1938 | /* | ||
| 1900 | * Determine whether or not this queue's I/O timings contribute | 1939 | * Determine whether or not this queue's I/O timings contribute |
| 1901 | * to the entropy pool, Only request-based targets use this. | 1940 | * to the entropy pool, Only request-based targets use this. |
| 1902 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not | 1941 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68d24056d0b1..043f0761e4a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) | |||
| 945 | } | 945 | } |
| 946 | } | 946 | } |
| 947 | 947 | ||
| 948 | void disable_discard(struct mapped_device *md) | ||
| 949 | { | ||
| 950 | struct queue_limits *limits = dm_get_queue_limits(md); | ||
| 951 | |||
| 952 | /* device doesn't really support DISCARD, disable it */ | ||
| 953 | limits->max_discard_sectors = 0; | ||
| 954 | blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); | ||
| 955 | } | ||
| 956 | |||
| 948 | void disable_write_same(struct mapped_device *md) | 957 | void disable_write_same(struct mapped_device *md) |
| 949 | { | 958 | { |
| 950 | struct queue_limits *limits = dm_get_queue_limits(md); | 959 | struct queue_limits *limits = dm_get_queue_limits(md); |
| @@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio) | |||
| 970 | dm_endio_fn endio = tio->ti->type->end_io; | 979 | dm_endio_fn endio = tio->ti->type->end_io; |
| 971 | 980 | ||
| 972 | if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { | 981 | if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { |
| 973 | if (bio_op(bio) == REQ_OP_WRITE_SAME && | 982 | if (bio_op(bio) == REQ_OP_DISCARD && |
| 974 | !bio->bi_disk->queue->limits.max_write_same_sectors) | 983 | !bio->bi_disk->queue->limits.max_discard_sectors) |
| 984 | disable_discard(md); | ||
| 985 | else if (bio_op(bio) == REQ_OP_WRITE_SAME && | ||
| 986 | !bio->bi_disk->queue->limits.max_write_same_sectors) | ||
| 975 | disable_write_same(md); | 987 | disable_write_same(md); |
| 976 | if (bio_op(bio) == REQ_OP_WRITE_ZEROES && | 988 | else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && |
| 977 | !bio->bi_disk->queue->limits.max_write_zeroes_sectors) | 989 | !bio->bi_disk->queue->limits.max_write_zeroes_sectors) |
| 978 | disable_write_zeroes(md); | 990 | disable_write_zeroes(md); |
| 979 | } | 991 | } |
| 980 | 992 | ||
| @@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
| 1042 | return -EINVAL; | 1054 | return -EINVAL; |
| 1043 | } | 1055 | } |
| 1044 | 1056 | ||
| 1045 | /* | 1057 | ti->max_io_len = (uint32_t) len; |
| 1046 | * BIO based queue uses its own splitting. When multipage bvecs | ||
| 1047 | * is switched on, size of the incoming bio may be too big to | ||
| 1048 | * be handled in some targets, such as crypt. | ||
| 1049 | * | ||
| 1050 | * When these targets are ready for the big bio, we can remove | ||
| 1051 | * the limit. | ||
| 1052 | */ | ||
| 1053 | ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); | ||
| 1054 | 1058 | ||
| 1055 | return 0; | 1059 | return 0; |
| 1056 | } | 1060 | } |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0ce2d8dfc5f1..26ad6468d13a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
| @@ -1246,7 +1246,7 @@ config MFD_STA2X11 | |||
| 1246 | 1246 | ||
| 1247 | config MFD_SUN6I_PRCM | 1247 | config MFD_SUN6I_PRCM |
| 1248 | bool "Allwinner A31 PRCM controller" | 1248 | bool "Allwinner A31 PRCM controller" |
| 1249 | depends on ARCH_SUNXI | 1249 | depends on ARCH_SUNXI || COMPILE_TEST |
| 1250 | select MFD_CORE | 1250 | select MFD_CORE |
| 1251 | help | 1251 | help |
| 1252 | Support for the PRCM (Power/Reset/Clock Management) unit available | 1252 | Support for the PRCM (Power/Reset/Clock Management) unit available |
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 69df27769c21..43ac71691fe4 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c | |||
| @@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = { | |||
| 53 | static const struct mfd_cell sprd_pmic_devs[] = { | 53 | static const struct mfd_cell sprd_pmic_devs[] = { |
| 54 | { | 54 | { |
| 55 | .name = "sc27xx-wdt", | 55 | .name = "sc27xx-wdt", |
| 56 | .of_compatible = "sprd,sc27xx-wdt", | 56 | .of_compatible = "sprd,sc2731-wdt", |
| 57 | }, { | 57 | }, { |
| 58 | .name = "sc27xx-rtc", | 58 | .name = "sc27xx-rtc", |
| 59 | .of_compatible = "sprd,sc27xx-rtc", | 59 | .of_compatible = "sprd,sc2731-rtc", |
| 60 | }, { | 60 | }, { |
| 61 | .name = "sc27xx-charger", | 61 | .name = "sc27xx-charger", |
| 62 | .of_compatible = "sprd,sc27xx-charger", | 62 | .of_compatible = "sprd,sc2731-charger", |
| 63 | }, { | 63 | }, { |
| 64 | .name = "sc27xx-chg-timer", | 64 | .name = "sc27xx-chg-timer", |
| 65 | .of_compatible = "sprd,sc27xx-chg-timer", | 65 | .of_compatible = "sprd,sc2731-chg-timer", |
| 66 | }, { | 66 | }, { |
| 67 | .name = "sc27xx-fast-chg", | 67 | .name = "sc27xx-fast-chg", |
| 68 | .of_compatible = "sprd,sc27xx-fast-chg", | 68 | .of_compatible = "sprd,sc2731-fast-chg", |
| 69 | }, { | 69 | }, { |
| 70 | .name = "sc27xx-chg-wdt", | 70 | .name = "sc27xx-chg-wdt", |
| 71 | .of_compatible = "sprd,sc27xx-chg-wdt", | 71 | .of_compatible = "sprd,sc2731-chg-wdt", |
| 72 | }, { | 72 | }, { |
| 73 | .name = "sc27xx-typec", | 73 | .name = "sc27xx-typec", |
| 74 | .of_compatible = "sprd,sc27xx-typec", | 74 | .of_compatible = "sprd,sc2731-typec", |
| 75 | }, { | 75 | }, { |
| 76 | .name = "sc27xx-flash", | 76 | .name = "sc27xx-flash", |
| 77 | .of_compatible = "sprd,sc27xx-flash", | 77 | .of_compatible = "sprd,sc2731-flash", |
| 78 | }, { | 78 | }, { |
| 79 | .name = "sc27xx-eic", | 79 | .name = "sc27xx-eic", |
| 80 | .of_compatible = "sprd,sc27xx-eic", | 80 | .of_compatible = "sprd,sc2731-eic", |
| 81 | }, { | 81 | }, { |
| 82 | .name = "sc27xx-efuse", | 82 | .name = "sc27xx-efuse", |
| 83 | .of_compatible = "sprd,sc27xx-efuse", | 83 | .of_compatible = "sprd,sc2731-efuse", |
| 84 | }, { | 84 | }, { |
| 85 | .name = "sc27xx-thermal", | 85 | .name = "sc27xx-thermal", |
| 86 | .of_compatible = "sprd,sc27xx-thermal", | 86 | .of_compatible = "sprd,sc2731-thermal", |
| 87 | }, { | 87 | }, { |
| 88 | .name = "sc27xx-adc", | 88 | .name = "sc27xx-adc", |
| 89 | .of_compatible = "sprd,sc27xx-adc", | 89 | .of_compatible = "sprd,sc2731-adc", |
| 90 | }, { | 90 | }, { |
| 91 | .name = "sc27xx-audio-codec", | 91 | .name = "sc27xx-audio-codec", |
| 92 | .of_compatible = "sprd,sc27xx-audio-codec", | 92 | .of_compatible = "sprd,sc2731-audio-codec", |
| 93 | }, { | 93 | }, { |
| 94 | .name = "sc27xx-regulator", | 94 | .name = "sc27xx-regulator", |
| 95 | .of_compatible = "sprd,sc27xx-regulator", | 95 | .of_compatible = "sprd,sc2731-regulator", |
| 96 | }, { | 96 | }, { |
| 97 | .name = "sc27xx-vibrator", | 97 | .name = "sc27xx-vibrator", |
| 98 | .of_compatible = "sprd,sc27xx-vibrator", | 98 | .of_compatible = "sprd,sc2731-vibrator", |
| 99 | }, { | 99 | }, { |
| 100 | .name = "sc27xx-keypad-led", | 100 | .name = "sc27xx-keypad-led", |
| 101 | .of_compatible = "sprd,sc27xx-keypad-led", | 101 | .of_compatible = "sprd,sc2731-keypad-led", |
| 102 | }, { | 102 | }, { |
| 103 | .name = "sc27xx-bltc", | 103 | .name = "sc27xx-bltc", |
| 104 | .of_compatible = "sprd,sc27xx-bltc", | 104 | .of_compatible = "sprd,sc2731-bltc", |
| 105 | }, { | 105 | }, { |
| 106 | .name = "sc27xx-fgu", | 106 | .name = "sc27xx-fgu", |
| 107 | .of_compatible = "sprd,sc27xx-fgu", | 107 | .of_compatible = "sprd,sc2731-fgu", |
| 108 | }, { | 108 | }, { |
| 109 | .name = "sc27xx-7sreset", | 109 | .name = "sc27xx-7sreset", |
| 110 | .of_compatible = "sprd,sc27xx-7sreset", | 110 | .of_compatible = "sprd,sc2731-7sreset", |
| 111 | }, { | 111 | }, { |
| 112 | .name = "sc27xx-poweroff", | 112 | .name = "sc27xx-poweroff", |
| 113 | .of_compatible = "sprd,sc27xx-poweroff", | 113 | .of_compatible = "sprd,sc2731-poweroff", |
| 114 | }, { | 114 | }, { |
| 115 | .name = "sc27xx-syscon", | 115 | .name = "sc27xx-syscon", |
| 116 | .of_compatible = "sprd,sc27xx-syscon", | 116 | .of_compatible = "sprd,sc2731-syscon", |
| 117 | }, | 117 | }, |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 299016bc46d9..104477b512a2 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c | |||
| @@ -1245,6 +1245,28 @@ free: | |||
| 1245 | return status; | 1245 | return status; |
| 1246 | } | 1246 | } |
| 1247 | 1247 | ||
| 1248 | static int __maybe_unused twl_suspend(struct device *dev) | ||
| 1249 | { | ||
| 1250 | struct i2c_client *client = to_i2c_client(dev); | ||
| 1251 | |||
| 1252 | if (client->irq) | ||
| 1253 | disable_irq(client->irq); | ||
| 1254 | |||
| 1255 | return 0; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | static int __maybe_unused twl_resume(struct device *dev) | ||
| 1259 | { | ||
| 1260 | struct i2c_client *client = to_i2c_client(dev); | ||
| 1261 | |||
| 1262 | if (client->irq) | ||
| 1263 | enable_irq(client->irq); | ||
| 1264 | |||
| 1265 | return 0; | ||
| 1266 | } | ||
| 1267 | |||
| 1268 | static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); | ||
| 1269 | |||
| 1248 | static const struct i2c_device_id twl_ids[] = { | 1270 | static const struct i2c_device_id twl_ids[] = { |
| 1249 | { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ | 1271 | { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ |
| 1250 | { "twl5030", 0 }, /* T2 updated */ | 1272 | { "twl5030", 0 }, /* T2 updated */ |
| @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { | |||
| 1262 | /* One Client Driver , 4 Clients */ | 1284 | /* One Client Driver , 4 Clients */ |
| 1263 | static struct i2c_driver twl_driver = { | 1285 | static struct i2c_driver twl_driver = { |
| 1264 | .driver.name = DRIVER_NAME, | 1286 | .driver.name = DRIVER_NAME, |
| 1287 | .driver.pm = &twl_dev_pm_ops, | ||
| 1265 | .id_table = twl_ids, | 1288 | .id_table = twl_ids, |
| 1266 | .probe = twl_probe, | 1289 | .probe = twl_probe, |
| 1267 | .remove = twl_remove, | 1290 | .remove = twl_remove, |
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 39f832d27288..36d0d5c9cfba 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c | |||
| @@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev) | |||
| 1184 | struct fastrpc_session_ctx *sess; | 1184 | struct fastrpc_session_ctx *sess; |
| 1185 | struct device *dev = &pdev->dev; | 1185 | struct device *dev = &pdev->dev; |
| 1186 | int i, sessions = 0; | 1186 | int i, sessions = 0; |
| 1187 | int rc; | ||
| 1187 | 1188 | ||
| 1188 | cctx = dev_get_drvdata(dev->parent); | 1189 | cctx = dev_get_drvdata(dev->parent); |
| 1189 | if (!cctx) | 1190 | if (!cctx) |
| @@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev) | |||
| 1213 | } | 1214 | } |
| 1214 | cctx->sesscount++; | 1215 | cctx->sesscount++; |
| 1215 | spin_unlock(&cctx->lock); | 1216 | spin_unlock(&cctx->lock); |
| 1216 | dma_set_mask(dev, DMA_BIT_MASK(32)); | 1217 | rc = dma_set_mask(dev, DMA_BIT_MASK(32)); |
| 1218 | if (rc) { | ||
| 1219 | dev_err(dev, "32-bit DMA enable failed\n"); | ||
| 1220 | return rc; | ||
| 1221 | } | ||
| 1217 | 1222 | ||
| 1218 | return 0; | 1223 | return 0; |
| 1219 | } | 1224 | } |
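The fastrpc hunk checks the return value of dma_set_mask(), which can fail when the bus or IOMMU cannot provide the requested addressing; proceeding anyway would leave later mappings using the wrong mask. A minimal form of the pattern (the helper name is hypothetical; dma_set_mask() and dev_err() are the real kernel API):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Minimal pattern, not the fastrpc probe: propagate a dma_set_mask()
 * failure instead of ignoring it. */
static int set_dma_caps(struct device *dev)
{
        int rc = dma_set_mask(dev, DMA_BIT_MASK(32));

        if (rc)
                dev_err(dev, "32-bit DMA enable failed: %d\n", rc);
        return rc;
}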
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ea979ebd62fb..3c509e19d69d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c | |||
| @@ -1688,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev) | |||
| 1688 | 1688 | ||
| 1689 | /* | 1689 | /* |
| 1690 | * Workaround for H2 #HW-23 bug | 1690 | * Workaround for H2 #HW-23 bug |
| 1691 | * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it | 1691 | * Set DMA max outstanding read requests to 240 on DMA CH 1. |
| 1692 | * to 16 on KMD DMA | 1692 | * This limitation is still large enough to not affect Gen4 bandwidth. |
| 1693 | * We need to limit only these DMAs because the user can only read | 1693 | * We need to only limit that DMA channel because the user can only read |
| 1694 | * from Host using DMA CH 1 | 1694 | * from Host using DMA CH 1 |
| 1695 | */ | 1695 | */ |
| 1696 | WREG32(mmDMA_CH_0_CFG0, 0x0fff0010); | ||
| 1697 | WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); | 1696 | WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); |
| 1698 | 1697 | ||
| 1699 | goya->hw_cap_initialized |= HW_CAP_GOLDEN; | 1698 | goya->hw_cap_initialized |= HW_CAP_GOLDEN; |
| @@ -3693,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, | |||
| 3693 | * WA for HW-23. | 3692 | * WA for HW-23. |
| 3694 | * We can't allow user to read from Host using QMANs other than 1. | 3693 | * We can't allow user to read from Host using QMANs other than 1. |
| 3695 | */ | 3694 | */ |
| 3696 | if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 && | 3695 | if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 && |
| 3697 | hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr), | 3696 | hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr), |
| 3698 | le32_to_cpu(user_dma_pkt->tsize), | 3697 | le32_to_cpu(user_dma_pkt->tsize), |
| 3699 | hdev->asic_prop.va_space_host_start_address, | 3698 | hdev->asic_prop.va_space_host_start_address, |
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index 82a97866e0cf..7c8f203f9a24 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c | |||
| @@ -48,7 +48,6 @@ struct alcor_sdmmc_host { | |||
| 48 | struct mmc_command *cmd; | 48 | struct mmc_command *cmd; |
| 49 | struct mmc_data *data; | 49 | struct mmc_data *data; |
| 50 | unsigned int dma_on:1; | 50 | unsigned int dma_on:1; |
| 51 | unsigned int early_data:1; | ||
| 52 | 51 | ||
| 53 | struct mutex cmd_mutex; | 52 | struct mutex cmd_mutex; |
| 54 | 53 | ||
| @@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host) | |||
| 144 | host->sg_count--; | 143 | host->sg_count--; |
| 145 | } | 144 | } |
| 146 | 145 | ||
| 147 | static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, | 146 | static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host) |
| 148 | bool early) | ||
| 149 | { | 147 | { |
| 150 | struct alcor_pci_priv *priv = host->alcor_pci; | 148 | struct alcor_pci_priv *priv = host->alcor_pci; |
| 151 | struct mmc_data *data = host->data; | 149 | struct mmc_data *data = host->data; |
| @@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, | |||
| 155 | ctrl |= AU6601_DATA_WRITE; | 153 | ctrl |= AU6601_DATA_WRITE; |
| 156 | 154 | ||
| 157 | if (data->host_cookie == COOKIE_MAPPED) { | 155 | if (data->host_cookie == COOKIE_MAPPED) { |
| 158 | if (host->early_data) { | ||
| 159 | host->early_data = false; | ||
| 160 | return; | ||
| 161 | } | ||
| 162 | |||
| 163 | host->early_data = early; | ||
| 164 | |||
| 165 | alcor_data_set_dma(host); | 156 | alcor_data_set_dma(host); |
| 166 | ctrl |= AU6601_DATA_DMA_MODE; | 157 | ctrl |= AU6601_DATA_DMA_MODE; |
| 167 | host->dma_on = 1; | 158 | host->dma_on = 1; |
| @@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host) | |||
| 231 | static void alcor_prepare_data(struct alcor_sdmmc_host *host, | 222 | static void alcor_prepare_data(struct alcor_sdmmc_host *host, |
| 232 | struct mmc_command *cmd) | 223 | struct mmc_command *cmd) |
| 233 | { | 224 | { |
| 225 | struct alcor_pci_priv *priv = host->alcor_pci; | ||
| 234 | struct mmc_data *data = cmd->data; | 226 | struct mmc_data *data = cmd->data; |
| 235 | 227 | ||
| 236 | if (!data) | 228 | if (!data) |
| @@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host, | |||
| 248 | if (data->host_cookie != COOKIE_MAPPED) | 240 | if (data->host_cookie != COOKIE_MAPPED) |
| 249 | alcor_prepare_sg_miter(host); | 241 | alcor_prepare_sg_miter(host); |
| 250 | 242 | ||
| 251 | alcor_trigger_data_transfer(host, true); | 243 | alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL); |
| 252 | } | 244 | } |
| 253 | 245 | ||
| 254 | static void alcor_send_cmd(struct alcor_sdmmc_host *host, | 246 | static void alcor_send_cmd(struct alcor_sdmmc_host *host, |
| @@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask) | |||
| 435 | if (!host->data) | 427 | if (!host->data) |
| 436 | return false; | 428 | return false; |
| 437 | 429 | ||
| 438 | alcor_trigger_data_transfer(host, false); | 430 | alcor_trigger_data_transfer(host); |
| 439 | host->cmd = NULL; | 431 | host->cmd = NULL; |
| 440 | return true; | 432 | return true; |
| 441 | } | 433 | } |
| @@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) | |||
| 456 | if (!host->data) | 448 | if (!host->data) |
| 457 | alcor_request_complete(host, 1); | 449 | alcor_request_complete(host, 1); |
| 458 | else | 450 | else |
| 459 | alcor_trigger_data_transfer(host, false); | 451 | alcor_trigger_data_transfer(host); |
| 460 | host->cmd = NULL; | 452 | host->cmd = NULL; |
| 461 | } | 453 | } |
| 462 | 454 | ||
| @@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) | |||
| 487 | break; | 479 | break; |
| 488 | case AU6601_INT_READ_BUF_RDY: | 480 | case AU6601_INT_READ_BUF_RDY: |
| 489 | alcor_trf_block_pio(host, true); | 481 | alcor_trf_block_pio(host, true); |
| 490 | if (!host->blocks) | ||
| 491 | break; | ||
| 492 | alcor_trigger_data_transfer(host, false); | ||
| 493 | return 1; | 482 | return 1; |
| 494 | case AU6601_INT_WRITE_BUF_RDY: | 483 | case AU6601_INT_WRITE_BUF_RDY: |
| 495 | alcor_trf_block_pio(host, false); | 484 | alcor_trf_block_pio(host, false); |
| 496 | if (!host->blocks) | ||
| 497 | break; | ||
| 498 | alcor_trigger_data_transfer(host, false); | ||
| 499 | return 1; | 485 | return 1; |
| 500 | case AU6601_INT_DMA_END: | 486 | case AU6601_INT_DMA_END: |
| 501 | if (!host->sg_count) | 487 | if (!host->sg_count) |
| @@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) | |||
| 508 | break; | 494 | break; |
| 509 | } | 495 | } |
| 510 | 496 | ||
| 511 | if (intmask & AU6601_INT_DATA_END) | 497 | if (intmask & AU6601_INT_DATA_END) { |
| 512 | return 0; | 498 | if (!host->dma_on && host->blocks) { |
| 499 | alcor_trigger_data_transfer(host); | ||
| 500 | return 1; | ||
| 501 | } else { | ||
| 502 | return 0; | ||
| 503 | } | ||
| 504 | } | ||
| 513 | 505 | ||
| 514 | return 1; | 506 | return 1; |
| 515 | } | 507 | } |
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 5bbed477c9b1..9f20fff9781b 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c | |||
| @@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask) | |||
| 797 | sdhci_reset(host, mask); | 797 | sdhci_reset(host, mask); |
| 798 | } | 798 | } |
| 799 | 799 | ||
| 800 | #define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\ | ||
| 801 | SDHCI_INT_TIMEOUT) | ||
| 802 | #define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE) | ||
| 803 | |||
| 804 | static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) | ||
| 805 | { | ||
| 806 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | ||
| 807 | struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); | ||
| 808 | |||
| 809 | if (omap_host->is_tuning && host->cmd && !host->data_early && | ||
| 810 | (intmask & CMD_ERR_MASK)) { | ||
| 811 | |||
| 812 | /* | ||
| 813 | * Since we are not resetting data lines during tuning | ||
| 814 | * operation, data error or data complete interrupts | ||
| 815 | * might still arrive. Mark this request as a failure | ||
| 816 | * but still wait for the data interrupt | ||
| 817 | */ | ||
| 818 | if (intmask & SDHCI_INT_TIMEOUT) | ||
| 819 | host->cmd->error = -ETIMEDOUT; | ||
| 820 | else | ||
| 821 | host->cmd->error = -EILSEQ; | ||
| 822 | |||
| 823 | host->cmd = NULL; | ||
| 824 | |||
| 825 | /* | ||
| 826 | * Sometimes command error interrupts and command complete | ||
| 827 | * interrupt will arrive together. Clear all command related | ||
| 828 | * interrupts here. | ||
| 829 | */ | ||
| 830 | sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS); | ||
| 831 | intmask &= ~CMD_MASK; | ||
| 832 | } | ||
| 833 | |||
| 834 | return intmask; | ||
| 835 | } | ||
| 836 | |||
| 800 | static struct sdhci_ops sdhci_omap_ops = { | 837 | static struct sdhci_ops sdhci_omap_ops = { |
| 801 | .set_clock = sdhci_omap_set_clock, | 838 | .set_clock = sdhci_omap_set_clock, |
| 802 | .set_power = sdhci_omap_set_power, | 839 | .set_power = sdhci_omap_set_power, |
| @@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = { | |||
| 807 | .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, | 844 | .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, |
| 808 | .reset = sdhci_omap_reset, | 845 | .reset = sdhci_omap_reset, |
| 809 | .set_uhs_signaling = sdhci_omap_set_uhs_signaling, | 846 | .set_uhs_signaling = sdhci_omap_set_uhs_signaling, |
| 847 | .irq = sdhci_omap_irq, | ||
| 810 | }; | 848 | }; |
| 811 | 849 | ||
| 812 | static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) | 850 | static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) |
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 72428b6bfc47..7b7286b4d81e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
| @@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
| 1876 | continue; | 1876 | continue; |
| 1877 | } | 1877 | } |
| 1878 | 1878 | ||
| 1879 | if (time_after(jiffies, timeo) && !chip_ready(map, adr)) | 1879 | /* |
| 1880 | * We check "time_after" and "!chip_good" before checking "chip_good" to avoid | ||
| 1881 | * the failure due to scheduling. | ||
| 1882 | */ | ||
| 1883 | if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) | ||
| 1880 | break; | 1884 | break; |
| 1881 | 1885 | ||
| 1882 | if (chip_good(map, adr, datum)) { | 1886 | if (chip_good(map, adr, datum)) { |
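The cfi_cmdset_0002 change guards against a false timeout: if the thread is scheduled out between polling the chip and testing the deadline, the operation may have completed even though the deadline has passed, so the timeout branch re-checks completion in the same expression. A generic, runnable form of the pattern (userspace clock instead of jiffies; done() stands in for chip_good()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative userspace form of the polling rule: only declare a timeout
 * if the deadline has passed *and* the completion test still fails at that
 * moment, so a thread that slept past the deadline but whose operation
 * finished is not treated as a failure. */
static bool done(void)
{
        return true;    /* stands in for chip_good(map, adr, datum) */
}

static int wait_until(time_t deadline)
{
        for (;;) {
                if (time(NULL) > deadline && !done())
                        return -1;      /* genuine timeout */
                if (done())
                        return 0;       /* success, even if we woke up late */
        }
}

int main(void)
{
        printf("%d\n", wait_until(time(NULL) - 1));  /* deadline already past */
        return 0;
}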
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b59708c35faf..ee610721098e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this, | |||
| 3213 | return NOTIFY_DONE; | 3213 | return NOTIFY_DONE; |
| 3214 | 3214 | ||
| 3215 | if (event_dev->flags & IFF_MASTER) { | 3215 | if (event_dev->flags & IFF_MASTER) { |
| 3216 | int ret; | ||
| 3217 | |||
| 3216 | netdev_dbg(event_dev, "IFF_MASTER\n"); | 3218 | netdev_dbg(event_dev, "IFF_MASTER\n"); |
| 3217 | return bond_master_netdev_event(event, event_dev); | 3219 | ret = bond_master_netdev_event(event, event_dev); |
| 3220 | if (ret != NOTIFY_DONE) | ||
| 3221 | return ret; | ||
| 3218 | } | 3222 | } |
| 3219 | 3223 | ||
| 3220 | if (event_dev->flags & IFF_SLAVE) { | 3224 | if (event_dev->flags & IFF_SLAVE) { |
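The bonding notifier now returns early only when the master-side handler actually consumed the event, so a device carrying both IFF_MASTER and IFF_SLAVE still reaches the slave-side handling. A toy model of that fall-through (the NOTIFY_* values match the kernel's; the flag values and handlers are invented):

#include <stdio.h>

#define NOTIFY_DONE 0x0000
#define NOTIFY_OK   0x0001

#define TOY_MASTER 0x1          /* illustrative flag values, not the kernel's */
#define TOY_SLAVE  0x2

/* Toy notifier: the master-side handler only short-circuits the slave-side
 * handling when it actually consumed the event; with both flags set, both
 * paths can run. */
static int master_event(void)  { return NOTIFY_DONE; }  /* nothing to do */
static int slave_event(void)   { return NOTIFY_OK;   }

static int netdev_event(unsigned int flags)
{
        if (flags & TOY_MASTER) {
                int ret = master_event();

                if (ret != NOTIFY_DONE)
                        return ret;     /* consumed by the master handler */
        }
        if (flags & TOY_SLAVE)
                return slave_event();
        return NOTIFY_DONE;
}

int main(void)
{
        printf("master+slave -> %d (slave path reached)\n",
               netdev_event(TOY_MASTER | TOY_SLAVE));
        return 0;
}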
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c | |||
| @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); | |||
| 55 | 55 | ||
| 56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) | 56 | static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) |
| 57 | { | 57 | { |
| 58 | return sprintf(buf, "%pM\n", slave->perm_hwaddr); | 58 | return sprintf(buf, "%*phC\n", |
| 59 | slave->dev->addr_len, | ||
| 60 | slave->perm_hwaddr); | ||
| 59 | } | 61 | } |
| 60 | static SLAVE_ATTR_RO(perm_hwaddr); | 62 | static SLAVE_ATTR_RO(perm_hwaddr); |
| 61 | 63 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c | |||
| @@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 427 | return 0; | 427 | return 0; |
| 428 | 428 | ||
| 429 | lane = mv88e6390x_serdes_get_lane(chip, port); | 429 | lane = mv88e6390x_serdes_get_lane(chip, port); |
| 430 | if (lane < 0) | 430 | if (lane < 0 && lane != -ENODEV) |
| 431 | return lane; | 431 | return lane; |
| 432 | 432 | ||
| 433 | if (chip->ports[port].serdes_irq) { | 433 | if (lane >= 0) { |
| 434 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | 434 | if (chip->ports[port].serdes_irq) { |
| 435 | err = mv88e6390_serdes_irq_disable(chip, port, lane); | ||
| 436 | if (err) | ||
| 437 | return err; | ||
| 438 | } | ||
| 439 | |||
| 440 | err = mv88e6390x_serdes_power(chip, port, false); | ||
| 435 | if (err) | 441 | if (err) |
| 436 | return err; | 442 | return err; |
| 437 | } | 443 | } |
| 438 | 444 | ||
| 439 | err = mv88e6390x_serdes_power(chip, port, false); | 445 | chip->ports[port].cmode = 0; |
| 440 | if (err) | ||
| 441 | return err; | ||
| 442 | 446 | ||
| 443 | if (cmode) { | 447 | if (cmode) { |
| 444 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); | 448 | err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); |
| @@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 452 | if (err) | 456 | if (err) |
| 453 | return err; | 457 | return err; |
| 454 | 458 | ||
| 459 | chip->ports[port].cmode = cmode; | ||
| 460 | |||
| 461 | lane = mv88e6390x_serdes_get_lane(chip, port); | ||
| 462 | if (lane < 0) | ||
| 463 | return lane; | ||
| 464 | |||
| 455 | err = mv88e6390x_serdes_power(chip, port, true); | 465 | err = mv88e6390x_serdes_power(chip, port, true); |
| 456 | if (err) | 466 | if (err) |
| 457 | return err; | 467 | return err; |
| @@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 463 | } | 473 | } |
| 464 | } | 474 | } |
| 465 | 475 | ||
| 466 | chip->ports[port].cmode = cmode; | ||
| 467 | |||
| 468 | return 0; | 476 | return 0; |
| 469 | } | 477 | } |
| 470 | 478 | ||
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 9e07b469066a..156fbc5601ca 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c | |||
| @@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) | |||
| 1721 | adapter->soft_stats.scc += smb->tx_1_col; | 1721 | adapter->soft_stats.scc += smb->tx_1_col; |
| 1722 | adapter->soft_stats.mcc += smb->tx_2_col; | 1722 | adapter->soft_stats.mcc += smb->tx_2_col; |
| 1723 | adapter->soft_stats.latecol += smb->tx_late_col; | 1723 | adapter->soft_stats.latecol += smb->tx_late_col; |
| 1724 | adapter->soft_stats.tx_underun += smb->tx_underrun; | 1724 | adapter->soft_stats.tx_underrun += smb->tx_underrun; |
| 1725 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | 1725 | adapter->soft_stats.tx_trunc += smb->tx_trunc; |
| 1726 | adapter->soft_stats.tx_pause += smb->tx_pause; | 1726 | adapter->soft_stats.tx_pause += smb->tx_pause; |
| 1727 | 1727 | ||
| @@ -3179,7 +3179,7 @@ static struct atl1_stats atl1_gstrings_stats[] = { | |||
| 3179 | {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, | 3179 | {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, |
| 3180 | {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, | 3180 | {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, |
| 3181 | {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, | 3181 | {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, |
| 3182 | {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, | 3182 | {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)}, |
| 3183 | {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, | 3183 | {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, |
| 3184 | {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, | 3184 | {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, |
| 3185 | {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, | 3185 | {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, |
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index 34a58cd846a0..eacff19ea05b 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h | |||
| @@ -681,7 +681,7 @@ struct atl1_sft_stats { | |||
| 681 | u64 scc; /* packets TX after a single collision */ | 681 | u64 scc; /* packets TX after a single collision */ |
| 682 | u64 mcc; /* packets TX after multiple collisions */ | 682 | u64 mcc; /* packets TX after multiple collisions */ |
| 683 | u64 latecol; /* TX packets w/ late collisions */ | 683 | u64 latecol; /* TX packets w/ late collisions */ |
| 684 | u64 tx_underun; /* TX packets aborted due to TX FIFO underrun | 684 | u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun |
| 685 | * or TRD FIFO underrun */ | 685 | * or TRD FIFO underrun */ |
| 686 | u64 tx_trunc; /* TX packets truncated due to size > MTU */ | 686 | u64 tx_trunc; /* TX packets truncated due to size > MTU */ |
| 687 | u64 rx_pause; /* num Pause packets received. */ | 687 | u64 rx_pause; /* num Pause packets received. */ |
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index d99317b3d891..98da0fa27192 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c | |||
| @@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter) | |||
| 553 | netdev->stats.tx_aborted_errors++; | 553 | netdev->stats.tx_aborted_errors++; |
| 554 | if (txs->late_col) | 554 | if (txs->late_col) |
| 555 | netdev->stats.tx_window_errors++; | 555 | netdev->stats.tx_window_errors++; |
| 556 | if (txs->underun) | 556 | if (txs->underrun) |
| 557 | netdev->stats.tx_fifo_errors++; | 557 | netdev->stats.tx_fifo_errors++; |
| 558 | } while (1); | 558 | } while (1); |
| 559 | 559 | ||
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h index c64a6bdfa7ae..25ec84cb4853 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.h +++ b/drivers/net/ethernet/atheros/atlx/atl2.h | |||
| @@ -260,7 +260,7 @@ struct tx_pkt_status { | |||
| 260 | unsigned multi_col:1; | 260 | unsigned multi_col:1; |
| 261 | unsigned late_col:1; | 261 | unsigned late_col:1; |
| 262 | unsigned abort_col:1; | 262 | unsigned abort_col:1; |
| 263 | unsigned underun:1; /* current packet is aborted | 263 | unsigned underrun:1; /* current packet is aborted |
| 264 | * due to txram underrun */ | 264 | * due to txram underrun */ |
| 265 | unsigned:3; /* reserved */ | 265 | unsigned:3; /* reserved */ |
| 266 | unsigned update:1; /* always 1'b1 in tx_status_buf */ | 266 | unsigned update:1; /* always 1'b1 in tx_status_buf */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a9bdc21873d3..10ff37d6dc78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
| @@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) | |||
| 957 | bnx2x_sample_bulletin(bp); | 957 | bnx2x_sample_bulletin(bp); |
| 958 | 958 | ||
| 959 | if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { | 959 | if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { |
| 960 | BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); | 960 | BNX2X_ERR("Hypervisor will decline the request, avoiding\n"); |
| 961 | rc = -EINVAL; | 961 | rc = -EINVAL; |
| 962 | goto out; | 962 | goto out; |
| 963 | } | 963 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..4c586ba4364b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, | |||
| 1133 | tpa_info = &rxr->rx_tpa[agg_id]; | 1133 | tpa_info = &rxr->rx_tpa[agg_id]; |
| 1134 | 1134 | ||
| 1135 | if (unlikely(cons != rxr->rx_next_cons)) { | 1135 | if (unlikely(cons != rxr->rx_next_cons)) { |
| 1136 | netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", | ||
| 1137 | cons, rxr->rx_next_cons); | ||
| 1136 | bnxt_sched_reset(bp, rxr); | 1138 | bnxt_sched_reset(bp, rxr); |
| 1137 | return; | 1139 | return; |
| 1138 | } | 1140 | } |
| @@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, | |||
| 1585 | } | 1587 | } |
| 1586 | 1588 | ||
| 1587 | cons = rxcmp->rx_cmp_opaque; | 1589 | cons = rxcmp->rx_cmp_opaque; |
| 1588 | rx_buf = &rxr->rx_buf_ring[cons]; | ||
| 1589 | data = rx_buf->data; | ||
| 1590 | data_ptr = rx_buf->data_ptr; | ||
| 1591 | if (unlikely(cons != rxr->rx_next_cons)) { | 1590 | if (unlikely(cons != rxr->rx_next_cons)) { |
| 1592 | int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); | 1591 | int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); |
| 1593 | 1592 | ||
| 1593 | netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", | ||
| 1594 | cons, rxr->rx_next_cons); | ||
| 1594 | bnxt_sched_reset(bp, rxr); | 1595 | bnxt_sched_reset(bp, rxr); |
| 1595 | return rc1; | 1596 | return rc1; |
| 1596 | } | 1597 | } |
| 1598 | rx_buf = &rxr->rx_buf_ring[cons]; | ||
| 1599 | data = rx_buf->data; | ||
| 1600 | data_ptr = rx_buf->data_ptr; | ||
| 1597 | prefetch(data_ptr); | 1601 | prefetch(data_ptr); |
| 1598 | 1602 | ||
| 1599 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); | 1603 | misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); |
| @@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, | |||
| 1610 | 1614 | ||
| 1611 | rx_buf->data = NULL; | 1615 | rx_buf->data = NULL; |
| 1612 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { | 1616 | if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { |
| 1617 | u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); | ||
| 1618 | |||
| 1613 | bnxt_reuse_rx_data(rxr, cons, data); | 1619 | bnxt_reuse_rx_data(rxr, cons, data); |
| 1614 | if (agg_bufs) | 1620 | if (agg_bufs) |
| 1615 | bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); | 1621 | bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); |
| 1616 | 1622 | ||
| 1617 | rc = -EIO; | 1623 | rc = -EIO; |
| 1624 | if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { | ||
| 1625 | netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); | ||
| 1626 | bnxt_sched_reset(bp, rxr); | ||
| 1627 | } | ||
| 1618 | goto next_rx; | 1628 | goto next_rx; |
| 1619 | } | 1629 | } |
| 1620 | 1630 | ||
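The bnxt hunks above all enforce one ordering rule: validate the consumer index reported in the completion against the index the driver expects before dereferencing the ring entry, warn when they disagree, and schedule a ring reset. The following is a minimal standalone sketch of that pattern; the types and names are illustrative stand-ins, not the bnxt structures.

#include <stdio.h>

#define RING_SIZE 8

struct rx_buf { void *data; };

struct rx_ring {
	struct rx_buf bufs[RING_SIZE];
	unsigned int next_cons;	/* index the driver expects next */
};

/* Return the buffer only when the hardware-reported index matches. */
static struct rx_buf *ring_get_buf(struct rx_ring *ring, unsigned int cons)
{
	if (cons != ring->next_cons) {
		/* out of sync with hardware: log and bail, caller resets */
		fprintf(stderr, "RX cons %x != expected cons %x\n",
			cons, ring->next_cons);
		return NULL;
	}
	return &ring->bufs[cons];
}

int main(void)
{
	struct rx_ring ring = { .next_cons = 3 };

	/* in-sync completion: safe to touch the ring entry */
	printf("match: %p\n", (void *)ring_get_buf(&ring, 3));
	/* stale or corrupt completion: never dereferenced */
	printf("mismatch: %p\n", (void *)ring_get_buf(&ring, 5));
	return 0;
}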
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..060a6f386104 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp) | |||
| 4283 | pci_set_power_state(tp->pdev, PCI_D3hot); | 4283 | pci_set_power_state(tp->pdev, PCI_D3hot); |
| 4284 | } | 4284 | } |
| 4285 | 4285 | ||
| 4286 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) | 4286 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) |
| 4287 | { | 4287 | { |
| 4288 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { | 4288 | switch (val & MII_TG3_AUX_STAT_SPDMASK) { |
| 4289 | case MII_TG3_AUX_STAT_10HALF: | 4289 | case MII_TG3_AUX_STAT_10HALF: |
| @@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) | |||
| 4787 | bool current_link_up; | 4787 | bool current_link_up; |
| 4788 | u32 bmsr, val; | 4788 | u32 bmsr, val; |
| 4789 | u32 lcl_adv, rmt_adv; | 4789 | u32 lcl_adv, rmt_adv; |
| 4790 | u16 current_speed; | 4790 | u32 current_speed; |
| 4791 | u8 current_duplex; | 4791 | u8 current_duplex; |
| 4792 | int i, err; | 4792 | int i, err; |
| 4793 | 4793 | ||
| @@ -5719,7 +5719,7 @@ out: | |||
| 5719 | static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) | 5719 | static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) |
| 5720 | { | 5720 | { |
| 5721 | u32 orig_pause_cfg; | 5721 | u32 orig_pause_cfg; |
| 5722 | u16 orig_active_speed; | 5722 | u32 orig_active_speed; |
| 5723 | u8 orig_active_duplex; | 5723 | u8 orig_active_duplex; |
| 5724 | u32 mac_status; | 5724 | u32 mac_status; |
| 5725 | bool current_link_up; | 5725 | bool current_link_up; |
| @@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) | |||
| 5823 | { | 5823 | { |
| 5824 | int err = 0; | 5824 | int err = 0; |
| 5825 | u32 bmsr, bmcr; | 5825 | u32 bmsr, bmcr; |
| 5826 | u16 current_speed = SPEED_UNKNOWN; | 5826 | u32 current_speed = SPEED_UNKNOWN; |
| 5827 | u8 current_duplex = DUPLEX_UNKNOWN; | 5827 | u8 current_duplex = DUPLEX_UNKNOWN; |
| 5828 | bool current_link_up = false; | 5828 | bool current_link_up = false; |
| 5829 | u32 local_adv, remote_adv, sgsr; | 5829 | u32 local_adv, remote_adv, sgsr; |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b685c..6953d0546acb 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
| @@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info { | |||
| 2873 | struct tg3_link_config { | 2873 | struct tg3_link_config { |
| 2874 | /* Describes what we're trying to get. */ | 2874 | /* Describes what we're trying to get. */ |
| 2875 | u32 advertising; | 2875 | u32 advertising; |
| 2876 | u16 speed; | 2876 | u32 speed; |
| 2877 | u8 duplex; | 2877 | u8 duplex; |
| 2878 | u8 autoneg; | 2878 | u8 autoneg; |
| 2879 | u8 flowctrl; | 2879 | u8 flowctrl; |
| @@ -2882,7 +2882,7 @@ struct tg3_link_config { | |||
| 2882 | u8 active_flowctrl; | 2882 | u8 active_flowctrl; |
| 2883 | 2883 | ||
| 2884 | u8 active_duplex; | 2884 | u8 active_duplex; |
| 2885 | u16 active_speed; | 2885 | u32 active_speed; |
| 2886 | u32 rmt_adv; | 2886 | u32 rmt_adv; |
| 2887 | }; | 2887 | }; |
| 2888 | 2888 | ||
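The u16-to-u32 widening in the tg3 hunks matters because SPEED_UNKNOWN is ((__u32)-1) in the ethtool UAPI: stored in a u16 it truncates to 0xffff, so the "unknown" value no longer compares equal to SPEED_UNKNOWN and ethtool ends up reporting a bogus 65535 Mb/s link. A standalone demonstration of the truncation, with the constant redefined locally to mirror linux/ethtool.h:

#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN ((uint32_t)-1)	/* 0xffffffff, as in linux/ethtool.h */

int main(void)
{
	uint16_t speed16 = SPEED_UNKNOWN;	/* silently truncates to 0xffff */
	uint32_t speed32 = SPEED_UNKNOWN;

	printf("u16 copy: %u (equals SPEED_UNKNOWN? %s)\n",
	       speed16, (uint32_t)speed16 == SPEED_UNKNOWN ? "yes" : "no");
	printf("u32 copy: %u (equals SPEED_UNKNOWN? %s)\n",
	       speed32, speed32 == SPEED_UNKNOWN ? "yes" : "no");
	return 0;
}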
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1522aee81884..3da2795e2486 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) | |||
| 898 | 898 | ||
| 899 | /* First, update TX stats if needed */ | 899 | /* First, update TX stats if needed */ |
| 900 | if (skb) { | 900 | if (skb) { |
| 901 | if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { | 901 | if (unlikely(skb_shinfo(skb)->tx_flags & |
| 902 | SKBTX_HW_TSTAMP) && | ||
| 903 | gem_ptp_do_txstamp(queue, skb, desc) == 0) { | ||
| 902 | /* skb now belongs to timestamp buffer | 904 | /* skb now belongs to timestamp buffer |
| 903 | * and will be removed later | 905 | * and will be removed later |
| 904 | */ | 906 | */ |
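The macb hunk only hands a completed skb to the PTP timestamp path when its sender actually requested a hardware timestamp (SKBTX_HW_TSTAMP in skb_shinfo(skb)->tx_flags); without the guard, any completed skb could be diverted into the timestamp buffer. A small stand-alone sketch of the guard, with stand-in types and a stub in place of gem_ptp_do_txstamp():

#include <stdio.h>

#define SKBTX_HW_TSTAMP (1 << 0)	/* same meaning as the kernel flag */

struct fake_skb { unsigned char tx_flags; };

/* Stand-in for gem_ptp_do_txstamp(): 0 means the timestamp path took the skb */
static int do_txstamp(struct fake_skb *skb) { (void)skb; return 0; }

static void tx_complete(struct fake_skb *skb)
{
	if ((skb->tx_flags & SKBTX_HW_TSTAMP) && do_txstamp(skb) == 0) {
		printf("skb kept for timestamping\n");
		return;		/* skb now belongs to the timestamp buffer */
	}
	printf("skb freed normally\n");
}

int main(void)
{
	struct fake_skb plain = { .tx_flags = 0 };
	struct fake_skb stamped = { .tx_flags = SKBTX_HW_TSTAMP };

	tx_complete(&plain);	/* freed on the normal completion path */
	tx_complete(&stamped);	/* diverted to the timestamp buffer */
	return 0;
}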
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aa2be4807191..c032bef1b776 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -32,6 +32,13 @@ | |||
| 32 | #define DRV_NAME "nicvf" | 32 | #define DRV_NAME "nicvf" |
| 33 | #define DRV_VERSION "1.0" | 33 | #define DRV_VERSION "1.0" |
| 34 | 34 | ||
| 35 | /* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs | ||
| 36 | * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed | ||
| 37 | * this value, keeping headroom for the 14 byte Ethernet header and two | ||
| 38 | * VLAN tags (for QinQ) | ||
| 39 | */ | ||
| 40 | #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) | ||
| 41 | |||
| 35 | /* Supported devices */ | 42 | /* Supported devices */ |
| 36 | static const struct pci_device_id nicvf_id_table[] = { | 43 | static const struct pci_device_id nicvf_id_table[] = { |
| 37 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, | 44 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, |
| @@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev) | |||
| 1328 | struct nicvf_cq_poll *cq_poll = NULL; | 1335 | struct nicvf_cq_poll *cq_poll = NULL; |
| 1329 | union nic_mbx mbx = {}; | 1336 | union nic_mbx mbx = {}; |
| 1330 | 1337 | ||
| 1331 | cancel_delayed_work_sync(&nic->link_change_work); | ||
| 1332 | |||
| 1333 | /* wait till all queued set_rx_mode tasks completes */ | 1338 | /* wait till all queued set_rx_mode tasks completes */ |
| 1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1339 | if (nic->nicvf_rx_mode_wq) { |
| 1340 | cancel_delayed_work_sync(&nic->link_change_work); | ||
| 1341 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
| 1342 | } | ||
| 1335 | 1343 | ||
| 1336 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | 1344 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; |
| 1337 | nicvf_send_msg_to_pf(nic, &mbx); | 1345 | nicvf_send_msg_to_pf(nic, &mbx); |
| @@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev) | |||
| 1452 | struct nicvf_cq_poll *cq_poll = NULL; | 1460 | struct nicvf_cq_poll *cq_poll = NULL; |
| 1453 | 1461 | ||
| 1454 | /* wait till all queued set_rx_mode tasks completes if any */ | 1462 | /* wait till all queued set_rx_mode tasks completes if any */ |
| 1455 | drain_workqueue(nic->nicvf_rx_mode_wq); | 1463 | if (nic->nicvf_rx_mode_wq) |
| 1464 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
| 1456 | 1465 | ||
| 1457 | netif_carrier_off(netdev); | 1466 | netif_carrier_off(netdev); |
| 1458 | 1467 | ||
| @@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev) | |||
| 1550 | /* Send VF config done msg to PF */ | 1559 | /* Send VF config done msg to PF */ |
| 1551 | nicvf_send_cfg_done(nic); | 1560 | nicvf_send_cfg_done(nic); |
| 1552 | 1561 | ||
| 1553 | INIT_DELAYED_WORK(&nic->link_change_work, | 1562 | if (nic->nicvf_rx_mode_wq) { |
| 1554 | nicvf_link_status_check_task); | 1563 | INIT_DELAYED_WORK(&nic->link_change_work, |
| 1555 | queue_delayed_work(nic->nicvf_rx_mode_wq, | 1564 | nicvf_link_status_check_task); |
| 1556 | &nic->link_change_work, 0); | 1565 | queue_delayed_work(nic->nicvf_rx_mode_wq, |
| 1566 | &nic->link_change_work, 0); | ||
| 1567 | } | ||
| 1557 | 1568 | ||
| 1558 | return 0; | 1569 | return 0; |
| 1559 | cleanup: | 1570 | cleanup: |
| @@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 1578 | struct nicvf *nic = netdev_priv(netdev); | 1589 | struct nicvf *nic = netdev_priv(netdev); |
| 1579 | int orig_mtu = netdev->mtu; | 1590 | int orig_mtu = netdev->mtu; |
| 1580 | 1591 | ||
| 1592 | /* For now just support only the usual MTU sized frames, | ||
| 1593 | * plus some headroom for VLAN, QinQ. | ||
| 1594 | */ | ||
| 1595 | if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { | ||
| 1596 | netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", | ||
| 1597 | netdev->mtu); | ||
| 1598 | return -EINVAL; | ||
| 1599 | } | ||
| 1600 | |||
| 1581 | netdev->mtu = new_mtu; | 1601 | netdev->mtu = new_mtu; |
| 1582 | 1602 | ||
| 1583 | if (!netif_running(netdev)) | 1603 | if (!netif_running(netdev)) |
| @@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) | |||
| 1826 | bool bpf_attached = false; | 1846 | bool bpf_attached = false; |
| 1827 | int ret = 0; | 1847 | int ret = 0; |
| 1828 | 1848 | ||
| 1829 | /* For now just support only the usual MTU sized frames */ | 1849 | /* For now just support only the usual MTU sized frames, |
| 1830 | if (prog && (dev->mtu > 1500)) { | 1850 | * plus some headroom for VLAN, QinQ. |
| 1851 | */ | ||
| 1852 | if (prog && dev->mtu > MAX_XDP_MTU) { | ||
| 1831 | netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", | 1853 | netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", |
| 1832 | dev->mtu); | 1854 | dev->mtu); |
| 1833 | return -EOPNOTSUPP; | 1855 | return -EOPNOTSUPP; |
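The MAX_XDP_MTU limit introduced above is plain arithmetic over well-known header sizes: with ETH_HLEN = 14 and VLAN_HLEN = 4, the limit is 1530 - 14 - 2*4 = 1508, so the usual 1500-byte MTU still passes while jumbo frames are rejected both when the MTU is changed and when an XDP program is attached. Redone here as a standalone check:

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define MAX_XDP_MTU	(1530 - ETH_HLEN - VLAN_HLEN * 2)	/* = 1508 */

int main(void)
{
	int mtus[] = { 1500, 1508, 1509, 9000 };

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("MTU %4d: %s with XDP\n", mtus[i],
		       mtus[i] > MAX_XDP_MTU ? "rejected" : "allowed");
	return 0;
}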
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5b4d3badcb73..e246f9733bb8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
| @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, | |||
| 105 | /* Check if page can be recycled */ | 105 | /* Check if page can be recycled */ |
| 106 | if (page) { | 106 | if (page) { |
| 107 | ref_count = page_ref_count(page); | 107 | ref_count = page_ref_count(page); |
| 108 | /* Check if this page has been used once i.e 'put_page' | 108 | /* This page can be recycled if internal ref_count and page's |
| 109 | * called after packet transmission i.e internal ref_count | 109 | * ref_count are equal, indicating that the page has been used |
| 110 | * and page's ref_count are equal i.e page can be recycled. | 110 | * once for packet transmission. For non-XDP mode, internal |
| 111 | * ref_count is always '1'. | ||
| 111 | */ | 112 | */ |
| 112 | if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) | 113 | if (rbdr->is_xdp) { |
| 113 | pgcache->ref_count--; | 114 | if (ref_count == pgcache->ref_count) |
| 114 | else | 115 | pgcache->ref_count--; |
| 115 | page = NULL; | 116 | else |
| 116 | 117 | page = NULL; | |
| 117 | /* In non-XDP mode, page's ref_count needs to be '1' for it | 118 | } else if (ref_count != 1) { |
| 118 | * to be recycled. | ||
| 119 | */ | ||
| 120 | if (!rbdr->is_xdp && (ref_count != 1)) | ||
| 121 | page = NULL; | 119 | page = NULL; |
| 120 | } | ||
| 122 | } | 121 | } |
| 123 | 122 | ||
| 124 | if (!page) { | 123 | if (!page) { |
| @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | |||
| 365 | while (head < rbdr->pgcnt) { | 364 | while (head < rbdr->pgcnt) { |
| 366 | pgcache = &rbdr->pgcache[head]; | 365 | pgcache = &rbdr->pgcache[head]; |
| 367 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { | 366 | if (pgcache->page && page_ref_count(pgcache->page) != 0) { |
| 368 | if (!rbdr->is_xdp) { | 367 | if (rbdr->is_xdp) { |
| 369 | put_page(pgcache->page); | 368 | page_ref_sub(pgcache->page, |
| 370 | continue; | 369 | pgcache->ref_count - 1); |
| 371 | } | 370 | } |
| 372 | page_ref_sub(pgcache->page, pgcache->ref_count - 1); | ||
| 373 | put_page(pgcache->page); | 371 | put_page(pgcache->page); |
| 374 | } | 372 | } |
| 375 | head++; | 373 | head++; |
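The rewritten recycle logic in nicvf_alloc_page() expresses a single rule: a page may be recycled only when nobody else still holds it. In XDP mode the driver keeps its own per-page count and compares it against page_ref_count(); in non-XDP mode the page refcount must be exactly 1. A sketch of that decision with stand-in data, not the driver structures:

#include <stdbool.h>
#include <stdio.h>

struct pgcache_entry {
	int page_refs;		/* stand-in for page_ref_count(page) */
	int internal_refs;	/* stand-in for pgcache->ref_count */
};

static bool can_recycle(const struct pgcache_entry *pg, bool is_xdp)
{
	if (is_xdp)
		return pg->page_refs == pg->internal_refs;
	return pg->page_refs == 1;	/* non-XDP: sole owner required */
}

int main(void)
{
	struct pgcache_entry xdp_page = { .page_refs = 4, .internal_refs = 4 };
	struct pgcache_entry shared_page = { .page_refs = 2, .internal_refs = 1 };

	printf("XDP page recyclable: %d\n", can_recycle(&xdp_page, true));
	printf("non-XDP shared page recyclable: %d\n",
	       can_recycle(&shared_page, false));
	return 0;
}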
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | |||
| @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, | |||
| 354 | ppmax = max; | 354 | ppmax = max; |
| 355 | 355 | ||
| 356 | /* pool size must be multiple of unsigned long */ | 356 | /* pool size must be multiple of unsigned long */ |
| 357 | bmap = BITS_TO_LONGS(ppmax); | 357 | bmap = ppmax / BITS_PER_TYPE(unsigned long); |
| 358 | if (!bmap) | ||
| 359 | return NULL; | ||
| 360 | |||
| 358 | ppmax = (bmap * sizeof(unsigned long)) << 3; | 361 | ppmax = (bmap * sizeof(unsigned long)) << 3; |
| 359 | 362 | ||
| 360 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; | 363 | alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; |
| @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, | |||
| 402 | if (reserve_factor) { | 405 | if (reserve_factor) { |
| 403 | ppmax_pool = ppmax / reserve_factor; | 406 | ppmax_pool = ppmax / reserve_factor; |
| 404 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); | 407 | pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); |
| 408 | if (!pool) { | ||
| 409 | ppmax_pool = 0; | ||
| 410 | reserve_factor = 0; | ||
| 411 | } | ||
| 405 | 412 | ||
| 406 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", | 413 | pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", |
| 407 | ndev->name, ppmax, ppmax_pool, pool_index_max); | 414 | ndev->name, ppmax, ppmax_pool, pool_index_max); |
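The libcxgb_ppm change swaps BITS_TO_LONGS(), which rounds up, for a plain division that rounds down, then bails out when the result is zero. Rounding up meant a small ppmax was silently inflated to a full long's worth of entries, more than the caller asked for; rounding down with the zero check simply skips the per-CPU pool instead. The arithmetic, standalone and assuming 64-bit longs:

#include <stdio.h>

#define BITS_PER_LONG		64
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int requested = 40;	/* entries asked for per CPU */

	unsigned int up = BITS_TO_LONGS(requested);		/* 1 */
	unsigned int down = requested / BITS_PER_LONG;		/* 0 */

	printf("round up  : %u long(s) -> %u entries (exceeds request)\n",
	       up, up * BITS_PER_LONG);
	printf("round down: %u long(s) -> per-CPU pool skipped\n", down);
	return 0;
}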
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 697c2427f2b7..a96ad20ee484 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
| 1840 | int ret; | 1840 | int ret; |
| 1841 | 1841 | ||
| 1842 | if (enable) { | 1842 | if (enable) { |
| 1843 | ret = clk_prepare_enable(fep->clk_ahb); | ||
| 1844 | if (ret) | ||
| 1845 | return ret; | ||
| 1846 | |||
| 1847 | ret = clk_prepare_enable(fep->clk_enet_out); | 1843 | ret = clk_prepare_enable(fep->clk_enet_out); |
| 1848 | if (ret) | 1844 | if (ret) |
| 1849 | goto failed_clk_enet_out; | 1845 | return ret; |
| 1850 | 1846 | ||
| 1851 | if (fep->clk_ptp) { | 1847 | if (fep->clk_ptp) { |
| 1852 | mutex_lock(&fep->ptp_clk_mutex); | 1848 | mutex_lock(&fep->ptp_clk_mutex); |
| @@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
| 1866 | 1862 | ||
| 1867 | phy_reset_after_clk_enable(ndev->phydev); | 1863 | phy_reset_after_clk_enable(ndev->phydev); |
| 1868 | } else { | 1864 | } else { |
| 1869 | clk_disable_unprepare(fep->clk_ahb); | ||
| 1870 | clk_disable_unprepare(fep->clk_enet_out); | 1865 | clk_disable_unprepare(fep->clk_enet_out); |
| 1871 | if (fep->clk_ptp) { | 1866 | if (fep->clk_ptp) { |
| 1872 | mutex_lock(&fep->ptp_clk_mutex); | 1867 | mutex_lock(&fep->ptp_clk_mutex); |
| @@ -1885,8 +1880,6 @@ failed_clk_ref: | |||
| 1885 | failed_clk_ptp: | 1880 | failed_clk_ptp: |
| 1886 | if (fep->clk_enet_out) | 1881 | if (fep->clk_enet_out) |
| 1887 | clk_disable_unprepare(fep->clk_enet_out); | 1882 | clk_disable_unprepare(fep->clk_enet_out); |
| 1888 | failed_clk_enet_out: | ||
| 1889 | clk_disable_unprepare(fep->clk_ahb); | ||
| 1890 | 1883 | ||
| 1891 | return ret; | 1884 | return ret; |
| 1892 | } | 1885 | } |
| @@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) | |||
| 3470 | ret = clk_prepare_enable(fep->clk_ipg); | 3463 | ret = clk_prepare_enable(fep->clk_ipg); |
| 3471 | if (ret) | 3464 | if (ret) |
| 3472 | goto failed_clk_ipg; | 3465 | goto failed_clk_ipg; |
| 3466 | ret = clk_prepare_enable(fep->clk_ahb); | ||
| 3467 | if (ret) | ||
| 3468 | goto failed_clk_ahb; | ||
| 3473 | 3469 | ||
| 3474 | fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); | 3470 | fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); |
| 3475 | if (!IS_ERR(fep->reg_phy)) { | 3471 | if (!IS_ERR(fep->reg_phy)) { |
| @@ -3563,6 +3559,9 @@ failed_reset: | |||
| 3563 | pm_runtime_put(&pdev->dev); | 3559 | pm_runtime_put(&pdev->dev); |
| 3564 | pm_runtime_disable(&pdev->dev); | 3560 | pm_runtime_disable(&pdev->dev); |
| 3565 | failed_regulator: | 3561 | failed_regulator: |
| 3562 | clk_disable_unprepare(fep->clk_ahb); | ||
| 3563 | failed_clk_ahb: | ||
| 3564 | clk_disable_unprepare(fep->clk_ipg); | ||
| 3566 | failed_clk_ipg: | 3565 | failed_clk_ipg: |
| 3567 | fec_enet_clk_enable(ndev, false); | 3566 | fec_enet_clk_enable(ndev, false); |
| 3568 | failed_clk: | 3567 | failed_clk: |
| @@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) | |||
| 3686 | struct net_device *ndev = dev_get_drvdata(dev); | 3685 | struct net_device *ndev = dev_get_drvdata(dev); |
| 3687 | struct fec_enet_private *fep = netdev_priv(ndev); | 3686 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 3688 | 3687 | ||
| 3688 | clk_disable_unprepare(fep->clk_ahb); | ||
| 3689 | clk_disable_unprepare(fep->clk_ipg); | 3689 | clk_disable_unprepare(fep->clk_ipg); |
| 3690 | 3690 | ||
| 3691 | return 0; | 3691 | return 0; |
| @@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) | |||
| 3695 | { | 3695 | { |
| 3696 | struct net_device *ndev = dev_get_drvdata(dev); | 3696 | struct net_device *ndev = dev_get_drvdata(dev); |
| 3697 | struct fec_enet_private *fep = netdev_priv(ndev); | 3697 | struct fec_enet_private *fep = netdev_priv(ndev); |
| 3698 | int ret; | ||
| 3698 | 3699 | ||
| 3699 | return clk_prepare_enable(fep->clk_ipg); | 3700 | ret = clk_prepare_enable(fep->clk_ahb); |
| 3701 | if (ret) | ||
| 3702 | return ret; | ||
| 3703 | ret = clk_prepare_enable(fep->clk_ipg); | ||
| 3704 | if (ret) | ||
| 3705 | goto failed_clk_ipg; | ||
| 3706 | |||
| 3707 | return 0; | ||
| 3708 | |||
| 3709 | failed_clk_ipg: | ||
| 3710 | clk_disable_unprepare(fep->clk_ahb); | ||
| 3711 | return ret; | ||
| 3700 | } | 3712 | } |
| 3701 | 3713 | ||
| 3702 | static const struct dev_pm_ops fec_pm_ops = { | 3714 | static const struct dev_pm_ops fec_pm_ops = { |
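The fec hunks move clk_ahb handling out of fec_enet_clk_enable() and into probe and the runtime-PM callbacks, so the AHB clock can be gated while the interface is down. The new runtime-resume path also shows the usual "enable in order, unwind in reverse on failure" shape. A generic standalone sketch of that unwinding, with stubs standing in for clk_prepare_enable() and clk_disable_unprepare():

#include <stdio.h>

static int enable_clk(const char *name, int fail)
{
	if (fail) {
		printf("enable %s: failed\n", name);
		return -1;
	}
	printf("enable %s: ok\n", name);
	return 0;
}

static void disable_clk(const char *name)
{
	printf("disable %s\n", name);
}

static int runtime_resume(int make_ipg_fail)
{
	int ret;

	ret = enable_clk("ahb", 0);
	if (ret)
		return ret;
	ret = enable_clk("ipg", make_ipg_fail);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	disable_clk("ahb");	/* roll back the clock that did come up */
	return ret;
}

int main(void)
{
	runtime_resume(0);	/* both clocks up */
	runtime_resume(1);	/* ipg fails, ahb is rolled back */
	return 0;
}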
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
| @@ -150,7 +150,6 @@ out_buffer_fail: | |||
| 150 | /* free desc along with its attached buffer */ | 150 | /* free desc along with its attached buffer */ |
| 151 | static void hnae_free_desc(struct hnae_ring *ring) | 151 | static void hnae_free_desc(struct hnae_ring *ring) |
| 152 | { | 152 | { |
| 153 | hnae_free_buffers(ring); | ||
| 154 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, | 153 | dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, |
| 155 | ring->desc_num * sizeof(ring->desc[0]), | 154 | ring->desc_num * sizeof(ring->desc[0]), |
| 156 | ring_to_dma_dir(ring)); | 155 | ring_to_dma_dir(ring)); |
| @@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) | |||
| 183 | /* fini ring, also free the buffer for the ring */ | 182 | /* fini ring, also free the buffer for the ring */ |
| 184 | static void hnae_fini_ring(struct hnae_ring *ring) | 183 | static void hnae_fini_ring(struct hnae_ring *ring) |
| 185 | { | 184 | { |
| 185 | if (is_rx_ring(ring)) | ||
| 186 | hnae_free_buffers(ring); | ||
| 187 | |||
| 186 | hnae_free_desc(ring); | 188 | hnae_free_desc(ring); |
| 187 | kfree(ring->desc_cb); | 189 | kfree(ring->desc_cb); |
| 188 | ring->desc_cb = NULL; | 190 | ring->desc_cb = NULL; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c4..d6fb83437230 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -357,7 +357,7 @@ struct hnae_buf_ops { | |||
| 357 | }; | 357 | }; |
| 358 | 358 | ||
| 359 | struct hnae_queue { | 359 | struct hnae_queue { |
| 360 | void __iomem *io_base; | 360 | u8 __iomem *io_base; |
| 361 | phys_addr_t phy_base; | 361 | phys_addr_t phy_base; |
| 362 | struct hnae_ae_dev *dev; /* the device who use this queue */ | 362 | struct hnae_ae_dev *dev; /* the device who use this queue */ |
| 363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; | 363 | struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a97228c93831..6c0507921623 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
| @@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) | |||
| 370 | static void hns_mac_param_get(struct mac_params *param, | 370 | static void hns_mac_param_get(struct mac_params *param, |
| 371 | struct hns_mac_cb *mac_cb) | 371 | struct hns_mac_cb *mac_cb) |
| 372 | { | 372 | { |
| 373 | param->vaddr = (void *)mac_cb->vaddr; | 373 | param->vaddr = mac_cb->vaddr; |
| 374 | param->mac_mode = hns_get_enet_interface(mac_cb); | 374 | param->mac_mode = hns_get_enet_interface(mac_cb); |
| 375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); | 375 | ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); |
| 376 | param->mac_id = mac_cb->mac_id; | 376 | param->mac_id = mac_cb->mac_id; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef7..22589799f1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
| @@ -187,7 +187,7 @@ struct mac_statistics { | |||
| 187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ | 187 | /*mac para struct ,mac get param from nic or dsaf when initialize*/ |
| 188 | struct mac_params { | 188 | struct mac_params { |
| 189 | char addr[ETH_ALEN]; | 189 | char addr[ETH_ALEN]; |
| 190 | void *vaddr; /*virtual address*/ | 190 | u8 __iomem *vaddr; /*virtual address*/ |
| 191 | struct device *dev; | 191 | struct device *dev; |
| 192 | u8 mac_id; | 192 | u8 mac_id; |
| 193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ | 193 | /**< Ethernet operation mode (MAC-PHY interface and speed) */ |
| @@ -402,7 +402,7 @@ struct mac_driver { | |||
| 402 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
| 403 | u8 mac_id; | 403 | u8 mac_id; |
| 404 | struct hns_mac_cb *mac_cb; | 404 | struct hns_mac_cb *mac_cb; |
| 405 | void __iomem *io_base; | 405 | u8 __iomem *io_base; |
| 406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ | 406 | unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ |
| 407 | unsigned int virt_dev_num; | 407 | unsigned int virt_dev_num; |
| 408 | struct device *dev; | 408 | struct device *dev; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..61eea6ac846f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key( | |||
| 1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); | 1602 | DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); |
| 1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, | 1603 | dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, |
| 1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); | 1604 | DSAF_TBL_TCAM_KEY_PORT_S, port); |
| 1605 | |||
| 1606 | mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); | ||
| 1607 | } | 1605 | } |
| 1608 | 1606 | ||
| 1609 | /** | 1607 | /** |
| @@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry( | |||
| 1663 | /* default config dvc to 0 */ | 1661 | /* default config dvc to 0 */ |
| 1664 | mac_data.tbl_ucast_dvc = 0; | 1662 | mac_data.tbl_ucast_dvc = 0; |
| 1665 | mac_data.tbl_ucast_out_port = mac_entry->port_num; | 1663 | mac_data.tbl_ucast_out_port = mac_entry->port_num; |
| 1666 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1664 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 1667 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1665 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 1668 | 1666 | ||
| 1669 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); | 1667 | hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); |
| 1670 | 1668 | ||
| @@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1786 | 0xff, | 1784 | 0xff, |
| 1787 | mc_mask); | 1785 | mc_mask); |
| 1788 | 1786 | ||
| 1789 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
| 1790 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
| 1791 | |||
| 1792 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1787 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
| 1793 | } | 1788 | } |
| 1794 | 1789 | ||
| @@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1840 | dsaf_dev->ae_dev.name, mac_key.high.val, | 1835 | dsaf_dev->ae_dev.name, mac_key.high.val, |
| 1841 | mac_key.low.val, entry_index); | 1836 | mac_key.low.val, entry_index); |
| 1842 | 1837 | ||
| 1843 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 1838 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 1844 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 1839 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 1845 | 1840 | ||
| 1846 | /* config mc entry with mask */ | 1841 | /* config mc entry with mask */ |
| 1847 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, | 1842 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, |
| @@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1956 | /* config key mask */ | 1951 | /* config key mask */ |
| 1957 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); | 1952 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); |
| 1958 | 1953 | ||
| 1959 | mask_key.high.val = le32_to_cpu(mask_key.high.val); | ||
| 1960 | mask_key.low.val = le32_to_cpu(mask_key.low.val); | ||
| 1961 | |||
| 1962 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); | 1954 | pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); |
| 1963 | } | 1955 | } |
| 1964 | 1956 | ||
| @@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 2012 | soft_mac_entry += entry_index; | 2004 | soft_mac_entry += entry_index; |
| 2013 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; | 2005 | soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; |
| 2014 | } else { /* not zero, just del port, update */ | 2006 | } else { /* not zero, just del port, update */ |
| 2015 | tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); | 2007 | tcam_data.tbl_tcam_data_high = mac_key.high.val; |
| 2016 | tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); | 2008 | tcam_data.tbl_tcam_data_low = mac_key.low.val; |
| 2017 | 2009 | ||
| 2018 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, | 2010 | hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, |
| 2019 | &tcam_data, | 2011 | &tcam_data, |
| @@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void) | |||
| 2750 | return DSAF_DUMP_REGS_NUM; | 2742 | return DSAF_DUMP_REGS_NUM; |
| 2751 | } | 2743 | } |
| 2752 | 2744 | ||
| 2745 | static int hns_dsaf_get_port_id(u8 port) | ||
| 2746 | { | ||
| 2747 | if (port < DSAF_SERVICE_NW_NUM) | ||
| 2748 | return port; | ||
| 2749 | |||
| 2750 | if (port >= DSAF_BASE_INNER_PORT_NUM) | ||
| 2751 | return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
| 2752 | |||
| 2753 | return -EINVAL; | ||
| 2754 | } | ||
| 2755 | |||
| 2753 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | 2756 | static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) |
| 2754 | { | 2757 | { |
| 2755 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; | 2758 | struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; |
| @@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) | |||
| 2815 | memset(&temp_key, 0x0, sizeof(temp_key)); | 2818 | memset(&temp_key, 0x0, sizeof(temp_key)); |
| 2816 | mask_entry.addr[0] = 0x01; | 2819 | mask_entry.addr[0] = 0x01; |
| 2817 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, | 2820 | hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, |
| 2818 | port, mask_entry.addr); | 2821 | 0xf, mask_entry.addr); |
| 2819 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; | 2822 | tbl_tcam_mcast.tbl_mcast_item_vld = 1; |
| 2820 | tbl_tcam_mcast.tbl_mcast_old_en = 0; | 2823 | tbl_tcam_mcast.tbl_mcast_old_en = 0; |
| 2821 | 2824 | ||
| 2822 | if (port < DSAF_SERVICE_NW_NUM) { | 2825 | /* set MAC port to handle multicast */ |
| 2823 | mskid = port; | 2826 | mskid = hns_dsaf_get_port_id(port); |
| 2824 | } else if (port >= DSAF_BASE_INNER_PORT_NUM) { | 2827 | if (mskid == -EINVAL) { |
| 2825 | mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; | ||
| 2826 | } else { | ||
| 2827 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", | 2828 | dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", |
| 2828 | dsaf_dev->ae_dev.name, port, | 2829 | dsaf_dev->ae_dev.name, port, |
| 2829 | mask_key.high.val, mask_key.low.val); | 2830 | mask_key.high.val, mask_key.low.val); |
| 2830 | return; | 2831 | return; |
| 2831 | } | 2832 | } |
| 2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | ||
| 2834 | mskid % 32, 1); | ||
| 2832 | 2835 | ||
| 2836 | /* set pool bit map to handle multicast */ | ||
| 2837 | mskid = hns_dsaf_get_port_id(port_num); | ||
| 2838 | if (mskid == -EINVAL) { | ||
| 2839 | dev_err(dsaf_dev->dev, | ||
| 2840 | "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", | ||
| 2841 | dsaf_dev->ae_dev.name, port_num, | ||
| 2842 | mask_key.high.val, mask_key.low.val); | ||
| 2843 | return; | ||
| 2844 | } | ||
| 2833 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], | 2845 | dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], |
| 2834 | mskid % 32, 1); | 2846 | mskid % 32, 1); |
| 2847 | |||
| 2835 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); | 2848 | memcpy(&temp_key, &mask_key, sizeof(mask_key)); |
| 2836 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, | 2849 | hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, |
| 2837 | (struct dsaf_tbl_tcam_data *)(&mask_key), | 2850 | (struct dsaf_tbl_tcam_data *)(&mask_key), |
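The new hns_dsaf_get_port_id() helper centralizes the port-to-mask-bit mapping that set_promisc_tcam_enable() previously open-coded, and the promisc path now applies it twice, once for the MAC port and once for the pool bit map, each with its own error report. A standalone sketch of the mapping; the two limits below are placeholders, not the real DSAF_SERVICE_NW_NUM / DSAF_BASE_INNER_PORT_NUM values:

#include <stdio.h>

#define SERVICE_NW_NUM		6	/* placeholder limit for service ports */
#define BASE_INNER_PORT_NUM	127	/* placeholder base for inner ports */

static int get_port_id(unsigned int port)
{
	if (port < SERVICE_NW_NUM)
		return port;			/* service ports map directly */
	if (port >= BASE_INNER_PORT_NUM)
		return port - BASE_INNER_PORT_NUM + SERVICE_NW_NUM;
	return -1;	/* neither a service port nor an inner port */
}

int main(void)
{
	printf("port 2   -> bit %d\n", get_port_id(2));		/* service port */
	printf("port 128 -> bit %d\n", get_port_id(128));	/* inner port */
	printf("port 50  -> bit %d\n", get_port_id(50));	/* invalid */
	return 0;
}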
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6..76cc8887e1a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
| @@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 467 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
| 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); |
| 469 | 469 | ||
| 470 | int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); | ||
| 471 | |||
| 470 | #endif /* __HNS_DSAF_MAIN_H__ */ | 472 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c954..19b94879691f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | |||
| @@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) | |||
| 670 | dsaf_set_field(origin, 1ull << 10, 10, en); | 670 | dsaf_set_field(origin, 1ull << 10, 10, en); |
| 671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); | 671 | dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); |
| 672 | } else { | 672 | } else { |
| 673 | u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + | 673 | u8 __iomem *base_addr = mac_cb->serdes_vaddr + |
| 674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); | 674 | (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); |
| 675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); | 675 | dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); |
| 676 | } | 676 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3d07c8a7639d..17c019106e6e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
| @@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void __iomem * | 64 | static u8 __iomem * |
| 65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) | 65 | hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) |
| 66 | { | 66 | { |
| 67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; | 67 | return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; |
| @@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) | |||
| 111 | dsaf_dev->ppe_common[comm_index] = NULL; | 111 | dsaf_dev->ppe_common[comm_index] = NULL; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, | 114 | static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, |
| 115 | int ppe_idx) | 115 | int ppe_idx) |
| 116 | { | 116 | { |
| 117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; | 117 | return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; |
| 118 | } | 118 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a01..110c6e8222c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
| @@ -80,7 +80,7 @@ struct hns_ppe_cb { | |||
| 80 | struct hns_ppe_hw_stats hw_stats; | 80 | struct hns_ppe_hw_stats hw_stats; |
| 81 | 81 | ||
| 82 | u8 index; /* index in a ppe common device */ | 82 | u8 index; /* index in a ppe common device */ |
| 83 | void __iomem *io_base; | 83 | u8 __iomem *io_base; |
| 84 | int virq; | 84 | int virq; |
| 85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ | 85 | u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ |
| 86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ | 86 | u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ |
| @@ -89,7 +89,7 @@ struct hns_ppe_cb { | |||
| 89 | struct ppe_common_cb { | 89 | struct ppe_common_cb { |
| 90 | struct device *dev; | 90 | struct device *dev; |
| 91 | struct dsaf_device *dsaf_dev; | 91 | struct dsaf_device *dsaf_dev; |
| 92 | void __iomem *io_base; | 92 | u8 __iomem *io_base; |
| 93 | 93 | ||
| 94 | enum ppe_common_mode ppe_mode; | 94 | enum ppe_common_mode ppe_mode; |
| 95 | 95 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6bf346c11b25..ac3518ca4d7b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
| @@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) | |||
| 458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; | 458 | mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; |
| 459 | } else { | 459 | } else { |
| 460 | ring = &q->tx_ring; | 460 | ring = &q->tx_ring; |
| 461 | ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + | 461 | ring->io_base = ring_pair_cb->q.io_base + |
| 462 | HNS_RCB_TX_REG_OFFSET; | 462 | HNS_RCB_TX_REG_OFFSET; |
| 463 | irq_idx = HNS_RCB_IRQ_IDX_TX; | 463 | irq_idx = HNS_RCB_IRQ_IDX_TX; |
| 464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : | 464 | mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : |
| @@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) | |||
| 764 | } | 764 | } |
| 765 | } | 765 | } |
| 766 | 766 | ||
| 767 | static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) | 767 | static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) |
| 768 | { | 768 | { |
| 769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; | 769 | struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; |
| 770 | 770 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b9733b0b8482..b9e7f11f0896 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
| @@ -1018,7 +1018,7 @@ | |||
| 1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 | 1018 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 |
| 1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 | 1019 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 |
| 1020 | 1020 | ||
| 1021 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) | 1021 | static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) |
| 1022 | { | 1022 | { |
| 1023 | writel(value, base + reg); | 1023 | writel(value, base + reg); |
| 1024 | } | 1024 | } |
| @@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) | |||
| 1053 | #define dsaf_set_bit(origin, shift, val) \ | 1053 | #define dsaf_set_bit(origin, shift, val) \ |
| 1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) | 1054 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) |
| 1055 | 1055 | ||
| 1056 | static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | 1056 | static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
| 1057 | u32 shift, u32 val) | 1057 | u32 shift, u32 val) |
| 1058 | { | 1058 | { |
| 1059 | u32 origin = dsaf_read_reg(base, reg); | 1059 | u32 origin = dsaf_read_reg(base, reg); |
| @@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
| 1073 | #define dsaf_get_bit(origin, shift) \ | 1073 | #define dsaf_get_bit(origin, shift) \ |
| 1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) | 1074 | dsaf_get_field((origin), (1ull << (shift)), (shift)) |
| 1075 | 1075 | ||
| 1076 | static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | 1076 | static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, |
| 1077 | u32 shift) | 1077 | u32 shift) |
| 1078 | { | 1078 | { |
| 1079 | u32 origin; | 1079 | u32 origin; |
| @@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, | |||
| 1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) | 1089 | dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) |
| 1090 | 1090 | ||
| 1091 | #define dsaf_write_b(addr, data)\ | 1091 | #define dsaf_write_b(addr, data)\ |
| 1092 | writeb((data), (__iomem unsigned char *)(addr)) | 1092 | writeb((data), (__iomem u8 *)(addr)) |
| 1093 | #define dsaf_read_b(addr)\ | 1093 | #define dsaf_read_b(addr)\ |
| 1094 | readb((__iomem unsigned char *)(addr)) | 1094 | readb((__iomem u8 *)(addr)) |
| 1095 | 1095 | ||
| 1096 | #define hns_mac_reg_read64(drv, offset) \ | 1096 | #define hns_mac_reg_read64(drv, offset) \ |
| 1097 | readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) | 1097 | readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) |
| 1098 | 1098 | ||
| 1099 | #endif /* _DSAF_REG_H */ | 1099 | #endif /* _DSAF_REG_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | |||
| @@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) | |||
| 129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); | 129 | dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); |
| 130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); | 130 | dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); |
| 131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); | 131 | dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); |
| 132 | dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); | 132 | dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /** | 135 | /** |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 60e7d7ae3787..4cd86ba1f050 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -29,9 +29,6 @@ | |||
| 29 | 29 | ||
| 30 | #define SERVICE_TIMER_HZ (1 * HZ) | 30 | #define SERVICE_TIMER_HZ (1 * HZ) |
| 31 | 31 | ||
| 32 | #define NIC_TX_CLEAN_MAX_NUM 256 | ||
| 33 | #define NIC_RX_CLEAN_MAX_NUM 64 | ||
| 34 | |||
| 35 | #define RCB_IRQ_NOT_INITED 0 | 32 | #define RCB_IRQ_NOT_INITED 0 |
| 36 | #define RCB_IRQ_INITED 1 | 33 | #define RCB_IRQ_INITED 1 |
| 37 | #define HNS_BUFFER_SIZE_2048 2048 | 34 | #define HNS_BUFFER_SIZE_2048 2048 |
| @@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, | |||
| 376 | wmb(); /* commit all data before submit */ | 373 | wmb(); /* commit all data before submit */ |
| 377 | assert(skb->queue_mapping < priv->ae_handle->q_num); | 374 | assert(skb->queue_mapping < priv->ae_handle->q_num); |
| 378 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); | 375 | hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); |
| 379 | ring->stats.tx_pkts++; | ||
| 380 | ring->stats.tx_bytes += skb->len; | ||
| 381 | 376 | ||
| 382 | return NETDEV_TX_OK; | 377 | return NETDEV_TX_OK; |
| 383 | 378 | ||
| @@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, | |||
| 999 | /* issue prefetch for next Tx descriptor */ | 994 | /* issue prefetch for next Tx descriptor */ |
| 1000 | prefetch(&ring->desc_cb[ring->next_to_clean]); | 995 | prefetch(&ring->desc_cb[ring->next_to_clean]); |
| 1001 | } | 996 | } |
| 997 | /* update tx ring statistics. */ | ||
| 998 | ring->stats.tx_pkts += pkts; | ||
| 999 | ring->stats.tx_bytes += bytes; | ||
| 1002 | 1000 | ||
| 1003 | NETIF_TX_UNLOCK(ring); | 1001 | NETIF_TX_UNLOCK(ring); |
| 1004 | 1002 | ||
| @@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
| 2152 | hns_nic_tx_fini_pro_v2; | 2150 | hns_nic_tx_fini_pro_v2; |
| 2153 | 2151 | ||
| 2154 | netif_napi_add(priv->netdev, &rd->napi, | 2152 | netif_napi_add(priv->netdev, &rd->napi, |
| 2155 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); | 2153 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
| 2156 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2154 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
| 2157 | } | 2155 | } |
| 2158 | for (i = h->q_num; i < h->q_num * 2; i++) { | 2156 | for (i = h->q_num; i < h->q_num * 2; i++) { |
| @@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |||
| 2165 | hns_nic_rx_fini_pro_v2; | 2163 | hns_nic_rx_fini_pro_v2; |
| 2166 | 2164 | ||
| 2167 | netif_napi_add(priv->netdev, &rd->napi, | 2165 | netif_napi_add(priv->netdev, &rd->napi, |
| 2168 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); | 2166 | hns_nic_common_poll, NAPI_POLL_WEIGHT); |
| 2169 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | 2167 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; |
| 2170 | } | 2168 | } |
| 2171 | 2169 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
| 4 | # | 4 | # |
| 5 | 5 | ||
| 6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o | 8 | obj-$(CONFIG_HNS3_HCLGE) += hclge.o |
| 9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o | 9 | hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # Makefile for the HISILICON network device drivers. | 3 | # Makefile for the HISILICON network device drivers. |
| 4 | # | 4 | # |
| 5 | 5 | ||
| 6 | ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 | 6 | ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o | 8 | obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o |
| 9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file | 9 | hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file |
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index baf5cc251f32..8b8a7d00e8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c | |||
| @@ -39,7 +39,7 @@ struct hns_mdio_sc_reg { | |||
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | struct hns_mdio_device { | 41 | struct hns_mdio_device { |
| 42 | void *vbase; /* mdio reg base address */ | 42 | u8 __iomem *vbase; /* mdio reg base address */ |
| 43 | struct regmap *subctrl_vbase; | 43 | struct regmap *subctrl_vbase; |
| 44 | struct hns_mdio_sc_reg sc_reg; | 44 | struct hns_mdio_sc_reg sc_reg; |
| 45 | }; | 45 | }; |
| @@ -96,21 +96,17 @@ enum mdio_c45_op_seq { | |||
| 96 | #define MDIO_SC_CLK_ST 0x531C | 96 | #define MDIO_SC_CLK_ST 0x531C |
| 97 | #define MDIO_SC_RESET_ST 0x5A1C | 97 | #define MDIO_SC_RESET_ST 0x5A1C |
| 98 | 98 | ||
| 99 | static void mdio_write_reg(void *base, u32 reg, u32 value) | 99 | static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) |
| 100 | { | 100 | { |
| 101 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 101 | writel_relaxed(value, base + reg); |
| 102 | |||
| 103 | writel_relaxed(value, reg_addr + reg); | ||
| 104 | } | 102 | } |
| 105 | 103 | ||
| 106 | #define MDIO_WRITE_REG(a, reg, value) \ | 104 | #define MDIO_WRITE_REG(a, reg, value) \ |
| 107 | mdio_write_reg((a)->vbase, (reg), (value)) | 105 | mdio_write_reg((a)->vbase, (reg), (value)) |
| 108 | 106 | ||
| 109 | static u32 mdio_read_reg(void *base, u32 reg) | 107 | static u32 mdio_read_reg(u8 __iomem *base, u32 reg) |
| 110 | { | 108 | { |
| 111 | u8 __iomem *reg_addr = (u8 __iomem *)base; | 109 | return readl_relaxed(base + reg); |
| 112 | |||
| 113 | return readl_relaxed(reg_addr + reg); | ||
| 114 | } | 110 | } |
| 115 | 111 | ||
| 116 | #define mdio_set_field(origin, mask, shift, val) \ | 112 | #define mdio_set_field(origin, mask, shift, val) \ |
| @@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg) | |||
| 121 | 117 | ||
| 122 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) | 118 | #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) |
| 123 | 119 | ||
| 124 | static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | 120 | static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, |
| 125 | u32 val) | 121 | u32 val) |
| 126 | { | 122 | { |
| 127 | u32 origin = mdio_read_reg(base, reg); | 123 | u32 origin = mdio_read_reg(base, reg); |
| @@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | |||
| 133 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ | 129 | #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ |
| 134 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) | 130 | mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) |
| 135 | 131 | ||
| 136 | static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) | 132 | static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) |
| 137 | { | 133 | { |
| 138 | u32 origin; | 134 | u32 origin; |
| 139 | 135 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ecbb1adcf3b..3dfb2d131eb7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, | |||
| 1885 | */ | 1885 | */ |
| 1886 | adapter->state = VNIC_PROBED; | 1886 | adapter->state = VNIC_PROBED; |
| 1887 | 1887 | ||
| 1888 | reinit_completion(&adapter->init_done); | ||
| 1888 | rc = init_crq_queue(adapter); | 1889 | rc = init_crq_queue(adapter); |
| 1889 | if (rc) { | 1890 | if (rc) { |
| 1890 | netdev_err(adapter->netdev, | 1891 | netdev_err(adapter->netdev, |
| @@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) | |||
| 3761 | { | 3762 | { |
| 3762 | struct device *dev = &adapter->vdev->dev; | 3763 | struct device *dev = &adapter->vdev->dev; |
| 3763 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; | 3764 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; |
| 3765 | netdev_features_t old_hw_features = 0; | ||
| 3764 | union ibmvnic_crq crq; | 3766 | union ibmvnic_crq crq; |
| 3765 | int i; | 3767 | int i; |
| 3766 | 3768 | ||
| @@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) | |||
| 3836 | adapter->ip_offload_ctrl.large_rx_ipv4 = 0; | 3838 | adapter->ip_offload_ctrl.large_rx_ipv4 = 0; |
| 3837 | adapter->ip_offload_ctrl.large_rx_ipv6 = 0; | 3839 | adapter->ip_offload_ctrl.large_rx_ipv6 = 0; |
| 3838 | 3840 | ||
| 3839 | adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; | 3841 | if (adapter->state != VNIC_PROBING) { |
| 3842 | old_hw_features = adapter->netdev->hw_features; | ||
| 3843 | adapter->netdev->hw_features = 0; | ||
| 3844 | } | ||
| 3845 | |||
| 3846 | adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; | ||
| 3840 | 3847 | ||
| 3841 | if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) | 3848 | if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) |
| 3842 | adapter->netdev->features |= NETIF_F_IP_CSUM; | 3849 | adapter->netdev->hw_features |= NETIF_F_IP_CSUM; |
| 3843 | 3850 | ||
| 3844 | if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) | 3851 | if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) |
| 3845 | adapter->netdev->features |= NETIF_F_IPV6_CSUM; | 3852 | adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; |
| 3846 | 3853 | ||
| 3847 | if ((adapter->netdev->features & | 3854 | if ((adapter->netdev->features & |
| 3848 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) | 3855 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
| 3849 | adapter->netdev->features |= NETIF_F_RXCSUM; | 3856 | adapter->netdev->hw_features |= NETIF_F_RXCSUM; |
| 3850 | 3857 | ||
| 3851 | if (buf->large_tx_ipv4) | 3858 | if (buf->large_tx_ipv4) |
| 3852 | adapter->netdev->features |= NETIF_F_TSO; | 3859 | adapter->netdev->hw_features |= NETIF_F_TSO; |
| 3853 | if (buf->large_tx_ipv6) | 3860 | if (buf->large_tx_ipv6) |
| 3854 | adapter->netdev->features |= NETIF_F_TSO6; | 3861 | adapter->netdev->hw_features |= NETIF_F_TSO6; |
| 3862 | |||
| 3863 | if (adapter->state == VNIC_PROBING) { | ||
| 3864 | adapter->netdev->features |= adapter->netdev->hw_features; | ||
| 3865 | } else if (old_hw_features != adapter->netdev->hw_features) { | ||
| 3866 | netdev_features_t tmp = 0; | ||
| 3855 | 3867 | ||
| 3856 | adapter->netdev->hw_features |= adapter->netdev->features; | 3868 | /* disable features no longer supported */ |
| 3869 | adapter->netdev->features &= adapter->netdev->hw_features; | ||
| 3870 | /* turn on features now supported if previously enabled */ | ||
| 3871 | tmp = (old_hw_features ^ adapter->netdev->hw_features) & | ||
| 3872 | adapter->netdev->hw_features; | ||
| 3873 | adapter->netdev->features |= | ||
| 3874 | tmp & adapter->netdev->wanted_features; | ||
| 3875 | } | ||
| 3857 | 3876 | ||
| 3858 | memset(&crq, 0, sizeof(crq)); | 3877 | memset(&crq, 0, sizeof(crq)); |
| 3859 | crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; | 3878 | crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; |
| @@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) | |||
| 4625 | old_num_rx_queues = adapter->req_rx_queues; | 4644 | old_num_rx_queues = adapter->req_rx_queues; |
| 4626 | old_num_tx_queues = adapter->req_tx_queues; | 4645 | old_num_tx_queues = adapter->req_tx_queues; |
| 4627 | 4646 | ||
| 4628 | init_completion(&adapter->init_done); | 4647 | reinit_completion(&adapter->init_done); |
| 4629 | adapter->init_done_rc = 0; | 4648 | adapter->init_done_rc = 0; |
| 4630 | ibmvnic_send_crq_init(adapter); | 4649 | ibmvnic_send_crq_init(adapter); |
| 4631 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4650 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| @@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
| 4680 | 4699 | ||
| 4681 | adapter->from_passive_init = false; | 4700 | adapter->from_passive_init = false; |
| 4682 | 4701 | ||
| 4683 | init_completion(&adapter->init_done); | ||
| 4684 | adapter->init_done_rc = 0; | 4702 | adapter->init_done_rc = 0; |
| 4685 | ibmvnic_send_crq_init(adapter); | 4703 | ibmvnic_send_crq_init(adapter); |
| 4686 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | 4704 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| @@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 4759 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); | 4777 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
| 4760 | INIT_LIST_HEAD(&adapter->rwi_list); | 4778 | INIT_LIST_HEAD(&adapter->rwi_list); |
| 4761 | spin_lock_init(&adapter->rwi_lock); | 4779 | spin_lock_init(&adapter->rwi_lock); |
| 4780 | init_completion(&adapter->init_done); | ||
| 4762 | adapter->resetting = false; | 4781 | adapter->resetting = false; |
| 4763 | 4782 | ||
| 4764 | adapter->mac_change_pending = false; | 4783 | adapter->mac_change_pending = false; |
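The ibmvnic offload-response hunk separates what the adapter can do (hw_features, rebuilt from the query response) from what is currently enabled (features). After a reset it drops enabled features the new firmware no longer offers and re-enables newly offered ones the user had previously requested. The reconciliation restated with plain bitmasks in a standalone sketch:

#include <stdio.h>

#define F_SG	0x1
#define F_TSO	0x2
#define F_CSUM	0x4

int main(void)
{
	unsigned int old_hw = F_SG | F_TSO;		/* offered before reset */
	unsigned int new_hw = F_SG | F_CSUM;		/* offered after reset */
	unsigned int features = F_SG | F_TSO;		/* currently enabled */
	unsigned int wanted = F_SG | F_TSO | F_CSUM;	/* user request */

	/* disable features no longer supported */
	features &= new_hw;
	/* turn on newly supported features the user previously wanted */
	features |= ((old_hw ^ new_hw) & new_hw) & wanted;

	printf("features after reset: %#x (SG|CSUM expected: %#x)\n",
	       features, F_SG | F_CSUM);
	return 0;
}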
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5a0419421511..ecef949f3baa 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
| @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) | |||
| 41 | /* create driver workqueue */ | 41 | /* create driver workqueue */ |
| 42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, | 42 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, |
| 43 | fm10k_driver_name); | 43 | fm10k_driver_name); |
| 44 | if (!fm10k_workqueue) | ||
| 45 | return -ENOMEM; | ||
| 44 | 46 | ||
| 45 | fm10k_dbg_init(); | 47 | fm10k_dbg_init(); |
| 46 | 48 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d684998ba2b0..d3cc3427caad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -790,6 +790,8 @@ struct i40e_vsi { | |||
| 790 | 790 | ||
| 791 | /* VSI specific handlers */ | 791 | /* VSI specific handlers */ |
| 792 | irqreturn_t (*irq_handler)(int irq, void *data); | 792 | irqreturn_t (*irq_handler)(int irq, void *data); |
| 793 | |||
| 794 | unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ | ||
| 793 | } ____cacheline_internodealigned_in_smp; | 795 | } ____cacheline_internodealigned_in_smp; |
| 794 | 796 | ||
| 795 | struct i40e_netdev_priv { | 797 | struct i40e_netdev_priv { |
| @@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) | |||
| 1096 | return !!vsi->xdp_prog; | 1098 | return !!vsi->xdp_prog; |
| 1097 | } | 1099 | } |
| 1098 | 1100 | ||
| 1099 | static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
| 1100 | { | ||
| 1101 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
| 1102 | int qid = ring->queue_index; | ||
| 1103 | |||
| 1104 | if (ring_is_xdp(ring)) | ||
| 1105 | qid -= ring->vsi->alloc_queue_pairs; | ||
| 1106 | |||
| 1107 | if (!xdp_on) | ||
| 1108 | return NULL; | ||
| 1109 | |||
| 1110 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); | 1101 | int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); |
| 1114 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); | 1102 | int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); |
| 1115 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, | 1103 | int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4c885801fa26..7874d0ec7fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
| @@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 2573 | return -EOPNOTSUPP; | 2573 | return -EOPNOTSUPP; |
| 2574 | 2574 | ||
| 2575 | /* only magic packet is supported */ | 2575 | /* only magic packet is supported */ |
| 2576 | if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) | 2576 | if (wol->wolopts & ~WAKE_MAGIC) |
| 2577 | | (wol->wolopts != WAKE_FILTER)) | ||
| 2578 | return -EOPNOTSUPP; | 2577 | return -EOPNOTSUPP; |
| 2579 | 2578 | ||
| 2580 | /* is this a new value? */ | 2579 | /* is this a new value? */ |
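In the i40e ethtool hunk above, the replaced WoL check combined a logical && with a bitwise | of two != comparisons; since wolopts can never equal WAKE_MAGIC and WAKE_FILTER at the same time, that expression effectively rejected every non-zero request, including a plain magic-packet one. The new test simply masks off WAKE_MAGIC and refuses anything left over. A small standalone sketch of that mask test follows; the flag values are local stand-ins rather than the <linux/ethtool.h> definitions.

#include <stdio.h>

#define DEMO_WAKE_MAGIC  0x20u   /* stand-in for WAKE_MAGIC */
#define DEMO_WAKE_FILTER 0x80u   /* stand-in for WAKE_FILTER */

/* Accept a request only if it asks for nothing beyond magic-packet wake. */
static int check_wol(unsigned int wolopts)
{
	if (wolopts & ~DEMO_WAKE_MAGIC)
		return -1;           /* -EOPNOTSUPP in the driver */
	return 0;
}

int main(void)
{
	printf("0 (disable wake)    -> %d\n", check_wol(0));
	printf("WAKE_MAGIC          -> %d\n", check_wol(DEMO_WAKE_MAGIC));
	printf("WAKE_FILTER         -> %d\n", check_wol(DEMO_WAKE_FILTER));
	printf("WAKE_MAGIC|FILTER   -> %d\n",
	       check_wol(DEMO_WAKE_MAGIC | DEMO_WAKE_FILTER));
	return 0;
}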
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da62218eb70a..b1c265012c8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) | |||
| 3064 | } | 3064 | } |
| 3065 | 3065 | ||
| 3066 | /** | 3066 | /** |
| 3067 | * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled | ||
| 3068 | * @ring: The Tx or Rx ring | ||
| 3069 | * | ||
| 3070 | * Returns the UMEM or NULL. | ||
| 3071 | **/ | ||
| 3072 | static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) | ||
| 3073 | { | ||
| 3074 | bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); | ||
| 3075 | int qid = ring->queue_index; | ||
| 3076 | |||
| 3077 | if (ring_is_xdp(ring)) | ||
| 3078 | qid -= ring->vsi->alloc_queue_pairs; | ||
| 3079 | |||
| 3080 | if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) | ||
| 3081 | return NULL; | ||
| 3082 | |||
| 3083 | return xdp_get_umem_from_qid(ring->vsi->netdev, qid); | ||
| 3084 | } | ||
| 3085 | |||
| 3086 | /** | ||
| 3067 | * i40e_configure_tx_ring - Configure a transmit ring context and rest | 3087 | * i40e_configure_tx_ring - Configure a transmit ring context and rest |
| 3068 | * @ring: The Tx ring to configure | 3088 | * @ring: The Tx ring to configure |
| 3069 | * | 3089 | * |
| @@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 10064 | hash_init(vsi->mac_filter_hash); | 10084 | hash_init(vsi->mac_filter_hash); |
| 10065 | vsi->irqs_ready = false; | 10085 | vsi->irqs_ready = false; |
| 10066 | 10086 | ||
| 10087 | if (type == I40E_VSI_MAIN) { | ||
| 10088 | vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); | ||
| 10089 | if (!vsi->af_xdp_zc_qps) | ||
| 10090 | goto err_rings; | ||
| 10091 | } | ||
| 10092 | |||
| 10067 | ret = i40e_set_num_rings_in_vsi(vsi); | 10093 | ret = i40e_set_num_rings_in_vsi(vsi); |
| 10068 | if (ret) | 10094 | if (ret) |
| 10069 | goto err_rings; | 10095 | goto err_rings; |
| @@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
| 10082 | goto unlock_pf; | 10108 | goto unlock_pf; |
| 10083 | 10109 | ||
| 10084 | err_rings: | 10110 | err_rings: |
| 10111 | bitmap_free(vsi->af_xdp_zc_qps); | ||
| 10085 | pf->next_vsi = i - 1; | 10112 | pf->next_vsi = i - 1; |
| 10086 | kfree(vsi); | 10113 | kfree(vsi); |
| 10087 | unlock_pf: | 10114 | unlock_pf: |
| @@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) | |||
| 10162 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); | 10189 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); |
| 10163 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); | 10190 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); |
| 10164 | 10191 | ||
| 10192 | bitmap_free(vsi->af_xdp_zc_qps); | ||
| 10165 | i40e_vsi_free_arrays(vsi, true); | 10193 | i40e_vsi_free_arrays(vsi, true); |
| 10166 | i40e_clear_rss_config_user(vsi); | 10194 | i40e_clear_rss_config_user(vsi); |
| 10167 | 10195 | ||
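The i40e hunks above introduce a per-VSI bitmap, af_xdp_zc_qps: it is allocated with bitmap_zalloc() for the main VSI, a bit is set or cleared when an AF_XDP UMEM is enabled or disabled on a queue id, i40e_xsk_umem() tests it before looking up the UMEM, and it is freed when the VSI is cleared. The sketch below mirrors that bookkeeping in plain userspace C with hand-rolled bit helpers instead of the kernel bitmap API; all names are illustrative.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_WORD (CHAR_BIT * sizeof(unsigned long))

/* Zeroed bitmap with one bit per queue pair, like bitmap_zalloc(). */
static unsigned long *zc_bitmap_alloc(unsigned int nbits)
{
	size_t words = (nbits + BITS_PER_WORD - 1) / BITS_PER_WORD;

	return calloc(words, sizeof(unsigned long));
}

static void zc_set(unsigned long *map, unsigned int qid)
{
	map[qid / BITS_PER_WORD] |= 1UL << (qid % BITS_PER_WORD);
}

static void zc_clear(unsigned long *map, unsigned int qid)
{
	map[qid / BITS_PER_WORD] &= ~(1UL << (qid % BITS_PER_WORD));
}

static int zc_test(const unsigned long *map, unsigned int qid)
{
	return (map[qid / BITS_PER_WORD] >> (qid % BITS_PER_WORD)) & 1;
}

int main(void)
{
	unsigned int num_qps = 64;
	unsigned long *zc_qps = zc_bitmap_alloc(num_qps);

	if (!zc_qps)
		return 1;

	zc_set(zc_qps, 3);                               /* UMEM enabled on qid 3 */
	printf("qid 3 zero-copy: %d\n", zc_test(zc_qps, 3));
	printf("qid 4 zero-copy: %d\n", zc_test(zc_qps, 4));
	zc_clear(zc_qps, 3);                             /* UMEM disabled */
	printf("qid 3 after clear: %d\n", zc_test(zc_qps, 3));

	free(zc_qps);                                    /* like bitmap_free() */
	return 0;
}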
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
| @@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
| 146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) | 146 | static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
| 147 | { | 147 | { |
| 148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); | 148 | struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); |
| 149 | struct timespec64 now; | 149 | struct timespec64 now, then; |
| 150 | 150 | ||
| 151 | then = ns_to_timespec64(delta); | ||
| 151 | mutex_lock(&pf->tmreg_lock); | 152 | mutex_lock(&pf->tmreg_lock); |
| 152 | 153 | ||
| 153 | i40e_ptp_read(pf, &now, NULL); | 154 | i40e_ptp_read(pf, &now, NULL); |
| 154 | timespec64_add_ns(&now, delta); | 155 | now = timespec64_add(now, then); |
| 155 | i40e_ptp_write(pf, (const struct timespec64 *)&now); | 156 | i40e_ptp_write(pf, (const struct timespec64 *)&now); |
| 156 | 157 | ||
| 157 | mutex_unlock(&pf->tmreg_lock); | 158 | mutex_unlock(&pf->tmreg_lock); |
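The i40e_ptp_adjtime() hunk above stops pushing the signed delta through an unsigned add-nanoseconds helper and instead converts it with ns_to_timespec64() and adds the two timespecs, so a negative adjustment borrows correctly from the seconds field. Below is a standalone approximation of that conversion and addition; the helpers are local simplifications, not the kernel's implementations.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 {
	int64_t sec;
	long    nsec;
};

/* Split a signed nanosecond count into seconds plus a non-negative remainder. */
static struct ts64 ns_to_ts64(int64_t ns)
{
	struct ts64 t = { ns / NSEC_PER_SEC, (long)(ns % NSEC_PER_SEC) };

	if (t.nsec < 0) {
		t.sec--;
		t.nsec += NSEC_PER_SEC;
	}
	return t;
}

/* Add two normalized timespecs, carrying into the seconds field if needed. */
static struct ts64 ts64_add(struct ts64 a, struct ts64 b)
{
	struct ts64 r = { a.sec + b.sec, a.nsec + b.nsec };

	if (r.nsec >= NSEC_PER_SEC) {
		r.sec++;
		r.nsec -= NSEC_PER_SEC;
	}
	return r;
}

int main(void)
{
	struct ts64 now  = { 100, 200000000 };            /* 100.2 s */
	struct ts64 then = ns_to_ts64(-1500000000LL);     /* adjust by -1.5 s */
	struct ts64 res  = ts64_add(now, then);

	printf("adjusted time: %lld.%09ld\n", (long long)res.sec, res.nsec);
	return 0;
}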
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b5c182e688e3..1b17486543ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c | |||
| @@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, | |||
| 102 | if (err) | 102 | if (err) |
| 103 | return err; | 103 | return err; |
| 104 | 104 | ||
| 105 | set_bit(qid, vsi->af_xdp_zc_qps); | ||
| 106 | |||
| 105 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); | 107 | if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); |
| 106 | 108 | ||
| 107 | if (if_running) { | 109 | if (if_running) { |
| @@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) | |||
| 148 | return err; | 150 | return err; |
| 149 | } | 151 | } |
| 150 | 152 | ||
| 153 | clear_bit(qid, vsi->af_xdp_zc_qps); | ||
| 151 | i40e_xsk_umem_dma_unmap(vsi, umem); | 154 | i40e_xsk_umem_dma_unmap(vsi, umem); |
| 152 | 155 | ||
| 153 | if (if_running) { | 156 | if (if_running) { |
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h | |||
| @@ -194,6 +194,8 @@ | |||
| 194 | /* enable link status from external LINK_0 and LINK_1 pins */ | 194 | /* enable link status from external LINK_0 and LINK_1 pins */ |
| 195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ | 195 | #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ |
| 196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ | 196 | #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ |
| 197 | #define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ | ||
| 198 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ | ||
| 197 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ | 199 | #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ |
| 198 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ | 200 | #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ |
| 199 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | 201 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..3269d8e94744 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8740 | struct e1000_hw *hw = &adapter->hw; | 8740 | struct e1000_hw *hw = &adapter->hw; |
| 8741 | u32 ctrl, rctl, status; | 8741 | u32 ctrl, rctl, status; |
| 8742 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | 8742 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; |
| 8743 | #ifdef CONFIG_PM | 8743 | bool wake; |
| 8744 | int retval = 0; | ||
| 8745 | #endif | ||
| 8746 | 8744 | ||
| 8747 | rtnl_lock(); | 8745 | rtnl_lock(); |
| 8748 | netif_device_detach(netdev); | 8746 | netif_device_detach(netdev); |
| @@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8755 | igb_clear_interrupt_scheme(adapter); | 8753 | igb_clear_interrupt_scheme(adapter); |
| 8756 | rtnl_unlock(); | 8754 | rtnl_unlock(); |
| 8757 | 8755 | ||
| 8758 | #ifdef CONFIG_PM | ||
| 8759 | if (!runtime) { | ||
| 8760 | retval = pci_save_state(pdev); | ||
| 8761 | if (retval) | ||
| 8762 | return retval; | ||
| 8763 | } | ||
| 8764 | #endif | ||
| 8765 | |||
| 8766 | status = rd32(E1000_STATUS); | 8756 | status = rd32(E1000_STATUS); |
| 8767 | if (status & E1000_STATUS_LU) | 8757 | if (status & E1000_STATUS_LU) |
| 8768 | wufc &= ~E1000_WUFC_LNKC; | 8758 | wufc &= ~E1000_WUFC_LNKC; |
| @@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8779 | } | 8769 | } |
| 8780 | 8770 | ||
| 8781 | ctrl = rd32(E1000_CTRL); | 8771 | ctrl = rd32(E1000_CTRL); |
| 8782 | /* advertise wake from D3Cold */ | ||
| 8783 | #define E1000_CTRL_ADVD3WUC 0x00100000 | ||
| 8784 | /* phy power management enable */ | ||
| 8785 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | ||
| 8786 | ctrl |= E1000_CTRL_ADVD3WUC; | 8772 | ctrl |= E1000_CTRL_ADVD3WUC; |
| 8787 | wr32(E1000_CTRL, ctrl); | 8773 | wr32(E1000_CTRL, ctrl); |
| 8788 | 8774 | ||
| @@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
| 8796 | wr32(E1000_WUFC, 0); | 8782 | wr32(E1000_WUFC, 0); |
| 8797 | } | 8783 | } |
| 8798 | 8784 | ||
| 8799 | *enable_wake = wufc || adapter->en_mng_pt; | 8785 | wake = wufc || adapter->en_mng_pt; |
| 8800 | if (!*enable_wake) | 8786 | if (!wake) |
| 8801 | igb_power_down_link(adapter); | 8787 | igb_power_down_link(adapter); |
| 8802 | else | 8788 | else |
| 8803 | igb_power_up_link(adapter); | 8789 | igb_power_up_link(adapter); |
| 8804 | 8790 | ||
| 8791 | if (enable_wake) | ||
| 8792 | *enable_wake = wake; | ||
| 8793 | |||
| 8805 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 8794 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
| 8806 | * would have already happened in close and is redundant. | 8795 | * would have already happened in close and is redundant. |
| 8807 | */ | 8796 | */ |
| @@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) | |||
| 8844 | 8833 | ||
| 8845 | static int __maybe_unused igb_suspend(struct device *dev) | 8834 | static int __maybe_unused igb_suspend(struct device *dev) |
| 8846 | { | 8835 | { |
| 8847 | int retval; | 8836 | return __igb_shutdown(to_pci_dev(dev), NULL, 0); |
| 8848 | bool wake; | ||
| 8849 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 8850 | |||
| 8851 | retval = __igb_shutdown(pdev, &wake, 0); | ||
| 8852 | if (retval) | ||
| 8853 | return retval; | ||
| 8854 | |||
| 8855 | if (wake) { | ||
| 8856 | pci_prepare_to_sleep(pdev); | ||
| 8857 | } else { | ||
| 8858 | pci_wake_from_d3(pdev, false); | ||
| 8859 | pci_set_power_state(pdev, PCI_D3hot); | ||
| 8860 | } | ||
| 8861 | |||
| 8862 | return 0; | ||
| 8863 | } | 8837 | } |
| 8864 | 8838 | ||
| 8865 | static int __maybe_unused igb_resume(struct device *dev) | 8839 | static int __maybe_unused igb_resume(struct device *dev) |
| @@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) | |||
| 8930 | 8904 | ||
| 8931 | static int __maybe_unused igb_runtime_suspend(struct device *dev) | 8905 | static int __maybe_unused igb_runtime_suspend(struct device *dev) |
| 8932 | { | 8906 | { |
| 8933 | struct pci_dev *pdev = to_pci_dev(dev); | 8907 | return __igb_shutdown(to_pci_dev(dev), NULL, 1); |
| 8934 | int retval; | ||
| 8935 | bool wake; | ||
| 8936 | |||
| 8937 | retval = __igb_shutdown(pdev, &wake, 1); | ||
| 8938 | if (retval) | ||
| 8939 | return retval; | ||
| 8940 | |||
| 8941 | if (wake) { | ||
| 8942 | pci_prepare_to_sleep(pdev); | ||
| 8943 | } else { | ||
| 8944 | pci_wake_from_d3(pdev, false); | ||
| 8945 | pci_set_power_state(pdev, PCI_D3hot); | ||
| 8946 | } | ||
| 8947 | |||
| 8948 | return 0; | ||
| 8949 | } | 8908 | } |
| 8950 | 8909 | ||
| 8951 | static int __maybe_unused igb_runtime_resume(struct device *dev) | 8910 | static int __maybe_unused igb_runtime_resume(struct device *dev) |
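In the igb hunks above, __igb_shutdown() now computes the wake decision in a local variable and writes through enable_wake only when the caller supplied a pointer, which lets igb_suspend() and igb_runtime_suspend() shrink to a single call with NULL (the removed pci_prepare_to_sleep()/pci_wake_from_d3() handling is presumably left to the PCI/PM core). A toy illustration of that optional out-parameter shape, with made-up names:

#include <stdbool.h>
#include <stdio.h>

static int demo_shutdown(bool *enable_wake, bool runtime)
{
	bool wake = !runtime;   /* stand-in for the real wufc/en_mng_pt logic */

	if (enable_wake)        /* the caller may not care about the result */
		*enable_wake = wake;
	return 0;
}

int main(void)
{
	bool wake;

	demo_shutdown(&wake, false);   /* shutdown-style caller wants the value */
	printf("wake=%d\n", wake);
	demo_shutdown(NULL, true);     /* suspend-style caller passes NULL */
	return 0;
}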
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
| @@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
| 905 | struct pci_dev *pdev = adapter->pdev; | 905 | struct pci_dev *pdev = adapter->pdev; |
| 906 | struct device *dev = &adapter->netdev->dev; | 906 | struct device *dev = &adapter->netdev->dev; |
| 907 | struct mii_bus *bus; | 907 | struct mii_bus *bus; |
| 908 | int err = -ENODEV; | ||
| 908 | 909 | ||
| 909 | adapter->mii_bus = devm_mdiobus_alloc(dev); | 910 | bus = devm_mdiobus_alloc(dev); |
| 910 | if (!adapter->mii_bus) | 911 | if (!bus) |
| 911 | return -ENOMEM; | 912 | return -ENOMEM; |
| 912 | 913 | ||
| 913 | bus = adapter->mii_bus; | ||
| 914 | |||
| 915 | switch (hw->device_id) { | 914 | switch (hw->device_id) { |
| 916 | /* C3000 SoCs */ | 915 | /* C3000 SoCs */ |
| 917 | case IXGBE_DEV_ID_X550EM_A_KR: | 916 | case IXGBE_DEV_ID_X550EM_A_KR: |
| @@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) | |||
| 949 | */ | 948 | */ |
| 950 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; | 949 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; |
| 951 | 950 | ||
| 952 | return mdiobus_register(bus); | 951 | err = mdiobus_register(bus); |
| 952 | if (!err) { | ||
| 953 | adapter->mii_bus = bus; | ||
| 954 | return 0; | ||
| 955 | } | ||
| 953 | 956 | ||
| 954 | ixgbe_no_mii_bus: | 957 | ixgbe_no_mii_bus: |
| 955 | devm_mdiobus_free(dev, bus); | 958 | devm_mdiobus_free(dev, bus); |
| 956 | adapter->mii_bus = NULL; | 959 | return err; |
| 957 | return -ENODEV; | ||
| 958 | } | 960 | } |
| 959 | 961 | ||
| 960 | /** | 962 | /** |
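The ixgbe hunk above builds the MDIO bus in a local variable and stores it in adapter->mii_bus only after mdiobus_register() succeeds, so the error path can free the bus without ever having published a stale pointer. A compact sketch of that publish-only-on-success ordering; every type and function below is invented for the demo.

#include <stdio.h>
#include <stdlib.h>

struct demo_bus {
	int registered;
};

struct demo_adapter {
	struct demo_bus *mii_bus;
};

static int demo_register(struct demo_bus *bus, int should_fail)
{
	if (should_fail)
		return -19;             /* -ENODEV-style failure */
	bus->registered = 1;
	return 0;
}

static int demo_mii_bus_init(struct demo_adapter *adapter, int should_fail)
{
	struct demo_bus *bus = calloc(1, sizeof(*bus));
	int err;

	if (!bus)
		return -12;             /* -ENOMEM */

	err = demo_register(bus, should_fail);
	if (!err) {
		adapter->mii_bus = bus; /* publish only after success */
		return 0;
	}

	free(bus);                      /* failure: nothing was published */
	return err;
}

int main(void)
{
	struct demo_adapter a = { 0 };
	int err;

	err = demo_mii_bus_init(&a, 1);
	printf("fail:    err=%d mii_bus=%p\n", err, (void *)a.mii_bus);

	err = demo_mii_bus_init(&a, 0);
	printf("success: err=%d mii_bus=%p\n", err, (void *)a.mii_bus);

	free(a.mii_bus);
	return 0;
}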
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 71c65cc17904..d3eaf2ceaa39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); | |||
| 858 | * switching channels | 858 | * switching channels |
| 859 | */ | 859 | */ |
| 860 | typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); | 860 | typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); |
| 861 | int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); | ||
| 861 | int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, | 862 | int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, |
| 862 | struct mlx5e_channels *new_chs, | 863 | struct mlx5e_channels *new_chs, |
| 863 | mlx5e_fp_hw_modify hw_modify); | 864 | mlx5e_fp_hw_modify hw_modify); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 122927f3a600..d5e5afbdca6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c | |||
| @@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, | |||
| 96 | if (!eproto) | 96 | if (!eproto) |
| 97 | return -EINVAL; | 97 | return -EINVAL; |
| 98 | 98 | ||
| 99 | if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) | ||
| 100 | return -EOPNOTSUPP; | ||
| 101 | |||
| 102 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); | 99 | err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); |
| 103 | if (err) | 100 | if (err) |
| 104 | return err; | 101 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index eac245a93f91..4ab0d030b544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | |||
| @@ -122,7 +122,9 @@ out: | |||
| 122 | return err; | 122 | return err; |
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ | 125 | /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) |
| 126 | * minimum speed value is 40Gbps | ||
| 127 | */ | ||
| 126 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | 128 | static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) |
| 127 | { | 129 | { |
| 128 | u32 speed; | 130 | u32 speed; |
| @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
| 130 | int err; | 132 | int err; |
| 131 | 133 | ||
| 132 | err = mlx5e_port_linkspeed(priv->mdev, &speed); | 134 | err = mlx5e_port_linkspeed(priv->mdev, &speed); |
| 133 | if (err) { | 135 | if (err) |
| 134 | mlx5_core_warn(priv->mdev, "cannot get port speed\n"); | 136 | speed = SPEED_40000; |
| 135 | return 0; | 137 | speed = max_t(u32, speed, SPEED_40000); |
| 136 | } | ||
| 137 | 138 | ||
| 138 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; | 139 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; |
| 139 | 140 | ||
| @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | 145 | static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, |
| 145 | u32 xoff, unsigned int mtu) | 146 | u32 xoff, unsigned int max_mtu) |
| 146 | { | 147 | { |
| 147 | int i; | 148 | int i; |
| 148 | 149 | ||
| @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | if (port_buffer->buffer[i].size < | 157 | if (port_buffer->buffer[i].size < |
| 157 | (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) | 158 | (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) |
| 158 | return -ENOMEM; | 159 | return -ENOMEM; |
| 159 | 160 | ||
| 160 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; | 161 | port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; |
| 161 | port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; | 162 | port_buffer->buffer[i].xon = |
| 163 | port_buffer->buffer[i].xoff - max_mtu; | ||
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | return 0; | 166 | return 0; |
| @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 166 | 168 | ||
| 167 | /** | 169 | /** |
| 168 | * update_buffer_lossy() | 170 | * update_buffer_lossy() |
| 169 | * mtu: device's MTU | 171 | * max_mtu: netdev's max_mtu |
| 170 | * pfc_en: <input> current pfc configuration | 172 | * pfc_en: <input> current pfc configuration |
| 171 | * buffer: <input> current prio to buffer mapping | 173 | * buffer: <input> current prio to buffer mapping |
| 172 | * xoff: <input> xoff value | 174 | * xoff: <input> xoff value |
| @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, | |||
| 183 | * Return 0 if no error. | 185 | * Return 0 if no error. |
| 184 | * Set change to true if buffer configuration is modified. | 186 | * Set change to true if buffer configuration is modified. |
| 185 | */ | 187 | */ |
| 186 | static int update_buffer_lossy(unsigned int mtu, | 188 | static int update_buffer_lossy(unsigned int max_mtu, |
| 187 | u8 pfc_en, u8 *buffer, u32 xoff, | 189 | u8 pfc_en, u8 *buffer, u32 xoff, |
| 188 | struct mlx5e_port_buffer *port_buffer, | 190 | struct mlx5e_port_buffer *port_buffer, |
| 189 | bool *change) | 191 | bool *change) |
| @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
| 220 | } | 222 | } |
| 221 | 223 | ||
| 222 | if (changed) { | 224 | if (changed) { |
| 223 | err = update_xoff_threshold(port_buffer, xoff, mtu); | 225 | err = update_xoff_threshold(port_buffer, xoff, max_mtu); |
| 224 | if (err) | 226 | if (err) |
| 225 | return err; | 227 | return err; |
| 226 | 228 | ||
| @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, | |||
| 230 | return 0; | 232 | return 0; |
| 231 | } | 233 | } |
| 232 | 234 | ||
| 235 | #define MINIMUM_MAX_MTU 9216 | ||
| 233 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | 236 | int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, |
| 234 | u32 change, unsigned int mtu, | 237 | u32 change, unsigned int mtu, |
| 235 | struct ieee_pfc *pfc, | 238 | struct ieee_pfc *pfc, |
| @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 241 | bool update_prio2buffer = false; | 244 | bool update_prio2buffer = false; |
| 242 | u8 buffer[MLX5E_MAX_PRIORITY]; | 245 | u8 buffer[MLX5E_MAX_PRIORITY]; |
| 243 | bool update_buffer = false; | 246 | bool update_buffer = false; |
| 247 | unsigned int max_mtu; | ||
| 244 | u32 total_used = 0; | 248 | u32 total_used = 0; |
| 245 | u8 curr_pfc_en; | 249 | u8 curr_pfc_en; |
| 246 | int err; | 250 | int err; |
| 247 | int i; | 251 | int i; |
| 248 | 252 | ||
| 249 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); | 253 | mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); |
| 254 | max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); | ||
| 250 | 255 | ||
| 251 | err = mlx5e_port_query_buffer(priv, &port_buffer); | 256 | err = mlx5e_port_query_buffer(priv, &port_buffer); |
| 252 | if (err) | 257 | if (err) |
| @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 254 | 259 | ||
| 255 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { | 260 | if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { |
| 256 | update_buffer = true; | 261 | update_buffer = true; |
| 257 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 262 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 258 | if (err) | 263 | if (err) |
| 259 | return err; | 264 | return err; |
| 260 | } | 265 | } |
| @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 264 | if (err) | 269 | if (err) |
| 265 | return err; | 270 | return err; |
| 266 | 271 | ||
| 267 | err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, | 272 | err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, |
| 268 | &port_buffer, &update_buffer); | 273 | &port_buffer, &update_buffer); |
| 269 | if (err) | 274 | if (err) |
| 270 | return err; | 275 | return err; |
| @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 276 | if (err) | 281 | if (err) |
| 277 | return err; | 282 | return err; |
| 278 | 283 | ||
| 279 | err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, | 284 | err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, |
| 280 | &port_buffer, &update_buffer); | 285 | xoff, &port_buffer, &update_buffer); |
| 281 | if (err) | 286 | if (err) |
| 282 | return err; | 287 | return err; |
| 283 | } | 288 | } |
| @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 301 | return -EINVAL; | 306 | return -EINVAL; |
| 302 | 307 | ||
| 303 | update_buffer = true; | 308 | update_buffer = true; |
| 304 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 309 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 305 | if (err) | 310 | if (err) |
| 306 | return err; | 311 | return err; |
| 307 | } | 312 | } |
| @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, | |||
| 309 | /* Need to update buffer configuration if xoff value is changed */ | 314 | /* Need to update buffer configuration if xoff value is changed */ |
| 310 | if (!update_buffer && xoff != priv->dcbx.xoff) { | 315 | if (!update_buffer && xoff != priv->dcbx.xoff) { |
| 311 | update_buffer = true; | 316 | update_buffer = true; |
| 312 | err = update_xoff_threshold(&port_buffer, xoff, mtu); | 317 | err = update_xoff_threshold(&port_buffer, xoff, max_mtu); |
| 313 | if (err) | 318 | if (err) |
| 314 | return err; | 319 | return err; |
| 315 | } | 320 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d38e62cdf24..476dd97f7f2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | |||
| @@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) | |||
| 186 | 186 | ||
| 187 | static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) | 187 | static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) |
| 188 | { | 188 | { |
| 189 | int err; | 189 | int err = 0; |
| 190 | 190 | ||
| 191 | rtnl_lock(); | 191 | rtnl_lock(); |
| 192 | mutex_lock(&priv->state_lock); | 192 | mutex_lock(&priv->state_lock); |
| 193 | mlx5e_close_locked(priv->netdev); | 193 | |
| 194 | err = mlx5e_open_locked(priv->netdev); | 194 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) |
| 195 | goto out; | ||
| 196 | |||
| 197 | err = mlx5e_safe_reopen_channels(priv); | ||
| 198 | |||
| 199 | out: | ||
| 195 | mutex_unlock(&priv->state_lock); | 200 | mutex_unlock(&priv->state_lock); |
| 196 | rtnl_unlock(); | 201 | rtnl_unlock(); |
| 197 | 202 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index fa2a3c444cdc..eec07b34b4ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |||
| @@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, | |||
| 39 | return -EOPNOTSUPP; | 39 | return -EOPNOTSUPP; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | if (!(mlx5e_eswitch_rep(*out_dev) && | ||
| 43 | mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) | ||
| 44 | return -EOPNOTSUPP; | ||
| 45 | |||
| 42 | return 0; | 46 | return 0; |
| 43 | } | 47 | } |
| 44 | 48 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 03b2a9f9c589..cad34d6f5f45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | |||
| @@ -33,6 +33,26 @@ | |||
| 33 | #include <linux/bpf_trace.h> | 33 | #include <linux/bpf_trace.h> |
| 34 | #include "en/xdp.h" | 34 | #include "en/xdp.h" |
| 35 | 35 | ||
| 36 | int mlx5e_xdp_max_mtu(struct mlx5e_params *params) | ||
| 37 | { | ||
| 38 | int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM; | ||
| 39 | |||
| 40 | /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). | ||
| 41 | * The condition checked in mlx5e_rx_is_linear_skb is: | ||
| 42 | * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) | ||
| 43 | * (Note that hw_mtu == sw_mtu + hard_mtu.) | ||
| 44 | * What is returned from this function is: | ||
| 45 | * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) | ||
| 46 | * After assigning sw_mtu := max_mtu, the left side of (1) turns to | ||
| 47 | * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, | ||
| 48 | * because both PAGE_SIZE and S are already aligned. Any number greater | ||
| 49 | * than max_mtu would make the left side of (1) greater than PAGE_SIZE, | ||
| 50 | * so max_mtu is the maximum MTU allowed. | ||
| 51 | */ | ||
| 52 | |||
| 53 | return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); | ||
| 54 | } | ||
| 55 | |||
| 36 | static inline bool | 56 | static inline bool |
| 37 | mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, | 57 | mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, |
| 38 | struct xdp_buff *xdp) | 58 | struct xdp_buff *xdp) |
| @@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) | |||
| 304 | mlx5e_xdpi_fifo_pop(xdpi_fifo); | 324 | mlx5e_xdpi_fifo_pop(xdpi_fifo); |
| 305 | 325 | ||
| 306 | if (is_redirect) { | 326 | if (is_redirect) { |
| 307 | xdp_return_frame(xdpi.xdpf); | ||
| 308 | dma_unmap_single(sq->pdev, xdpi.dma_addr, | 327 | dma_unmap_single(sq->pdev, xdpi.dma_addr, |
| 309 | xdpi.xdpf->len, DMA_TO_DEVICE); | 328 | xdpi.xdpf->len, DMA_TO_DEVICE); |
| 329 | xdp_return_frame(xdpi.xdpf); | ||
| 310 | } else { | 330 | } else { |
| 311 | /* Recycle RX page */ | 331 | /* Recycle RX page */ |
| 312 | mlx5e_page_release(rq, &xdpi.di, true); | 332 | mlx5e_page_release(rq, &xdpi.di, true); |
| @@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) | |||
| 345 | mlx5e_xdpi_fifo_pop(xdpi_fifo); | 365 | mlx5e_xdpi_fifo_pop(xdpi_fifo); |
| 346 | 366 | ||
| 347 | if (is_redirect) { | 367 | if (is_redirect) { |
| 348 | xdp_return_frame(xdpi.xdpf); | ||
| 349 | dma_unmap_single(sq->pdev, xdpi.dma_addr, | 368 | dma_unmap_single(sq->pdev, xdpi.dma_addr, |
| 350 | xdpi.xdpf->len, DMA_TO_DEVICE); | 369 | xdpi.xdpf->len, DMA_TO_DEVICE); |
| 370 | xdp_return_frame(xdpi.xdpf); | ||
| 351 | } else { | 371 | } else { |
| 352 | /* Recycle RX page */ | 372 | /* Recycle RX page */ |
| 353 | mlx5e_page_release(rq, &xdpi.di, false); | 373 | mlx5e_page_release(rq, &xdpi.di, false); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index ee27a7c8cd87..553956cadc8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | |||
| @@ -34,13 +34,12 @@ | |||
| 34 | 34 | ||
| 35 | #include "en.h" | 35 | #include "en.h" |
| 36 | 36 | ||
| 37 | #define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ | ||
| 38 | MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) | ||
| 39 | #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) | 37 | #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) |
| 40 | #define MLX5E_XDP_TX_EMPTY_DS_COUNT \ | 38 | #define MLX5E_XDP_TX_EMPTY_DS_COUNT \ |
| 41 | (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) | 39 | (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) |
| 42 | #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) | 40 | #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) |
| 43 | 41 | ||
| 42 | int mlx5e_xdp_max_mtu(struct mlx5e_params *params); | ||
| 44 | bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, | 43 | bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, |
| 45 | void *va, u16 *rx_headroom, u32 *len); | 44 | void *va, u16 *rx_headroom, u32 *len); |
| 46 | bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq); | 45 | bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq); |
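The comment added in mlx5e_xdp_max_mtu() above derives the largest MTU for which an XDP frame still fits in a single page: PAGE_SIZE minus the cache-line-aligned skb_shared_info, minus the headroom (NET_IP_ALIGN + XDP_PACKET_HEADROOM), minus the hardware MTU overhead. The sketch below plugs in one plausible x86_64 set of constants to get a concrete figure; every value is an assumption for illustration, not read from the kernel.

#include <stdio.h>

#define DEMO_PAGE_SIZE       4096u
#define DEMO_CACHE_LINE        64u
#define DEMO_SHARED_INFO_SZ   320u   /* assumed sizeof(struct skb_shared_info) */
#define DEMO_XDP_HEADROOM     256u   /* XDP_PACKET_HEADROOM */
#define DEMO_NET_IP_ALIGN       0u   /* 0 on x86 */
#define DEMO_HARD_MTU          22u   /* assumed params->hard_mtu overhead */

/* Round v up to the next multiple of a, like SKB_DATA_ALIGN. */
static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	unsigned int hr = DEMO_NET_IP_ALIGN + DEMO_XDP_HEADROOM;
	unsigned int s  = align_up(DEMO_SHARED_INFO_SZ, DEMO_CACHE_LINE);
	unsigned int max_mtu = DEMO_PAGE_SIZE - s - hr - DEMO_HARD_MTU;

	printf("assumed XDP max MTU: %u bytes\n", max_mtu);
	return 0;
}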
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 3078491cc0d0..1539cf3de5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c | |||
| @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
| 45 | if (err) | 45 | if (err) |
| 46 | return err; | 46 | return err; |
| 47 | 47 | ||
| 48 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 48 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); | 49 | list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); |
| 50 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 49 | 51 | ||
| 50 | return 0; | 52 | return 0; |
| 51 | } | 53 | } |
| @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, | |||
| 53 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, | 55 | void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, |
| 54 | struct mlx5e_tir *tir) | 56 | struct mlx5e_tir *tir) |
| 55 | { | 57 | { |
| 58 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 56 | mlx5_core_destroy_tir(mdev, tir->tirn); | 59 | mlx5_core_destroy_tir(mdev, tir->tirn); |
| 57 | list_del(&tir->list); | 60 | list_del(&tir->list); |
| 61 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 58 | } | 62 | } |
| 59 | 63 | ||
| 60 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, | 64 | static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, |
| @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) | |||
| 114 | } | 118 | } |
| 115 | 119 | ||
| 116 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); | 120 | INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); |
| 121 | mutex_init(&mdev->mlx5e_res.td.list_lock); | ||
| 117 | 122 | ||
| 118 | return 0; | 123 | return 0; |
| 119 | 124 | ||
| @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
| 141 | { | 146 | { |
| 142 | struct mlx5_core_dev *mdev = priv->mdev; | 147 | struct mlx5_core_dev *mdev = priv->mdev; |
| 143 | struct mlx5e_tir *tir; | 148 | struct mlx5e_tir *tir; |
| 144 | int err = -ENOMEM; | 149 | int err = 0; |
| 145 | u32 tirn = 0; | 150 | u32 tirn = 0; |
| 146 | int inlen; | 151 | int inlen; |
| 147 | void *in; | 152 | void *in; |
| 148 | 153 | ||
| 149 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | 154 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
| 150 | in = kvzalloc(inlen, GFP_KERNEL); | 155 | in = kvzalloc(inlen, GFP_KERNEL); |
| 151 | if (!in) | 156 | if (!in) { |
| 157 | err = -ENOMEM; | ||
| 152 | goto out; | 158 | goto out; |
| 159 | } | ||
| 153 | 160 | ||
| 154 | if (enable_uc_lb) | 161 | if (enable_uc_lb) |
| 155 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, | 162 | MLX5_SET(modify_tir_in, in, ctx.self_lb_block, |
| @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) | |||
| 157 | 164 | ||
| 158 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); | 165 | MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); |
| 159 | 166 | ||
| 167 | mutex_lock(&mdev->mlx5e_res.td.list_lock); | ||
| 160 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { | 168 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { |
| 161 | tirn = tir->tirn; | 169 | tirn = tir->tirn; |
| 162 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); | 170 | err = mlx5_core_modify_tir(mdev, tirn, in, inlen); |
| @@ -168,6 +176,7 @@ out: | |||
| 168 | kvfree(in); | 176 | kvfree(in); |
| 169 | if (err) | 177 | if (err) |
| 170 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); | 178 | netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); |
| 179 | mutex_unlock(&mdev->mlx5e_res.td.list_lock); | ||
| 171 | 180 | ||
| 172 | return err; | 181 | return err; |
| 173 | } | 182 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a0987cc5fe4a..78dc8fe2a83c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, | |||
| 603 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 603 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, | 606 | static void ptys2ethtool_adver_link(unsigned long *advertising_modes, |
| 607 | unsigned long *advertising_modes, | 607 | u32 eth_proto_cap, bool ext) |
| 608 | u32 eth_proto_cap) | ||
| 609 | { | 608 | { |
| 610 | unsigned long proto_cap = eth_proto_cap; | 609 | unsigned long proto_cap = eth_proto_cap; |
| 611 | struct ptys2ethtool_config *table; | 610 | struct ptys2ethtool_config *table; |
| 612 | u32 max_size; | 611 | u32 max_size; |
| 613 | int proto; | 612 | int proto; |
| 614 | 613 | ||
| 615 | mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); | 614 | table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; |
| 615 | max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : | ||
| 616 | ARRAY_SIZE(ptys2legacy_ethtool_table); | ||
| 617 | |||
| 616 | for_each_set_bit(proto, &proto_cap, max_size) | 618 | for_each_set_bit(proto, &proto_cap, max_size) |
| 617 | bitmap_or(advertising_modes, advertising_modes, | 619 | bitmap_or(advertising_modes, advertising_modes, |
| 618 | table[proto].advertised, | 620 | table[proto].advertised, |
| @@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | |||
| 794 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); | 796 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); |
| 795 | } | 797 | } |
| 796 | 798 | ||
| 797 | static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, | 799 | static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, |
| 798 | u8 tx_pause, u8 rx_pause, | 800 | struct ethtool_link_ksettings *link_ksettings, |
| 799 | struct ethtool_link_ksettings *link_ksettings) | 801 | bool ext) |
| 800 | { | 802 | { |
| 801 | unsigned long *advertising = link_ksettings->link_modes.advertising; | 803 | unsigned long *advertising = link_ksettings->link_modes.advertising; |
| 802 | ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); | 804 | ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); |
| 803 | 805 | ||
| 804 | if (rx_pause) | 806 | if (rx_pause) |
| 805 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); | 807 | ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); |
| @@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, | |||
| 854 | struct ethtool_link_ksettings *link_ksettings) | 856 | struct ethtool_link_ksettings *link_ksettings) |
| 855 | { | 857 | { |
| 856 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; | 858 | unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; |
| 859 | bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | ||
| 857 | 860 | ||
| 858 | ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); | 861 | ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); |
| 859 | } | 862 | } |
| 860 | 863 | ||
| 861 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | 864 | int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, |
| @@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 872 | u8 an_disable_admin; | 875 | u8 an_disable_admin; |
| 873 | u8 an_status; | 876 | u8 an_status; |
| 874 | u8 connector_type; | 877 | u8 connector_type; |
| 878 | bool admin_ext; | ||
| 875 | bool ext; | 879 | bool ext; |
| 876 | int err; | 880 | int err; |
| 877 | 881 | ||
| @@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 886 | eth_proto_capability); | 890 | eth_proto_capability); |
| 887 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 891 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
| 888 | eth_proto_admin); | 892 | eth_proto_admin); |
| 893 | /* Fields: eth_proto_admin and ext_eth_proto_admin are | ||
| 894 | * mutually exclusive. Hence try reading legacy advertising | ||
| 895 | * when extended advertising is zero. | ||
| 896 | * admin_ext indicates how eth_proto_admin should be | ||
| 897 | * interpreted | ||
| 898 | */ | ||
| 899 | admin_ext = ext; | ||
| 900 | if (ext && !eth_proto_admin) { | ||
| 901 | eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false, | ||
| 902 | eth_proto_admin); | ||
| 903 | admin_ext = false; | ||
| 904 | } | ||
| 905 | |||
| 889 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 906 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, |
| 890 | eth_proto_oper); | 907 | eth_proto_oper); |
| 891 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); | 908 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); |
| @@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 899 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); | 916 | ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); |
| 900 | 917 | ||
| 901 | get_supported(mdev, eth_proto_cap, link_ksettings); | 918 | get_supported(mdev, eth_proto_cap, link_ksettings); |
| 902 | get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); | 919 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, |
| 920 | admin_ext); | ||
| 903 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); | 921 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); |
| 904 | 922 | ||
| 905 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; | 923 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
| @@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
| 997 | 1015 | ||
| 998 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) | 1016 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) |
| 999 | 1017 | ||
| 1000 | ext_requested = (link_ksettings->link_modes.advertising[0] > | 1018 | ext_requested = !!(link_ksettings->link_modes.advertising[0] > |
| 1001 | MLX5E_PTYS_EXT); | 1019 | MLX5E_PTYS_EXT || |
| 1020 | link_ksettings->link_modes.advertising[1]); | ||
| 1002 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | 1021 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); |
| 1003 | | 1022 | ext_requested &= ext_supported; |
| 1004 | /*when ptys_extended_ethernet is set legacy link modes are deprecated */ | ||
| 1005 | if (ext_requested != ext_supported) | ||
| 1006 | return -EPROTONOSUPPORT; | ||
| 1007 | 1023 | ||
| 1008 | speed = link_ksettings->base.speed; | 1024 | speed = link_ksettings->base.speed; |
| 1009 | ethtool2ptys_adver_func = ext_requested ? | 1025 | ethtool2ptys_adver_func = ext_requested ? |
| 1010 | mlx5e_ethtool2ptys_ext_adver_link : | 1026 | mlx5e_ethtool2ptys_ext_adver_link : |
| 1011 | mlx5e_ethtool2ptys_adver_link; | 1027 | mlx5e_ethtool2ptys_adver_link; |
| 1012 | err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); | 1028 | err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); |
| 1013 | if (err) { | 1029 | if (err) { |
| 1014 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", | 1030 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", |
| 1015 | __func__, err); | 1031 | __func__, err); |
| @@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
| 1037 | if (!an_changes && link_modes == eproto.admin) | 1053 | if (!an_changes && link_modes == eproto.admin) |
| 1038 | goto out; | 1054 | goto out; |
| 1039 | 1055 | ||
| 1040 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); | 1056 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); |
| 1041 | mlx5_toggle_port_link(mdev); | 1057 | mlx5_toggle_port_link(mdev); |
| 1042 | 1058 | ||
| 1043 | out: | 1059 | out: |
| @@ -1570,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, | |||
| 1570 | break; | 1586 | break; |
| 1571 | case MLX5_MODULE_ID_SFP: | 1587 | case MLX5_MODULE_ID_SFP: |
| 1572 | modinfo->type = ETH_MODULE_SFF_8472; | 1588 | modinfo->type = ETH_MODULE_SFF_8472; |
| 1573 | modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; | 1589 | modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; |
| 1574 | break; | 1590 | break; |
| 1575 | default: | 1591 | default: |
| 1576 | netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", | 1592 | netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", |
| @@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) | |||
| 1752 | struct mlx5e_channel *c; | 1768 | struct mlx5e_channel *c; |
| 1753 | int i; | 1769 | int i; |
| 1754 | 1770 | ||
| 1755 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) | 1771 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || |
| 1772 | priv->channels.params.xdp_prog) | ||
| 1756 | return 0; | 1773 | return 0; |
| 1757 | 1774 | ||
| 1758 | for (i = 0; i < channels->num; i++) { | 1775 | for (i = 0; i < channels->num; i++) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5fdbd3190d9..46157e2a1e5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, | |||
| 951 | if (params->rx_dim_enabled) | 951 | if (params->rx_dim_enabled) |
| 952 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); | 952 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); |
| 953 | 953 | ||
| 954 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) | 954 | /* We disable csum_complete when XDP is enabled since |
| 955 | * XDP programs might manipulate packets which will render | ||
| 956 | * skb->checksum incorrect. | ||
| 957 | */ | ||
| 958 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) | ||
| 955 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); | 959 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); |
| 956 | 960 | ||
| 957 | return 0; | 961 | return 0; |
| @@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, | |||
| 2937 | return 0; | 2941 | return 0; |
| 2938 | } | 2942 | } |
| 2939 | 2943 | ||
| 2944 | int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) | ||
| 2945 | { | ||
| 2946 | struct mlx5e_channels new_channels = {}; | ||
| 2947 | |||
| 2948 | new_channels.params = priv->channels.params; | ||
| 2949 | return mlx5e_safe_switch_channels(priv, &new_channels, NULL); | ||
| 2950 | } | ||
| 2951 | |||
| 2940 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) | 2952 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
| 2941 | { | 2953 | { |
| 2942 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; | 2954 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; |
| @@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, | |||
| 3765 | if (params->xdp_prog && | 3777 | if (params->xdp_prog && |
| 3766 | !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { | 3778 | !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { |
| 3767 | netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", | 3779 | netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", |
| 3768 | new_mtu, MLX5E_XDP_MAX_MTU); | 3780 | new_mtu, mlx5e_xdp_max_mtu(params)); |
| 3769 | err = -EINVAL; | 3781 | err = -EINVAL; |
| 3770 | goto out; | 3782 | goto out; |
| 3771 | } | 3783 | } |
| @@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) | |||
| 4161 | if (!report_failed) | 4173 | if (!report_failed) |
| 4162 | goto unlock; | 4174 | goto unlock; |
| 4163 | 4175 | ||
| 4164 | mlx5e_close_locked(priv->netdev); | 4176 | err = mlx5e_safe_reopen_channels(priv); |
| 4165 | err = mlx5e_open_locked(priv->netdev); | ||
| 4166 | if (err) | 4177 | if (err) |
| 4167 | netdev_err(priv->netdev, | 4178 | netdev_err(priv->netdev, |
| 4168 | "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", | 4179 | "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", |
| 4169 | err); | 4180 | err); |
| 4170 | 4181 | ||
| 4171 | unlock: | 4182 | unlock: |
| @@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) | |||
| 4201 | 4212 | ||
| 4202 | if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { | 4213 | if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { |
| 4203 | netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", | 4214 | netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", |
| 4204 | new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); | 4215 | new_channels.params.sw_mtu, |
| 4216 | mlx5e_xdp_max_mtu(&new_channels.params)); | ||
| 4205 | return -EINVAL; | 4217 | return -EINVAL; |
| 4206 | } | 4218 | } |
| 4207 | 4219 | ||
| @@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, | |||
| 4553 | { | 4565 | { |
| 4554 | enum mlx5e_traffic_types tt; | 4566 | enum mlx5e_traffic_types tt; |
| 4555 | 4567 | ||
| 4556 | rss_params->hfunc = ETH_RSS_HASH_XOR; | 4568 | rss_params->hfunc = ETH_RSS_HASH_TOP; |
| 4557 | netdev_rss_key_fill(rss_params->toeplitz_hash_key, | 4569 | netdev_rss_key_fill(rss_params->toeplitz_hash_key, |
| 4558 | sizeof(rss_params->toeplitz_hash_key)); | 4570 | sizeof(rss_params->toeplitz_hash_key)); |
| 4559 | mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, | 4571 | mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3dde5c7e0739..c3b3002ff62f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, | |||
| 692 | { | 692 | { |
| 693 | *proto = ((struct ethhdr *)skb->data)->h_proto; | 693 | *proto = ((struct ethhdr *)skb->data)->h_proto; |
| 694 | *proto = __vlan_get_protocol(skb, *proto, network_depth); | 694 | *proto = __vlan_get_protocol(skb, *proto, network_depth); |
| 695 | return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); | 695 | |
| 696 | if (*proto == htons(ETH_P_IP)) | ||
| 697 | return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); | ||
| 698 | |||
| 699 | if (*proto == htons(ETH_P_IPV6)) | ||
| 700 | return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); | ||
| 701 | |||
| 702 | return false; | ||
| 696 | } | 703 | } |
| 697 | 704 | ||
| 698 | static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) | 705 | static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) |
| @@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) | |||
| 712 | rq->stats->ecn_mark += !!rc; | 719 | rq->stats->ecn_mark += !!rc; |
| 713 | } | 720 | } |
| 714 | 721 | ||
| 715 | static u32 mlx5e_get_fcs(const struct sk_buff *skb) | ||
| 716 | { | ||
| 717 | const void *fcs_bytes; | ||
| 718 | u32 _fcs_bytes; | ||
| 719 | |||
| 720 | fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, | ||
| 721 | ETH_FCS_LEN, &_fcs_bytes); | ||
| 722 | |||
| 723 | return __get_unaligned_cpu32(fcs_bytes); | ||
| 724 | } | ||
| 725 | |||
| 726 | static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | 722 | static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) |
| 727 | { | 723 | { |
| 728 | void *ip_p = skb->data + network_depth; | 724 | void *ip_p = skb->data + network_depth; |
| @@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | |||
| 733 | 729 | ||
| 734 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | 730 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) |
| 735 | 731 | ||
| 732 | #define MAX_PADDING 8 | ||
| 733 | |||
| 734 | static void | ||
| 735 | tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, | ||
| 736 | struct mlx5e_rq_stats *stats) | ||
| 737 | { | ||
| 738 | stats->csum_complete_tail_slow++; | ||
| 739 | skb->csum = csum_block_add(skb->csum, | ||
| 740 | skb_checksum(skb, offset, len, 0), | ||
| 741 | offset); | ||
| 742 | } | ||
| 743 | |||
| 744 | static void | ||
| 745 | tail_padding_csum(struct sk_buff *skb, int offset, | ||
| 746 | struct mlx5e_rq_stats *stats) | ||
| 747 | { | ||
| 748 | u8 tail_padding[MAX_PADDING]; | ||
| 749 | int len = skb->len - offset; | ||
| 750 | void *tail; | ||
| 751 | |||
| 752 | if (unlikely(len > MAX_PADDING)) { | ||
| 753 | tail_padding_csum_slow(skb, offset, len, stats); | ||
| 754 | return; | ||
| 755 | } | ||
| 756 | |||
| 757 | tail = skb_header_pointer(skb, offset, len, tail_padding); | ||
| 758 | if (unlikely(!tail)) { | ||
| 759 | tail_padding_csum_slow(skb, offset, len, stats); | ||
| 760 | return; | ||
| 761 | } | ||
| 762 | |||
| 763 | stats->csum_complete_tail++; | ||
| 764 | skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); | ||
| 765 | } | ||
| 766 | |||
| 767 | static void | ||
| 768 | mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, | ||
| 769 | struct mlx5e_rq_stats *stats) | ||
| 770 | { | ||
| 771 | struct ipv6hdr *ip6; | ||
| 772 | struct iphdr *ip4; | ||
| 773 | int pkt_len; | ||
| 774 | |||
| 775 | switch (proto) { | ||
| 776 | case htons(ETH_P_IP): | ||
| 777 | ip4 = (struct iphdr *)(skb->data + network_depth); | ||
| 778 | pkt_len = network_depth + ntohs(ip4->tot_len); | ||
| 779 | break; | ||
| 780 | case htons(ETH_P_IPV6): | ||
| 781 | ip6 = (struct ipv6hdr *)(skb->data + network_depth); | ||
| 782 | pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); | ||
| 783 | break; | ||
| 784 | default: | ||
| 785 | return; | ||
| 786 | } | ||
| 787 | |||
| 788 | if (likely(pkt_len >= skb->len)) | ||
| 789 | return; | ||
| 790 | |||
| 791 | tail_padding_csum(skb, pkt_len, stats); | ||
| 792 | } | ||
| 793 | |||
| 736 | static inline void mlx5e_handle_csum(struct net_device *netdev, | 794 | static inline void mlx5e_handle_csum(struct net_device *netdev, |
| 737 | struct mlx5_cqe64 *cqe, | 795 | struct mlx5_cqe64 *cqe, |
| 738 | struct mlx5e_rq *rq, | 796 | struct mlx5e_rq *rq, |
| @@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 752 | return; | 810 | return; |
| 753 | } | 811 | } |
| 754 | 812 | ||
| 755 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) | 813 | /* True when explicitly set via priv flag, or XDP prog is loaded */ |
| 814 | if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) | ||
| 756 | goto csum_unnecessary; | 815 | goto csum_unnecessary; |
| 757 | 816 | ||
| 758 | /* CQE csum doesn't cover padding octets in short ethernet | 817 | /* CQE csum doesn't cover padding octets in short ethernet |
| @@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 780 | skb->csum = csum_partial(skb->data + ETH_HLEN, | 839 | skb->csum = csum_partial(skb->data + ETH_HLEN, |
| 781 | network_depth - ETH_HLEN, | 840 | network_depth - ETH_HLEN, |
| 782 | skb->csum); | 841 | skb->csum); |
| 783 | if (unlikely(netdev->features & NETIF_F_RXFCS)) | 842 | |
| 784 | skb->csum = csum_block_add(skb->csum, | 843 | mlx5e_skb_padding_csum(skb, network_depth, proto, stats); |
| 785 | (__force __wsum)mlx5e_get_fcs(skb), | ||
| 786 | skb->len - ETH_FCS_LEN); | ||
| 787 | stats->csum_complete++; | 844 | stats->csum_complete++; |
| 788 | return; | 845 | return; |
| 789 | } | 846 | } |
| 790 | 847 | ||
| 791 | csum_unnecessary: | 848 | csum_unnecessary: |
| 792 | if (likely((cqe->hds_ip_ext & CQE_L3_OK) && | 849 | if (likely((cqe->hds_ip_ext & CQE_L3_OK) && |
| 793 | ((cqe->hds_ip_ext & CQE_L4_OK) || | 850 | (cqe->hds_ip_ext & CQE_L4_OK))) { |
| 794 | (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { | ||
| 795 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 851 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 796 | if (cqe_is_tunneled(cqe)) { | 852 | if (cqe_is_tunneled(cqe)) { |
| 797 | skb->csum_level = 1; | 853 | skb->csum_level = 1; |
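Note on the en_rx.c hunks above: they teach the CHECKSUM_COMPLETE path about tail padding. Short Ethernet frames are padded up to the minimum frame size, the CQE checksum does not cover those padding octets, so mlx5e_skb_padding_csum() compares skb->len against the IP total length and, for any trailing bytes, tail_padding_csum() folds their partial checksum into skb->csum, replacing the narrower NETIF_F_RXFCS/FCS fix-up removed in the same hunk. The userspace sketch below only models that fold with one's-complement arithmetic; csum16(), the 64-byte frame and the even 46-byte packet length are assumptions for the demo (the real csum_block_add() also copes with odd offsets).

#include <stdint.h>
#include <stdio.h>

/* RFC 1071 style one's-complement accumulator over a byte range. */
static uint32_t csum16(const uint8_t *data, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += ((uint32_t)data[i] << 8) | data[i + 1];
    if (len & 1)
        sum += (uint32_t)data[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t frame[64] = { 0 };
    size_t pkt_len = 46;                 /* length the IP header reports */

    frame[0]  = 0x45;                    /* a couple of payload bytes    */
    frame[20] = 0x11;
    frame[50] = 0xaa;                    /* non-zero byte in the padding */

    uint32_t hw_csum  = csum16(frame, pkt_len, 0);                /* covers the packet only */
    uint32_t pad_csum = csum16(frame + pkt_len, sizeof(frame) - pkt_len, 0);
    uint32_t expected = csum16(frame, sizeof(frame), 0);          /* what the stack expects */

    uint32_t fixed = hw_csum + pad_csum; /* csum_block_add() equivalent for an even offset */
    while (fixed >> 16)
        fixed = (fixed & 0xffff) + (fixed >> 16);

    printf("expected %04x, fixed-up %04x -> %s\n", expected, fixed,
           expected == fixed ? "match" : "mismatch");
    return 0;
}

The csum_complete_tail and csum_complete_tail_slow counters added in en_stats.{c,h} below count how often the skb_header_pointer() fast path and the tail_padding_csum_slow() fallback run.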
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1a78e05cbba8..b75aa8b8bf04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | |||
| @@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = { | |||
| 59 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, | 59 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, |
| 60 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, | 60 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, |
| 61 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, | 61 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, |
| 62 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, | ||
| 63 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, | ||
| 62 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, | 64 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, |
| 63 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, | 65 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, |
| 64 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, | 66 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, |
| @@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) | |||
| 151 | s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; | 153 | s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; |
| 152 | s->rx_csum_none += rq_stats->csum_none; | 154 | s->rx_csum_none += rq_stats->csum_none; |
| 153 | s->rx_csum_complete += rq_stats->csum_complete; | 155 | s->rx_csum_complete += rq_stats->csum_complete; |
| 156 | s->rx_csum_complete_tail += rq_stats->csum_complete_tail; | ||
| 157 | s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; | ||
| 154 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; | 158 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; |
| 155 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; | 159 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; |
| 156 | s->rx_xdp_drop += rq_stats->xdp_drop; | 160 | s->rx_xdp_drop += rq_stats->xdp_drop; |
| @@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = { | |||
| 1190 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, | 1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, |
| 1191 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, | 1195 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, |
| 1192 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, | 1196 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, |
| 1197 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, | ||
| 1198 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, | ||
| 1193 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, | 1199 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, |
| 1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, | 1200 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, |
| 1195 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, | 1201 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4640d4f986f8..16c3b785f282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
| @@ -71,6 +71,8 @@ struct mlx5e_sw_stats { | |||
| 71 | u64 rx_csum_unnecessary; | 71 | u64 rx_csum_unnecessary; |
| 72 | u64 rx_csum_none; | 72 | u64 rx_csum_none; |
| 73 | u64 rx_csum_complete; | 73 | u64 rx_csum_complete; |
| 74 | u64 rx_csum_complete_tail; | ||
| 75 | u64 rx_csum_complete_tail_slow; | ||
| 74 | u64 rx_csum_unnecessary_inner; | 76 | u64 rx_csum_unnecessary_inner; |
| 75 | u64 rx_xdp_drop; | 77 | u64 rx_xdp_drop; |
| 76 | u64 rx_xdp_redirect; | 78 | u64 rx_xdp_redirect; |
| @@ -181,6 +183,8 @@ struct mlx5e_rq_stats { | |||
| 181 | u64 packets; | 183 | u64 packets; |
| 182 | u64 bytes; | 184 | u64 bytes; |
| 183 | u64 csum_complete; | 185 | u64 csum_complete; |
| 186 | u64 csum_complete_tail; | ||
| 187 | u64 csum_complete_tail_slow; | ||
| 184 | u64 csum_unnecessary; | 188 | u64 csum_unnecessary; |
| 185 | u64 csum_unnecessary_inner; | 189 | u64 csum_unnecessary_inner; |
| 186 | u64 csum_none; | 190 | u64 csum_none; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b4967a0ff8c7..d75dc44eb2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
| 2158 | return true; | 2158 | return true; |
| 2159 | } | 2159 | } |
| 2160 | 2160 | ||
| 2161 | struct ip_ttl_word { | ||
| 2162 | __u8 ttl; | ||
| 2163 | __u8 protocol; | ||
| 2164 | __sum16 check; | ||
| 2165 | }; | ||
| 2166 | |||
| 2167 | struct ipv6_hoplimit_word { | ||
| 2168 | __be16 payload_len; | ||
| 2169 | __u8 nexthdr; | ||
| 2170 | __u8 hop_limit; | ||
| 2171 | }; | ||
| 2172 | |||
| 2173 | static bool is_action_keys_supported(const struct flow_action_entry *act) | ||
| 2174 | { | ||
| 2175 | u32 mask, offset; | ||
| 2176 | u8 htype; | ||
| 2177 | |||
| 2178 | htype = act->mangle.htype; | ||
| 2179 | offset = act->mangle.offset; | ||
| 2180 | mask = ~act->mangle.mask; | ||
| 2181 | /* For IPv4 & IPv6 header check 4 byte word, | ||
| 2182 | * to determine that modified fields | ||
| 2183 | * are NOT ttl & hop_limit only. | ||
| 2184 | */ | ||
| 2185 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { | ||
| 2186 | struct ip_ttl_word *ttl_word = | ||
| 2187 | (struct ip_ttl_word *)&mask; | ||
| 2188 | |||
| 2189 | if (offset != offsetof(struct iphdr, ttl) || | ||
| 2190 | ttl_word->protocol || | ||
| 2191 | ttl_word->check) { | ||
| 2192 | return true; | ||
| 2193 | } | ||
| 2194 | } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
| 2195 | struct ipv6_hoplimit_word *hoplimit_word = | ||
| 2196 | (struct ipv6_hoplimit_word *)&mask; | ||
| 2197 | |||
| 2198 | if (offset != offsetof(struct ipv6hdr, payload_len) || | ||
| 2199 | hoplimit_word->payload_len || | ||
| 2200 | hoplimit_word->nexthdr) { | ||
| 2201 | return true; | ||
| 2202 | } | ||
| 2203 | } | ||
| 2204 | return false; | ||
| 2205 | } | ||
| 2206 | |||
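is_action_keys_supported(), added above, lets a pedit that touches only the TTL (or IPv6 hop_limit) byte skip the modify_ip_header restrictions: the 32-bit pedit word containing ttl also holds protocol and check (payload_len and nexthdr for IPv6), so the inverted mask is overlaid on a small struct to see whether any neighbouring field is written. Below is a standalone model of the IPv4 case, assuming the pedit convention that a clear mask bit marks a rewritten bit and a little-endian byte order; rewrites_more_than_ttl() and the hard-coded offset 8 (equal to offsetof(struct iphdr, ttl)) are demo assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The 4-byte IPv4 header word that starts at the ttl field. */
struct ip_ttl_word {
    uint8_t  ttl;
    uint8_t  protocol;
    uint16_t check;
};

/* offset: byte offset of the rewritten 32-bit word inside the IPv4 header.
 * mask:   pedit mask; a clear bit is assumed to mark a bit being rewritten,
 *         matching the "mask = ~act->mangle.mask" step above.
 * Returns true when anything besides the TTL byte would change. */
static bool rewrites_more_than_ttl(uint32_t offset, uint32_t mask)
{
    uint32_t written = ~mask;
    struct ip_ttl_word w;

    if (offset != 8)            /* 8 == offsetof(struct iphdr, ttl) */
        return true;            /* a different header word entirely */

    /* Assumes little-endian: the low byte of the word overlays 'ttl'. */
    memcpy(&w, &written, sizeof(w));
    return w.protocol || w.check;
}

int main(void)
{
    printf("ttl only       -> %d\n", rewrites_more_than_ttl(8, 0xffffff00)); /* 0 */
    printf("ttl + protocol -> %d\n", rewrites_more_than_ttl(8, 0xffff0000)); /* 1 */
    return 0;
}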
| 2161 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2207 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
| 2162 | struct flow_action *flow_action, | 2208 | struct flow_action *flow_action, |
| 2163 | u32 actions, | 2209 | u32 actions, |
| @@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 2165 | { | 2211 | { |
| 2166 | const struct flow_action_entry *act; | 2212 | const struct flow_action_entry *act; |
| 2167 | bool modify_ip_header; | 2213 | bool modify_ip_header; |
| 2168 | u8 htype, ip_proto; | ||
| 2169 | void *headers_v; | 2214 | void *headers_v; |
| 2170 | u16 ethertype; | 2215 | u16 ethertype; |
| 2216 | u8 ip_proto; | ||
| 2171 | int i; | 2217 | int i; |
| 2172 | 2218 | ||
| 2173 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 2219 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
| @@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 2187 | act->id != FLOW_ACTION_ADD) | 2233 | act->id != FLOW_ACTION_ADD) |
| 2188 | continue; | 2234 | continue; |
| 2189 | 2235 | ||
| 2190 | htype = act->mangle.htype; | 2236 | if (is_action_keys_supported(act)) { |
| 2191 | if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || | ||
| 2192 | htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { | ||
| 2193 | modify_ip_header = true; | 2237 | modify_ip_header = true; |
| 2194 | break; | 2238 | break; |
| 2195 | } | 2239 | } |
| @@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, | |||
| 2340 | return 0; | 2384 | return 0; |
| 2341 | } | 2385 | } |
| 2342 | 2386 | ||
| 2343 | static inline int cmp_encap_info(struct ip_tunnel_key *a, | 2387 | struct encap_key { |
| 2344 | struct ip_tunnel_key *b) | 2388 | struct ip_tunnel_key *ip_tun_key; |
| 2389 | int tunnel_type; | ||
| 2390 | }; | ||
| 2391 | |||
| 2392 | static inline int cmp_encap_info(struct encap_key *a, | ||
| 2393 | struct encap_key *b) | ||
| 2345 | { | 2394 | { |
| 2346 | return memcmp(a, b, sizeof(*a)); | 2395 | return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || |
| 2396 | a->tunnel_type != b->tunnel_type; | ||
| 2347 | } | 2397 | } |
| 2348 | 2398 | ||
| 2349 | static inline int hash_encap_info(struct ip_tunnel_key *key) | 2399 | static inline int hash_encap_info(struct encap_key *key) |
| 2350 | { | 2400 | { |
| 2351 | return jhash(key, sizeof(*key), 0); | 2401 | return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), |
| 2402 | key->tunnel_type); | ||
| 2352 | } | 2403 | } |
| 2353 | 2404 | ||
| 2354 | 2405 | ||
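cmp_encap_info() and hash_encap_info() now key encap entries on the pair (ip_tunnel_key, tunnel_type) rather than on the raw ip_tunnel_key alone, so two tunnels with identical outer addresses and ID but different encapsulation types (say, VXLAN and Geneve) no longer collapse into one cached encap entry. A small model of that pairing follows; tun_key_model, the FNV-1a stand-in for jhash() and the chosen fields are all assumptions made for the demo.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct ip_tunnel_key: just enough fields for the demo. */
struct tun_key_model {
    uint32_t dst_ip;
    uint32_t src_ip;
    uint32_t tun_id;
};

struct encap_key_model {
    const struct tun_key_model *ip_tun_key;
    int tunnel_type;                 /* e.g. VXLAN vs. Geneve */
};

/* 0 when equal, non-zero otherwise, same contract as cmp_encap_info(). */
static int cmp_encap_key(const struct encap_key_model *a,
                         const struct encap_key_model *b)
{
    return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
           a->tunnel_type != b->tunnel_type;
}

/* FNV-1a over the tunnel key bytes, seeded with the tunnel type; a plain
 * stand-in for jhash(key, size, tunnel_type) used by the driver. */
static uint32_t hash_encap_key(const struct encap_key_model *k)
{
    const uint8_t *p = (const uint8_t *)k->ip_tun_key;
    uint32_t h = 2166136261u ^ (uint32_t)k->tunnel_type;
    size_t i;

    for (i = 0; i < sizeof(*k->ip_tun_key); i++)
        h = (h ^ p[i]) * 16777619u;
    return h;
}

int main(void)
{
    struct tun_key_model tun = { 0x0a000001, 0x0a000002, 42 };
    struct encap_key_model vxlan  = { &tun, 1 };
    struct encap_key_model geneve = { &tun, 2 };

    /* Same outer IPs and VNI, different tunnel type: must NOT match. */
    printf("equal=%d hashes 0x%08x 0x%08x\n",
           cmp_encap_key(&vxlan, &geneve) == 0,
           hash_encap_key(&vxlan), hash_encap_key(&geneve));
    return 0;
}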
| @@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
| 2379 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; | 2430 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; |
| 2380 | struct mlx5e_tc_flow_parse_attr *parse_attr; | 2431 | struct mlx5e_tc_flow_parse_attr *parse_attr; |
| 2381 | struct ip_tunnel_info *tun_info; | 2432 | struct ip_tunnel_info *tun_info; |
| 2382 | struct ip_tunnel_key *key; | 2433 | struct encap_key key, e_key; |
| 2383 | struct mlx5e_encap_entry *e; | 2434 | struct mlx5e_encap_entry *e; |
| 2384 | unsigned short family; | 2435 | unsigned short family; |
| 2385 | uintptr_t hash_key; | 2436 | uintptr_t hash_key; |
| @@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
| 2389 | parse_attr = attr->parse_attr; | 2440 | parse_attr = attr->parse_attr; |
| 2390 | tun_info = &parse_attr->tun_info[out_index]; | 2441 | tun_info = &parse_attr->tun_info[out_index]; |
| 2391 | family = ip_tunnel_info_af(tun_info); | 2442 | family = ip_tunnel_info_af(tun_info); |
| 2392 | key = &tun_info->key; | 2443 | key.ip_tun_key = &tun_info->key; |
| 2444 | key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev); | ||
| 2393 | 2445 | ||
| 2394 | hash_key = hash_encap_info(key); | 2446 | hash_key = hash_encap_info(&key); |
| 2395 | 2447 | ||
| 2396 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, | 2448 | hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, |
| 2397 | encap_hlist, hash_key) { | 2449 | encap_hlist, hash_key) { |
| 2398 | if (!cmp_encap_info(&e->tun_info.key, key)) { | 2450 | e_key.ip_tun_key = &e->tun_info.key; |
| 2451 | e_key.tunnel_type = e->tunnel_type; | ||
| 2452 | if (!cmp_encap_info(&e_key, &key)) { | ||
| 2399 | found = true; | 2453 | found = true; |
| 2400 | break; | 2454 | break; |
| 2401 | } | 2455 | } |
| @@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, | |||
| 2657 | 2711 | ||
| 2658 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || | 2712 | if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || |
| 2659 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { | 2713 | hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { |
| 2660 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, | 2714 | err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, |
| 2661 | parse_attr, hdrs, extack); | 2715 | parse_attr, hdrs, extack); |
| 2662 | if (err) | 2716 | if (err) |
| 2663 | return err; | 2717 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ecd2c747f726..8a67fd197b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
| 105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); | 105 | opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); |
| 106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); | 106 | MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); |
| 107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | 107 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); |
| 108 | if (vport) | 108 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); |
| 109 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); | ||
| 110 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, | 109 | nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, |
| 111 | in, nic_vport_context); | 110 | in, nic_vport_context); |
| 112 | 111 | ||
| @@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, | |||
| 134 | MLX5_SET(modify_esw_vport_context_in, in, opcode, | 133 | MLX5_SET(modify_esw_vport_context_in, in, opcode, |
| 135 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); | 134 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); |
| 136 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); | 135 | MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); |
| 137 | if (vport) | 136 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); |
| 138 | MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); | ||
| 139 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 137 | return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
| 140 | } | 138 | } |
| 141 | 139 | ||
| @@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) | |||
| 431 | { | 429 | { |
| 432 | int err; | 430 | int err; |
| 433 | 431 | ||
| 432 | memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); | ||
| 433 | |||
| 434 | err = esw_create_legacy_vepa_table(esw); | 434 | err = esw_create_legacy_vepa_table(esw); |
| 435 | if (err) | 435 | if (err) |
| 436 | return err; | 436 | return err; |
| @@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, | |||
| 2157 | 2157 | ||
| 2158 | /* Star rule to forward all traffic to uplink vport */ | 2158 | /* Star rule to forward all traffic to uplink vport */ |
| 2159 | memset(spec, 0, sizeof(*spec)); | 2159 | memset(spec, 0, sizeof(*spec)); |
| 2160 | memset(&dest, 0, sizeof(dest)); | ||
| 2160 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 2161 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
| 2161 | dest.vport.num = MLX5_VPORT_UPLINK; | 2162 | dest.vport.num = MLX5_VPORT_UPLINK; |
| 2162 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; | 2163 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
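The eswitch hunks above all guard against stale state carried across a switch between legacy and switchdev modes: esw_create_legacy_table() (and esw_offloads_steering_init() in the next file) zero their sub-struct before building it, and the VEPA star rule now clears its dest struct on the stack before use. A toy illustration of the re-init pattern, with fdb_table_model and its fields invented for the example:

#include <stdio.h>
#include <string.h>

struct fdb_table_model {
    void *fdb;             /* flow table created for the current mode */
    void *addr_grp;        /* group that only exists in this mode     */
};

/* (Re)create the per-mode table state.  Zeroing first mirrors the added
 * memset(): after a mode switch the struct may still hold pointers from
 * the previous mode, and cleanup paths test those pointers to decide
 * what to destroy. */
static int create_legacy_table(struct fdb_table_model *t)
{
    memset(t, 0, sizeof(*t));
    /* ...allocate t->fdb and t->addr_grp; on failure the zeroed fields
     * tell the error path exactly what was actually created... */
    return 0;
}

int main(void)
{
    struct fdb_table_model t = { (void *)0xdead, (void *)0xbeef }; /* stale */

    create_legacy_table(&t);
    printf("fdb=%p addr_grp=%p\n", t.fdb, t.addr_grp);
    return 0;
}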
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f2260391be5b..9b2d78ee22b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) | |||
| 1611 | { | 1611 | { |
| 1612 | int err; | 1612 | int err; |
| 1613 | 1613 | ||
| 1614 | memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); | ||
| 1614 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); | 1615 | mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); |
| 1615 | 1616 | ||
| 1616 | err = esw_create_offloads_fdb_tables(esw, nvports); | 1617 | err = esw_create_offloads_fdb_tables(esw, nvports); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51f..22a2ef111514 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | |||
| @@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, | |||
| 148 | return ret; | 148 | return ret; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | static void mlx5_fpga_tls_release_swid(struct idr *idr, | 151 | static void *mlx5_fpga_tls_release_swid(struct idr *idr, |
| 152 | spinlock_t *idr_spinlock, u32 swid) | 152 | spinlock_t *idr_spinlock, u32 swid) |
| 153 | { | 153 | { |
| 154 | unsigned long flags; | 154 | unsigned long flags; |
| 155 | void *ptr; | ||
| 155 | 156 | ||
| 156 | spin_lock_irqsave(idr_spinlock, flags); | 157 | spin_lock_irqsave(idr_spinlock, flags); |
| 157 | idr_remove(idr, swid); | 158 | ptr = idr_remove(idr, swid); |
| 158 | spin_unlock_irqrestore(idr_spinlock, flags); | 159 | spin_unlock_irqrestore(idr_spinlock, flags); |
| 160 | return ptr; | ||
| 159 | } | 161 | } |
| 160 | 162 | ||
| 161 | static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, | 163 | static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, |
| @@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, | |||
| 165 | kfree(buf); | 167 | kfree(buf); |
| 166 | } | 168 | } |
| 167 | 169 | ||
| 168 | struct mlx5_teardown_stream_context { | ||
| 169 | struct mlx5_fpga_tls_command_context cmd; | ||
| 170 | u32 swid; | ||
| 171 | }; | ||
| 172 | |||
| 173 | static void | 170 | static void |
| 174 | mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, | 171 | mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, |
| 175 | struct mlx5_fpga_device *fdev, | 172 | struct mlx5_fpga_device *fdev, |
| 176 | struct mlx5_fpga_tls_command_context *cmd, | 173 | struct mlx5_fpga_tls_command_context *cmd, |
| 177 | struct mlx5_fpga_dma_buf *resp) | 174 | struct mlx5_fpga_dma_buf *resp) |
| 178 | { | 175 | { |
| 179 | struct mlx5_teardown_stream_context *ctx = | ||
| 180 | container_of(cmd, struct mlx5_teardown_stream_context, cmd); | ||
| 181 | |||
| 182 | if (resp) { | 176 | if (resp) { |
| 183 | u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); | 177 | u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); |
| 184 | 178 | ||
| @@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, | |||
| 186 | mlx5_fpga_err(fdev, | 180 | mlx5_fpga_err(fdev, |
| 187 | "Teardown stream failed with syndrome = %d", | 181 | "Teardown stream failed with syndrome = %d", |
| 188 | syndrome); | 182 | syndrome); |
| 189 | else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) | ||
| 190 | mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, | ||
| 191 | &fdev->tls->tx_idr_spinlock, | ||
| 192 | ctx->swid); | ||
| 193 | else | ||
| 194 | mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, | ||
| 195 | &fdev->tls->rx_idr_spinlock, | ||
| 196 | ctx->swid); | ||
| 197 | } | 183 | } |
| 198 | mlx5_fpga_tls_put_command_ctx(cmd); | 184 | mlx5_fpga_tls_put_command_ctx(cmd); |
| 199 | } | 185 | } |
| @@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
| 225 | 211 | ||
| 226 | rcu_read_lock(); | 212 | rcu_read_lock(); |
| 227 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); | 213 | flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); |
| 228 | rcu_read_unlock(); | 214 | if (unlikely(!flow)) { |
| 215 | rcu_read_unlock(); | ||
| 216 | WARN_ONCE(1, "Received NULL pointer for handle\n"); | ||
| 217 | kfree(buf); | ||
| 218 | return -EINVAL; | ||
| 219 | } | ||
| 229 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); | 220 | mlx5_fpga_tls_flow_to_cmd(flow, cmd); |
| 221 | rcu_read_unlock(); | ||
| 230 | 222 | ||
| 231 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); | 223 | MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); |
| 232 | MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); | 224 | MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); |
| @@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
| 238 | buf->complete = mlx_tls_kfree_complete; | 230 | buf->complete = mlx_tls_kfree_complete; |
| 239 | 231 | ||
| 240 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); | 232 | ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); |
| 233 | if (ret < 0) | ||
| 234 | kfree(buf); | ||
| 241 | 235 | ||
| 242 | return ret; | 236 | return ret; |
| 243 | } | 237 | } |
| @@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, | |||
| 245 | static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | 239 | static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, |
| 246 | void *flow, u32 swid, gfp_t flags) | 240 | void *flow, u32 swid, gfp_t flags) |
| 247 | { | 241 | { |
| 248 | struct mlx5_teardown_stream_context *ctx; | 242 | struct mlx5_fpga_tls_command_context *ctx; |
| 249 | struct mlx5_fpga_dma_buf *buf; | 243 | struct mlx5_fpga_dma_buf *buf; |
| 250 | void *cmd; | 244 | void *cmd; |
| 251 | 245 | ||
| @@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | |||
| 253 | if (!ctx) | 247 | if (!ctx) |
| 254 | return; | 248 | return; |
| 255 | 249 | ||
| 256 | buf = &ctx->cmd.buf; | 250 | buf = &ctx->buf; |
| 257 | cmd = (ctx + 1); | 251 | cmd = (ctx + 1); |
| 258 | MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); | 252 | MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); |
| 259 | MLX5_SET(tls_cmd, cmd, swid, swid); | 253 | MLX5_SET(tls_cmd, cmd, swid, swid); |
| @@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, | |||
| 264 | buf->sg[0].data = cmd; | 258 | buf->sg[0].data = cmd; |
| 265 | buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; | 259 | buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; |
| 266 | 260 | ||
| 267 | ctx->swid = swid; | 261 | mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, |
| 268 | mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, | ||
| 269 | mlx5_fpga_tls_teardown_completion); | 262 | mlx5_fpga_tls_teardown_completion); |
| 270 | } | 263 | } |
| 271 | 264 | ||
| @@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, | |||
| 275 | struct mlx5_fpga_tls *tls = mdev->fpga->tls; | 268 | struct mlx5_fpga_tls *tls = mdev->fpga->tls; |
| 276 | void *flow; | 269 | void *flow; |
| 277 | 270 | ||
| 278 | rcu_read_lock(); | ||
| 279 | if (direction_sx) | 271 | if (direction_sx) |
| 280 | flow = idr_find(&tls->tx_idr, swid); | 272 | flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, |
| 273 | &tls->tx_idr_spinlock, | ||
| 274 | swid); | ||
| 281 | else | 275 | else |
| 282 | flow = idr_find(&tls->rx_idr, swid); | 276 | flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, |
| 283 | 277 | &tls->rx_idr_spinlock, | |
| 284 | rcu_read_unlock(); | 278 | swid); |
| 285 | 279 | ||
| 286 | if (!flow) { | 280 | if (!flow) { |
| 287 | mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", | 281 | mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", |
| @@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, | |||
| 289 | return; | 283 | return; |
| 290 | } | 284 | } |
| 291 | 285 | ||
| 286 | synchronize_rcu(); /* before kfree(flow) */ | ||
| 292 | mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); | 287 | mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); |
| 293 | } | 288 | } |
| 294 | 289 | ||
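The fpga/tls changes make teardown own the flow pointer: mlx5_fpga_tls_release_swid() now returns what idr_remove() took out, mlx5_fpga_tls_del_flow() uses that instead of a separate idr_find(), and synchronize_rcu() runs before the flow can be freed so a concurrent resync that already looked the flow up under rcu_read_lock() finishes first. The sketch below models only the remove-and-return step with a mutex-protected table; swid_map, release_swid() and the fixed-size array are invented for the demo (the driver uses an IDR and a spinlock).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_SWID 8

static void *swid_map[MAX_SWID];
static pthread_mutex_t swid_lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove-and-return under one lock, like idr_remove(): whoever gets the
 * non-NULL pointer back owns the flow and is the only caller allowed to
 * tear it down, so concurrent deletes cannot double-free. */
static void *release_swid(unsigned int swid)
{
    void *flow;

    pthread_mutex_lock(&swid_lock);
    flow = swid_map[swid];
    swid_map[swid] = NULL;
    pthread_mutex_unlock(&swid_lock);
    return flow;
}

int main(void)
{
    void *flow;

    swid_map[3] = malloc(16);

    flow = release_swid(3);
    if (!flow) {
        puts("no flow information for swid 3");
        return 0;
    }
    /* The real driver calls synchronize_rcu() here, then sends the
     * teardown command and eventually frees the flow. */
    free(flow);
    puts("flow torn down exactly once");
    return 0;
}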
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70cc906a102b..76716419370d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = { | |||
| 164 | .size = 8, | 164 | .size = 8, |
| 165 | .limit = 4 | 165 | .limit = 4 |
| 166 | }, | 166 | }, |
| 167 | .mr_cache[16] = { | ||
| 168 | .size = 8, | ||
| 169 | .limit = 4 | ||
| 170 | }, | ||
| 171 | .mr_cache[17] = { | ||
| 172 | .size = 8, | ||
| 173 | .limit = 4 | ||
| 174 | }, | ||
| 175 | .mr_cache[18] = { | ||
| 176 | .size = 8, | ||
| 177 | .limit = 4 | ||
| 178 | }, | ||
| 179 | .mr_cache[19] = { | ||
| 180 | .size = 4, | ||
| 181 | .limit = 2 | ||
| 182 | }, | ||
| 183 | .mr_cache[20] = { | ||
| 184 | .size = 4, | ||
| 185 | .limit = 2 | ||
| 186 | }, | ||
| 187 | }, | 167 | }, |
| 188 | }; | 168 | }; |
| 189 | 169 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 21b7f05b16a5..361468e0435d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | |||
| @@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, | |||
| 317 | size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; | 317 | size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; |
| 318 | 318 | ||
| 319 | i2c_addr = MLX5_I2C_ADDR_LOW; | 319 | i2c_addr = MLX5_I2C_ADDR_LOW; |
| 320 | if (offset >= MLX5_EEPROM_PAGE_LENGTH) { | ||
| 321 | i2c_addr = MLX5_I2C_ADDR_HIGH; | ||
| 322 | offset -= MLX5_EEPROM_PAGE_LENGTH; | ||
| 323 | } | ||
| 324 | 320 | ||
| 325 | MLX5_SET(mcia_reg, in, l, 0); | 321 | MLX5_SET(mcia_reg, in, l, 0); |
| 326 | MLX5_SET(mcia_reg, in, module, module_num); | 322 | MLX5_SET(mcia_reg, in, module, module_num); |
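With the low/high I2C address juggling removed, mlx5_query_module_eeprom() keeps only the clamp visible in the context above: a single MCIA read is trimmed so it never crosses the 256-byte EEPROM page, and the caller can re-issue the query for the remaining bytes. A minimal model of that clamp, assuming the surrounding "offset + size > page length" check that the hunk context does not show:

#include <stdio.h>

#define EEPROM_PAGE_LEN 256   /* MLX5_EEPROM_PAGE_LENGTH */

/* Trim one read so it stays within the current 256-byte page; equivalent
 * to the kept line size -= offset + size - MLX5_EEPROM_PAGE_LENGTH. */
static size_t clamp_eeprom_read(size_t offset, size_t size)
{
    if (offset + size > EEPROM_PAGE_LEN)
        size -= offset + size - EEPROM_PAGE_LEN;   /* size = page - offset */
    return size;
}

int main(void)
{
    printf("%zu\n", clamp_eeprom_read(0, 48));     /* 48: fits in the page */
    printf("%zu\n", clamp_eeprom_read(240, 48));   /* 16: trimmed at page end */
    return 0;
}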
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index d23d53c0e284..f26a4ca29363 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
| @@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | |||
| 568 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) | 568 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
| 569 | return 0; | 569 | return 0; |
| 570 | 570 | ||
| 571 | emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); | 571 | emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); |
| 572 | if (!emad_wq) | 572 | if (!emad_wq) |
| 573 | return -ENOMEM; | 573 | return -ENOMEM; |
| 574 | mlxsw_core->emad_wq = emad_wq; | 574 | mlxsw_core->emad_wq = emad_wq; |
| @@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void) | |||
| 1958 | { | 1958 | { |
| 1959 | int err; | 1959 | int err; |
| 1960 | 1960 | ||
| 1961 | mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); | 1961 | mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); |
| 1962 | if (!mlxsw_wq) | 1962 | if (!mlxsw_wq) |
| 1963 | return -ENOMEM; | 1963 | return -ENOMEM; |
| 1964 | mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, | 1964 | mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, |
| 1965 | mlxsw_core_driver_name); | 1965 | mlxsw_core_driver_name); |
| 1966 | if (!mlxsw_owq) { | 1966 | if (!mlxsw_owq) { |
| 1967 | err = -ENOMEM; | 1967 | err = -ENOMEM; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index ffee38e36ce8..8648ca171254 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #define MLXSW_PCI_SW_RESET 0xF0010 | 28 | #define MLXSW_PCI_SW_RESET 0xF0010 |
| 29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) | 29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) |
| 30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 | 30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000 |
| 31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 | 31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 |
| 32 | #define MLXSW_PCI_FW_READY 0xA1844 | 32 | #define MLXSW_PCI_FW_READY 0xA1844 |
| 33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF | 33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9eb63300c1d3..6b8aa3761899 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, | |||
| 3126 | if (err) | 3126 | if (err) |
| 3127 | return err; | 3127 | return err; |
| 3128 | 3128 | ||
| 3129 | mlxsw_sp_port->link.autoneg = autoneg; | ||
| 3130 | |||
| 3129 | if (!netif_running(dev)) | 3131 | if (!netif_running(dev)) |
| 3130 | return 0; | 3132 | return 0; |
| 3131 | 3133 | ||
| 3132 | mlxsw_sp_port->link.autoneg = autoneg; | ||
| 3133 | |||
| 3134 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); | 3134 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); |
| 3135 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); | 3135 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); |
| 3136 | 3136 | ||
| @@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 3316 | err = mlxsw_sp_port_ets_set(mlxsw_sp_port, | 3316 | err = mlxsw_sp_port_ets_set(mlxsw_sp_port, |
| 3317 | MLXSW_REG_QEEC_HIERARCY_TC, | 3317 | MLXSW_REG_QEEC_HIERARCY_TC, |
| 3318 | i + 8, i, | 3318 | i + 8, i, |
| 3319 | false, 0); | 3319 | true, 100); |
| 3320 | if (err) | 3320 | if (err) |
| 3321 | return err; | 3321 | return err; |
| 3322 | } | 3322 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 9a79b5e11597..d633bef5f105 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
| @@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { | |||
| 70 | {MLXSW_REG_SBXX_DIR_EGRESS, 1}, | 70 | {MLXSW_REG_SBXX_DIR_EGRESS, 1}, |
| 71 | {MLXSW_REG_SBXX_DIR_EGRESS, 2}, | 71 | {MLXSW_REG_SBXX_DIR_EGRESS, 2}, |
| 72 | {MLXSW_REG_SBXX_DIR_EGRESS, 3}, | 72 | {MLXSW_REG_SBXX_DIR_EGRESS, 3}, |
| 73 | {MLXSW_REG_SBXX_DIR_EGRESS, 15}, | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 75 | #define MLXSW_SP_SB_ING_TC_COUNT 8 | 76 | #define MLXSW_SP_SB_ING_TC_COUNT 8 |
| @@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { | |||
| 428 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 429 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
| 429 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 430 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
| 430 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), | 431 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), |
| 432 | MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), | ||
| 431 | }; | 433 | }; |
| 432 | 434 | ||
| 433 | static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, | 435 | static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, |
| @@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { | |||
| 517 | MLXSW_SP_SB_CM(0, 7, 4), | 519 | MLXSW_SP_SB_CM(0, 7, 4), |
| 518 | MLXSW_SP_SB_CM(0, 7, 4), | 520 | MLXSW_SP_SB_CM(0, 7, 4), |
| 519 | MLXSW_SP_SB_CM(0, 7, 4), | 521 | MLXSW_SP_SB_CM(0, 7, 4), |
| 520 | MLXSW_SP_SB_CM(0, 7, 4), | 522 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 521 | MLXSW_SP_SB_CM(0, 7, 4), | 523 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 522 | MLXSW_SP_SB_CM(0, 7, 4), | 524 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 523 | MLXSW_SP_SB_CM(0, 7, 4), | 525 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 524 | MLXSW_SP_SB_CM(0, 7, 4), | 526 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 525 | MLXSW_SP_SB_CM(0, 7, 4), | 527 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 526 | MLXSW_SP_SB_CM(0, 7, 4), | 528 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 527 | MLXSW_SP_SB_CM(0, 7, 4), | 529 | MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), |
| 528 | MLXSW_SP_SB_CM(1, 0xff, 4), | 530 | MLXSW_SP_SB_CM(1, 0xff, 4), |
| 529 | }; | 531 | }; |
| 530 | 532 | ||
| @@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { | |||
| 671 | MLXSW_SP_SB_PM(0, 0), | 673 | MLXSW_SP_SB_PM(0, 0), |
| 672 | MLXSW_SP_SB_PM(0, 0), | 674 | MLXSW_SP_SB_PM(0, 0), |
| 673 | MLXSW_SP_SB_PM(0, 0), | 675 | MLXSW_SP_SB_PM(0, 0), |
| 676 | MLXSW_SP_SB_PM(10000, 90000), | ||
| 674 | }; | 677 | }; |
| 675 | 678 | ||
| 676 | static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) | 679 | static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 52fed8c7bf1e..902e766a8ed3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, | |||
| 6781 | /* A RIF is not created for macvlan netdevs. Their MAC is used to | 6781 | /* A RIF is not created for macvlan netdevs. Their MAC is used to |
| 6782 | * populate the FDB | 6782 | * populate the FDB |
| 6783 | */ | 6783 | */ |
| 6784 | if (netif_is_macvlan(dev)) | 6784 | if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) |
| 6785 | return 0; | 6785 | return 0; |
| 6786 | 6786 | ||
| 6787 | for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { | 6787 | for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index f6ce386c3036..50111f228d77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1630 | u16 fid_index; | 1630 | u16 fid_index; |
| 1631 | int err = 0; | 1631 | int err = 0; |
| 1632 | 1632 | ||
| 1633 | if (switchdev_trans_ph_prepare(trans)) | 1633 | if (switchdev_trans_ph_commit(trans)) |
| 1634 | return 0; | 1634 | return 0; |
| 1635 | 1635 | ||
| 1636 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1636 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a1d0d6e42533..d715ef4fc92f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c | |||
| @@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port, | |||
| 613 | struct netdev_hw_addr *hw_addr) | 613 | struct netdev_hw_addr *hw_addr) |
| 614 | { | 614 | { |
| 615 | struct ocelot *ocelot = port->ocelot; | 615 | struct ocelot *ocelot = port->ocelot; |
| 616 | struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); | 616 | struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC); |
| 617 | 617 | ||
| 618 | if (!ha) | 618 | if (!ha) |
| 619 | return -ENOMEM; | 619 | return -ENOMEM; |
| @@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) | |||
| 959 | ETH_GSTRING_LEN); | 959 | ETH_GSTRING_LEN); |
| 960 | } | 960 | } |
| 961 | 961 | ||
| 962 | static void ocelot_check_stats(struct work_struct *work) | 962 | static void ocelot_update_stats(struct ocelot *ocelot) |
| 963 | { | 963 | { |
| 964 | struct delayed_work *del_work = to_delayed_work(work); | ||
| 965 | struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); | ||
| 966 | int i, j; | 964 | int i, j; |
| 967 | 965 | ||
| 968 | mutex_lock(&ocelot->stats_lock); | 966 | mutex_lock(&ocelot->stats_lock); |
| @@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work) | |||
| 986 | } | 984 | } |
| 987 | } | 985 | } |
| 988 | 986 | ||
| 989 | cancel_delayed_work(&ocelot->stats_work); | 987 | mutex_unlock(&ocelot->stats_lock); |
| 988 | } | ||
| 989 | |||
| 990 | static void ocelot_check_stats_work(struct work_struct *work) | ||
| 991 | { | ||
| 992 | struct delayed_work *del_work = to_delayed_work(work); | ||
| 993 | struct ocelot *ocelot = container_of(del_work, struct ocelot, | ||
| 994 | stats_work); | ||
| 995 | |||
| 996 | ocelot_update_stats(ocelot); | ||
| 997 | |||
| 990 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, | 998 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, |
| 991 | OCELOT_STATS_CHECK_DELAY); | 999 | OCELOT_STATS_CHECK_DELAY); |
| 992 | |||
| 993 | mutex_unlock(&ocelot->stats_lock); | ||
| 994 | } | 1000 | } |
| 995 | 1001 | ||
| 996 | static void ocelot_get_ethtool_stats(struct net_device *dev, | 1002 | static void ocelot_get_ethtool_stats(struct net_device *dev, |
| @@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev, | |||
| 1001 | int i; | 1007 | int i; |
| 1002 | 1008 | ||
| 1003 | /* check and update now */ | 1009 | /* check and update now */ |
| 1004 | ocelot_check_stats(&ocelot->stats_work.work); | 1010 | ocelot_update_stats(ocelot); |
| 1005 | 1011 | ||
| 1006 | /* Copy all counters */ | 1012 | /* Copy all counters */ |
| 1007 | for (i = 0; i < ocelot->num_stats; i++) | 1013 | for (i = 0; i < ocelot->num_stats; i++) |
| @@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot) | |||
| 1809 | ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), | 1815 | ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), |
| 1810 | ANA_CPUQ_8021_CFG, i); | 1816 | ANA_CPUQ_8021_CFG, i); |
| 1811 | 1817 | ||
| 1812 | INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); | 1818 | INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); |
| 1813 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, | 1819 | queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, |
| 1814 | OCELOT_STATS_CHECK_DELAY); | 1820 | OCELOT_STATS_CHECK_DELAY); |
| 1815 | return 0; | 1821 | return 0; |
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 7cde387e5ec6..51cd57ab3d95 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c | |||
| @@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, | |||
| 2366 | dma_object->addr))) { | 2366 | dma_object->addr))) { |
| 2367 | vxge_os_dma_free(devh->pdev, memblock, | 2367 | vxge_os_dma_free(devh->pdev, memblock, |
| 2368 | &dma_object->acc_handle); | 2368 | &dma_object->acc_handle); |
| 2369 | memblock = NULL; | ||
| 2369 | goto exit; | 2370 | goto exit; |
| 2370 | } | 2371 | } |
| 2371 | 2372 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c index 9852080cf454..ff3913085665 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/cls.c +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c | |||
| @@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode, | |||
| 39 | } | 39 | } |
| 40 | if (knode->sel->off || knode->sel->offshift || knode->sel->offmask || | 40 | if (knode->sel->off || knode->sel->offshift || knode->sel->offmask || |
| 41 | knode->sel->offoff || knode->fshift) { | 41 | knode->sel->offoff || knode->fshift) { |
| 42 | NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported"); | 42 | NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported"); |
| 43 | return false; | 43 | return false; |
| 44 | } | 44 | } |
| 45 | if (knode->sel->hoff || knode->sel->hmask) { | 45 | if (knode->sel->hoff || knode->sel->hmask) { |
| @@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode, | |||
| 78 | 78 | ||
| 79 | k = &knode->sel->keys[0]; | 79 | k = &knode->sel->keys[0]; |
| 80 | if (k->offmask) { | 80 | if (k->offmask) { |
| 81 | NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported"); | 81 | NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported"); |
| 82 | return false; | 82 | return false; |
| 83 | } | 83 | } |
| 84 | if (k->off) { | 84 | if (k->off) { |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index eeda4ed98333..e336f6ee94f5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
| @@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, | |||
| 48 | 48 | ||
| 49 | tmp_push_vlan_tci = | 49 | tmp_push_vlan_tci = |
| 50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | | 50 | FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | |
| 51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | | 51 | FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid); |
| 52 | NFP_FL_PUSH_VLAN_CFI; | ||
| 53 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); | 52 | push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); |
| 54 | } | 53 | } |
| 55 | 54 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4fcaf11ed56e..0ed51e79db00 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) | 26 | #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) |
| 27 | 27 | ||
| 28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) | 28 | #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) |
| 29 | #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) | 29 | #define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) |
| 30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) | 30 | #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) |
| 31 | 31 | ||
| 32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) | 32 | #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) |
| @@ -82,7 +82,6 @@ | |||
| 82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) | 82 | #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) |
| 83 | 83 | ||
| 84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) | 84 | #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) |
| 85 | #define NFP_FL_PUSH_VLAN_CFI BIT(12) | ||
| 86 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) | 85 | #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) |
| 87 | 86 | ||
| 88 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) | 87 | #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e03c8ef2c28c..9b8b843d0340 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
| @@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, | |||
| 30 | 30 | ||
| 31 | flow_rule_match_vlan(rule, &match); | 31 | flow_rule_match_vlan(rule, &match); |
| 32 | /* Populate the tci field. */ | 32 | /* Populate the tci field. */ |
| 33 | if (match.key->vlan_id || match.key->vlan_priority) { | 33 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
| 34 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 34 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 35 | match.key->vlan_priority) | | 35 | match.key->vlan_priority) | |
| 36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 36 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
| 37 | match.key->vlan_id) | | 37 | match.key->vlan_id); |
| 38 | NFP_FLOWER_MASK_VLAN_CFI; | 38 | ext->tci = cpu_to_be16(tmp_tci); |
| 39 | ext->tci = cpu_to_be16(tmp_tci); | 39 | |
| 40 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 40 | tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; |
| 41 | match.mask->vlan_priority) | | 41 | tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 42 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 42 | match.mask->vlan_priority) | |
| 43 | match.mask->vlan_id) | | 43 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
| 44 | NFP_FLOWER_MASK_VLAN_CFI; | 44 | match.mask->vlan_id); |
| 45 | msk->tci = cpu_to_be16(tmp_tci); | 45 | msk->tci = cpu_to_be16(tmp_tci); |
| 46 | } | ||
| 47 | } | 46 | } |
| 48 | } | 47 | } |
| 49 | 48 | ||
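The NFP flower hunks replace the CFI bit with a "VLAN present" bit: bit 12 is no longer set in the pushed TCI (action.c above), and the match TCI is emitted even for a priority-0, VID-0 tag because the present bit alone makes the key and mask non-zero. A quick model of the TCI layout, with the TCI_* macro names below standing in for the NFP_FLOWER_MASK_VLAN_* fields:

#include <stdint.h>
#include <stdio.h>

#define TCI_PRESENT  ((uint16_t)(1u << 12))                  /* bit 12: tag present */
#define TCI_PRIO(p)  ((uint16_t)(((p) & 0x7u) << 13))        /* bits 15:13          */
#define TCI_VID(v)   ((uint16_t)((v) & 0x0fffu))             /* bits 11:0           */

/* Builds the match TCI the way the new compile_meta_tci() code does: the
 * present bit is set unconditionally once a VLAN match exists. */
static uint16_t build_match_tci(uint8_t prio, uint16_t vid)
{
    return TCI_PRESENT | TCI_PRIO(prio) | TCI_VID(vid);
}

int main(void)
{
    printf("prio 0, vid 0   -> 0x%04x\n", build_match_tci(0, 0));    /* 0x1000 */
    printf("prio 5, vid 100 -> 0x%04x\n", build_match_tci(5, 100));  /* 0xb064 */
    return 0;
}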
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d2c803bb4e56..94d228c04496 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | |||
| @@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 195 | ret = dev_queue_xmit(skb); | 195 | ret = dev_queue_xmit(skb); |
| 196 | nfp_repr_inc_tx_stats(netdev, len, ret); | 196 | nfp_repr_inc_tx_stats(netdev, len, ret); |
| 197 | 197 | ||
| 198 | return ret; | 198 | return NETDEV_TX_OK; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | static int nfp_repr_stop(struct net_device *netdev) | 201 | static int nfp_repr_stop(struct net_device *netdev) |
| @@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, | |||
| 383 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | 383 | netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); |
| 384 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; | 384 | netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; |
| 385 | 385 | ||
| 386 | netdev->priv_flags |= IFF_NO_QUEUE; | 386 | netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; |
| 387 | netdev->features |= NETIF_F_LLTX; | 387 | netdev->features |= NETIF_F_LLTX; |
| 388 | 388 | ||
| 389 | if (nfp_app_has_tc(app)) { | 389 | if (nfp_app_has_tc(app)) { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 43a57ec296fd..127c89b22ef0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
| @@ -431,12 +431,16 @@ struct qed_qm_info { | |||
| 431 | u8 num_pf_rls; | 431 | u8 num_pf_rls; |
| 432 | }; | 432 | }; |
| 433 | 433 | ||
| 434 | #define QED_OVERFLOW_BIT 1 | ||
| 435 | |||
| 434 | struct qed_db_recovery_info { | 436 | struct qed_db_recovery_info { |
| 435 | struct list_head list; | 437 | struct list_head list; |
| 436 | 438 | ||
| 437 | /* Lock to protect the doorbell recovery mechanism list */ | 439 | /* Lock to protect the doorbell recovery mechanism list */ |
| 438 | spinlock_t lock; | 440 | spinlock_t lock; |
| 441 | bool dorq_attn; | ||
| 439 | u32 db_recovery_counter; | 442 | u32 db_recovery_counter; |
| 443 | unsigned long overflow; | ||
| 440 | }; | 444 | }; |
| 441 | 445 | ||
| 442 | struct storm_stats { | 446 | struct storm_stats { |
| @@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); | |||
| 920 | 924 | ||
| 921 | /* doorbell recovery mechanism */ | 925 | /* doorbell recovery mechanism */ |
| 922 | void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); | 926 | void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); |
| 923 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, | 927 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); |
| 924 | enum qed_db_rec_exec db_exec); | ||
| 925 | bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); | 928 | bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); |
| 926 | 929 | ||
| 927 | /* Other Linux specific common definitions */ | 930 | /* Other Linux specific common definitions */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9df8c4b3b54e..866cdc86a3f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, | |||
| 102 | 102 | ||
| 103 | /* Doorbell address sanity (address within doorbell bar range) */ | 103 | /* Doorbell address sanity (address within doorbell bar range) */ |
| 104 | static bool qed_db_rec_sanity(struct qed_dev *cdev, | 104 | static bool qed_db_rec_sanity(struct qed_dev *cdev, |
| 105 | void __iomem *db_addr, void *db_data) | 105 | void __iomem *db_addr, |
| 106 | enum qed_db_rec_width db_width, | ||
| 107 | void *db_data) | ||
| 106 | { | 108 | { |
| 109 | u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; | ||
| 110 | |||
| 107 | /* Make sure doorbell address is within the doorbell bar */ | 111 | /* Make sure doorbell address is within the doorbell bar */ |
| 108 | if (db_addr < cdev->doorbells || | 112 | if (db_addr < cdev->doorbells || |
| 109 | (u8 __iomem *)db_addr > | 113 | (u8 __iomem *)db_addr + width > |
| 110 | (u8 __iomem *)cdev->doorbells + cdev->db_size) { | 114 | (u8 __iomem *)cdev->doorbells + cdev->db_size) { |
| 111 | WARN(true, | 115 | WARN(true, |
| 112 | "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", | 116 | "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", |
| @@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev, | |||
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | /* Sanitize doorbell address */ | 165 | /* Sanitize doorbell address */ |
| 162 | if (!qed_db_rec_sanity(cdev, db_addr, db_data)) | 166 | if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) |
| 163 | return -EINVAL; | 167 | return -EINVAL; |
| 164 | 168 | ||
| 165 | /* Obtain hwfn from doorbell address */ | 169 | /* Obtain hwfn from doorbell address */ |
| @@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev, | |||
| 205 | return 0; | 209 | return 0; |
| 206 | } | 210 | } |
| 207 | 211 | ||
| 208 | /* Sanitize doorbell address */ | ||
| 209 | if (!qed_db_rec_sanity(cdev, db_addr, db_data)) | ||
| 210 | return -EINVAL; | ||
| 211 | |||
| 212 | /* Obtain hwfn from doorbell address */ | 212 | /* Obtain hwfn from doorbell address */ |
| 213 | p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); | 213 | p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); |
| 214 | 214 | ||
| @@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) | |||
| 300 | 300 | ||
| 301 | /* Ring the doorbell of a single doorbell recovery entry */ | 301 | /* Ring the doorbell of a single doorbell recovery entry */ |
| 302 | static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | 302 | static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, |
| 303 | struct qed_db_recovery_entry *db_entry, | 303 | struct qed_db_recovery_entry *db_entry) |
| 304 | enum qed_db_rec_exec db_exec) | 304 | { |
| 305 | { | 305 | /* Print according to width */ |
| 306 | if (db_exec != DB_REC_ONCE) { | 306 | if (db_entry->db_width == DB_REC_WIDTH_32B) { |
| 307 | /* Print according to width */ | 307 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
| 308 | if (db_entry->db_width == DB_REC_WIDTH_32B) { | 308 | "ringing doorbell address %p data %x\n", |
| 309 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | 309 | db_entry->db_addr, |
| 310 | "%s doorbell address %p data %x\n", | 310 | *(u32 *)db_entry->db_data); |
| 311 | db_exec == DB_REC_DRY_RUN ? | 311 | } else { |
| 312 | "would have rung" : "ringing", | 312 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, |
| 313 | db_entry->db_addr, | 313 | "ringing doorbell address %p data %llx\n", |
| 314 | *(u32 *)db_entry->db_data); | 314 | db_entry->db_addr, |
| 315 | } else { | 315 | *(u64 *)(db_entry->db_data)); |
| 316 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | ||
| 317 | "%s doorbell address %p data %llx\n", | ||
| 318 | db_exec == DB_REC_DRY_RUN ? | ||
| 319 | "would have rung" : "ringing", | ||
| 320 | db_entry->db_addr, | ||
| 321 | *(u64 *)(db_entry->db_data)); | ||
| 322 | } | ||
| 323 | } | 316 | } |
| 324 | 317 | ||
| 325 | /* Sanity */ | 318 | /* Sanity */ |
| 326 | if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, | 319 | if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, |
| 327 | db_entry->db_data)) | 320 | db_entry->db_width, db_entry->db_data)) |
| 328 | return; | 321 | return; |
| 329 | 322 | ||
| 330 | /* Flush the write combined buffer. Since there are multiple doorbelling | 323 | /* Flush the write combined buffer. Since there are multiple doorbelling |
| @@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | |||
| 334 | wmb(); | 327 | wmb(); |
| 335 | 328 | ||
| 336 | /* Ring the doorbell */ | 329 | /* Ring the doorbell */ |
| 337 | if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { | 330 | if (db_entry->db_width == DB_REC_WIDTH_32B) |
| 338 | if (db_entry->db_width == DB_REC_WIDTH_32B) | 331 | DIRECT_REG_WR(db_entry->db_addr, |
| 339 | DIRECT_REG_WR(db_entry->db_addr, | 332 | *(u32 *)(db_entry->db_data)); |
| 340 | *(u32 *)(db_entry->db_data)); | 333 | else |
| 341 | else | 334 | DIRECT_REG_WR64(db_entry->db_addr, |
| 342 | DIRECT_REG_WR64(db_entry->db_addr, | 335 | *(u64 *)(db_entry->db_data)); |
| 343 | *(u64 *)(db_entry->db_data)); | ||
| 344 | } | ||
| 345 | 336 | ||
| 346 | /* Flush the write combined buffer. Next doorbell may come from a | 337 | /* Flush the write combined buffer. Next doorbell may come from a |
| 347 | * different entity to the same address... | 338 | * different entity to the same address... |
| @@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, | |||
| 350 | } | 341 | } |
| 351 | 342 | ||
| 352 | /* Traverse the doorbell recovery entry list and ring all the doorbells */ | 343 | /* Traverse the doorbell recovery entry list and ring all the doorbells */ |
| 353 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, | 344 | void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) |
| 354 | enum qed_db_rec_exec db_exec) | ||
| 355 | { | 345 | { |
| 356 | struct qed_db_recovery_entry *db_entry = NULL; | 346 | struct qed_db_recovery_entry *db_entry = NULL; |
| 357 | 347 | ||
| 358 | if (db_exec != DB_REC_ONCE) { | 348 | DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n", |
| 359 | DP_NOTICE(p_hwfn, | 349 | p_hwfn->db_recovery_info.db_recovery_counter); |
| 360 | "Executing doorbell recovery. Counter was %d\n", | ||
| 361 | p_hwfn->db_recovery_info.db_recovery_counter); | ||
| 362 | 350 | ||
| 363 | /* Track amount of times recovery was executed */ | 351 | /* Track amount of times recovery was executed */ |
| 364 | p_hwfn->db_recovery_info.db_recovery_counter++; | 352 | p_hwfn->db_recovery_info.db_recovery_counter++; |
| 365 | } | ||
| 366 | 353 | ||
| 367 | /* Protect the list */ | 354 | /* Protect the list */ |
| 368 | spin_lock_bh(&p_hwfn->db_recovery_info.lock); | 355 | spin_lock_bh(&p_hwfn->db_recovery_info.lock); |
| 369 | list_for_each_entry(db_entry, | 356 | list_for_each_entry(db_entry, |
| 370 | &p_hwfn->db_recovery_info.list, list_entry) { | 357 | &p_hwfn->db_recovery_info.list, list_entry) |
| 371 | qed_db_recovery_ring(p_hwfn, db_entry, db_exec); | 358 | qed_db_recovery_ring(p_hwfn, db_entry); |
| 372 | if (db_exec == DB_REC_ONCE) | ||
| 373 | break; | ||
| 374 | } | ||
| 375 | |||
| 376 | spin_unlock_bh(&p_hwfn->db_recovery_info.lock); | 359 | spin_unlock_bh(&p_hwfn->db_recovery_info.lock); |
| 377 | } | 360 | } |
| 378 | 361 | ||
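qed_db_rec_sanity() now takes the doorbell width and checks that the whole 32- or 64-bit write fits inside the doorbell BAR, not just its first byte, and the redundant sanity call in qed_db_recovery_del() is dropped. The boundary test reduces to the few lines below; db_addr_sane() and the fake BAR numbers are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the reworked qed_db_rec_sanity(): the entire doorbell write
 * must land inside [bar_start, bar_start + bar_size). */
static bool db_addr_sane(uintptr_t bar_start, size_t bar_size,
                         uintptr_t db_addr, unsigned int width_bits)
{
    size_t width = width_bits / 8;

    return db_addr >= bar_start &&
           db_addr + width <= bar_start + bar_size;
}

int main(void)
{
    uintptr_t bar = 0x10000;
    size_t size = 0x1000;

    /* 64-bit doorbell starting 4 bytes before the end of the BAR: the old
     * address-only check passed, the new address+width check rejects it. */
    printf("%d\n", db_addr_sane(bar, size, bar + size - 4, 64));  /* 0 */
    printf("%d\n", db_addr_sane(bar, size, bar + size - 8, 64));  /* 1 */
    return 0;
}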
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index e23980e301b6..8848d5bed6e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
| @@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, | |||
| 378 | u32 count = QED_DB_REC_COUNT; | 378 | u32 count = QED_DB_REC_COUNT; |
| 379 | u32 usage = 1; | 379 | u32 usage = 1; |
| 380 | 380 | ||
| 381 | /* Flush any pending (e)dpms as they may never arrive */ | ||
| 382 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); | ||
| 383 | |||
| 381 | /* wait for usage to zero or count to run out. This is necessary since | 384 | /* wait for usage to zero or count to run out. This is necessary since |
| 382 | * EDPM doorbell transactions can take multiple 64b cycles, and as such | 385 | * EDPM doorbell transactions can take multiple 64b cycles, and as such |
| 383 | * can "split" over the pci. Possibly, the doorbell drop can happen with | 386 | * can "split" over the pci. Possibly, the doorbell drop can happen with |
| @@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, | |||
| 406 | 409 | ||
| 407 | int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 410 | int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 408 | { | 411 | { |
| 409 | u32 overflow; | 412 | u32 attn_ovfl, cur_ovfl; |
| 410 | int rc; | 413 | int rc; |
| 411 | 414 | ||
| 412 | overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); | 415 | attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, |
| 413 | DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); | 416 | &p_hwfn->db_recovery_info.overflow); |
| 414 | if (!overflow) { | 417 | cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); |
| 415 | qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); | 418 | if (!cur_ovfl && !attn_ovfl) |
| 416 | return 0; | 419 | return 0; |
| 417 | } | ||
| 418 | 420 | ||
| 419 | if (qed_edpm_enabled(p_hwfn)) { | 421 | DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", |
| 422 | attn_ovfl, cur_ovfl); | ||
| 423 | |||
| 424 | if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { | ||
| 420 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); | 425 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); |
| 421 | if (rc) | 426 | if (rc) |
| 422 | return rc; | 427 | return rc; |
| 423 | } | 428 | } |
| 424 | 429 | ||
| 425 | /* Flush any pending (e)dpm as they may never arrive */ | ||
| 426 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); | ||
| 427 | |||
| 428 | /* Release overflow sticky indication (stop silently dropping everything) */ | 430 | /* Release overflow sticky indication (stop silently dropping everything) */ |
| 429 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); | 431 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); |
| 430 | 432 | ||
| 431 | /* Repeat all last doorbells (doorbell drop recovery) */ | 433 | /* Repeat all last doorbells (doorbell drop recovery) */ |
| 432 | qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); | 434 | qed_db_recovery_execute(p_hwfn); |
| 433 | 435 | ||
| 434 | return 0; | 436 | return 0; |
| 435 | } | 437 | } |
| 436 | 438 | ||
| 437 | static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | 439 | static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) |
| 438 | { | 440 | { |
| 439 | u32 int_sts, first_drop_reason, details, address, all_drops_reason; | ||
| 440 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; | 441 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; |
| 442 | u32 overflow; | ||
| 441 | int rc; | 443 | int rc; |
| 442 | 444 | ||
| 443 | int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); | 445 | overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); |
| 444 | DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); | 446 | if (!overflow) |
| 447 | goto out; | ||
| 448 | |||
| 449 | /* Run PF doorbell recovery in next periodic handler */ | ||
| 450 | set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); | ||
| 451 | |||
| 452 | if (!p_hwfn->db_bar_no_edpm) { | ||
| 453 | rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); | ||
| 454 | if (rc) | ||
| 455 | goto out; | ||
| 456 | } | ||
| 457 | |||
| 458 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); | ||
| 459 | out: | ||
| 460 | /* Schedule the handler even if overflow was not detected */ | ||
| 461 | qed_periodic_db_rec_start(p_hwfn); | ||
| 462 | } | ||
| 463 | |||
| 464 | static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) | ||
| 465 | { | ||
| 466 | u32 int_sts, first_drop_reason, details, address, all_drops_reason; | ||
| 467 | struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; | ||
| 445 | 468 | ||
| 446 | /* int_sts may be zero since all PFs were interrupted for doorbell | 469 | /* int_sts may be zero since all PFs were interrupted for doorbell |
| 447 | * overflow but another one already handled it. Can abort here. If | 470 | * overflow but another one already handled it. Can abort here. If |
| 448 | * this PF also requires overflow recovery, we will be interrupted again. | 471 | * this PF also requires overflow recovery, we will be interrupted again. |
| 449 | * The masked almost full indication may also be set. Ignoring. | 472 | * The masked almost full indication may also be set. Ignoring. |
| 450 | */ | 473 | */ |
| 474 | int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); | ||
| 451 | if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) | 475 | if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) |
| 452 | return 0; | 476 | return 0; |
| 453 | 477 | ||
| 478 | DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); | ||
| 479 | |||
| 454 | /* check if db_drop or overflow happened */ | 480 | /* check if db_drop or overflow happened */ |
| 455 | if (int_sts & (DORQ_REG_INT_STS_DB_DROP | | 481 | if (int_sts & (DORQ_REG_INT_STS_DB_DROP | |
| 456 | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { | 482 | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { |
| @@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | |||
| 477 | GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, | 503 | GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, |
| 478 | first_drop_reason, all_drops_reason); | 504 | first_drop_reason, all_drops_reason); |
| 479 | 505 | ||
| 480 | rc = qed_db_rec_handler(p_hwfn, p_ptt); | ||
| 481 | qed_periodic_db_rec_start(p_hwfn); | ||
| 482 | if (rc) | ||
| 483 | return rc; | ||
| 484 | |||
| 485 | /* Clear the doorbell drop details and prepare for next drop */ | 506 | /* Clear the doorbell drop details and prepare for next drop */ |
| 486 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); | 507 | qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); |
| 487 | 508 | ||
| @@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | |||
| 507 | return -EINVAL; | 528 | return -EINVAL; |
| 508 | } | 529 | } |
| 509 | 530 | ||
| 531 | static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) | ||
| 532 | { | ||
| 533 | p_hwfn->db_recovery_info.dorq_attn = true; | ||
| 534 | qed_dorq_attn_overflow(p_hwfn); | ||
| 535 | |||
| 536 | return qed_dorq_attn_int_sts(p_hwfn); | ||
| 537 | } | ||
| 538 | |||
| 539 | static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) | ||
| 540 | { | ||
| 541 | if (p_hwfn->db_recovery_info.dorq_attn) | ||
| 542 | goto out; | ||
| 543 | |||
| 544 | /* Call DORQ callback if the attention was missed */ | ||
| 545 | qed_dorq_attn_cb(p_hwfn); | ||
| 546 | out: | ||
| 547 | p_hwfn->db_recovery_info.dorq_attn = false; | ||
| 548 | } | ||
| 549 | |||
| 510 | /* Instead of major changes to the data-structure, we have some 'special' | 550 | /* Instead of major changes to the data-structure, we have some 'special' |
| 511 | * identifiers for sources that changed meaning between adapters. | 551 | * identifiers for sources that changed meaning between adapters. |
| 512 | */ | 552 | */ |
| @@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, | |||
| 1080 | } | 1120 | } |
| 1081 | } | 1121 | } |
| 1082 | 1122 | ||
| 1123 | /* Handle missed DORQ attention */ | ||
| 1124 | qed_dorq_attn_handler(p_hwfn); | ||
| 1125 | |||
| 1083 | /* Clear IGU indication for the deasserted bits */ | 1126 | /* Clear IGU indication for the deasserted bits */ |
| 1084 | DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + | 1127 | DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + |
| 1085 | GTT_BAR0_MAP_REG_IGU_CMD + | 1128 | GTT_BAR0_MAP_REG_IGU_CMD + |
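The rework splits DORQ handling in two: the attention callback only latches an overflow bit and schedules the periodic handler, which later consumes the bit, re-reads the sticky register and only then runs the flush/recovery sequence. A minimal userspace model of that deferred handoff; read_sticky()/clear_sticky() and the atomic flag are stand-ins for the DORQ register access and QED_OVERFLOW_BIT, not the driver's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool overflow_latched;    /* models QED_OVERFLOW_BIT */
static bool hw_sticky = true;           /* models DORQ_REG_PF_OVFL_STICKY */

static bool read_sticky(void) { return hw_sticky; }
static void clear_sticky(void) { hw_sticky = false; }

/* Attention context: latch the event and defer; no recovery work here. */
static void dorq_attn_overflow(void)
{
        if (read_sticky())
                atomic_store(&overflow_latched, true);
        /* the driver schedules the periodic handler here either way */
}

/* Periodic context: consume the latched bit, re-check the hardware and
 * only then run the flush / recovery sequence.
 */
static void db_rec_handler(void)
{
        bool attn_ovfl = atomic_exchange(&overflow_latched, false);
        bool cur_ovfl = read_sticky();

        if (!attn_ovfl && !cur_ovfl)
                return;

        printf("PF Overflow sticky: attn %d current %d\n",
               attn_ovfl, cur_ovfl);
        clear_sticky();                         /* release the sticky bit */
        printf("repeating last doorbells\n");   /* recovery would run here */
}

int main(void)
{
        dorq_attn_overflow();
        db_rec_handler();
        return 0;
}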
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 1f356ed4f761..d473b522afc5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h | |||
| @@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); | |||
| 192 | 192 | ||
| 193 | /** | 193 | /** |
| 194 | * @brief - Doorbell Recovery handler. | 194 | * @brief - Doorbell Recovery handler. |
| 195 | * Run DB_REAL_DEAL doorbell recovery in case of PF overflow | 195 | * Run doorbell recovery in case of PF overflow (and flush DORQ if |
| 196 | * (and flush DORQ if needed), otherwise run DB_REC_ONCE. | 196 | * needed). |
| 197 | * | 197 | * |
| 198 | * @param p_hwfn | 198 | * @param p_hwfn |
| 199 | * @param p_ptt | 199 | * @param p_ptt |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f164d4acebcb..6de23b56b294 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
| 970 | } | 970 | } |
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | #define QED_PERIODIC_DB_REC_COUNT 100 | 973 | #define QED_PERIODIC_DB_REC_COUNT 10 |
| 974 | #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 | 974 | #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 |
| 975 | #define QED_PERIODIC_DB_REC_INTERVAL \ | 975 | #define QED_PERIODIC_DB_REC_INTERVAL \ |
| 976 | msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) | 976 | msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) |
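Lowering QED_PERIODIC_DB_REC_COUNT from 100 to 10 matters mainly through its product with the 100 ms interval. Assuming, as the names suggest, that the count bounds how many times the periodic recovery task re-arms itself at that interval, the change trims the worst-case retry window from roughly 10 s to 1 s; the trivial program below just prints that product.

#include <stdio.h>

#define QED_PERIODIC_DB_REC_COUNT        10
#define QED_PERIODIC_DB_REC_INTERVAL_MS  100

int main(void)
{
        /* worst-case retry window implied by the new constants */
        printf("max retry window: %d ms\n",
               QED_PERIODIC_DB_REC_COUNT * QED_PERIODIC_DB_REC_INTERVAL_MS);
        return 0;
}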
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9faaa6df78ed..2f318aaf2b05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
| @@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, | |||
| 1591 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; | 1591 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; |
| 1592 | } else { | 1592 | } else { |
| 1593 | DP_INFO(p_hwfn, | 1593 | DP_INFO(p_hwfn, |
| 1594 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", | 1594 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", |
| 1595 | vf->abs_vf_id, | 1595 | vf->abs_vf_id, |
| 1596 | req->vfdev_info.eth_fp_hsi_major, | 1596 | req->vfdev_info.eth_fp_hsi_major, |
| 1597 | req->vfdev_info.eth_fp_hsi_minor, | 1597 | req->vfdev_info.eth_fp_hsi_minor, |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 5f3f42a25361..bddb2b5982dc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c | |||
| @@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) | |||
| 490 | 490 | ||
| 491 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); | 491 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); |
| 492 | if (IS_ERR(ptp->clock)) { | 492 | if (IS_ERR(ptp->clock)) { |
| 493 | rc = -EINVAL; | ||
| 494 | DP_ERR(edev, "PTP clock registration failed\n"); | 493 | DP_ERR(edev, "PTP clock registration failed\n"); |
| 494 | qede_ptp_disable(edev); | ||
| 495 | rc = -EINVAL; | ||
| 495 | goto err2; | 496 | goto err2; |
| 496 | } | 497 | } |
| 497 | 498 | ||
| 498 | return 0; | 499 | return 0; |
| 499 | 500 | ||
| 500 | err2: | ||
| 501 | qede_ptp_disable(edev); | ||
| 502 | ptp->clock = NULL; | ||
| 503 | err1: | 501 | err1: |
| 504 | kfree(ptp); | 502 | kfree(ptp); |
| 503 | err2: | ||
| 505 | edev->ptp = NULL; | 504 | edev->ptp = NULL; |
| 506 | 505 | ||
| 507 | return rc; | 506 | return rc; |
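The qede_ptp fix is really about restoring the usual goto-unwind ordering: each failure jumps to the label that undoes exactly what has been set up so far, and the labels appear in reverse order of the setup steps. A generic sketch of that convention, with made-up resources rather than the PTP objects.

#include <stdbool.h>
#include <stdlib.h>

struct ctx { int *a; int *b; };

/* Unwind labels run in reverse order of the setup steps they undo. */
static int ctx_init(struct ctx *c, bool fail_late)
{
        c->a = malloc(sizeof(*c->a));
        if (!c->a)
                return -1;

        c->b = malloc(sizeof(*c->b));
        if (!c->b)
                goto err_free_a;

        if (fail_late)          /* e.g. a later registration step failing */
                goto err_free_b;

        return 0;

err_free_b:
        free(c->b);
err_free_a:
        free(c->a);
        return -1;
}

int main(void)
{
        struct ctx c;

        if (ctx_init(&c, true) == 0) {
                free(c.b);
                free(c.a);
        }
        return 0;
}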
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 0c443ea98479..374a4d4371f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -497,7 +497,7 @@ struct qlcnic_hardware_context { | |||
| 497 | u16 board_type; | 497 | u16 board_type; |
| 498 | u16 supported_type; | 498 | u16 supported_type; |
| 499 | 499 | ||
| 500 | u16 link_speed; | 500 | u32 link_speed; |
| 501 | u16 link_duplex; | 501 | u16 link_duplex; |
| 502 | u16 link_autoneg; | 502 | u16 link_autoneg; |
| 503 | u16 module_type; | 503 | u16 module_type; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7562ccbbb39a..ed651dde6ef9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
| 29 | #include <linux/firmware.h> | 29 | #include <linux/firmware.h> |
| 30 | #include <linux/prefetch.h> | 30 | #include <linux/prefetch.h> |
| 31 | #include <linux/pci-aspm.h> | ||
| 31 | #include <linux/ipv6.h> | 32 | #include <linux/ipv6.h> |
| 32 | #include <net/ip6_checksum.h> | 33 | #include <net/ip6_checksum.h> |
| 33 | 34 | ||
| @@ -5460,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) | |||
| 5460 | tp->cp_cmd |= PktCntrDisable | INTT_1; | 5461 | tp->cp_cmd |= PktCntrDisable | INTT_1; |
| 5461 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); | 5462 | RTL_W16(tp, CPlusCmd, tp->cp_cmd); |
| 5462 | 5463 | ||
| 5463 | RTL_W16(tp, IntrMitigate, 0x5151); | 5464 | RTL_W16(tp, IntrMitigate, 0x5100); |
| 5464 | 5465 | ||
| 5465 | /* Work around for RxFIFO overflow. */ | 5466 | /* Work around for RxFIFO overflow. */ |
| 5466 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { | 5467 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { |
| @@ -7352,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7352 | if (rc) | 7353 | if (rc) |
| 7353 | return rc; | 7354 | return rc; |
| 7354 | 7355 | ||
| 7356 | /* Disable ASPM completely as it causes devices to randomly stop | ||
| 7357 | * working, as well as full system hangs, for some PCIe device users. | ||
| 7358 | */ | ||
| 7359 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); | ||
| 7360 | |||
| 7355 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 7361 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
| 7356 | rc = pcim_enable_device(pdev); | 7362 | rc = pcim_enable_device(pdev); |
| 7357 | if (rc < 0) { | 7363 | if (rc < 0) { |
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index a18149720aa2..cba5881b2746 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c | |||
| @@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv) | |||
| 673 | } | 673 | } |
| 674 | 674 | ||
| 675 | static void *netsec_alloc_rx_data(struct netsec_priv *priv, | 675 | static void *netsec_alloc_rx_data(struct netsec_priv *priv, |
| 676 | dma_addr_t *dma_handle, u16 *desc_len) | 676 | dma_addr_t *dma_handle, u16 *desc_len, |
| 677 | bool napi) | ||
| 677 | { | 678 | { |
| 678 | size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 679 | size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 679 | size_t payload_len = NETSEC_RX_BUF_SZ; | 680 | size_t payload_len = NETSEC_RX_BUF_SZ; |
| @@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv, | |||
| 682 | 683 | ||
| 683 | total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD); | 684 | total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD); |
| 684 | 685 | ||
| 685 | buf = napi_alloc_frag(total_len); | 686 | buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len); |
| 686 | if (!buf) | 687 | if (!buf) |
| 687 | return NULL; | 688 | return NULL; |
| 688 | 689 | ||
| @@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) | |||
| 765 | /* allocate a fresh buffer and map it to the hardware. | 766 | /* allocate a fresh buffer and map it to the hardware. |
| 766 | * This will eventually replace the old buffer in the hardware | 767 | * This will eventually replace the old buffer in the hardware |
| 767 | */ | 768 | */ |
| 768 | buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); | 769 | buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len, |
| 770 | true); | ||
| 769 | if (unlikely(!buf_addr)) | 771 | if (unlikely(!buf_addr)) |
| 770 | break; | 772 | break; |
| 771 | 773 | ||
| @@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv) | |||
| 1069 | void *buf; | 1071 | void *buf; |
| 1070 | u16 len; | 1072 | u16 len; |
| 1071 | 1073 | ||
| 1072 | buf = netsec_alloc_rx_data(priv, &dma_handle, &len); | 1074 | buf = netsec_alloc_rx_data(priv, &dma_handle, &len, |
| 1075 | false); | ||
| 1073 | if (!buf) { | 1076 | if (!buf) { |
| 1074 | netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); | 1077 | netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); |
| 1075 | goto err_out; | 1078 | goto err_out; |
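The extra napi parameter exists because napi_alloc_frag() may only be called from NAPI/softirq context, while the ring-setup path runs in process context and must use netdev_alloc_frag() instead. A userspace sketch of the same context-driven selection; malloc() and the *_sim() helpers stand in for the kernel allocators.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two kernel allocators the netsec change switches
 * between: napi_alloc_frag() (softirq/NAPI context only) and
 * netdev_alloc_frag() (safe from process context).
 */
static void *napi_frag_alloc_sim(size_t len)
{
        printf("NAPI-context allocation of %zu bytes\n", len);
        return malloc(len);
}

static void *netdev_frag_alloc_sim(size_t len)
{
        printf("process-context allocation of %zu bytes\n", len);
        return malloc(len);
}

/* Model of the new helper signature: the caller states which context it
 * runs in, and the helper picks the matching allocator.
 */
static void *alloc_rx_data(size_t len, bool napi)
{
        return napi ? napi_frag_alloc_sim(len) : netdev_frag_alloc_sim(len);
}

int main(void)
{
        void *setup_buf = alloc_rx_data(2048, false); /* ring setup path  */
        void *poll_buf = alloc_rx_data(2048, true);   /* NAPI refill path */

        free(setup_buf);
        free(poll_buf);
        return 0;
}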
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h | |||
| @@ -29,11 +29,13 @@ | |||
| 29 | /* Specific functions used for Ring mode */ | 29 | /* Specific functions used for Ring mode */ |
| 30 | 30 | ||
| 31 | /* Enhanced descriptors */ | 31 | /* Enhanced descriptors */ |
| 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) | 32 | static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, |
| 33 | int bfsize) | ||
| 33 | { | 34 | { |
| 34 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB | 35 | if (bfsize == BUF_SIZE_16KiB) |
| 35 | << ERDES1_BUFFER2_SIZE_SHIFT) | 36 | p->des1 |= cpu_to_le32((BUF_SIZE_8KiB |
| 36 | & ERDES1_BUFFER2_SIZE_MASK); | 37 | << ERDES1_BUFFER2_SIZE_SHIFT) |
| 38 | & ERDES1_BUFFER2_SIZE_MASK); | ||
| 37 | 39 | ||
| 38 | if (end) | 40 | if (end) |
| 39 | p->des1 |= cpu_to_le32(ERDES1_END_RING); | 41 | p->des1 |= cpu_to_le32(ERDES1_END_RING); |
| @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) | |||
| 59 | } | 61 | } |
| 60 | 62 | ||
| 61 | /* Normal descriptors */ | 63 | /* Normal descriptors */ |
| 62 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) | 64 | static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) |
| 63 | { | 65 | { |
| 64 | p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) | 66 | if (bfsize >= BUF_SIZE_2KiB) { |
| 65 | << RDES1_BUFFER2_SIZE_SHIFT) | 67 | int bfsize2; |
| 66 | & RDES1_BUFFER2_SIZE_MASK); | 68 | |
| 69 | bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); | ||
| 70 | p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) | ||
| 71 | & RDES1_BUFFER2_SIZE_MASK); | ||
| 72 | } | ||
| 67 | 73 | ||
| 68 | if (end) | 74 | if (end) |
| 69 | p->des1 |= cpu_to_le32(RDES1_END_RING); | 75 | p->des1 |= cpu_to_le32(RDES1_END_RING); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7fbb6a4dbf51..e061e9f5fad7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
| @@ -296,7 +296,7 @@ exit: | |||
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 298 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 299 | int mode, int end) | 299 | int mode, int end, int bfsize) |
| 300 | { | 300 | { |
| 301 | dwmac4_set_rx_owner(p, disable_rx_ic); | 301 | dwmac4_set_rx_owner(p, disable_rx_ic); |
| 302 | } | 302 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | |||
| @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 125 | static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 126 | int mode, int end) | 126 | int mode, int end, int bfsize) |
| 127 | { | 127 | { |
| 128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); | 128 | dwxgmac2_set_rx_owner(p, disable_rx_ic); |
| 129 | } | 129 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c | |||
| @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 201 | if (unlikely(rdes0 & RDES0_OWN)) | 201 | if (unlikely(rdes0 & RDES0_OWN)) |
| 202 | return dma_own; | 202 | return dma_own; |
| 203 | 203 | ||
| 204 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | ||
| 205 | stats->rx_length_errors++; | ||
| 206 | return discard_frame; | ||
| 207 | } | ||
| 208 | |||
| 204 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { | 209 | if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { |
| 205 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { | 210 | if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { |
| 206 | x->rx_desc++; | 211 | x->rx_desc++; |
| @@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 231 | * It doesn't match the information reported in the databook. | 236 | * It doesn't match the information reported in the databook. |
| 232 | * At any rate, we need to understand if the CSUM hw computation is ok | 237 | * At any rate, we need to understand if the CSUM hw computation is ok |
| 233 | * and report this info to the upper layers. */ | 238 | * and report this info to the upper layers. */ |
| 234 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), | 239 | if (likely(ret == good_frame)) |
| 235 | !!(rdes0 & RDES0_FRAME_TYPE), | 240 | ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), |
| 236 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | 241 | !!(rdes0 & RDES0_FRAME_TYPE), |
| 242 | !!(rdes0 & ERDES0_RX_MAC_ADDR)); | ||
| 237 | 243 | ||
| 238 | if (unlikely(rdes0 & RDES0_DRIBBLING)) | 244 | if (unlikely(rdes0 & RDES0_DRIBBLING)) |
| 239 | x->dribbling_bit++; | 245 | x->dribbling_bit++; |
| @@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 259 | } | 265 | } |
| 260 | 266 | ||
| 261 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 267 | static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
| 262 | int mode, int end) | 268 | int mode, int end, int bfsize) |
| 263 | { | 269 | { |
| 270 | int bfsize1; | ||
| 271 | |||
| 264 | p->des0 |= cpu_to_le32(RDES0_OWN); | 272 | p->des0 |= cpu_to_le32(RDES0_OWN); |
| 265 | p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); | 273 | |
| 274 | bfsize1 = min(bfsize, BUF_SIZE_8KiB); | ||
| 275 | p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); | ||
| 266 | 276 | ||
| 267 | if (mode == STMMAC_CHAIN_MODE) | 277 | if (mode == STMMAC_CHAIN_MODE) |
| 268 | ehn_desc_rx_set_on_chain(p); | 278 | ehn_desc_rx_set_on_chain(p); |
| 269 | else | 279 | else |
| 270 | ehn_desc_rx_set_on_ring(p, end); | 280 | ehn_desc_rx_set_on_ring(p, end, bfsize); |
| 271 | 281 | ||
| 272 | if (disable_rx_ic) | 282 | if (disable_rx_ic) |
| 273 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); | 283 | p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h | |||
| @@ -33,7 +33,7 @@ struct dma_extended_desc; | |||
| 33 | struct stmmac_desc_ops { | 33 | struct stmmac_desc_ops { |
| 34 | /* DMA RX descriptor ring initialization */ | 34 | /* DMA RX descriptor ring initialization */ |
| 35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, | 35 | void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, |
| 36 | int end); | 36 | int end, int bfsize); |
| 37 | /* DMA TX descriptor ring initialization */ | 37 | /* DMA TX descriptor ring initialization */ |
| 38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); | 38 | void (*init_tx_desc)(struct dma_desc *p, int mode, int end); |
| 39 | /* Invoked by the xmit function to prepare the tx descriptor */ | 39 | /* Invoked by the xmit function to prepare the tx descriptor */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..6d690678c20e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c | |||
| @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 91 | return dma_own; | 91 | return dma_own; |
| 92 | 92 | ||
| 93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { | 93 | if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { |
| 94 | pr_warn("%s: Oversized frame spanned multiple buffers\n", | ||
| 95 | __func__); | ||
| 96 | stats->rx_length_errors++; | 94 | stats->rx_length_errors++; |
| 97 | return discard_frame; | 95 | return discard_frame; |
| 98 | } | 96 | } |
| @@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
| 135 | } | 133 | } |
| 136 | 134 | ||
| 137 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, | 135 | static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, |
| 138 | int end) | 136 | int end, int bfsize) |
| 139 | { | 137 | { |
| 138 | int bfsize1; | ||
| 139 | |||
| 140 | p->des0 |= cpu_to_le32(RDES0_OWN); | 140 | p->des0 |= cpu_to_le32(RDES0_OWN); |
| 141 | p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); | 141 | |
| 142 | bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); | ||
| 143 | p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK); | ||
| 142 | 144 | ||
| 143 | if (mode == STMMAC_CHAIN_MODE) | 145 | if (mode == STMMAC_CHAIN_MODE) |
| 144 | ndesc_rx_set_on_chain(p, end); | 146 | ndesc_rx_set_on_chain(p, end); |
| 145 | else | 147 | else |
| 146 | ndesc_rx_set_on_ring(p, end); | 148 | ndesc_rx_set_on_ring(p, end, bfsize); |
| 147 | 149 | ||
| 148 | if (disable_rx_ic) | 150 | if (disable_rx_ic) |
| 149 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); | 151 | p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); |
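Taken together, the descs_com.h and norm_desc.c hunks split the configured DMA buffer size across the two descriptor buffer fields instead of hard-coding 2 KiB / 8 KiB values. The standalone program below just replays that arithmetic for a few buffer sizes; the constant and the min() logic mirror the patch, while the chosen sizes are arbitrary examples.

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int sizes[] = { 1536, 2048, 4096 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int bfsize = sizes[i];
                /* buffer 1 takes up to 2 KiB - 1 bytes */
                int bfsize1 = min_int(bfsize, BUF_SIZE_2KiB - 1);
                int bfsize2 = 0;

                /* buffer 2 is only programmed when bfsize needs it */
                if (bfsize >= BUF_SIZE_2KiB)
                        bfsize2 = min_int(bfsize - BUF_SIZE_2KiB + 1,
                                          BUF_SIZE_2KiB - 1);

                printf("bfsize=%d -> buf1=%d buf2=%d\n",
                       bfsize, bfsize1, bfsize2);
        }
        return 0;
}

For bfsize = 4096 the split comes out as 2047 + 2047, slightly short of the nominal size, which matches the BUF_SIZE_2KiB - 1 caps used in the patch.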
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6a2e1031a62a..48712437d0da 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) | |||
| 1136 | if (priv->extend_desc) | 1136 | if (priv->extend_desc) |
| 1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, | 1137 | stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, |
| 1138 | priv->use_riwt, priv->mode, | 1138 | priv->use_riwt, priv->mode, |
| 1139 | (i == DMA_RX_SIZE - 1)); | 1139 | (i == DMA_RX_SIZE - 1), |
| 1140 | priv->dma_buf_sz); | ||
| 1140 | else | 1141 | else |
| 1141 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], | 1142 | stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], |
| 1142 | priv->use_riwt, priv->mode, | 1143 | priv->use_riwt, priv->mode, |
| 1143 | (i == DMA_RX_SIZE - 1)); | 1144 | (i == DMA_RX_SIZE - 1), |
| 1145 | priv->dma_buf_sz); | ||
| 1144 | } | 1146 | } |
| 1145 | 1147 | ||
| 1146 | /** | 1148 | /** |
| @@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev) | |||
| 2614 | u32 chan; | 2616 | u32 chan; |
| 2615 | int ret; | 2617 | int ret; |
| 2616 | 2618 | ||
| 2617 | stmmac_check_ether_addr(priv); | ||
| 2618 | |||
| 2619 | if (priv->hw->pcs != STMMAC_PCS_RGMII && | 2619 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
| 2620 | priv->hw->pcs != STMMAC_PCS_TBI && | 2620 | priv->hw->pcs != STMMAC_PCS_TBI && |
| 2621 | priv->hw->pcs != STMMAC_PCS_RTBI) { | 2621 | priv->hw->pcs != STMMAC_PCS_RTBI) { |
| @@ -3352,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3352 | { | 3352 | { |
| 3353 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3353 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
| 3354 | struct stmmac_channel *ch = &priv->channel[queue]; | 3354 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 3355 | unsigned int entry = rx_q->cur_rx; | 3355 | unsigned int next_entry = rx_q->cur_rx; |
| 3356 | int coe = priv->hw->rx_csum; | 3356 | int coe = priv->hw->rx_csum; |
| 3357 | unsigned int next_entry; | ||
| 3358 | unsigned int count = 0; | 3357 | unsigned int count = 0; |
| 3359 | bool xmac; | 3358 | bool xmac; |
| 3360 | 3359 | ||
| @@ -3372,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3372 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); | 3371 | stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); |
| 3373 | } | 3372 | } |
| 3374 | while (count < limit) { | 3373 | while (count < limit) { |
| 3375 | int status; | 3374 | int entry, status; |
| 3376 | struct dma_desc *p; | 3375 | struct dma_desc *p; |
| 3377 | struct dma_desc *np; | 3376 | struct dma_desc *np; |
| 3378 | 3377 | ||
| 3378 | entry = next_entry; | ||
| 3379 | |||
| 3379 | if (priv->extend_desc) | 3380 | if (priv->extend_desc) |
| 3380 | p = (struct dma_desc *)(rx_q->dma_erx + entry); | 3381 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
| 3381 | else | 3382 | else |
| @@ -3431,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3431 | * ignored | 3432 | * ignored |
| 3432 | */ | 3433 | */ |
| 3433 | if (frame_len > priv->dma_buf_sz) { | 3434 | if (frame_len > priv->dma_buf_sz) { |
| 3434 | netdev_err(priv->dev, | 3435 | if (net_ratelimit()) |
| 3435 | "len %d larger than size (%d)\n", | 3436 | netdev_err(priv->dev, |
| 3436 | frame_len, priv->dma_buf_sz); | 3437 | "len %d larger than size (%d)\n", |
| 3438 | frame_len, priv->dma_buf_sz); | ||
| 3437 | priv->dev->stats.rx_length_errors++; | 3439 | priv->dev->stats.rx_length_errors++; |
| 3438 | break; | 3440 | continue; |
| 3439 | } | 3441 | } |
| 3440 | 3442 | ||
| 3441 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 3443 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
| @@ -3470,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3470 | dev_warn(priv->device, | 3472 | dev_warn(priv->device, |
| 3471 | "packet dropped\n"); | 3473 | "packet dropped\n"); |
| 3472 | priv->dev->stats.rx_dropped++; | 3474 | priv->dev->stats.rx_dropped++; |
| 3473 | break; | 3475 | continue; |
| 3474 | } | 3476 | } |
| 3475 | 3477 | ||
| 3476 | dma_sync_single_for_cpu(priv->device, | 3478 | dma_sync_single_for_cpu(priv->device, |
| @@ -3490,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3490 | } else { | 3492 | } else { |
| 3491 | skb = rx_q->rx_skbuff[entry]; | 3493 | skb = rx_q->rx_skbuff[entry]; |
| 3492 | if (unlikely(!skb)) { | 3494 | if (unlikely(!skb)) { |
| 3493 | netdev_err(priv->dev, | 3495 | if (net_ratelimit()) |
| 3494 | "%s: Inconsistent Rx chain\n", | 3496 | netdev_err(priv->dev, |
| 3495 | priv->dev->name); | 3497 | "%s: Inconsistent Rx chain\n", |
| 3498 | priv->dev->name); | ||
| 3496 | priv->dev->stats.rx_dropped++; | 3499 | priv->dev->stats.rx_dropped++; |
| 3497 | break; | 3500 | continue; |
| 3498 | } | 3501 | } |
| 3499 | prefetch(skb->data - NET_IP_ALIGN); | 3502 | prefetch(skb->data - NET_IP_ALIGN); |
| 3500 | rx_q->rx_skbuff[entry] = NULL; | 3503 | rx_q->rx_skbuff[entry] = NULL; |
| @@ -3529,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3529 | priv->dev->stats.rx_packets++; | 3532 | priv->dev->stats.rx_packets++; |
| 3530 | priv->dev->stats.rx_bytes += frame_len; | 3533 | priv->dev->stats.rx_bytes += frame_len; |
| 3531 | } | 3534 | } |
| 3532 | entry = next_entry; | ||
| 3533 | } | 3535 | } |
| 3534 | 3536 | ||
| 3535 | stmmac_rx_refill(priv, queue); | 3537 | stmmac_rx_refill(priv, queue); |
| @@ -4299,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4299 | if (ret) | 4301 | if (ret) |
| 4300 | goto error_hw_init; | 4302 | goto error_hw_init; |
| 4301 | 4303 | ||
| 4304 | stmmac_check_ether_addr(priv); | ||
| 4305 | |||
| 4302 | /* Configure real RX and TX queues */ | 4306 | /* Configure real RX and TX queues */ |
| 4303 | netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); | 4307 | netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
| 4304 | netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); | 4308 | netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); |
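The stmmac_rx() rework latches the ring index at the top of each iteration and downgrades per-frame failures from break to continue, so one oversized or inconsistent descriptor no longer stalls the whole receive ring. A toy userspace model of that loop shape; the ring contents and status values are invented for illustration.

#include <stdio.h>

#define RING_SIZE 8

enum desc_status { DESC_OK, DESC_ERROR, DESC_OWNED_BY_HW };

/* Toy ring: descriptor 2 is bad, descriptor 5 still belongs to the DMA. */
static enum desc_status ring[RING_SIZE] = {
        DESC_OK, DESC_OK, DESC_ERROR, DESC_OK, DESC_OK, DESC_OWNED_BY_HW,
};

int main(void)
{
        unsigned int next_entry = 0, count = 0, limit = RING_SIZE;

        while (count < limit) {
                /* current index is latched from next_entry at loop top */
                unsigned int entry = next_entry;

                if (ring[entry] == DESC_OWNED_BY_HW)
                        break;          /* nothing more to reap */

                count++;
                next_entry = (entry + 1) % RING_SIZE;

                if (ring[entry] == DESC_ERROR) {
                        printf("entry %u: dropped, moving on\n", entry);
                        continue;       /* was a break before the fix */
                }

                printf("entry %u: delivered\n", entry);
        }
        return 0;
}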
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index d819e8eaba12..cc1e887e47b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
| @@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = { | |||
| 159 | }, | 159 | }, |
| 160 | .driver_data = (void *)&galileo_stmmac_dmi_data, | 160 | .driver_data = (void *)&galileo_stmmac_dmi_data, |
| 161 | }, | 161 | }, |
| 162 | /* | ||
| 163 | * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. | ||
| 164 | * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020, which | ||
| 165 | * has only one PCI network device, while other asset tags are | ||
| 166 | * for IOT2040 which has two. | ||
| 167 | */ | ||
| 162 | { | 168 | { |
| 163 | .matches = { | 169 | .matches = { |
| 164 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), | 170 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), |
| @@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = { | |||
| 170 | { | 176 | { |
| 171 | .matches = { | 177 | .matches = { |
| 172 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), | 178 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), |
| 173 | DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, | ||
| 174 | "6ES7647-0AA00-1YA2"), | ||
| 175 | }, | 179 | }, |
| 176 | .driver_data = (void *)&iot2040_stmmac_dmi_data, | 180 | .driver_data = (void *)&iot2040_stmmac_dmi_data, |
| 177 | }, | 181 | }, |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e859ae2e42d5..49f41b64077b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -987,6 +987,7 @@ struct netvsc_device { | |||
| 987 | 987 | ||
| 988 | wait_queue_head_t wait_drain; | 988 | wait_queue_head_t wait_drain; |
| 989 | bool destroy; | 989 | bool destroy; |
| 990 | bool tx_disable; /* if true, do not wake up queue again */ | ||
| 990 | 991 | ||
| 991 | /* Receive buffer allocated by us but managed by NetVSP */ | 992 | /* Receive buffer allocated by us but managed by NetVSP */ |
| 992 | void *recv_buf; | 993 | void *recv_buf; |
| 992 | void *recv_buf; | 993 | void *recv_buf; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 813d195bbd57..e0dce373cdd9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
| 110 | 110 | ||
| 111 | init_waitqueue_head(&net_device->wait_drain); | 111 | init_waitqueue_head(&net_device->wait_drain); |
| 112 | net_device->destroy = false; | 112 | net_device->destroy = false; |
| 113 | net_device->tx_disable = false; | ||
| 113 | 114 | ||
| 114 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 115 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
| 115 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 116 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
| @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, | |||
| 719 | } else { | 720 | } else { |
| 720 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); | 721 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); |
| 721 | 722 | ||
| 722 | if (netif_tx_queue_stopped(txq) && | 723 | if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && |
| 723 | (hv_get_avail_to_write_percent(&channel->outbound) > | 724 | (hv_get_avail_to_write_percent(&channel->outbound) > |
| 724 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { | 725 | RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { |
| 725 | netif_tx_wake_queue(txq); | 726 | netif_tx_wake_queue(txq); |
| @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( | |||
| 874 | } else if (ret == -EAGAIN) { | 875 | } else if (ret == -EAGAIN) { |
| 875 | netif_tx_stop_queue(txq); | 876 | netif_tx_stop_queue(txq); |
| 876 | ndev_ctx->eth_stats.stop_queue++; | 877 | ndev_ctx->eth_stats.stop_queue++; |
| 877 | if (atomic_read(&nvchan->queue_sends) < 1) { | 878 | if (atomic_read(&nvchan->queue_sends) < 1 && |
| 879 | !net_device->tx_disable) { | ||
| 878 | netif_tx_wake_queue(txq); | 880 | netif_tx_wake_queue(txq); |
| 879 | ndev_ctx->eth_stats.wake_queue++; | 881 | ndev_ctx->eth_stats.wake_queue++; |
| 880 | ret = -ENOSPC; | 882 | ret = -ENOSPC; |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf4897043e83..b20fb0fb595b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) | |||
| 109 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | static void netvsc_tx_enable(struct netvsc_device *nvscdev, | ||
| 113 | struct net_device *ndev) | ||
| 114 | { | ||
| 115 | nvscdev->tx_disable = false; | ||
| 116 | virt_wmb(); /* ensure queue wake up mechanism is on */ | ||
| 117 | |||
| 118 | netif_tx_wake_all_queues(ndev); | ||
| 119 | } | ||
| 120 | |||
| 112 | static int netvsc_open(struct net_device *net) | 121 | static int netvsc_open(struct net_device *net) |
| 113 | { | 122 | { |
| 114 | struct net_device_context *ndev_ctx = netdev_priv(net); | 123 | struct net_device_context *ndev_ctx = netdev_priv(net); |
| @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) | |||
| 129 | rdev = nvdev->extension; | 138 | rdev = nvdev->extension; |
| 130 | if (!rdev->link_state) { | 139 | if (!rdev->link_state) { |
| 131 | netif_carrier_on(net); | 140 | netif_carrier_on(net); |
| 132 | netif_tx_wake_all_queues(net); | 141 | netvsc_tx_enable(nvdev, net); |
| 133 | } | 142 | } |
| 134 | 143 | ||
| 135 | if (vf_netdev) { | 144 | if (vf_netdev) { |
| @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) | |||
| 184 | } | 193 | } |
| 185 | } | 194 | } |
| 186 | 195 | ||
| 196 | static void netvsc_tx_disable(struct netvsc_device *nvscdev, | ||
| 197 | struct net_device *ndev) | ||
| 198 | { | ||
| 199 | if (nvscdev) { | ||
| 200 | nvscdev->tx_disable = true; | ||
| 201 | virt_wmb(); /* ensure txq will not wake up after stop */ | ||
| 202 | } | ||
| 203 | |||
| 204 | netif_tx_disable(ndev); | ||
| 205 | } | ||
| 206 | |||
| 187 | static int netvsc_close(struct net_device *net) | 207 | static int netvsc_close(struct net_device *net) |
| 188 | { | 208 | { |
| 189 | struct net_device_context *net_device_ctx = netdev_priv(net); | 209 | struct net_device_context *net_device_ctx = netdev_priv(net); |
| @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) | |||
| 192 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 212 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
| 193 | int ret; | 213 | int ret; |
| 194 | 214 | ||
| 195 | netif_tx_disable(net); | 215 | netvsc_tx_disable(nvdev, net); |
| 196 | 216 | ||
| 197 | /* No need to close rndis filter if it is removed already */ | 217 | /* No need to close rndis filter if it is removed already */ |
| 198 | if (!nvdev) | 218 | if (!nvdev) |
| @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, | |||
| 920 | 940 | ||
| 921 | /* If device was up (receiving) then shutdown */ | 941 | /* If device was up (receiving) then shutdown */ |
| 922 | if (netif_running(ndev)) { | 942 | if (netif_running(ndev)) { |
| 923 | netif_tx_disable(ndev); | 943 | netvsc_tx_disable(nvdev, ndev); |
| 924 | 944 | ||
| 925 | ret = rndis_filter_close(nvdev); | 945 | ret = rndis_filter_close(nvdev); |
| 926 | if (ret) { | 946 | if (ret) { |
| @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1908 | if (rdev->link_state) { | 1928 | if (rdev->link_state) { |
| 1909 | rdev->link_state = false; | 1929 | rdev->link_state = false; |
| 1910 | netif_carrier_on(net); | 1930 | netif_carrier_on(net); |
| 1911 | netif_tx_wake_all_queues(net); | 1931 | netvsc_tx_enable(net_device, net); |
| 1912 | } else { | 1932 | } else { |
| 1913 | notify = true; | 1933 | notify = true; |
| 1914 | } | 1934 | } |
| @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1918 | if (!rdev->link_state) { | 1938 | if (!rdev->link_state) { |
| 1919 | rdev->link_state = true; | 1939 | rdev->link_state = true; |
| 1920 | netif_carrier_off(net); | 1940 | netif_carrier_off(net); |
| 1921 | netif_tx_stop_all_queues(net); | 1941 | netvsc_tx_disable(net_device, net); |
| 1922 | } | 1942 | } |
| 1923 | kfree(event); | 1943 | kfree(event); |
| 1924 | break; | 1944 | break; |
| @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1927 | if (!rdev->link_state) { | 1947 | if (!rdev->link_state) { |
| 1928 | rdev->link_state = true; | 1948 | rdev->link_state = true; |
| 1929 | netif_carrier_off(net); | 1949 | netif_carrier_off(net); |
| 1930 | netif_tx_stop_all_queues(net); | 1950 | netvsc_tx_disable(net_device, net); |
| 1931 | event->event = RNDIS_STATUS_MEDIA_CONNECT; | 1951 | event->event = RNDIS_STATUS_MEDIA_CONNECT; |
| 1932 | spin_lock_irqsave(&ndev_ctx->lock, flags); | 1952 | spin_lock_irqsave(&ndev_ctx->lock, flags); |
| 1933 | list_add(&event->list, &ndev_ctx->reconfig_events); | 1953 | list_add(&event->list, &ndev_ctx->reconfig_events); |
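The netvsc changes gate every queue wake-up on a new tx_disable flag that is published with a barrier before the queues are touched, so a racing send-completion cannot re-wake a queue that link-change or close has just disabled. The sketch below models that idea with C11 atomics in place of virt_wmb()/netif_tx_*(); the function names and the release/acquire choice are illustrative assumptions, not the driver's exact primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_disable;
static bool queue_stopped = true;

static void wake_queue(void) { queue_stopped = false; printf("queue woken\n"); }
static void stop_queue(void) { queue_stopped = true; printf("queue stopped\n"); }

/* Publish the flag before touching the queues; completions re-check it. */
static void tx_enable(void)
{
        atomic_store_explicit(&tx_disable, false, memory_order_release);
        wake_queue();
}

static void tx_disable_path(void)
{
        atomic_store_explicit(&tx_disable, true, memory_order_release);
        stop_queue();
}

/* Completion handler: only wake the queue if transmit is still enabled. */
static void tx_complete(void)
{
        if (queue_stopped &&
            !atomic_load_explicit(&tx_disable, memory_order_acquire))
                wake_queue();
}

int main(void)
{
        tx_enable();
        tx_disable_path();
        tx_complete();          /* must not wake the queue here */
        return 0;
}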
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 92b64e254b44..7475cef17cf7 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c | |||
| @@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = { | |||
| 159 | }; | 159 | }; |
| 160 | MODULE_DEVICE_TABLE(spi, ks8995_id); | 160 | MODULE_DEVICE_TABLE(spi, ks8995_id); |
| 161 | 161 | ||
| 162 | static const struct of_device_id ks8895_spi_of_match[] = { | ||
| 163 | { .compatible = "micrel,ks8995" }, | ||
| 164 | { .compatible = "micrel,ksz8864" }, | ||
| 165 | { .compatible = "micrel,ksz8795" }, | ||
| 166 | { }, | ||
| 167 | }; | ||
| 168 | MODULE_DEVICE_TABLE(of, ks8895_spi_of_match); | ||
| 169 | |||
| 162 | static inline u8 get_chip_id(u8 val) | 170 | static inline u8 get_chip_id(u8 val) |
| 163 | { | 171 | { |
| 164 | return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; | 172 | return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; |
| @@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi) | |||
| 526 | static struct spi_driver ks8995_driver = { | 534 | static struct spi_driver ks8995_driver = { |
| 527 | .driver = { | 535 | .driver = { |
| 528 | .name = "spi-ks8995", | 536 | .name = "spi-ks8995", |
| 537 | .of_match_table = of_match_ptr(ks8895_spi_of_match), | ||
| 529 | }, | 538 | }, |
| 530 | .probe = ks8995_probe, | 539 | .probe = ks8995_probe, |
| 531 | .remove = ks8995_remove, | 540 | .remove = ks8995_remove, |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6ed96fdfd96d..16963f7a88f7 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -1156,6 +1156,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
| 1156 | return -EINVAL; | 1156 | return -EINVAL; |
| 1157 | } | 1157 | } |
| 1158 | 1158 | ||
| 1159 | if (netdev_has_upper_dev(dev, port_dev)) { | ||
| 1160 | NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface"); | ||
| 1161 | netdev_err(dev, "Device %s is already an upper device of the team interface\n", | ||
| 1162 | portname); | ||
| 1163 | return -EBUSY; | ||
| 1164 | } | ||
| 1165 | |||
| 1159 | if (port_dev->features & NETIF_F_VLAN_CHALLENGED && | 1166 | if (port_dev->features & NETIF_F_VLAN_CHALLENGED && |
| 1160 | vlan_uses_dev(dev)) { | 1167 | vlan_uses_dev(dev)) { |
| 1161 | NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); | 1168 | NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); |
| @@ -1246,6 +1253,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
| 1246 | goto err_option_port_add; | 1253 | goto err_option_port_add; |
| 1247 | } | 1254 | } |
| 1248 | 1255 | ||
| 1256 | /* set promiscuity level to new slave */ | ||
| 1257 | if (dev->flags & IFF_PROMISC) { | ||
| 1258 | err = dev_set_promiscuity(port_dev, 1); | ||
| 1259 | if (err) | ||
| 1260 | goto err_set_slave_promisc; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | /* set allmulti level to new slave */ | ||
| 1264 | if (dev->flags & IFF_ALLMULTI) { | ||
| 1265 | err = dev_set_allmulti(port_dev, 1); | ||
| 1266 | if (err) { | ||
| 1267 | if (dev->flags & IFF_PROMISC) | ||
| 1268 | dev_set_promiscuity(port_dev, -1); | ||
| 1269 | goto err_set_slave_promisc; | ||
| 1270 | } | ||
| 1271 | } | ||
| 1272 | |||
| 1249 | netif_addr_lock_bh(dev); | 1273 | netif_addr_lock_bh(dev); |
| 1250 | dev_uc_sync_multiple(port_dev, dev); | 1274 | dev_uc_sync_multiple(port_dev, dev); |
| 1251 | dev_mc_sync_multiple(port_dev, dev); | 1275 | dev_mc_sync_multiple(port_dev, dev); |
| @@ -1262,6 +1286,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
| 1262 | 1286 | ||
| 1263 | return 0; | 1287 | return 0; |
| 1264 | 1288 | ||
| 1289 | err_set_slave_promisc: | ||
| 1290 | __team_option_inst_del_port(team, port); | ||
| 1291 | |||
| 1265 | err_option_port_add: | 1292 | err_option_port_add: |
| 1266 | team_upper_dev_unlink(team, port); | 1293 | team_upper_dev_unlink(team, port); |
| 1267 | 1294 | ||
| @@ -1307,6 +1334,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) | |||
| 1307 | 1334 | ||
| 1308 | team_port_disable(team, port); | 1335 | team_port_disable(team, port); |
| 1309 | list_del_rcu(&port->list); | 1336 | list_del_rcu(&port->list); |
| 1337 | |||
| 1338 | if (dev->flags & IFF_PROMISC) | ||
| 1339 | dev_set_promiscuity(port_dev, -1); | ||
| 1340 | if (dev->flags & IFF_ALLMULTI) | ||
| 1341 | dev_set_allmulti(port_dev, -1); | ||
| 1342 | |||
| 1310 | team_upper_dev_unlink(team, port); | 1343 | team_upper_dev_unlink(team, port); |
| 1311 | netdev_rx_handler_unregister(port_dev); | 1344 | netdev_rx_handler_unregister(port_dev); |
| 1312 | team_port_disable_netpoll(port); | 1345 | team_port_disable_netpoll(port); |
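The team driver now mirrors the master's promiscuous and allmulti state onto each port, rolling the promisc bump back if the allmulti bump fails and dropping both again when the port leaves. A small userspace model of that refcount discipline; the flag values and helpers are stand-ins, not the netdev API.

#include <stdio.h>

#define IFF_PROMISC  0x1
#define IFF_ALLMULTI 0x2

struct port { int promisc_cnt; int allmulti_cnt; };

static int set_promiscuity(struct port *p, int inc) { p->promisc_cnt += inc; return 0; }
static int set_allmulti(struct port *p, int inc)    { p->allmulti_cnt += inc; return 0; }

/* Mirror the master's state onto a new port, undoing the promisc bump if
 * the allmulti bump fails, and drop both when the port is removed.
 */
static int port_add(unsigned int master_flags, struct port *p)
{
        if ((master_flags & IFF_PROMISC) && set_promiscuity(p, 1))
                return -1;

        if ((master_flags & IFF_ALLMULTI) && set_allmulti(p, 1)) {
                if (master_flags & IFF_PROMISC)
                        set_promiscuity(p, -1);         /* roll back */
                return -1;
        }
        return 0;
}

static void port_del(unsigned int master_flags, struct port *p)
{
        if (master_flags & IFF_PROMISC)
                set_promiscuity(p, -1);
        if (master_flags & IFF_ALLMULTI)
                set_allmulti(p, -1);
}

int main(void)
{
        struct port p = { 0, 0 };
        unsigned int flags = IFF_PROMISC | IFF_ALLMULTI;

        if (!port_add(flags, &p))
                printf("port: promisc=%d allmulti=%d\n",
                       p.promisc_cnt, p.allmulti_cnt);
        port_del(flags, &p);
        printf("after del: promisc=%d allmulti=%d\n",
               p.promisc_cnt, p.allmulti_cnt);
        return 0;
}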
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 74bebbdb4b15..9195f3476b1d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = { | |||
| 1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 1203 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
| 1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | 1204 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ |
| 1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ | 1205 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ |
| 1206 | {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ | ||
| 1206 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ | 1207 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ |
| 1207 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1208 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
| 1208 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1209 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7c1430ed0244..9ee4d7402ca2 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
| @@ -875,6 +875,7 @@ static const struct net_device_ops vrf_netdev_ops = { | |||
| 875 | .ndo_init = vrf_dev_init, | 875 | .ndo_init = vrf_dev_init, |
| 876 | .ndo_uninit = vrf_dev_uninit, | 876 | .ndo_uninit = vrf_dev_uninit, |
| 877 | .ndo_start_xmit = vrf_xmit, | 877 | .ndo_start_xmit = vrf_xmit, |
| 878 | .ndo_set_mac_address = eth_mac_addr, | ||
| 878 | .ndo_get_stats64 = vrf_get_stats64, | 879 | .ndo_get_stats64 = vrf_get_stats64, |
| 879 | .ndo_add_slave = vrf_add_slave, | 880 | .ndo_add_slave = vrf_add_slave, |
| 880 | .ndo_del_slave = vrf_del_slave, | 881 | .ndo_del_slave = vrf_del_slave, |
| @@ -1273,9 +1274,15 @@ static void vrf_setup(struct net_device *dev) | |||
| 1273 | 1274 | ||
| 1274 | /* default to no qdisc; user can add if desired */ | 1275 | /* default to no qdisc; user can add if desired */ |
| 1275 | dev->priv_flags |= IFF_NO_QUEUE; | 1276 | dev->priv_flags |= IFF_NO_QUEUE; |
| 1277 | dev->priv_flags |= IFF_NO_RX_HANDLER; | ||
| 1278 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 1276 | 1279 | ||
| 1277 | dev->min_mtu = 0; | 1280 | /* VRF devices do not care about MTU, but if the MTU is set |
| 1278 | dev->max_mtu = 0; | 1281 | * too low then the ipv4 and ipv6 protocols are disabled |
| 1282 | * which breaks networking. | ||
| 1283 | */ | ||
| 1284 | dev->min_mtu = IPV6_MIN_MTU; | ||
| 1285 | dev->max_mtu = ETH_MAX_MTU; | ||
| 1279 | } | 1286 | } |
| 1280 | 1287 | ||
| 1281 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], | 1288 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], |
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index a20ea270d519..1acc622d2183 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c | |||
| @@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) | |||
| 2728 | num_msdus++; | 2728 | num_msdus++; |
| 2729 | num_bytes += ret; | 2729 | num_bytes += ret; |
| 2730 | } | 2730 | } |
| 2731 | ieee80211_return_txq(hw, txq); | 2731 | ieee80211_return_txq(hw, txq, false); |
| 2732 | ieee80211_txq_schedule_end(hw, txq->ac); | 2732 | ieee80211_txq_schedule_end(hw, txq->ac); |
| 2733 | 2733 | ||
| 2734 | record->num_msdus = cpu_to_le16(num_msdus); | 2734 | record->num_msdus = cpu_to_le16(num_msdus); |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b73c23d4ce86..41e89db244d2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
| @@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) | |||
| 4089 | if (ret < 0) | 4089 | if (ret < 0) |
| 4090 | break; | 4090 | break; |
| 4091 | } | 4091 | } |
| 4092 | ieee80211_return_txq(hw, txq); | 4092 | ieee80211_return_txq(hw, txq, false); |
| 4093 | ath10k_htt_tx_txq_update(hw, txq); | 4093 | ath10k_htt_tx_txq_update(hw, txq); |
| 4094 | if (ret == -EBUSY) | 4094 | if (ret == -EBUSY) |
| 4095 | break; | 4095 | break; |
| @@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, | |||
| 4374 | if (ret < 0) | 4374 | if (ret < 0) |
| 4375 | break; | 4375 | break; |
| 4376 | } | 4376 | } |
| 4377 | ieee80211_return_txq(hw, txq); | 4377 | ieee80211_return_txq(hw, txq, false); |
| 4378 | ath10k_htt_tx_txq_update(hw, txq); | 4378 | ath10k_htt_tx_txq_update(hw, txq); |
| 4379 | out: | 4379 | out: |
| 4380 | ieee80211_txq_schedule_end(hw, ac); | 4380 | ieee80211_txq_schedule_end(hw, ac); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 773d428ff1b0..b17e1ca40995 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) | |||
| 1938 | goto out; | 1938 | goto out; |
| 1939 | 1939 | ||
| 1940 | while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { | 1940 | while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { |
| 1941 | bool force; | ||
| 1942 | |||
| 1941 | tid = (struct ath_atx_tid *)queue->drv_priv; | 1943 | tid = (struct ath_atx_tid *)queue->drv_priv; |
| 1942 | 1944 | ||
| 1943 | ret = ath_tx_sched_aggr(sc, txq, tid); | 1945 | ret = ath_tx_sched_aggr(sc, txq, tid); |
| 1944 | ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); | 1946 | ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); |
| 1945 | 1947 | ||
| 1946 | ieee80211_return_txq(hw, queue); | 1948 | force = !skb_queue_empty(&tid->retry_q); |
| 1949 | ieee80211_return_txq(hw, queue, force); | ||
| 1947 | } | 1950 | } |
| 1948 | 1951 | ||
| 1949 | out: | 1952 | out: |
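The ath9k hunk forces a queue back onto the airtime scheduler whenever its driver-private retry list is non-empty, since those frames are invisible to mac80211's own queue-length check. A toy model of what the force argument is meant to achieve; the struct and the scheduling rule here are simplifications, not mac80211's actual ieee80211_return_txq() internals.

#include <stdbool.h>
#include <stdio.h>

struct txq { int pending; int retry_pending; bool scheduled; };

/* Keep a queue scheduled if the caller forces it, e.g. because it still
 * holds frames the scheduler cannot see on a driver retry list.
 */
static void return_txq(struct txq *q, bool force)
{
        q->scheduled = force || q->pending > 0;
}

int main(void)
{
        struct txq q = { .pending = 0, .retry_pending = 3, .scheduled = true };

        /* Without the force hint, a queue whose frames all sit on the
         * driver's retry list looks empty to the scheduler and can starve.
         */
        return_txq(&q, q.retry_pending > 0);
        printf("still scheduled: %s\n", q.scheduled ? "yes" : "no");
        return 0;
}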
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index fdc56f821b5a..eb6defb6d0cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c | |||
| @@ -82,6 +82,7 @@ | |||
| 82 | #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" | 82 | #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" |
| 83 | #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" | 83 | #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" |
| 84 | #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" | 84 | #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" |
| 85 | #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" | ||
| 85 | #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" | 86 | #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" |
| 86 | #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" | 87 | #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" |
| 87 | #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" | 88 | #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" |
| @@ -105,8 +106,8 @@ | |||
| 105 | IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" | 106 | IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" |
| 106 | #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ | 107 | #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ |
| 107 | IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" | 108 | IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" |
| 108 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ | 109 | #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ |
| 109 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" | 110 | IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" |
| 110 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ | 111 | #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ |
| 111 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" | 112 | IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" |
| 112 | #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ | 113 | #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ |
| @@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { | |||
| 235 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, | 236 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, |
| 236 | }; | 237 | }; |
| 237 | 238 | ||
| 238 | const struct iwl_cfg iwl22260_2ax_cfg = { | 239 | const struct iwl_cfg iwl_ax101_cfg_quz_hr = { |
| 239 | .name = "Intel(R) Wireless-AX 22260", | 240 | .name = "Intel(R) Wi-Fi 6 AX101", |
| 241 | .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, | ||
| 242 | IWL_DEVICE_22500, | ||
| 243 | /* | ||
| 244 | * This device doesn't support receiving BlockAck with a large bitmap | ||
| 245 | * so we need to restrict the size of transmitted aggregation to the | ||
| 246 | * HT size; mac80211 would otherwise pick the HE max (256) by default. | ||
| 247 | */ | ||
| 248 | .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, | ||
| 249 | }; | ||
| 250 | |||
| 251 | const struct iwl_cfg iwl_ax200_cfg_cc = { | ||
| 252 | .name = "Intel(R) Wi-Fi 6 AX200 160MHz", | ||
| 240 | .fw_name_pre = IWL_CC_A_FW_PRE, | 253 | .fw_name_pre = IWL_CC_A_FW_PRE, |
| 241 | IWL_DEVICE_22500, | 254 | IWL_DEVICE_22500, |
| 242 | /* | 255 | /* |
| @@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = { | |||
| 249 | }; | 262 | }; |
| 250 | 263 | ||
| 251 | const struct iwl_cfg killer1650x_2ax_cfg = { | 264 | const struct iwl_cfg killer1650x_2ax_cfg = { |
| 252 | .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", | 265 | .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)", |
| 253 | .fw_name_pre = IWL_CC_A_FW_PRE, | 266 | .fw_name_pre = IWL_CC_A_FW_PRE, |
| 254 | IWL_DEVICE_22500, | 267 | IWL_DEVICE_22500, |
| 255 | /* | 268 | /* |
| @@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = { | |||
| 262 | }; | 275 | }; |
| 263 | 276 | ||
| 264 | const struct iwl_cfg killer1650w_2ax_cfg = { | 277 | const struct iwl_cfg killer1650w_2ax_cfg = { |
| 265 | .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", | 278 | .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)", |
| 266 | .fw_name_pre = IWL_CC_A_FW_PRE, | 279 | .fw_name_pre = IWL_CC_A_FW_PRE, |
| 267 | IWL_DEVICE_22500, | 280 | IWL_DEVICE_22500, |
| 268 | /* | 281 | /* |
| @@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { | |||
| 328 | }; | 341 | }; |
| 329 | 342 | ||
| 330 | const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { | 343 | const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { |
| 331 | .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", | 344 | .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", |
| 332 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, | 345 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, |
| 333 | IWL_DEVICE_22500, | 346 | IWL_DEVICE_22500, |
| 334 | /* | 347 | /* |
| @@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { | |||
| 340 | }; | 353 | }; |
| 341 | 354 | ||
| 342 | const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { | 355 | const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { |
| 343 | .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", | 356 | .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", |
| 344 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, | 357 | .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, |
| 345 | IWL_DEVICE_22500, | 358 | IWL_DEVICE_22500, |
| 346 | /* | 359 | /* |
| @@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | |||
| 444 | MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 457 | MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
| 445 | MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 458 | MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
| 446 | MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 459 | MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
| 460 | MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | ||
| 447 | MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 461 | MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
| 448 | MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 462 | MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
| 449 | MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); | 463 | MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f119c49cd39c..d7380016f1c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c | |||
| @@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, | |||
| 1614 | if (!range) { | 1614 | if (!range) { |
| 1615 | IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", | 1615 | IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", |
| 1616 | le32_to_cpu(reg->region_id), type); | 1616 | le32_to_cpu(reg->region_id), type); |
| 1617 | memset(*data, 0, le32_to_cpu((*data)->len)); | ||
| 1617 | return; | 1618 | return; |
| 1618 | } | 1619 | } |
| 1619 | 1620 | ||
| @@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, | |||
| 1623 | if (range_size < 0) { | 1624 | if (range_size < 0) { |
| 1624 | IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", | 1625 | IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", |
| 1625 | le32_to_cpu(reg->region_id), type); | 1626 | le32_to_cpu(reg->region_id), type); |
| 1627 | memset(*data, 0, le32_to_cpu((*data)->len)); | ||
| 1626 | return; | 1628 | return; |
| 1627 | } | 1629 | } |
| 1628 | range = range + range_size; | 1630 | range = range + range_size; |
| @@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, | |||
| 1807 | 1809 | ||
| 1808 | trigger = fwrt->dump.active_trigs[id].trig; | 1810 | trigger = fwrt->dump.active_trigs[id].trig; |
| 1809 | 1811 | ||
| 1810 | size = sizeof(*dump_file); | 1812 | size = iwl_fw_ini_get_trigger_len(fwrt, trigger); |
| 1811 | size += iwl_fw_ini_get_trigger_len(fwrt, trigger); | ||
| 1812 | |||
| 1813 | if (!size) | 1813 | if (!size) |
| 1814 | return NULL; | 1814 | return NULL; |
| 1815 | 1815 | ||
| 1816 | size += sizeof(*dump_file); | ||
| 1817 | |||
| 1816 | dump_file = vzalloc(size); | 1818 | dump_file = vzalloc(size); |
| 1817 | if (!dump_file) | 1819 | if (!dump_file) |
| 1818 | return NULL; | 1820 | return NULL; |
| @@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, | |||
| 1942 | iwl_dump_error_desc->len = 0; | 1944 | iwl_dump_error_desc->len = 0; |
| 1943 | 1945 | ||
| 1944 | ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); | 1946 | ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); |
| 1945 | if (ret) { | 1947 | if (ret) |
| 1946 | kfree(iwl_dump_error_desc); | 1948 | kfree(iwl_dump_error_desc); |
| 1947 | } else { | 1949 | else |
| 1948 | set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); | 1950 | iwl_trans_sync_nmi(fwrt->trans); |
| 1949 | |||
| 1950 | /* trigger nmi to halt the fw */ | ||
| 1951 | iwl_force_nmi(fwrt->trans); | ||
| 1952 | } | ||
| 1953 | 1951 | ||
| 1954 | return ret; | 1952 | return ret; |
| 1955 | } | 1953 | } |
| @@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); | |||
| 2489 | 2487 | ||
| 2490 | void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) | 2488 | void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) |
| 2491 | { | 2489 | { |
| 2492 | /* if the wait event timeout elapses instead of wake up then | ||
| 2493 | * the driver did not receive NMI interrupt and can not assume the FW | ||
| 2494 | * is halted | ||
| 2495 | */ | ||
| 2496 | int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq, | ||
| 2497 | !test_bit(STATUS_FW_WAIT_DUMP, | ||
| 2498 | &fwrt->trans->status), | ||
| 2499 | msecs_to_jiffies(2000)); | ||
| 2500 | if (!ret) { | ||
| 2501 | /* failed to receive NMI interrupt, assuming the FW is stuck */ | ||
| 2502 | set_bit(STATUS_FW_ERROR, &fwrt->trans->status); | ||
| 2503 | |||
| 2504 | clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); | ||
| 2505 | } | ||
| 2506 | |||
| 2507 | /* Assuming the op mode mutex is held at this point */ | ||
| 2508 | iwl_fw_dbg_collect_sync(fwrt); | 2490 | iwl_fw_dbg_collect_sync(fwrt); |
| 2509 | 2491 | ||
| 2510 | iwl_trans_stop_device(fwrt->trans); | 2492 | iwl_trans_stop_device(fwrt->trans); |
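
In the _iwl_fw_error_ini_dump hunk above, the trigger payload length is now checked before the dump-file header size is added; with the old ordering the total already included sizeof(*dump_file), so the zero check could never fire. A minimal userspace sketch of the corrected ordering follows (trigger_len() and the 128-byte per-region size are hypothetical stand-ins for the driver's helpers, not the real iwlwifi code):

    #include <stdio.h>
    #include <stddef.h>

    struct dump_file_hdr { unsigned int barker; unsigned int len; };

    /* stand-in for iwl_fw_ini_get_trigger_len(): 0 means nothing to dump */
    static size_t trigger_len(int active_regions)
    {
        return (size_t)active_regions * 128u; /* hypothetical per-region size */
    }

    /* allocation size for the dump, or 0 when no dump should be produced */
    static size_t dump_alloc_size(int active_regions)
    {
        size_t size = trigger_len(active_regions);

        if (!size)      /* test the payload size before... */
            return 0;

        return size + sizeof(struct dump_file_hdr); /* ...adding the header */
    }

    int main(void)
    {
        printf("no regions -> %zu bytes\n", dump_alloc_size(0));
        printf("3 regions  -> %zu bytes\n", dump_alloc_size(3));
        return 0;
    }
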
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 7adf4e4e841a..12310e3d2fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c | |||
| @@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
| 76 | fwrt->ops_ctx = ops_ctx; | 76 | fwrt->ops_ctx = ops_ctx; |
| 77 | INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); | 77 | INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); |
| 78 | iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); | 78 | iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); |
| 79 | init_waitqueue_head(&fwrt->trans->fw_halt_waitq); | ||
| 80 | } | 79 | } |
| 81 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); | 80 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); |
| 82 | 81 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f5f87773667b..93070848280a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h | |||
| @@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; | |||
| 549 | extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; | 549 | extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; |
| 550 | extern const struct iwl_cfg iwl22000_2ac_cfg_jf; | 550 | extern const struct iwl_cfg iwl22000_2ac_cfg_jf; |
| 551 | extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; | 551 | extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; |
| 552 | extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; | ||
| 552 | extern const struct iwl_cfg iwl22000_2ax_cfg_hr; | 553 | extern const struct iwl_cfg iwl22000_2ax_cfg_hr; |
| 553 | extern const struct iwl_cfg iwl22260_2ax_cfg; | 554 | extern const struct iwl_cfg iwl_ax200_cfg_cc; |
| 554 | extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; | 555 | extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; |
| 555 | extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; | 556 | extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; |
| 556 | extern const struct iwl_cfg killer1650x_2ax_cfg; | 557 | extern const struct iwl_cfg killer1650x_2ax_cfg; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index aea6d03e545a..e539bc94eff7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h | |||
| @@ -327,6 +327,7 @@ enum { | |||
| 327 | #define CSR_HW_REV_TYPE_NONE (0x00001F0) | 327 | #define CSR_HW_REV_TYPE_NONE (0x00001F0) |
| 328 | #define CSR_HW_REV_TYPE_QNJ (0x0000360) | 328 | #define CSR_HW_REV_TYPE_QNJ (0x0000360) |
| 329 | #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) | 329 | #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) |
| 330 | #define CSR_HW_REV_TYPE_QUZ (0x0000354) | ||
| 330 | #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) | 331 | #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) |
| 331 | #define CSR_HW_REV_TYPE_SO (0x0000370) | 332 | #define CSR_HW_REV_TYPE_SO (0x0000370) |
| 332 | #define CSR_HW_REV_TYPE_TY (0x0000420) | 333 | #define CSR_HW_REV_TYPE_TY (0x0000420) |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index bbebbf3efd57..d8690acee40c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h | |||
| @@ -338,7 +338,6 @@ enum iwl_d3_status { | |||
| 338 | * are sent | 338 | * are sent |
| 339 | * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent | 339 | * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent |
| 340 | * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation | 340 | * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation |
| 341 | * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump | ||
| 342 | */ | 341 | */ |
| 343 | enum iwl_trans_status { | 342 | enum iwl_trans_status { |
| 344 | STATUS_SYNC_HCMD_ACTIVE, | 343 | STATUS_SYNC_HCMD_ACTIVE, |
| @@ -351,7 +350,6 @@ enum iwl_trans_status { | |||
| 351 | STATUS_TRANS_GOING_IDLE, | 350 | STATUS_TRANS_GOING_IDLE, |
| 352 | STATUS_TRANS_IDLE, | 351 | STATUS_TRANS_IDLE, |
| 353 | STATUS_TRANS_DEAD, | 352 | STATUS_TRANS_DEAD, |
| 354 | STATUS_FW_WAIT_DUMP, | ||
| 355 | }; | 353 | }; |
| 356 | 354 | ||
| 357 | static inline int | 355 | static inline int |
| @@ -618,6 +616,7 @@ struct iwl_trans_ops { | |||
| 618 | struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, | 616 | struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, |
| 619 | u32 dump_mask); | 617 | u32 dump_mask); |
| 620 | void (*debugfs_cleanup)(struct iwl_trans *trans); | 618 | void (*debugfs_cleanup)(struct iwl_trans *trans); |
| 619 | void (*sync_nmi)(struct iwl_trans *trans); | ||
| 621 | }; | 620 | }; |
| 622 | 621 | ||
| 623 | /** | 622 | /** |
| @@ -831,7 +830,6 @@ struct iwl_trans { | |||
| 831 | u32 lmac_error_event_table[2]; | 830 | u32 lmac_error_event_table[2]; |
| 832 | u32 umac_error_event_table; | 831 | u32 umac_error_event_table; |
| 833 | unsigned int error_event_table_tlv_status; | 832 | unsigned int error_event_table_tlv_status; |
| 834 | wait_queue_head_t fw_halt_waitq; | ||
| 835 | 833 | ||
| 836 | /* pointer to trans specific struct */ | 834 | /* pointer to trans specific struct */ |
| 837 | /*Ensure that this pointer will always be aligned to sizeof pointer */ | 835 | /*Ensure that this pointer will always be aligned to sizeof pointer */ |
| @@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) | |||
| 1239 | /* prevent double restarts due to the same erroneous FW */ | 1237 | /* prevent double restarts due to the same erroneous FW */ |
| 1240 | if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) | 1238 | if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) |
| 1241 | iwl_op_mode_nic_error(trans->op_mode); | 1239 | iwl_op_mode_nic_error(trans->op_mode); |
| 1240 | } | ||
| 1242 | 1241 | ||
| 1243 | if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) | 1242 | static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) |
| 1244 | wake_up(&trans->fw_halt_waitq); | 1243 | { |
| 1245 | 1244 | if (trans->ops->sync_nmi) | |
| 1245 | trans->ops->sync_nmi(trans); | ||
| 1246 | } | 1246 | } |
| 1247 | 1247 | ||
| 1248 | /***************************************************** | 1248 | /***************************************************** |
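
The iwl-trans.h hunks above drop the fw_halt_waitq/STATUS_FW_WAIT_DUMP handshake in favour of an optional sync_nmi transport op that is dispatched only when the backend fills the pointer in. A minimal userspace sketch of that optional-op idiom is shown below; the struct and function names are illustrative stand-ins, not the driver's real types:

    #include <stdio.h>

    struct trans;

    struct trans_ops {
        /* optional: transports that cannot sync an NMI leave this NULL */
        void (*sync_nmi)(struct trans *trans);
    };

    struct trans {
        const struct trans_ops *ops;
    };

    /* wrapper mirrors the new inline: dispatch only when implemented */
    static void trans_sync_nmi(struct trans *trans)
    {
        if (trans->ops->sync_nmi)
            trans->ops->sync_nmi(trans);
    }

    static void pcie_sync_nmi(struct trans *trans)
    {
        (void)trans;
        printf("NMI triggered and waited for synchronously\n");
    }

    static const struct trans_ops pcie_ops = { .sync_nmi = pcie_sync_nmi };
    static const struct trans_ops null_ops; /* sync_nmi left NULL */

    int main(void)
    {
        struct trans pcie = { .ops = &pcie_ops };
        struct trans dummy = { .ops = &null_ops };

        trans_sync_nmi(&pcie);  /* calls the backend implementation */
        trans_sync_nmi(&dummy); /* silently a no-op */
        return 0;
    }
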
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3a92c09d4692..6a3b11dd2edf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, | |||
| 2714 | 2714 | ||
| 2715 | iwl_mvm_mac_ctxt_remove(mvm, vif); | 2715 | iwl_mvm_mac_ctxt_remove(mvm, vif); |
| 2716 | 2716 | ||
| 2717 | kfree(mvmvif->ap_wep_key); | ||
| 2718 | mvmvif->ap_wep_key = NULL; | ||
| 2719 | |||
| 2720 | mutex_unlock(&mvm->mutex); | 2717 | mutex_unlock(&mvm->mutex); |
| 2721 | } | 2718 | } |
| 2722 | 2719 | ||
| @@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
| 3183 | ret = iwl_mvm_update_sta(mvm, vif, sta); | 3180 | ret = iwl_mvm_update_sta(mvm, vif, sta); |
| 3184 | } else if (old_state == IEEE80211_STA_ASSOC && | 3181 | } else if (old_state == IEEE80211_STA_ASSOC && |
| 3185 | new_state == IEEE80211_STA_AUTHORIZED) { | 3182 | new_state == IEEE80211_STA_AUTHORIZED) { |
| 3186 | /* if wep is used, need to set the key for the station now */ | 3183 | ret = 0; |
| 3187 | if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { | ||
| 3188 | mvm_sta->wep_key = | ||
| 3189 | kmemdup(mvmvif->ap_wep_key, | ||
| 3190 | sizeof(*mvmvif->ap_wep_key) + | ||
| 3191 | mvmvif->ap_wep_key->keylen, | ||
| 3192 | GFP_KERNEL); | ||
| 3193 | if (!mvm_sta->wep_key) { | ||
| 3194 | ret = -ENOMEM; | ||
| 3195 | goto out_unlock; | ||
| 3196 | } | ||
| 3197 | |||
| 3198 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, | ||
| 3199 | mvm_sta->wep_key, | ||
| 3200 | STA_KEY_IDX_INVALID); | ||
| 3201 | } else { | ||
| 3202 | ret = 0; | ||
| 3203 | } | ||
| 3204 | 3184 | ||
| 3205 | /* we don't support TDLS during DCM */ | 3185 | /* we don't support TDLS during DCM */ |
| 3206 | if (iwl_mvm_phy_ctx_count(mvm) > 1) | 3186 | if (iwl_mvm_phy_ctx_count(mvm) > 1) |
| @@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
| 3242 | NL80211_TDLS_DISABLE_LINK); | 3222 | NL80211_TDLS_DISABLE_LINK); |
| 3243 | } | 3223 | } |
| 3244 | 3224 | ||
| 3245 | /* Remove STA key if this is an AP using WEP */ | ||
| 3246 | if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { | ||
| 3247 | int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, | ||
| 3248 | mvm_sta->wep_key); | ||
| 3249 | |||
| 3250 | if (!ret) | ||
| 3251 | ret = rm_ret; | ||
| 3252 | kfree(mvm_sta->wep_key); | ||
| 3253 | mvm_sta->wep_key = NULL; | ||
| 3254 | } | ||
| 3255 | |||
| 3256 | if (unlikely(ret && | 3225 | if (unlikely(ret && |
| 3257 | test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, | 3226 | test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, |
| 3258 | &mvm->status))) | 3227 | &mvm->status))) |
| @@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, | |||
| 3289 | struct ieee80211_sta *sta, u32 changed) | 3258 | struct ieee80211_sta *sta, u32 changed) |
| 3290 | { | 3259 | { |
| 3291 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | 3260 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); |
| 3261 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | ||
| 3262 | |||
| 3263 | if (changed & (IEEE80211_RC_BW_CHANGED | | ||
| 3264 | IEEE80211_RC_SUPP_RATES_CHANGED | | ||
| 3265 | IEEE80211_RC_NSS_CHANGED)) | ||
| 3266 | iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, | ||
| 3267 | true); | ||
| 3292 | 3268 | ||
| 3293 | if (vif->type == NL80211_IFTYPE_STATION && | 3269 | if (vif->type == NL80211_IFTYPE_STATION && |
| 3294 | changed & IEEE80211_RC_NSS_CHANGED) | 3270 | changed & IEEE80211_RC_NSS_CHANGED) |
| @@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
| 3439 | break; | 3415 | break; |
| 3440 | case WLAN_CIPHER_SUITE_WEP40: | 3416 | case WLAN_CIPHER_SUITE_WEP40: |
| 3441 | case WLAN_CIPHER_SUITE_WEP104: | 3417 | case WLAN_CIPHER_SUITE_WEP104: |
| 3442 | if (vif->type == NL80211_IFTYPE_AP) { | 3418 | if (vif->type == NL80211_IFTYPE_STATION) |
| 3443 | struct iwl_mvm_vif *mvmvif = | 3419 | break; |
| 3444 | iwl_mvm_vif_from_mac80211(vif); | 3420 | if (iwl_mvm_has_new_tx_api(mvm)) |
| 3445 | 3421 | return -EOPNOTSUPP; | |
| 3446 | mvmvif->ap_wep_key = kmemdup(key, | 3422 | /* support HW crypto on TX */ |
| 3447 | sizeof(*key) + key->keylen, | 3423 | return 0; |
| 3448 | GFP_KERNEL); | ||
| 3449 | if (!mvmvif->ap_wep_key) | ||
| 3450 | return -ENOMEM; | ||
| 3451 | } | ||
| 3452 | |||
| 3453 | if (vif->type != NL80211_IFTYPE_STATION) | ||
| 3454 | return 0; | ||
| 3455 | break; | ||
| 3456 | default: | 3424 | default: |
| 3457 | /* currently FW supports only one optional cipher scheme */ | 3425 | /* currently FW supports only one optional cipher scheme */ |
| 3458 | if (hw->n_cipher_schemes && | 3426 | if (hw->n_cipher_schemes && |
| @@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
| 3540 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); | 3508 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); |
| 3541 | if (ret) { | 3509 | if (ret) { |
| 3542 | IWL_WARN(mvm, "set key failed\n"); | 3510 | IWL_WARN(mvm, "set key failed\n"); |
| 3511 | key->hw_key_idx = STA_KEY_IDX_INVALID; | ||
| 3543 | /* | 3512 | /* |
| 3544 | * can't add key for RX, but we don't need it | 3513 | * can't add key for RX, but we don't need it |
| 3545 | * in the device for TX so still return 0 | 3514 | * in the device for TX so still return 0, |
| 3515 | * unless we have new TX API where we cannot | ||
| 3516 | * put key material into the TX_CMD | ||
| 3546 | */ | 3517 | */ |
| 3547 | key->hw_key_idx = STA_KEY_IDX_INVALID; | 3518 | if (iwl_mvm_has_new_tx_api(mvm)) |
| 3548 | ret = 0; | 3519 | ret = -EOPNOTSUPP; |
| 3520 | else | ||
| 3521 | ret = 0; | ||
| 3549 | } | 3522 | } |
| 3550 | 3523 | ||
| 3551 | break; | 3524 | break; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index bca6f6b536d9..a50dc53df086 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
| @@ -498,7 +498,6 @@ struct iwl_mvm_vif { | |||
| 498 | netdev_features_t features; | 498 | netdev_features_t features; |
| 499 | 499 | ||
| 500 | struct iwl_probe_resp_data __rcu *probe_resp_data; | 500 | struct iwl_probe_resp_data __rcu *probe_resp_data; |
| 501 | struct ieee80211_key_conf *ap_wep_key; | ||
| 502 | }; | 501 | }; |
| 503 | 502 | ||
| 504 | static inline struct iwl_mvm_vif * | 503 | static inline struct iwl_mvm_vif * |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 498c315291cf..98d123dd7177 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 11 | * Copyright(c) 2018 Intel Corporation | 11 | * Copyright(c) 2018 - 2019 Intel Corporation |
| 12 | * | 12 | * |
| 13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
| 14 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -31,7 +31,7 @@ | |||
| 31 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. | 31 | * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. |
| 32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 32 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 33 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 34 | * Copyright(c) 2018 Intel Corporation | 34 | * Copyright(c) 2018 - 2019 Intel Corporation |
| 35 | * All rights reserved. | 35 | * All rights reserved. |
| 36 | * | 36 | * |
| 37 | * Redistribution and use in source and binary forms, with or without | 37 | * Redistribution and use in source and binary forms, with or without |
| @@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) | |||
| 1399 | 1399 | ||
| 1400 | iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); | 1400 | iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); |
| 1401 | list_del_init(&mvmtxq->list); | 1401 | list_del_init(&mvmtxq->list); |
| 1402 | local_bh_disable(); | ||
| 1402 | iwl_mvm_mac_itxq_xmit(mvm->hw, txq); | 1403 | iwl_mvm_mac_itxq_xmit(mvm->hw, txq); |
| 1404 | local_bh_enable(); | ||
| 1403 | } | 1405 | } |
| 1404 | 1406 | ||
| 1405 | mutex_unlock(&mvm->mutex); | 1407 | mutex_unlock(&mvm->mutex); |
| @@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 2333 | iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, | 2335 | iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, |
| 2334 | timeout); | 2336 | timeout); |
| 2335 | 2337 | ||
| 2336 | if (mvmvif->ap_wep_key) { | ||
| 2337 | u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); | ||
| 2338 | |||
| 2339 | __set_bit(key_offset, mvm->fw_key_table); | ||
| 2340 | |||
| 2341 | if (key_offset == STA_KEY_IDX_INVALID) | ||
| 2342 | return -ENOSPC; | ||
| 2343 | |||
| 2344 | ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, | ||
| 2345 | mvmvif->ap_wep_key, true, 0, NULL, 0, | ||
| 2346 | key_offset, 0); | ||
| 2347 | if (ret) | ||
| 2348 | return ret; | ||
| 2349 | } | ||
| 2350 | |||
| 2351 | return 0; | 2338 | return 0; |
| 2352 | } | 2339 | } |
| 2353 | 2340 | ||
| @@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 2419 | 2406 | ||
| 2420 | iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); | 2407 | iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); |
| 2421 | 2408 | ||
| 2422 | if (mvmvif->ap_wep_key) { | ||
| 2423 | int i; | ||
| 2424 | |||
| 2425 | if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx, | ||
| 2426 | mvm->fw_key_table)) { | ||
| 2427 | IWL_ERR(mvm, "offset %d not used in fw key table.\n", | ||
| 2428 | mvmvif->ap_wep_key->hw_key_idx); | ||
| 2429 | return -ENOENT; | ||
| 2430 | } | ||
| 2431 | |||
| 2432 | /* track which key was deleted last */ | ||
| 2433 | for (i = 0; i < STA_KEY_MAX_NUM; i++) { | ||
| 2434 | if (mvm->fw_key_deleted[i] < U8_MAX) | ||
| 2435 | mvm->fw_key_deleted[i]++; | ||
| 2436 | } | ||
| 2437 | mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0; | ||
| 2438 | ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id, | ||
| 2439 | mvmvif->ap_wep_key, true); | ||
| 2440 | if (ret) | ||
| 2441 | return ret; | ||
| 2442 | } | ||
| 2443 | |||
| 2444 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); | 2409 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); |
| 2445 | if (ret) | 2410 | if (ret) |
| 2446 | IWL_WARN(mvm, "Failed sending remove station\n"); | 2411 | IWL_WARN(mvm, "Failed sending remove station\n"); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 79700c7310a1..b4d4071b865d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
| 10 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH | 10 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH |
| 11 | * Copyright(c) 2018 Intel Corporation | 11 | * Copyright(c) 2018 - 2019 Intel Corporation |
| 12 | * | 12 | * |
| 13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
| 14 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -31,7 +31,7 @@ | |||
| 31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 31 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 32 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 32 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
| 33 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH | 33 | * Copyright(c) 2015 - 2016 Intel Deutschland GmbH |
| 34 | * Copyright(c) 2018 Intel Corporation | 34 | * Copyright(c) 2018 - 2019 Intel Corporation |
| 35 | * All rights reserved. | 35 | * All rights reserved. |
| 36 | * | 36 | * |
| 37 | * Redistribution and use in source and binary forms, with or without | 37 | * Redistribution and use in source and binary forms, with or without |
| @@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data { | |||
| 394 | * the BA window. To be used for UAPSD only. | 394 | * the BA window. To be used for UAPSD only. |
| 395 | * @ptk_pn: per-queue PTK PN data structures | 395 | * @ptk_pn: per-queue PTK PN data structures |
| 396 | * @dup_data: per queue duplicate packet detection data | 396 | * @dup_data: per queue duplicate packet detection data |
| 397 | * @wep_key: used in AP mode. Is a duplicate of the WEP key. | ||
| 398 | * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID | 397 | * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID |
| 399 | * @tx_ant: the index of the antenna to use for data tx to this station. Only | 398 | * @tx_ant: the index of the antenna to use for data tx to this station. Only |
| 400 | * used during connection establishment (e.g. for the 4 way handshake | 399 | * used during connection establishment (e.g. for the 4 way handshake |
| @@ -426,8 +425,6 @@ struct iwl_mvm_sta { | |||
| 426 | struct iwl_mvm_key_pn __rcu *ptk_pn[4]; | 425 | struct iwl_mvm_key_pn __rcu *ptk_pn[4]; |
| 427 | struct iwl_mvm_rxq_dup_data *dup_data; | 426 | struct iwl_mvm_rxq_dup_data *dup_data; |
| 428 | 427 | ||
| 429 | struct ieee80211_key_conf *wep_key; | ||
| 430 | |||
| 431 | u8 reserved_queue; | 428 | u8 reserved_queue; |
| 432 | 429 | ||
| 433 | /* Temporary, until the new TLC will control the Tx protection */ | 430 | /* Temporary, until the new TLC will control the Tx protection */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2b94e4cef56c..9f1af8da9dc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
| @@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 953 | {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, | 953 | {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, |
| 954 | {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, | 954 | {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, |
| 955 | 955 | ||
| 956 | {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, | 956 | {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, |
| 957 | {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, | 957 | {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, |
| 958 | {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, | 958 | {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)}, |
| 959 | {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, | 959 | {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)}, |
| 960 | {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, | 960 | {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, |
| 961 | {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, | 961 | {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, |
| 962 | {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, | 962 | {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)}, |
| 963 | {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, | 963 | {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, |
| 964 | {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, | ||
| 964 | 965 | ||
| 965 | {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, | 966 | {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, |
| 966 | {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, | 967 | {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index bf8b61a476c5..59213164f35e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h | |||
| @@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) | |||
| 1043 | 1043 | ||
| 1044 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); | 1044 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); |
| 1045 | void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); | 1045 | void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); |
| 1046 | void iwl_trans_sync_nmi(struct iwl_trans *trans); | 1046 | void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); |
| 1047 | 1047 | ||
| 1048 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 1048 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 1049 | int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); | 1049 | int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index fe8269d023de..79c1dc05f948 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
| @@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) | |||
| 3318 | .unref = iwl_trans_pcie_unref, \ | 3318 | .unref = iwl_trans_pcie_unref, \ |
| 3319 | .dump_data = iwl_trans_pcie_dump_data, \ | 3319 | .dump_data = iwl_trans_pcie_dump_data, \ |
| 3320 | .d3_suspend = iwl_trans_pcie_d3_suspend, \ | 3320 | .d3_suspend = iwl_trans_pcie_d3_suspend, \ |
| 3321 | .d3_resume = iwl_trans_pcie_d3_resume | 3321 | .d3_resume = iwl_trans_pcie_d3_resume, \ |
| 3322 | .sync_nmi = iwl_trans_pcie_sync_nmi | ||
| 3322 | 3323 | ||
| 3323 | #ifdef CONFIG_PM_SLEEP | 3324 | #ifdef CONFIG_PM_SLEEP |
| 3324 | #define IWL_TRANS_PM_OPS \ | 3325 | #define IWL_TRANS_PM_OPS \ |
| @@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
| 3542 | } | 3543 | } |
| 3543 | } else if (cfg == &iwl_ax101_cfg_qu_hr) { | 3544 | } else if (cfg == &iwl_ax101_cfg_qu_hr) { |
| 3544 | if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3545 | if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
| 3546 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && | ||
| 3547 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { | ||
| 3548 | trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; | ||
| 3549 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | ||
| 3545 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { | 3550 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { |
| 3546 | trans->cfg = &iwl_ax101_cfg_qu_hr; | 3551 | trans->cfg = &iwl_ax101_cfg_qu_hr; |
| 3547 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3552 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
| @@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
| 3560 | } | 3565 | } |
| 3561 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == | 3566 | } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == |
| 3562 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && | 3567 | CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && |
| 3563 | (trans->cfg != &iwl22260_2ax_cfg || | 3568 | (trans->cfg != &iwl_ax200_cfg_cc || |
| 3564 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { | 3569 | trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { |
| 3565 | u32 hw_status; | 3570 | u32 hw_status; |
| 3566 | 3571 | ||
| @@ -3637,7 +3642,7 @@ out_no_pci: | |||
| 3637 | return ERR_PTR(ret); | 3642 | return ERR_PTR(ret); |
| 3638 | } | 3643 | } |
| 3639 | 3644 | ||
| 3640 | void iwl_trans_sync_nmi(struct iwl_trans *trans) | 3645 | void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) |
| 3641 | { | 3646 | { |
| 3642 | unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; | 3647 | unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; |
| 3643 | 3648 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 88530d9f4a54..38d110338987 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | |||
| @@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, | |||
| 965 | cmd_str); | 965 | cmd_str); |
| 966 | ret = -ETIMEDOUT; | 966 | ret = -ETIMEDOUT; |
| 967 | 967 | ||
| 968 | iwl_trans_sync_nmi(trans); | 968 | iwl_trans_pcie_sync_nmi(trans); |
| 969 | goto cancel; | 969 | goto cancel; |
| 970 | } | 970 | } |
| 971 | 971 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 9fbd37d23e85..7be73e2c4681 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
| @@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, | |||
| 1960 | iwl_get_cmd_string(trans, cmd->id)); | 1960 | iwl_get_cmd_string(trans, cmd->id)); |
| 1961 | ret = -ETIMEDOUT; | 1961 | ret = -ETIMEDOUT; |
| 1962 | 1962 | ||
| 1963 | iwl_trans_sync_nmi(trans); | 1963 | iwl_trans_pcie_sync_nmi(trans); |
| 1964 | goto cancel; | 1964 | goto cancel; |
| 1965 | } | 1965 | } |
| 1966 | 1966 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0838af04d681..524eb5805995 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2644 | enum nl80211_band band; | 2644 | enum nl80211_band band; |
| 2645 | const struct ieee80211_ops *ops = &mac80211_hwsim_ops; | 2645 | const struct ieee80211_ops *ops = &mac80211_hwsim_ops; |
| 2646 | struct net *net; | 2646 | struct net *net; |
| 2647 | int idx; | 2647 | int idx, i; |
| 2648 | int n_limits = 0; | 2648 | int n_limits = 0; |
| 2649 | 2649 | ||
| 2650 | if (WARN_ON(param->channels > 1 && !param->use_chanctx)) | 2650 | if (WARN_ON(param->channels > 1 && !param->use_chanctx)) |
| @@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2768 | goto failed_hw; | 2768 | goto failed_hw; |
| 2769 | } | 2769 | } |
| 2770 | 2770 | ||
| 2771 | data->if_combination.max_interfaces = 0; | ||
| 2772 | for (i = 0; i < n_limits; i++) | ||
| 2773 | data->if_combination.max_interfaces += | ||
| 2774 | data->if_limits[i].max; | ||
| 2775 | |||
| 2771 | data->if_combination.n_limits = n_limits; | 2776 | data->if_combination.n_limits = n_limits; |
| 2772 | data->if_combination.max_interfaces = 2048; | ||
| 2773 | data->if_combination.limits = data->if_limits; | 2777 | data->if_combination.limits = data->if_limits; |
| 2774 | 2778 | ||
| 2775 | hw->wiphy->iface_combinations = &data->if_combination; | 2779 | /* |
| 2776 | hw->wiphy->n_iface_combinations = 1; | 2780 | * If we actually were asked to support combinations, |
| 2781 | * advertise them - if there's only a single thing like | ||
| 2782 | * only IBSS then don't advertise it as combinations. | ||
| 2783 | */ | ||
| 2784 | if (data->if_combination.max_interfaces > 1) { | ||
| 2785 | hw->wiphy->iface_combinations = &data->if_combination; | ||
| 2786 | hw->wiphy->n_iface_combinations = 1; | ||
| 2787 | } | ||
| 2777 | 2788 | ||
| 2778 | if (param->ciphers) { | 2789 | if (param->ciphers) { |
| 2779 | memcpy(data->ciphers, param->ciphers, | 2790 | memcpy(data->ciphers, param->ciphers, |
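
The mac80211_hwsim hunk above computes max_interfaces as the sum of the per-type limits instead of the fixed 2048, and only advertises the combination when that sum exceeds one. A small userspace sketch of the same accounting follows; iface_limit and the example numbers are illustrative stand-ins for struct ieee80211_iface_limit:

    #include <stdio.h>

    /* stand-in for struct ieee80211_iface_limit: only the field being summed */
    struct iface_limit {
        unsigned int max;
    };

    /* total up the per-type maxima, as the new hwsim code does */
    static unsigned int total_interfaces(const struct iface_limit *limits, int n)
    {
        unsigned int total = 0;
        int i;

        for (i = 0; i < n; i++)
            total += limits[i].max;
        return total;
    }

    int main(void)
    {
        /* hypothetical radio: 2048 "normal" interfaces plus one ad-hoc */
        struct iface_limit limits[] = { { .max = 2048 }, { .max = 1 } };
        unsigned int total = total_interfaces(limits, 2);

        /* advertise combinations only when more than one interface fits */
        printf("max_interfaces=%u advertise=%s\n", total,
               total > 1 ? "yes" : "no");
        return 0;
    }
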
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index d54dda67d036..3af45949e868 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c | |||
| @@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev) | |||
| 510 | bus_ops->rmw = mt7603_rmw; | 510 | bus_ops->rmw = mt7603_rmw; |
| 511 | dev->mt76.bus = bus_ops; | 511 | dev->mt76.bus = bus_ops; |
| 512 | 512 | ||
| 513 | spin_lock_init(&dev->ps_lock); | ||
| 514 | |||
| 513 | INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); | 515 | INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); |
| 514 | tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, | 516 | tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, |
| 515 | (unsigned long)dev); | 517 | (unsigned long)dev); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 5e31d7da96fc..5abc02b57818 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c | |||
| @@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid) | |||
| 343 | MT_BA_CONTROL_1_RESET)); | 343 | MT_BA_CONTROL_1_RESET)); |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | 346 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, |
| 347 | int ba_size) | 347 | int ba_size) |
| 348 | { | 348 | { |
| 349 | u32 addr = mt7603_wtbl2_addr(wcid); | 349 | u32 addr = mt7603_wtbl2_addr(wcid); |
| @@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | |||
| 358 | mt76_clear(dev, addr + (15 * 4), tid_mask); | 358 | mt76_clear(dev, addr + (15 * 4), tid_mask); |
| 359 | return; | 359 | return; |
| 360 | } | 360 | } |
| 361 | mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); | ||
| 362 | |||
| 363 | mt7603_mac_stop(dev); | ||
| 364 | switch (tid) { | ||
| 365 | case 0: | ||
| 366 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn); | ||
| 367 | break; | ||
| 368 | case 1: | ||
| 369 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn); | ||
| 370 | break; | ||
| 371 | case 2: | ||
| 372 | mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO, | ||
| 373 | ssn); | ||
| 374 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI, | ||
| 375 | ssn >> 8); | ||
| 376 | break; | ||
| 377 | case 3: | ||
| 378 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn); | ||
| 379 | break; | ||
| 380 | case 4: | ||
| 381 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn); | ||
| 382 | break; | ||
| 383 | case 5: | ||
| 384 | mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO, | ||
| 385 | ssn); | ||
| 386 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI, | ||
| 387 | ssn >> 4); | ||
| 388 | break; | ||
| 389 | case 6: | ||
| 390 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn); | ||
| 391 | break; | ||
| 392 | case 7: | ||
| 393 | mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn); | ||
| 394 | break; | ||
| 395 | } | ||
| 396 | mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2); | ||
| 397 | mt7603_mac_start(dev); | ||
| 398 | 361 | ||
| 399 | for (i = 7; i > 0; i--) { | 362 | for (i = 7; i > 0; i--) { |
| 400 | if (ba_size >= MT_AGG_SIZE_LIMIT(i)) | 363 | if (ba_size >= MT_AGG_SIZE_LIMIT(i)) |
| @@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
| 827 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 790 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
| 828 | struct ieee80211_tx_rate *rate = &info->control.rates[0]; | 791 | struct ieee80211_tx_rate *rate = &info->control.rates[0]; |
| 829 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 792 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
| 793 | struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; | ||
| 830 | struct ieee80211_vif *vif = info->control.vif; | 794 | struct ieee80211_vif *vif = info->control.vif; |
| 831 | struct mt7603_vif *mvif; | 795 | struct mt7603_vif *mvif; |
| 832 | int wlan_idx; | 796 | int wlan_idx; |
| @@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
| 834 | int tx_count = 8; | 798 | int tx_count = 8; |
| 835 | u8 frame_type, frame_subtype; | 799 | u8 frame_type, frame_subtype; |
| 836 | u16 fc = le16_to_cpu(hdr->frame_control); | 800 | u16 fc = le16_to_cpu(hdr->frame_control); |
| 801 | u16 seqno = 0; | ||
| 837 | u8 vif_idx = 0; | 802 | u8 vif_idx = 0; |
| 838 | u32 val; | 803 | u32 val; |
| 839 | u8 bw; | 804 | u8 bw; |
| @@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, | |||
| 919 | tx_count = 0x1f; | 884 | tx_count = 0x1f; |
| 920 | 885 | ||
| 921 | val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | | 886 | val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | |
| 922 | FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); | 887 | MT_TXD3_SN_VALID; |
| 888 | |||
| 889 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
| 890 | seqno = le16_to_cpu(hdr->seq_ctrl); | ||
| 891 | else if (ieee80211_is_back_req(hdr->frame_control)) | ||
| 892 | seqno = le16_to_cpu(bar->start_seq_num); | ||
| 893 | else | ||
| 894 | val &= ~MT_TXD3_SN_VALID; | ||
| 895 | |||
| 896 | val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4); | ||
| 897 | |||
| 923 | txwi[3] = cpu_to_le32(val); | 898 | txwi[3] = cpu_to_le32(val); |
| 924 | 899 | ||
| 925 | if (key) { | 900 | if (key) { |
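
In the mt7603 txwi hunk above, MT_TXD3_SEQ is now filled with the 802.11 sequence number rather than the raw seq_ctrl word; the low four bits of seq_ctrl carry the fragment number, so the sequence number is the value shifted right by four, the same extraction mac80211's IEEE80211_SEQ_TO_SN performs. A standalone sketch of that split:

    #include <stdio.h>
    #include <stdint.h>

    #define SCTL_FRAG 0x000F  /* low 4 bits: fragment number */
    #define SCTL_SEQ  0xFFF0  /* high 12 bits: sequence number */

    /* sequence number as programmed into the TX descriptor */
    static uint16_t seq_to_sn(uint16_t seq_ctrl)
    {
        return (uint16_t)((seq_ctrl & SCTL_SEQ) >> 4);
    }

    int main(void)
    {
        uint16_t seq_ctrl = 0x1234; /* hypothetical header value */

        printf("frag=%u sn=%u\n",
               seq_ctrl & SCTL_FRAG, seq_to_sn(seq_ctrl));
        return 0;
    }
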
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index cc0fe0933b2d..a3c4ef198bfe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c | |||
| @@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) | |||
| 372 | struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; | 372 | struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; |
| 373 | struct sk_buff_head list; | 373 | struct sk_buff_head list; |
| 374 | 374 | ||
| 375 | mt76_stop_tx_queues(&dev->mt76, sta, false); | 375 | mt76_stop_tx_queues(&dev->mt76, sta, true); |
| 376 | mt7603_wtbl_set_ps(dev, msta, ps); | 376 | mt7603_wtbl_set_ps(dev, msta, ps); |
| 377 | if (ps) | 377 | if (ps) |
| 378 | return; | 378 | return; |
| @@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 584 | case IEEE80211_AMPDU_TX_OPERATIONAL: | 584 | case IEEE80211_AMPDU_TX_OPERATIONAL: |
| 585 | mtxq->aggr = true; | 585 | mtxq->aggr = true; |
| 586 | mtxq->send_bar = false; | 586 | mtxq->send_bar = false; |
| 587 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); | 587 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size); |
| 588 | break; | 588 | break; |
| 589 | case IEEE80211_AMPDU_TX_STOP_FLUSH: | 589 | case IEEE80211_AMPDU_TX_STOP_FLUSH: |
| 590 | case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: | 590 | case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: |
| 591 | mtxq->aggr = false; | 591 | mtxq->aggr = false; |
| 592 | ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); | 592 | ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); |
| 593 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); | 593 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); |
| 594 | break; | 594 | break; |
| 595 | case IEEE80211_AMPDU_TX_START: | 595 | case IEEE80211_AMPDU_TX_START: |
| 596 | mtxq->agg_ssn = *ssn << 4; | 596 | mtxq->agg_ssn = *ssn << 4; |
| @@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 598 | break; | 598 | break; |
| 599 | case IEEE80211_AMPDU_TX_STOP_CONT: | 599 | case IEEE80211_AMPDU_TX_STOP_CONT: |
| 600 | mtxq->aggr = false; | 600 | mtxq->aggr = false; |
| 601 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); | 601 | mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); |
| 602 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); | 602 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); |
| 603 | break; | 603 | break; |
| 604 | } | 604 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 79f332429432..6049f3b7c8fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | |||
| @@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval); | |||
| 200 | int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); | 200 | int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); |
| 201 | void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); | 201 | void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); |
| 202 | void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); | 202 | void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); |
| 203 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, | 203 | void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, |
| 204 | int ba_size); | 204 | int ba_size); |
| 205 | 205 | ||
| 206 | void mt7603_pse_client_reset(struct mt7603_dev *dev); | 206 | void mt7603_pse_client_reset(struct mt7603_dev *dev); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 9ed231abe916..4fe5a83ca5a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | |||
| @@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
| 466 | return; | 466 | return; |
| 467 | 467 | ||
| 468 | rcu_read_lock(); | 468 | rcu_read_lock(); |
| 469 | mt76_tx_status_lock(mdev, &list); | ||
| 470 | 469 | ||
| 471 | if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) | 470 | if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) |
| 472 | wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); | 471 | wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); |
| @@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
| 479 | drv_priv); | 478 | drv_priv); |
| 480 | } | 479 | } |
| 481 | 480 | ||
| 481 | mt76_tx_status_lock(mdev, &list); | ||
| 482 | |||
| 482 | if (wcid) { | 483 | if (wcid) { |
| 483 | if (stat->pktid >= MT_PACKET_ID_FIRST) | 484 | if (stat->pktid >= MT_PACKET_ID_FIRST) |
| 484 | status.skb = mt76_tx_status_skb_get(mdev, wcid, | 485 | status.skb = mt76_tx_status_skb_get(mdev, wcid, |
| @@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
| 498 | if (*update == 0 && stat_val == stat_cache && | 499 | if (*update == 0 && stat_val == stat_cache && |
| 499 | stat->wcid == msta->status.wcid && msta->n_frames < 32) { | 500 | stat->wcid == msta->status.wcid && msta->n_frames < 32) { |
| 500 | msta->n_frames++; | 501 | msta->n_frames++; |
| 501 | goto out; | 502 | mt76_tx_status_unlock(mdev, &list); |
| 503 | rcu_read_unlock(); | ||
| 504 | return; | ||
| 502 | } | 505 | } |
| 503 | 506 | ||
| 504 | mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, | 507 | mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, |
| @@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, | |||
| 514 | 517 | ||
| 515 | if (status.skb) | 518 | if (status.skb) |
| 516 | mt76_tx_status_skb_done(mdev, status.skb, &list); | 519 | mt76_tx_status_skb_done(mdev, status.skb, &list); |
| 517 | else | ||
| 518 | ieee80211_tx_status_ext(mt76_hw(dev), &status); | ||
| 519 | |||
| 520 | out: | ||
| 521 | mt76_tx_status_unlock(mdev, &list); | 520 | mt76_tx_status_unlock(mdev, &list); |
| 521 | |||
| 522 | if (!status.skb) | ||
| 523 | ieee80211_tx_status_ext(mt76_hw(dev), &status); | ||
| 522 | rcu_read_unlock(); | 524 | rcu_read_unlock(); |
| 523 | } | 525 | } |
| 524 | 526 | ||
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 4b1744e9fb78..50b92ca92bd7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h | |||
| @@ -673,7 +673,6 @@ enum rt2x00_state_flags { | |||
| 673 | CONFIG_CHANNEL_HT40, | 673 | CONFIG_CHANNEL_HT40, |
| 674 | CONFIG_POWERSAVING, | 674 | CONFIG_POWERSAVING, |
| 675 | CONFIG_HT_DISABLED, | 675 | CONFIG_HT_DISABLED, |
| 676 | CONFIG_QOS_DISABLED, | ||
| 677 | CONFIG_MONITORING, | 676 | CONFIG_MONITORING, |
| 678 | 677 | ||
| 679 | /* | 678 | /* |
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 2825560e2424..e8462f25d252 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | |||
| @@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, | |||
| 642 | rt2x00dev->intf_associated--; | 642 | rt2x00dev->intf_associated--; |
| 643 | 643 | ||
| 644 | rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); | 644 | rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); |
| 645 | |||
| 646 | clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); | ||
| 647 | } | 645 | } |
| 648 | 646 | ||
| 649 | /* | 647 | /* |
| 650 | * Check for access point which do not support 802.11e . We have to | ||
| 651 | * generate data frames sequence number in S/W for such AP, because | ||
| 652 | * of H/W bug. | ||
| 653 | */ | ||
| 654 | if (changes & BSS_CHANGED_QOS && !bss_conf->qos) | ||
| 655 | set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); | ||
| 656 | |||
| 657 | /* | ||
| 658 | * When the erp information has changed, we should perform | 648 | * When the erp information has changed, we should perform |
| 659 | * additional configuration steps. For all other changes we are done. | 649 | * additional configuration steps. For all other changes we are done. |
| 660 | */ | 650 | */ |
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 92ddc19e7bf7..4834b4eb0206 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | |||
| @@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
| 201 | if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { | 201 | if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { |
| 202 | /* | 202 | /* |
| 203 | * rt2800 has a H/W (or F/W) bug, device incorrectly increase | 203 | * rt2800 has a H/W (or F/W) bug, device incorrectly increase |
| 204 | * seqno on retransmited data (non-QOS) frames. To workaround | 204 | * seqno on retransmitted data (non-QOS) and management frames. |
| 205 | * the problem let's generate seqno in software if QOS is | 205 | * To workaround the problem let's generate seqno in software. |
| 206 | * disabled. | 206 | * Except for beacons which are transmitted periodically by H/W |
| 207 | * hence hardware has to assign seqno for them. | ||
| 207 | */ | 208 | */ |
| 208 | if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) | 209 | if (ieee80211_is_beacon(hdr->frame_control)) { |
| 209 | __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | 210 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); |
| 210 | else | ||
| 211 | /* H/W will generate sequence number */ | 211 | /* H/W will generate sequence number */ |
| 212 | return; | 212 | return; |
| 213 | } | ||
| 214 | |||
| 215 | __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | ||
| 213 | } | 216 | } |
| 214 | 217 | ||
| 215 | /* | 218 | /* |
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 2b26f762fbc3..01acb6e53365 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c | |||
| @@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = { | |||
| 1074 | }; | 1074 | }; |
| 1075 | MODULE_DEVICE_TABLE(spi, st95hf_id); | 1075 | MODULE_DEVICE_TABLE(spi, st95hf_id); |
| 1076 | 1076 | ||
| 1077 | static const struct of_device_id st95hf_spi_of_match[] = { | ||
| 1078 | { .compatible = "st,st95hf" }, | ||
| 1079 | { }, | ||
| 1080 | }; | ||
| 1081 | MODULE_DEVICE_TABLE(of, st95hf_spi_of_match); | ||
| 1082 | |||
| 1077 | static int st95hf_probe(struct spi_device *nfc_spi_dev) | 1083 | static int st95hf_probe(struct spi_device *nfc_spi_dev) |
| 1078 | { | 1084 | { |
| 1079 | int ret; | 1085 | int ret; |
| @@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = { | |||
| 1260 | .driver = { | 1266 | .driver = { |
| 1261 | .name = "st95hf", | 1267 | .name = "st95hf", |
| 1262 | .owner = THIS_MODULE, | 1268 | .owner = THIS_MODULE, |
| 1269 | .of_match_table = of_match_ptr(st95hf_spi_of_match), | ||
| 1263 | }, | 1270 | }, |
| 1264 | .id_table = st95hf_id, | 1271 | .id_table = st95hf_id, |
| 1265 | .probe = st95hf_probe, | 1272 | .probe = st95hf_probe, |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index b72a303176c7..9486acc08402 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
| @@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, | |||
| 198 | return NULL; | 198 | return NULL; |
| 199 | 199 | ||
| 200 | nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); | 200 | nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); |
| 201 | if (nd_btt->id < 0) { | 201 | if (nd_btt->id < 0) |
| 202 | kfree(nd_btt); | 202 | goto out_nd_btt; |
| 203 | return NULL; | ||
| 204 | } | ||
| 205 | 203 | ||
| 206 | nd_btt->lbasize = lbasize; | 204 | nd_btt->lbasize = lbasize; |
| 207 | if (uuid) | 205 | if (uuid) { |
| 208 | uuid = kmemdup(uuid, 16, GFP_KERNEL); | 206 | uuid = kmemdup(uuid, 16, GFP_KERNEL); |
| 207 | if (!uuid) | ||
| 208 | goto out_put_id; | ||
| 209 | } | ||
| 209 | nd_btt->uuid = uuid; | 210 | nd_btt->uuid = uuid; |
| 210 | dev = &nd_btt->dev; | 211 | dev = &nd_btt->dev; |
| 211 | dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id); | 212 | dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id); |
| @@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, | |||
| 220 | return NULL; | 221 | return NULL; |
| 221 | } | 222 | } |
| 222 | return dev; | 223 | return dev; |
| 224 | |||
| 225 | out_put_id: | ||
| 226 | ida_simple_remove(&nd_region->btt_ida, nd_btt->id); | ||
| 227 | |||
| 228 | out_nd_btt: | ||
| 229 | kfree(nd_btt); | ||
| 230 | return NULL; | ||
| 223 | } | 231 | } |
| 224 | 232 | ||
| 225 | struct device *nd_btt_create(struct nd_region *nd_region) | 233 | struct device *nd_btt_create(struct nd_region *nd_region) |
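
The nd_btt hunk above turns the early returns into the usual goto-unwind error path, so the freshly allocated id is also released when the uuid duplication fails. A generic sketch of that pattern, with hypothetical allocator names standing in for ida_simple_get()/ida_simple_remove() and labels unwinding in reverse order of acquisition:

    #include <stdlib.h>
    #include <string.h>

    struct thing { int id; char *name; };

    /* hypothetical id allocator/release pair */
    static int id_alloc(void) { static int next; return next++; }
    static void id_free(int id) { (void)id; }

    static struct thing *thing_create(const char *name)
    {
        struct thing *t = malloc(sizeof(*t));

        if (!t)
            return NULL;

        t->id = id_alloc();
        if (t->id < 0)
            goto out_free_thing;

        t->name = strdup(name);
        if (!t->name)
            goto out_put_id;     /* release the id before the struct */

        return t;

    out_put_id:
        id_free(t->id);
    out_free_thing:
        free(t);
        return NULL;
    }

    int main(void)
    {
        struct thing *t = thing_create("btt0.0");

        if (t) {
            id_free(t->id);
            free(t->name);
            free(t);
        }
        return 0;
    }
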
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 7849bf1812c4..f293556cbbf6 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
| @@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, | |||
| 2249 | if (!nsblk->uuid) | 2249 | if (!nsblk->uuid) |
| 2250 | goto blk_err; | 2250 | goto blk_err; |
| 2251 | memcpy(name, nd_label->name, NSLABEL_NAME_LEN); | 2251 | memcpy(name, nd_label->name, NSLABEL_NAME_LEN); |
| 2252 | if (name[0]) | 2252 | if (name[0]) { |
| 2253 | nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, | 2253 | nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, |
| 2254 | GFP_KERNEL); | 2254 | GFP_KERNEL); |
| 2255 | if (!nsblk->alt_name) | ||
| 2256 | goto blk_err; | ||
| 2257 | } | ||
| 2255 | res = nsblk_add_resource(nd_region, ndd, nsblk, | 2258 | res = nsblk_add_resource(nd_region, ndd, nsblk, |
| 2256 | __le64_to_cpu(nd_label->dpa)); | 2259 | __le64_to_cpu(nd_label->dpa)); |
| 2257 | if (!res) | 2260 | if (!res) |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index bc2f700feef8..0279eb1da3ef 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
| @@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page, | |||
| 113 | 113 | ||
| 114 | while (len) { | 114 | while (len) { |
| 115 | mem = kmap_atomic(page); | 115 | mem = kmap_atomic(page); |
| 116 | chunk = min_t(unsigned int, len, PAGE_SIZE); | 116 | chunk = min_t(unsigned int, len, PAGE_SIZE - off); |
| 117 | memcpy_flushcache(pmem_addr, mem + off, chunk); | 117 | memcpy_flushcache(pmem_addr, mem + off, chunk); |
| 118 | kunmap_atomic(mem); | 118 | kunmap_atomic(mem); |
| 119 | len -= chunk; | 119 | len -= chunk; |
| 120 | off = 0; | 120 | off = 0; |
| 121 | page++; | 121 | page++; |
| 122 | pmem_addr += PAGE_SIZE; | 122 | pmem_addr += chunk; |
| 123 | } | 123 | } |
| 124 | } | 124 | } |
| 125 | 125 | ||
| @@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, | |||
| 132 | 132 | ||
| 133 | while (len) { | 133 | while (len) { |
| 134 | mem = kmap_atomic(page); | 134 | mem = kmap_atomic(page); |
| 135 | chunk = min_t(unsigned int, len, PAGE_SIZE); | 135 | chunk = min_t(unsigned int, len, PAGE_SIZE - off); |
| 136 | rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); | 136 | rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); |
| 137 | kunmap_atomic(mem); | 137 | kunmap_atomic(mem); |
| 138 | if (rem) | 138 | if (rem) |
| @@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, | |||
| 140 | len -= chunk; | 140 | len -= chunk; |
| 141 | off = 0; | 141 | off = 0; |
| 142 | page++; | 142 | page++; |
| 143 | pmem_addr += PAGE_SIZE; | 143 | pmem_addr += chunk; |
| 144 | } | 144 | } |
| 145 | return BLK_STS_OK; | 145 | return BLK_STS_OK; |
| 146 | } | 146 | } |
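In the pmem.c hunk above, both write_pmem() and read_pmem() now clamp each chunk to PAGE_SIZE - off and advance the media pointer by the bytes actually copied, so a transfer that starts at a non-zero page offset stays inside each mapped page and keeps pmem_addr in step with the data. A userspace sketch of the corrected loop; PAGE_SIZE and the copy routine here are stand-ins, not the kernel primitives:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096u

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Copy len bytes that begin at offset off inside the first source page. */
static void copy_paged(void *dst, const unsigned char *src_page,
                       unsigned int off, size_t len)
{
        unsigned char *out = dst;

        while (len) {
                size_t chunk = min_sz(len, PAGE_SIZE - off);

                memcpy(out, src_page + off, chunk);
                len -= chunk;
                out += chunk;           /* advance by what was copied ... */
                src_page += PAGE_SIZE;  /* ... but step the source a page at a time */
                off = 0;                /* only the first page is offset */
        }
}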
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index f8bb746a549f..a570f2263a42 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c | |||
| @@ -22,6 +22,8 @@ static bool key_revalidate = true; | |||
| 22 | module_param(key_revalidate, bool, 0444); | 22 | module_param(key_revalidate, bool, 0444); |
| 23 | MODULE_PARM_DESC(key_revalidate, "Require key validation at init."); | 23 | MODULE_PARM_DESC(key_revalidate, "Require key validation at init."); |
| 24 | 24 | ||
| 25 | static const char zero_key[NVDIMM_PASSPHRASE_LEN]; | ||
| 26 | |||
| 25 | static void *key_data(struct key *key) | 27 | static void *key_data(struct key *key) |
| 26 | { | 28 | { |
| 27 | struct encrypted_key_payload *epayload = dereference_key_locked(key); | 29 | struct encrypted_key_payload *epayload = dereference_key_locked(key); |
| @@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm) | |||
| 75 | return key; | 77 | return key; |
| 76 | } | 78 | } |
| 77 | 79 | ||
| 80 | static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm, | ||
| 81 | struct key **key) | ||
| 82 | { | ||
| 83 | *key = nvdimm_request_key(nvdimm); | ||
| 84 | if (!*key) | ||
| 85 | return zero_key; | ||
| 86 | |||
| 87 | return key_data(*key); | ||
| 88 | } | ||
| 89 | |||
| 78 | static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, | 90 | static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, |
| 79 | key_serial_t id, int subclass) | 91 | key_serial_t id, int subclass) |
| 80 | { | 92 | { |
| @@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, | |||
| 105 | return key; | 117 | return key; |
| 106 | } | 118 | } |
| 107 | 119 | ||
| 108 | static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm) | 120 | static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm, |
| 121 | key_serial_t id, int subclass, struct key **key) | ||
| 122 | { | ||
| 123 | *key = NULL; | ||
| 124 | if (id == 0) { | ||
| 125 | if (subclass == NVDIMM_BASE_KEY) | ||
| 126 | return zero_key; | ||
| 127 | else | ||
| 128 | return NULL; | ||
| 129 | } | ||
| 130 | |||
| 131 | *key = nvdimm_lookup_user_key(nvdimm, id, subclass); | ||
| 132 | if (!*key) | ||
| 133 | return NULL; | ||
| 134 | |||
| 135 | return key_data(*key); | ||
| 136 | } | ||
| 137 | |||
| 138 | |||
| 139 | static int nvdimm_key_revalidate(struct nvdimm *nvdimm) | ||
| 109 | { | 140 | { |
| 110 | struct key *key; | 141 | struct key *key; |
| 111 | int rc; | 142 | int rc; |
| 143 | const void *data; | ||
| 112 | 144 | ||
| 113 | if (!nvdimm->sec.ops->change_key) | 145 | if (!nvdimm->sec.ops->change_key) |
| 114 | return NULL; | 146 | return -EOPNOTSUPP; |
| 115 | 147 | ||
| 116 | key = nvdimm_request_key(nvdimm); | 148 | data = nvdimm_get_key_payload(nvdimm, &key); |
| 117 | if (!key) | ||
| 118 | return NULL; | ||
| 119 | 149 | ||
| 120 | /* | 150 | /* |
| 121 | * Send the same key to the hardware as new and old key to | 151 | * Send the same key to the hardware as new and old key to |
| 122 | * verify that the key is good. | 152 | * verify that the key is good. |
| 123 | */ | 153 | */ |
| 124 | rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key), | 154 | rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER); |
| 125 | key_data(key), NVDIMM_USER); | ||
| 126 | if (rc < 0) { | 155 | if (rc < 0) { |
| 127 | nvdimm_put_key(key); | 156 | nvdimm_put_key(key); |
| 128 | key = NULL; | 157 | return rc; |
| 129 | } | 158 | } |
| 130 | return key; | 159 | |
| 160 | nvdimm_put_key(key); | ||
| 161 | nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER); | ||
| 162 | return 0; | ||
| 131 | } | 163 | } |
| 132 | 164 | ||
| 133 | static int __nvdimm_security_unlock(struct nvdimm *nvdimm) | 165 | static int __nvdimm_security_unlock(struct nvdimm *nvdimm) |
| 134 | { | 166 | { |
| 135 | struct device *dev = &nvdimm->dev; | 167 | struct device *dev = &nvdimm->dev; |
| 136 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 168 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
| 137 | struct key *key = NULL; | 169 | struct key *key; |
| 170 | const void *data; | ||
| 138 | int rc; | 171 | int rc; |
| 139 | 172 | ||
| 140 | /* The bus lock should be held at the top level of the call stack */ | 173 | /* The bus lock should be held at the top level of the call stack */ |
| @@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) | |||
| 160 | if (!key_revalidate) | 193 | if (!key_revalidate) |
| 161 | return 0; | 194 | return 0; |
| 162 | 195 | ||
| 163 | key = nvdimm_key_revalidate(nvdimm); | 196 | return nvdimm_key_revalidate(nvdimm); |
| 164 | if (!key) | ||
| 165 | return nvdimm_security_freeze(nvdimm); | ||
| 166 | } else | 197 | } else |
| 167 | key = nvdimm_request_key(nvdimm); | 198 | data = nvdimm_get_key_payload(nvdimm, &key); |
| 168 | 199 | ||
| 169 | if (!key) | 200 | rc = nvdimm->sec.ops->unlock(nvdimm, data); |
| 170 | return -ENOKEY; | ||
| 171 | |||
| 172 | rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key)); | ||
| 173 | dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key), | 201 | dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key), |
| 174 | rc == 0 ? "success" : "fail"); | 202 | rc == 0 ? "success" : "fail"); |
| 175 | 203 | ||
| @@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) | |||
| 195 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 223 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
| 196 | struct key *key; | 224 | struct key *key; |
| 197 | int rc; | 225 | int rc; |
| 226 | const void *data; | ||
| 198 | 227 | ||
| 199 | /* The bus lock should be held at the top level of the call stack */ | 228 | /* The bus lock should be held at the top level of the call stack */ |
| 200 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); | 229 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
| @@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) | |||
| 214 | return -EBUSY; | 243 | return -EBUSY; |
| 215 | } | 244 | } |
| 216 | 245 | ||
| 217 | key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); | 246 | data = nvdimm_get_user_key_payload(nvdimm, keyid, |
| 218 | if (!key) | 247 | NVDIMM_BASE_KEY, &key); |
| 248 | if (!data) | ||
| 219 | return -ENOKEY; | 249 | return -ENOKEY; |
| 220 | 250 | ||
| 221 | rc = nvdimm->sec.ops->disable(nvdimm, key_data(key)); | 251 | rc = nvdimm->sec.ops->disable(nvdimm, data); |
| 222 | dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), | 252 | dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), |
| 223 | rc == 0 ? "success" : "fail"); | 253 | rc == 0 ? "success" : "fail"); |
| 224 | 254 | ||
| @@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, | |||
| 235 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 265 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
| 236 | struct key *key, *newkey; | 266 | struct key *key, *newkey; |
| 237 | int rc; | 267 | int rc; |
| 268 | const void *data, *newdata; | ||
| 238 | 269 | ||
| 239 | /* The bus lock should be held at the top level of the call stack */ | 270 | /* The bus lock should be held at the top level of the call stack */ |
| 240 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); | 271 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
| @@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, | |||
| 249 | return -EIO; | 280 | return -EIO; |
| 250 | } | 281 | } |
| 251 | 282 | ||
| 252 | if (keyid == 0) | 283 | data = nvdimm_get_user_key_payload(nvdimm, keyid, |
| 253 | key = NULL; | 284 | NVDIMM_BASE_KEY, &key); |
| 254 | else { | 285 | if (!data) |
| 255 | key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); | 286 | return -ENOKEY; |
| 256 | if (!key) | ||
| 257 | return -ENOKEY; | ||
| 258 | } | ||
| 259 | 287 | ||
| 260 | newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY); | 288 | newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid, |
| 261 | if (!newkey) { | 289 | NVDIMM_NEW_KEY, &newkey); |
| 290 | if (!newdata) { | ||
| 262 | nvdimm_put_key(key); | 291 | nvdimm_put_key(key); |
| 263 | return -ENOKEY; | 292 | return -ENOKEY; |
| 264 | } | 293 | } |
| 265 | 294 | ||
| 266 | rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL, | 295 | rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type); |
| 267 | key_data(newkey), pass_type); | ||
| 268 | dev_dbg(dev, "key: %d %d update%s: %s\n", | 296 | dev_dbg(dev, "key: %d %d update%s: %s\n", |
| 269 | key_serial(key), key_serial(newkey), | 297 | key_serial(key), key_serial(newkey), |
| 270 | pass_type == NVDIMM_MASTER ? "(master)" : "(user)", | 298 | pass_type == NVDIMM_MASTER ? "(master)" : "(user)", |
| @@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, | |||
| 286 | { | 314 | { |
| 287 | struct device *dev = &nvdimm->dev; | 315 | struct device *dev = &nvdimm->dev; |
| 288 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 316 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
| 289 | struct key *key; | 317 | struct key *key = NULL; |
| 290 | int rc; | 318 | int rc; |
| 319 | const void *data; | ||
| 291 | 320 | ||
| 292 | /* The bus lock should be held at the top level of the call stack */ | 321 | /* The bus lock should be held at the top level of the call stack */ |
| 293 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); | 322 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
| @@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, | |||
| 319 | return -EOPNOTSUPP; | 348 | return -EOPNOTSUPP; |
| 320 | } | 349 | } |
| 321 | 350 | ||
| 322 | key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); | 351 | data = nvdimm_get_user_key_payload(nvdimm, keyid, |
| 323 | if (!key) | 352 | NVDIMM_BASE_KEY, &key); |
| 353 | if (!data) | ||
| 324 | return -ENOKEY; | 354 | return -ENOKEY; |
| 325 | 355 | ||
| 326 | rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type); | 356 | rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type); |
| 327 | dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key), | 357 | dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key), |
| 328 | pass_type == NVDIMM_MASTER ? "(master)" : "(user)", | 358 | pass_type == NVDIMM_MASTER ? "(master)" : "(user)", |
| 329 | rc == 0 ? "success" : "fail"); | 359 | rc == 0 ? "success" : "fail"); |
| @@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) | |||
| 337 | { | 367 | { |
| 338 | struct device *dev = &nvdimm->dev; | 368 | struct device *dev = &nvdimm->dev; |
| 339 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 369 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
| 340 | struct key *key; | 370 | struct key *key = NULL; |
| 341 | int rc; | 371 | int rc; |
| 372 | const void *data; | ||
| 342 | 373 | ||
| 343 | /* The bus lock should be held at the top level of the call stack */ | 374 | /* The bus lock should be held at the top level of the call stack */ |
| 344 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); | 375 | lockdep_assert_held(&nvdimm_bus->reconfig_mutex); |
| @@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) | |||
| 368 | return -EBUSY; | 399 | return -EBUSY; |
| 369 | } | 400 | } |
| 370 | 401 | ||
| 371 | if (keyid == 0) | 402 | data = nvdimm_get_user_key_payload(nvdimm, keyid, |
| 372 | key = NULL; | 403 | NVDIMM_BASE_KEY, &key); |
| 373 | else { | 404 | if (!data) |
| 374 | key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); | 405 | return -ENOKEY; |
| 375 | if (!key) | ||
| 376 | return -ENOKEY; | ||
| 377 | } | ||
| 378 | 406 | ||
| 379 | rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL); | 407 | rc = nvdimm->sec.ops->overwrite(nvdimm, data); |
| 380 | dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key), | 408 | dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key), |
| 381 | rc == 0 ? "success" : "fail"); | 409 | rc == 0 ? "success" : "fail"); |
| 382 | 410 | ||
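The security.c rework above funnels every caller through nvdimm_get_key_payload()/nvdimm_get_user_key_payload(), so the "no key means a zeroed passphrase" policy lives in one place and the ops callbacks always receive a payload pointer. A stripped-down userspace illustration of that shape; the key store and names are invented, and the base-key/new-key subclass distinction is dropped for brevity:

#include <stddef.h>

#define PASSPHRASE_LEN 32

struct key { unsigned char data[PASSPHRASE_LEN]; };

static const unsigned char zero_key[PASSPHRASE_LEN];

/* Stand-in lookup: a real implementation would consult a keyring. */
static struct key *lookup_user_key(int id)
{
        (void)id;
        return NULL;
}

/*
 * id == 0 asks for the default zeroed passphrase; otherwise a missing
 * key is a hard error signalled by a NULL payload.
 */
const void *get_user_key_payload(int id, struct key **key)
{
        *key = NULL;
        if (id == 0)
                return zero_key;

        *key = lookup_user_key(id);
        if (!*key)
                return NULL;

        return (*key)->data;
}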
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 470601980794..2c43e12b70af 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) | |||
| 288 | "Cancelling I/O %d", req->tag); | 288 | "Cancelling I/O %d", req->tag); |
| 289 | 289 | ||
| 290 | nvme_req(req)->status = NVME_SC_ABORT_REQ; | 290 | nvme_req(req)->status = NVME_SC_ABORT_REQ; |
| 291 | blk_mq_complete_request(req); | 291 | blk_mq_complete_request_sync(req); |
| 292 | return true; | 292 | return true; |
| 293 | } | 293 | } |
| 294 | EXPORT_SYMBOL_GPL(nvme_cancel_request); | 294 | EXPORT_SYMBOL_GPL(nvme_cancel_request); |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f3b9d91ba0df..6d8451356eac 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) | |||
| 1845 | memset(queue, 0, sizeof(*queue)); | 1845 | memset(queue, 0, sizeof(*queue)); |
| 1846 | queue->ctrl = ctrl; | 1846 | queue->ctrl = ctrl; |
| 1847 | queue->qnum = idx; | 1847 | queue->qnum = idx; |
| 1848 | atomic_set(&queue->csn, 1); | 1848 | atomic_set(&queue->csn, 0); |
| 1849 | queue->dev = ctrl->dev; | 1849 | queue->dev = ctrl->dev; |
| 1850 | 1850 | ||
| 1851 | if (idx > 0) | 1851 | if (idx > 0) |
| @@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) | |||
| 1887 | */ | 1887 | */ |
| 1888 | 1888 | ||
| 1889 | queue->connection_id = 0; | 1889 | queue->connection_id = 0; |
| 1890 | atomic_set(&queue->csn, 1); | 1890 | atomic_set(&queue->csn, 0); |
| 1891 | } | 1891 | } |
| 1892 | 1892 | ||
| 1893 | static void | 1893 | static void |
| @@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
| 2183 | { | 2183 | { |
| 2184 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | 2184 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; |
| 2185 | struct nvme_command *sqe = &cmdiu->sqe; | 2185 | struct nvme_command *sqe = &cmdiu->sqe; |
| 2186 | u32 csn; | ||
| 2187 | int ret, opstate; | 2186 | int ret, opstate; |
| 2188 | 2187 | ||
| 2189 | /* | 2188 | /* |
| @@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
| 2198 | 2197 | ||
| 2199 | /* format the FC-NVME CMD IU and fcp_req */ | 2198 | /* format the FC-NVME CMD IU and fcp_req */ |
| 2200 | cmdiu->connection_id = cpu_to_be64(queue->connection_id); | 2199 | cmdiu->connection_id = cpu_to_be64(queue->connection_id); |
| 2201 | csn = atomic_inc_return(&queue->csn); | ||
| 2202 | cmdiu->csn = cpu_to_be32(csn); | ||
| 2203 | cmdiu->data_len = cpu_to_be32(data_len); | 2200 | cmdiu->data_len = cpu_to_be32(data_len); |
| 2204 | switch (io_dir) { | 2201 | switch (io_dir) { |
| 2205 | case NVMEFC_FCP_WRITE: | 2202 | case NVMEFC_FCP_WRITE: |
| @@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
| 2257 | if (!(op->flags & FCOP_FLAGS_AEN)) | 2254 | if (!(op->flags & FCOP_FLAGS_AEN)) |
| 2258 | blk_mq_start_request(op->rq); | 2255 | blk_mq_start_request(op->rq); |
| 2259 | 2256 | ||
| 2257 | cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); | ||
| 2260 | ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, | 2258 | ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, |
| 2261 | &ctrl->rport->remoteport, | 2259 | &ctrl->rport->remoteport, |
| 2262 | queue->lldd_handle, &op->fcp_req); | 2260 | queue->lldd_handle, &op->fcp_req); |
| 2263 | 2261 | ||
| 2264 | if (ret) { | 2262 | if (ret) { |
| 2263 | /* | ||
| 2264 | * If the lld fails to send the command is there an issue with | ||
| 2265 | * the csn value? If the command that fails is the Connect, | ||
| 2266 | * no - as the connection won't be live. If it is a command | ||
| 2267 | * post-connect, it's possible a gap in csn may be created. | ||
| 2268 | * Does this matter? As Linux initiators don't send fused | ||
| 2269 | * commands, no. The gap would exist, but as there's nothing | ||
| 2270 | * that depends on csn order to be delivered on the target | ||
| 2271 | * side, it shouldn't hurt. It would be difficult for a | ||
| 2272 | * target to even detect the csn gap as it has no idea when the | ||
| 2273 | * cmd with the csn was supposed to arrive. | ||
| 2274 | */ | ||
| 2265 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); | 2275 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); |
| 2266 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); | 2276 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
| 2267 | 2277 | ||
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 76250181fee0..9f72d515fc4b 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
| @@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) | |||
| 24 | return len; | 24 | return len; |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | u64 nvmet_get_log_page_offset(struct nvme_command *cmd) | ||
| 28 | { | ||
| 29 | return le64_to_cpu(cmd->get_log_page.lpo); | ||
| 30 | } | ||
| 31 | |||
| 27 | static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) | 32 | static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) |
| 28 | { | 33 | { |
| 29 | nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); | 34 | nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); |
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index c872b47a88f3..33ed95e72d6b 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c | |||
| @@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port | |||
| 131 | memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); | 131 | memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | static size_t discovery_log_entries(struct nvmet_req *req) | ||
| 135 | { | ||
| 136 | struct nvmet_ctrl *ctrl = req->sq->ctrl; | ||
| 137 | struct nvmet_subsys_link *p; | ||
| 138 | struct nvmet_port *r; | ||
| 139 | size_t entries = 0; | ||
| 140 | |||
| 141 | list_for_each_entry(p, &req->port->subsystems, entry) { | ||
| 142 | if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) | ||
| 143 | continue; | ||
| 144 | entries++; | ||
| 145 | } | ||
| 146 | list_for_each_entry(r, &req->port->referrals, entry) | ||
| 147 | entries++; | ||
| 148 | return entries; | ||
| 149 | } | ||
| 150 | |||
| 134 | static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) | 151 | static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) |
| 135 | { | 152 | { |
| 136 | const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); | 153 | const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); |
| 137 | struct nvmet_ctrl *ctrl = req->sq->ctrl; | 154 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 138 | struct nvmf_disc_rsp_page_hdr *hdr; | 155 | struct nvmf_disc_rsp_page_hdr *hdr; |
| 156 | u64 offset = nvmet_get_log_page_offset(req->cmd); | ||
| 139 | size_t data_len = nvmet_get_log_page_len(req->cmd); | 157 | size_t data_len = nvmet_get_log_page_len(req->cmd); |
| 140 | size_t alloc_len = max(data_len, sizeof(*hdr)); | 158 | size_t alloc_len; |
| 141 | int residual_len = data_len - sizeof(*hdr); | ||
| 142 | struct nvmet_subsys_link *p; | 159 | struct nvmet_subsys_link *p; |
| 143 | struct nvmet_port *r; | 160 | struct nvmet_port *r; |
| 144 | u32 numrec = 0; | 161 | u32 numrec = 0; |
| 145 | u16 status = 0; | 162 | u16 status = 0; |
| 163 | void *buffer; | ||
| 164 | |||
| 165 | /* Spec requires dword aligned offsets */ | ||
| 166 | if (offset & 0x3) { | ||
| 167 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; | ||
| 168 | goto out; | ||
| 169 | } | ||
| 146 | 170 | ||
| 147 | /* | 171 | /* |
| 148 | * Make sure we're passing at least a buffer of response header size. | 172 | * Make sure we're passing at least a buffer of response header size. |
| 149 | * If host provided data len is less than the header size, only the | 173 | * If host provided data len is less than the header size, only the |
| 150 | * number of bytes requested by host will be sent to host. | 174 | * number of bytes requested by host will be sent to host. |
| 151 | */ | 175 | */ |
| 152 | hdr = kzalloc(alloc_len, GFP_KERNEL); | 176 | down_read(&nvmet_config_sem); |
| 153 | if (!hdr) { | 177 | alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req); |
| 178 | buffer = kzalloc(alloc_len, GFP_KERNEL); | ||
| 179 | if (!buffer) { | ||
| 180 | up_read(&nvmet_config_sem); | ||
| 154 | status = NVME_SC_INTERNAL; | 181 | status = NVME_SC_INTERNAL; |
| 155 | goto out; | 182 | goto out; |
| 156 | } | 183 | } |
| 157 | 184 | ||
| 158 | down_read(&nvmet_config_sem); | 185 | hdr = buffer; |
| 159 | list_for_each_entry(p, &req->port->subsystems, entry) { | 186 | list_for_each_entry(p, &req->port->subsystems, entry) { |
| 187 | char traddr[NVMF_TRADDR_SIZE]; | ||
| 188 | |||
| 160 | if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) | 189 | if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) |
| 161 | continue; | 190 | continue; |
| 162 | if (residual_len >= entry_size) { | 191 | |
| 163 | char traddr[NVMF_TRADDR_SIZE]; | 192 | nvmet_set_disc_traddr(req, req->port, traddr); |
| 164 | 193 | nvmet_format_discovery_entry(hdr, req->port, | |
| 165 | nvmet_set_disc_traddr(req, req->port, traddr); | 194 | p->subsys->subsysnqn, traddr, |
| 166 | nvmet_format_discovery_entry(hdr, req->port, | 195 | NVME_NQN_NVME, numrec); |
| 167 | p->subsys->subsysnqn, traddr, | ||
| 168 | NVME_NQN_NVME, numrec); | ||
| 169 | residual_len -= entry_size; | ||
| 170 | } | ||
| 171 | numrec++; | 196 | numrec++; |
| 172 | } | 197 | } |
| 173 | 198 | ||
| 174 | list_for_each_entry(r, &req->port->referrals, entry) { | 199 | list_for_each_entry(r, &req->port->referrals, entry) { |
| 175 | if (residual_len >= entry_size) { | 200 | nvmet_format_discovery_entry(hdr, r, |
| 176 | nvmet_format_discovery_entry(hdr, r, | 201 | NVME_DISC_SUBSYS_NAME, |
| 177 | NVME_DISC_SUBSYS_NAME, | 202 | r->disc_addr.traddr, |
| 178 | r->disc_addr.traddr, | 203 | NVME_NQN_DISC, numrec); |
| 179 | NVME_NQN_DISC, numrec); | ||
| 180 | residual_len -= entry_size; | ||
| 181 | } | ||
| 182 | numrec++; | 204 | numrec++; |
| 183 | } | 205 | } |
| 184 | 206 | ||
| @@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) | |||
| 190 | 212 | ||
| 191 | up_read(&nvmet_config_sem); | 213 | up_read(&nvmet_config_sem); |
| 192 | 214 | ||
| 193 | status = nvmet_copy_to_sgl(req, 0, hdr, data_len); | 215 | status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len); |
| 194 | kfree(hdr); | 216 | kfree(buffer); |
| 195 | out: | 217 | out: |
| 196 | nvmet_req_complete(req, status); | 218 | nvmet_req_complete(req, status); |
| 197 | } | 219 | } |
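The discovery.c rework above sizes the log from the real entry count (discovery_log_entries()), rejects offsets that are not dword aligned, and then answers the Get Log Page with one copy from buffer + offset. A compact userspace model of that flow follows; the record and header types are invented, and the clamp on the copy length is a defensive choice of the sketch rather than something the hunk adds:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct disc_hdr   { uint64_t genctr; uint64_t numrec; uint16_t recfmt; };
struct disc_entry { char subnqn[32]; char traddr[32]; };

/* Serve [offset, offset + len) of a log built from nr records. */
static int get_disc_log(void *dst, size_t len, uint64_t offset,
                        const struct disc_entry *recs, size_t nr)
{
        size_t total = sizeof(struct disc_hdr) + nr * sizeof(*recs);
        unsigned char *buf;

        if (offset & 0x3)               /* spec requires dword-aligned offsets */
                return -1;
        if (offset >= total)
                return -1;

        buf = calloc(1, total);         /* one buffer holding the whole log */
        if (!buf)
                return -1;

        ((struct disc_hdr *)buf)->numrec = nr;
        memcpy(buf + sizeof(struct disc_hdr), recs, nr * sizeof(*recs));

        if (len > total - offset)
                len = total - offset;
        memcpy(dst, buf + offset, len);
        free(buf);
        return 0;
}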
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 51e49efd7849..1653d19b187f 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
| @@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, | |||
| 428 | u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len); | 428 | u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len); |
| 429 | 429 | ||
| 430 | u32 nvmet_get_log_page_len(struct nvme_command *cmd); | 430 | u32 nvmet_get_log_page_len(struct nvme_command *cmd); |
| 431 | u64 nvmet_get_log_page_offset(struct nvme_command *cmd); | ||
| 431 | 432 | ||
| 432 | extern struct list_head *nvmet_ports; | 433 | extern struct list_head *nvmet_ports; |
| 433 | void nvmet_port_disc_changed(struct nvmet_port *port, | 434 | void nvmet_port_disc_changed(struct nvmet_port *port, |
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index 810ab0fbcccb..d820f3edd431 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | */ | 7 | */ |
| 8 | #include <linux/etherdevice.h> | 8 | #include <linux/etherdevice.h> |
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/nvmem-consumer.h> | ||
| 11 | #include <linux/of_net.h> | 10 | #include <linux/of_net.h> |
| 12 | #include <linux/phy.h> | 11 | #include <linux/phy.h> |
| 13 | #include <linux/export.h> | 12 | #include <linux/export.h> |
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 1be571c20062..6bad04cbb1d3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
| @@ -157,8 +157,12 @@ | |||
| 157 | #define DBG_IRT(x...) | 157 | #define DBG_IRT(x...) |
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | #ifdef CONFIG_64BIT | ||
| 161 | #define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) | ||
| 162 | #else | ||
| 160 | #define COMPARE_IRTE_ADDR(irte, hpa) \ | 163 | #define COMPARE_IRTE_ADDR(irte, hpa) \ |
| 161 | ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) | 164 | ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) |
| 165 | #endif | ||
| 162 | 166 | ||
| 163 | #define IOSAPIC_REG_SELECT 0x00 | 167 | #define IOSAPIC_REG_SELECT 0x00 |
| 164 | #define IOSAPIC_REG_WINDOW 0x10 | 168 | #define IOSAPIC_REG_WINDOW 0x10 |
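The iosapic.c hunk above splits COMPARE_IRTE_ADDR(): the 64-bit build compares the addresses directly, while the 32-bit build ORs the fixed upper word into the I/O address, presumably because F_EXTEND() works in unsigned long and cannot carry that upper word on a 32-bit build. A host-side illustration of the extended-address comparison:

#include <assert.h>
#include <stdint.h>

/* Firmware keeps I/O SAPIC addresses in "extended" 64-bit form. */
static uint64_t extend_ioaddr(uint32_t hpa)
{
        return (uint64_t)hpa | 0xffffffff00000000ULL;
}

static int irte_matches(uint64_t dest_iosapic_addr, uint32_t hpa)
{
        return dest_iosapic_addr == extend_ioaddr(hpa);
}

int main(void)
{
        assert(irte_matches(0xfffffffffed00000ULL, 0xfed00000u));
        assert(!irte_matches(0xfffffffffed10000ULL, 0xfed00000u));
        return 0;
}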
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 3f3df4c29f6e..905282a8ddaa 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
| @@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal) | |||
| 115 | * removed from the slot/adapter. | 115 | * removed from the slot/adapter. |
| 116 | */ | 116 | */ |
| 117 | msleep(1000); | 117 | msleep(1000); |
| 118 | |||
| 119 | /* Ignore link or presence changes caused by power off */ | ||
| 120 | atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), | ||
| 121 | &ctrl->pending_events); | ||
| 118 | } | 122 | } |
| 119 | 123 | ||
| 120 | /* turn off Green LED */ | 124 | /* turn off Green LED */ |
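The pciehp hunk above clears any link-change and presence-detect bits that the slot power-off itself latched in ctrl->pending_events, and it does so with an atomic AND so events posted concurrently for other reasons are not lost. The same idea in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

#define EVT_DLLSC  (1u << 0)    /* data link layer state changed */
#define EVT_PDC    (1u << 1)    /* presence detect changed */
#define EVT_BUTTON (1u << 2)    /* attention button pressed */

static _Atomic unsigned int pending_events;

int main(void)
{
        atomic_fetch_or(&pending_events, EVT_DLLSC | EVT_PDC | EVT_BUTTON);

        /* Drop only the bits caused by the power-off; keep everything else. */
        atomic_fetch_and(&pending_events, ~(EVT_DLLSC | EVT_PDC));

        printf("pending: %#x\n", atomic_load(&pending_events));  /* 0x4 */
        return 0;
}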
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a59ad09ce911..a077f67fe1da 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, | |||
| 3877 | /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ | 3877 | /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ |
| 3878 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, | 3878 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, |
| 3879 | quirk_dma_func1_alias); | 3879 | quirk_dma_func1_alias); |
| 3880 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, | ||
| 3881 | quirk_dma_func1_alias); | ||
| 3880 | /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ | 3882 | /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ |
| 3881 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, | 3883 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, |
| 3882 | quirk_dma_func1_alias); | 3884 | quirk_dma_func1_alias); |
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 8f018b3f3cd4..c7039f52ad51 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <linux/debugfs.h> | 18 | #include <linux/debugfs.h> |
| 19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
| 20 | #include <linux/dmi.h> | ||
| 20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
| 21 | #include <linux/io.h> | 22 | #include <linux/io.h> |
| 22 | #include <linux/platform_data/x86/clk-pmc-atom.h> | 23 | #include <linux/platform_data/x86/clk-pmc-atom.h> |
| @@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) | |||
| 391 | } | 392 | } |
| 392 | #endif /* CONFIG_DEBUG_FS */ | 393 | #endif /* CONFIG_DEBUG_FS */ |
| 393 | 394 | ||
| 395 | /* | ||
| 396 | * Some systems need one or more of their pmc_plt_clks to be | ||
| 397 | * marked as critical. | ||
| 398 | */ | ||
| 399 | static const struct dmi_system_id critclk_systems[] = { | ||
| 400 | { | ||
| 401 | .ident = "MPL CEC1x", | ||
| 402 | .matches = { | ||
| 403 | DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), | ||
| 404 | DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), | ||
| 405 | }, | ||
| 406 | }, | ||
| 407 | { /*sentinel*/ } | ||
| 408 | }; | ||
| 409 | |||
| 394 | static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, | 410 | static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, |
| 395 | const struct pmc_data *pmc_data) | 411 | const struct pmc_data *pmc_data) |
| 396 | { | 412 | { |
| 397 | struct platform_device *clkdev; | 413 | struct platform_device *clkdev; |
| 398 | struct pmc_clk_data *clk_data; | 414 | struct pmc_clk_data *clk_data; |
| 415 | const struct dmi_system_id *d = dmi_first_match(critclk_systems); | ||
| 399 | 416 | ||
| 400 | clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); | 417 | clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); |
| 401 | if (!clk_data) | 418 | if (!clk_data) |
| @@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, | |||
| 403 | 420 | ||
| 404 | clk_data->base = pmc_regmap; /* offset is added by client */ | 421 | clk_data->base = pmc_regmap; /* offset is added by client */ |
| 405 | clk_data->clks = pmc_data->clks; | 422 | clk_data->clks = pmc_data->clks; |
| 423 | if (d) { | ||
| 424 | clk_data->critical = true; | ||
| 425 | pr_info("%s critclks quirk enabled\n", d->ident); | ||
| 426 | } | ||
| 406 | 427 | ||
| 407 | clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", | 428 | clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", |
| 408 | PLATFORM_DEVID_NONE, | 429 | PLATFORM_DEVID_NONE, |
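pmc_atom above gains a DMI table and, when dmi_first_match() recognises the platform, sets clk_data->critical so the clock driver can keep those PMC clocks from being gated. A userspace analogue of the first-match quirk lookup; the matching here is a plain strcmp() where the kernel's DMI_MATCH() is a substring test, and the table contents are only illustrative:

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *ident;
        const char *sys_vendor;
        const char *product_name;
};

static const struct quirk critclk_systems[] = {
        { "MPL CEC1x", "MPL AG", "CEC10 Family" },
        { NULL, NULL, NULL }                    /* sentinel */
};

static const struct quirk *first_match(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = critclk_systems; q->ident; q++)
                if (!strcmp(q->sys_vendor, vendor) &&
                    !strcmp(q->product_name, product))
                        return q;
        return NULL;
}

int main(void)
{
        const struct quirk *d = first_match("MPL AG", "CEC10 Family");

        if (d)
                printf("%s critclks quirk enabled\n", d->ident);
        return 0;
}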
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c index ad969d9fc981..c2644a9fe80f 100644 --- a/drivers/power/supply/goldfish_battery.c +++ b/drivers/power/supply/goldfish_battery.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Power supply driver for the goldfish emulator | 3 | * Power supply driver for the goldfish emulator |
| 4 | * | 4 | * |
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37a..c53a2185a039 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c | |||
| @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) | |||
| 130 | arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); | 130 | arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); |
| 131 | arb->rstc.ops = &meson_audio_arb_rstc_ops; | 131 | arb->rstc.ops = &meson_audio_arb_rstc_ops; |
| 132 | arb->rstc.of_node = dev->of_node; | 132 | arb->rstc.of_node = dev->of_node; |
| 133 | arb->rstc.owner = THIS_MODULE; | ||
| 133 | 134 | ||
| 134 | /* | 135 | /* |
| 135 | * Enable general : | 136 | * Enable general : |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index a71734c41693..f933c06bff4f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
| @@ -667,9 +667,9 @@ config RTC_DRV_S5M | |||
| 667 | will be called rtc-s5m. | 667 | will be called rtc-s5m. |
| 668 | 668 | ||
| 669 | config RTC_DRV_SD3078 | 669 | config RTC_DRV_SD3078 |
| 670 | tristate "ZXW Crystal SD3078" | 670 | tristate "ZXW Shenzhen whwave SD3078" |
| 671 | help | 671 | help |
| 672 | If you say yes here you get support for the ZXW Crystal | 672 | If you say yes here you get support for the ZXW Shenzhen whwave |
| 673 | SD3078 RTC chips. | 673 | SD3078 RTC chips. |
| 674 | 674 | ||
| 675 | This driver can also be built as a module. If so, the module | 675 | This driver can also be built as a module. If so, the module |
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075e..4d6bf9304ceb 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c | |||
| @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) | |||
| 298 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); | 298 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); |
| 299 | 299 | ||
| 300 | if (device_may_wakeup(dev)) | 300 | if (device_may_wakeup(dev)) |
| 301 | enable_irq_wake(cros_ec_rtc->cros_ec->irq); | 301 | return enable_irq_wake(cros_ec_rtc->cros_ec->irq); |
| 302 | 302 | ||
| 303 | return 0; | 303 | return 0; |
| 304 | } | 304 | } |
| @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) | |||
| 309 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); | 309 | struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); |
| 310 | 310 | ||
| 311 | if (device_may_wakeup(dev)) | 311 | if (device_may_wakeup(dev)) |
| 312 | disable_irq_wake(cros_ec_rtc->cros_ec->irq); | 312 | return disable_irq_wake(cros_ec_rtc->cros_ec->irq); |
| 313 | 313 | ||
| 314 | return 0; | 314 | return 0; |
| 315 | } | 315 | } |
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad..69b54e5556c0 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c | |||
| @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) | |||
| 480 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); | 480 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); |
| 481 | rtc->rtc_sync = false; | 481 | rtc->rtc_sync = false; |
| 482 | 482 | ||
| 483 | /* | ||
| 484 | * TODO: some models have alarms on a minute boundary but still support | ||
| 485 | * real hardware interrupts. Add this once the core supports it. | ||
| 486 | */ | ||
| 487 | if (config->rtc_data_start != RTC_SEC) | ||
| 488 | rtc->rtc_dev->uie_unsupported = 1; | ||
| 489 | |||
| 483 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); | 490 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); |
| 484 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, | 491 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, |
| 485 | da9063_alarm_event, | 492 | da9063_alarm_event, |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index d417b203cbc5..1d3de2a3d1a4 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
| @@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 374 | static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) | 374 | static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) |
| 375 | { | 375 | { |
| 376 | unsigned int byte; | 376 | unsigned int byte; |
| 377 | int value = 0xff; /* return 0xff for ignored values */ | 377 | int value = -1; /* return -1 for ignored values */ |
| 378 | 378 | ||
| 379 | byte = readb(rtc->regbase + reg_off); | 379 | byte = readb(rtc->regbase + reg_off); |
| 380 | if (byte & AR_ENB) { | 380 | if (byte & AR_ENB) { |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6e294b4d3635..f89f9d02e788 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
| @@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) | |||
| 2004 | blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); | 2004 | blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); |
| 2005 | 2005 | ||
| 2006 | raw: | 2006 | raw: |
| 2007 | block->blocks = (private->real_cyl * | 2007 | block->blocks = ((unsigned long) private->real_cyl * |
| 2008 | private->rdc_data.trk_per_cyl * | 2008 | private->rdc_data.trk_per_cyl * |
| 2009 | blk_per_trk); | 2009 | blk_per_trk); |
| 2010 | 2010 | ||
| 2011 | dev_info(&device->cdev->dev, | 2011 | dev_info(&device->cdev->dev, |
| 2012 | "DASD with %d KB/block, %d KB total size, %d KB/track, " | 2012 | "DASD with %u KB/block, %lu KB total size, %u KB/track, " |
| 2013 | "%s\n", (block->bp_block >> 10), | 2013 | "%s\n", (block->bp_block >> 10), |
| 2014 | ((private->real_cyl * | 2014 | (((unsigned long) private->real_cyl * |
| 2015 | private->rdc_data.trk_per_cyl * | 2015 | private->rdc_data.trk_per_cyl * |
| 2016 | blk_per_trk * (block->bp_block >> 9)) >> 1), | 2016 | blk_per_trk * (block->bp_block >> 9)) >> 1), |
| 2017 | ((blk_per_trk * block->bp_block) >> 10), | 2017 | ((blk_per_trk * block->bp_block) >> 10), |
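The dasd_eckd.c hunk above widens the first operand so the cylinders * tracks * blocks product is computed in unsigned long rather than int, which otherwise overflows on very large raw-track devices (the dev_info() format specifiers follow suit). The effect is easy to reproduce in plain C; the geometry numbers below are chosen only to trip the 32-bit wrap, not to match real hardware:

#include <stdio.h>

int main(void)
{
        unsigned int cyl = 1200000, trk_per_cyl = 15, blk_per_trk = 240;

        /* All operands are 32-bit, so the product wraps before it is widened. */
        unsigned long long wrong = cyl * trk_per_cyl * blk_per_trk;

        /* Casting the first operand makes the whole chain 64-bit arithmetic. */
        unsigned long long right = (unsigned long long)cyl * trk_per_cyl * blk_per_trk;

        printf("wrong=%llu right=%llu\n", wrong, right);  /* 25032704 vs 4320000000 */
        return 0;
}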
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index fd2146bcc0ad..e17364e13d2f 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
| @@ -629,7 +629,7 @@ con3270_init(void) | |||
| 629 | (void (*)(unsigned long)) con3270_read_tasklet, | 629 | (void (*)(unsigned long)) con3270_read_tasklet, |
| 630 | (unsigned long) condev->read); | 630 | (unsigned long) condev->read); |
| 631 | 631 | ||
| 632 | raw3270_add_view(&condev->view, &con3270_fn, 1); | 632 | raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ); |
| 633 | 633 | ||
| 634 | INIT_LIST_HEAD(&condev->freemem); | 634 | INIT_LIST_HEAD(&condev->freemem); |
| 635 | for (i = 0; i < CON3270_STRING_PAGES; i++) { | 635 | for (i = 0; i < CON3270_STRING_PAGES; i++) { |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 8f3a2eeb28dc..8b48ba9c598e 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
| @@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp) | |||
| 463 | 463 | ||
| 464 | init_waitqueue_head(&fp->wait); | 464 | init_waitqueue_head(&fp->wait); |
| 465 | fp->fs_pid = get_pid(task_pid(current)); | 465 | fp->fs_pid = get_pid(task_pid(current)); |
| 466 | rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); | 466 | rc = raw3270_add_view(&fp->view, &fs3270_fn, minor, |
| 467 | RAW3270_VIEW_LOCK_BH); | ||
| 467 | if (rc) { | 468 | if (rc) { |
| 468 | fs3270_free_view(&fp->view); | 469 | fs3270_free_view(&fp->view); |
| 469 | goto out; | 470 | goto out; |
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index f8cd2935fbfd..63a41b168761 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
| @@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view) | |||
| 920 | * Add view to device with minor "minor". | 920 | * Add view to device with minor "minor". |
| 921 | */ | 921 | */ |
| 922 | int | 922 | int |
| 923 | raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | 923 | raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass) |
| 924 | { | 924 | { |
| 925 | unsigned long flags; | 925 | unsigned long flags; |
| 926 | struct raw3270 *rp; | 926 | struct raw3270 *rp; |
| @@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | |||
| 942 | view->cols = rp->cols; | 942 | view->cols = rp->cols; |
| 943 | view->ascebc = rp->ascebc; | 943 | view->ascebc = rp->ascebc; |
| 944 | spin_lock_init(&view->lock); | 944 | spin_lock_init(&view->lock); |
| 945 | lockdep_set_subclass(&view->lock, subclass); | ||
| 945 | list_add(&view->list, &rp->view_list); | 946 | list_add(&view->list, &rp->view_list); |
| 946 | rc = 0; | 947 | rc = 0; |
| 947 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 948 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 114ca7cbf889..3afaa35f7351 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h | |||
| @@ -150,6 +150,8 @@ struct raw3270_fn { | |||
| 150 | struct raw3270_view { | 150 | struct raw3270_view { |
| 151 | struct list_head list; | 151 | struct list_head list; |
| 152 | spinlock_t lock; | 152 | spinlock_t lock; |
| 153 | #define RAW3270_VIEW_LOCK_IRQ 0 | ||
| 154 | #define RAW3270_VIEW_LOCK_BH 1 | ||
| 153 | atomic_t ref_count; | 155 | atomic_t ref_count; |
| 154 | struct raw3270 *dev; | 156 | struct raw3270 *dev; |
| 155 | struct raw3270_fn *fn; | 157 | struct raw3270_fn *fn; |
| @@ -158,7 +160,7 @@ struct raw3270_view { | |||
| 158 | unsigned char *ascebc; /* ascii -> ebcdic table */ | 160 | unsigned char *ascebc; /* ascii -> ebcdic table */ |
| 159 | }; | 161 | }; |
| 160 | 162 | ||
| 161 | int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int); | 163 | int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); |
| 162 | int raw3270_activate_view(struct raw3270_view *); | 164 | int raw3270_activate_view(struct raw3270_view *); |
| 163 | void raw3270_del_view(struct raw3270_view *); | 165 | void raw3270_del_view(struct raw3270_view *); |
| 164 | void raw3270_deactivate_view(struct raw3270_view *); | 166 | void raw3270_deactivate_view(struct raw3270_view *); |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 2b0c36c2c568..98d7fc152e32 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
| @@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
| 980 | return PTR_ERR(tp); | 980 | return PTR_ERR(tp); |
| 981 | 981 | ||
| 982 | rc = raw3270_add_view(&tp->view, &tty3270_fn, | 982 | rc = raw3270_add_view(&tp->view, &tty3270_fn, |
| 983 | tty->index + RAW3270_FIRSTMINOR); | 983 | tty->index + RAW3270_FIRSTMINOR, |
| 984 | RAW3270_VIEW_LOCK_BH); | ||
| 984 | if (rc) { | 985 | if (rc) { |
| 985 | tty3270_free_view(tp); | 986 | tty3270_free_view(tp); |
| 986 | return rc; | 987 | return rc; |
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 6a340f2c3556..5ea83dc4f1d7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c | |||
| @@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq) | |||
| 751 | __ap_flush_queue(aq); | 751 | __ap_flush_queue(aq); |
| 752 | /* set REMOVE state to prevent new messages are queued in */ | 752 | /* set REMOVE state to prevent new messages are queued in */ |
| 753 | aq->state = AP_STATE_REMOVE; | 753 | aq->state = AP_STATE_REMOVE; |
| 754 | del_timer_sync(&aq->timeout); | ||
| 755 | spin_unlock_bh(&aq->lock); | 754 | spin_unlock_bh(&aq->lock); |
| 755 | del_timer_sync(&aq->timeout); | ||
| 756 | } | 756 | } |
| 757 | 757 | ||
| 758 | void ap_queue_remove(struct ap_queue *aq) | 758 | void ap_queue_remove(struct ap_queue *aq) |
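The ap_queue.c fix above drops the queue lock before del_timer_sync(): the queue's timeout handler takes the same lock, so waiting synchronously for the timer while holding it risks a deadlock. A small pthread analogue of the ordering, where the "timer" is just a thread that also takes the lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Plays the role of the AP queue timeout callback: it takes the lock too. */
static void *timeout_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        printf("timeout handler ran, state=%d\n", state);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t timer;

        pthread_create(&timer, NULL, timeout_worker, NULL);

        pthread_mutex_lock(&lock);
        state = -1;             /* the "set REMOVE state" step, under the lock */
        pthread_mutex_unlock(&lock);

        /*
         * The analogue of del_timer_sync(): wait for the handler only after
         * the lock is released; joining while still holding it could block
         * forever, which is the deadlock the ap_queue change avoids.
         */
        pthread_join(timer, NULL);
        return 0;
}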
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3e85d665c572..45eb0c14b880 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c | |||
| @@ -51,7 +51,8 @@ static debug_info_t *debug_info; | |||
| 51 | 51 | ||
| 52 | static void __init pkey_debug_init(void) | 52 | static void __init pkey_debug_init(void) |
| 53 | { | 53 | { |
| 54 | debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); | 54 | /* 5 arguments per dbf entry (including the format string ptr) */ |
| 55 | debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); | ||
| 55 | debug_register_view(debug_info, &debug_sprintf_view); | 56 | debug_register_view(debug_info, &debug_sprintf_view); |
| 56 | debug_set_level(debug_info, 3); | 57 | debug_set_level(debug_info, 3); |
| 57 | } | 58 | } |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 7617d21cb296..f63c5c871d3d 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
| @@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
| 1595 | if (priv->channel[direction] == NULL) { | 1595 | if (priv->channel[direction] == NULL) { |
| 1596 | if (direction == CTCM_WRITE) | 1596 | if (direction == CTCM_WRITE) |
| 1597 | channel_free(priv->channel[CTCM_READ]); | 1597 | channel_free(priv->channel[CTCM_READ]); |
| 1598 | result = -ENODEV; | ||
| 1598 | goto out_dev; | 1599 | goto out_dev; |
| 1599 | } | 1600 | } |
| 1600 | priv->channel[direction]->netdev = dev; | 1601 | priv->channel[direction]->netdev = dev; |
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 3d401d02c019..bdd177e3d762 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c | |||
| @@ -91,6 +91,7 @@ aic7770_probe(struct device *dev) | |||
| 91 | ahc = ahc_alloc(&aic7xxx_driver_template, name); | 91 | ahc = ahc_alloc(&aic7xxx_driver_template, name); |
| 92 | if (ahc == NULL) | 92 | if (ahc == NULL) |
| 93 | return (ENOMEM); | 93 | return (ENOMEM); |
| 94 | ahc->dev = dev; | ||
| 94 | error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, | 95 | error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, |
| 95 | eisaBase); | 96 | eisaBase); |
| 96 | if (error != 0) { | 97 | if (error != 0) { |
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 5614921b4041..88b90f9806c9 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h | |||
| @@ -943,6 +943,7 @@ struct ahc_softc { | |||
| 943 | * Platform specific device information. | 943 | * Platform specific device information. |
| 944 | */ | 944 | */ |
| 945 | ahc_dev_softc_t dev_softc; | 945 | ahc_dev_softc_t dev_softc; |
| 946 | struct device *dev; | ||
| 946 | 947 | ||
| 947 | /* | 948 | /* |
| 948 | * Bus specific device information. | 949 | * Bus specific device information. |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 3c9c17450bb3..d5c4a0d23706 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
| @@ -860,8 +860,8 @@ int | |||
| 860 | ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, | 860 | ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, |
| 861 | int flags, bus_dmamap_t *mapp) | 861 | int flags, bus_dmamap_t *mapp) |
| 862 | { | 862 | { |
| 863 | *vaddr = pci_alloc_consistent(ahc->dev_softc, | 863 | /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ |
| 864 | dmat->maxsize, mapp); | 864 | *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); |
| 865 | if (*vaddr == NULL) | 865 | if (*vaddr == NULL) |
| 866 | return ENOMEM; | 866 | return ENOMEM; |
| 867 | return 0; | 867 | return 0; |
| @@ -871,8 +871,7 @@ void | |||
| 871 | ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, | 871 | ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, |
| 872 | void* vaddr, bus_dmamap_t map) | 872 | void* vaddr, bus_dmamap_t map) |
| 873 | { | 873 | { |
| 874 | pci_free_consistent(ahc->dev_softc, dmat->maxsize, | 874 | dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); |
| 875 | vaddr, map); | ||
| 876 | } | 875 | } |
| 877 | 876 | ||
| 878 | int | 877 | int |
| @@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa | |||
| 1123 | 1122 | ||
| 1124 | host->transportt = ahc_linux_transport_template; | 1123 | host->transportt = ahc_linux_transport_template; |
| 1125 | 1124 | ||
| 1126 | retval = scsi_add_host(host, | 1125 | retval = scsi_add_host(host, ahc->dev); |
| 1127 | (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); | ||
| 1128 | if (retval) { | 1126 | if (retval) { |
| 1129 | printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); | 1127 | printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); |
| 1130 | scsi_host_put(host); | 1128 | scsi_host_put(host); |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 0fc14dac7070..717d8d1082ce 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | |||
| @@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 250 | } | 250 | } |
| 251 | } | 251 | } |
| 252 | ahc->dev_softc = pci; | 252 | ahc->dev_softc = pci; |
| 253 | ahc->dev = &pci->dev; | ||
| 253 | error = ahc_pci_config(ahc, entry); | 254 | error = ahc_pci_config(ahc, entry); |
| 254 | if (error != 0) { | 255 | if (error != 0) { |
| 255 | ahc_free(ahc); | 256 | ahc_free(ahc); |
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 462560b2855e..469d0bc9f5fe 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c | |||
| @@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) | |||
| 1713 | } | 1713 | } |
| 1714 | 1714 | ||
| 1715 | out: | 1715 | out: |
| 1716 | if (req->nsge > 0) | 1716 | if (req->nsge > 0) { |
| 1717 | scsi_dma_unmap(cmnd); | 1717 | scsi_dma_unmap(cmnd); |
| 1718 | if (req->dcopy && (host_status == DID_OK)) | ||
| 1719 | host_status = csio_scsi_copy_to_sgl(hw, req); | ||
| 1720 | } | ||
| 1718 | 1721 | ||
| 1719 | cmnd->result = (((host_status) << 16) | scsi_status); | 1722 | cmnd->result = (((host_status) << 16) | scsi_status); |
| 1720 | cmnd->scsi_done(cmnd); | 1723 | cmnd->scsi_done(cmnd); |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index dfba4921b265..5bf61431434b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
| @@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) | |||
| 2162 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", | 2162 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", |
| 2163 | fc_rport_state(rdata)); | 2163 | fc_rport_state(rdata)); |
| 2164 | 2164 | ||
| 2165 | rdata->flags &= ~FC_RP_STARTED; | ||
| 2166 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); | 2165 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); |
| 2167 | mutex_unlock(&rdata->rp_mutex); | 2166 | mutex_unlock(&rdata->rp_mutex); |
| 2168 | kref_put(&rdata->kref, fc_rport_destroy); | 2167 | kref_put(&rdata->kref, fc_rport_destroy); |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c98f264f1d83..a497b2c0cb79 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
| @@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 3878 | * wake up the thread. | 3878 | * wake up the thread. |
| 3879 | */ | 3879 | */ |
| 3880 | spin_lock(&lpfc_cmd->buf_lock); | 3880 | spin_lock(&lpfc_cmd->buf_lock); |
| 3881 | if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { | 3881 | lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; |
| 3882 | lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; | 3882 | if (lpfc_cmd->waitq) { |
| 3883 | if (lpfc_cmd->waitq) | 3883 | wake_up(lpfc_cmd->waitq); |
| 3884 | wake_up(lpfc_cmd->waitq); | ||
| 3885 | lpfc_cmd->waitq = NULL; | 3884 | lpfc_cmd->waitq = NULL; |
| 3886 | } | 3885 | } |
| 3887 | spin_unlock(&lpfc_cmd->buf_lock); | 3886 | spin_unlock(&lpfc_cmd->buf_lock); |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e74a62448ba4..e5db9a9954dc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
| @@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) | |||
| 1392 | 1392 | ||
| 1393 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) | 1393 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) |
| 1394 | { | 1394 | { |
| 1395 | struct qedi_nvm_iscsi_image nvm_image; | ||
| 1396 | |||
| 1397 | qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, | 1395 | qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, |
| 1398 | sizeof(nvm_image), | 1396 | sizeof(struct qedi_nvm_iscsi_image), |
| 1399 | &qedi->nvm_buf_dma, GFP_KERNEL); | 1397 | &qedi->nvm_buf_dma, GFP_KERNEL); |
| 1400 | if (!qedi->iscsi_image) { | 1398 | if (!qedi->iscsi_image) { |
| 1401 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); | 1399 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); |
| @@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data) | |||
| 2236 | static int qedi_get_boot_info(struct qedi_ctx *qedi) | 2234 | static int qedi_get_boot_info(struct qedi_ctx *qedi) |
| 2237 | { | 2235 | { |
| 2238 | int ret = 1; | 2236 | int ret = 1; |
| 2239 | struct qedi_nvm_iscsi_image nvm_image; | ||
| 2240 | 2237 | ||
| 2241 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | 2238 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
| 2242 | "Get NVM iSCSI CFG image\n"); | 2239 | "Get NVM iSCSI CFG image\n"); |
| 2243 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, | 2240 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, |
| 2244 | QED_NVM_IMAGE_ISCSI_CFG, | 2241 | QED_NVM_IMAGE_ISCSI_CFG, |
| 2245 | (char *)qedi->iscsi_image, | 2242 | (char *)qedi->iscsi_image, |
| 2246 | sizeof(nvm_image)); | 2243 | sizeof(struct qedi_nvm_iscsi_image)); |
| 2247 | if (ret) | 2244 | if (ret) |
| 2248 | QEDI_ERR(&qedi->dbg_ctx, | 2245 | QEDI_ERR(&qedi->dbg_ctx, |
| 2249 | "Could not get NVM image. ret = %d\n", ret); | 2246 | "Could not get NVM image. ret = %d\n", ret); |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c4cbfd07b916..a08ff3bd6310 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -238,6 +238,7 @@ static struct { | |||
| 238 | {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 238 | {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 239 | {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 239 | {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 240 | {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 240 | {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
| 241 | {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
| 241 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, | 242 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, |
| 242 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, | 243 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, |
| 243 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ | 244 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ |
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 5a58cbf3a75d..c14006ac98f9 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c | |||
| @@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { | |||
| 75 | {"NETAPP", "INF-01-00", "rdac", }, | 75 | {"NETAPP", "INF-01-00", "rdac", }, |
| 76 | {"LSI", "INF-01-00", "rdac", }, | 76 | {"LSI", "INF-01-00", "rdac", }, |
| 77 | {"ENGENIO", "INF-01-00", "rdac", }, | 77 | {"ENGENIO", "INF-01-00", "rdac", }, |
| 78 | {"LENOVO", "DE_Series", "rdac", }, | ||
| 78 | {NULL, NULL, NULL }, | 79 | {NULL, NULL, NULL }, |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 601b9f1de267..07dfc17d4824 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1706,8 +1706,12 @@ out_put_budget: | |||
| 1706 | ret = BLK_STS_DEV_RESOURCE; | 1706 | ret = BLK_STS_DEV_RESOURCE; |
| 1707 | break; | 1707 | break; |
| 1708 | default: | 1708 | default: |
| 1709 | if (unlikely(!scsi_device_online(sdev))) | ||
| 1710 | scsi_req(req)->result = DID_NO_CONNECT << 16; | ||
| 1711 | else | ||
| 1712 | scsi_req(req)->result = DID_ERROR << 16; | ||
| 1709 | /* | 1713 | /* |
| 1710 | * Make sure to release all allocated ressources when | 1714 | * Make sure to release all allocated resources when |
| 1711 | * we hit an error, as we will never see this command | 1715 | * we hit an error, as we will never see this command |
| 1712 | * again. | 1716 | * again. |
| 1713 | */ | 1717 | */ |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 84380bae20f1..8472de1007ff 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -385,7 +385,7 @@ enum storvsc_request_type { | |||
| 385 | * This is the end of Protocol specific defines. | 385 | * This is the end of Protocol specific defines. |
| 386 | */ | 386 | */ |
| 387 | 387 | ||
| 388 | static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); | 388 | static int storvsc_ringbuffer_size = (128 * 1024); |
| 389 | static u32 max_outstanding_req_per_channel; | 389 | static u32 max_outstanding_req_per_channel; |
| 390 | 390 | ||
| 391 | static int storvsc_vcpus_per_sub_channel = 4; | 391 | static int storvsc_vcpus_per_sub_channel = 4; |
| @@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) | |||
| 668 | { | 668 | { |
| 669 | struct device *dev = &device->device; | 669 | struct device *dev = &device->device; |
| 670 | struct storvsc_device *stor_device; | 670 | struct storvsc_device *stor_device; |
| 671 | int num_cpus = num_online_cpus(); | ||
| 672 | int num_sc; | 671 | int num_sc; |
| 673 | struct storvsc_cmd_request *request; | 672 | struct storvsc_cmd_request *request; |
| 674 | struct vstor_packet *vstor_packet; | 673 | struct vstor_packet *vstor_packet; |
| 675 | int ret, t; | 674 | int ret, t; |
| 676 | 675 | ||
| 677 | num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); | 676 | /* |
| 677 | * If the number of CPUs is artificially restricted, such as | ||
| 678 | * with maxcpus=1 on the kernel boot line, Hyper-V could offer | ||
| 679 | * sub-channels >= the number of CPUs. These sub-channels | ||
| 680 | * should not be created. The primary channel is already created | ||
| 681 | * and assigned to one CPU, so check against # CPUs - 1. | ||
| 682 | */ | ||
| 683 | num_sc = min((int)(num_online_cpus() - 1), max_chns); | ||
| 684 | if (!num_sc) | ||
| 685 | return; | ||
| 686 | |||
| 678 | stor_device = get_out_stor_device(device); | 687 | stor_device = get_out_stor_device(device); |
| 679 | if (!stor_device) | 688 | if (!stor_device) |
| 680 | return; | 689 | return; |
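The storvsc hunk above caps the number of sub-channels at the count of online CPUs minus one, because the primary channel is already bound to a CPU; with an artificially small CPU count (e.g. maxcpus=1 on the boot line) no sub-channels are created at all. A rough standalone sketch of that arithmetic (illustrative only; the helper name and plain C are assumptions, not the driver's code):

    #include <stdio.h>

    /* Sub-channels may not exceed (online CPUs - 1); zero means
     * "skip sub-channel creation entirely", as in the new early return. */
    static int subchannels_to_create(int offered, int online_cpus)
    {
        int num_sc = online_cpus - 1;

        if (num_sc > offered)
            num_sc = offered;
        return num_sc > 0 ? num_sc : 0;
    }

    int main(void)
    {
        printf("%d\n", subchannels_to_create(4, 1));   /* maxcpus=1 -> 0 */
        printf("%d\n", subchannels_to_create(4, 16));  /* enough CPUs -> 4 */
        return 0;
    }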
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 8af01777d09c..f8cb7c23305b 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev) | |||
| 793 | 793 | ||
| 794 | /* We need to know how many queues before we allocate. */ | 794 | /* We need to know how many queues before we allocate. */ |
| 795 | num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; | 795 | num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; |
| 796 | num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); | ||
| 796 | 797 | ||
| 797 | num_targets = virtscsi_config_get(vdev, max_target) + 1; | 798 | num_targets = virtscsi_config_get(vdev, max_target) + 1; |
| 798 | 799 | ||
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c index 808ed92ed66f..1bb1cb651349 100644 --- a/drivers/staging/comedi/drivers/ni_usb6501.c +++ b/drivers/staging/comedi/drivers/ni_usb6501.c | |||
| @@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev) | |||
| 463 | 463 | ||
| 464 | size = usb_endpoint_maxp(devpriv->ep_tx); | 464 | size = usb_endpoint_maxp(devpriv->ep_tx); |
| 465 | devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); | 465 | devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); |
| 466 | if (!devpriv->usb_tx_buf) { | 466 | if (!devpriv->usb_tx_buf) |
| 467 | kfree(devpriv->usb_rx_buf); | ||
| 468 | return -ENOMEM; | 467 | return -ENOMEM; |
| 469 | } | ||
| 470 | 468 | ||
| 471 | return 0; | 469 | return 0; |
| 472 | } | 470 | } |
| @@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev, | |||
| 518 | if (!devpriv) | 516 | if (!devpriv) |
| 519 | return -ENOMEM; | 517 | return -ENOMEM; |
| 520 | 518 | ||
| 519 | mutex_init(&devpriv->mut); | ||
| 520 | usb_set_intfdata(intf, devpriv); | ||
| 521 | |||
| 521 | ret = ni6501_find_endpoints(dev); | 522 | ret = ni6501_find_endpoints(dev); |
| 522 | if (ret) | 523 | if (ret) |
| 523 | return ret; | 524 | return ret; |
| @@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev, | |||
| 526 | if (ret) | 527 | if (ret) |
| 527 | return ret; | 528 | return ret; |
| 528 | 529 | ||
| 529 | mutex_init(&devpriv->mut); | ||
| 530 | usb_set_intfdata(intf, devpriv); | ||
| 531 | |||
| 532 | ret = comedi_alloc_subdevices(dev, 2); | 530 | ret = comedi_alloc_subdevices(dev, 2); |
| 533 | if (ret) | 531 | if (ret) |
| 534 | return ret; | 532 | return ret; |
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 6234b649d887..65dc6c51037e 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c | |||
| @@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) | |||
| 682 | 682 | ||
| 683 | size = usb_endpoint_maxp(devpriv->ep_tx); | 683 | size = usb_endpoint_maxp(devpriv->ep_tx); |
| 684 | devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); | 684 | devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); |
| 685 | if (!devpriv->usb_tx_buf) { | 685 | if (!devpriv->usb_tx_buf) |
| 686 | kfree(devpriv->usb_rx_buf); | ||
| 687 | return -ENOMEM; | 686 | return -ENOMEM; |
| 688 | } | ||
| 689 | 687 | ||
| 690 | return 0; | 688 | return 0; |
| 691 | } | 689 | } |
| @@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, | |||
| 800 | 798 | ||
| 801 | devpriv->model = board->model; | 799 | devpriv->model = board->model; |
| 802 | 800 | ||
| 801 | sema_init(&devpriv->limit_sem, 8); | ||
| 802 | |||
| 803 | ret = vmk80xx_find_usb_endpoints(dev); | 803 | ret = vmk80xx_find_usb_endpoints(dev); |
| 804 | if (ret) | 804 | if (ret) |
| 805 | return ret; | 805 | return ret; |
| @@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, | |||
| 808 | if (ret) | 808 | if (ret) |
| 809 | return ret; | 809 | return ret; |
| 810 | 810 | ||
| 811 | sema_init(&devpriv->limit_sem, 8); | ||
| 812 | |||
| 813 | usb_set_intfdata(intf, devpriv); | 811 | usb_set_intfdata(intf, devpriv); |
| 814 | 812 | ||
| 815 | if (devpriv->model == VMK8055_MODEL) | 813 | if (devpriv->model == VMK8055_MODEL) |
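The two comedi hunks above (ni_usb6501 and vmk80xx) follow the same shape. First, the buffer-allocation helper no longer frees the RX buffer when the TX allocation fails; the drivers' detach paths already free both buffers, so freeing here as well risked a double free. Second, the lock setup (mutex_init() or sema_init()) and usb_set_intfdata() move ahead of the first call that can fail, so an attach that errors out early no longer leaves the cleanup path working with an uninitialised lock. A minimal model of that ordering rule (hypothetical names, not the comedi API):

    #include <errno.h>
    #include <stdlib.h>

    /* Illustrative only: initialise whatever the cleanup path relies on
     * before the first failure point, and let that single cleanup path own
     * all the freeing, so an early error cannot double-free a buffer. */
    struct priv { int lock_ready; void *rx; void *tx; };

    static void detach(struct priv *p)
    {
        if (!p->lock_ready)      /* lock is always ready by this point */
            return;
        free(p->rx);             /* the only place buffers are freed */
        free(p->tx);
    }

    static int attach(struct priv *p)
    {
        p->lock_ready = 1;       /* done before anything can fail */
        p->rx = malloc(64);
        if (!p->rx)
            return -ENOMEM;
        p->tx = malloc(64);
        if (!p->tx)
            return -ENOMEM;      /* rx is deliberately NOT freed here */
        return 0;
    }

    int main(void)
    {
        struct priv p = { 0 };
        attach(&p);              /* on error, teardown still runs detach() once */
        detach(&p);
        return 0;
    }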
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c index 526e0dbea5b5..81af768e7248 100644 --- a/drivers/staging/erofs/data.c +++ b/drivers/staging/erofs/data.c | |||
| @@ -298,7 +298,7 @@ submit_bio_retry: | |||
| 298 | *last_block = current_block; | 298 | *last_block = current_block; |
| 299 | 299 | ||
| 300 | /* shift in advance in case of it followed by too many gaps */ | 300 | /* shift in advance in case of it followed by too many gaps */ |
| 301 | if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) { | 301 | if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { |
| 302 | /* err should reassign to 0 after submitting */ | 302 | /* err should reassign to 0 after submitting */ |
| 303 | err = 0; | 303 | err = 0; |
| 304 | goto submit_bio_out; | 304 | goto submit_bio_out; |
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index acdbc07fd259..2fc8bc22b57b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c | |||
| @@ -109,10 +109,10 @@ | |||
| 109 | #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ | 109 | #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ |
| 110 | #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ | 110 | #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ |
| 111 | 111 | ||
| 112 | #define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */ | 112 | #define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */ |
| 113 | #define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */ | 113 | #define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */ |
| 114 | #define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */ | 114 | #define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */ |
| 115 | #define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */ | 115 | #define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */ |
| 116 | #define AD7193_CH_TEMP 0x100 /* Temp senseor */ | 116 | #define AD7193_CH_TEMP 0x100 /* Temp senseor */ |
| 117 | #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ | 117 | #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ |
| 118 | #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ | 118 | #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ |
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c index 029c3bf42d4d..07774c000c5a 100644 --- a/drivers/staging/iio/meter/ade7854.c +++ b/drivers/staging/iio/meter/ade7854.c | |||
| @@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644, | |||
| 269 | static IIO_DEV_ATTR_IPEAK(0644, | 269 | static IIO_DEV_ATTR_IPEAK(0644, |
| 270 | ade7854_read_32bit, | 270 | ade7854_read_32bit, |
| 271 | ade7854_write_32bit, | 271 | ade7854_write_32bit, |
| 272 | ADE7854_VPEAK); | 272 | ADE7854_IPEAK); |
| 273 | static IIO_DEV_ATTR_APHCAL(0644, | 273 | static IIO_DEV_ATTR_APHCAL(0644, |
| 274 | ade7854_read_16bit, | 274 | ade7854_read_16bit, |
| 275 | ade7854_write_16bit, | 275 | ade7854_write_16bit, |
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c index 18936cdb1083..956daf8c3bd2 100644 --- a/drivers/staging/most/core.c +++ b/drivers/staging/most/core.c | |||
| @@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface) | |||
| 1431 | 1431 | ||
| 1432 | INIT_LIST_HEAD(&iface->p->channel_list); | 1432 | INIT_LIST_HEAD(&iface->p->channel_list); |
| 1433 | iface->p->dev_id = id; | 1433 | iface->p->dev_id = id; |
| 1434 | snprintf(iface->p->name, STRING_SIZE, "mdev%d", id); | 1434 | strcpy(iface->p->name, iface->description); |
| 1435 | iface->dev.init_name = iface->p->name; | 1435 | iface->dev.init_name = iface->p->name; |
| 1436 | iface->dev.bus = &mc.bus; | 1436 | iface->dev.bus = &mc.bus; |
| 1437 | iface->dev.parent = &mc.dev; | 1437 | iface->dev.parent = &mc.dev; |
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 09a183dfc526..a31db15cd7c0 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c | |||
| @@ -1520,11 +1520,13 @@ static int __init sc16is7xx_init(void) | |||
| 1520 | #endif | 1520 | #endif |
| 1521 | return ret; | 1521 | return ret; |
| 1522 | 1522 | ||
| 1523 | #ifdef CONFIG_SERIAL_SC16IS7XX_SPI | ||
| 1523 | err_spi: | 1524 | err_spi: |
| 1525 | #endif | ||
| 1524 | #ifdef CONFIG_SERIAL_SC16IS7XX_I2C | 1526 | #ifdef CONFIG_SERIAL_SC16IS7XX_I2C |
| 1525 | i2c_del_driver(&sc16is7xx_i2c_uart_driver); | 1527 | i2c_del_driver(&sc16is7xx_i2c_uart_driver); |
| 1526 | #endif | ||
| 1527 | err_i2c: | 1528 | err_i2c: |
| 1529 | #endif | ||
| 1528 | uart_unregister_driver(&sc16is7xx_uart); | 1530 | uart_unregister_driver(&sc16is7xx_uart); |
| 1529 | return ret; | 1531 | return ret; |
| 1530 | } | 1532 | } |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 2d1c626312cd..3cd139752d3f 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
| @@ -2512,14 +2512,16 @@ done: | |||
| 2512 | * center of the last stop bit in sampling clocks. | 2512 | * center of the last stop bit in sampling clocks. |
| 2513 | */ | 2513 | */ |
| 2514 | int last_stop = bits * 2 - 1; | 2514 | int last_stop = bits * 2 - 1; |
| 2515 | int deviation = min_err * srr * last_stop / 2 / baud; | 2515 | int deviation = DIV_ROUND_CLOSEST(min_err * last_stop * |
| 2516 | (int)(srr + 1), | ||
| 2517 | 2 * (int)baud); | ||
| 2516 | 2518 | ||
| 2517 | if (abs(deviation) >= 2) { | 2519 | if (abs(deviation) >= 2) { |
| 2518 | /* At least two sampling clocks off at the | 2520 | /* At least two sampling clocks off at the |
| 2519 | * last stop bit; we can increase the error | 2521 | * last stop bit; we can increase the error |
| 2520 | * margin by shifting the sampling point. | 2522 | * margin by shifting the sampling point. |
| 2521 | */ | 2523 | */ |
| 2522 | int shift = min(-8, max(7, deviation / 2)); | 2524 | int shift = clamp(deviation / 2, -8, 7); |
| 2523 | 2525 | ||
| 2524 | hssrr |= (shift << HSCIF_SRHP_SHIFT) & | 2526 | hssrr |= (shift << HSCIF_SRHP_SHIFT) & |
| 2525 | HSCIF_SRHP_MASK; | 2527 | HSCIF_SRHP_MASK; |
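Two things change in the sh-sci hunk above: the last-stop-bit deviation is now computed with DIV_ROUND_CLOSEST() against (srr + 1) sampling clocks, and the sampling-point shift is bounded with clamp(). The old bound, min(-8, max(7, deviation / 2)), is arithmetically wrong: max(7, x) is always at least 7, and min(-8, anything >= 7) is always -8, so the shift was pinned at -8 regardless of the deviation. A tiny check of the two forms (illustrative only, plain C rather than the kernel macros):

    #include <stdio.h>

    static int shift_old(int d)          /* min(-8, max(7, d / 2)) */
    {
        int m = (d / 2 > 7) ? d / 2 : 7; /* max(7, d / 2) */
        return (-8 < m) ? -8 : m;        /* min(-8, m): always -8 */
    }

    static int shift_new(int d)          /* clamp(d / 2, -8, 7) */
    {
        int s = d / 2;
        if (s < -8) return -8;
        if (s > 7)  return 7;
        return s;
    }

    int main(void)
    {
        for (int d = -20; d <= 20; d += 10)
            printf("d=%3d old=%d new=%d\n", d, shift_old(d), shift_new(d));
        return 0;
    }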
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index d34984aa646d..650c66886c80 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
| @@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar) | |||
| 1520 | return; | 1520 | return; |
| 1521 | } | 1521 | } |
| 1522 | scr_memsetw(start, vc->vc_video_erase_char, 2 * count); | 1522 | scr_memsetw(start, vc->vc_video_erase_char, 2 * count); |
| 1523 | update_region(vc, (unsigned long) start, count); | 1523 | if (con_should_update(vc)) |
| 1524 | do_update_region(vc, (unsigned long) start, count); | ||
| 1524 | vc->vc_need_wrap = 0; | 1525 | vc->vc_need_wrap = 0; |
| 1525 | } | 1526 | } |
| 1526 | 1527 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index a25659b5a5d1..3fa20e95a6bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void) | |||
| 1661 | rc = pci_add_dynid(&vfio_pci_driver, vendor, device, | 1661 | rc = pci_add_dynid(&vfio_pci_driver, vendor, device, |
| 1662 | subvendor, subdevice, class, class_mask, 0); | 1662 | subvendor, subdevice, class, class_mask, 0); |
| 1663 | if (rc) | 1663 | if (rc) |
| 1664 | pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", | 1664 | pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", |
| 1665 | vendor, device, subvendor, subdevice, | 1665 | vendor, device, subvendor, subdevice, |
| 1666 | class, class_mask, rc); | 1666 | class, class_mask, rc); |
| 1667 | else | 1667 | else |
| 1668 | pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", | 1668 | pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", |
| 1669 | vendor, device, subvendor, subdevice, | 1669 | vendor, device, subvendor, subdevice, |
| 1670 | class, class_mask); | 1670 | class, class_mask); |
| 1671 | } | 1671 | } |
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 8dbb270998f4..6b64e45a5269 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
| @@ -1398,7 +1398,7 @@ unlock_exit: | |||
| 1398 | mutex_unlock(&container->lock); | 1398 | mutex_unlock(&container->lock); |
| 1399 | } | 1399 | } |
| 1400 | 1400 | ||
| 1401 | const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { | 1401 | static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { |
| 1402 | .name = "iommu-vfio-powerpc", | 1402 | .name = "iommu-vfio-powerpc", |
| 1403 | .owner = THIS_MODULE, | 1403 | .owner = THIS_MODULE, |
| 1404 | .open = tce_iommu_open, | 1404 | .open = tce_iommu_open, |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 73652e21efec..d0f731c9920a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -58,12 +58,18 @@ module_param_named(disable_hugepages, | |||
| 58 | MODULE_PARM_DESC(disable_hugepages, | 58 | MODULE_PARM_DESC(disable_hugepages, |
| 59 | "Disable VFIO IOMMU support for IOMMU hugepages."); | 59 | "Disable VFIO IOMMU support for IOMMU hugepages."); |
| 60 | 60 | ||
| 61 | static unsigned int dma_entry_limit __read_mostly = U16_MAX; | ||
| 62 | module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); | ||
| 63 | MODULE_PARM_DESC(dma_entry_limit, | ||
| 64 | "Maximum number of user DMA mappings per container (65535)."); | ||
| 65 | |||
| 61 | struct vfio_iommu { | 66 | struct vfio_iommu { |
| 62 | struct list_head domain_list; | 67 | struct list_head domain_list; |
| 63 | struct vfio_domain *external_domain; /* domain for external user */ | 68 | struct vfio_domain *external_domain; /* domain for external user */ |
| 64 | struct mutex lock; | 69 | struct mutex lock; |
| 65 | struct rb_root dma_list; | 70 | struct rb_root dma_list; |
| 66 | struct blocking_notifier_head notifier; | 71 | struct blocking_notifier_head notifier; |
| 72 | unsigned int dma_avail; | ||
| 67 | bool v2; | 73 | bool v2; |
| 68 | bool nesting; | 74 | bool nesting; |
| 69 | }; | 75 | }; |
| @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) | |||
| 836 | vfio_unlink_dma(iommu, dma); | 842 | vfio_unlink_dma(iommu, dma); |
| 837 | put_task_struct(dma->task); | 843 | put_task_struct(dma->task); |
| 838 | kfree(dma); | 844 | kfree(dma); |
| 845 | iommu->dma_avail++; | ||
| 839 | } | 846 | } |
| 840 | 847 | ||
| 841 | static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) | 848 | static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) |
| @@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
| 1081 | goto out_unlock; | 1088 | goto out_unlock; |
| 1082 | } | 1089 | } |
| 1083 | 1090 | ||
| 1091 | if (!iommu->dma_avail) { | ||
| 1092 | ret = -ENOSPC; | ||
| 1093 | goto out_unlock; | ||
| 1094 | } | ||
| 1095 | |||
| 1084 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); | 1096 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); |
| 1085 | if (!dma) { | 1097 | if (!dma) { |
| 1086 | ret = -ENOMEM; | 1098 | ret = -ENOMEM; |
| 1087 | goto out_unlock; | 1099 | goto out_unlock; |
| 1088 | } | 1100 | } |
| 1089 | 1101 | ||
| 1102 | iommu->dma_avail--; | ||
| 1090 | dma->iova = iova; | 1103 | dma->iova = iova; |
| 1091 | dma->vaddr = vaddr; | 1104 | dma->vaddr = vaddr; |
| 1092 | dma->prot = prot; | 1105 | dma->prot = prot; |
| @@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) | |||
| 1583 | 1596 | ||
| 1584 | INIT_LIST_HEAD(&iommu->domain_list); | 1597 | INIT_LIST_HEAD(&iommu->domain_list); |
| 1585 | iommu->dma_list = RB_ROOT; | 1598 | iommu->dma_list = RB_ROOT; |
| 1599 | iommu->dma_avail = dma_entry_limit; | ||
| 1586 | mutex_init(&iommu->lock); | 1600 | mutex_init(&iommu->lock); |
| 1587 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); | 1601 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); |
| 1588 | 1602 | ||
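The vfio type1 hunk above gives each container a budget of DMA mapping entries: dma_avail starts at the new dma_entry_limit module parameter (default U16_MAX, i.e. 65535), each successful map consumes one entry, each removed mapping returns one, and a map attempt with the budget exhausted now fails with -ENOSPC rather than letting userspace grow the tracking structures without bound. A condensed sketch of the accounting (illustrative only; the real code does this under the container lock):

    #include <errno.h>
    #include <stdio.h>

    struct container { unsigned int dma_avail; };

    static int do_map(struct container *c)
    {
        if (!c->dma_avail)
            return -ENOSPC;   /* budget exhausted: refuse the mapping */
        c->dma_avail--;       /* one entry consumed per tracked mapping */
        return 0;
    }

    static void do_unmap(struct container *c)
    {
        c->dma_avail++;       /* removing a mapping returns its entry */
    }

    int main(void)
    {
        struct container c = { .dma_avail = 2 };                  /* tiny demo limit */
        printf("%d %d %d\n", do_map(&c), do_map(&c), do_map(&c)); /* third call fails */
        do_unmap(&c);
        printf("%d\n", do_map(&c));                               /* succeeds again */
        return 0;
    }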
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5ace833de746..351af88231ad 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem, | |||
| 911 | u64 start, u64 size, u64 end, | 911 | u64 start, u64 size, u64 end, |
| 912 | u64 userspace_addr, int perm) | 912 | u64 userspace_addr, int perm) |
| 913 | { | 913 | { |
| 914 | struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC); | 914 | struct vhost_umem_node *tmp, *node; |
| 915 | 915 | ||
| 916 | if (!size) | ||
| 917 | return -EFAULT; | ||
| 918 | |||
| 919 | node = kmalloc(sizeof(*node), GFP_ATOMIC); | ||
| 916 | if (!node) | 920 | if (!node) |
| 917 | return -ENOMEM; | 921 | return -ENOMEM; |
| 918 | 922 | ||
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index d0584c040c60..7a0398bb84f7 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev) | |||
| 255 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) | 255 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) |
| 256 | free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); | 256 | free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); |
| 257 | 257 | ||
| 258 | for (i = 0; i < vp_dev->msix_vectors; i++) | 258 | if (vp_dev->msix_affinity_masks) { |
| 259 | if (vp_dev->msix_affinity_masks[i]) | 259 | for (i = 0; i < vp_dev->msix_vectors; i++) |
| 260 | free_cpumask_var(vp_dev->msix_affinity_masks[i]); | 260 | if (vp_dev->msix_affinity_masks[i]) |
| 261 | free_cpumask_var(vp_dev->msix_affinity_masks[i]); | ||
| 262 | } | ||
| 261 | 263 | ||
| 262 | if (vp_dev->msix_enabled) { | 264 | if (vp_dev->msix_enabled) { |
| 263 | /* Disable the vector used for configuration */ | 265 | /* Disable the vector used for configuration */ |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 18846afb39da..5df92c308286 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
| @@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split( | |||
| 882 | GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); | 882 | GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); |
| 883 | if (queue) | 883 | if (queue) |
| 884 | break; | 884 | break; |
| 885 | if (!may_reduce_num) | ||
| 886 | return NULL; | ||
| 885 | } | 887 | } |
| 886 | 888 | ||
| 887 | if (!num) | 889 | if (!num) |
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c index de01a6d0059d..a1c61e351d3f 100644 --- a/drivers/xen/privcmd-buf.c +++ b/drivers/xen/privcmd-buf.c | |||
| @@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 140 | if (!(vma->vm_flags & VM_SHARED)) | 140 | if (!(vma->vm_flags & VM_SHARED)) |
| 141 | return -EINVAL; | 141 | return -EINVAL; |
| 142 | 142 | ||
| 143 | vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), | 143 | vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL); |
| 144 | GFP_KERNEL); | ||
| 145 | if (!vma_priv) | 144 | if (!vma_priv) |
| 146 | return -ENOMEM; | 145 | return -ENOMEM; |
| 147 | 146 | ||
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index c3e201025ef0..0782ff3c2273 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
| @@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp) | |||
| 622 | if (xen_store_evtchn == 0) | 622 | if (xen_store_evtchn == 0) |
| 623 | return -ENOENT; | 623 | return -ENOENT; |
| 624 | 624 | ||
| 625 | nonseekable_open(inode, filp); | 625 | stream_open(inode, filp); |
| 626 | |||
| 627 | filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */ | ||
| 628 | 626 | ||
| 629 | u = kzalloc(sizeof(*u), GFP_KERNEL); | 627 | u = kzalloc(sizeof(*u), GFP_KERNEL); |
| 630 | if (u == NULL) | 628 | if (u == NULL) |
diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 1c7955f5cdaf..128f2dbe256a 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c | |||
| @@ -203,8 +203,7 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi) | |||
| 203 | */ | 203 | */ |
| 204 | void afs_init_callback_state(struct afs_server *server) | 204 | void afs_init_callback_state(struct afs_server *server) |
| 205 | { | 205 | { |
| 206 | if (!test_and_clear_bit(AFS_SERVER_FL_NEW, &server->flags)) | 206 | server->cb_s_break++; |
| 207 | server->cb_s_break++; | ||
| 208 | } | 207 | } |
| 209 | 208 | ||
| 210 | /* | 209 | /* |
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 8ee5972893ed..2f8acb4c556d 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
| @@ -34,7 +34,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); | |||
| 34 | static int afs_deliver_yfs_cb_callback(struct afs_call *); | 34 | static int afs_deliver_yfs_cb_callback(struct afs_call *); |
| 35 | 35 | ||
| 36 | #define CM_NAME(name) \ | 36 | #define CM_NAME(name) \ |
| 37 | const char afs_SRXCB##name##_name[] __tracepoint_string = \ | 37 | char afs_SRXCB##name##_name[] __tracepoint_string = \ |
| 38 | "CB." #name | 38 | "CB." #name |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 1a4ce07fb406..9cedc3fc1b77 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
| @@ -216,9 +216,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) | |||
| 216 | set_nlink(inode, 2); | 216 | set_nlink(inode, 2); |
| 217 | inode->i_uid = GLOBAL_ROOT_UID; | 217 | inode->i_uid = GLOBAL_ROOT_UID; |
| 218 | inode->i_gid = GLOBAL_ROOT_GID; | 218 | inode->i_gid = GLOBAL_ROOT_GID; |
| 219 | inode->i_ctime.tv_sec = get_seconds(); | 219 | inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode); |
| 220 | inode->i_ctime.tv_nsec = 0; | ||
| 221 | inode->i_atime = inode->i_mtime = inode->i_ctime; | ||
| 222 | inode->i_blocks = 0; | 220 | inode->i_blocks = 0; |
| 223 | inode_set_iversion_raw(inode, 0); | 221 | inode_set_iversion_raw(inode, 0); |
| 224 | inode->i_generation = 0; | 222 | inode->i_generation = 0; |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index bb1f244b2b3a..3904ab0b9563 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
| @@ -474,7 +474,6 @@ struct afs_server { | |||
| 474 | time64_t put_time; /* Time at which last put */ | 474 | time64_t put_time; /* Time at which last put */ |
| 475 | time64_t update_at; /* Time at which to next update the record */ | 475 | time64_t update_at; /* Time at which to next update the record */ |
| 476 | unsigned long flags; | 476 | unsigned long flags; |
| 477 | #define AFS_SERVER_FL_NEW 0 /* New server, don't inc cb_s_break */ | ||
| 478 | #define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */ | 477 | #define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */ |
| 479 | #define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */ | 478 | #define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */ |
| 480 | #define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */ | 479 | #define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */ |
| @@ -827,7 +826,7 @@ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest | |||
| 827 | 826 | ||
| 828 | static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode) | 827 | static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode) |
| 829 | { | 828 | { |
| 830 | return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break; | 829 | return vnode->cb_break + vnode->cb_v_break; |
| 831 | } | 830 | } |
| 832 | 831 | ||
| 833 | static inline bool afs_cb_is_broken(unsigned int cb_break, | 832 | static inline bool afs_cb_is_broken(unsigned int cb_break, |
| @@ -835,7 +834,6 @@ static inline bool afs_cb_is_broken(unsigned int cb_break, | |||
| 835 | const struct afs_cb_interest *cbi) | 834 | const struct afs_cb_interest *cbi) |
| 836 | { | 835 | { |
| 837 | return !cbi || cb_break != (vnode->cb_break + | 836 | return !cbi || cb_break != (vnode->cb_break + |
| 838 | cbi->server->cb_s_break + | ||
| 839 | vnode->volume->cb_v_break); | 837 | vnode->volume->cb_v_break); |
| 840 | } | 838 | } |
| 841 | 839 | ||
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 2c588f9bbbda..15c7e82d80cb 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -572,13 +572,17 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 572 | case -ENODATA: | 572 | case -ENODATA: |
| 573 | case -EBADMSG: | 573 | case -EBADMSG: |
| 574 | case -EMSGSIZE: | 574 | case -EMSGSIZE: |
| 575 | default: | ||
| 576 | abort_code = RXGEN_CC_UNMARSHAL; | 575 | abort_code = RXGEN_CC_UNMARSHAL; |
| 577 | if (state != AFS_CALL_CL_AWAIT_REPLY) | 576 | if (state != AFS_CALL_CL_AWAIT_REPLY) |
| 578 | abort_code = RXGEN_SS_UNMARSHAL; | 577 | abort_code = RXGEN_SS_UNMARSHAL; |
| 579 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | 578 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, |
| 580 | abort_code, ret, "KUM"); | 579 | abort_code, ret, "KUM"); |
| 581 | goto local_abort; | 580 | goto local_abort; |
| 581 | default: | ||
| 582 | abort_code = RX_USER_ABORT; | ||
| 583 | rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | ||
| 584 | abort_code, ret, "KER"); | ||
| 585 | goto local_abort; | ||
| 582 | } | 586 | } |
| 583 | } | 587 | } |
| 584 | 588 | ||
| @@ -610,6 +614,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
| 610 | bool stalled = false; | 614 | bool stalled = false; |
| 611 | u64 rtt; | 615 | u64 rtt; |
| 612 | u32 life, last_life; | 616 | u32 life, last_life; |
| 617 | bool rxrpc_complete = false; | ||
| 613 | 618 | ||
| 614 | DECLARE_WAITQUEUE(myself, current); | 619 | DECLARE_WAITQUEUE(myself, current); |
| 615 | 620 | ||
| @@ -621,7 +626,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
| 621 | rtt2 = 2; | 626 | rtt2 = 2; |
| 622 | 627 | ||
| 623 | timeout = rtt2; | 628 | timeout = rtt2; |
| 624 | last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); | 629 | rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life); |
| 625 | 630 | ||
| 626 | add_wait_queue(&call->waitq, &myself); | 631 | add_wait_queue(&call->waitq, &myself); |
| 627 | for (;;) { | 632 | for (;;) { |
| @@ -639,7 +644,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
| 639 | if (afs_check_call_state(call, AFS_CALL_COMPLETE)) | 644 | if (afs_check_call_state(call, AFS_CALL_COMPLETE)) |
| 640 | break; | 645 | break; |
| 641 | 646 | ||
| 642 | life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); | 647 | if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) { |
| 648 | /* rxrpc terminated the call. */ | ||
| 649 | rxrpc_complete = true; | ||
| 650 | break; | ||
| 651 | } | ||
| 652 | |||
| 643 | if (timeout == 0 && | 653 | if (timeout == 0 && |
| 644 | life == last_life && signal_pending(current)) { | 654 | life == last_life && signal_pending(current)) { |
| 645 | if (stalled) | 655 | if (stalled) |
| @@ -663,12 +673,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, | |||
| 663 | remove_wait_queue(&call->waitq, &myself); | 673 | remove_wait_queue(&call->waitq, &myself); |
| 664 | __set_current_state(TASK_RUNNING); | 674 | __set_current_state(TASK_RUNNING); |
| 665 | 675 | ||
| 666 | /* Kill off the call if it's still live. */ | ||
| 667 | if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) { | 676 | if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) { |
| 668 | _debug("call interrupted"); | 677 | if (rxrpc_complete) { |
| 669 | if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | 678 | afs_set_call_complete(call, call->error, call->abort_code); |
| 670 | RX_USER_ABORT, -EINTR, "KWI")) | 679 | } else { |
| 671 | afs_set_call_complete(call, -EINTR, 0); | 680 | /* Kill off the call if it's still live. */ |
| 681 | _debug("call interrupted"); | ||
| 682 | if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, | ||
| 683 | RX_USER_ABORT, -EINTR, "KWI")) | ||
| 684 | afs_set_call_complete(call, -EINTR, 0); | ||
| 685 | } | ||
| 672 | } | 686 | } |
| 673 | 687 | ||
| 674 | spin_lock_bh(&call->state_lock); | 688 | spin_lock_bh(&call->state_lock); |
diff --git a/fs/afs/server.c b/fs/afs/server.c index 642afa2e9783..65b33b6da48b 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
| @@ -226,7 +226,6 @@ static struct afs_server *afs_alloc_server(struct afs_net *net, | |||
| 226 | RCU_INIT_POINTER(server->addresses, alist); | 226 | RCU_INIT_POINTER(server->addresses, alist); |
| 227 | server->addr_version = alist->version; | 227 | server->addr_version = alist->version; |
| 228 | server->uuid = *uuid; | 228 | server->uuid = *uuid; |
| 229 | server->flags = (1UL << AFS_SERVER_FL_NEW); | ||
| 230 | server->update_at = ktime_get_real_seconds() + afs_server_update_delay; | 229 | server->update_at = ktime_get_real_seconds() + afs_server_update_delay; |
| 231 | rwlock_init(&server->fs_lock); | 230 | rwlock_init(&server->fs_lock); |
| 232 | INIT_HLIST_HEAD(&server->cb_volumes); | 231 | INIT_HLIST_HEAD(&server->cb_volumes); |
diff --git a/fs/afs/write.c b/fs/afs/write.c index 72efcfcf9f95..0122d7445fba 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
| @@ -264,6 +264,7 @@ static void afs_kill_pages(struct address_space *mapping, | |||
| 264 | first = page->index + 1; | 264 | first = page->index + 1; |
| 265 | lock_page(page); | 265 | lock_page(page); |
| 266 | generic_error_remove_page(mapping, page); | 266 | generic_error_remove_page(mapping, page); |
| 267 | unlock_page(page); | ||
| 267 | } | 268 | } |
| 268 | 269 | ||
| 269 | __pagevec_release(&pv); | 270 | __pagevec_release(&pv); |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -181,7 +181,7 @@ struct poll_iocb { | |||
| 181 | struct file *file; | 181 | struct file *file; |
| 182 | struct wait_queue_head *head; | 182 | struct wait_queue_head *head; |
| 183 | __poll_t events; | 183 | __poll_t events; |
| 184 | bool woken; | 184 | bool done; |
| 185 | bool cancelled; | 185 | bool cancelled; |
| 186 | struct wait_queue_entry wait; | 186 | struct wait_queue_entry wait; |
| 187 | struct work_struct work; | 187 | struct work_struct work; |
| @@ -204,8 +204,7 @@ struct aio_kiocb { | |||
| 204 | struct kioctx *ki_ctx; | 204 | struct kioctx *ki_ctx; |
| 205 | kiocb_cancel_fn *ki_cancel; | 205 | kiocb_cancel_fn *ki_cancel; |
| 206 | 206 | ||
| 207 | struct iocb __user *ki_user_iocb; /* user's aiocb */ | 207 | struct io_event ki_res; |
| 208 | __u64 ki_user_data; /* user's data for completion */ | ||
| 209 | 208 | ||
| 210 | struct list_head ki_list; /* the aio core uses this | 209 | struct list_head ki_list; /* the aio core uses this |
| 211 | * for cancellation */ | 210 | * for cancellation */ |
| @@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx) | |||
| 1022 | /* aio_get_req | 1021 | /* aio_get_req |
| 1023 | * Allocate a slot for an aio request. | 1022 | * Allocate a slot for an aio request. |
| 1024 | * Returns NULL if no requests are free. | 1023 | * Returns NULL if no requests are free. |
| 1024 | * | ||
| 1025 | * The refcount is initialized to 2 - one for the async op completion, | ||
| 1026 | * one for the synchronous code that does this. | ||
| 1025 | */ | 1027 | */ |
| 1026 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) | 1028 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) |
| 1027 | { | 1029 | { |
| @@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) | |||
| 1031 | if (unlikely(!req)) | 1033 | if (unlikely(!req)) |
| 1032 | return NULL; | 1034 | return NULL; |
| 1033 | 1035 | ||
| 1036 | if (unlikely(!get_reqs_available(ctx))) { | ||
| 1037 | kmem_cache_free(kiocb_cachep, req); | ||
| 1038 | return NULL; | ||
| 1039 | } | ||
| 1040 | |||
| 1034 | percpu_ref_get(&ctx->reqs); | 1041 | percpu_ref_get(&ctx->reqs); |
| 1035 | req->ki_ctx = ctx; | 1042 | req->ki_ctx = ctx; |
| 1036 | INIT_LIST_HEAD(&req->ki_list); | 1043 | INIT_LIST_HEAD(&req->ki_list); |
| 1037 | refcount_set(&req->ki_refcnt, 0); | 1044 | refcount_set(&req->ki_refcnt, 2); |
| 1038 | req->ki_eventfd = NULL; | 1045 | req->ki_eventfd = NULL; |
| 1039 | return req; | 1046 | return req; |
| 1040 | } | 1047 | } |
| @@ -1067,30 +1074,20 @@ out: | |||
| 1067 | return ret; | 1074 | return ret; |
| 1068 | } | 1075 | } |
| 1069 | 1076 | ||
| 1070 | static inline void iocb_put(struct aio_kiocb *iocb) | 1077 | static inline void iocb_destroy(struct aio_kiocb *iocb) |
| 1071 | { | ||
| 1072 | if (refcount_read(&iocb->ki_refcnt) == 0 || | ||
| 1073 | refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
| 1074 | if (iocb->ki_filp) | ||
| 1075 | fput(iocb->ki_filp); | ||
| 1076 | percpu_ref_put(&iocb->ki_ctx->reqs); | ||
| 1077 | kmem_cache_free(kiocb_cachep, iocb); | ||
| 1078 | } | ||
| 1079 | } | ||
| 1080 | |||
| 1081 | static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb, | ||
| 1082 | long res, long res2) | ||
| 1083 | { | 1078 | { |
| 1084 | ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; | 1079 | if (iocb->ki_eventfd) |
| 1085 | ev->data = iocb->ki_user_data; | 1080 | eventfd_ctx_put(iocb->ki_eventfd); |
| 1086 | ev->res = res; | 1081 | if (iocb->ki_filp) |
| 1087 | ev->res2 = res2; | 1082 | fput(iocb->ki_filp); |
| 1083 | percpu_ref_put(&iocb->ki_ctx->reqs); | ||
| 1084 | kmem_cache_free(kiocb_cachep, iocb); | ||
| 1088 | } | 1085 | } |
| 1089 | 1086 | ||
| 1090 | /* aio_complete | 1087 | /* aio_complete |
| 1091 | * Called when the io request on the given iocb is complete. | 1088 | * Called when the io request on the given iocb is complete. |
| 1092 | */ | 1089 | */ |
| 1093 | static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | 1090 | static void aio_complete(struct aio_kiocb *iocb) |
| 1094 | { | 1091 | { |
| 1095 | struct kioctx *ctx = iocb->ki_ctx; | 1092 | struct kioctx *ctx = iocb->ki_ctx; |
| 1096 | struct aio_ring *ring; | 1093 | struct aio_ring *ring; |
| @@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1114 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | 1111 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
| 1115 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; | 1112 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
| 1116 | 1113 | ||
| 1117 | aio_fill_event(event, iocb, res, res2); | 1114 | *event = iocb->ki_res; |
| 1118 | 1115 | ||
| 1119 | kunmap_atomic(ev_page); | 1116 | kunmap_atomic(ev_page); |
| 1120 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | 1117 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
| 1121 | 1118 | ||
| 1122 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | 1119 | pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, |
| 1123 | ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, | 1120 | (void __user *)(unsigned long)iocb->ki_res.obj, |
| 1124 | res, res2); | 1121 | iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); |
| 1125 | 1122 | ||
| 1126 | /* after flagging the request as done, we | 1123 | /* after flagging the request as done, we |
| 1127 | * must never even look at it again | 1124 | * must never even look at it again |
| @@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1148 | * eventfd. The eventfd_signal() function is safe to be called | 1145 | * eventfd. The eventfd_signal() function is safe to be called |
| 1149 | * from IRQ context. | 1146 | * from IRQ context. |
| 1150 | */ | 1147 | */ |
| 1151 | if (iocb->ki_eventfd) { | 1148 | if (iocb->ki_eventfd) |
| 1152 | eventfd_signal(iocb->ki_eventfd, 1); | 1149 | eventfd_signal(iocb->ki_eventfd, 1); |
| 1153 | eventfd_ctx_put(iocb->ki_eventfd); | ||
| 1154 | } | ||
| 1155 | 1150 | ||
| 1156 | /* | 1151 | /* |
| 1157 | * We have to order our ring_info tail store above and test | 1152 | * We have to order our ring_info tail store above and test |
| @@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | |||
| 1163 | 1158 | ||
| 1164 | if (waitqueue_active(&ctx->wait)) | 1159 | if (waitqueue_active(&ctx->wait)) |
| 1165 | wake_up(&ctx->wait); | 1160 | wake_up(&ctx->wait); |
| 1166 | iocb_put(iocb); | 1161 | } |
| 1162 | |||
| 1163 | static inline void iocb_put(struct aio_kiocb *iocb) | ||
| 1164 | { | ||
| 1165 | if (refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
| 1166 | aio_complete(iocb); | ||
| 1167 | iocb_destroy(iocb); | ||
| 1168 | } | ||
| 1167 | } | 1169 | } |
| 1168 | 1170 | ||
| 1169 | /* aio_read_events_ring | 1171 | /* aio_read_events_ring |
| @@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) | |||
| 1437 | file_end_write(kiocb->ki_filp); | 1439 | file_end_write(kiocb->ki_filp); |
| 1438 | } | 1440 | } |
| 1439 | 1441 | ||
| 1440 | aio_complete(iocb, res, res2); | 1442 | iocb->ki_res.res = res; |
| 1443 | iocb->ki_res.res2 = res2; | ||
| 1444 | iocb_put(iocb); | ||
| 1441 | } | 1445 | } |
| 1442 | 1446 | ||
| 1443 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) | 1447 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) |
| @@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) | |||
| 1514 | } | 1518 | } |
| 1515 | } | 1519 | } |
| 1516 | 1520 | ||
| 1517 | static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, | 1521 | static int aio_read(struct kiocb *req, const struct iocb *iocb, |
| 1518 | bool vectored, bool compat) | 1522 | bool vectored, bool compat) |
| 1519 | { | 1523 | { |
| 1520 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | 1524 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1521 | struct iov_iter iter; | 1525 | struct iov_iter iter; |
| 1522 | struct file *file; | 1526 | struct file *file; |
| 1523 | ssize_t ret; | 1527 | int ret; |
| 1524 | 1528 | ||
| 1525 | ret = aio_prep_rw(req, iocb); | 1529 | ret = aio_prep_rw(req, iocb); |
| 1526 | if (ret) | 1530 | if (ret) |
| @@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, | |||
| 1542 | return ret; | 1546 | return ret; |
| 1543 | } | 1547 | } |
| 1544 | 1548 | ||
| 1545 | static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, | 1549 | static int aio_write(struct kiocb *req, const struct iocb *iocb, |
| 1546 | bool vectored, bool compat) | 1550 | bool vectored, bool compat) |
| 1547 | { | 1551 | { |
| 1548 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | 1552 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
| 1549 | struct iov_iter iter; | 1553 | struct iov_iter iter; |
| 1550 | struct file *file; | 1554 | struct file *file; |
| 1551 | ssize_t ret; | 1555 | int ret; |
| 1552 | 1556 | ||
| 1553 | ret = aio_prep_rw(req, iocb); | 1557 | ret = aio_prep_rw(req, iocb); |
| 1554 | if (ret) | 1558 | if (ret) |
| @@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, | |||
| 1585 | 1589 | ||
| 1586 | static void aio_fsync_work(struct work_struct *work) | 1590 | static void aio_fsync_work(struct work_struct *work) |
| 1587 | { | 1591 | { |
| 1588 | struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); | 1592 | struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); |
| 1589 | int ret; | ||
| 1590 | 1593 | ||
| 1591 | ret = vfs_fsync(req->file, req->datasync); | 1594 | iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); |
| 1592 | aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); | 1595 | iocb_put(iocb); |
| 1593 | } | 1596 | } |
| 1594 | 1597 | ||
| 1595 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, | 1598 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, |
| @@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, | |||
| 1608 | return 0; | 1611 | return 0; |
| 1609 | } | 1612 | } |
| 1610 | 1613 | ||
| 1611 | static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) | ||
| 1612 | { | ||
| 1613 | aio_complete(iocb, mangle_poll(mask), 0); | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | static void aio_poll_complete_work(struct work_struct *work) | 1614 | static void aio_poll_complete_work(struct work_struct *work) |
| 1617 | { | 1615 | { |
| 1618 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); | 1616 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); |
| @@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work) | |||
| 1638 | return; | 1636 | return; |
| 1639 | } | 1637 | } |
| 1640 | list_del_init(&iocb->ki_list); | 1638 | list_del_init(&iocb->ki_list); |
| 1639 | iocb->ki_res.res = mangle_poll(mask); | ||
| 1640 | req->done = true; | ||
| 1641 | spin_unlock_irq(&ctx->ctx_lock); | 1641 | spin_unlock_irq(&ctx->ctx_lock); |
| 1642 | 1642 | ||
| 1643 | aio_poll_complete(iocb, mask); | 1643 | iocb_put(iocb); |
| 1644 | } | 1644 | } |
| 1645 | 1645 | ||
| 1646 | /* assumes we are called with irqs disabled */ | 1646 | /* assumes we are called with irqs disabled */ |
| @@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, | |||
| 1668 | __poll_t mask = key_to_poll(key); | 1668 | __poll_t mask = key_to_poll(key); |
| 1669 | unsigned long flags; | 1669 | unsigned long flags; |
| 1670 | 1670 | ||
| 1671 | req->woken = true; | ||
| 1672 | |||
| 1673 | /* for instances that support it check for an event match first: */ | 1671 | /* for instances that support it check for an event match first: */ |
| 1674 | if (mask) { | 1672 | if (mask && !(mask & req->events)) |
| 1675 | if (!(mask & req->events)) | 1673 | return 0; |
| 1676 | return 0; | 1674 | |
| 1675 | list_del_init(&req->wait.entry); | ||
| 1677 | 1676 | ||
| 1677 | if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | ||
| 1678 | /* | 1678 | /* |
| 1679 | * Try to complete the iocb inline if we can. Use | 1679 | * Try to complete the iocb inline if we can. Use |
| 1680 | * irqsave/irqrestore because not all filesystems (e.g. fuse) | 1680 | * irqsave/irqrestore because not all filesystems (e.g. fuse) |
| 1681 | * call this function with IRQs disabled and because IRQs | 1681 | * call this function with IRQs disabled and because IRQs |
| 1682 | * have to be disabled before ctx_lock is obtained. | 1682 | * have to be disabled before ctx_lock is obtained. |
| 1683 | */ | 1683 | */ |
| 1684 | if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | 1684 | list_del(&iocb->ki_list); |
| 1685 | list_del(&iocb->ki_list); | 1685 | iocb->ki_res.res = mangle_poll(mask); |
| 1686 | spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | 1686 | req->done = true; |
| 1687 | 1687 | spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | |
| 1688 | list_del_init(&req->wait.entry); | 1688 | iocb_put(iocb); |
| 1689 | aio_poll_complete(iocb, mask); | 1689 | } else { |
| 1690 | return 1; | 1690 | schedule_work(&req->work); |
| 1691 | } | ||
| 1692 | } | 1691 | } |
| 1693 | |||
| 1694 | list_del_init(&req->wait.entry); | ||
| 1695 | schedule_work(&req->work); | ||
| 1696 | return 1; | 1692 | return 1; |
| 1697 | } | 1693 | } |
| 1698 | 1694 | ||
| @@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, | |||
| 1719 | add_wait_queue(head, &pt->iocb->poll.wait); | 1715 | add_wait_queue(head, &pt->iocb->poll.wait); |
| 1720 | } | 1716 | } |
| 1721 | 1717 | ||
| 1722 | static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | 1718 | static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
| 1723 | { | 1719 | { |
| 1724 | struct kioctx *ctx = aiocb->ki_ctx; | 1720 | struct kioctx *ctx = aiocb->ki_ctx; |
| 1725 | struct poll_iocb *req = &aiocb->poll; | 1721 | struct poll_iocb *req = &aiocb->poll; |
| 1726 | struct aio_poll_table apt; | 1722 | struct aio_poll_table apt; |
| 1723 | bool cancel = false; | ||
| 1727 | __poll_t mask; | 1724 | __poll_t mask; |
| 1728 | 1725 | ||
| 1729 | /* reject any unknown events outside the normal event mask. */ | 1726 | /* reject any unknown events outside the normal event mask. */ |
| @@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | |||
| 1737 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; | 1734 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; |
| 1738 | 1735 | ||
| 1739 | req->head = NULL; | 1736 | req->head = NULL; |
| 1740 | req->woken = false; | 1737 | req->done = false; |
| 1741 | req->cancelled = false; | 1738 | req->cancelled = false; |
| 1742 | 1739 | ||
| 1743 | apt.pt._qproc = aio_poll_queue_proc; | 1740 | apt.pt._qproc = aio_poll_queue_proc; |
| @@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | |||
| 1749 | INIT_LIST_HEAD(&req->wait.entry); | 1746 | INIT_LIST_HEAD(&req->wait.entry); |
| 1750 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); | 1747 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); |
| 1751 | 1748 | ||
| 1752 | /* one for removal from waitqueue, one for this function */ | ||
| 1753 | refcount_set(&aiocb->ki_refcnt, 2); | ||
| 1754 | |||
| 1755 | mask = vfs_poll(req->file, &apt.pt) & req->events; | 1749 | mask = vfs_poll(req->file, &apt.pt) & req->events; |
| 1756 | if (unlikely(!req->head)) { | ||
| 1757 | /* we did not manage to set up a waitqueue, done */ | ||
| 1758 | goto out; | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | spin_lock_irq(&ctx->ctx_lock); | 1750 | spin_lock_irq(&ctx->ctx_lock); |
| 1762 | spin_lock(&req->head->lock); | 1751 | if (likely(req->head)) { |
| 1763 | if (req->woken) { | 1752 | spin_lock(&req->head->lock); |
| 1764 | /* wake_up context handles the rest */ | 1753 | if (unlikely(list_empty(&req->wait.entry))) { |
| 1765 | mask = 0; | 1754 | if (apt.error) |
| 1755 | cancel = true; | ||
| 1756 | apt.error = 0; | ||
| 1757 | mask = 0; | ||
| 1758 | } | ||
| 1759 | if (mask || apt.error) { | ||
| 1760 | list_del_init(&req->wait.entry); | ||
| 1761 | } else if (cancel) { | ||
| 1762 | WRITE_ONCE(req->cancelled, true); | ||
| 1763 | } else if (!req->done) { /* actually waiting for an event */ | ||
| 1764 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
| 1765 | aiocb->ki_cancel = aio_poll_cancel; | ||
| 1766 | } | ||
| 1767 | spin_unlock(&req->head->lock); | ||
| 1768 | } | ||
| 1769 | if (mask) { /* no async, we'd stolen it */ | ||
| 1770 | aiocb->ki_res.res = mangle_poll(mask); | ||
| 1766 | apt.error = 0; | 1771 | apt.error = 0; |
| 1767 | } else if (mask || apt.error) { | ||
| 1768 | /* if we get an error or a mask we are done */ | ||
| 1769 | WARN_ON_ONCE(list_empty(&req->wait.entry)); | ||
| 1770 | list_del_init(&req->wait.entry); | ||
| 1771 | } else { | ||
| 1772 | /* actually waiting for an event */ | ||
| 1773 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
| 1774 | aiocb->ki_cancel = aio_poll_cancel; | ||
| 1775 | } | 1772 | } |
| 1776 | spin_unlock(&req->head->lock); | ||
| 1777 | spin_unlock_irq(&ctx->ctx_lock); | 1773 | spin_unlock_irq(&ctx->ctx_lock); |
| 1778 | |||
| 1779 | out: | ||
| 1780 | if (unlikely(apt.error)) | ||
| 1781 | return apt.error; | ||
| 1782 | |||
| 1783 | if (mask) | 1774 | if (mask) |
| 1784 | aio_poll_complete(aiocb, mask); | 1775 | iocb_put(aiocb); |
| 1785 | iocb_put(aiocb); | 1776 | return apt.error; |
| 1786 | return 0; | ||
| 1787 | } | 1777 | } |
| 1788 | 1778 | ||
| 1789 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, | 1779 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, |
| 1790 | struct iocb __user *user_iocb, bool compat) | 1780 | struct iocb __user *user_iocb, struct aio_kiocb *req, |
| 1781 | bool compat) | ||
| 1791 | { | 1782 | { |
| 1792 | struct aio_kiocb *req; | ||
| 1793 | ssize_t ret; | ||
| 1794 | |||
| 1795 | /* enforce forwards compatibility on users */ | ||
| 1796 | if (unlikely(iocb->aio_reserved2)) { | ||
| 1797 | pr_debug("EINVAL: reserve field set\n"); | ||
| 1798 | return -EINVAL; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | /* prevent overflows */ | ||
| 1802 | if (unlikely( | ||
| 1803 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | ||
| 1804 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | ||
| 1805 | ((ssize_t)iocb->aio_nbytes < 0) | ||
| 1806 | )) { | ||
| 1807 | pr_debug("EINVAL: overflow check\n"); | ||
| 1808 | return -EINVAL; | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | if (!get_reqs_available(ctx)) | ||
| 1812 | return -EAGAIN; | ||
| 1813 | |||
| 1814 | ret = -EAGAIN; | ||
| 1815 | req = aio_get_req(ctx); | ||
| 1816 | if (unlikely(!req)) | ||
| 1817 | goto out_put_reqs_available; | ||
| 1818 | |||
| 1819 | req->ki_filp = fget(iocb->aio_fildes); | 1783 | req->ki_filp = fget(iocb->aio_fildes); |
| 1820 | ret = -EBADF; | ||
| 1821 | if (unlikely(!req->ki_filp)) | 1784 | if (unlikely(!req->ki_filp)) |
| 1822 | goto out_put_req; | 1785 | return -EBADF; |
| 1823 | 1786 | ||
| 1824 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { | 1787 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
| 1788 | struct eventfd_ctx *eventfd; | ||
| 1825 | /* | 1789 | /* |
| 1826 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | 1790 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an |
| 1827 | * instance of the file* now. The file descriptor must be | 1791 | * instance of the file* now. The file descriptor must be |
| 1828 | * an eventfd() fd, and will be signaled for each completed | 1792 | * an eventfd() fd, and will be signaled for each completed |
| 1829 | * event using the eventfd_signal() function. | 1793 | * event using the eventfd_signal() function. |
| 1830 | */ | 1794 | */ |
| 1831 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); | 1795 | eventfd = eventfd_ctx_fdget(iocb->aio_resfd); |
| 1832 | if (IS_ERR(req->ki_eventfd)) { | 1796 | if (IS_ERR(eventfd)) |
| 1833 | ret = PTR_ERR(req->ki_eventfd); | 1797 | return PTR_ERR(eventfd); |
| 1834 | req->ki_eventfd = NULL; | 1798 | |
| 1835 | goto out_put_req; | 1799 | req->ki_eventfd = eventfd; |
| 1836 | } | ||
| 1837 | } | 1800 | } |
| 1838 | 1801 | ||
| 1839 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); | 1802 | if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { |
| 1840 | if (unlikely(ret)) { | ||
| 1841 | pr_debug("EFAULT: aio_key\n"); | 1803 | pr_debug("EFAULT: aio_key\n"); |
| 1842 | goto out_put_req; | 1804 | return -EFAULT; |
| 1843 | } | 1805 | } |
| 1844 | 1806 | ||
| 1845 | req->ki_user_iocb = user_iocb; | 1807 | req->ki_res.obj = (u64)(unsigned long)user_iocb; |
| 1846 | req->ki_user_data = iocb->aio_data; | 1808 | req->ki_res.data = iocb->aio_data; |
| 1809 | req->ki_res.res = 0; | ||
| 1810 | req->ki_res.res2 = 0; | ||
| 1847 | 1811 | ||
| 1848 | switch (iocb->aio_lio_opcode) { | 1812 | switch (iocb->aio_lio_opcode) { |
| 1849 | case IOCB_CMD_PREAD: | 1813 | case IOCB_CMD_PREAD: |
| 1850 | ret = aio_read(&req->rw, iocb, false, compat); | 1814 | return aio_read(&req->rw, iocb, false, compat); |
| 1851 | break; | ||
| 1852 | case IOCB_CMD_PWRITE: | 1815 | case IOCB_CMD_PWRITE: |
| 1853 | ret = aio_write(&req->rw, iocb, false, compat); | 1816 | return aio_write(&req->rw, iocb, false, compat); |
| 1854 | break; | ||
| 1855 | case IOCB_CMD_PREADV: | 1817 | case IOCB_CMD_PREADV: |
| 1856 | ret = aio_read(&req->rw, iocb, true, compat); | 1818 | return aio_read(&req->rw, iocb, true, compat); |
| 1857 | break; | ||
| 1858 | case IOCB_CMD_PWRITEV: | 1819 | case IOCB_CMD_PWRITEV: |
| 1859 | ret = aio_write(&req->rw, iocb, true, compat); | 1820 | return aio_write(&req->rw, iocb, true, compat); |
| 1860 | break; | ||
| 1861 | case IOCB_CMD_FSYNC: | 1821 | case IOCB_CMD_FSYNC: |
| 1862 | ret = aio_fsync(&req->fsync, iocb, false); | 1822 | return aio_fsync(&req->fsync, iocb, false); |
| 1863 | break; | ||
| 1864 | case IOCB_CMD_FDSYNC: | 1823 | case IOCB_CMD_FDSYNC: |
| 1865 | ret = aio_fsync(&req->fsync, iocb, true); | 1824 | return aio_fsync(&req->fsync, iocb, true); |
| 1866 | break; | ||
| 1867 | case IOCB_CMD_POLL: | 1825 | case IOCB_CMD_POLL: |
| 1868 | ret = aio_poll(req, iocb); | 1826 | return aio_poll(req, iocb); |
| 1869 | break; | ||
| 1870 | default: | 1827 | default: |
| 1871 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); | 1828 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); |
| 1872 | ret = -EINVAL; | 1829 | return -EINVAL; |
| 1873 | break; | ||
| 1874 | } | 1830 | } |
| 1875 | |||
| 1876 | /* | ||
| 1877 | * If ret is 0, we'd either done aio_complete() ourselves or have | ||
| 1878 | * arranged for that to be done asynchronously. Anything non-zero | ||
| 1879 | * means that we need to destroy req ourselves. | ||
| 1880 | */ | ||
| 1881 | if (ret) | ||
| 1882 | goto out_put_req; | ||
| 1883 | return 0; | ||
| 1884 | out_put_req: | ||
| 1885 | if (req->ki_eventfd) | ||
| 1886 | eventfd_ctx_put(req->ki_eventfd); | ||
| 1887 | iocb_put(req); | ||
| 1888 | out_put_reqs_available: | ||
| 1889 | put_reqs_available(ctx, 1); | ||
| 1890 | return ret; | ||
| 1891 | } | 1831 | } |
| 1892 | 1832 | ||
| 1893 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 1833 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
| 1894 | bool compat) | 1834 | bool compat) |
| 1895 | { | 1835 | { |
| 1836 | struct aio_kiocb *req; | ||
| 1896 | struct iocb iocb; | 1837 | struct iocb iocb; |
| 1838 | int err; | ||
| 1897 | 1839 | ||
| 1898 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) | 1840 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) |
| 1899 | return -EFAULT; | 1841 | return -EFAULT; |
| 1900 | 1842 | ||
| 1901 | return __io_submit_one(ctx, &iocb, user_iocb, compat); | 1843 | /* enforce forwards compatibility on users */ |
| 1844 | if (unlikely(iocb.aio_reserved2)) { | ||
| 1845 | pr_debug("EINVAL: reserve field set\n"); | ||
| 1846 | return -EINVAL; | ||
| 1847 | } | ||
| 1848 | |||
| 1849 | /* prevent overflows */ | ||
| 1850 | if (unlikely( | ||
| 1851 | (iocb.aio_buf != (unsigned long)iocb.aio_buf) || | ||
| 1852 | (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || | ||
| 1853 | ((ssize_t)iocb.aio_nbytes < 0) | ||
| 1854 | )) { | ||
| 1855 | pr_debug("EINVAL: overflow check\n"); | ||
| 1856 | return -EINVAL; | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | req = aio_get_req(ctx); | ||
| 1860 | if (unlikely(!req)) | ||
| 1861 | return -EAGAIN; | ||
| 1862 | |||
| 1863 | err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); | ||
| 1864 | |||
| 1865 | /* Done with the synchronous reference */ | ||
| 1866 | iocb_put(req); | ||
| 1867 | |||
| 1868 | /* | ||
| 1869 | * If err is 0, we'd either done aio_complete() ourselves or have | ||
| 1870 | * arranged for that to be done asynchronously. Anything non-zero | ||
| 1871 | * means that we need to destroy req ourselves. | ||
| 1872 | */ | ||
| 1873 | if (unlikely(err)) { | ||
| 1874 | iocb_destroy(req); | ||
| 1875 | put_reqs_available(ctx, 1); | ||
| 1876 | } | ||
| 1877 | return err; | ||
| 1902 | } | 1878 | } |
| 1903 | 1879 | ||
| 1904 | /* sys_io_submit: | 1880 | /* sys_io_submit: |
| @@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, | |||
| 1997 | } | 1973 | } |
| 1998 | #endif | 1974 | #endif |
| 1999 | 1975 | ||
| 2000 | /* lookup_kiocb | ||
| 2001 | * Finds a given iocb for cancellation. | ||
| 2002 | */ | ||
| 2003 | static struct aio_kiocb * | ||
| 2004 | lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) | ||
| 2005 | { | ||
| 2006 | struct aio_kiocb *kiocb; | ||
| 2007 | |||
| 2008 | assert_spin_locked(&ctx->ctx_lock); | ||
| 2009 | |||
| 2010 | /* TODO: use a hash or array, this sucks. */ | ||
| 2011 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { | ||
| 2012 | if (kiocb->ki_user_iocb == iocb) | ||
| 2013 | return kiocb; | ||
| 2014 | } | ||
| 2015 | return NULL; | ||
| 2016 | } | ||
| 2017 | |||
| 2018 | /* sys_io_cancel: | 1976 | /* sys_io_cancel: |
| 2019 | * Attempts to cancel an iocb previously passed to io_submit. If | 1977 | * Attempts to cancel an iocb previously passed to io_submit. If |
| 2020 | * the operation is successfully cancelled, the resulting event is | 1978 | * the operation is successfully cancelled, the resulting event is |
| @@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
| 2032 | struct aio_kiocb *kiocb; | 1990 | struct aio_kiocb *kiocb; |
| 2033 | int ret = -EINVAL; | 1991 | int ret = -EINVAL; |
| 2034 | u32 key; | 1992 | u32 key; |
| 1993 | u64 obj = (u64)(unsigned long)iocb; | ||
| 2035 | 1994 | ||
| 2036 | if (unlikely(get_user(key, &iocb->aio_key))) | 1995 | if (unlikely(get_user(key, &iocb->aio_key))) |
| 2037 | return -EFAULT; | 1996 | return -EFAULT; |
| @@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
| 2043 | return -EINVAL; | 2002 | return -EINVAL; |
| 2044 | 2003 | ||
| 2045 | spin_lock_irq(&ctx->ctx_lock); | 2004 | spin_lock_irq(&ctx->ctx_lock); |
| 2046 | kiocb = lookup_kiocb(ctx, iocb); | 2005 | /* TODO: use a hash or array, this sucks. */ |
| 2047 | if (kiocb) { | 2006 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
| 2048 | ret = kiocb->ki_cancel(&kiocb->rw); | 2007 | if (kiocb->ki_res.obj == obj) { |
| 2049 | list_del_init(&kiocb->ki_list); | 2008 | ret = kiocb->ki_cancel(&kiocb->rw); |
| 2009 | list_del_init(&kiocb->ki_list); | ||
| 2010 | break; | ||
| 2011 | } | ||
| 2050 | } | 2012 | } |
| 2051 | spin_unlock_irq(&ctx->ctx_lock); | 2013 | spin_unlock_irq(&ctx->ctx_lock); |
| 2052 | 2014 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 78d3257435c0..24615c76c1d0 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -307,10 +307,10 @@ static void blkdev_bio_end_io(struct bio *bio) | |||
| 307 | struct blkdev_dio *dio = bio->bi_private; | 307 | struct blkdev_dio *dio = bio->bi_private; |
| 308 | bool should_dirty = dio->should_dirty; | 308 | bool should_dirty = dio->should_dirty; |
| 309 | 309 | ||
| 310 | if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { | 310 | if (bio->bi_status && !dio->bio.bi_status) |
| 311 | if (bio->bi_status && !dio->bio.bi_status) | 311 | dio->bio.bi_status = bio->bi_status; |
| 312 | dio->bio.bi_status = bio->bi_status; | 312 | |
| 313 | } else { | 313 | if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { |
| 314 | if (!dio->is_sync) { | 314 | if (!dio->is_sync) { |
| 315 | struct kiocb *iocb = dio->iocb; | 315 | struct kiocb *iocb = dio->iocb; |
| 316 | ssize_t ret; | 316 | ssize_t ret; |
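The blkdev_bio_end_io change records a child bio's error status unconditionally before the reference-count test, so an error reported by the final fragment is no longer lost. A small standalone sketch of that pattern with C11 atomics; the names are illustrative, not the block layer's.

#include <stdatomic.h>
#include <stdio.h>

struct multi_io {
    atomic_int ref;        /* one reference per outstanding fragment */
    atomic_int status;     /* first error wins, 0 means success */
};

/* Completion callback for one fragment. */
static void fragment_done(struct multi_io *io, int err)
{
    int expected = 0;

    /* Record the first error even if this is the last fragment. */
    if (err)
        atomic_compare_exchange_strong(&io->status, &expected, err);

    /* Only the caller that drops the last reference finishes the I/O. */
    if (atomic_fetch_sub(&io->ref, 1) == 1)
        printf("complete, status=%d\n", atomic_load(&io->status));
}

int main(void)
{
    struct multi_io io = { .ref = 2, .status = 0 };
    fragment_done(&io, 0);
    fragment_done(&io, -5);   /* error on the last fragment is still reported */
    return 0;
}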
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 920bf3b4b0ef..cccc75d15970 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
| 8 | #include <linux/pagemap.h> | 8 | #include <linux/pagemap.h> |
| 9 | #include <linux/highmem.h> | 9 | #include <linux/highmem.h> |
| 10 | #include <linux/sched/mm.h> | ||
| 10 | #include "ctree.h" | 11 | #include "ctree.h" |
| 11 | #include "disk-io.h" | 12 | #include "disk-io.h" |
| 12 | #include "transaction.h" | 13 | #include "transaction.h" |
| @@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, | |||
| 427 | unsigned long this_sum_bytes = 0; | 428 | unsigned long this_sum_bytes = 0; |
| 428 | int i; | 429 | int i; |
| 429 | u64 offset; | 430 | u64 offset; |
| 431 | unsigned nofs_flag; | ||
| 432 | |||
| 433 | nofs_flag = memalloc_nofs_save(); | ||
| 434 | sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), | ||
| 435 | GFP_KERNEL); | ||
| 436 | memalloc_nofs_restore(nofs_flag); | ||
| 430 | 437 | ||
| 431 | sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), | ||
| 432 | GFP_NOFS); | ||
| 433 | if (!sums) | 438 | if (!sums) |
| 434 | return BLK_STS_RESOURCE; | 439 | return BLK_STS_RESOURCE; |
| 435 | 440 | ||
| @@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, | |||
| 472 | 477 | ||
| 473 | bytes_left = bio->bi_iter.bi_size - total_bytes; | 478 | bytes_left = bio->bi_iter.bi_size - total_bytes; |
| 474 | 479 | ||
| 475 | sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left), | 480 | nofs_flag = memalloc_nofs_save(); |
| 476 | GFP_NOFS); | 481 | sums = kvzalloc(btrfs_ordered_sum_size(fs_info, |
| 482 | bytes_left), GFP_KERNEL); | ||
| 483 | memalloc_nofs_restore(nofs_flag); | ||
| 477 | BUG_ON(!sums); /* -ENOMEM */ | 484 | BUG_ON(!sums); /* -ENOMEM */ |
| 478 | sums->len = bytes_left; | 485 | sums->len = bytes_left; |
| 479 | ordered = btrfs_lookup_ordered_extent(inode, | 486 | ordered = btrfs_lookup_ordered_extent(inode, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ec2d8919e7fb..cd4e693406a0 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) | |||
| 501 | if (!capable(CAP_SYS_ADMIN)) | 501 | if (!capable(CAP_SYS_ADMIN)) |
| 502 | return -EPERM; | 502 | return -EPERM; |
| 503 | 503 | ||
| 504 | /* | ||
| 505 | * If the fs is mounted with nologreplay, which requires it to be | ||
| 506 | * mounted in RO mode as well, we can not allow discard on free space | ||
| 507 | * inside block groups, because log trees refer to extents that are not | ||
| 508 | * pinned in a block group's free space cache (pinning the extents is | ||
| 509 | * precisely the first phase of replaying a log tree). | ||
| 510 | */ | ||
| 511 | if (btrfs_test_opt(fs_info, NOLOGREPLAY)) | ||
| 512 | return -EROFS; | ||
| 513 | |||
| 504 | rcu_read_lock(); | 514 | rcu_read_lock(); |
| 505 | list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, | 515 | list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, |
| 506 | dev_list) { | 516 | dev_list) { |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 6fde2b2741ef..45e3cfd1198b 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
| 7 | #include <linux/blkdev.h> | 7 | #include <linux/blkdev.h> |
| 8 | #include <linux/writeback.h> | 8 | #include <linux/writeback.h> |
| 9 | #include <linux/sched/mm.h> | ||
| 9 | #include "ctree.h" | 10 | #include "ctree.h" |
| 10 | #include "transaction.h" | 11 | #include "transaction.h" |
| 11 | #include "btrfs_inode.h" | 12 | #include "btrfs_inode.h" |
| @@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) | |||
| 442 | cur = entry->list.next; | 443 | cur = entry->list.next; |
| 443 | sum = list_entry(cur, struct btrfs_ordered_sum, list); | 444 | sum = list_entry(cur, struct btrfs_ordered_sum, list); |
| 444 | list_del(&sum->list); | 445 | list_del(&sum->list); |
| 445 | kfree(sum); | 446 | kvfree(sum); |
| 446 | } | 447 | } |
| 447 | kmem_cache_free(btrfs_ordered_extent_cache, entry); | 448 | kmem_cache_free(btrfs_ordered_extent_cache, entry); |
| 448 | } | 449 | } |
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index dc6140013ae8..61d22a56c0ba 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c | |||
| @@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, | |||
| 366 | 366 | ||
| 367 | static int prop_compression_validate(const char *value, size_t len) | 367 | static int prop_compression_validate(const char *value, size_t len) |
| 368 | { | 368 | { |
| 369 | if (!strncmp("lzo", value, len)) | 369 | if (!strncmp("lzo", value, 3)) |
| 370 | return 0; | 370 | return 0; |
| 371 | else if (!strncmp("zlib", value, len)) | 371 | else if (!strncmp("zlib", value, 4)) |
| 372 | return 0; | 372 | return 0; |
| 373 | else if (!strncmp("zstd", value, len)) | 373 | else if (!strncmp("zstd", value, 4)) |
| 374 | return 0; | 374 | return 0; |
| 375 | 375 | ||
| 376 | return -EINVAL; | 376 | return -EINVAL; |
| @@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode, | |||
| 396 | btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); | 396 | btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); |
| 397 | } else if (!strncmp("zlib", value, 4)) { | 397 | } else if (!strncmp("zlib", value, 4)) { |
| 398 | type = BTRFS_COMPRESS_ZLIB; | 398 | type = BTRFS_COMPRESS_ZLIB; |
| 399 | } else if (!strncmp("zstd", value, len)) { | 399 | } else if (!strncmp("zstd", value, 4)) { |
| 400 | type = BTRFS_COMPRESS_ZSTD; | 400 | type = BTRFS_COMPRESS_ZSTD; |
| 401 | btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); | 401 | btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); |
| 402 | } else { | 402 | } else { |
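The props.c fix replaces strncmp(name, value, len) with a comparison over each supported name's own length: with the caller-supplied len, a short prefix such as "z" (len == 1) would have validated as "zlib". The standalone illustration below mirrors the shape of the before/after code; it is a sketch, not the btrfs functions themselves.

#include <stdio.h>
#include <string.h>

/* Buggy: the caller-controlled len limits the comparison, so any prefix
 * of a supported name (even a single byte) is accepted. */
static int validate_buggy(const char *value, size_t len)
{
    if (!strncmp("lzo", value, len) ||
        !strncmp("zlib", value, len) ||
        !strncmp("zstd", value, len))
        return 0;
    return -1;
}

/* Fixed: compare over the full length of each supported name. */
static int validate_fixed(const char *value)
{
    if (!strncmp("lzo", value, 3) ||
        !strncmp("zlib", value, 4) ||
        !strncmp("zstd", value, 4))
        return 0;
    return -1;
}

int main(void)
{
    printf("buggy \"z\"    -> %d\n", validate_buggy("z", 1)); /*  0: wrongly accepted */
    printf("fixed \"z\"    -> %d\n", validate_fixed("z"));    /* -1: rejected */
    printf("fixed \"zstd\" -> %d\n", validate_fixed("zstd")); /*  0: accepted */
    return 0;
}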
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index a8f429882249..0637149fb9f9 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
| @@ -1766,6 +1766,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, | |||
| 1766 | unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) | 1766 | unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) |
| 1767 | { | 1767 | { |
| 1768 | struct ceph_inode_info *dci = ceph_inode(dir); | 1768 | struct ceph_inode_info *dci = ceph_inode(dir); |
| 1769 | unsigned hash; | ||
| 1769 | 1770 | ||
| 1770 | switch (dci->i_dir_layout.dl_dir_hash) { | 1771 | switch (dci->i_dir_layout.dl_dir_hash) { |
| 1771 | case 0: /* for backward compat */ | 1772 | case 0: /* for backward compat */ |
| @@ -1773,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) | |||
| 1773 | return dn->d_name.hash; | 1774 | return dn->d_name.hash; |
| 1774 | 1775 | ||
| 1775 | default: | 1776 | default: |
| 1776 | return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, | 1777 | spin_lock(&dn->d_lock); |
| 1778 | hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash, | ||
| 1777 | dn->d_name.name, dn->d_name.len); | 1779 | dn->d_name.name, dn->d_name.len); |
| 1780 | spin_unlock(&dn->d_lock); | ||
| 1781 | return hash; | ||
| 1778 | } | 1782 | } |
| 1779 | } | 1783 | } |
| 1780 | 1784 | ||
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 2d61ddda9bf5..c2feb310ac1e 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -1163,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in) | |||
| 1163 | return 0; | 1163 | return 0; |
| 1164 | } | 1164 | } |
| 1165 | 1165 | ||
| 1166 | static int d_name_cmp(struct dentry *dentry, const char *name, size_t len) | ||
| 1167 | { | ||
| 1168 | int ret; | ||
| 1169 | |||
| 1170 | /* take d_lock to ensure dentry->d_name stability */ | ||
| 1171 | spin_lock(&dentry->d_lock); | ||
| 1172 | ret = dentry->d_name.len - len; | ||
| 1173 | if (!ret) | ||
| 1174 | ret = memcmp(dentry->d_name.name, name, len); | ||
| 1175 | spin_unlock(&dentry->d_lock); | ||
| 1176 | return ret; | ||
| 1177 | } | ||
| 1178 | |||
| 1166 | /* | 1179 | /* |
| 1167 | * Incorporate results into the local cache. This is either just | 1180 | * Incorporate results into the local cache. This is either just |
| 1168 | * one inode, or a directory, dentry, and possibly linked-to inode (e.g., | 1181 | * one inode, or a directory, dentry, and possibly linked-to inode (e.g., |
| @@ -1412,7 +1425,8 @@ retry_lookup: | |||
| 1412 | err = splice_dentry(&req->r_dentry, in); | 1425 | err = splice_dentry(&req->r_dentry, in); |
| 1413 | if (err < 0) | 1426 | if (err < 0) |
| 1414 | goto done; | 1427 | goto done; |
| 1415 | } else if (rinfo->head->is_dentry) { | 1428 | } else if (rinfo->head->is_dentry && |
| 1429 | !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) { | ||
| 1416 | struct ceph_vino *ptvino = NULL; | 1430 | struct ceph_vino *ptvino = NULL; |
| 1417 | 1431 | ||
| 1418 | if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) || | 1432 | if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) || |
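The new d_name_cmp() helper compares the length first and only then the bytes, all under d_lock so the dentry name cannot change mid-compare. A rough userspace analog of that lock-protected compare is sketched below; the struct and lock names are made up.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct name {
    pthread_mutex_t lock;
    size_t len;
    char buf[64];
};

/* Returns 0 when the stored name equals name[0..len), nonzero otherwise.
 * The lock keeps len and buf consistent for the duration of the compare. */
static int name_cmp(struct name *n, const char *name, size_t len)
{
    int ret;

    pthread_mutex_lock(&n->lock);
    ret = (int)(n->len - len);
    if (!ret)
        ret = memcmp(n->buf, name, len);
    pthread_mutex_unlock(&n->lock);
    return ret;
}

int main(void)
{
    struct name n = { .lock = PTHREAD_MUTEX_INITIALIZER, .len = 5 };
    memcpy(n.buf, "hello", 5);
    printf("%d %d\n", name_cmp(&n, "hello", 5), name_cmp(&n, "help", 4));
    return 0;
}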
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 21c33ed048ed..9049c2a3e972 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -1414,6 +1414,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |||
| 1414 | list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); | 1414 | list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); |
| 1415 | ci->i_prealloc_cap_flush = NULL; | 1415 | ci->i_prealloc_cap_flush = NULL; |
| 1416 | } | 1416 | } |
| 1417 | |||
| 1418 | if (drop && | ||
| 1419 | ci->i_wrbuffer_ref_head == 0 && | ||
| 1420 | ci->i_wr_ref == 0 && | ||
| 1421 | ci->i_dirty_caps == 0 && | ||
| 1422 | ci->i_flushing_caps == 0) { | ||
| 1423 | ceph_put_snap_context(ci->i_head_snapc); | ||
| 1424 | ci->i_head_snapc = NULL; | ||
| 1425 | } | ||
| 1417 | } | 1426 | } |
| 1418 | spin_unlock(&ci->i_ceph_lock); | 1427 | spin_unlock(&ci->i_ceph_lock); |
| 1419 | while (!list_empty(&to_remove)) { | 1428 | while (!list_empty(&to_remove)) { |
| @@ -2161,10 +2170,39 @@ retry: | |||
| 2161 | return path; | 2170 | return path; |
| 2162 | } | 2171 | } |
| 2163 | 2172 | ||
| 2173 | /* Duplicate the dentry->d_name.name safely */ | ||
| 2174 | static int clone_dentry_name(struct dentry *dentry, const char **ppath, | ||
| 2175 | int *ppathlen) | ||
| 2176 | { | ||
| 2177 | u32 len; | ||
| 2178 | char *name; | ||
| 2179 | |||
| 2180 | retry: | ||
| 2181 | len = READ_ONCE(dentry->d_name.len); | ||
| 2182 | name = kmalloc(len + 1, GFP_NOFS); | ||
| 2183 | if (!name) | ||
| 2184 | return -ENOMEM; | ||
| 2185 | |||
| 2186 | spin_lock(&dentry->d_lock); | ||
| 2187 | if (dentry->d_name.len != len) { | ||
| 2188 | spin_unlock(&dentry->d_lock); | ||
| 2189 | kfree(name); | ||
| 2190 | goto retry; | ||
| 2191 | } | ||
| 2192 | memcpy(name, dentry->d_name.name, len); | ||
| 2193 | spin_unlock(&dentry->d_lock); | ||
| 2194 | |||
| 2195 | name[len] = '\0'; | ||
| 2196 | *ppath = name; | ||
| 2197 | *ppathlen = len; | ||
| 2198 | return 0; | ||
| 2199 | } | ||
| 2200 | |||
| 2164 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, | 2201 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, |
| 2165 | const char **ppath, int *ppathlen, u64 *pino, | 2202 | const char **ppath, int *ppathlen, u64 *pino, |
| 2166 | int *pfreepath) | 2203 | bool *pfreepath, bool parent_locked) |
| 2167 | { | 2204 | { |
| 2205 | int ret; | ||
| 2168 | char *path; | 2206 | char *path; |
| 2169 | 2207 | ||
| 2170 | rcu_read_lock(); | 2208 | rcu_read_lock(); |
| @@ -2173,8 +2211,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir, | |||
| 2173 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { | 2211 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { |
| 2174 | *pino = ceph_ino(dir); | 2212 | *pino = ceph_ino(dir); |
| 2175 | rcu_read_unlock(); | 2213 | rcu_read_unlock(); |
| 2176 | *ppath = dentry->d_name.name; | 2214 | if (parent_locked) { |
| 2177 | *ppathlen = dentry->d_name.len; | 2215 | *ppath = dentry->d_name.name; |
| 2216 | *ppathlen = dentry->d_name.len; | ||
| 2217 | } else { | ||
| 2218 | ret = clone_dentry_name(dentry, ppath, ppathlen); | ||
| 2219 | if (ret) | ||
| 2220 | return ret; | ||
| 2221 | *pfreepath = true; | ||
| 2222 | } | ||
| 2178 | return 0; | 2223 | return 0; |
| 2179 | } | 2224 | } |
| 2180 | rcu_read_unlock(); | 2225 | rcu_read_unlock(); |
| @@ -2182,13 +2227,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir, | |||
| 2182 | if (IS_ERR(path)) | 2227 | if (IS_ERR(path)) |
| 2183 | return PTR_ERR(path); | 2228 | return PTR_ERR(path); |
| 2184 | *ppath = path; | 2229 | *ppath = path; |
| 2185 | *pfreepath = 1; | 2230 | *pfreepath = true; |
| 2186 | return 0; | 2231 | return 0; |
| 2187 | } | 2232 | } |
| 2188 | 2233 | ||
| 2189 | static int build_inode_path(struct inode *inode, | 2234 | static int build_inode_path(struct inode *inode, |
| 2190 | const char **ppath, int *ppathlen, u64 *pino, | 2235 | const char **ppath, int *ppathlen, u64 *pino, |
| 2191 | int *pfreepath) | 2236 | bool *pfreepath) |
| 2192 | { | 2237 | { |
| 2193 | struct dentry *dentry; | 2238 | struct dentry *dentry; |
| 2194 | char *path; | 2239 | char *path; |
| @@ -2204,7 +2249,7 @@ static int build_inode_path(struct inode *inode, | |||
| 2204 | if (IS_ERR(path)) | 2249 | if (IS_ERR(path)) |
| 2205 | return PTR_ERR(path); | 2250 | return PTR_ERR(path); |
| 2206 | *ppath = path; | 2251 | *ppath = path; |
| 2207 | *pfreepath = 1; | 2252 | *pfreepath = true; |
| 2208 | return 0; | 2253 | return 0; |
| 2209 | } | 2254 | } |
| 2210 | 2255 | ||
| @@ -2215,7 +2260,7 @@ static int build_inode_path(struct inode *inode, | |||
| 2215 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | 2260 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, |
| 2216 | struct inode *rdiri, const char *rpath, | 2261 | struct inode *rdiri, const char *rpath, |
| 2217 | u64 rino, const char **ppath, int *pathlen, | 2262 | u64 rino, const char **ppath, int *pathlen, |
| 2218 | u64 *ino, int *freepath) | 2263 | u64 *ino, bool *freepath, bool parent_locked) |
| 2219 | { | 2264 | { |
| 2220 | int r = 0; | 2265 | int r = 0; |
| 2221 | 2266 | ||
| @@ -2225,7 +2270,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |||
| 2225 | ceph_snap(rinode)); | 2270 | ceph_snap(rinode)); |
| 2226 | } else if (rdentry) { | 2271 | } else if (rdentry) { |
| 2227 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, | 2272 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, |
| 2228 | freepath); | 2273 | freepath, parent_locked); |
| 2229 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, | 2274 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, |
| 2230 | *ppath); | 2275 | *ppath); |
| 2231 | } else if (rpath || rino) { | 2276 | } else if (rpath || rino) { |
| @@ -2251,7 +2296,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |||
| 2251 | const char *path2 = NULL; | 2296 | const char *path2 = NULL; |
| 2252 | u64 ino1 = 0, ino2 = 0; | 2297 | u64 ino1 = 0, ino2 = 0; |
| 2253 | int pathlen1 = 0, pathlen2 = 0; | 2298 | int pathlen1 = 0, pathlen2 = 0; |
| 2254 | int freepath1 = 0, freepath2 = 0; | 2299 | bool freepath1 = false, freepath2 = false; |
| 2255 | int len; | 2300 | int len; |
| 2256 | u16 releases; | 2301 | u16 releases; |
| 2257 | void *p, *end; | 2302 | void *p, *end; |
| @@ -2259,16 +2304,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |||
| 2259 | 2304 | ||
| 2260 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | 2305 | ret = set_request_path_attr(req->r_inode, req->r_dentry, |
| 2261 | req->r_parent, req->r_path1, req->r_ino1.ino, | 2306 | req->r_parent, req->r_path1, req->r_ino1.ino, |
| 2262 | &path1, &pathlen1, &ino1, &freepath1); | 2307 | &path1, &pathlen1, &ino1, &freepath1, |
| 2308 | test_bit(CEPH_MDS_R_PARENT_LOCKED, | ||
| 2309 | &req->r_req_flags)); | ||
| 2263 | if (ret < 0) { | 2310 | if (ret < 0) { |
| 2264 | msg = ERR_PTR(ret); | 2311 | msg = ERR_PTR(ret); |
| 2265 | goto out; | 2312 | goto out; |
| 2266 | } | 2313 | } |
| 2267 | 2314 | ||
| 2315 | /* If r_old_dentry is set, then assume that its parent is locked */ | ||
| 2268 | ret = set_request_path_attr(NULL, req->r_old_dentry, | 2316 | ret = set_request_path_attr(NULL, req->r_old_dentry, |
| 2269 | req->r_old_dentry_dir, | 2317 | req->r_old_dentry_dir, |
| 2270 | req->r_path2, req->r_ino2.ino, | 2318 | req->r_path2, req->r_ino2.ino, |
| 2271 | &path2, &pathlen2, &ino2, &freepath2); | 2319 | &path2, &pathlen2, &ino2, &freepath2, true); |
| 2272 | if (ret < 0) { | 2320 | if (ret < 0) { |
| 2273 | msg = ERR_PTR(ret); | 2321 | msg = ERR_PTR(ret); |
| 2274 | goto out_free1; | 2322 | goto out_free1; |
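clone_dentry_name() above allocates without the lock, re-checks the length under d_lock, and retries if the name changed in between, so no allocation happens while the spinlock is held. A simplified userspace rendering of that retry loop follows; every name in it is hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct shared_name {
    pthread_mutex_t lock;
    size_t len;
    char buf[64];
};

/* Duplicate a name that another thread may change concurrently. */
static char *clone_name(struct shared_name *s)
{
    size_t len;
    char *copy;

retry:
    len = s->len;                       /* racy snapshot, verified below */
    copy = malloc(len + 1);
    if (!copy)
        return NULL;

    pthread_mutex_lock(&s->lock);
    if (s->len != len) {                /* name changed, start over */
        pthread_mutex_unlock(&s->lock);
        free(copy);
        goto retry;
    }
    memcpy(copy, s->buf, len);
    pthread_mutex_unlock(&s->lock);

    copy[len] = '\0';
    return copy;
}

int main(void)
{
    struct shared_name s = { .lock = PTHREAD_MUTEX_INITIALIZER, .len = 3 };
    char *c;

    memcpy(s.buf, "foo", 3);
    c = clone_name(&s);
    printf("%s\n", c ? c : "(alloc failed)");
    free(c);
    return 0;
}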
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 89aa37fa0f84..b26e12cd8ec3 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
| @@ -572,7 +572,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) | |||
| 572 | old_snapc = NULL; | 572 | old_snapc = NULL; |
| 573 | 573 | ||
| 574 | update_snapc: | 574 | update_snapc: |
| 575 | if (ci->i_head_snapc) { | 575 | if (ci->i_wrbuffer_ref_head == 0 && |
| 576 | ci->i_wr_ref == 0 && | ||
| 577 | ci->i_dirty_caps == 0 && | ||
| 578 | ci->i_flushing_caps == 0) { | ||
| 579 | ci->i_head_snapc = NULL; | ||
| 580 | } else { | ||
| 576 | ci->i_head_snapc = ceph_get_snap_context(new_snapc); | 581 | ci->i_head_snapc = ceph_get_snap_context(new_snapc); |
| 577 | dout(" new snapc is %p\n", new_snapc); | 582 | dout(" new snapc is %p\n", new_snapc); |
| 578 | } | 583 | } |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f9b71c12cc9f..a05bf1d6e1d0 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) | |||
| 559 | tcon->ses->server->echo_interval / HZ); | 559 | tcon->ses->server->echo_interval / HZ); |
| 560 | if (tcon->snapshot_time) | 560 | if (tcon->snapshot_time) |
| 561 | seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); | 561 | seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); |
| 562 | if (tcon->handle_timeout) | ||
| 563 | seq_printf(s, ",handletimeout=%u", tcon->handle_timeout); | ||
| 562 | /* convert actimeo and display it in seconds */ | 564 | /* convert actimeo and display it in seconds */ |
| 563 | seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); | 565 | seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); |
| 564 | 566 | ||
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 38feae812b47..585ad3207cb1 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -60,6 +60,12 @@ | |||
| 60 | #define CIFS_MAX_ACTIMEO (1 << 30) | 60 | #define CIFS_MAX_ACTIMEO (1 << 30) |
| 61 | 61 | ||
| 62 | /* | 62 | /* |
| 63 | * Max persistent and resilient handle timeout (milliseconds). | ||
| 64 | * Windows durable max was 960000 (16 minutes) | ||
| 65 | */ | ||
| 66 | #define SMB3_MAX_HANDLE_TIMEOUT 960000 | ||
| 67 | |||
| 68 | /* | ||
| 63 | * MAX_REQ is the maximum number of requests that WE will send | 69 | * MAX_REQ is the maximum number of requests that WE will send |
| 64 | * on one socket concurrently. | 70 | * on one socket concurrently. |
| 65 | */ | 71 | */ |
| @@ -586,6 +592,7 @@ struct smb_vol { | |||
| 586 | struct nls_table *local_nls; | 592 | struct nls_table *local_nls; |
| 587 | unsigned int echo_interval; /* echo interval in secs */ | 593 | unsigned int echo_interval; /* echo interval in secs */ |
| 588 | __u64 snapshot_time; /* needed for timewarp tokens */ | 594 | __u64 snapshot_time; /* needed for timewarp tokens */ |
| 595 | __u32 handle_timeout; /* persistent and durable handle timeout in ms */ | ||
| 589 | unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ | 596 | unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ |
| 590 | }; | 597 | }; |
| 591 | 598 | ||
| @@ -1058,6 +1065,7 @@ struct cifs_tcon { | |||
| 1058 | __u32 vol_serial_number; | 1065 | __u32 vol_serial_number; |
| 1059 | __le64 vol_create_time; | 1066 | __le64 vol_create_time; |
| 1060 | __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ | 1067 | __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ |
| 1068 | __u32 handle_timeout; /* persistent and durable handle timeout in ms */ | ||
| 1061 | __u32 ss_flags; /* sector size flags */ | 1069 | __u32 ss_flags; /* sector size flags */ |
| 1062 | __u32 perf_sector_size; /* best sector size for perf */ | 1070 | __u32 perf_sector_size; /* best sector size for perf */ |
| 1063 | __u32 max_chunks; | 1071 | __u32 max_chunks; |
| @@ -1325,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) | |||
| 1325 | } | 1333 | } |
| 1326 | 1334 | ||
| 1327 | struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file); | 1335 | struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file); |
| 1336 | void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr); | ||
| 1328 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file); | 1337 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file); |
| 1329 | 1338 | ||
| 1330 | #define CIFS_CACHE_READ_FLG 1 | 1339 | #define CIFS_CACHE_READ_FLG 1 |
| @@ -1847,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock; | |||
| 1847 | #endif /* CONFIG_CIFS_ACL */ | 1856 | #endif /* CONFIG_CIFS_ACL */ |
| 1848 | 1857 | ||
| 1849 | void cifs_oplock_break(struct work_struct *work); | 1858 | void cifs_oplock_break(struct work_struct *work); |
| 1859 | void cifs_queue_oplock_break(struct cifsFileInfo *cfile); | ||
| 1850 | 1860 | ||
| 1851 | extern const struct slow_work_ops cifs_oplock_break_ops; | 1861 | extern const struct slow_work_ops cifs_oplock_break_ops; |
| 1852 | extern struct workqueue_struct *cifsiod_wq; | 1862 | extern struct workqueue_struct *cifsiod_wq; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a8e9738db691..4c0e44489f21 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -103,7 +103,7 @@ enum { | |||
| 103 | Opt_cruid, Opt_gid, Opt_file_mode, | 103 | Opt_cruid, Opt_gid, Opt_file_mode, |
| 104 | Opt_dirmode, Opt_port, | 104 | Opt_dirmode, Opt_port, |
| 105 | Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, | 105 | Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, |
| 106 | Opt_echo_interval, Opt_max_credits, | 106 | Opt_echo_interval, Opt_max_credits, Opt_handletimeout, |
| 107 | Opt_snapshot, | 107 | Opt_snapshot, |
| 108 | 108 | ||
| 109 | /* Mount options which take string value */ | 109 | /* Mount options which take string value */ |
| @@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = { | |||
| 208 | { Opt_rsize, "rsize=%s" }, | 208 | { Opt_rsize, "rsize=%s" }, |
| 209 | { Opt_wsize, "wsize=%s" }, | 209 | { Opt_wsize, "wsize=%s" }, |
| 210 | { Opt_actimeo, "actimeo=%s" }, | 210 | { Opt_actimeo, "actimeo=%s" }, |
| 211 | { Opt_handletimeout, "handletimeout=%s" }, | ||
| 211 | { Opt_echo_interval, "echo_interval=%s" }, | 212 | { Opt_echo_interval, "echo_interval=%s" }, |
| 212 | { Opt_max_credits, "max_credits=%s" }, | 213 | { Opt_max_credits, "max_credits=%s" }, |
| 213 | { Opt_snapshot, "snapshot=%s" }, | 214 | { Opt_snapshot, "snapshot=%s" }, |
| @@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 1619 | 1620 | ||
| 1620 | vol->actimeo = CIFS_DEF_ACTIMEO; | 1621 | vol->actimeo = CIFS_DEF_ACTIMEO; |
| 1621 | 1622 | ||
| 1623 | /* Most clients set timeout to 0, allows server to use its default */ | ||
| 1624 | vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ | ||
| 1625 | |||
| 1622 | /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ | 1626 | /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ |
| 1623 | vol->ops = &smb30_operations; | 1627 | vol->ops = &smb30_operations; |
| 1624 | vol->vals = &smbdefault_values; | 1628 | vol->vals = &smbdefault_values; |
| @@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
| 2017 | goto cifs_parse_mount_err; | 2021 | goto cifs_parse_mount_err; |
| 2018 | } | 2022 | } |
| 2019 | break; | 2023 | break; |
| 2024 | case Opt_handletimeout: | ||
| 2025 | if (get_option_ul(args, &option)) { | ||
| 2026 | cifs_dbg(VFS, "%s: Invalid handletimeout value\n", | ||
| 2027 | __func__); | ||
| 2028 | goto cifs_parse_mount_err; | ||
| 2029 | } | ||
| 2030 | vol->handle_timeout = option; | ||
| 2031 | if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) { | ||
| 2032 | cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n"); | ||
| 2033 | goto cifs_parse_mount_err; | ||
| 2034 | } | ||
| 2035 | break; | ||
| 2020 | case Opt_echo_interval: | 2036 | case Opt_echo_interval: |
| 2021 | if (get_option_ul(args, &option)) { | 2037 | if (get_option_ul(args, &option)) { |
| 2022 | cifs_dbg(VFS, "%s: Invalid echo interval value\n", | 2038 | cifs_dbg(VFS, "%s: Invalid echo interval value\n", |
| @@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
| 3183 | return 0; | 3199 | return 0; |
| 3184 | if (tcon->snapshot_time != volume_info->snapshot_time) | 3200 | if (tcon->snapshot_time != volume_info->snapshot_time) |
| 3185 | return 0; | 3201 | return 0; |
| 3202 | if (tcon->handle_timeout != volume_info->handle_timeout) | ||
| 3203 | return 0; | ||
| 3186 | return 1; | 3204 | return 1; |
| 3187 | } | 3205 | } |
| 3188 | 3206 | ||
| @@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 3297 | tcon->snapshot_time = volume_info->snapshot_time; | 3315 | tcon->snapshot_time = volume_info->snapshot_time; |
| 3298 | } | 3316 | } |
| 3299 | 3317 | ||
| 3318 | if (volume_info->handle_timeout) { | ||
| 3319 | if (ses->server->vals->protocol_id == 0) { | ||
| 3320 | cifs_dbg(VFS, | ||
| 3321 | "Use SMB2.1 or later for handle timeout option\n"); | ||
| 3322 | rc = -EOPNOTSUPP; | ||
| 3323 | goto out_fail; | ||
| 3324 | } else | ||
| 3325 | tcon->handle_timeout = volume_info->handle_timeout; | ||
| 3326 | } | ||
| 3327 | |||
| 3300 | tcon->ses = ses; | 3328 | tcon->ses = ses; |
| 3301 | if (volume_info->password) { | 3329 | if (volume_info->password) { |
| 3302 | tcon->password = kstrdup(volume_info->password, GFP_KERNEL); | 3330 | tcon->password = kstrdup(volume_info->password, GFP_KERNEL); |
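The new handletimeout= option is parsed as an unsigned integer and rejected when it exceeds SMB3_MAX_HANDLE_TIMEOUT (16 minutes in milliseconds). The small standalone parser below shows the same parse-then-bound-check shape; the constant is copied from the hunk, while the helper name and use of strtoul are assumptions for the sketch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define SMB3_MAX_HANDLE_TIMEOUT 960000   /* ms, per the hunk above */

/* Parse a "handletimeout=<ms>" value; return 0 and fill *out on success. */
static int parse_handle_timeout(const char *arg, unsigned int *out)
{
    char *end;
    unsigned long val;

    errno = 0;
    val = strtoul(arg, &end, 10);
    if (errno || end == arg || *end != '\0')
        return -EINVAL;
    if (val > SMB3_MAX_HANDLE_TIMEOUT)
        return -EINVAL;                  /* longer than 16 minutes */
    *out = (unsigned int)val;
    return 0;
}

int main(void)
{
    unsigned int ms;
    printf("%d\n", parse_handle_timeout("30000", &ms));    /* 0, ms = 30000 */
    printf("%d\n", parse_handle_timeout("9999999", &ms));  /* -EINVAL */
    return 0;
}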
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 89006e044973..7037a137fa53 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -360,13 +360,31 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file) | |||
| 360 | return cifs_file; | 360 | return cifs_file; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | /* | 363 | /** |
| 364 | * Release a reference on the file private data. This may involve closing | 364 | * cifsFileInfo_put - release a reference of file priv data |
| 365 | * the filehandle out on the server. Must be called without holding | 365 | * |
| 366 | * tcon->open_file_lock and cifs_file->file_info_lock. | 366 | * Always potentially wait for oplock handler. See _cifsFileInfo_put(). |
| 367 | */ | 367 | */ |
| 368 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | 368 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
| 369 | { | 369 | { |
| 370 | _cifsFileInfo_put(cifs_file, true); | ||
| 371 | } | ||
| 372 | |||
| 373 | /** | ||
| 374 | * _cifsFileInfo_put - release a reference of file priv data | ||
| 375 | * | ||
| 376 | * This may involve closing the filehandle @cifs_file out on the | ||
| 377 | * server. Must be called without holding tcon->open_file_lock and | ||
| 378 | * cifs_file->file_info_lock. | ||
| 379 | * | ||
| 380 | * If @wait_for_oplock_handler is true and we are releasing the last | ||
| 381 | * reference, wait for any running oplock break handler of the file | ||
| 382 | * and cancel any pending one. If calling this function from the | ||
| 383 | * oplock break handler, you need to pass false. | ||
| 384 | * | ||
| 385 | */ | ||
| 386 | void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) | ||
| 387 | { | ||
| 370 | struct inode *inode = d_inode(cifs_file->dentry); | 388 | struct inode *inode = d_inode(cifs_file->dentry); |
| 371 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); | 389 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); |
| 372 | struct TCP_Server_Info *server = tcon->ses->server; | 390 | struct TCP_Server_Info *server = tcon->ses->server; |
| @@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | |||
| 414 | 432 | ||
| 415 | spin_unlock(&tcon->open_file_lock); | 433 | spin_unlock(&tcon->open_file_lock); |
| 416 | 434 | ||
| 417 | oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); | 435 | oplock_break_cancelled = wait_oplock_handler ? |
| 436 | cancel_work_sync(&cifs_file->oplock_break) : false; | ||
| 418 | 437 | ||
| 419 | if (!tcon->need_reconnect && !cifs_file->invalidHandle) { | 438 | if (!tcon->need_reconnect && !cifs_file->invalidHandle) { |
| 420 | struct TCP_Server_Info *server = tcon->ses->server; | 439 | struct TCP_Server_Info *server = tcon->ses->server; |
| @@ -2858,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx) | |||
| 2858 | struct cifs_tcon *tcon; | 2877 | struct cifs_tcon *tcon; |
| 2859 | struct cifs_sb_info *cifs_sb; | 2878 | struct cifs_sb_info *cifs_sb; |
| 2860 | struct dentry *dentry = ctx->cfile->dentry; | 2879 | struct dentry *dentry = ctx->cfile->dentry; |
| 2861 | unsigned int i; | ||
| 2862 | int rc; | 2880 | int rc; |
| 2863 | 2881 | ||
| 2864 | tcon = tlink_tcon(ctx->cfile->tlink); | 2882 | tcon = tlink_tcon(ctx->cfile->tlink); |
| @@ -2922,10 +2940,6 @@ restart_loop: | |||
| 2922 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); | 2940 | kref_put(&wdata->refcount, cifs_uncached_writedata_release); |
| 2923 | } | 2941 | } |
| 2924 | 2942 | ||
| 2925 | if (!ctx->direct_io) | ||
| 2926 | for (i = 0; i < ctx->npages; i++) | ||
| 2927 | put_page(ctx->bv[i].bv_page); | ||
| 2928 | |||
| 2929 | cifs_stats_bytes_written(tcon, ctx->total_len); | 2943 | cifs_stats_bytes_written(tcon, ctx->total_len); |
| 2930 | set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); | 2944 | set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); |
| 2931 | 2945 | ||
| @@ -3563,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx) | |||
| 3563 | struct iov_iter *to = &ctx->iter; | 3577 | struct iov_iter *to = &ctx->iter; |
| 3564 | struct cifs_sb_info *cifs_sb; | 3578 | struct cifs_sb_info *cifs_sb; |
| 3565 | struct cifs_tcon *tcon; | 3579 | struct cifs_tcon *tcon; |
| 3566 | unsigned int i; | ||
| 3567 | int rc; | 3580 | int rc; |
| 3568 | 3581 | ||
| 3569 | tcon = tlink_tcon(ctx->cfile->tlink); | 3582 | tcon = tlink_tcon(ctx->cfile->tlink); |
| @@ -3647,15 +3660,8 @@ again: | |||
| 3647 | kref_put(&rdata->refcount, cifs_uncached_readdata_release); | 3660 | kref_put(&rdata->refcount, cifs_uncached_readdata_release); |
| 3648 | } | 3661 | } |
| 3649 | 3662 | ||
| 3650 | if (!ctx->direct_io) { | 3663 | if (!ctx->direct_io) |
| 3651 | for (i = 0; i < ctx->npages; i++) { | ||
| 3652 | if (ctx->should_dirty) | ||
| 3653 | set_page_dirty(ctx->bv[i].bv_page); | ||
| 3654 | put_page(ctx->bv[i].bv_page); | ||
| 3655 | } | ||
| 3656 | |||
| 3657 | ctx->total_len = ctx->len - iov_iter_count(to); | 3664 | ctx->total_len = ctx->len - iov_iter_count(to); |
| 3658 | } | ||
| 3659 | 3665 | ||
| 3660 | /* mask nodata case */ | 3666 | /* mask nodata case */ |
| 3661 | if (rc == -ENODATA) | 3667 | if (rc == -ENODATA) |
| @@ -4603,6 +4609,7 @@ void cifs_oplock_break(struct work_struct *work) | |||
| 4603 | cinode); | 4609 | cinode); |
| 4604 | cifs_dbg(FYI, "Oplock release rc = %d\n", rc); | 4610 | cifs_dbg(FYI, "Oplock release rc = %d\n", rc); |
| 4605 | } | 4611 | } |
| 4612 | _cifsFileInfo_put(cfile, false /* do not wait for ourself */); | ||
| 4606 | cifs_done_oplock_break(cinode); | 4613 | cifs_done_oplock_break(cinode); |
| 4607 | } | 4614 | } |
| 4608 | 4615 | ||

diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 53fdb5df0d2e..538fd7d807e4 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
| @@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, | |||
| 1735 | if (rc == 0 || rc != -EBUSY) | 1735 | if (rc == 0 || rc != -EBUSY) |
| 1736 | goto do_rename_exit; | 1736 | goto do_rename_exit; |
| 1737 | 1737 | ||
| 1738 | /* Don't fall back to using SMB on SMB 2+ mount */ | ||
| 1739 | if (server->vals->protocol_id != 0) | ||
| 1740 | goto do_rename_exit; | ||
| 1741 | |||
| 1738 | /* open-file renames don't work across directories */ | 1742 | /* open-file renames don't work across directories */ |
| 1739 | if (to_dentry->d_parent != from_dentry->d_parent) | 1743 | if (to_dentry->d_parent != from_dentry->d_parent) |
| 1740 | goto do_rename_exit; | 1744 | goto do_rename_exit; |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index bee203055b30..0dc6f08020ac 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
| @@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) | |||
| 501 | CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, | 501 | CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
| 502 | &pCifsInode->flags); | 502 | &pCifsInode->flags); |
| 503 | 503 | ||
| 504 | queue_work(cifsoplockd_wq, | 504 | cifs_queue_oplock_break(netfile); |
| 505 | &netfile->oplock_break); | ||
| 506 | netfile->oplock_break_cancelled = false; | 505 | netfile->oplock_break_cancelled = false; |
| 507 | 506 | ||
| 508 | spin_unlock(&tcon->open_file_lock); | 507 | spin_unlock(&tcon->open_file_lock); |
| @@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode) | |||
| 607 | spin_unlock(&cinode->writers_lock); | 606 | spin_unlock(&cinode->writers_lock); |
| 608 | } | 607 | } |
| 609 | 608 | ||
| 609 | /** | ||
| 610 | * cifs_queue_oplock_break - queue the oplock break handler for cfile | ||
| 611 | * | ||
| 612 | * This function is called from the demultiplex thread when it | ||
| 613 | * receives an oplock break for @cfile. | ||
| 614 | * | ||
| 615 | * Assumes the tcon->open_file_lock is held. | ||
| 616 | * Assumes cfile->file_info_lock is NOT held. | ||
| 617 | */ | ||
| 618 | void cifs_queue_oplock_break(struct cifsFileInfo *cfile) | ||
| 619 | { | ||
| 620 | /* | ||
| 621 | * Bump the handle refcount now while we hold the | ||
| 622 | * open_file_lock to enforce the validity of it for the oplock | ||
| 623 | * break handler. The matching put is done at the end of the | ||
| 624 | * handler. | ||
| 625 | */ | ||
| 626 | cifsFileInfo_get(cfile); | ||
| 627 | |||
| 628 | queue_work(cifsoplockd_wq, &cfile->oplock_break); | ||
| 629 | } | ||
| 630 | |||
| 610 | void cifs_done_oplock_break(struct cifsInodeInfo *cinode) | 631 | void cifs_done_oplock_break(struct cifsInodeInfo *cinode) |
| 611 | { | 632 | { |
| 612 | clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); | 633 | clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); |
| @@ -768,6 +789,11 @@ cifs_aio_ctx_alloc(void) | |||
| 768 | { | 789 | { |
| 769 | struct cifs_aio_ctx *ctx; | 790 | struct cifs_aio_ctx *ctx; |
| 770 | 791 | ||
| 792 | /* | ||
| 793 | * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io | ||
| 794 | * to false so that we know when we have to unreference pages within | ||
| 795 | * cifs_aio_ctx_release() | ||
| 796 | */ | ||
| 771 | ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL); | 797 | ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL); |
| 772 | if (!ctx) | 798 | if (!ctx) |
| 773 | return NULL; | 799 | return NULL; |
| @@ -786,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount) | |||
| 786 | struct cifs_aio_ctx, refcount); | 812 | struct cifs_aio_ctx, refcount); |
| 787 | 813 | ||
| 788 | cifsFileInfo_put(ctx->cfile); | 814 | cifsFileInfo_put(ctx->cfile); |
| 789 | kvfree(ctx->bv); | 815 | |
| 816 | /* | ||
| 817 | * ctx->bv is only set if setup_aio_ctx_iter() was called successfully | ||
| 818 | * which means that iov_iter_get_pages() was a success and thus that | ||
| 819 | * we have taken reference on pages. | ||
| 820 | */ | ||
| 821 | if (ctx->bv) { | ||
| 822 | unsigned i; | ||
| 823 | |||
| 824 | for (i = 0; i < ctx->npages; i++) { | ||
| 825 | if (ctx->should_dirty) | ||
| 826 | set_page_dirty(ctx->bv[i].bv_page); | ||
| 827 | put_page(ctx->bv[i].bv_page); | ||
| 828 | } | ||
| 829 | kvfree(ctx->bv); | ||
| 830 | } | ||
| 831 | |||
| 790 | kfree(ctx); | 832 | kfree(ctx); |
| 791 | } | 833 | } |
| 792 | 834 | ||
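cifs_queue_oplock_break() pins the file handle with cifsFileInfo_get() before queueing the work item, and the handler later drops that reference via _cifsFileInfo_put(..., false) so it never waits on itself. A pared-down pthread analog of "get before handing off, put inside the worker" is sketched below; every name in it is invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct handle {
    atomic_int ref;
    int id;
};

static void handle_put(struct handle *h)
{
    if (atomic_fetch_sub(&h->ref, 1) == 1) {
        printf("handle %d freed\n", h->id);
        free(h);
    }
}

/* The "work item": uses the handle, then drops the reference taken for it. */
static void *worker(void *arg)
{
    struct handle *h = arg;
    printf("worker sees handle %d\n", h->id);
    handle_put(h);                 /* matching put for the queue-time get */
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct handle *h = malloc(sizeof(*h));

    atomic_init(&h->ref, 1);
    h->id = 42;

    atomic_fetch_add(&h->ref, 1); /* "get" before handing off to the worker */
    pthread_create(&t, NULL, worker, h);

    handle_put(h);                 /* caller drops its own reference */
    pthread_join(t, NULL);
    return 0;
}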
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index b204e84b87fb..54bffb2a1786 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c | |||
| @@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
| 68 | 68 | ||
| 69 | 69 | ||
| 70 | if (oparms->tcon->use_resilient) { | 70 | if (oparms->tcon->use_resilient) { |
| 71 | nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ | 71 | /* default timeout is 0, servers pick default (120 seconds) */ |
| 72 | nr_ioctl_req.Timeout = | ||
| 73 | cpu_to_le32(oparms->tcon->handle_timeout); | ||
| 72 | nr_ioctl_req.Reserved = 0; | 74 | nr_ioctl_req.Reserved = 0; |
| 73 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, | 75 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, |
| 74 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, | 76 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, |
| 75 | true /* is_fsctl */, | 77 | true /* is_fsctl */, |
| 76 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), | 78 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), |
| 77 | NULL, NULL /* no return info */); | 79 | CIFSMaxBufSize, NULL, NULL /* no return info */); |
| 78 | if (rc == -EOPNOTSUPP) { | 80 | if (rc == -EOPNOTSUPP) { |
| 79 | cifs_dbg(VFS, | 81 | cifs_dbg(VFS, |
| 80 | "resiliency not supported by server, disabling\n"); | 82 | "resiliency not supported by server, disabling\n"); |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 0e3570e40ff8..e311f58dc1c8 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
| @@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, | |||
| 555 | clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, | 555 | clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
| 556 | &cinode->flags); | 556 | &cinode->flags); |
| 557 | 557 | ||
| 558 | queue_work(cifsoplockd_wq, &cfile->oplock_break); | 558 | cifs_queue_oplock_break(cfile); |
| 559 | kfree(lw); | 559 | kfree(lw); |
| 560 | return true; | 560 | return true; |
| 561 | } | 561 | } |
| @@ -712,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | |||
| 712 | CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, | 712 | CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
| 713 | &cinode->flags); | 713 | &cinode->flags); |
| 714 | spin_unlock(&cfile->file_info_lock); | 714 | spin_unlock(&cfile->file_info_lock); |
| 715 | queue_work(cifsoplockd_wq, | 715 | |
| 716 | &cfile->oplock_break); | 716 | cifs_queue_oplock_break(cfile); |
| 717 | 717 | ||
| 718 | spin_unlock(&tcon->open_file_lock); | 718 | spin_unlock(&tcon->open_file_lock); |
| 719 | spin_unlock(&cifs_tcp_ses_lock); | 719 | spin_unlock(&cifs_tcp_ses_lock); |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 1022a3771e14..c36ff0d1fe2a 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 581 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 581 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 582 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, | 582 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, |
| 583 | NULL /* no data input */, 0 /* no data input */, | 583 | NULL /* no data input */, 0 /* no data input */, |
| 584 | (char **)&out_buf, &ret_data_len); | 584 | CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); |
| 585 | if (rc == -EOPNOTSUPP) { | 585 | if (rc == -EOPNOTSUPP) { |
| 586 | cifs_dbg(FYI, | 586 | cifs_dbg(FYI, |
| 587 | "server does not support query network interfaces\n"); | 587 | "server does not support query network interfaces\n"); |
| @@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) | |||
| 717 | oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId); | 717 | oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId); |
| 718 | #endif /* CIFS_DEBUG2 */ | 718 | #endif /* CIFS_DEBUG2 */ |
| 719 | 719 | ||
| 720 | if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) | ||
| 721 | oplock = smb2_parse_lease_state(server, o_rsp, | ||
| 722 | &oparms.fid->epoch, | ||
| 723 | oparms.fid->lease_key); | ||
| 724 | else | ||
| 725 | goto oshr_exit; | ||
| 726 | |||
| 727 | |||
| 728 | memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid)); | 720 | memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid)); |
| 729 | tcon->crfid.tcon = tcon; | 721 | tcon->crfid.tcon = tcon; |
| 730 | tcon->crfid.is_valid = true; | 722 | tcon->crfid.is_valid = true; |
| 731 | kref_init(&tcon->crfid.refcount); | 723 | kref_init(&tcon->crfid.refcount); |
| 732 | kref_get(&tcon->crfid.refcount); | ||
| 733 | 724 | ||
| 725 | if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { | ||
| 726 | kref_get(&tcon->crfid.refcount); | ||
| 727 | oplock = smb2_parse_lease_state(server, o_rsp, | ||
| 728 | &oparms.fid->epoch, | ||
| 729 | oparms.fid->lease_key); | ||
| 730 | } else | ||
| 731 | goto oshr_exit; | ||
| 734 | 732 | ||
| 735 | qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; | 733 | qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; |
| 736 | if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) | 734 | if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) |
| 737 | goto oshr_exit; | 735 | goto oshr_exit; |
| 738 | rc = smb2_validate_and_copy_iov( | 736 | if (!smb2_validate_and_copy_iov( |
| 739 | le16_to_cpu(qi_rsp->OutputBufferOffset), | 737 | le16_to_cpu(qi_rsp->OutputBufferOffset), |
| 740 | sizeof(struct smb2_file_all_info), | 738 | sizeof(struct smb2_file_all_info), |
| 741 | &rsp_iov[1], sizeof(struct smb2_file_all_info), | 739 | &rsp_iov[1], sizeof(struct smb2_file_all_info), |
| 742 | (char *)&tcon->crfid.file_all_info); | 740 | (char *)&tcon->crfid.file_all_info)) |
| 743 | if (rc) | 741 | tcon->crfid.file_all_info_is_valid = 1; |
| 744 | goto oshr_exit; | ||
| 745 | tcon->crfid.file_all_info_is_valid = 1; | ||
| 746 | 742 | ||
| 747 | oshr_exit: | 743 | oshr_exit: |
| 748 | mutex_unlock(&tcon->crfid.fid_mutex); | 744 | mutex_unlock(&tcon->crfid.fid_mutex); |
| @@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1299 | 1295 | ||
| 1300 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 1296 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
| 1301 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, | 1297 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, |
| 1302 | NULL, 0 /* no input */, | 1298 | NULL, 0 /* no input */, CIFSMaxBufSize, |
| 1303 | (char **)&res_key, &ret_data_len); | 1299 | (char **)&res_key, &ret_data_len); |
| 1304 | 1300 | ||
| 1305 | if (rc) { | 1301 | if (rc) { |
| @@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid, | |||
| 1404 | rc = SMB2_ioctl_init(tcon, &rqst[1], | 1400 | rc = SMB2_ioctl_init(tcon, &rqst[1], |
| 1405 | COMPOUND_FID, COMPOUND_FID, | 1401 | COMPOUND_FID, COMPOUND_FID, |
| 1406 | qi.info_type, true, NULL, | 1402 | qi.info_type, true, NULL, |
| 1407 | 0); | 1403 | 0, CIFSMaxBufSize); |
| 1408 | } | 1404 | } |
| 1409 | } else if (qi.flags == PASSTHRU_QUERY_INFO) { | 1405 | } else if (qi.flags == PASSTHRU_QUERY_INFO) { |
| 1410 | memset(&qi_iov, 0, sizeof(qi_iov)); | 1406 | memset(&qi_iov, 0, sizeof(qi_iov)); |
| @@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid, | |||
| 1532 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, | 1528 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, |
| 1533 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, | 1529 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, |
| 1534 | true /* is_fsctl */, (char *)pcchunk, | 1530 | true /* is_fsctl */, (char *)pcchunk, |
| 1535 | sizeof(struct copychunk_ioctl), (char **)&retbuf, | 1531 | sizeof(struct copychunk_ioctl), CIFSMaxBufSize, |
| 1536 | &ret_data_len); | 1532 | (char **)&retbuf, &ret_data_len); |
| 1537 | if (rc == 0) { | 1533 | if (rc == 0) { |
| 1538 | if (ret_data_len != | 1534 | if (ret_data_len != |
| 1539 | sizeof(struct copychunk_ioctl_rsp)) { | 1535 | sizeof(struct copychunk_ioctl_rsp)) { |
| @@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1693 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1689 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 1694 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, | 1690 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, |
| 1695 | true /* is_fctl */, | 1691 | true /* is_fctl */, |
| 1696 | &setsparse, 1, NULL, NULL); | 1692 | &setsparse, 1, CIFSMaxBufSize, NULL, NULL); |
| 1697 | if (rc) { | 1693 | if (rc) { |
| 1698 | tcon->broken_sparse_sup = true; | 1694 | tcon->broken_sparse_sup = true; |
| 1699 | cifs_dbg(FYI, "set sparse rc = %d\n", rc); | 1695 | cifs_dbg(FYI, "set sparse rc = %d\n", rc); |
| @@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid, | |||
| 1766 | true /* is_fsctl */, | 1762 | true /* is_fsctl */, |
| 1767 | (char *)&dup_ext_buf, | 1763 | (char *)&dup_ext_buf, |
| 1768 | sizeof(struct duplicate_extents_to_file), | 1764 | sizeof(struct duplicate_extents_to_file), |
| 1769 | NULL, | 1765 | CIFSMaxBufSize, NULL, |
| 1770 | &ret_data_len); | 1766 | &ret_data_len); |
| 1771 | 1767 | ||
| 1772 | if (ret_data_len > 0) | 1768 | if (ret_data_len > 0) |
| @@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1801 | true /* is_fsctl */, | 1797 | true /* is_fsctl */, |
| 1802 | (char *)&integr_info, | 1798 | (char *)&integr_info, |
| 1803 | sizeof(struct fsctl_set_integrity_information_req), | 1799 | sizeof(struct fsctl_set_integrity_information_req), |
| 1804 | NULL, | 1800 | CIFSMaxBufSize, NULL, |
| 1805 | &ret_data_len); | 1801 | &ret_data_len); |
| 1806 | 1802 | ||
| 1807 | } | 1803 | } |
| @@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1809 | /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ | 1805 | /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ |
| 1810 | #define GMT_TOKEN_SIZE 50 | 1806 | #define GMT_TOKEN_SIZE 50 |
| 1811 | 1807 | ||
| 1808 | #define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */ | ||
| 1809 | |||
| 1812 | /* | 1810 | /* |
| 1813 | * Input buffer contains (empty) struct smb_snapshot array with size filled in | 1811 | * Input buffer contains (empty) struct smb_snapshot array with size filled in |
| 1814 | * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 | 1812 | * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 |
| @@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1820 | char *retbuf = NULL; | 1818 | char *retbuf = NULL; |
| 1821 | unsigned int ret_data_len = 0; | 1819 | unsigned int ret_data_len = 0; |
| 1822 | int rc; | 1820 | int rc; |
| 1821 | u32 max_response_size; | ||
| 1823 | struct smb_snapshot_array snapshot_in; | 1822 | struct smb_snapshot_array snapshot_in; |
| 1824 | 1823 | ||
| 1824 | if (get_user(ret_data_len, (unsigned int __user *)ioc_buf)) | ||
| 1825 | return -EFAULT; | ||
| 1826 | |||
| 1827 | /* | ||
| 1828 | * Note that for snapshot queries that servers like Azure expect that | ||
| 1829 | * the first query be minimal size (and just used to get the number/size | ||
| 1830 | * of previous versions) so response size must be specified as EXACTLY | ||
| 1831 | * sizeof(struct snapshot_array) which is 16 when rounded up to multiple | ||
| 1832 | * of eight bytes. | ||
| 1833 | */ | ||
| 1834 | if (ret_data_len == 0) | ||
| 1835 | max_response_size = MIN_SNAPSHOT_ARRAY_SIZE; | ||
| 1836 | else | ||
| 1837 | max_response_size = CIFSMaxBufSize; | ||
| 1838 | |||
| 1825 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1839 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 1826 | cfile->fid.volatile_fid, | 1840 | cfile->fid.volatile_fid, |
| 1827 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, | 1841 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, |
| 1828 | true /* is_fsctl */, | 1842 | true /* is_fsctl */, |
| 1829 | NULL, 0 /* no input data */, | 1843 | NULL, 0 /* no input data */, max_response_size, |
| 1830 | (char **)&retbuf, | 1844 | (char **)&retbuf, |
| 1831 | &ret_data_len); | 1845 | &ret_data_len); |
| 1832 | cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", | 1846 | cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", |
| @@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
| 2304 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 2318 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 2305 | FSCTL_DFS_GET_REFERRALS, | 2319 | FSCTL_DFS_GET_REFERRALS, |
| 2306 | true /* is_fsctl */, | 2320 | true /* is_fsctl */, |
| 2307 | (char *)dfs_req, dfs_req_size, | 2321 | (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, |
| 2308 | (char **)&dfs_rsp, &dfs_rsp_size); | 2322 | (char **)&dfs_rsp, &dfs_rsp_size); |
| 2309 | } while (rc == -EAGAIN); | 2323 | } while (rc == -EAGAIN); |
| 2310 | 2324 | ||
| @@ -2375,6 +2389,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2375 | 2389 | ||
| 2376 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov, | 2390 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov, |
| 2377 | &resp_buftype); | 2391 | &resp_buftype); |
| 2392 | if (!rc) | ||
| 2393 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | ||
| 2378 | if (!rc || !err_iov.iov_base) { | 2394 | if (!rc || !err_iov.iov_base) { |
| 2379 | rc = -ENOENT; | 2395 | rc = -ENOENT; |
| 2380 | goto free_path; | 2396 | goto free_path; |
| @@ -2658,7 +2674,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, | |||
| 2658 | rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, | 2674 | rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, |
| 2659 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 2675 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
| 2660 | true /* is_fctl */, (char *)&fsctl_buf, | 2676 | true /* is_fctl */, (char *)&fsctl_buf, |
| 2661 | sizeof(struct file_zero_data_information)); | 2677 | sizeof(struct file_zero_data_information), |
| 2678 | CIFSMaxBufSize); | ||
| 2662 | if (rc) | 2679 | if (rc) |
| 2663 | goto zero_range_exit; | 2680 | goto zero_range_exit; |
| 2664 | 2681 | ||
| @@ -2735,7 +2752,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, | |||
| 2735 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 2752 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
| 2736 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 2753 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
| 2737 | true /* is_fctl */, (char *)&fsctl_buf, | 2754 | true /* is_fctl */, (char *)&fsctl_buf, |
| 2738 | sizeof(struct file_zero_data_information), NULL, NULL); | 2755 | sizeof(struct file_zero_data_information), |
| 2756 | CIFSMaxBufSize, NULL, NULL); | ||
| 2739 | free_xid(xid); | 2757 | free_xid(xid); |
| 2740 | return rc; | 2758 | return rc; |
| 2741 | } | 2759 | } |
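smb3_enum_snapshots() now reads the caller's array size first and requests only a 16-byte response when that size is zero, since servers such as Azure expect the initial probe to be minimal. The tiny sketch below isolates that size-selection step; MAX_BUF_SIZE is a placeholder standing in for CIFSMaxBufSize.

#include <stdio.h>

#define MIN_SNAPSHOT_ARRAY_SIZE 16   /* from the hunk above */
#define MAX_BUF_SIZE (64 * 1024)     /* placeholder for CIFSMaxBufSize */

/* First call passes 0 to learn how much space is needed; later calls pass
 * the size reported by the server and get a full-sized response buffer. */
static unsigned int snapshot_response_size(unsigned int user_array_size)
{
    return user_array_size == 0 ? MIN_SNAPSHOT_ARRAY_SIZE : MAX_BUF_SIZE;
}

int main(void)
{
    printf("probe: %u bytes\n", snapshot_response_size(0));
    printf("full:  %u bytes\n", snapshot_response_size(4096));
    return 0;
}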
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 21ac19ff19cb..a37774a55f3a 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -832,8 +832,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
| 832 | } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { | 832 | } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { |
| 833 | /* ops set to 3.0 by default for default so update */ | 833 | /* ops set to 3.0 by default for default so update */ |
| 834 | ses->server->ops = &smb21_operations; | 834 | ses->server->ops = &smb21_operations; |
| 835 | } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) | 835 | ses->server->vals = &smb21_values; |
| 836 | } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) { | ||
| 836 | ses->server->ops = &smb311_operations; | 837 | ses->server->ops = &smb311_operations; |
| 838 | ses->server->vals = &smb311_values; | ||
| 839 | } | ||
| 837 | } else if (le16_to_cpu(rsp->DialectRevision) != | 840 | } else if (le16_to_cpu(rsp->DialectRevision) != |
| 838 | ses->server->vals->protocol_id) { | 841 | ses->server->vals->protocol_id) { |
| 839 | /* if requested single dialect ensure returned dialect matched */ | 842 | /* if requested single dialect ensure returned dialect matched */ |
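When the negotiate response selects a different dialect than the default, this hunk switches the server's value table (->vals) together with its operations table (->ops); previously only ->ops was replaced, so fields such as the protocol id and size limits still described the dialect the client had assumed. A toy illustration of keeping the two tables in lockstep; the struct and field names here are invented for the example, not the CIFS ones:

```c
#include <stdio.h>

/* Invented stand-ins for the per-dialect ops/vals tables the client keeps. */
struct ops  { const char *name; };
struct vals { unsigned int protocol_id; };

static const struct ops  smb21_ops   = { "smb2.1 ops"   };
static const struct vals smb21_vals  = { 0x0210 };
static const struct ops  smb311_ops  = { "smb3.1.1 ops" };
static const struct vals smb311_vals = { 0x0311 };

struct server { const struct ops *ops; const struct vals *vals; };

/* Assign both tables in the same branch, as the fixed negotiate path does;
 * updating only ->ops leaves ->vals describing the wrong dialect. */
static void set_dialect(struct server *srv, unsigned int dialect)
{
	if (dialect == 0x0210) {
		srv->ops  = &smb21_ops;
		srv->vals = &smb21_vals;
	} else if (dialect == 0x0311) {
		srv->ops  = &smb311_ops;
		srv->vals = &smb311_vals;
	}
}

int main(void)
{
	struct server srv = { &smb311_ops, &smb311_vals };

	set_dialect(&srv, 0x0210);       /* server picked SMB2.1 */
	printf("%s, protocol 0x%04x\n", srv.ops->name, srv.vals->protocol_id);
	return 0;
}
```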
| @@ -1002,7 +1005,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 1002 | 1005 | ||
| 1003 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 1006 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
| 1004 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, | 1007 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, |
| 1005 | (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); | 1008 | (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, |
| 1009 | (char **)&pneg_rsp, &rsplen); | ||
| 1006 | if (rc == -EOPNOTSUPP) { | 1010 | if (rc == -EOPNOTSUPP) { |
| 1007 | /* | 1011 | /* |
| 1008 | * Old Windows versions or Netapp SMB server can return | 1012 | * Old Windows versions or Netapp SMB server can return |
| @@ -1858,8 +1862,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, | |||
| 1858 | } | 1862 | } |
| 1859 | 1863 | ||
| 1860 | static struct create_durable_v2 * | 1864 | static struct create_durable_v2 * |
| 1861 | create_durable_v2_buf(struct cifs_fid *pfid) | 1865 | create_durable_v2_buf(struct cifs_open_parms *oparms) |
| 1862 | { | 1866 | { |
| 1867 | struct cifs_fid *pfid = oparms->fid; | ||
| 1863 | struct create_durable_v2 *buf; | 1868 | struct create_durable_v2 *buf; |
| 1864 | 1869 | ||
| 1865 | buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); | 1870 | buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); |
| @@ -1873,7 +1878,14 @@ create_durable_v2_buf(struct cifs_fid *pfid) | |||
| 1873 | (struct create_durable_v2, Name)); | 1878 | (struct create_durable_v2, Name)); |
| 1874 | buf->ccontext.NameLength = cpu_to_le16(4); | 1879 | buf->ccontext.NameLength = cpu_to_le16(4); |
| 1875 | 1880 | ||
| 1876 | buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ | 1881 | /* |
| 1882 | * NB: Handle timeout defaults to 0, which allows server to choose | ||
| 1883 | * (most servers default to 120 seconds) and most clients default to 0. | ||
| 1884 | * This can be overridden at mount ("handletimeout=") if the user wants | ||
| 1885 | * a different persistent (or resilient) handle timeout for all opens | ||
| 1886 | * on a particular SMB3 mount. | ||
| 1887 | */ | ||
| 1888 | buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); | ||
| 1877 | buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); | 1889 | buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); |
| 1878 | generate_random_uuid(buf->dcontext.CreateGuid); | 1890 | generate_random_uuid(buf->dcontext.CreateGuid); |
| 1879 | memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); | 1891 | memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); |
| @@ -1926,7 +1938,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, | |||
| 1926 | struct smb2_create_req *req = iov[0].iov_base; | 1938 | struct smb2_create_req *req = iov[0].iov_base; |
| 1927 | unsigned int num = *num_iovec; | 1939 | unsigned int num = *num_iovec; |
| 1928 | 1940 | ||
| 1929 | iov[num].iov_base = create_durable_v2_buf(oparms->fid); | 1941 | iov[num].iov_base = create_durable_v2_buf(oparms); |
| 1930 | if (iov[num].iov_base == NULL) | 1942 | if (iov[num].iov_base == NULL) |
| 1931 | return -ENOMEM; | 1943 | return -ENOMEM; |
| 1932 | iov[num].iov_len = sizeof(struct create_durable_v2); | 1944 | iov[num].iov_len = sizeof(struct create_durable_v2); |
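create_durable_v2_buf() now receives the full open parms so the durable handle v2 request can carry a per-mount timeout: 0 still means "let the server choose" (commonly 120 seconds), and the "handletimeout=" value stored in the tcon overrides it for every open on that mount. A small self-contained sketch of the idea; the option parsing and the assumption that the value is in milliseconds (the unit of the on-the-wire Timeout field in MS-SMB2) are illustrative, not the exact kernel code:

```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct tcon { uint32_t handle_timeout; };     /* filled once, at mount time */
struct durable_v2 { uint32_t timeout; };      /* wire field (little endian) */

static void fill_durable_v2(struct durable_v2 *buf, const struct tcon *tcon)
{
	/* 0 = server default; anything else is the per-mount override. */
	buf->timeout = tcon->handle_timeout;  /* cpu_to_le32() in the kernel */
}

int main(void)
{
	const char *opt = "handletimeout=5000";   /* hypothetical mount option */
	struct tcon tcon = { 0 };
	struct durable_v2 req;

	if (!strncmp(opt, "handletimeout=", 14))
		tcon.handle_timeout = (uint32_t)strtoul(opt + 14, NULL, 10);

	fill_durable_v2(&req, &tcon);
	printf("durable handle timeout requested: %u\n", req.timeout);
	return 0;
}
```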
| @@ -2478,7 +2490,8 @@ creat_exit: | |||
| 2478 | int | 2490 | int |
| 2479 | SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | 2491 | SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, |
| 2480 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 2492 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 2481 | bool is_fsctl, char *in_data, u32 indatalen) | 2493 | bool is_fsctl, char *in_data, u32 indatalen, |
| 2494 | __u32 max_response_size) | ||
| 2482 | { | 2495 | { |
| 2483 | struct smb2_ioctl_req *req; | 2496 | struct smb2_ioctl_req *req; |
| 2484 | struct kvec *iov = rqst->rq_iov; | 2497 | struct kvec *iov = rqst->rq_iov; |
| @@ -2520,16 +2533,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | |||
| 2520 | req->OutputCount = 0; /* MBZ */ | 2533 | req->OutputCount = 0; /* MBZ */ |
| 2521 | 2534 | ||
| 2522 | /* | 2535 | /* |
| 2523 | * Could increase MaxOutputResponse, but that would require more | 2536 | * In most cases max_response_size is set to 16K (CIFSMaxBufSize) |
| 2524 | * than one credit. Windows typically sets this smaller, but for some | 2537 | * We could increase the default MaxOutputResponse, but that could require |
| 2538 | * more credits. Windows typically sets this smaller, but for some | ||
| 2525 | * ioctls it may be useful to allow server to send more. No point | 2539 | * ioctls it may be useful to allow server to send more. No point |
| 2526 | * limiting what the server can send as long as fits in one credit | 2540 | * limiting what the server can send as long as fits in one credit |
| 2527 | * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE | 2541 | * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want |
| 2528 | * (by default, note that it can be overridden to make max larger) | 2542 | * to increase this limit up in the future. |
| 2529 | * in responses (except for read responses which can be bigger. | 2543 | * Note that for snapshot queries that servers like Azure expect that |
| 2530 | * We may want to bump this limit up | 2544 | * the first query be minimal size (and just used to get the number/size |
| 2545 | * of previous versions) so response size must be specified as EXACTLY | ||
| 2546 | * sizeof(struct snapshot_array) which is 16 when rounded up to multiple | ||
| 2547 | * of eight bytes. Currently that is the only case where we set max | ||
| 2548 | * response size smaller. | ||
| 2531 | */ | 2549 | */ |
| 2532 | req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); | 2550 | req->MaxOutputResponse = cpu_to_le32(max_response_size); |
| 2533 | 2551 | ||
| 2534 | if (is_fsctl) | 2552 | if (is_fsctl) |
| 2535 | req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); | 2553 | req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); |
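SMB2_ioctl_init() now advertises whatever MaxOutputResponse the caller asks for instead of always CIFSMaxBufSize; per the comment, snapshot enumeration against servers such as Azure must issue a first query whose response cap is exactly the 8-byte-rounded size of the snapshot array header, just to learn how much data exists, and only then reissue with a full-size cap. A rough sketch of that two-pass pattern; the structure layout and the enum_snapshots_ioctl() helper are illustrative stand-ins, not the kernel's identifiers:

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative 12-byte header (16 when rounded up to a multiple of eight). */
struct snapshot_array_hdr {
	uint32_t number_of_snapshots;
	uint32_t number_of_snapshots_returned;
	uint32_t snapshot_array_size;
};

#define FULL_RESPONSE_SIZE (16 * 1024)   /* CIFSMaxBufSize default */

/* Stub standing in for SMB2_ioctl(FSCTL_SRV_ENUMERATE_SNAPSHOTS, ...). */
static int enum_snapshots_ioctl(uint32_t max_response_size)
{
	printf("ioctl issued with MaxOutputResponse=%u\n", max_response_size);
	return 0;
}

int main(void)
{
	/* Pass 1: minimal cap, only the snapshot count/size comes back. */
	enum_snapshots_ioctl((uint32_t)((sizeof(struct snapshot_array_hdr) + 7) & ~7u));
	/* Pass 2: large cap, the actual snapshot names come back. */
	enum_snapshots_ioctl(FULL_RESPONSE_SIZE);
	return 0;
}
```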
| @@ -2550,13 +2568,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst) | |||
| 2550 | cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ | 2568 | cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ |
| 2551 | } | 2569 | } |
| 2552 | 2570 | ||
| 2571 | |||
| 2553 | /* | 2572 | /* |
| 2554 | * SMB2 IOCTL is used for both IOCTLs and FSCTLs | 2573 | * SMB2 IOCTL is used for both IOCTLs and FSCTLs |
| 2555 | */ | 2574 | */ |
| 2556 | int | 2575 | int |
| 2557 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | 2576 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, |
| 2558 | u64 volatile_fid, u32 opcode, bool is_fsctl, | 2577 | u64 volatile_fid, u32 opcode, bool is_fsctl, |
| 2559 | char *in_data, u32 indatalen, | 2578 | char *in_data, u32 indatalen, u32 max_out_data_len, |
| 2560 | char **out_data, u32 *plen /* returned data len */) | 2579 | char **out_data, u32 *plen /* returned data len */) |
| 2561 | { | 2580 | { |
| 2562 | struct smb_rqst rqst; | 2581 | struct smb_rqst rqst; |
| @@ -2593,8 +2612,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2593 | rqst.rq_iov = iov; | 2612 | rqst.rq_iov = iov; |
| 2594 | rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; | 2613 | rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; |
| 2595 | 2614 | ||
| 2596 | rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, | 2615 | rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode, |
| 2597 | opcode, is_fsctl, in_data, indatalen); | 2616 | is_fsctl, in_data, indatalen, max_out_data_len); |
| 2598 | if (rc) | 2617 | if (rc) |
| 2599 | goto ioctl_exit; | 2618 | goto ioctl_exit; |
| 2600 | 2619 | ||
| @@ -2672,7 +2691,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2672 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 2691 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
| 2673 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, | 2692 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, |
| 2674 | (char *)&fsctl_input /* data input */, | 2693 | (char *)&fsctl_input /* data input */, |
| 2675 | 2 /* in data len */, &ret_data /* out data */, NULL); | 2694 | 2 /* in data len */, CIFSMaxBufSize /* max out data */, |
| 2695 | &ret_data /* out data */, NULL); | ||
| 2676 | 2696 | ||
| 2677 | cifs_dbg(FYI, "set compression rc %d\n", rc); | 2697 | cifs_dbg(FYI, "set compression rc %d\n", rc); |
| 2678 | 2698 | ||
| @@ -3431,8 +3451,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 3431 | rqst.rq_nvec = 1; | 3451 | rqst.rq_nvec = 1; |
| 3432 | 3452 | ||
| 3433 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); | 3453 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); |
| 3434 | cifs_small_buf_release(req); | ||
| 3435 | |||
| 3436 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; | 3454 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; |
| 3437 | 3455 | ||
| 3438 | if (rc) { | 3456 | if (rc) { |
| @@ -3448,12 +3466,15 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 3448 | io_parms->tcon->tid, ses->Suid, | 3466 | io_parms->tcon->tid, ses->Suid, |
| 3449 | io_parms->offset, 0); | 3467 | io_parms->offset, 0); |
| 3450 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | 3468 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 3469 | cifs_small_buf_release(req); | ||
| 3451 | return rc == -ENODATA ? 0 : rc; | 3470 | return rc == -ENODATA ? 0 : rc; |
| 3452 | } else | 3471 | } else |
| 3453 | trace_smb3_read_done(xid, req->PersistentFileId, | 3472 | trace_smb3_read_done(xid, req->PersistentFileId, |
| 3454 | io_parms->tcon->tid, ses->Suid, | 3473 | io_parms->tcon->tid, ses->Suid, |
| 3455 | io_parms->offset, io_parms->length); | 3474 | io_parms->offset, io_parms->length); |
| 3456 | 3475 | ||
| 3476 | cifs_small_buf_release(req); | ||
| 3477 | |||
| 3457 | *nbytes = le32_to_cpu(rsp->DataLength); | 3478 | *nbytes = le32_to_cpu(rsp->DataLength); |
| 3458 | if ((*nbytes > CIFS_MAX_MSGSIZE) || | 3479 | if ((*nbytes > CIFS_MAX_MSGSIZE) || |
| 3459 | (*nbytes > io_parms->length)) { | 3480 | (*nbytes > io_parms->length)) { |
| @@ -3752,7 +3773,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 3752 | 3773 | ||
| 3753 | rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst, | 3774 | rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst, |
| 3754 | &resp_buftype, flags, &rsp_iov); | 3775 | &resp_buftype, flags, &rsp_iov); |
| 3755 | cifs_small_buf_release(req); | ||
| 3756 | rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; | 3776 | rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; |
| 3757 | 3777 | ||
| 3758 | if (rc) { | 3778 | if (rc) { |
| @@ -3770,6 +3790,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 3770 | io_parms->offset, *nbytes); | 3790 | io_parms->offset, *nbytes); |
| 3771 | } | 3791 | } |
| 3772 | 3792 | ||
| 3793 | cifs_small_buf_release(req); | ||
| 3773 | free_rsp_buf(resp_buftype, rsp); | 3794 | free_rsp_buf(resp_buftype, rsp); |
| 3774 | return rc; | 3795 | return rc; |
| 3775 | } | 3796 | } |
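Both the SMB2_read() and SMB2_write() hunks move cifs_small_buf_release(req) below the trace points: the read/write tracepoints dereference req->PersistentFileId, so freeing the request buffer first was a use-after-free. A tiny standalone illustration of the corrected ordering:

```c
#include <stdio.h>
#include <stdlib.h>

struct request { unsigned long long persistent_fid; };

static void trace_done(const struct request *req)
{
	printf("done, fid=%llu\n", req->persistent_fid);
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));

	req->persistent_fid = 42;
	/*
	 * Old order:   free(req); trace_done(req);   <- use-after-free
	 * Fixed order: consume every field first, then release the buffer.
	 */
	trace_done(req);
	free(req);
	return 0;
}
```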
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 3c32d0cfea69..52df125e9189 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
| @@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | |||
| 142 | extern void SMB2_open_free(struct smb_rqst *rqst); | 142 | extern void SMB2_open_free(struct smb_rqst *rqst); |
| 143 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, | 143 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, |
| 144 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 144 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 145 | bool is_fsctl, char *in_data, u32 indatalen, | 145 | bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, |
| 146 | char **out_data, u32 *plen /* returned data len */); | 146 | char **out_data, u32 *plen /* returned data len */); |
| 147 | extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, | 147 | extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, |
| 148 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 148 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
| 149 | bool is_fsctl, char *in_data, u32 indatalen); | 149 | bool is_fsctl, char *in_data, u32 indatalen, |
| 150 | __u32 max_response_size); | ||
| 150 | extern void SMB2_ioctl_free(struct smb_rqst *rqst); | 151 | extern void SMB2_ioctl_free(struct smb_rqst *rqst); |
| 151 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | 152 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, |
| 152 | u64 persistent_file_id, u64 volatile_file_id); | 153 | u64 persistent_file_id, u64 volatile_file_id); |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/sizes.h> | 33 | #include <linux/sizes.h> |
| 34 | #include <linux/mmu_notifier.h> | 34 | #include <linux/mmu_notifier.h> |
| 35 | #include <linux/iomap.h> | 35 | #include <linux/iomap.h> |
| 36 | #include <asm/pgalloc.h> | ||
| 36 | #include "internal.h" | 37 | #include "internal.h" |
| 37 | 38 | ||
| 38 | #define CREATE_TRACE_POINTS | 39 | #define CREATE_TRACE_POINTS |
| @@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, | |||
| 1407 | { | 1408 | { |
| 1408 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; | 1409 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
| 1409 | unsigned long pmd_addr = vmf->address & PMD_MASK; | 1410 | unsigned long pmd_addr = vmf->address & PMD_MASK; |
| 1411 | struct vm_area_struct *vma = vmf->vma; | ||
| 1410 | struct inode *inode = mapping->host; | 1412 | struct inode *inode = mapping->host; |
| 1413 | pgtable_t pgtable = NULL; | ||
| 1411 | struct page *zero_page; | 1414 | struct page *zero_page; |
| 1412 | spinlock_t *ptl; | 1415 | spinlock_t *ptl; |
| 1413 | pmd_t pmd_entry; | 1416 | pmd_t pmd_entry; |
| @@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, | |||
| 1422 | *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, | 1425 | *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, |
| 1423 | DAX_PMD | DAX_ZERO_PAGE, false); | 1426 | DAX_PMD | DAX_ZERO_PAGE, false); |
| 1424 | 1427 | ||
| 1428 | if (arch_needs_pgtable_deposit()) { | ||
| 1429 | pgtable = pte_alloc_one(vma->vm_mm); | ||
| 1430 | if (!pgtable) | ||
| 1431 | return VM_FAULT_OOM; | ||
| 1432 | } | ||
| 1433 | |||
| 1425 | ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); | 1434 | ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); |
| 1426 | if (!pmd_none(*(vmf->pmd))) { | 1435 | if (!pmd_none(*(vmf->pmd))) { |
| 1427 | spin_unlock(ptl); | 1436 | spin_unlock(ptl); |
| 1428 | goto fallback; | 1437 | goto fallback; |
| 1429 | } | 1438 | } |
| 1430 | 1439 | ||
| 1440 | if (pgtable) { | ||
| 1441 | pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); | ||
| 1442 | mm_inc_nr_ptes(vma->vm_mm); | ||
| 1443 | } | ||
| 1431 | pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); | 1444 | pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); |
| 1432 | pmd_entry = pmd_mkhuge(pmd_entry); | 1445 | pmd_entry = pmd_mkhuge(pmd_entry); |
| 1433 | set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); | 1446 | set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); |
| @@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, | |||
| 1436 | return VM_FAULT_NOPAGE; | 1449 | return VM_FAULT_NOPAGE; |
| 1437 | 1450 | ||
| 1438 | fallback: | 1451 | fallback: |
| 1452 | if (pgtable) | ||
| 1453 | pte_free(vma->vm_mm, pgtable); | ||
| 1439 | trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); | 1454 | trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); |
| 1440 | return VM_FAULT_FALLBACK; | 1455 | return VM_FAULT_FALLBACK; |
| 1441 | } | 1456 | } |
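dax_pmd_load_hole() now honours arch_needs_pgtable_deposit(): on architectures that need it, installing the huge zero-page PMD must also deposit a preallocated PTE page table (so a later split of the PMD has one to withdraw), the preallocation has to happen before the PMD lock is taken, and the fallback path must pte_free() it again. A generic, runnable model of that reserve-before-lock / release-on-fallback shape (plain malloc/free standing in for the page table calls):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Anything that may be consumed while the lock is held (the deposited pte
 * page in the real code) is allocated up front and freed on fallback. */
static bool install_huge_entry(bool need_deposit, bool slot_busy)
{
	void *deposit = NULL;

	if (need_deposit) {
		deposit = malloc(4096);          /* pte_alloc_one() stand-in */
		if (!deposit)
			return false;            /* VM_FAULT_OOM */
	}

	/* --- spinlock held from here (pmd_lock() in dax_pmd_load_hole) --- */
	if (slot_busy)
		goto fallback;
	/* the deposit is handed to the page table here; ownership transfers */
	deposit = NULL;
	/* --- unlock --- */
	return true;

fallback:
	/* --- unlock --- */
	free(deposit);                           /* pte_free() on fallback */
	return false;
}

int main(void)
{
	printf("%d %d\n", install_huge_entry(true, false),
	       install_huge_entry(true, true));
	return 0;
}
```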
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 95b5e78c22b1..f25daa207421 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
| @@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) | |||
| 163 | return 0; | 163 | return 0; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | static void debugfs_evict_inode(struct inode *inode) | 166 | static void debugfs_i_callback(struct rcu_head *head) |
| 167 | { | 167 | { |
| 168 | truncate_inode_pages_final(&inode->i_data); | 168 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 169 | clear_inode(inode); | ||
| 170 | if (S_ISLNK(inode->i_mode)) | 169 | if (S_ISLNK(inode->i_mode)) |
| 171 | kfree(inode->i_link); | 170 | kfree(inode->i_link); |
| 171 | free_inode_nonrcu(inode); | ||
| 172 | } | ||
| 173 | |||
| 174 | static void debugfs_destroy_inode(struct inode *inode) | ||
| 175 | { | ||
| 176 | call_rcu(&inode->i_rcu, debugfs_i_callback); | ||
| 172 | } | 177 | } |
| 173 | 178 | ||
| 174 | static const struct super_operations debugfs_super_operations = { | 179 | static const struct super_operations debugfs_super_operations = { |
| 175 | .statfs = simple_statfs, | 180 | .statfs = simple_statfs, |
| 176 | .remount_fs = debugfs_remount, | 181 | .remount_fs = debugfs_remount, |
| 177 | .show_options = debugfs_show_options, | 182 | .show_options = debugfs_show_options, |
| 178 | .evict_inode = debugfs_evict_inode, | 183 | .destroy_inode = debugfs_destroy_inode, |
| 179 | }; | 184 | }; |
| 180 | 185 | ||
| 181 | static void debugfs_release_dentry(struct dentry *dentry) | 186 | static void debugfs_release_dentry(struct dentry *dentry) |
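debugfs stops freeing the symlink body in evict_inode and instead frees it from a destroy_inode handler that defers the work with call_rcu(): lockless RCU-walk path lookups may still be dereferencing inode->i_link, so the kfree() has to wait out a grace period. The jffs2 and ubifs hunks further down apply the same pattern to f->target and ui->data. A kernel-style sketch of the generic idiom; this is not a standalone program, and the foo_* names are placeholders:

```c
/* Free reader-visible data only after an RCU grace period, from the
 * ->destroy_inode() path rather than from inode eviction. */
static void foo_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);        /* data lockless readers may touch */
	free_inode_nonrcu(inode);
}

static void foo_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, foo_i_callback);
}
```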
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8a63e52785e9..9971a35cf1ef 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
| @@ -2056,10 +2056,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |||
| 2056 | rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; | 2056 | rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; |
| 2057 | 2057 | ||
| 2058 | ret = -EINVAL; | 2058 | ret = -EINVAL; |
| 2059 | if (rem < len) { | 2059 | if (rem < len) |
| 2060 | pipe_unlock(pipe); | 2060 | goto out_free; |
| 2061 | goto out; | ||
| 2062 | } | ||
| 2063 | 2061 | ||
| 2064 | rem = len; | 2062 | rem = len; |
| 2065 | while (rem) { | 2063 | while (rem) { |
| @@ -2077,7 +2075,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |||
| 2077 | pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); | 2075 | pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); |
| 2078 | pipe->nrbufs--; | 2076 | pipe->nrbufs--; |
| 2079 | } else { | 2077 | } else { |
| 2080 | pipe_buf_get(pipe, ibuf); | 2078 | if (!pipe_buf_get(pipe, ibuf)) |
| 2079 | goto out_free; | ||
| 2080 | |||
| 2081 | *obuf = *ibuf; | 2081 | *obuf = *ibuf; |
| 2082 | obuf->flags &= ~PIPE_BUF_FLAG_GIFT; | 2082 | obuf->flags &= ~PIPE_BUF_FLAG_GIFT; |
| 2083 | obuf->len = rem; | 2083 | obuf->len = rem; |
| @@ -2100,11 +2100,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |||
| 2100 | ret = fuse_dev_do_write(fud, &cs, len); | 2100 | ret = fuse_dev_do_write(fud, &cs, len); |
| 2101 | 2101 | ||
| 2102 | pipe_lock(pipe); | 2102 | pipe_lock(pipe); |
| 2103 | out_free: | ||
| 2103 | for (idx = 0; idx < nbuf; idx++) | 2104 | for (idx = 0; idx < nbuf; idx++) |
| 2104 | pipe_buf_release(pipe, &bufs[idx]); | 2105 | pipe_buf_release(pipe, &bufs[idx]); |
| 2105 | pipe_unlock(pipe); | 2106 | pipe_unlock(pipe); |
| 2106 | 2107 | ||
| 2107 | out: | ||
| 2108 | kvfree(bufs); | 2108 | kvfree(bufs); |
| 2109 | return ret; | 2109 | return ret; |
| 2110 | } | 2110 | } |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ec32fece5e1e..9285dd4f4b1c 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -755,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 755 | umode_t mode, dev_t dev) | 755 | umode_t mode, dev_t dev) |
| 756 | { | 756 | { |
| 757 | struct inode *inode; | 757 | struct inode *inode; |
| 758 | struct resv_map *resv_map; | 758 | struct resv_map *resv_map = NULL; |
| 759 | 759 | ||
| 760 | resv_map = resv_map_alloc(); | 760 | /* |
| 761 | if (!resv_map) | 761 | * Reserve maps are only needed for inodes that can have associated |
| 762 | return NULL; | 762 | * page allocations. |
| 763 | */ | ||
| 764 | if (S_ISREG(mode) || S_ISLNK(mode)) { | ||
| 765 | resv_map = resv_map_alloc(); | ||
| 766 | if (!resv_map) | ||
| 767 | return NULL; | ||
| 768 | } | ||
| 763 | 769 | ||
| 764 | inode = new_inode(sb); | 770 | inode = new_inode(sb); |
| 765 | if (inode) { | 771 | if (inode) { |
| @@ -794,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
| 794 | break; | 800 | break; |
| 795 | } | 801 | } |
| 796 | lockdep_annotate_inode_mutex_key(inode); | 802 | lockdep_annotate_inode_mutex_key(inode); |
| 797 | } else | 803 | } else { |
| 798 | kref_put(&resv_map->refs, resv_map_release); | 804 | if (resv_map) |
| 805 | kref_put(&resv_map->refs, resv_map_release); | ||
| 806 | } | ||
| 799 | 807 | ||
| 800 | return inode; | 808 | return inode; |
| 801 | } | 809 | } |
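hugetlbfs_get_inode() now allocates a reserve map only for inodes that can actually hold pages (regular files and symlinks) and, when new_inode() fails, drops the reference only if a map exists; the old unconditional kref_put() would dereference a NULL resv_map for other modes. A compact standalone illustration of conditional allocation paired with conditional cleanup:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

/* Allocate the map only for modes that can carry page allocations, and on
 * failure release it only if it was allocated at all. */
static int get_inode(mode_t mode, int inode_alloc_fails)
{
	void *resv_map = NULL;

	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = malloc(64);
		if (!resv_map)
			return -1;
	}
	if (inode_alloc_fails) {
		if (resv_map)                 /* kref_put() needs this guard */
			free(resv_map);
		return -1;
	}
	/* ... the new inode would take ownership of resv_map here ... */
	free(resv_map);                       /* demo only */
	return 0;
}

int main(void)
{
	printf("%d %d\n", get_inode(S_IFDIR | 0755, 1),
	       get_inode(S_IFREG | 0644, 0));
	return 0;
}
```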
diff --git a/fs/io_uring.c b/fs/io_uring.c index bbdbd56cf2ac..f65f85d89217 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c | |||
| @@ -338,7 +338,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) | |||
| 338 | tail = ctx->cached_cq_tail; | 338 | tail = ctx->cached_cq_tail; |
| 339 | /* See comment at the top of the file */ | 339 | /* See comment at the top of the file */ |
| 340 | smp_rmb(); | 340 | smp_rmb(); |
| 341 | if (tail + 1 == READ_ONCE(ring->r.head)) | 341 | if (tail - READ_ONCE(ring->r.head) == ring->ring_entries) |
| 342 | return NULL; | 342 | return NULL; |
| 343 | 343 | ||
| 344 | ctx->cached_cq_tail++; | 344 | ctx->cached_cq_tail++; |
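io_get_cqring() tracks head and tail as free-running unsigned counters, so "tail + 1 == head" only detects a full completion ring in one narrow alignment; the fixed test asks whether the number of unconsumed entries, tail - head, equals the ring size, which also stays correct when the 32-bit counters wrap. A runnable demonstration:

```c
#include <stdio.h>

#define RING_ENTRIES 8u

static int full_old(unsigned tail, unsigned head) { return tail + 1 == head; }
static int full_new(unsigned tail, unsigned head) { return tail - head == RING_ENTRIES; }

int main(void)
{
	unsigned head = 4, tail = 12;      /* 8 unconsumed entries: ring is full */

	printf("old check: %d   new check: %d\n",
	       full_old(tail, head), full_new(tail, head));

	head = 0xfffffffcu;                /* counters about to wrap */
	tail = head + RING_ENTRIES;        /* still 8 entries apart */
	printf("new check across wrap: %d\n", full_new(tail, head));
	return 0;
}
```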
| @@ -682,11 +682,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req) | |||
| 682 | list_add_tail(&req->list, &ctx->poll_list); | 682 | list_add_tail(&req->list, &ctx->poll_list); |
| 683 | } | 683 | } |
| 684 | 684 | ||
| 685 | static void io_file_put(struct io_submit_state *state, struct file *file) | 685 | static void io_file_put(struct io_submit_state *state) |
| 686 | { | 686 | { |
| 687 | if (!state) { | 687 | if (state->file) { |
| 688 | fput(file); | ||
| 689 | } else if (state->file) { | ||
| 690 | int diff = state->has_refs - state->used_refs; | 688 | int diff = state->has_refs - state->used_refs; |
| 691 | 689 | ||
| 692 | if (diff) | 690 | if (diff) |
| @@ -711,7 +709,7 @@ static struct file *io_file_get(struct io_submit_state *state, int fd) | |||
| 711 | state->ios_left--; | 709 | state->ios_left--; |
| 712 | return state->file; | 710 | return state->file; |
| 713 | } | 711 | } |
| 714 | io_file_put(state, NULL); | 712 | io_file_put(state); |
| 715 | } | 713 | } |
| 716 | state->file = fget_many(fd, state->ios_left); | 714 | state->file = fget_many(fd, state->ios_left); |
| 717 | if (!state->file) | 715 | if (!state->file) |
| @@ -1671,7 +1669,7 @@ out: | |||
| 1671 | static void io_submit_state_end(struct io_submit_state *state) | 1669 | static void io_submit_state_end(struct io_submit_state *state) |
| 1672 | { | 1670 | { |
| 1673 | blk_finish_plug(&state->plug); | 1671 | blk_finish_plug(&state->plug); |
| 1674 | io_file_put(state, NULL); | 1672 | io_file_put(state); |
| 1675 | if (state->free_reqs) | 1673 | if (state->free_reqs) |
| 1676 | kmem_cache_free_bulk(req_cachep, state->free_reqs, | 1674 | kmem_cache_free_bulk(req_cachep, state->free_reqs, |
| 1677 | &state->reqs[state->cur_req]); | 1675 | &state->reqs[state->cur_req]); |
| @@ -1920,6 +1918,10 @@ static int io_sq_thread(void *data) | |||
| 1920 | unuse_mm(cur_mm); | 1918 | unuse_mm(cur_mm); |
| 1921 | mmput(cur_mm); | 1919 | mmput(cur_mm); |
| 1922 | } | 1920 | } |
| 1921 | |||
| 1922 | if (kthread_should_park()) | ||
| 1923 | kthread_parkme(); | ||
| 1924 | |||
| 1923 | return 0; | 1925 | return 0; |
| 1924 | } | 1926 | } |
| 1925 | 1927 | ||
| @@ -2054,6 +2056,7 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx) | |||
| 2054 | if (ctx->sqo_thread) { | 2056 | if (ctx->sqo_thread) { |
| 2055 | ctx->sqo_stop = 1; | 2057 | ctx->sqo_stop = 1; |
| 2056 | mb(); | 2058 | mb(); |
| 2059 | kthread_park(ctx->sqo_thread); | ||
| 2057 | kthread_stop(ctx->sqo_thread); | 2060 | kthread_stop(ctx->sqo_thread); |
| 2058 | ctx->sqo_thread = NULL; | 2061 | ctx->sqo_thread = NULL; |
| 2059 | } | 2062 | } |
| @@ -2215,6 +2218,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, | |||
| 2215 | fput(ctx->user_files[i]); | 2218 | fput(ctx->user_files[i]); |
| 2216 | 2219 | ||
| 2217 | kfree(ctx->user_files); | 2220 | kfree(ctx->user_files); |
| 2221 | ctx->user_files = NULL; | ||
| 2218 | ctx->nr_user_files = 0; | 2222 | ctx->nr_user_files = 0; |
| 2219 | return ret; | 2223 | return ret; |
| 2220 | } | 2224 | } |
| @@ -2235,19 +2239,27 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, | |||
| 2235 | mmgrab(current->mm); | 2239 | mmgrab(current->mm); |
| 2236 | ctx->sqo_mm = current->mm; | 2240 | ctx->sqo_mm = current->mm; |
| 2237 | 2241 | ||
| 2238 | ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); | ||
| 2239 | if (!ctx->sq_thread_idle) | ||
| 2240 | ctx->sq_thread_idle = HZ; | ||
| 2241 | |||
| 2242 | ret = -EINVAL; | 2242 | ret = -EINVAL; |
| 2243 | if (!cpu_possible(p->sq_thread_cpu)) | 2243 | if (!cpu_possible(p->sq_thread_cpu)) |
| 2244 | goto err; | 2244 | goto err; |
| 2245 | 2245 | ||
| 2246 | if (ctx->flags & IORING_SETUP_SQPOLL) { | 2246 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 2247 | ret = -EPERM; | ||
| 2248 | if (!capable(CAP_SYS_ADMIN)) | ||
| 2249 | goto err; | ||
| 2250 | |||
| 2251 | ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); | ||
| 2252 | if (!ctx->sq_thread_idle) | ||
| 2253 | ctx->sq_thread_idle = HZ; | ||
| 2254 | |||
| 2247 | if (p->flags & IORING_SETUP_SQ_AFF) { | 2255 | if (p->flags & IORING_SETUP_SQ_AFF) { |
| 2248 | int cpu; | 2256 | int cpu; |
| 2249 | 2257 | ||
| 2250 | cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS); | 2258 | cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS); |
| 2259 | ret = -EINVAL; | ||
| 2260 | if (!cpu_possible(p->sq_thread_cpu)) | ||
| 2261 | goto err; | ||
| 2262 | |||
| 2251 | ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, | 2263 | ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, |
| 2252 | ctx, cpu, | 2264 | ctx, cpu, |
| 2253 | "io_uring-sq"); | 2265 | "io_uring-sq"); |
| @@ -2917,11 +2929,23 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries, | |||
| 2917 | 2929 | ||
| 2918 | static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, | 2930 | static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, |
| 2919 | void __user *arg, unsigned nr_args) | 2931 | void __user *arg, unsigned nr_args) |
| 2932 | __releases(ctx->uring_lock) | ||
| 2933 | __acquires(ctx->uring_lock) | ||
| 2920 | { | 2934 | { |
| 2921 | int ret; | 2935 | int ret; |
| 2922 | 2936 | ||
| 2923 | percpu_ref_kill(&ctx->refs); | 2937 | percpu_ref_kill(&ctx->refs); |
| 2938 | |||
| 2939 | /* | ||
| 2940 | * Drop uring mutex before waiting for references to exit. If another | ||
| 2941 | * thread is currently inside io_uring_enter() it might need to grab | ||
| 2942 | * the uring_lock to make progress. If we hold it here across the drain | ||
| 2943 | * wait, then we can deadlock. It's safe to drop the mutex here, since | ||
| 2944 | * no new references will come in after we've killed the percpu ref. | ||
| 2945 | */ | ||
| 2946 | mutex_unlock(&ctx->uring_lock); | ||
| 2924 | wait_for_completion(&ctx->ctx_done); | 2947 | wait_for_completion(&ctx->ctx_done); |
| 2948 | mutex_lock(&ctx->uring_lock); | ||
| 2925 | 2949 | ||
| 2926 | switch (opcode) { | 2950 | switch (opcode) { |
| 2927 | case IORING_REGISTER_BUFFERS: | 2951 | case IORING_REGISTER_BUFFERS: |
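__io_uring_register() kills the percpu reference and then sleeps until it drains; a task still inside io_uring_enter() may need uring_lock to finish and drop its reference, so holding the mutex across wait_for_completion() can deadlock. The fix releases the mutex around the wait (annotated with __releases/__acquires), which is safe because no new references can be taken once the ref is killed. A kernel-style sketch of the shape of that pattern; struct ctx and its fields are hypothetical and this is not standalone code:

```c
/* Quiesce-then-wait without holding the lock that in-flight operations
 * need in order to complete and drop their references. */
static void quiesce(struct ctx *ctx)
	__releases(ctx->lock) __acquires(ctx->lock)
{
	percpu_ref_kill(&ctx->refs);       /* no new operations can start    */
	mutex_unlock(&ctx->lock);          /* let in-flight ones finish      */
	wait_for_completion(&ctx->done);   /* sleep until the last ref drops */
	mutex_lock(&ctx->lock);            /* reacquire for the real update  */
}
```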
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 389ea53ea487..bccfc40b3a74 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
| @@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) | |||
| 1414 | 1414 | ||
| 1415 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); | 1415 | jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); |
| 1416 | 1416 | ||
| 1417 | if (f->target) { | ||
| 1418 | kfree(f->target); | ||
| 1419 | f->target = NULL; | ||
| 1420 | } | ||
| 1421 | |||
| 1422 | fds = f->dents; | 1417 | fds = f->dents; |
| 1423 | while(fds) { | 1418 | while(fds) { |
| 1424 | fd = fds; | 1419 | fd = fds; |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index bb6ae387469f..05d892c79339 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
| @@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb) | |||
| 47 | static void jffs2_i_callback(struct rcu_head *head) | 47 | static void jffs2_i_callback(struct rcu_head *head) |
| 48 | { | 48 | { |
| 49 | struct inode *inode = container_of(head, struct inode, i_rcu); | 49 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 50 | kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); | 50 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
| 51 | |||
| 52 | kfree(f->target); | ||
| 53 | kmem_cache_free(jffs2_inode_cachep, f); | ||
| 51 | } | 54 | } |
| 52 | 55 | ||
| 53 | static void jffs2_destroy_inode(struct inode *inode) | 56 | static void jffs2_destroy_inode(struct inode *inode) |
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index ff6f85fb676b..5196bfa7894d 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c | |||
| @@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, | |||
| 329 | }; | 329 | }; |
| 330 | ssize_t err, err2; | 330 | ssize_t err, err2; |
| 331 | 331 | ||
| 332 | if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY)) | ||
| 333 | return -EOPNOTSUPP; | ||
| 334 | |||
| 335 | src_lock = nfs_get_lock_context(nfs_file_open_context(src)); | 332 | src_lock = nfs_get_lock_context(nfs_file_open_context(src)); |
| 336 | if (IS_ERR(src_lock)) | 333 | if (IS_ERR(src_lock)) |
| 337 | return PTR_ERR(src_lock); | 334 | return PTR_ERR(src_lock); |
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 45b2322e092d..00d17198ee12 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c | |||
| @@ -133,8 +133,10 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, | |||
| 133 | struct file *file_out, loff_t pos_out, | 133 | struct file *file_out, loff_t pos_out, |
| 134 | size_t count, unsigned int flags) | 134 | size_t count, unsigned int flags) |
| 135 | { | 135 | { |
| 136 | if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY)) | ||
| 137 | return -EOPNOTSUPP; | ||
| 136 | if (file_inode(file_in) == file_inode(file_out)) | 138 | if (file_inode(file_in) == file_inode(file_out)) |
| 137 | return -EINVAL; | 139 | return -EOPNOTSUPP; |
| 138 | return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); | 140 | return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); |
| 139 | } | 141 | } |
| 140 | 142 | ||
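Two related changes here: the NFS_CAP_COPY capability test moves out of nfs42_proc_copy() and up into nfs4_copy_file_range(), and an intra-inode copy now returns -EOPNOTSUPP instead of -EINVAL. With the fallback logic as it stood at this point, -EOPNOTSUPP from ->copy_file_range makes vfs_copy_file_range() fall back to the generic splice-based copy, whereas -EINVAL is handed straight back to the application. The same convention is useful from userspace; a small program sketching a caller-side fallback (single-chunk copy for brevity, and it assumes a glibc new enough to expose copy_file_range(2)):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* Treat EOPNOTSUPP (and EXDEV on older kernels) as "do it by hand";
 * other errors are real failures. */
static ssize_t copy_with_fallback(int fd_in, int fd_out, size_t len)
{
	ssize_t n = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);

	if (n >= 0)
		return n;
	if (errno != EOPNOTSUPP && errno != EXDEV)
		return -1;

	char buf[4096];
	ssize_t r = read(fd_in, buf, len < sizeof(buf) ? len : sizeof(buf));
	return r <= 0 ? r : write(fd_out, buf, (size_t)r);
}

int main(void)
{
	FILE *src = tmpfile(), *dst = tmpfile();

	fputs("hello", src);
	fflush(src);
	rewind(src);
	printf("copied %zd bytes\n", copy_with_fallback(fileno(src), fileno(dst), 5));
	return 0;
}
```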
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index cfcabc33e24d..602446158bfb 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -2589,7 +2589,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
| 2589 | ARRAY_SIZE(nfs4_acl_bitmap), &hdr); | 2589 | ARRAY_SIZE(nfs4_acl_bitmap), &hdr); |
| 2590 | 2590 | ||
| 2591 | rpc_prepare_reply_pages(req, args->acl_pages, 0, | 2591 | rpc_prepare_reply_pages(req, args->acl_pages, 0, |
| 2592 | args->acl_len, replen); | 2592 | args->acl_len, replen + 1); |
| 2593 | encode_nops(&hdr); | 2593 | encode_nops(&hdr); |
| 2594 | } | 2594 | } |
| 2595 | 2595 | ||
| @@ -2811,7 +2811,7 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, | |||
| 2811 | } | 2811 | } |
| 2812 | 2812 | ||
| 2813 | rpc_prepare_reply_pages(req, (struct page **)&args->page, 0, | 2813 | rpc_prepare_reply_pages(req, (struct page **)&args->page, 0, |
| 2814 | PAGE_SIZE, replen); | 2814 | PAGE_SIZE, replen + 1); |
| 2815 | encode_nops(&hdr); | 2815 | encode_nops(&hdr); |
| 2816 | } | 2816 | } |
| 2817 | 2817 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 23790c7b2289..c27ac96a95bd 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
| @@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options, | |||
| 2041 | memcpy(sap, &data->addr, sizeof(data->addr)); | 2041 | memcpy(sap, &data->addr, sizeof(data->addr)); |
| 2042 | args->nfs_server.addrlen = sizeof(data->addr); | 2042 | args->nfs_server.addrlen = sizeof(data->addr); |
| 2043 | args->nfs_server.port = ntohs(data->addr.sin_port); | 2043 | args->nfs_server.port = ntohs(data->addr.sin_port); |
| 2044 | if (!nfs_verify_server_address(sap)) | 2044 | if (sap->sa_family != AF_INET || |
| 2045 | !nfs_verify_server_address(sap)) | ||
| 2045 | goto out_no_address; | 2046 | goto out_no_address; |
| 2046 | 2047 | ||
| 2047 | if (!(data->flags & NFS_MOUNT_TCP)) | 2048 | if (!(data->flags & NFS_MOUNT_TCP)) |
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 8f933e84cec1..9bc32af4e2da 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c | |||
| @@ -442,7 +442,9 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp) | |||
| 442 | struct nfsd3_readdirargs *argp = rqstp->rq_argp; | 442 | struct nfsd3_readdirargs *argp = rqstp->rq_argp; |
| 443 | struct nfsd3_readdirres *resp = rqstp->rq_resp; | 443 | struct nfsd3_readdirres *resp = rqstp->rq_resp; |
| 444 | __be32 nfserr; | 444 | __be32 nfserr; |
| 445 | int count; | 445 | int count = 0; |
| 446 | struct page **p; | ||
| 447 | caddr_t page_addr = NULL; | ||
| 446 | 448 | ||
| 447 | dprintk("nfsd: READDIR(3) %s %d bytes at %d\n", | 449 | dprintk("nfsd: READDIR(3) %s %d bytes at %d\n", |
| 448 | SVCFH_fmt(&argp->fh), | 450 | SVCFH_fmt(&argp->fh), |
| @@ -462,7 +464,18 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp) | |||
| 462 | nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, | 464 | nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, |
| 463 | &resp->common, nfs3svc_encode_entry); | 465 | &resp->common, nfs3svc_encode_entry); |
| 464 | memcpy(resp->verf, argp->verf, 8); | 466 | memcpy(resp->verf, argp->verf, 8); |
| 465 | resp->count = resp->buffer - argp->buffer; | 467 | count = 0; |
| 468 | for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) { | ||
| 469 | page_addr = page_address(*p); | ||
| 470 | |||
| 471 | if (((caddr_t)resp->buffer >= page_addr) && | ||
| 472 | ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) { | ||
| 473 | count += (caddr_t)resp->buffer - page_addr; | ||
| 474 | break; | ||
| 475 | } | ||
| 476 | count += PAGE_SIZE; | ||
| 477 | } | ||
| 478 | resp->count = count >> 2; | ||
| 466 | if (resp->offset) { | 479 | if (resp->offset) { |
| 467 | loff_t offset = argp->cookie; | 480 | loff_t offset = argp->cookie; |
| 468 | 481 | ||
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 93fea246f676..8d789124ed3c 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c | |||
| @@ -573,6 +573,7 @@ int | |||
| 573 | nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p) | 573 | nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p) |
| 574 | { | 574 | { |
| 575 | struct nfsd3_readdirargs *args = rqstp->rq_argp; | 575 | struct nfsd3_readdirargs *args = rqstp->rq_argp; |
| 576 | int len; | ||
| 576 | u32 max_blocksize = svc_max_payload(rqstp); | 577 | u32 max_blocksize = svc_max_payload(rqstp); |
| 577 | 578 | ||
| 578 | p = decode_fh(p, &args->fh); | 579 | p = decode_fh(p, &args->fh); |
| @@ -582,8 +583,14 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p) | |||
| 582 | args->verf = p; p += 2; | 583 | args->verf = p; p += 2; |
| 583 | args->dircount = ~0; | 584 | args->dircount = ~0; |
| 584 | args->count = ntohl(*p++); | 585 | args->count = ntohl(*p++); |
| 585 | args->count = min_t(u32, args->count, max_blocksize); | 586 | len = args->count = min_t(u32, args->count, max_blocksize); |
| 586 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 587 | |
| 588 | while (len > 0) { | ||
| 589 | struct page *p = *(rqstp->rq_next_page++); | ||
| 590 | if (!args->buffer) | ||
| 591 | args->buffer = page_address(p); | ||
| 592 | len -= PAGE_SIZE; | ||
| 593 | } | ||
| 587 | 594 | ||
| 588 | return xdr_argsize_check(rqstp, p); | 595 | return xdr_argsize_check(rqstp, p); |
| 589 | } | 596 | } |
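The READDIR3 reply buffer may now span several pages: the decode hunk claims enough pages from rq_next_page to cover the requested count (recording only the first page's address as args->buffer), and the encode hunk computes the reply length by walking rq_respages, adding a full PAGE_SIZE for every page before the one that contains the final resp->buffer pointer plus the offset within that page, then storing the result in 4-byte XDR words (count >> 2). A runnable model of that length computation:

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/* Sum whole pages until the page containing the current write pointer,
 * then add the offset inside that page. */
static size_t bytes_used(char *pages[], int npages, const char *cur)
{
	size_t count = 0;

	for (int i = 0; i < npages; i++) {
		if (cur >= pages[i] && cur < pages[i] + PAGE_SIZE)
			return count + (size_t)(cur - pages[i]);
		count += PAGE_SIZE;
	}
	return count;
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	char *pages[] = { p0, p1 };
	size_t used = bytes_used(pages, 2, p1 + 100);

	printf("%zu bytes, %zu xdr words\n", used, used >> 2);
	return 0;
}
```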
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index d219159b98af..7caa3801ce72 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
| @@ -1010,8 +1010,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) | |||
| 1010 | cb->cb_seq_status = 1; | 1010 | cb->cb_seq_status = 1; |
| 1011 | cb->cb_status = 0; | 1011 | cb->cb_status = 0; |
| 1012 | if (minorversion) { | 1012 | if (minorversion) { |
| 1013 | if (!nfsd41_cb_get_slot(clp, task)) | 1013 | if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task)) |
| 1014 | return; | 1014 | return; |
| 1015 | cb->cb_holds_slot = true; | ||
| 1015 | } | 1016 | } |
| 1016 | rpc_call_start(task); | 1017 | rpc_call_start(task); |
| 1017 | } | 1018 | } |
| @@ -1038,6 +1039,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback | |||
| 1038 | return true; | 1039 | return true; |
| 1039 | } | 1040 | } |
| 1040 | 1041 | ||
| 1042 | if (!cb->cb_holds_slot) | ||
| 1043 | goto need_restart; | ||
| 1044 | |||
| 1041 | switch (cb->cb_seq_status) { | 1045 | switch (cb->cb_seq_status) { |
| 1042 | case 0: | 1046 | case 0: |
| 1043 | /* | 1047 | /* |
| @@ -1076,6 +1080,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback | |||
| 1076 | cb->cb_seq_status); | 1080 | cb->cb_seq_status); |
| 1077 | } | 1081 | } |
| 1078 | 1082 | ||
| 1083 | cb->cb_holds_slot = false; | ||
| 1079 | clear_bit(0, &clp->cl_cb_slot_busy); | 1084 | clear_bit(0, &clp->cl_cb_slot_busy); |
| 1080 | rpc_wake_up_next(&clp->cl_cb_waitq); | 1085 | rpc_wake_up_next(&clp->cl_cb_waitq); |
| 1081 | dprintk("%s: freed slot, new seqid=%d\n", __func__, | 1086 | dprintk("%s: freed slot, new seqid=%d\n", __func__, |
| @@ -1283,6 +1288,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, | |||
| 1283 | cb->cb_seq_status = 1; | 1288 | cb->cb_seq_status = 1; |
| 1284 | cb->cb_status = 0; | 1289 | cb->cb_status = 0; |
| 1285 | cb->cb_need_restart = false; | 1290 | cb->cb_need_restart = false; |
| 1291 | cb->cb_holds_slot = false; | ||
| 1286 | } | 1292 | } |
| 1287 | 1293 | ||
| 1288 | void nfsd4_run_cb(struct nfsd4_callback *cb) | 1294 | void nfsd4_run_cb(struct nfsd4_callback *cb) |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 6a45fb00c5fc..f056b1d3fecd 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, | |||
| 265 | static void | 265 | static void |
| 266 | free_blocked_lock(struct nfsd4_blocked_lock *nbl) | 266 | free_blocked_lock(struct nfsd4_blocked_lock *nbl) |
| 267 | { | 267 | { |
| 268 | locks_delete_block(&nbl->nbl_lock); | ||
| 268 | locks_release_private(&nbl->nbl_lock); | 269 | locks_release_private(&nbl->nbl_lock); |
| 269 | kfree(nbl); | 270 | kfree(nbl); |
| 270 | } | 271 | } |
| @@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo) | |||
| 293 | nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, | 294 | nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, |
| 294 | nbl_lru); | 295 | nbl_lru); |
| 295 | list_del_init(&nbl->nbl_lru); | 296 | list_del_init(&nbl->nbl_lru); |
| 296 | locks_delete_block(&nbl->nbl_lock); | ||
| 297 | free_blocked_lock(nbl); | 297 | free_blocked_lock(nbl); |
| 298 | } | 298 | } |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static void | ||
| 302 | nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb) | ||
| 303 | { | ||
| 304 | struct nfsd4_blocked_lock *nbl = container_of(cb, | ||
| 305 | struct nfsd4_blocked_lock, nbl_cb); | ||
| 306 | locks_delete_block(&nbl->nbl_lock); | ||
| 307 | } | ||
| 308 | |||
| 301 | static int | 309 | static int |
| 302 | nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) | 310 | nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) |
| 303 | { | 311 | { |
| @@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) | |||
| 325 | } | 333 | } |
| 326 | 334 | ||
| 327 | static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { | 335 | static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { |
| 336 | .prepare = nfsd4_cb_notify_lock_prepare, | ||
| 328 | .done = nfsd4_cb_notify_lock_done, | 337 | .done = nfsd4_cb_notify_lock_done, |
| 329 | .release = nfsd4_cb_notify_lock_release, | 338 | .release = nfsd4_cb_notify_lock_release, |
| 330 | }; | 339 | }; |
| @@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
| 4863 | nbl = list_first_entry(&reaplist, | 4872 | nbl = list_first_entry(&reaplist, |
| 4864 | struct nfsd4_blocked_lock, nbl_lru); | 4873 | struct nfsd4_blocked_lock, nbl_lru); |
| 4865 | list_del_init(&nbl->nbl_lru); | 4874 | list_del_init(&nbl->nbl_lru); |
| 4866 | locks_delete_block(&nbl->nbl_lock); | ||
| 4867 | free_blocked_lock(nbl); | 4875 | free_blocked_lock(nbl); |
| 4868 | } | 4876 | } |
| 4869 | out: | 4877 | out: |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 396c76755b03..9d6cb246c6c5 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
| @@ -70,6 +70,7 @@ struct nfsd4_callback { | |||
| 70 | int cb_seq_status; | 70 | int cb_seq_status; |
| 71 | int cb_status; | 71 | int cb_status; |
| 72 | bool cb_need_restart; | 72 | bool cb_need_restart; |
| 73 | bool cb_holds_slot; | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 75 | struct nfsd4_callback_ops { | 76 | struct nfsd4_callback_ops { |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
| @@ -1215,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp) | |||
| 1215 | } | 1215 | } |
| 1216 | 1216 | ||
| 1217 | EXPORT_SYMBOL(nonseekable_open); | 1217 | EXPORT_SYMBOL(nonseekable_open); |
| 1218 | |||
| 1219 | /* | ||
| 1220 | * stream_open is used by subsystems that want stream-like file descriptors. | ||
| 1221 | * Such file descriptors are not seekable and don't have notion of position | ||
| 1222 | * (file.f_pos is always 0). Contrary to file descriptors of other regular | ||
| 1223 | * files, .read() and .write() can run simultaneously. | ||
| 1224 | * | ||
| 1225 | * stream_open never fails and is marked to return int so that it could be | ||
| 1226 | * directly used as file_operations.open . | ||
| 1227 | */ | ||
| 1228 | int stream_open(struct inode *inode, struct file *filp) | ||
| 1229 | { | ||
| 1230 | filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS); | ||
| 1231 | filp->f_mode |= FMODE_STREAM; | ||
| 1232 | return 0; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | EXPORT_SYMBOL(stream_open); | ||
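stream_open() is the counterpart of nonseekable_open() for descriptors with no position at all: it clears the seek/pread/pwrite/atomic-position mode bits and sets FMODE_STREAM, and the fs/read_write.c hunk below makes read()/write() on such files neither read nor update f_pos, so concurrent readers and writers stop serializing on the position. A kernel-module-style sketch of a character device opting in; not standalone, and the mydev_* handlers are placeholders:

```c
#include <linux/fs.h>
#include <linux/module.h>

static ssize_t mydev_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t mydev_write(struct file *, const char __user *, size_t, loff_t *);

static int mydev_open(struct inode *inode, struct file *filp)
{
	return stream_open(inode, filp);   /* never fails */
}

static const struct file_operations mydev_fops = {
	.owner = THIS_MODULE,
	.open  = mydev_open,               /* or simply .open = stream_open */
	.read  = mydev_read,
	.write = mydev_write,
};
```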
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
| @@ -188,9 +188,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal); | |||
| 188 | * in the tee() system call, when we duplicate the buffers in one | 188 | * in the tee() system call, when we duplicate the buffers in one |
| 189 | * pipe into another. | 189 | * pipe into another. |
| 190 | */ | 190 | */ |
| 191 | void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) | 191 | bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) |
| 192 | { | 192 | { |
| 193 | get_page(buf->page); | 193 | return try_get_page(buf->page); |
| 194 | } | 194 | } |
| 195 | EXPORT_SYMBOL(generic_pipe_buf_get); | 195 | EXPORT_SYMBOL(generic_pipe_buf_get); |
| 196 | 196 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index ddef482f1334..6a803a0b75df 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -616,24 +616,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns, | |||
| 616 | static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, | 616 | static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, |
| 617 | struct pid *pid, struct task_struct *task) | 617 | struct pid *pid, struct task_struct *task) |
| 618 | { | 618 | { |
| 619 | long nr; | 619 | struct syscall_info info; |
| 620 | unsigned long args[6], sp, pc; | 620 | u64 *args = &info.data.args[0]; |
| 621 | int res; | 621 | int res; |
| 622 | 622 | ||
| 623 | res = lock_trace(task); | 623 | res = lock_trace(task); |
| 624 | if (res) | 624 | if (res) |
| 625 | return res; | 625 | return res; |
| 626 | 626 | ||
| 627 | if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) | 627 | if (task_current_syscall(task, &info)) |
| 628 | seq_puts(m, "running\n"); | 628 | seq_puts(m, "running\n"); |
| 629 | else if (nr < 0) | 629 | else if (info.data.nr < 0) |
| 630 | seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc); | 630 | seq_printf(m, "%d 0x%llx 0x%llx\n", |
| 631 | info.data.nr, info.sp, info.data.instruction_pointer); | ||
| 631 | else | 632 | else |
| 632 | seq_printf(m, | 633 | seq_printf(m, |
| 633 | "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", | 634 | "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n", |
| 634 | nr, | 635 | info.data.nr, |
| 635 | args[0], args[1], args[2], args[3], args[4], args[5], | 636 | args[0], args[1], args[2], args[3], args[4], args[5], |
| 636 | sp, pc); | 637 | info.sp, info.data.instruction_pointer); |
| 637 | unlock_trace(task); | 638 | unlock_trace(task); |
| 638 | 639 | ||
| 639 | return 0; | 640 | return 0; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 92a91e7816d8..95ca1fe7283c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -1143,6 +1143,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
| 1143 | count = -EINTR; | 1143 | count = -EINTR; |
| 1144 | goto out_mm; | 1144 | goto out_mm; |
| 1145 | } | 1145 | } |
| 1146 | /* | ||
| 1147 | * Avoid to modify vma->vm_flags | ||
| 1148 | * without locked ops while the | ||
| 1149 | * coredump reads the vm_flags. | ||
| 1150 | */ | ||
| 1151 | if (!mmget_still_valid(mm)) { | ||
| 1152 | /* | ||
| 1153 | * Silently return "count" | ||
| 1154 | * like if get_task_mm() | ||
| 1155 | * failed. FIXME: should this | ||
| 1156 | * function have returned | ||
| 1157 | * -ESRCH if get_task_mm() | ||
| 1158 | * failed like if | ||
| 1159 | * get_proc_task() fails? | ||
| 1160 | */ | ||
| 1161 | up_write(&mm->mmap_sem); | ||
| 1162 | goto out_mm; | ||
| 1163 | } | ||
| 1146 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 1164 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 1147 | vma->vm_flags &= ~VM_SOFTDIRTY; | 1165 | vma->vm_flags &= ~VM_SOFTDIRTY; |
| 1148 | vma_set_page_prot(vma); | 1166 | vma_set_page_prot(vma); |
diff --git a/fs/read_write.c b/fs/read_write.c index 177ccc3d405a..61b43ad7608e 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_ | |||
| 560 | 560 | ||
| 561 | static inline loff_t file_pos_read(struct file *file) | 561 | static inline loff_t file_pos_read(struct file *file) |
| 562 | { | 562 | { |
| 563 | return file->f_pos; | 563 | return file->f_mode & FMODE_STREAM ? 0 : file->f_pos; |
| 564 | } | 564 | } |
| 565 | 565 | ||
| 566 | static inline void file_pos_write(struct file *file, loff_t pos) | 566 | static inline void file_pos_write(struct file *file, loff_t pos) |
| 567 | { | 567 | { |
| 568 | file->f_pos = pos; | 568 | if ((file->f_mode & FMODE_STREAM) == 0) |
| 569 | file->f_pos = pos; | ||
| 569 | } | 570 | } |
| 570 | 571 | ||
| 571 | ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) | 572 | ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) |
diff --git a/fs/splice.c b/fs/splice.c index 3ee7e82df48f..98943d9b219c 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -1593,7 +1593,11 @@ retry: | |||
| 1593 | * Get a reference to this pipe buffer, | 1593 | * Get a reference to this pipe buffer, |
| 1594 | * so we can copy the contents over. | 1594 | * so we can copy the contents over. |
| 1595 | */ | 1595 | */ |
| 1596 | pipe_buf_get(ipipe, ibuf); | 1596 | if (!pipe_buf_get(ipipe, ibuf)) { |
| 1597 | if (ret == 0) | ||
| 1598 | ret = -EFAULT; | ||
| 1599 | break; | ||
| 1600 | } | ||
| 1597 | *obuf = *ibuf; | 1601 | *obuf = *ibuf; |
| 1598 | 1602 | ||
| 1599 | /* | 1603 | /* |
| @@ -1667,7 +1671,11 @@ static int link_pipe(struct pipe_inode_info *ipipe, | |||
| 1667 | * Get a reference to this pipe buffer, | 1671 | * Get a reference to this pipe buffer, |
| 1668 | * so we can copy the contents over. | 1672 | * so we can copy the contents over. |
| 1669 | */ | 1673 | */ |
| 1670 | pipe_buf_get(ipipe, ibuf); | 1674 | if (!pipe_buf_get(ipipe, ibuf)) { |
| 1675 | if (ret == 0) | ||
| 1676 | ret = -EFAULT; | ||
| 1677 | break; | ||
| 1678 | } | ||
| 1671 | 1679 | ||
| 1672 | obuf = opipe->bufs + nbuf; | 1680 | obuf = opipe->bufs + nbuf; |
| 1673 | *obuf = *ibuf; | 1681 | *obuf = *ibuf; |
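generic_pipe_buf_get() now returns a bool because try_get_page() refuses to take another reference on a page whose refcount is no longer trustworthy (part of the page-refcount overflow hardening), so the tee/splice duplication paths here, like the fuse splice-write path earlier, must check the result and bail out, returning -EFAULT only if nothing was transferred yet. A tiny standalone model of the try-get semantics:

```c
#include <stdio.h>
#include <stdbool.h>

/* Modeled on try_get_page(): refuse to bump a count that is not positive
 * (already freed, or saturated by an earlier overflow). */
static bool try_get(int *refcount)
{
	if (*refcount <= 0)
		return false;
	(*refcount)++;
	return true;
}

int main(void)
{
	int healthy = 3, poisoned = 0;

	printf("healthy: %d  poisoned: %d\n", try_get(&healthy), try_get(&poisoned));
	return 0;
}
```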
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 8dc2818fdd84..12628184772c 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head) | |||
| 276 | { | 276 | { |
| 277 | struct inode *inode = container_of(head, struct inode, i_rcu); | 277 | struct inode *inode = container_of(head, struct inode, i_rcu); |
| 278 | struct ubifs_inode *ui = ubifs_inode(inode); | 278 | struct ubifs_inode *ui = ubifs_inode(inode); |
| 279 | kfree(ui->data); | ||
| 279 | kmem_cache_free(ubifs_inode_slab, ui); | 280 | kmem_cache_free(ubifs_inode_slab, ui); |
| 280 | } | 281 | } |
| 281 | 282 | ||
| 282 | static void ubifs_destroy_inode(struct inode *inode) | 283 | static void ubifs_destroy_inode(struct inode *inode) |
| 283 | { | 284 | { |
| 284 | struct ubifs_inode *ui = ubifs_inode(inode); | ||
| 285 | |||
| 286 | kfree(ui->data); | ||
| 287 | call_rcu(&inode->i_rcu, ubifs_i_callback); | 285 | call_rcu(&inode->i_rcu, ubifs_i_callback); |
| 288 | } | 286 | } |
| 289 | 287 | ||
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 89800fc7dc9d..f5de1e726356 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
| @@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
| 629 | 629 | ||
| 630 | /* the various vma->vm_userfaultfd_ctx still points to it */ | 630 | /* the various vma->vm_userfaultfd_ctx still points to it */ |
| 631 | down_write(&mm->mmap_sem); | 631 | down_write(&mm->mmap_sem); |
| 632 | /* no task can run (and in turn coredump) yet */ | ||
| 633 | VM_WARN_ON(!mmget_still_valid(mm)); | ||
| 632 | for (vma = mm->mmap; vma; vma = vma->vm_next) | 634 | for (vma = mm->mmap; vma; vma = vma->vm_next) |
| 633 | if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { | 635 | if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { |
| 634 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; | 636 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
| @@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
| 883 | * taking the mmap_sem for writing. | 885 | * taking the mmap_sem for writing. |
| 884 | */ | 886 | */ |
| 885 | down_write(&mm->mmap_sem); | 887 | down_write(&mm->mmap_sem); |
| 888 | if (!mmget_still_valid(mm)) | ||
| 889 | goto skip_mm; | ||
| 886 | prev = NULL; | 890 | prev = NULL; |
| 887 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 891 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 888 | cond_resched(); | 892 | cond_resched(); |
| @@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
| 905 | vma->vm_flags = new_flags; | 909 | vma->vm_flags = new_flags; |
| 906 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; | 910 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
| 907 | } | 911 | } |
| 912 | skip_mm: | ||
| 908 | up_write(&mm->mmap_sem); | 913 | up_write(&mm->mmap_sem); |
| 909 | mmput(mm); | 914 | mmput(mm); |
| 910 | wakeup: | 915 | wakeup: |
| @@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, | |||
| 1333 | goto out; | 1338 | goto out; |
| 1334 | 1339 | ||
| 1335 | down_write(&mm->mmap_sem); | 1340 | down_write(&mm->mmap_sem); |
| 1341 | if (!mmget_still_valid(mm)) | ||
| 1342 | goto out_unlock; | ||
| 1336 | vma = find_vma_prev(mm, start, &prev); | 1343 | vma = find_vma_prev(mm, start, &prev); |
| 1337 | if (!vma) | 1344 | if (!vma) |
| 1338 | goto out_unlock; | 1345 | goto out_unlock; |
| @@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, | |||
| 1520 | goto out; | 1527 | goto out; |
| 1521 | 1528 | ||
| 1522 | down_write(&mm->mmap_sem); | 1529 | down_write(&mm->mmap_sem); |
| 1530 | if (!mmget_still_valid(mm)) | ||
| 1531 | goto out_unlock; | ||
| 1523 | vma = find_vma_prev(mm, start, &prev); | 1532 | vma = find_vma_prev(mm, start, &prev); |
| 1524 | if (!vma) | 1533 | if (!vma) |
| 1525 | goto out_unlock; | 1534 | goto out_unlock; |
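clear_refs_write() and the three userfaultfd paths now re-check mmget_still_valid(mm) right after taking mmap_sem for writing: once a core dump has begun, the dumper walks the VMAs expecting them not to change, so writers that reached the mm from outside the dumping task (for example through /proc/<pid>/clear_refs or a userfaultfd file descriptor) back off instead of modifying vm_flags or the uffd context. The guard always has the same shape; a kernel-style sketch, not standalone code:

```c
/* Back off from foreign-mm modifications once a coredump is in progress. */
down_write(&mm->mmap_sem);
if (!mmget_still_valid(mm))
	goto out_unlock;                   /* silently skip the update */

for (vma = mm->mmap; vma; vma = vma->vm_next) {
	/* ... adjust vma->vm_flags / vma->vm_userfaultfd_ctx ... */
}

out_unlock:
	up_write(&mm->mmap_sem);
```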
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 0c938a4354f6..b88239e9efe4 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h | |||
| @@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, | |||
| 105 | * syscall_get_arguments - extract system call parameter values | 105 | * syscall_get_arguments - extract system call parameter values |
| 106 | * @task: task of interest, must be blocked | 106 | * @task: task of interest, must be blocked |
| 107 | * @regs: task_pt_regs() of @task | 107 | * @regs: task_pt_regs() of @task |
| 108 | * @i: argument index [0,5] | ||
| 109 | * @n: number of arguments; n+i must be [1,6]. | ||
| 110 | * @args: array filled with argument values | 108 | * @args: array filled with argument values |
| 111 | * | 109 | * |
| 112 | * Fetches @n arguments to the system call starting with the @i'th argument | 110 | * Fetches 6 arguments to the system call. First argument is stored in |
| 113 | * (from 0 through 5). Argument @i is stored in @args[0], and so on. | 111 | * @args[0], and so on. |
| 114 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 115 | * | 112 | * |
| 116 | * It's only valid to call this when @task is stopped for tracing on | 113 | * It's only valid to call this when @task is stopped for tracing on |
| 117 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 114 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 118 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 119 | * taking up to 6 arguments. | ||
| 120 | */ | 115 | */ |
| 121 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, | 116 | void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, |
| 122 | unsigned int i, unsigned int n, unsigned long *args); | 117 | unsigned long *args); |
| 123 | 118 | ||
| 124 | /** | 119 | /** |
| 125 | * syscall_set_arguments - change system call parameter value | 120 | * syscall_set_arguments - change system call parameter value |
| 126 | * @task: task of interest, must be in system call entry tracing | 121 | * @task: task of interest, must be in system call entry tracing |
| 127 | * @regs: task_pt_regs() of @task | 122 | * @regs: task_pt_regs() of @task |
| 128 | * @i: argument index [0,5] | ||
| 129 | * @n: number of arguments; n+i must be [1,6]. | ||
| 130 | * @args: array of argument values to store | 123 | * @args: array of argument values to store |
| 131 | * | 124 | * |
| 132 | * Changes @n arguments to the system call starting with the @i'th argument. | 125 | * Changes 6 arguments to the system call. |
| 133 | * Argument @i gets value @args[0], and so on. | 126 | * The first argument gets value @args[0], and so on. |
| 134 | * An arch inline version is probably optimal when @i and @n are constants. | ||
| 135 | * | 127 | * |
| 136 | * It's only valid to call this when @task is stopped for tracing on | 128 | * It's only valid to call this when @task is stopped for tracing on |
| 137 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. | 129 | * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. |
| 138 | * It's invalid to call this with @i + @n > 6; we only support system calls | ||
| 139 | * taking up to 6 arguments. | ||
| 140 | */ | 130 | */ |
| 141 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, | 131 | void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, |
| 142 | unsigned int i, unsigned int n, | ||
| 143 | const unsigned long *args); | 132 | const unsigned long *args); |
| 144 | 133 | ||
| 145 | /** | 134 | /** |
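A minimal sketch of how a tracer might call the simplified interface above, assuming the usual tracing stop; the helper name dump_syscall_args is illustrative:

#include <asm/syscall.h>
#include <linux/printk.h>

/* Illustrative helper: fetch all six arguments of a traced, stopped task. */
static void dump_syscall_args(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	/* The array is always filled in full; no @i/@n bookkeeping remains. */
	syscall_get_arguments(task, regs, args);
	pr_info("syscall args: %lx %lx %lx %lx %lx %lx\n",
		args[0], args[1], args[2], args[3], args[4], args[5]);
}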
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index cfb7be40bed7..ce4de6b1e444 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h | |||
| @@ -418,6 +418,8 @@ struct drm_crtc_helper_funcs { | |||
| 418 | * Drivers can use the @old_crtc_state input parameter if the operations | 418 | * Drivers can use the @old_crtc_state input parameter if the operations |
| 419 | * needed to enable the CRTC don't depend solely on the new state but | 419 | * needed to enable the CRTC don't depend solely on the new state but |
| 420 | * also on the transition between the old state and the new state. | 420 | * also on the transition between the old state and the new state. |
| 421 | * | ||
| 422 | * This function is optional. | ||
| 421 | */ | 423 | */ |
| 422 | void (*atomic_enable)(struct drm_crtc *crtc, | 424 | void (*atomic_enable)(struct drm_crtc *crtc, |
| 423 | struct drm_crtc_state *old_crtc_state); | 425 | struct drm_crtc_state *old_crtc_state); |
| @@ -441,6 +443,8 @@ struct drm_crtc_helper_funcs { | |||
| 441 | * parameter @old_crtc_state which could be used to access the old | 443 | * parameter @old_crtc_state which could be used to access the old |
| 442 | * state. Atomic drivers should consider to use this one instead | 444 | * state. Atomic drivers should consider to use this one instead |
| 443 | * of @disable. | 445 | * of @disable. |
| 446 | * | ||
| 447 | * This function is optional. | ||
| 444 | */ | 448 | */ |
| 445 | void (*atomic_disable)(struct drm_crtc *crtc, | 449 | void (*atomic_disable)(struct drm_crtc *crtc, |
| 446 | struct drm_crtc_state *old_crtc_state); | 450 | struct drm_crtc_state *old_crtc_state); |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index cbf3180cb612..668ad971cd7b 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
| @@ -420,7 +420,6 @@ extern struct ttm_bo_global { | |||
| 420 | /** | 420 | /** |
| 421 | * Protected by ttm_global_mutex. | 421 | * Protected by ttm_global_mutex. |
| 422 | */ | 422 | */ |
| 423 | unsigned int use_count; | ||
| 424 | struct list_head device_list; | 423 | struct list_head device_list; |
| 425 | 424 | ||
| 426 | /** | 425 | /** |
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h new file mode 100644 index 000000000000..6a0b70a37d78 --- /dev/null +++ b/include/dt-bindings/clock/sifive-fu540-prci.h | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2018-2019 SiFive, Inc. | ||
| 4 | * Wesley Terpstra | ||
| 5 | * Paul Walmsley | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H | ||
| 9 | #define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H | ||
| 10 | |||
| 11 | /* Clock indexes for use by Device Tree data and the PRCI driver */ | ||
| 12 | |||
| 13 | #define PRCI_CLK_COREPLL 0 | ||
| 14 | #define PRCI_CLK_DDRPLL 1 | ||
| 15 | #define PRCI_CLK_GEMGXLPLL 2 | ||
| 16 | #define PRCI_CLK_TLCLK 3 | ||
| 17 | |||
| 18 | #endif | ||
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h index 8063e8314eef..6d487c5eba2c 100644 --- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h | |||
| @@ -51,7 +51,10 @@ | |||
| 51 | #define RESET_SD_EMMC_A 44 | 51 | #define RESET_SD_EMMC_A 44 |
| 52 | #define RESET_SD_EMMC_B 45 | 52 | #define RESET_SD_EMMC_B 45 |
| 53 | #define RESET_SD_EMMC_C 46 | 53 | #define RESET_SD_EMMC_C 46 |
| 54 | /* 47-60 */ | 54 | /* 47 */ |
| 55 | #define RESET_USB_PHY20 48 | ||
| 56 | #define RESET_USB_PHY21 49 | ||
| 57 | /* 50-60 */ | ||
| 55 | #define RESET_AUDIO_CODEC 61 | 58 | #define RESET_AUDIO_CODEC 61 |
| 56 | /* 62-63 */ | 59 | /* 62-63 */ |
| 57 | /* RESET2 */ | 60 | /* RESET2 */ |
diff --git a/include/keys/trusted.h b/include/keys/trusted.h index adbcb6817826..0071298b9b28 100644 --- a/include/keys/trusted.h +++ b/include/keys/trusted.h | |||
| @@ -38,7 +38,7 @@ enum { | |||
| 38 | 38 | ||
| 39 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, | 39 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, |
| 40 | unsigned int keylen, unsigned char *h1, | 40 | unsigned int keylen, unsigned char *h1, |
| 41 | unsigned char *h2, unsigned char h3, ...); | 41 | unsigned char *h2, unsigned int h3, ...); |
| 42 | int TSS_checkhmac1(unsigned char *buffer, | 42 | int TSS_checkhmac1(unsigned char *buffer, |
| 43 | const uint32_t command, | 43 | const uint32_t command, |
| 44 | const unsigned char *ononce, | 44 | const unsigned char *ononce, |
diff --git a/include/linux/bio.h b/include/linux/bio.h index bb6090aa165d..e584673c1881 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio) | |||
| 120 | return bio->bi_vcnt >= bio->bi_max_vecs; | 120 | return bio->bi_vcnt >= bio->bi_max_vecs; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | #define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \ | 123 | static inline bool bio_next_segment(const struct bio *bio, |
| 124 | for (bv = bvec_init_iter_all(&iter_all); \ | 124 | struct bvec_iter_all *iter) |
| 125 | (iter_all.done < (bvl)->bv_len) && \ | 125 | { |
| 126 | (mp_bvec_next_segment((bvl), &iter_all), 1); \ | 126 | if (iter->idx >= bio->bi_vcnt) |
| 127 | iter_all.done += bv->bv_len, i += 1) | 127 | return false; |
| 128 | |||
| 129 | bvec_advance(&bio->bi_io_vec[iter->idx], iter); | ||
| 130 | return true; | ||
| 131 | } | ||
| 128 | 132 | ||
| 129 | /* | 133 | /* |
| 130 | * drivers should _never_ use the all version - the bio may have been split | 134 | * drivers should _never_ use the all version - the bio may have been split |
| 131 | * before it got to the driver and the driver won't own all of it | 135 | * before it got to the driver and the driver won't own all of it |
| 132 | */ | 136 | */ |
| 133 | #define bio_for_each_segment_all(bvl, bio, i, iter_all) \ | 137 | #define bio_for_each_segment_all(bvl, bio, i, iter) \ |
| 134 | for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \ | 138 | for (i = 0, bvl = bvec_init_iter_all(&iter); \ |
| 135 | mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all) | 139 | bio_next_segment((bio), &iter); i++) |
| 136 | 140 | ||
| 137 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, | 141 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, |
| 138 | unsigned bytes) | 142 | unsigned bytes) |
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h index 50fb0dee23e8..d35b8ec1c485 100644 --- a/include/linux/bitrev.h +++ b/include/linux/bitrev.h | |||
| @@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x) | |||
| 34 | 34 | ||
| 35 | #define __constant_bitrev32(x) \ | 35 | #define __constant_bitrev32(x) \ |
| 36 | ({ \ | 36 | ({ \ |
| 37 | u32 __x = x; \ | 37 | u32 ___x = x; \ |
| 38 | __x = (__x >> 16) | (__x << 16); \ | 38 | ___x = (___x >> 16) | (___x << 16); \ |
| 39 | __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ | 39 | ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ |
| 40 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 40 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 41 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 41 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 42 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 42 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 43 | __x; \ | 43 | ___x; \ |
| 44 | }) | 44 | }) |
| 45 | 45 | ||
| 46 | #define __constant_bitrev16(x) \ | 46 | #define __constant_bitrev16(x) \ |
| 47 | ({ \ | 47 | ({ \ |
| 48 | u16 __x = x; \ | 48 | u16 ___x = x; \ |
| 49 | __x = (__x >> 8) | (__x << 8); \ | 49 | ___x = (___x >> 8) | (___x << 8); \ |
| 50 | __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ | 50 | ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ |
| 51 | __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ | 51 | ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ |
| 52 | __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ | 52 | ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ |
| 53 | __x; \ | 53 | ___x; \ |
| 54 | }) | 54 | }) |
| 55 | 55 | ||
| 56 | #define __constant_bitrev8x4(x) \ | 56 | #define __constant_bitrev8x4(x) \ |
| 57 | ({ \ | 57 | ({ \ |
| 58 | u32 __x = x; \ | 58 | u32 ___x = x; \ |
| 59 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 59 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 60 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 60 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 61 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 61 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 62 | __x; \ | 62 | ___x; \ |
| 63 | }) | 63 | }) |
| 64 | 64 | ||
| 65 | #define __constant_bitrev8(x) \ | 65 | #define __constant_bitrev8(x) \ |
| 66 | ({ \ | 66 | ({ \ |
| 67 | u8 __x = x; \ | 67 | u8 ___x = x; \ |
| 68 | __x = (__x >> 4) | (__x << 4); \ | 68 | ___x = (___x >> 4) | (___x << 4); \ |
| 69 | __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ | 69 | ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ |
| 70 | __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ | 70 | ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ |
| 71 | __x; \ | 71 | ___x; \ |
| 72 | }) | 72 | }) |
| 73 | 73 | ||
| 74 | #define bitrev32(x) \ | 74 | #define bitrev32(x) \ |
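The extra underscore matters when the caller is itself a statement-expression macro whose temporary is also named __x; a hedged illustration of the self-initialization the rename avoids (my_rev8 and the wrapped expression are hypothetical):

/* Hypothetical wrapper macro that also uses a temporary called __x. */
#define my_rev8(val)				\
({						\
	u8 __x = (val) ^ 0xff;			\
	__constant_bitrev8(__x);		\
})
/*
 * With the old bitrev.h this expanded to "u8 __x = __x;" inside the inner
 * block, so the freshly declared (uninitialized) __x shadowed the caller's;
 * the ___x temporaries keep the two scopes distinct.
 */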
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index cb2aa7ecafff..db29928de467 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
| @@ -302,6 +302,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); | |||
| 302 | void blk_mq_kick_requeue_list(struct request_queue *q); | 302 | void blk_mq_kick_requeue_list(struct request_queue *q); |
| 303 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); | 303 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
| 304 | bool blk_mq_complete_request(struct request *rq); | 304 | bool blk_mq_complete_request(struct request *rq); |
| 305 | void blk_mq_complete_request_sync(struct request *rq); | ||
| 305 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, | 306 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, |
| 306 | struct bio *bio); | 307 | struct bio *bio); |
| 307 | bool blk_mq_queue_stopped(struct request_queue *q); | 308 | bool blk_mq_queue_stopped(struct request_queue *q); |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5c58a3b2bf00..317ab30d2904 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -548,7 +548,6 @@ struct request_queue { | |||
| 548 | struct rcu_head rcu_head; | 548 | struct rcu_head rcu_head; |
| 549 | wait_queue_head_t mq_freeze_wq; | 549 | wait_queue_head_t mq_freeze_wq; |
| 550 | struct percpu_ref q_usage_counter; | 550 | struct percpu_ref q_usage_counter; |
| 551 | struct list_head all_q_node; | ||
| 552 | 551 | ||
| 553 | struct blk_mq_tag_set *tag_set; | 552 | struct blk_mq_tag_set *tag_set; |
| 554 | struct list_head tag_set_list; | 553 | struct list_head tag_set_list; |
diff --git a/include/linux/bvec.h b/include/linux/bvec.h index f6275c4da13a..ff13cbc1887d 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h | |||
| @@ -145,26 +145,33 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, | |||
| 145 | 145 | ||
| 146 | static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) | 146 | static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) |
| 147 | { | 147 | { |
| 148 | iter_all->bv.bv_page = NULL; | ||
| 149 | iter_all->done = 0; | 148 | iter_all->done = 0; |
| 149 | iter_all->idx = 0; | ||
| 150 | 150 | ||
| 151 | return &iter_all->bv; | 151 | return &iter_all->bv; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline void mp_bvec_next_segment(const struct bio_vec *bvec, | 154 | static inline void bvec_advance(const struct bio_vec *bvec, |
| 155 | struct bvec_iter_all *iter_all) | 155 | struct bvec_iter_all *iter_all) |
| 156 | { | 156 | { |
| 157 | struct bio_vec *bv = &iter_all->bv; | 157 | struct bio_vec *bv = &iter_all->bv; |
| 158 | 158 | ||
| 159 | if (bv->bv_page) { | 159 | if (iter_all->done) { |
| 160 | bv->bv_page = nth_page(bv->bv_page, 1); | 160 | bv->bv_page = nth_page(bv->bv_page, 1); |
| 161 | bv->bv_offset = 0; | 161 | bv->bv_offset = 0; |
| 162 | } else { | 162 | } else { |
| 163 | bv->bv_page = bvec->bv_page; | 163 | bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset / |
| 164 | bv->bv_offset = bvec->bv_offset; | 164 | PAGE_SIZE); |
| 165 | bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; | ||
| 165 | } | 166 | } |
| 166 | bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, | 167 | bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, |
| 167 | bvec->bv_len - iter_all->done); | 168 | bvec->bv_len - iter_all->done); |
| 169 | iter_all->done += bv->bv_len; | ||
| 170 | |||
| 171 | if (iter_all->done == bvec->bv_len) { | ||
| 172 | iter_all->idx++; | ||
| 173 | iter_all->done = 0; | ||
| 174 | } | ||
| 168 | } | 175 | } |
| 169 | 176 | ||
| 170 | /* | 177 | /* |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 54357a258b35..6ebc2098cfe1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, | |||
| 1611 | struct screen_info *si, efi_guid_t *proto, | 1611 | struct screen_info *si, efi_guid_t *proto, |
| 1612 | unsigned long size); | 1612 | unsigned long size); |
| 1613 | 1613 | ||
| 1614 | bool efi_runtime_disabled(void); | 1614 | #ifdef CONFIG_EFI |
| 1615 | extern bool efi_runtime_disabled(void); | ||
| 1616 | #else | ||
| 1617 | static inline bool efi_runtime_disabled(void) { return true; } | ||
| 1618 | #endif | ||
| 1619 | |||
| 1615 | extern void efi_call_virt_check_flags(unsigned long flags, const char *call); | 1620 | extern void efi_call_virt_check_flags(unsigned long flags, const char *call); |
| 1616 | extern unsigned long efi_call_virt_save_flags(void); | 1621 | extern unsigned long efi_call_virt_save_flags(void); |
| 1617 | 1622 | ||
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2e9e2763bf47..6e8bc53740f0 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -31,6 +31,7 @@ struct elevator_mq_ops { | |||
| 31 | void (*exit_sched)(struct elevator_queue *); | 31 | void (*exit_sched)(struct elevator_queue *); |
| 32 | int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); | 32 | int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
| 33 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); | 33 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
| 34 | void (*depth_updated)(struct blk_mq_hw_ctx *); | ||
| 34 | 35 | ||
| 35 | bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); | 36 | bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); |
| 36 | bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); | 37 | bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index e2f3b21cd72a..aa8bfd6f738c 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
| @@ -449,6 +449,18 @@ static inline void eth_addr_dec(u8 *addr) | |||
| 449 | } | 449 | } |
| 450 | 450 | ||
| 451 | /** | 451 | /** |
| 452 | * eth_addr_inc() - Increment the given MAC address. | ||
| 453 | * @addr: Pointer to a six-byte array containing Ethernet address to increment. | ||
| 454 | */ | ||
| 455 | static inline void eth_addr_inc(u8 *addr) | ||
| 456 | { | ||
| 457 | u64 u = ether_addr_to_u64(addr); | ||
| 458 | |||
| 459 | u++; | ||
| 460 | u64_to_ether_addr(u, addr); | ||
| 461 | } | ||
| 462 | |||
| 463 | /** | ||
| 452 | * is_etherdev_addr - Tell if given Ethernet address belongs to the device. | 464 | * is_etherdev_addr - Tell if given Ethernet address belongs to the device. |
| 453 | * @dev: Pointer to a device structure | 465 | * @dev: Pointer to a device structure |
| 454 | * @addr: Pointer to a six-byte array containing the Ethernet address | 466 | * @addr: Pointer to a six-byte array containing the Ethernet address |
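A small sketch of the new helper paired with the existing eth_addr_dec(), assuming 2-byte-aligned buffers as ether_addr_copy() requires; fill_mac_pool is an illustrative name:

#include <linux/etherdevice.h>

/* Illustrative: derive n consecutive MAC addresses from a base address. */
static void fill_mac_pool(const u8 *base, u8 pool[][ETH_ALEN], int n)
{
	int i;

	ether_addr_copy(pool[0], base);
	for (i = 1; i < n; i++) {
		ether_addr_copy(pool[i], pool[i - 1]);
		eth_addr_inc(pool[i]);
	}
}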
diff --git a/include/linux/fs.h b/include/linux/fs.h index 8b42df09b04c..dd28e7679089 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -158,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 158 | #define FMODE_OPENED ((__force fmode_t)0x80000) | 158 | #define FMODE_OPENED ((__force fmode_t)0x80000) |
| 159 | #define FMODE_CREATED ((__force fmode_t)0x100000) | 159 | #define FMODE_CREATED ((__force fmode_t)0x100000) |
| 160 | 160 | ||
| 161 | /* File is stream-like */ | ||
| 162 | #define FMODE_STREAM ((__force fmode_t)0x200000) | ||
| 163 | |||
| 161 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 164 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
| 162 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) | 165 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) |
| 163 | 166 | ||
| @@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); | |||
| 3074 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); | 3077 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); |
| 3075 | extern int generic_file_open(struct inode * inode, struct file * filp); | 3078 | extern int generic_file_open(struct inode * inode, struct file * filp); |
| 3076 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 3079 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
| 3080 | extern int stream_open(struct inode * inode, struct file * filp); | ||
| 3077 | 3081 | ||
| 3078 | #ifdef CONFIG_BLOCK | 3082 | #ifdef CONFIG_BLOCK |
| 3079 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, | 3083 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, |
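A minimal sketch of the intended use of the new FMODE_STREAM/stream_open() pair in a driver's ->open(); mydev_open is an illustrative name:

#include <linux/fs.h>

static int mydev_open(struct inode *inode, struct file *filp)
{
	/*
	 * stream_open() marks the file FMODE_STREAM: read/write ignore
	 * f_pos entirely and skip the f_pos locking, whereas
	 * nonseekable_open() only disallows lseek().
	 */
	return stream_open(inode, filp);
}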
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 34a5036debd3..2d14e21c16c0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -47,8 +47,8 @@ | |||
| 47 | 47 | ||
| 48 | #define u64_to_user_ptr(x) ( \ | 48 | #define u64_to_user_ptr(x) ( \ |
| 49 | { \ | 49 | { \ |
| 50 | typecheck(u64, x); \ | 50 | typecheck(u64, (x)); \ |
| 51 | (void __user *)(uintptr_t)x; \ | 51 | (void __user *)(uintptr_t)(x); \ |
| 52 | } \ | 52 | } \ |
| 53 | ) | 53 | ) |
| 54 | 54 | ||
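The added parentheses are ordinary macro hygiene; a hedged example of the expression-argument case they protect (pick_user_ptr is illustrative):

#include <linux/kernel.h>

static void __user *pick_user_ptr(bool use_alt, u64 main_addr, u64 alt_addr)
{
	/*
	 * Unparenthesized, this would expand to
	 * "(void __user *)(uintptr_t)use_alt ? alt_addr : main_addr",
	 * casting the condition rather than the selected value.
	 */
	return u64_to_user_ptr(use_alt ? alt_addr : main_addr);
}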
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 201f0f2683f2..9a897256e481 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
| @@ -173,6 +173,7 @@ struct kretprobe_instance { | |||
| 173 | struct kretprobe *rp; | 173 | struct kretprobe *rp; |
| 174 | kprobe_opcode_t *ret_addr; | 174 | kprobe_opcode_t *ret_addr; |
| 175 | struct task_struct *task; | 175 | struct task_struct *task; |
| 176 | void *fp; | ||
| 176 | char data[0]; | 177 | char data[0]; |
| 177 | }; | 178 | }; |
| 178 | 179 | ||
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9d55c63db09b..640a03642766 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/irqbypass.h> | 28 | #include <linux/irqbypass.h> |
| 29 | #include <linux/swait.h> | 29 | #include <linux/swait.h> |
| 30 | #include <linux/refcount.h> | 30 | #include <linux/refcount.h> |
| 31 | #include <linux/nospec.h> | ||
| 31 | #include <asm/signal.h> | 32 | #include <asm/signal.h> |
| 32 | 33 | ||
| 33 | #include <linux/kvm.h> | 34 | #include <linux/kvm.h> |
| @@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) | |||
| 513 | 514 | ||
| 514 | static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) | 515 | static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) |
| 515 | { | 516 | { |
| 516 | /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case | 517 | int num_vcpus = atomic_read(&kvm->online_vcpus); |
| 517 | * the caller has read kvm->online_vcpus before (as is the case | 518 | i = array_index_nospec(i, num_vcpus); |
| 518 | * for kvm_for_each_vcpu, for example). | 519 | |
| 519 | */ | 520 | /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ |
| 520 | smp_rmb(); | 521 | smp_rmb(); |
| 521 | return kvm->vcpus[i]; | 522 | return kvm->vcpus[i]; |
| 522 | } | 523 | } |
| @@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm); | |||
| 600 | 601 | ||
| 601 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) | 602 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) |
| 602 | { | 603 | { |
| 604 | as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); | ||
| 603 | return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, | 605 | return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, |
| 604 | lockdep_is_held(&kvm->slots_lock) || | 606 | lockdep_is_held(&kvm->slots_lock) || |
| 605 | !refcount_read(&kvm->users_count)); | 607 | !refcount_read(&kvm->users_count)); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1f3d880b7ca1..dbb6118370c1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page); | |||
| 566 | void __unlock_page_memcg(struct mem_cgroup *memcg); | 566 | void __unlock_page_memcg(struct mem_cgroup *memcg); |
| 567 | void unlock_page_memcg(struct page *page); | 567 | void unlock_page_memcg(struct page *page); |
| 568 | 568 | ||
| 569 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 569 | /* |
| 570 | * idx can be of type enum memcg_stat_item or node_stat_item. | ||
| 571 | * Keep in sync with memcg_exact_page_state(). | ||
| 572 | */ | ||
| 570 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, | 573 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, |
| 571 | int idx) | 574 | int idx) |
| 572 | { | 575 | { |
diff --git a/include/linux/mii.h b/include/linux/mii.h index 6fee8b1a4400..5cd824c1c0ca 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
| @@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) | |||
| 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, |
| 470 | advertising)) | 470 | advertising)) |
| 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; | 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; |
| 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, |
| 473 | advertising)) | 473 | advertising)) |
| 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; |
| 475 | 475 | ||
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 022541dc5dbf..0d0729648844 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags { | |||
| 594 | }; | 594 | }; |
| 595 | 595 | ||
| 596 | struct mlx5_td { | 596 | struct mlx5_td { |
| 597 | /* protects tirs list changes while tirs refresh */ | ||
| 598 | struct mutex list_lock; | ||
| 597 | struct list_head tirs_list; | 599 | struct list_head tirs_list; |
| 598 | u32 tdn; | 600 | u32 tdn; |
| 599 | }; | 601 | }; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 76769749b5a5..6b10c21630f5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -966,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page) | |||
| 966 | } | 966 | } |
| 967 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ | 967 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ |
| 968 | 968 | ||
| 969 | /* 127: arbitrary random number, small enough to assemble well */ | ||
| 970 | #define page_ref_zero_or_close_to_overflow(page) \ | ||
| 971 | ((unsigned int) page_ref_count(page) + 127u <= 127u) | ||
| 972 | |||
| 969 | static inline void get_page(struct page *page) | 973 | static inline void get_page(struct page *page) |
| 970 | { | 974 | { |
| 971 | page = compound_head(page); | 975 | page = compound_head(page); |
| @@ -973,8 +977,17 @@ static inline void get_page(struct page *page) | |||
| 973 | * Getting a normal page or the head of a compound page | 977 | * Getting a normal page or the head of a compound page |
| 974 | * requires to already have an elevated page->_refcount. | 978 | * requires to already have an elevated page->_refcount. |
| 975 | */ | 979 | */ |
| 976 | VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); | 980 | VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); |
| 981 | page_ref_inc(page); | ||
| 982 | } | ||
| 983 | |||
| 984 | static inline __must_check bool try_get_page(struct page *page) | ||
| 985 | { | ||
| 986 | page = compound_head(page); | ||
| 987 | if (WARN_ON_ONCE(page_ref_count(page) <= 0)) | ||
| 988 | return false; | ||
| 977 | page_ref_inc(page); | 989 | page_ref_inc(page); |
| 990 | return true; | ||
| 978 | } | 991 | } |
| 979 | 992 | ||
| 980 | static inline void put_page(struct page *page) | 993 | static inline void put_page(struct page *page) |
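A minimal sketch of the new failure-tolerant refcount path, assuming a GUP-style caller; grab_page_ref is an illustrative name:

#include <linux/mm.h>

static struct page *grab_page_ref(struct page *page)
{
	/*
	 * get_page() now BUGs if the refcount is zero or close to
	 * overflowing; try_get_page() lets callers that can race with
	 * freeing back out instead.
	 */
	if (!try_get_page(page))
		return NULL;
	return page;
}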
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7eade9132f02..4ef4bbe78a1d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -671,7 +671,7 @@ enum vm_fault_reason { | |||
| 671 | 671 | ||
| 672 | /* Encode hstate index for a hwpoisoned large page */ | 672 | /* Encode hstate index for a hwpoisoned large page */ |
| 673 | #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) | 673 | #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) |
| 674 | #define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf) | 674 | #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf) |
| 675 | 675 | ||
| 676 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ | 676 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ |
| 677 | VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ | 677 | VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 26f69cf763f4..324e872c91d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -1500,6 +1500,7 @@ struct net_device_ops { | |||
| 1500 | * @IFF_FAILOVER: device is a failover master device | 1500 | * @IFF_FAILOVER: device is a failover master device |
| 1501 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1501 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
| 1502 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | 1502 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device |
| 1503 | * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running | ||
| 1503 | */ | 1504 | */ |
| 1504 | enum netdev_priv_flags { | 1505 | enum netdev_priv_flags { |
| 1505 | IFF_802_1Q_VLAN = 1<<0, | 1506 | IFF_802_1Q_VLAN = 1<<0, |
| @@ -1532,6 +1533,7 @@ enum netdev_priv_flags { | |||
| 1532 | IFF_FAILOVER = 1<<27, | 1533 | IFF_FAILOVER = 1<<27, |
| 1533 | IFF_FAILOVER_SLAVE = 1<<28, | 1534 | IFF_FAILOVER_SLAVE = 1<<28, |
| 1534 | IFF_L3MDEV_RX_HANDLER = 1<<29, | 1535 | IFF_L3MDEV_RX_HANDLER = 1<<29, |
| 1536 | IFF_LIVE_RENAME_OK = 1<<30, | ||
| 1535 | }; | 1537 | }; |
| 1536 | 1538 | ||
| 1537 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1539 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
| @@ -1563,6 +1565,7 @@ enum netdev_priv_flags { | |||
| 1563 | #define IFF_FAILOVER IFF_FAILOVER | 1565 | #define IFF_FAILOVER IFF_FAILOVER |
| 1564 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1566 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
| 1565 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | 1567 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
| 1568 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK | ||
| 1566 | 1569 | ||
| 1567 | /** | 1570 | /** |
| 1568 | * struct net_device - The DEVICE structure. | 1571 | * struct net_device - The DEVICE structure. |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index baa49e6a23cc..c40720cb59ac 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -967,8 +967,13 @@ struct nvme_get_log_page_command { | |||
| 967 | __le16 numdl; | 967 | __le16 numdl; |
| 968 | __le16 numdu; | 968 | __le16 numdu; |
| 969 | __u16 rsvd11; | 969 | __u16 rsvd11; |
| 970 | __le32 lpol; | 970 | union { |
| 971 | __le32 lpou; | 971 | struct { |
| 972 | __le32 lpol; | ||
| 973 | __le32 lpou; | ||
| 974 | }; | ||
| 975 | __le64 lpo; | ||
| 976 | }; | ||
| 972 | __u32 rsvd14[2]; | 977 | __u32 rsvd14[2]; |
| 973 | }; | 978 | }; |
| 974 | 979 | ||
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 787d224ff43e..abb2dac3da9b 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
| @@ -101,18 +101,20 @@ struct pipe_buf_operations { | |||
| 101 | /* | 101 | /* |
| 102 | * Get a reference to the pipe buffer. | 102 | * Get a reference to the pipe buffer. |
| 103 | */ | 103 | */ |
| 104 | void (*get)(struct pipe_inode_info *, struct pipe_buffer *); | 104 | bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | /** | 107 | /** |
| 108 | * pipe_buf_get - get a reference to a pipe_buffer | 108 | * pipe_buf_get - get a reference to a pipe_buffer |
| 109 | * @pipe: the pipe that the buffer belongs to | 109 | * @pipe: the pipe that the buffer belongs to |
| 110 | * @buf: the buffer to get a reference to | 110 | * @buf: the buffer to get a reference to |
| 111 | * | ||
| 112 | * Return: %true if the reference was successfully obtained. | ||
| 111 | */ | 113 | */ |
| 112 | static inline void pipe_buf_get(struct pipe_inode_info *pipe, | 114 | static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, |
| 113 | struct pipe_buffer *buf) | 115 | struct pipe_buffer *buf) |
| 114 | { | 116 | { |
| 115 | buf->ops->get(pipe, buf); | 117 | return buf->ops->get(pipe, buf); |
| 116 | } | 118 | } |
| 117 | 119 | ||
| 118 | /** | 120 | /** |
| @@ -171,7 +173,7 @@ struct pipe_inode_info *alloc_pipe_info(void); | |||
| 171 | void free_pipe_info(struct pipe_inode_info *); | 173 | void free_pipe_info(struct pipe_inode_info *); |
| 172 | 174 | ||
| 173 | /* Generic pipe buffer ops functions */ | 175 | /* Generic pipe buffer ops functions */ |
| 174 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | 176 | bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
| 175 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | 177 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); |
| 176 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | 178 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); |
| 177 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | 179 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); |
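A minimal sketch of a caller adapting to the now-checked reference; dup_pipe_buf and the -EIO choice are illustrative:

#include <linux/pipe_fs_i.h>

static int dup_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	/* The ->get() path can now refuse instead of overflowing a refcount. */
	if (!pipe_buf_get(pipe, buf))
		return -EIO;
	/* ... hand the extra reference on to the consumer ... */
	return 0;
}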
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h index 3ab892208343..7a37ac27d0fb 100644 --- a/include/linux/platform_data/x86/clk-pmc-atom.h +++ b/include/linux/platform_data/x86/clk-pmc-atom.h | |||
| @@ -35,10 +35,13 @@ struct pmc_clk { | |||
| 35 | * | 35 | * |
| 36 | * @base: PMC clock register base offset | 36 | * @base: PMC clock register base offset |
| 37 | * @clks: pointer to set of registered clocks, typically 0..5 | 37 | * @clks: pointer to set of registered clocks, typically 0..5 |
| 38 | * @critical: flag to indicate if firmware enabled pmc_plt_clks | ||
| 39 | * should be marked as critical or not | ||
| 38 | */ | 40 | */ |
| 39 | struct pmc_clk_data { | 41 | struct pmc_clk_data { |
| 40 | void __iomem *base; | 42 | void __iomem *base; |
| 41 | const struct pmc_clk *clks; | 43 | const struct pmc_clk *clks; |
| 44 | bool critical; | ||
| 42 | }; | 45 | }; |
| 43 | 46 | ||
| 44 | #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ | 47 | #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index edb9b040c94c..d5084ebd9f03 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -9,6 +9,13 @@ | |||
| 9 | #include <linux/bug.h> /* For BUG_ON. */ | 9 | #include <linux/bug.h> /* For BUG_ON. */ |
| 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ | 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ |
| 11 | #include <uapi/linux/ptrace.h> | 11 | #include <uapi/linux/ptrace.h> |
| 12 | #include <linux/seccomp.h> | ||
| 13 | |||
| 14 | /* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */ | ||
| 15 | struct syscall_info { | ||
| 16 | __u64 sp; | ||
| 17 | struct seccomp_data data; | ||
| 18 | }; | ||
| 12 | 19 | ||
| 13 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, | 20 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, |
| 14 | void *buf, int len, unsigned int gup_flags); | 21 | void *buf, int len, unsigned int gup_flags); |
| @@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs) | |||
| 407 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) | 414 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) |
| 408 | #endif | 415 | #endif |
| 409 | 416 | ||
| 410 | extern int task_current_syscall(struct task_struct *target, long *callno, | 417 | extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); |
| 411 | unsigned long args[6], unsigned int maxargs, | ||
| 412 | unsigned long *sp, unsigned long *pc); | ||
| 413 | 418 | ||
| 414 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); | 419 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); |
| 415 | #endif | 420 | #endif |
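A hedged sketch of the consolidated interface, assuming the 0-on-success convention of the previous API; show_current_syscall is an illustrative name:

#include <linux/ptrace.h>
#include <linux/printk.h>

static void show_current_syscall(struct task_struct *target)
{
	struct syscall_info info;

	if (task_current_syscall(target, &info))
		return;	/* assumed: non-zero when the task is not blocked */

	pr_info("nr=%d sp=%llx arg0=%llx\n",
		info.data.nr, (unsigned long long)info.sp,
		(unsigned long long)info.data.args[0]);
}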
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 0cd9f10423fb..a3fda9f024c3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
| @@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm) | |||
| 49 | __mmdrop(mm); | 49 | __mmdrop(mm); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | /* | ||
| 53 | * This has to be called after a get_task_mm()/mmget_not_zero() | ||
| 54 | * followed by taking the mmap_sem for writing before modifying the | ||
| 55 | * vmas or anything the coredump pretends not to change from under it. | ||
| 56 | * | ||
| 57 | * NOTE: find_extend_vma() called from GUP context is the only place | ||
| 58 | * that can modify the "mm" (notably the vm_start/end) under mmap_sem | ||
| 59 | * for reading and outside the context of the process, so it is also | ||
| 60 | * the only case that holds the mmap_sem for reading that must call | ||
| 61 | * this function. Generally if the mmap_sem is hold for reading | ||
| 62 | * there's no need of this check after get_task_mm()/mmget_not_zero(). | ||
| 63 | * | ||
| 64 | * This function can be obsoleted and the check can be removed, after | ||
| 65 | * the coredump code will hold the mmap_sem for writing before | ||
| 66 | * invoking the ->core_dump methods. | ||
| 67 | */ | ||
| 68 | static inline bool mmget_still_valid(struct mm_struct *mm) | ||
| 69 | { | ||
| 70 | return likely(!mm->core_state); | ||
| 71 | } | ||
| 72 | |||
| 52 | /** | 73 | /** |
| 53 | * mmget() - Pin the address space associated with a &struct mm_struct. | 74 | * mmget() - Pin the address space associated with a &struct mm_struct. |
| 54 | * @mm: The address space to pin. | 75 | * @mm: The address space to pin. |
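A minimal sketch of the pattern the userfaultfd hunks earlier in this patch now follow; modify_vmas_of and its error codes are illustrative:

#include <linux/sched/mm.h>
#include <linux/mm.h>

static int modify_vmas_of(struct mm_struct *mm)
{
	int ret = -EINTR;

	if (!mmget_not_zero(mm))
		return -ESRCH;

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))	/* a coredump is snapshotting mm */
		goto out_unlock;
	/* ... safe to modify vmas here without racing the coredump ... */
	ret = 0;
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
	return ret;
}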
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f3fb1edb3526..20d815a33145 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -21,6 +21,7 @@ struct shmem_inode_info { | |||
| 21 | struct list_head swaplist; /* chain of maybes on swap */ | 21 | struct list_head swaplist; /* chain of maybes on swap */ |
| 22 | struct shared_policy policy; /* NUMA memory alloc policy */ | 22 | struct shared_policy policy; /* NUMA memory alloc policy */ |
| 23 | struct simple_xattrs xattrs; /* list of xattrs */ | 23 | struct simple_xattrs xattrs; /* list of xattrs */ |
| 24 | atomic_t stop_eviction; /* hold when working on inode */ | ||
| 24 | struct inode vfs_inode; | 25 | struct inode vfs_inode; |
| 25 | }; | 26 | }; |
| 26 | 27 | ||
diff --git a/include/linux/string.h b/include/linux/string.h index 7927b875f80c..6ab0a6fa512e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t); | |||
| 150 | #ifndef __HAVE_ARCH_MEMCMP | 150 | #ifndef __HAVE_ARCH_MEMCMP |
| 151 | extern int memcmp(const void *,const void *,__kernel_size_t); | 151 | extern int memcmp(const void *,const void *,__kernel_size_t); |
| 152 | #endif | 152 | #endif |
| 153 | #ifndef __HAVE_ARCH_BCMP | ||
| 154 | extern int bcmp(const void *,const void *,__kernel_size_t); | ||
| 155 | #endif | ||
| 153 | #ifndef __HAVE_ARCH_MEMCHR | 156 | #ifndef __HAVE_ARCH_MEMCHR |
| 154 | extern void * memchr(const void *,int,__kernel_size_t); | 157 | extern void * memchr(const void *,int,__kernel_size_t); |
| 155 | #endif | 158 | #endif |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index ec861cd0cfe8..52d41d0c1ae1 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -304,12 +304,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) | |||
| 304 | } | 304 | } |
| 305 | #endif /* CONFIG_SUNRPC_SWAP */ | 305 | #endif /* CONFIG_SUNRPC_SWAP */ |
| 306 | 306 | ||
| 307 | static inline bool | ||
| 308 | rpc_task_need_resched(const struct rpc_task *task) | ||
| 309 | { | ||
| 310 | if (RPC_IS_QUEUED(task) || task->tk_callback) | ||
| 311 | return true; | ||
| 312 | return false; | ||
| 313 | } | ||
| 314 | |||
| 315 | #endif /* _LINUX_SUNRPC_SCHED_H_ */ | 307 | #endif /* _LINUX_SUNRPC_SCHED_H_ */ |
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index fab02133a919..3dc70adfe5f5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
| @@ -63,7 +63,7 @@ struct virtqueue; | |||
| 63 | /* | 63 | /* |
| 64 | * Creates a virtqueue and allocates the descriptor ring. If | 64 | * Creates a virtqueue and allocates the descriptor ring. If |
| 65 | * may_reduce_num is set, then this may allocate a smaller ring than | 65 | * may_reduce_num is set, then this may allocate a smaller ring than |
| 66 | * expected. The caller should query virtqueue_get_ring_size to learn | 66 | * expected. The caller should query virtqueue_get_vring_size to learn |
| 67 | * the actual size of the ring. | 67 | * the actual size of the ring. |
| 68 | */ | 68 | */ |
| 69 | struct virtqueue *vring_create_virtqueue(unsigned int index, | 69 | struct virtqueue *vring_create_virtqueue(unsigned int index, |
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 2bfb87eb98ce..78c856cba4f5 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h | |||
| @@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, | |||
| 61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, | 61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, |
| 62 | unsigned int); | 62 | unsigned int); |
| 63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); | 63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); |
| 64 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); | 64 | bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *, |
| 65 | u32 *); | ||
| 65 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); | 66 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); |
| 66 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); | 67 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); |
| 67 | bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, | 68 | bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, |
| 68 | ktime_t *); | 69 | ktime_t *); |
| 70 | bool rxrpc_kernel_call_is_complete(struct rxrpc_call *); | ||
| 69 | 71 | ||
| 70 | #endif /* _NET_RXRPC_H */ | 72 | #endif /* _NET_RXRPC_H */ |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bb307a11ee63..13bfeb712d36 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
| @@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev, | |||
| 7183 | #define wiphy_info(wiphy, format, args...) \ | 7183 | #define wiphy_info(wiphy, format, args...) \ |
| 7184 | dev_info(&(wiphy)->dev, format, ##args) | 7184 | dev_info(&(wiphy)->dev, format, ##args) |
| 7185 | 7185 | ||
| 7186 | #define wiphy_err_ratelimited(wiphy, format, args...) \ | ||
| 7187 | dev_err_ratelimited(&(wiphy)->dev, format, ##args) | ||
| 7188 | #define wiphy_warn_ratelimited(wiphy, format, args...) \ | ||
| 7189 | dev_warn_ratelimited(&(wiphy)->dev, format, ##args) | ||
| 7190 | |||
| 7186 | #define wiphy_debug(wiphy, format, args...) \ | 7191 | #define wiphy_debug(wiphy, format, args...) \ |
| 7187 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) | 7192 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) |
| 7188 | 7193 | ||
diff --git a/include/net/ip.h b/include/net/ip.h index be3cad9c2e4c..583526aad1d0 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, | |||
| 677 | unsigned char __user *data, int optlen); | 677 | unsigned char __user *data, int optlen); |
| 678 | void ip_options_undo(struct ip_options *opt); | 678 | void ip_options_undo(struct ip_options *opt); |
| 679 | void ip_forward_options(struct sk_buff *skb); | 679 | void ip_forward_options(struct sk_buff *skb); |
| 680 | int ip_options_rcv_srr(struct sk_buff *skb); | 680 | int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); |
| 681 | 681 | ||
| 682 | /* | 682 | /* |
| 683 | * Functions provided by ip_sockglue.c | 683 | * Functions provided by ip_sockglue.c |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ac2ed8ec662b..112dc18c658f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, | |||
| 6231 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6231 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
| 6232 | * @ac: AC number to return packets from. | 6232 | * @ac: AC number to return packets from. |
| 6233 | * | 6233 | * |
| 6234 | * Should only be called between calls to ieee80211_txq_schedule_start() | ||
| 6235 | * and ieee80211_txq_schedule_end(). | ||
| 6236 | * Returns the next txq if successful, %NULL if no queue is eligible. If a txq | 6234 | * Returns the next txq if successful, %NULL if no queue is eligible. If a txq |
| 6237 | * is returned, it should be returned with ieee80211_return_txq() after the | 6235 | * is returned, it should be returned with ieee80211_return_txq() after the |
| 6238 | * driver has finished scheduling it. | 6236 | * driver has finished scheduling it. |
| @@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, | |||
| 6240 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac); | 6238 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac); |
| 6241 | 6239 | ||
| 6242 | /** | 6240 | /** |
| 6243 | * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() | 6241 | * ieee80211_txq_schedule_start - start new scheduling round for TXQs |
| 6244 | * | ||
| 6245 | * @hw: pointer as obtained from ieee80211_alloc_hw() | ||
| 6246 | * @txq: pointer obtained from station or virtual interface | ||
| 6247 | * | ||
| 6248 | * Should only be called between calls to ieee80211_txq_schedule_start() | ||
| 6249 | * and ieee80211_txq_schedule_end(). | ||
| 6250 | */ | ||
| 6251 | void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); | ||
| 6252 | |||
| 6253 | /** | ||
| 6254 | * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC | ||
| 6255 | * | 6242 | * |
| 6256 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6243 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
| 6257 | * @ac: AC number to acquire locks for | 6244 | * @ac: AC number to acquire locks for |
| 6258 | * | 6245 | * |
| 6259 | * Acquire locks needed to schedule TXQs from the given AC. Should be called | 6246 | * Should be called before ieee80211_next_txq() or ieee80211_return_txq(). |
| 6260 | * before ieee80211_next_txq() or ieee80211_return_txq(). | 6247 | * The driver must not call multiple TXQ scheduling rounds concurrently. |
| 6261 | */ | 6248 | */ |
| 6262 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) | 6249 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac); |
| 6263 | __acquires(txq_lock); | 6250 | |
| 6251 | /* (deprecated) */ | ||
| 6252 | static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | ||
| 6253 | { | ||
| 6254 | } | ||
| 6255 | |||
| 6256 | void __ieee80211_schedule_txq(struct ieee80211_hw *hw, | ||
| 6257 | struct ieee80211_txq *txq, bool force); | ||
| 6264 | 6258 | ||
| 6265 | /** | 6259 | /** |
| 6266 | * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC | 6260 | * ieee80211_schedule_txq - schedule a TXQ for transmission |
| 6267 | * | 6261 | * |
| 6268 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6262 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
| 6269 | * @ac: AC number to acquire locks for | 6263 | * @txq: pointer obtained from station or virtual interface |
| 6270 | * | 6264 | * |
| 6271 | * Release locks previously acquired by ieee80211_txq_schedule_end(). | 6265 | * Schedules a TXQ for transmission if it is not already scheduled, |
| 6266 | * even if mac80211 does not have any packets buffered. | ||
| 6267 | * | ||
| 6268 | * The driver may call this function if it has buffered packets for | ||
| 6269 | * this TXQ internally. | ||
| 6272 | */ | 6270 | */ |
| 6273 | void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | 6271 | static inline void |
| 6274 | __releases(txq_lock); | 6272 | ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) |
| 6273 | { | ||
| 6274 | __ieee80211_schedule_txq(hw, txq, true); | ||
| 6275 | } | ||
| 6275 | 6276 | ||
| 6276 | /** | 6277 | /** |
| 6277 | * ieee80211_schedule_txq - schedule a TXQ for transmission | 6278 | * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() |
| 6278 | * | 6279 | * |
| 6279 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 6280 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
| 6280 | * @txq: pointer obtained from station or virtual interface | 6281 | * @txq: pointer obtained from station or virtual interface |
| 6282 | * @force: schedule txq even if mac80211 does not have any buffered packets. | ||
| 6281 | * | 6283 | * |
| 6282 | * Schedules a TXQ for transmission if it is not already scheduled. Takes a | 6284 | * The driver may set force=true if it has buffered packets for this TXQ |
| 6283 | * lock, which means it must *not* be called between | 6285 | * internally. |
| 6284 | * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end() | ||
| 6285 | */ | 6286 | */ |
| 6286 | void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) | 6287 | static inline void |
| 6287 | __acquires(txq_lock) __releases(txq_lock); | 6288 | ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, |
| 6289 | bool force) | ||
| 6290 | { | ||
| 6291 | __ieee80211_schedule_txq(hw, txq, force); | ||
| 6292 | } | ||
| 6288 | 6293 | ||
| 6289 | /** | 6294 | /** |
| 6290 | * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit | 6295 | * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit |
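A hedged sketch of a driver-side scheduling round under the revised TXQ API; drv_push_txq_to_hw and drv_schedule_ac are hypothetical driver functions:

#include <net/mac80211.h>

/* Hypothetical hardware push; returns true if frames stay queued in the driver. */
static bool drv_push_txq_to_hw(struct ieee80211_hw *hw,
			       struct ieee80211_txq *txq)
{
	struct sk_buff *skb = ieee80211_tx_dequeue(hw, txq);

	if (skb)
		dev_kfree_skb(skb);	/* stand-in for a real DMA submission */
	return false;
}

static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		bool more = drv_push_txq_to_hw(hw, txq);

		/*
		 * force=true keeps the TXQ scheduled even if mac80211 has
		 * nothing buffered, e.g. when frames remain driver-queued.
		 */
		ieee80211_return_txq(hw, txq, more);
	}
	ieee80211_txq_schedule_end(hw, ac);	/* now a no-op, kept for symmetry */
}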
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index a68ced28d8f4..12689ddfc24c 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
| @@ -59,6 +59,7 @@ struct net { | |||
| 59 | */ | 59 | */ |
| 60 | spinlock_t rules_mod_lock; | 60 | spinlock_t rules_mod_lock; |
| 61 | 61 | ||
| 62 | u32 hash_mix; | ||
| 62 | atomic64_t cookie_gen; | 63 | atomic64_t cookie_gen; |
| 63 | 64 | ||
| 64 | struct list_head list; /* list of network namespaces */ | 65 | struct list_head list; /* list of network namespaces */ |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 5ee7b30b4917..d2bc733a2ef1 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
| @@ -316,6 +316,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, | |||
| 316 | gfp_t flags); | 316 | gfp_t flags); |
| 317 | void nf_ct_tmpl_free(struct nf_conn *tmpl); | 317 | void nf_ct_tmpl_free(struct nf_conn *tmpl); |
| 318 | 318 | ||
| 319 | u32 nf_ct_get_id(const struct nf_conn *ct); | ||
| 320 | |||
| 319 | static inline void | 321 | static inline void |
| 320 | nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) | 322 | nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) |
| 321 | { | 323 | { |
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 778087591983..a49edfdf47e8 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h | |||
| @@ -75,6 +75,12 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple, | |||
| 75 | bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple, | 75 | bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple, |
| 76 | const struct nf_conntrack_tuple *orig); | 76 | const struct nf_conntrack_tuple *orig); |
| 77 | 77 | ||
| 78 | int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb, | ||
| 79 | unsigned int dataoff, | ||
| 80 | const struct nf_hook_state *state, | ||
| 81 | u8 l4proto, | ||
| 82 | union nf_inet_addr *outer_daddr); | ||
| 83 | |||
| 78 | int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, | 84 | int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, |
| 79 | struct sk_buff *skb, | 85 | struct sk_buff *skb, |
| 80 | unsigned int dataoff, | 86 | unsigned int dataoff, |
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 16a842456189..d9b665151f3d 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h | |||
| @@ -2,16 +2,10 @@ | |||
| 2 | #ifndef __NET_NS_HASH_H__ | 2 | #ifndef __NET_NS_HASH_H__ |
| 3 | #define __NET_NS_HASH_H__ | 3 | #define __NET_NS_HASH_H__ |
| 4 | 4 | ||
| 5 | #include <asm/cache.h> | 5 | #include <net/net_namespace.h> |
| 6 | |||
| 7 | struct net; | ||
| 8 | 6 | ||
| 9 | static inline u32 net_hash_mix(const struct net *net) | 7 | static inline u32 net_hash_mix(const struct net *net) |
| 10 | { | 8 | { |
| 11 | #ifdef CONFIG_NET_NS | 9 | return net->hash_mix; |
| 12 | return (u32)(((unsigned long)net) >> ilog2(sizeof(*net))); | ||
| 13 | #else | ||
| 14 | return 0; | ||
| 15 | #endif | ||
| 16 | } | 10 | } |
| 17 | #endif | 11 | #endif |
diff --git a/include/net/netrom.h b/include/net/netrom.h index 5a0714ff500f..80f15b1c1a48 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h | |||
| @@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *); | |||
| 266 | int nr_t1timer_running(struct sock *); | 266 | int nr_t1timer_running(struct sock *); |
| 267 | 267 | ||
| 268 | /* sysctl_net_netrom.c */ | 268 | /* sysctl_net_netrom.c */ |
| 269 | void nr_register_sysctl(void); | 269 | int nr_register_sysctl(void); |
| 270 | void nr_unregister_sysctl(void); | 270 | void nr_unregister_sysctl(void); |
| 271 | 271 | ||
| 272 | #endif | 272 | #endif |
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 87499b6b35d6..df5c69db68af 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h | |||
| @@ -166,7 +166,7 @@ struct nci_conn_info { | |||
| 166 | * According to specification 102 622 chapter 4.4 Pipes, | 166 | * According to specification 102 622 chapter 4.4 Pipes, |
| 167 | * the pipe identifier is 7 bits long. | 167 | * the pipe identifier is 7 bits long. |
| 168 | */ | 168 | */ |
| 169 | #define NCI_HCI_MAX_PIPES 127 | 169 | #define NCI_HCI_MAX_PIPES 128 |
| 170 | 170 | ||
| 171 | struct nci_hci_gate { | 171 | struct nci_hci_gate { |
| 172 | u8 gate; | 172 | u8 gate; |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 7d1a0483a17b..a2b38b3deeca 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch) | |||
| 923 | sch->qstats.overlimits++; | 923 | sch->qstats.overlimits++; |
| 924 | } | 924 | } |
| 925 | 925 | ||
| 926 | static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch) | ||
| 927 | { | ||
| 928 | __u32 qlen = qdisc_qlen_sum(sch); | ||
| 929 | |||
| 930 | return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); | ||
| 931 | } | ||
| 932 | |||
| 933 | static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, | ||
| 934 | __u32 *backlog) | ||
| 935 | { | ||
| 936 | struct gnet_stats_queue qstats = { 0 }; | ||
| 937 | __u32 len = qdisc_qlen_sum(sch); | ||
| 938 | |||
| 939 | __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len); | ||
| 940 | *qlen = qstats.qlen; | ||
| 941 | *backlog = qstats.backlog; | ||
| 942 | } | ||
| 943 | |||
| 944 | static inline void qdisc_tree_flush_backlog(struct Qdisc *sch) | ||
| 945 | { | ||
| 946 | __u32 qlen, backlog; | ||
| 947 | |||
| 948 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); | ||
| 949 | qdisc_tree_reduce_backlog(sch, qlen, backlog); | ||
| 950 | } | ||
| 951 | |||
| 952 | static inline void qdisc_purge_queue(struct Qdisc *sch) | ||
| 953 | { | ||
| 954 | __u32 qlen, backlog; | ||
| 955 | |||
| 956 | qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); | ||
| 957 | qdisc_reset(sch); | ||
| 958 | qdisc_tree_reduce_backlog(sch, qlen, backlog); | ||
| 959 | } | ||
| 960 | |||
| 926 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) | 961 | static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) |
| 927 | { | 962 | { |
| 928 | qh->head = NULL; | 963 | qh->head = NULL; |
| @@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, | |||
| 1106 | sch_tree_lock(sch); | 1141 | sch_tree_lock(sch); |
| 1107 | old = *pold; | 1142 | old = *pold; |
| 1108 | *pold = new; | 1143 | *pold = new; |
| 1109 | if (old != NULL) { | 1144 | if (old != NULL) |
| 1110 | unsigned int qlen = old->q.qlen; | 1145 | qdisc_tree_flush_backlog(old); |
| 1111 | unsigned int backlog = old->qstats.backlog; | ||
| 1112 | |||
| 1113 | qdisc_reset(old); | ||
| 1114 | qdisc_tree_reduce_backlog(old, qlen, backlog); | ||
| 1115 | } | ||
| 1116 | sch_tree_unlock(sch); | 1146 | sch_tree_unlock(sch); |
| 1117 | 1147 | ||
| 1118 | return old; | 1148 | return old; |
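The sch_generic.h hunk above introduces qdisc_qstats_qlen_backlog(), qdisc_tree_flush_backlog() and qdisc_purge_queue(), which sum the per-CPU queue statistics before resetting a child qdisc so that lockless (per-CPU) children are accounted correctly. A minimal sketch of how a classful qdisc's class-teardown path might switch to the new helper; the function name is hypothetical and not part of this patch set:

	#include <net/sch_generic.h>

	static void example_delete_leaf(struct Qdisc *leaf)
	{
		/* Previously open-coded, and blind to per-CPU counters:
		 *
		 *	unsigned int qlen = leaf->q.qlen;
		 *	unsigned int backlog = leaf->qstats.backlog;
		 *
		 *	qdisc_reset(leaf);
		 *	qdisc_tree_reduce_backlog(leaf, qlen, backlog);
		 */
		qdisc_purge_queue(leaf);	/* reset + propagate qlen/backlog upward */
	}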
diff --git a/include/net/sock.h b/include/net/sock.h index 8de5ee258b93..341f8bafa0cf 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) | |||
| 2084 | * @p: poll_table | 2084 | * @p: poll_table |
| 2085 | * | 2085 | * |
| 2086 | * See the comments in the wq_has_sleeper function. | 2086 | * See the comments in the wq_has_sleeper function. |
| 2087 | * | ||
| 2088 | * Do not derive sock from filp->private_data here. An SMC socket establishes | ||
| 2089 | * an internal TCP socket that is used in the fallback case. All socket | ||
| 2090 | * operations on the SMC socket are then forwarded to the TCP socket. In case of | ||
| 2091 | * poll, the filp->private_data pointer references the SMC socket because the | ||
| 2092 | * TCP socket has no file assigned. | ||
| 2093 | */ | 2087 | */ |
| 2094 | static inline void sock_poll_wait(struct file *filp, struct socket *sock, | 2088 | static inline void sock_poll_wait(struct file *filp, struct socket *sock, |
| 2095 | poll_table *p) | 2089 | poll_table *p) |
diff --git a/include/net/tls.h b/include/net/tls.h index a5a938583295..5934246b2c6f 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
| @@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | |||
| 307 | int tls_device_sendpage(struct sock *sk, struct page *page, | 307 | int tls_device_sendpage(struct sock *sk, struct page *page, |
| 308 | int offset, size_t size, int flags); | 308 | int offset, size_t size, int flags); |
| 309 | void tls_device_sk_destruct(struct sock *sk); | 309 | void tls_device_sk_destruct(struct sock *sk); |
| 310 | void tls_device_free_resources_tx(struct sock *sk); | ||
| 310 | void tls_device_init(void); | 311 | void tls_device_init(void); |
| 311 | void tls_device_cleanup(void); | 312 | void tls_device_cleanup(void); |
| 312 | int tls_tx_records(struct sock *sk, int flags); | 313 | int tls_tx_records(struct sock *sk, int flags); |
| @@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx, | |||
| 330 | int flags); | 331 | int flags); |
| 331 | int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, | 332 | int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, |
| 332 | int flags); | 333 | int flags); |
| 334 | bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx); | ||
| 333 | 335 | ||
| 334 | static inline struct tls_msg *tls_msg(struct sk_buff *skb) | 336 | static inline struct tls_msg *tls_msg(struct sk_buff *skb) |
| 335 | { | 337 | { |
| @@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, | |||
| 379 | static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) | 381 | static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) |
| 380 | { | 382 | { |
| 381 | #ifdef CONFIG_SOCK_VALIDATE_XMIT | 383 | #ifdef CONFIG_SOCK_VALIDATE_XMIT |
| 382 | return sk_fullsock(sk) & | 384 | return sk_fullsock(sk) && |
| 383 | (smp_load_acquire(&sk->sk_validate_xmit_skb) == | 385 | (smp_load_acquire(&sk->sk_validate_xmit_skb) == |
| 384 | &tls_validate_xmit_skb); | 386 | &tls_validate_xmit_skb); |
| 385 | #else | 387 | #else |
diff --git a/include/sound/soc.h b/include/sound/soc.h index eb7db605955b..482b4ea87c3c 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
| @@ -802,8 +802,13 @@ struct snd_soc_component_driver { | |||
| 802 | int probe_order; | 802 | int probe_order; |
| 803 | int remove_order; | 803 | int remove_order; |
| 804 | 804 | ||
| 805 | /* signal if the module handling the component cannot be removed */ | 805 | /* |
| 806 | unsigned int ignore_module_refcount:1; | 806 | * signal if the module handling the component should not be removed |
| 807 | * if a pcm is open. Setting this would prevent the module | ||
| 808 | * refcount being incremented in probe() but allow it be incremented | ||
| 809 | * when a pcm is opened and decremented when it is closed. | ||
| 810 | */ | ||
| 811 | unsigned int module_get_upon_open:1; | ||
| 807 | 812 | ||
| 808 | /* bits */ | 813 | /* bits */ |
| 809 | unsigned int idle_bias_on:1; | 814 | unsigned int idle_bias_on:1; |
| @@ -1083,6 +1088,8 @@ struct snd_soc_card { | |||
| 1083 | struct mutex mutex; | 1088 | struct mutex mutex; |
| 1084 | struct mutex dapm_mutex; | 1089 | struct mutex dapm_mutex; |
| 1085 | 1090 | ||
| 1091 | spinlock_t dpcm_lock; | ||
| 1092 | |||
| 1086 | bool instantiated; | 1093 | bool instantiated; |
| 1087 | bool topology_shortname_created; | 1094 | bool topology_shortname_created; |
| 1088 | 1095 | ||
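The reworded soc.h comment describes the intent of module_get_upon_open: pin the component's module only while a PCM is open instead of for the component's whole lifetime. A rough sketch of an open/close pair honouring the flag, assuming the usual component->driver and component->dev pointers; this is not code from the series:

	#include <sound/soc.h>
	#include <linux/module.h>

	static int example_component_open(struct snd_soc_component *component)
	{
		if (component->driver->module_get_upon_open &&
		    !try_module_get(component->dev->driver->owner))
			return -ENODEV;
		return 0;
	}

	static void example_component_close(struct snd_soc_component *component)
	{
		if (component->driver->module_get_upon_open)
			module_put(component->dev->driver->owner);
	}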
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index 44a3259ed4a5..b6e0cbc2c71f 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h | |||
| @@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter, | |||
| 28 | 28 | ||
| 29 | TP_fast_assign( | 29 | TP_fast_assign( |
| 30 | __entry->id = id; | 30 | __entry->id = id; |
| 31 | syscall_get_arguments(current, regs, 0, 6, __entry->args); | 31 | syscall_get_arguments(current, regs, __entry->args); |
| 32 | ), | 32 | ), |
| 33 | 33 | ||
| 34 | TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", | 34 | TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", |
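This is the first of several hunks (seccomp.c, trace_syscalls.c and lib/syscall.c below make the same adjustment) adapting callers to the reworked syscall_get_arguments(), which drops the start-index and count parameters and always fills all six arguments. A sketch of the new calling convention, wrapped in a hypothetical helper:

	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void example_capture_args(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned long args[6])
	{
		/* old: syscall_get_arguments(task, regs, 0, 6, args); */
		syscall_get_arguments(task, regs, args);
	}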
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 3652b239dad1..d473e5ed044c 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
| @@ -1591,7 +1591,7 @@ enum ethtool_link_mode_bit_indices { | |||
| 1591 | 1591 | ||
| 1592 | static inline int ethtool_validate_speed(__u32 speed) | 1592 | static inline int ethtool_validate_speed(__u32 speed) |
| 1593 | { | 1593 | { |
| 1594 | return speed <= INT_MAX || speed == SPEED_UNKNOWN; | 1594 | return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN; |
| 1595 | } | 1595 | } |
| 1596 | 1596 | ||
| 1597 | /* Duplex, half or full. */ | 1597 | /* Duplex, half or full. */ |
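The ethtool.h change only adds a cast: SPEED_UNKNOWN is defined as -1 (a signed int) while speed is __u32, so the bare comparison mixes signedness and can trip sign-compare warnings in userspace builds of this UAPI header; the cast makes the intended unsigned comparison against 0xffffffff explicit. A standalone illustration with hypothetical names:

	#include <limits.h>
	#include <stdint.h>

	#define EXAMPLE_SPEED_UNKNOWN -1

	static inline int example_validate_speed(uint32_t speed)
	{
		/* the cast keeps both operands unsigned */
		return speed <= INT_MAX || speed == (uint32_t)EXAMPLE_SPEED_UNKNOWN;
	}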
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 7f14d4a66c28..64cee116928e 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h | |||
| @@ -439,10 +439,12 @@ | |||
| 439 | #define KEY_TITLE 0x171 | 439 | #define KEY_TITLE 0x171 |
| 440 | #define KEY_SUBTITLE 0x172 | 440 | #define KEY_SUBTITLE 0x172 |
| 441 | #define KEY_ANGLE 0x173 | 441 | #define KEY_ANGLE 0x173 |
| 442 | #define KEY_ZOOM 0x174 | 442 | #define KEY_FULL_SCREEN 0x174 /* AC View Toggle */ |
| 443 | #define KEY_ZOOM KEY_FULL_SCREEN | ||
| 443 | #define KEY_MODE 0x175 | 444 | #define KEY_MODE 0x175 |
| 444 | #define KEY_KEYBOARD 0x176 | 445 | #define KEY_KEYBOARD 0x176 |
| 445 | #define KEY_SCREEN 0x177 | 446 | #define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */ |
| 447 | #define KEY_SCREEN KEY_ASPECT_RATIO | ||
| 446 | #define KEY_PC 0x178 /* Media Select Computer */ | 448 | #define KEY_PC 0x178 /* Media Select Computer */ |
| 447 | #define KEY_TV 0x179 /* Media Select TV */ | 449 | #define KEY_TV 0x179 /* Media Select TV */ |
| 448 | #define KEY_TV2 0x17a /* Media Select Cable */ | 450 | #define KEY_TV2 0x17a /* Media Select Cable */ |
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 404d4b9ffe76..df1153cea0b7 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | #ifndef __KERNEL__ | 33 | #ifndef __KERNEL__ |
| 34 | #include <stdlib.h> | 34 | #include <stdlib.h> |
| 35 | #include <time.h> | ||
| 35 | #endif | 36 | #endif |
| 36 | 37 | ||
| 37 | /* | 38 | /* |
diff --git a/init/main.c b/init/main.c index 598e278b46f7..7d4025d665eb 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -582,6 +582,8 @@ asmlinkage __visible void __init start_kernel(void) | |||
| 582 | page_alloc_init(); | 582 | page_alloc_init(); |
| 583 | 583 | ||
| 584 | pr_notice("Kernel command line: %s\n", boot_command_line); | 584 | pr_notice("Kernel command line: %s\n", boot_command_line); |
| 585 | /* parameters may set static keys */ | ||
| 586 | jump_label_init(); | ||
| 585 | parse_early_param(); | 587 | parse_early_param(); |
| 586 | after_dashes = parse_args("Booting kernel", | 588 | after_dashes = parse_args("Booting kernel", |
| 587 | static_command_line, __start___param, | 589 | static_command_line, __start___param, |
| @@ -591,8 +593,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
| 591 | parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, | 593 | parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, |
| 592 | NULL, set_init_arg); | 594 | NULL, set_init_arg); |
| 593 | 595 | ||
| 594 | jump_label_init(); | ||
| 595 | |||
| 596 | /* | 596 | /* |
| 597 | * These use large bootmem allocations and must precede | 597 | * These use large bootmem allocations and must precede |
| 598 | * kmem_cache_init() | 598 | * kmem_cache_init() |
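The init/main.c reorder exists because, as the new comment notes, kernel parameters may set static keys, and static keys can only be switched once jump_label_init() has run; it therefore now precedes parse_early_param(). A sketch of the kind of early parameter handler this enables; the parameter name and key are hypothetical:

	#include <linux/init.h>
	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(example_feature_key);

	static int __init example_feature_setup(char *str)
	{
		/* safe only after jump_label_init() has been called */
		static_branch_enable(&example_feature_key);
		return 0;
	}
	early_param("example_feature", example_feature_setup);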
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8974b3755670..3c18260403dd 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c | |||
| @@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work) | |||
| 162 | static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | 162 | static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, |
| 163 | struct xdp_frame *xdpf) | 163 | struct xdp_frame *xdpf) |
| 164 | { | 164 | { |
| 165 | unsigned int hard_start_headroom; | ||
| 165 | unsigned int frame_size; | 166 | unsigned int frame_size; |
| 166 | void *pkt_data_start; | 167 | void *pkt_data_start; |
| 167 | struct sk_buff *skb; | 168 | struct sk_buff *skb; |
| 168 | 169 | ||
| 170 | /* Part of headroom was reserved to xdpf */ | ||
| 171 | hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom; | ||
| 172 | |||
| 169 | /* build_skb need to place skb_shared_info after SKB end, and | 173 | /* build_skb need to place skb_shared_info after SKB end, and |
| 170 | * also want to know the memory "truesize". Thus, need to | 174 | * also want to know the memory "truesize". Thus, need to |
| 171 | * know the memory frame size backing xdp_buff. | 175 | * know the memory frame size backing xdp_buff. |
| @@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | |||
| 183 | * is not at a fixed memory location, with mixed length | 187 | * is not at a fixed memory location, with mixed length |
| 184 | * packets, which is bad for cache-line hotness. | 188 | * packets, which is bad for cache-line hotness. |
| 185 | */ | 189 | */ |
| 186 | frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + | 190 | frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) + |
| 187 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 191 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 188 | 192 | ||
| 189 | pkt_data_start = xdpf->data - xdpf->headroom; | 193 | pkt_data_start = xdpf->data - hard_start_headroom; |
| 190 | skb = build_skb(pkt_data_start, frame_size); | 194 | skb = build_skb(pkt_data_start, frame_size); |
| 191 | if (!skb) | 195 | if (!skb) |
| 192 | return NULL; | 196 | return NULL; |
| 193 | 197 | ||
| 194 | skb_reserve(skb, xdpf->headroom); | 198 | skb_reserve(skb, hard_start_headroom); |
| 195 | __skb_put(skb, xdpf->len); | 199 | __skb_put(skb, xdpf->len); |
| 196 | if (xdpf->metasize) | 200 | if (xdpf->metasize) |
| 197 | skb_metadata_set(skb, xdpf->metasize); | 201 | skb_metadata_set(skb, xdpf->metasize); |
| @@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, | |||
| 205 | * - RX ring dev queue index (skb_record_rx_queue) | 209 | * - RX ring dev queue index (skb_record_rx_queue) |
| 206 | */ | 210 | */ |
| 207 | 211 | ||
| 212 | /* Allow SKB to reuse area used by xdp_frame */ | ||
| 213 | xdp_scrub_frame(xdpf); | ||
| 214 | |||
| 208 | return skb; | 215 | return skb; |
| 209 | } | 216 | } |
| 210 | 217 | ||
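The cpumap.c fix accounts for the struct xdp_frame that the frame-conversion code stores at the very start of the headroom. A sketch of the buffer layout the new hard_start_headroom reflects (offsets assumed from the xdp_frame conversion convention, not spelled out in this hunk):

	/*
	 * pkt_data_start                            xdpf->data
	 * |                                         |
	 * v                                         v
	 * +------------------+----------------------+--------------------+-----------------+
	 * | struct xdp_frame |    xdpf->headroom    | packet (xdpf->len) | skb_shared_info |
	 * +------------------+----------------------+--------------------+-----------------+
	 * |<------- hard_start_headroom ----------->|
	 *
	 * skb_reserve() must skip hard_start_headroom, and the frame_size passed
	 * to build_skb() must include it, as the hunk above now does.
	 */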
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 2ada5e21dfa6..4a8f390a2b82 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
| @@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ | |||
| 554 | } | 554 | } |
| 555 | EXPORT_SYMBOL(bpf_prog_get_type_path); | 555 | EXPORT_SYMBOL(bpf_prog_get_type_path); |
| 556 | 556 | ||
| 557 | static void bpf_evict_inode(struct inode *inode) | ||
| 558 | { | ||
| 559 | enum bpf_type type; | ||
| 560 | |||
| 561 | truncate_inode_pages_final(&inode->i_data); | ||
| 562 | clear_inode(inode); | ||
| 563 | |||
| 564 | if (S_ISLNK(inode->i_mode)) | ||
| 565 | kfree(inode->i_link); | ||
| 566 | if (!bpf_inode_type(inode, &type)) | ||
| 567 | bpf_any_put(inode->i_private, type); | ||
| 568 | } | ||
| 569 | |||
| 570 | /* | 557 | /* |
| 571 | * Display the mount options in /proc/mounts. | 558 | * Display the mount options in /proc/mounts. |
| 572 | */ | 559 | */ |
| @@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) | |||
| 579 | return 0; | 566 | return 0; |
| 580 | } | 567 | } |
| 581 | 568 | ||
| 569 | static void bpf_destroy_inode_deferred(struct rcu_head *head) | ||
| 570 | { | ||
| 571 | struct inode *inode = container_of(head, struct inode, i_rcu); | ||
| 572 | enum bpf_type type; | ||
| 573 | |||
| 574 | if (S_ISLNK(inode->i_mode)) | ||
| 575 | kfree(inode->i_link); | ||
| 576 | if (!bpf_inode_type(inode, &type)) | ||
| 577 | bpf_any_put(inode->i_private, type); | ||
| 578 | free_inode_nonrcu(inode); | ||
| 579 | } | ||
| 580 | |||
| 581 | static void bpf_destroy_inode(struct inode *inode) | ||
| 582 | { | ||
| 583 | call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred); | ||
| 584 | } | ||
| 585 | |||
| 582 | static const struct super_operations bpf_super_ops = { | 586 | static const struct super_operations bpf_super_ops = { |
| 583 | .statfs = simple_statfs, | 587 | .statfs = simple_statfs, |
| 584 | .drop_inode = generic_delete_inode, | 588 | .drop_inode = generic_delete_inode, |
| 585 | .show_options = bpf_show_options, | 589 | .show_options = bpf_show_options, |
| 586 | .evict_inode = bpf_evict_inode, | 590 | .destroy_inode = bpf_destroy_inode, |
| 587 | }; | 591 | }; |
| 588 | 592 | ||
| 589 | enum { | 593 | enum { |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fd502c1f71eb..6c5a41f7f338 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -1897,8 +1897,9 @@ continue_func: | |||
| 1897 | } | 1897 | } |
| 1898 | frame++; | 1898 | frame++; |
| 1899 | if (frame >= MAX_CALL_FRAMES) { | 1899 | if (frame >= MAX_CALL_FRAMES) { |
| 1900 | WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); | 1900 | verbose(env, "the call stack of %d frames is too deep !\n", |
| 1901 | return -EFAULT; | 1901 | frame); |
| 1902 | return -E2BIG; | ||
| 1902 | } | 1903 | } |
| 1903 | goto process_func; | 1904 | goto process_func; |
| 1904 | } | 1905 | } |
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 45d51e8e26f6..a218e43cc382 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c | |||
| @@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void) | |||
| 706 | #ifdef CONFIG_STACKTRACE | 706 | #ifdef CONFIG_STACKTRACE |
| 707 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; | 707 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; |
| 708 | entry->stacktrace.entries = entry->st_entries; | 708 | entry->stacktrace.entries = entry->st_entries; |
| 709 | entry->stacktrace.skip = 2; | 709 | entry->stacktrace.skip = 1; |
| 710 | save_stack_trace(&entry->stacktrace); | 710 | save_stack_trace(&entry->stacktrace); |
| 711 | #endif | 711 | #endif |
| 712 | 712 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 72d06e302e99..dc7dead2d2cc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event, | |||
| 2009 | event->pmu->del(event, 0); | 2009 | event->pmu->del(event, 0); |
| 2010 | event->oncpu = -1; | 2010 | event->oncpu = -1; |
| 2011 | 2011 | ||
| 2012 | if (event->pending_disable) { | 2012 | if (READ_ONCE(event->pending_disable) >= 0) { |
| 2013 | event->pending_disable = 0; | 2013 | WRITE_ONCE(event->pending_disable, -1); |
| 2014 | state = PERF_EVENT_STATE_OFF; | 2014 | state = PERF_EVENT_STATE_OFF; |
| 2015 | } | 2015 | } |
| 2016 | perf_event_set_state(event, state); | 2016 | perf_event_set_state(event, state); |
| @@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable); | |||
| 2198 | 2198 | ||
| 2199 | void perf_event_disable_inatomic(struct perf_event *event) | 2199 | void perf_event_disable_inatomic(struct perf_event *event) |
| 2200 | { | 2200 | { |
| 2201 | event->pending_disable = 1; | 2201 | WRITE_ONCE(event->pending_disable, smp_processor_id()); |
| 2202 | /* can fail, see perf_pending_event_disable() */ | ||
| 2202 | irq_work_queue(&event->pending); | 2203 | irq_work_queue(&event->pending); |
| 2203 | } | 2204 | } |
| 2204 | 2205 | ||
| @@ -5810,10 +5811,45 @@ void perf_event_wakeup(struct perf_event *event) | |||
| 5810 | } | 5811 | } |
| 5811 | } | 5812 | } |
| 5812 | 5813 | ||
| 5814 | static void perf_pending_event_disable(struct perf_event *event) | ||
| 5815 | { | ||
| 5816 | int cpu = READ_ONCE(event->pending_disable); | ||
| 5817 | |||
| 5818 | if (cpu < 0) | ||
| 5819 | return; | ||
| 5820 | |||
| 5821 | if (cpu == smp_processor_id()) { | ||
| 5822 | WRITE_ONCE(event->pending_disable, -1); | ||
| 5823 | perf_event_disable_local(event); | ||
| 5824 | return; | ||
| 5825 | } | ||
| 5826 | |||
| 5827 | /* | ||
| 5828 | * CPU-A CPU-B | ||
| 5829 | * | ||
| 5830 | * perf_event_disable_inatomic() | ||
| 5831 | * @pending_disable = CPU-A; | ||
| 5832 | * irq_work_queue(); | ||
| 5833 | * | ||
| 5834 | * sched-out | ||
| 5835 | * @pending_disable = -1; | ||
| 5836 | * | ||
| 5837 | * sched-in | ||
| 5838 | * perf_event_disable_inatomic() | ||
| 5839 | * @pending_disable = CPU-B; | ||
| 5840 | * irq_work_queue(); // FAILS | ||
| 5841 | * | ||
| 5842 | * irq_work_run() | ||
| 5843 | * perf_pending_event() | ||
| 5844 | * | ||
| 5845 | * But the event runs on CPU-B and wants disabling there. | ||
| 5846 | */ | ||
| 5847 | irq_work_queue_on(&event->pending, cpu); | ||
| 5848 | } | ||
| 5849 | |||
| 5813 | static void perf_pending_event(struct irq_work *entry) | 5850 | static void perf_pending_event(struct irq_work *entry) |
| 5814 | { | 5851 | { |
| 5815 | struct perf_event *event = container_of(entry, | 5852 | struct perf_event *event = container_of(entry, struct perf_event, pending); |
| 5816 | struct perf_event, pending); | ||
| 5817 | int rctx; | 5853 | int rctx; |
| 5818 | 5854 | ||
| 5819 | rctx = perf_swevent_get_recursion_context(); | 5855 | rctx = perf_swevent_get_recursion_context(); |
| @@ -5822,10 +5858,7 @@ static void perf_pending_event(struct irq_work *entry) | |||
| 5822 | * and we won't recurse 'further'. | 5858 | * and we won't recurse 'further'. |
| 5823 | */ | 5859 | */ |
| 5824 | 5860 | ||
| 5825 | if (event->pending_disable) { | 5861 | perf_pending_event_disable(event); |
| 5826 | event->pending_disable = 0; | ||
| 5827 | perf_event_disable_local(event); | ||
| 5828 | } | ||
| 5829 | 5862 | ||
| 5830 | if (event->pending_wakeup) { | 5863 | if (event->pending_wakeup) { |
| 5831 | event->pending_wakeup = 0; | 5864 | event->pending_wakeup = 0; |
| @@ -9044,26 +9077,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event) | |||
| 9044 | if (task == TASK_TOMBSTONE) | 9077 | if (task == TASK_TOMBSTONE) |
| 9045 | return; | 9078 | return; |
| 9046 | 9079 | ||
| 9047 | if (!ifh->nr_file_filters) | 9080 | if (ifh->nr_file_filters) { |
| 9048 | return; | 9081 | mm = get_task_mm(event->ctx->task); |
| 9049 | 9082 | if (!mm) | |
| 9050 | mm = get_task_mm(event->ctx->task); | 9083 | goto restart; |
| 9051 | if (!mm) | ||
| 9052 | goto restart; | ||
| 9053 | 9084 | ||
| 9054 | down_read(&mm->mmap_sem); | 9085 | down_read(&mm->mmap_sem); |
| 9086 | } | ||
| 9055 | 9087 | ||
| 9056 | raw_spin_lock_irqsave(&ifh->lock, flags); | 9088 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 9057 | list_for_each_entry(filter, &ifh->list, entry) { | 9089 | list_for_each_entry(filter, &ifh->list, entry) { |
| 9058 | event->addr_filter_ranges[count].start = 0; | 9090 | if (filter->path.dentry) { |
| 9059 | event->addr_filter_ranges[count].size = 0; | 9091 | /* |
| 9092 | * Adjust base offset if the filter is associated to a | ||
| 9093 | * binary that needs to be mapped: | ||
| 9094 | */ | ||
| 9095 | event->addr_filter_ranges[count].start = 0; | ||
| 9096 | event->addr_filter_ranges[count].size = 0; | ||
| 9060 | 9097 | ||
| 9061 | /* | ||
| 9062 | * Adjust base offset if the filter is associated to a binary | ||
| 9063 | * that needs to be mapped: | ||
| 9064 | */ | ||
| 9065 | if (filter->path.dentry) | ||
| 9066 | perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); | 9098 | perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); |
| 9099 | } else { | ||
| 9100 | event->addr_filter_ranges[count].start = filter->offset; | ||
| 9101 | event->addr_filter_ranges[count].size = filter->size; | ||
| 9102 | } | ||
| 9067 | 9103 | ||
| 9068 | count++; | 9104 | count++; |
| 9069 | } | 9105 | } |
| @@ -9071,9 +9107,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event) | |||
| 9071 | event->addr_filters_gen++; | 9107 | event->addr_filters_gen++; |
| 9072 | raw_spin_unlock_irqrestore(&ifh->lock, flags); | 9108 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 9073 | 9109 | ||
| 9074 | up_read(&mm->mmap_sem); | 9110 | if (ifh->nr_file_filters) { |
| 9111 | up_read(&mm->mmap_sem); | ||
| 9075 | 9112 | ||
| 9076 | mmput(mm); | 9113 | mmput(mm); |
| 9114 | } | ||
| 9077 | 9115 | ||
| 9078 | restart: | 9116 | restart: |
| 9079 | perf_event_stop(event, 1); | 9117 | perf_event_stop(event, 1); |
| @@ -10236,6 +10274,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
| 10236 | 10274 | ||
| 10237 | 10275 | ||
| 10238 | init_waitqueue_head(&event->waitq); | 10276 | init_waitqueue_head(&event->waitq); |
| 10277 | event->pending_disable = -1; | ||
| 10239 | init_irq_work(&event->pending, perf_pending_event); | 10278 | init_irq_work(&event->pending, perf_pending_event); |
| 10240 | 10279 | ||
| 10241 | mutex_init(&event->mmap_mutex); | 10280 | mutex_init(&event->mmap_mutex); |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index a4047321d7d8..5eedb49a65ea 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
| @@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, | |||
| 392 | * store that will be enabled on successful return | 392 | * store that will be enabled on successful return |
| 393 | */ | 393 | */ |
| 394 | if (!handle->size) { /* A, matches D */ | 394 | if (!handle->size) { /* A, matches D */ |
| 395 | event->pending_disable = 1; | 395 | event->pending_disable = smp_processor_id(); |
| 396 | perf_output_wakeup(handle); | 396 | perf_output_wakeup(handle); |
| 397 | local_set(&rb->aux_nest, 0); | 397 | local_set(&rb->aux_nest, 0); |
| 398 | goto err_put; | 398 | goto err_put; |
| @@ -455,24 +455,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) | |||
| 455 | rb->aux_head += size; | 455 | rb->aux_head += size; |
| 456 | } | 456 | } |
| 457 | 457 | ||
| 458 | if (size || handle->aux_flags) { | 458 | /* |
| 459 | /* | 459 | * Only send RECORD_AUX if we have something useful to communicate |
| 460 | * Only send RECORD_AUX if we have something useful to communicate | 460 | * |
| 461 | * | 461 | * Note: the OVERWRITE records by themselves are not considered |
| 462 | * Note: the OVERWRITE records by themselves are not considered | 462 | * useful, as they don't communicate any *new* information, |
| 463 | * useful, as they don't communicate any *new* information, | 463 | * aside from the short-lived offset, that becomes history at |
| 464 | * aside from the short-lived offset, that becomes history at | 464 | * the next event sched-in and therefore isn't useful. |
| 465 | * the next event sched-in and therefore isn't useful. | 465 | * The userspace that needs to copy out AUX data in overwrite |
| 466 | * The userspace that needs to copy out AUX data in overwrite | 466 | * mode should know to use user_page::aux_head for the actual |
| 467 | * mode should know to use user_page::aux_head for the actual | 467 | * offset. So, from now on we don't output AUX records that |
| 468 | * offset. So, from now on we don't output AUX records that | 468 | * have *only* OVERWRITE flag set. |
| 469 | * have *only* OVERWRITE flag set. | 469 | */ |
| 470 | */ | 470 | if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)) |
| 471 | 471 | perf_event_aux_event(handle->event, aux_head, size, | |
| 472 | if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE) | 472 | handle->aux_flags); |
| 473 | perf_event_aux_event(handle->event, aux_head, size, | ||
| 474 | handle->aux_flags); | ||
| 475 | } | ||
| 476 | 473 | ||
| 477 | rb->user_page->aux_head = rb->aux_head; | 474 | rb->user_page->aux_head = rb->aux_head; |
| 478 | if (rb_need_aux_wakeup(rb)) | 475 | if (rb_need_aux_wakeup(rb)) |
| @@ -480,7 +477,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) | |||
| 480 | 477 | ||
| 481 | if (wakeup) { | 478 | if (wakeup) { |
| 482 | if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) | 479 | if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) |
| 483 | handle->event->pending_disable = 1; | 480 | handle->event->pending_disable = smp_processor_id(); |
| 484 | perf_output_wakeup(handle); | 481 | perf_output_wakeup(handle); |
| 485 | } | 482 | } |
| 486 | 483 | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 3faef4a77f71..51128bea3846 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) | |||
| 1449 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | 1449 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) |
| 1450 | { | 1450 | { |
| 1451 | data = data->parent_data; | 1451 | data = data->parent_data; |
| 1452 | |||
| 1453 | if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) | ||
| 1454 | return 0; | ||
| 1455 | |||
| 1452 | if (data->chip->irq_set_wake) | 1456 | if (data->chip->irq_set_wake) |
| 1453 | return data->chip->irq_set_wake(data, on); | 1457 | return data->chip->irq_set_wake(data, on); |
| 1454 | 1458 | ||
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 13539e12cd80..9f8a709337cf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -558,6 +558,7 @@ int __init early_irq_init(void) | |||
| 558 | alloc_masks(&desc[i], node); | 558 | alloc_masks(&desc[i], node); |
| 559 | raw_spin_lock_init(&desc[i].lock); | 559 | raw_spin_lock_init(&desc[i].lock); |
| 560 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 560 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
| 561 | mutex_init(&desc[i].request_mutex); | ||
| 561 | desc_set_defaults(i, &desc[i], node, NULL, NULL); | 562 | desc_set_defaults(i, &desc[i], node, NULL, NULL); |
| 562 | } | 563 | } |
| 563 | return arch_early_irq_init(); | 564 | return arch_early_irq_init(); |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index c83e54727131..b1ea30a5540e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force) | |||
| 709 | static int reuse_unused_kprobe(struct kprobe *ap) | 709 | static int reuse_unused_kprobe(struct kprobe *ap) |
| 710 | { | 710 | { |
| 711 | struct optimized_kprobe *op; | 711 | struct optimized_kprobe *op; |
| 712 | int ret; | ||
| 713 | 712 | ||
| 714 | /* | 713 | /* |
| 715 | * Unused kprobe MUST be on the way of delayed unoptimizing (means | 714 | * Unused kprobe MUST be on the way of delayed unoptimizing (means |
| @@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap) | |||
| 720 | /* Enable the probe again */ | 719 | /* Enable the probe again */ |
| 721 | ap->flags &= ~KPROBE_FLAG_DISABLED; | 720 | ap->flags &= ~KPROBE_FLAG_DISABLED; |
| 722 | /* Optimize it again (remove from op->list) */ | 721 | /* Optimize it again (remove from op->list) */ |
| 723 | ret = kprobe_optready(ap); | 722 | if (!kprobe_optready(ap)) |
| 724 | if (ret) | 723 | return -EINVAL; |
| 725 | return ret; | ||
| 726 | 724 | ||
| 727 | optimize_kprobe(ap); | 725 | optimize_kprobe(ap); |
| 728 | return 0; | 726 | return 0; |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 34cdcbedda49..e221be724fe8 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch) | |||
| 4689 | return; | 4689 | return; |
| 4690 | 4690 | ||
| 4691 | raw_local_irq_save(flags); | 4691 | raw_local_irq_save(flags); |
| 4692 | if (!graph_lock()) | 4692 | arch_spin_lock(&lockdep_lock); |
| 4693 | goto out_irq; | 4693 | current->lockdep_recursion = 1; |
| 4694 | 4694 | ||
| 4695 | /* closed head */ | 4695 | /* closed head */ |
| 4696 | pf = delayed_free.pf + (delayed_free.index ^ 1); | 4696 | pf = delayed_free.pf + (delayed_free.index ^ 1); |
| @@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch) | |||
| 4702 | */ | 4702 | */ |
| 4703 | call_rcu_zapped(delayed_free.pf + delayed_free.index); | 4703 | call_rcu_zapped(delayed_free.pf + delayed_free.index); |
| 4704 | 4704 | ||
| 4705 | graph_unlock(); | 4705 | current->lockdep_recursion = 0; |
| 4706 | out_irq: | 4706 | arch_spin_unlock(&lockdep_lock); |
| 4707 | raw_local_irq_restore(flags); | 4707 | raw_local_irq_restore(flags); |
| 4708 | } | 4708 | } |
| 4709 | 4709 | ||
| @@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) | |||
| 4744 | { | 4744 | { |
| 4745 | struct pending_free *pf; | 4745 | struct pending_free *pf; |
| 4746 | unsigned long flags; | 4746 | unsigned long flags; |
| 4747 | int locked; | ||
| 4748 | 4747 | ||
| 4749 | init_data_structures_once(); | 4748 | init_data_structures_once(); |
| 4750 | 4749 | ||
| 4751 | raw_local_irq_save(flags); | 4750 | raw_local_irq_save(flags); |
| 4752 | locked = graph_lock(); | 4751 | arch_spin_lock(&lockdep_lock); |
| 4753 | if (!locked) | 4752 | current->lockdep_recursion = 1; |
| 4754 | goto out_irq; | ||
| 4755 | |||
| 4756 | pf = get_pending_free(); | 4753 | pf = get_pending_free(); |
| 4757 | __lockdep_free_key_range(pf, start, size); | 4754 | __lockdep_free_key_range(pf, start, size); |
| 4758 | call_rcu_zapped(pf); | 4755 | call_rcu_zapped(pf); |
| 4759 | 4756 | current->lockdep_recursion = 0; | |
| 4760 | graph_unlock(); | 4757 | arch_spin_unlock(&lockdep_lock); |
| 4761 | out_irq: | ||
| 4762 | raw_local_irq_restore(flags); | 4758 | raw_local_irq_restore(flags); |
| 4763 | 4759 | ||
| 4764 | /* | 4760 | /* |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 6a73e41a2016..43901fa3f269 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p) | |||
| 252 | if (dl_entity_is_special(dl_se)) | 252 | if (dl_entity_is_special(dl_se)) |
| 253 | return; | 253 | return; |
| 254 | 254 | ||
| 255 | WARN_ON(hrtimer_active(&dl_se->inactive_timer)); | ||
| 256 | WARN_ON(dl_se->dl_non_contending); | 255 | WARN_ON(dl_se->dl_non_contending); |
| 257 | 256 | ||
| 258 | zerolag_time = dl_se->deadline - | 257 | zerolag_time = dl_se->deadline - |
| @@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p) | |||
| 269 | * If the "0-lag time" already passed, decrease the active | 268 | * If the "0-lag time" already passed, decrease the active |
| 270 | * utilization now, instead of starting a timer | 269 | * utilization now, instead of starting a timer |
| 271 | */ | 270 | */ |
| 272 | if (zerolag_time < 0) { | 271 | if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { |
| 273 | if (dl_task(p)) | 272 | if (dl_task(p)) |
| 274 | sub_running_bw(dl_se, dl_rq); | 273 | sub_running_bw(dl_se, dl_rq); |
| 275 | if (!dl_task(p) || p->state == TASK_DEAD) { | 274 | if (!dl_task(p) || p->state == TASK_DEAD) { |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fdab7eb6f351..a4d9e14bf138 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -4885,6 +4885,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) | |||
| 4885 | return HRTIMER_NORESTART; | 4885 | return HRTIMER_NORESTART; |
| 4886 | } | 4886 | } |
| 4887 | 4887 | ||
| 4888 | extern const u64 max_cfs_quota_period; | ||
| 4889 | |||
| 4888 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) | 4890 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) |
| 4889 | { | 4891 | { |
| 4890 | struct cfs_bandwidth *cfs_b = | 4892 | struct cfs_bandwidth *cfs_b = |
| @@ -4892,6 +4894,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) | |||
| 4892 | unsigned long flags; | 4894 | unsigned long flags; |
| 4893 | int overrun; | 4895 | int overrun; |
| 4894 | int idle = 0; | 4896 | int idle = 0; |
| 4897 | int count = 0; | ||
| 4895 | 4898 | ||
| 4896 | raw_spin_lock_irqsave(&cfs_b->lock, flags); | 4899 | raw_spin_lock_irqsave(&cfs_b->lock, flags); |
| 4897 | for (;;) { | 4900 | for (;;) { |
| @@ -4899,6 +4902,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) | |||
| 4899 | if (!overrun) | 4902 | if (!overrun) |
| 4900 | break; | 4903 | break; |
| 4901 | 4904 | ||
| 4905 | if (++count > 3) { | ||
| 4906 | u64 new, old = ktime_to_ns(cfs_b->period); | ||
| 4907 | |||
| 4908 | new = (old * 147) / 128; /* ~115% */ | ||
| 4909 | new = min(new, max_cfs_quota_period); | ||
| 4910 | |||
| 4911 | cfs_b->period = ns_to_ktime(new); | ||
| 4912 | |||
| 4913 | /* since max is 1s, this is limited to 1e9^2, which fits in u64 */ | ||
| 4914 | cfs_b->quota *= new; | ||
| 4915 | cfs_b->quota = div64_u64(cfs_b->quota, old); | ||
| 4916 | |||
| 4917 | pr_warn_ratelimited( | ||
| 4918 | "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n", | ||
| 4919 | smp_processor_id(), | ||
| 4920 | div_u64(new, NSEC_PER_USEC), | ||
| 4921 | div_u64(cfs_b->quota, NSEC_PER_USEC)); | ||
| 4922 | |||
| 4923 | /* reset count so we don't come right back in here */ | ||
| 4924 | count = 0; | ||
| 4925 | } | ||
| 4926 | |||
| 4902 | idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); | 4927 | idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); |
| 4903 | } | 4928 | } |
| 4904 | if (idle) | 4929 | if (idle) |
| @@ -7784,10 +7809,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) | |||
| 7784 | if (cfs_rq->last_h_load_update == now) | 7809 | if (cfs_rq->last_h_load_update == now) |
| 7785 | return; | 7810 | return; |
| 7786 | 7811 | ||
| 7787 | cfs_rq->h_load_next = NULL; | 7812 | WRITE_ONCE(cfs_rq->h_load_next, NULL); |
| 7788 | for_each_sched_entity(se) { | 7813 | for_each_sched_entity(se) { |
| 7789 | cfs_rq = cfs_rq_of(se); | 7814 | cfs_rq = cfs_rq_of(se); |
| 7790 | cfs_rq->h_load_next = se; | 7815 | WRITE_ONCE(cfs_rq->h_load_next, se); |
| 7791 | if (cfs_rq->last_h_load_update == now) | 7816 | if (cfs_rq->last_h_load_update == now) |
| 7792 | break; | 7817 | break; |
| 7793 | } | 7818 | } |
| @@ -7797,7 +7822,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) | |||
| 7797 | cfs_rq->last_h_load_update = now; | 7822 | cfs_rq->last_h_load_update = now; |
| 7798 | } | 7823 | } |
| 7799 | 7824 | ||
| 7800 | while ((se = cfs_rq->h_load_next) != NULL) { | 7825 | while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { |
| 7801 | load = cfs_rq->h_load; | 7826 | load = cfs_rq->h_load; |
| 7802 | load = div64_ul(load * se->avg.load_avg, | 7827 | load = div64_ul(load * se->avg.load_avg, |
| 7803 | cfs_rq_load_avg(cfs_rq) + 1); | 7828 | cfs_rq_load_avg(cfs_rq) + 1); |
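The new escape hatch in sched_cfs_period_timer() grows the period by 147/128 (about 115%) each time the handler detects it is looping, and scales the quota by the same factor so the effective bandwidth ratio is preserved. A small userspace sketch of one scaling step with made-up starting values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long old_period = 100000;	/* 100 us, hypothetical */
		unsigned long long quota      = 50000;	/*  50 us, hypothetical */

		unsigned long long new_period = old_period * 147 / 128;	/* ~114843 ns */
		quota = quota * new_period / old_period;			/* ~57421 ns  */

		printf("period %llu -> %llu ns, quota -> %llu ns (ratio preserved)\n",
		       old_period, new_period, quota);
		return 0;
	}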
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 54a0347ca812..df27e499956a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd) | |||
| 149 | 149 | ||
| 150 | sd->nr = syscall_get_nr(task, regs); | 150 | sd->nr = syscall_get_nr(task, regs); |
| 151 | sd->arch = syscall_get_arch(); | 151 | sd->arch = syscall_get_arch(); |
| 152 | syscall_get_arguments(task, regs, 0, 6, args); | 152 | syscall_get_arguments(task, regs, args); |
| 153 | sd->args[0] = args[0]; | 153 | sd->args[0] = args[0]; |
| 154 | sd->args[1] = args[1]; | 154 | sd->args[1] = args[1]; |
| 155 | sd->args[2] = args[2]; | 155 | sd->args[2] = args[2]; |
diff --git a/kernel/signal.c b/kernel/signal.c index b7953934aa99..227ba170298e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -3581,7 +3581,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, | |||
| 3581 | if (flags) | 3581 | if (flags) |
| 3582 | return -EINVAL; | 3582 | return -EINVAL; |
| 3583 | 3583 | ||
| 3584 | f = fdget_raw(pidfd); | 3584 | f = fdget(pidfd); |
| 3585 | if (!f.file) | 3585 | if (!f.file) |
| 3586 | return -EBADF; | 3586 | return -EBADF; |
| 3587 | 3587 | ||
| @@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, | |||
| 3605 | if (unlikely(sig != kinfo.si_signo)) | 3605 | if (unlikely(sig != kinfo.si_signo)) |
| 3606 | goto err; | 3606 | goto err; |
| 3607 | 3607 | ||
| 3608 | /* Only allow sending arbitrary signals to yourself. */ | ||
| 3609 | ret = -EPERM; | ||
| 3608 | if ((task_pid(current) != pid) && | 3610 | if ((task_pid(current) != pid) && |
| 3609 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) { | 3611 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) |
| 3610 | /* Only allow sending arbitrary signals to yourself. */ | 3612 | goto err; |
| 3611 | ret = -EPERM; | ||
| 3612 | if (kinfo.si_code != SI_USER) | ||
| 3613 | goto err; | ||
| 3614 | |||
| 3615 | /* Turn this into a regular kill signal. */ | ||
| 3616 | prepare_kill_siginfo(sig, &kinfo); | ||
| 3617 | } | ||
| 3618 | } else { | 3613 | } else { |
| 3619 | prepare_kill_siginfo(sig, &kinfo); | 3614 | prepare_kill_siginfo(sig, &kinfo); |
| 3620 | } | 3615 | } |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e5da394d1ca3..c9ec050bcf46 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -128,6 +128,7 @@ static int zero; | |||
| 128 | static int __maybe_unused one = 1; | 128 | static int __maybe_unused one = 1; |
| 129 | static int __maybe_unused two = 2; | 129 | static int __maybe_unused two = 2; |
| 130 | static int __maybe_unused four = 4; | 130 | static int __maybe_unused four = 4; |
| 131 | static unsigned long zero_ul; | ||
| 131 | static unsigned long one_ul = 1; | 132 | static unsigned long one_ul = 1; |
| 132 | static unsigned long long_max = LONG_MAX; | 133 | static unsigned long long_max = LONG_MAX; |
| 133 | static int one_hundred = 100; | 134 | static int one_hundred = 100; |
| @@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = { | |||
| 1750 | .maxlen = sizeof(files_stat.max_files), | 1751 | .maxlen = sizeof(files_stat.max_files), |
| 1751 | .mode = 0644, | 1752 | .mode = 0644, |
| 1752 | .proc_handler = proc_doulongvec_minmax, | 1753 | .proc_handler = proc_doulongvec_minmax, |
| 1753 | .extra1 = &zero, | 1754 | .extra1 = &zero_ul, |
| 1754 | .extra2 = &long_max, | 1755 | .extra2 = &long_max, |
| 1755 | }, | 1756 | }, |
| 1756 | { | 1757 | { |
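The sysctl.c hunk adds zero_ul because proc_doulongvec_minmax() dereferences extra1/extra2 as unsigned long; pointing extra1 at the int-sized "zero" makes the handler read more bytes than the object holds. A sketch of the matching convention for a hypothetical table entry (names invented for illustration):

	#include <linux/kernel.h>
	#include <linux/sysctl.h>

	static unsigned long example_value;
	static unsigned long example_min;		/* 0 */
	static unsigned long example_max = LONG_MAX;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example-max",
			.data		= &example_value,
			.maxlen		= sizeof(example_value),
			.mode		= 0644,
			.proc_handler	= proc_doulongvec_minmax,
			/* bounds must be unsigned long to match the handler */
			.extra1		= &example_min,
			.extra2		= &example_max,
		},
		{ }
	};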
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 2c97e8c2d29f..0519a8805aab 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
| @@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now) | |||
| 594 | { | 594 | { |
| 595 | struct alarm *alarm = &timr->it.alarm.alarmtimer; | 595 | struct alarm *alarm = &timr->it.alarm.alarmtimer; |
| 596 | 596 | ||
| 597 | return ktime_sub(now, alarm->node.expires); | 597 | return ktime_sub(alarm->node.expires, now); |
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | /** | 600 | /** |
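The one-line alarmtimer fix swaps the ktime_sub() operands: the time remaining on a timer is expires - now, not now - expires. As a quick check with made-up values, now = 1,000,000 ns and expires = 1,500,000 ns should yield 500,000 ns remaining; the old operand order returned -500,000 ns, reporting an armed timer as already expired.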
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 094b82ca95e5..930113b9799a 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c | |||
| @@ -272,7 +272,7 @@ static u64 notrace suspended_sched_clock_read(void) | |||
| 272 | return cd.read_data[seq & 1].epoch_cyc; | 272 | return cd.read_data[seq & 1].epoch_cyc; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | static int sched_clock_suspend(void) | 275 | int sched_clock_suspend(void) |
| 276 | { | 276 | { |
| 277 | struct clock_read_data *rd = &cd.read_data[0]; | 277 | struct clock_read_data *rd = &cd.read_data[0]; |
| 278 | 278 | ||
| @@ -283,7 +283,7 @@ static int sched_clock_suspend(void) | |||
| 283 | return 0; | 283 | return 0; |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | static void sched_clock_resume(void) | 286 | void sched_clock_resume(void) |
| 287 | { | 287 | { |
| 288 | struct clock_read_data *rd = &cd.read_data[0]; | 288 | struct clock_read_data *rd = &cd.read_data[0]; |
| 289 | 289 | ||
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 529143b4c8d2..df401463a191 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -487,6 +487,7 @@ void tick_freeze(void) | |||
| 487 | trace_suspend_resume(TPS("timekeeping_freeze"), | 487 | trace_suspend_resume(TPS("timekeeping_freeze"), |
| 488 | smp_processor_id(), true); | 488 | smp_processor_id(), true); |
| 489 | system_state = SYSTEM_SUSPEND; | 489 | system_state = SYSTEM_SUSPEND; |
| 490 | sched_clock_suspend(); | ||
| 490 | timekeeping_suspend(); | 491 | timekeeping_suspend(); |
| 491 | } else { | 492 | } else { |
| 492 | tick_suspend_local(); | 493 | tick_suspend_local(); |
| @@ -510,6 +511,7 @@ void tick_unfreeze(void) | |||
| 510 | 511 | ||
| 511 | if (tick_freeze_depth == num_online_cpus()) { | 512 | if (tick_freeze_depth == num_online_cpus()) { |
| 512 | timekeeping_resume(); | 513 | timekeeping_resume(); |
| 514 | sched_clock_resume(); | ||
| 513 | system_state = SYSTEM_RUNNING; | 515 | system_state = SYSTEM_RUNNING; |
| 514 | trace_suspend_resume(TPS("timekeeping_freeze"), | 516 | trace_suspend_resume(TPS("timekeeping_freeze"), |
| 515 | smp_processor_id(), false); | 517 | smp_processor_id(), false); |
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index 7a9b4eb7a1d5..141ab3ab0354 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h | |||
| @@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void); | |||
| 14 | extern void timekeeping_warp_clock(void); | 14 | extern void timekeeping_warp_clock(void); |
| 15 | extern int timekeeping_suspend(void); | 15 | extern int timekeeping_suspend(void); |
| 16 | extern void timekeeping_resume(void); | 16 | extern void timekeeping_resume(void); |
| 17 | #ifdef CONFIG_GENERIC_SCHED_CLOCK | ||
| 18 | extern int sched_clock_suspend(void); | ||
| 19 | extern void sched_clock_resume(void); | ||
| 20 | #else | ||
| 21 | static inline int sched_clock_suspend(void) { return 0; } | ||
| 22 | static inline void sched_clock_resume(void) { } | ||
| 23 | #endif | ||
| 17 | 24 | ||
| 18 | extern void do_timer(unsigned long ticks); | 25 | extern void do_timer(unsigned long ticks); |
| 19 | extern void update_wall_time(void); | 26 | extern void update_wall_time(void); |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 26c8ca9bd06b..b920358dd8f7 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
| 34 | #include <linux/hash.h> | 34 | #include <linux/hash.h> |
| 35 | #include <linux/rcupdate.h> | 35 | #include <linux/rcupdate.h> |
| 36 | #include <linux/kprobes.h> | ||
| 36 | 37 | ||
| 37 | #include <trace/events/sched.h> | 38 | #include <trace/events/sched.h> |
| 38 | 39 | ||
| @@ -6246,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr) | |||
| 6246 | tr->ops->func = ftrace_stub; | 6247 | tr->ops->func = ftrace_stub; |
| 6247 | } | 6248 | } |
| 6248 | 6249 | ||
| 6249 | static inline void | 6250 | static nokprobe_inline void |
| 6250 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | 6251 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
| 6251 | struct ftrace_ops *ignored, struct pt_regs *regs) | 6252 | struct ftrace_ops *ignored, struct pt_regs *regs) |
| 6252 | { | 6253 | { |
| @@ -6306,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
| 6306 | { | 6307 | { |
| 6307 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); | 6308 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); |
| 6308 | } | 6309 | } |
| 6310 | NOKPROBE_SYMBOL(ftrace_ops_list_func); | ||
| 6309 | #else | 6311 | #else |
| 6310 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | 6312 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) |
| 6311 | { | 6313 | { |
| 6312 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); | 6314 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
| 6313 | } | 6315 | } |
| 6316 | NOKPROBE_SYMBOL(ftrace_ops_no_ops); | ||
| 6314 | #endif | 6317 | #endif |
| 6315 | 6318 | ||
| 6316 | /* | 6319 | /* |
| @@ -6337,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, | |||
| 6337 | preempt_enable_notrace(); | 6340 | preempt_enable_notrace(); |
| 6338 | trace_clear_recursion(bit); | 6341 | trace_clear_recursion(bit); |
| 6339 | } | 6342 | } |
| 6343 | NOKPROBE_SYMBOL(ftrace_ops_assist_func); | ||
| 6340 | 6344 | ||
| 6341 | /** | 6345 | /** |
| 6342 | * ftrace_ops_get_func - get the function a trampoline should call | 6346 | * ftrace_ops_get_func - get the function a trampoline should call |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 21153e64bf1c..6c24755655c7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -7041,12 +7041,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | |||
| 7041 | buf->private = 0; | 7041 | buf->private = 0; |
| 7042 | } | 7042 | } |
| 7043 | 7043 | ||
| 7044 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | 7044 | static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, |
| 7045 | struct pipe_buffer *buf) | 7045 | struct pipe_buffer *buf) |
| 7046 | { | 7046 | { |
| 7047 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | 7047 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; |
| 7048 | 7048 | ||
| 7049 | if (ref->ref > INT_MAX/2) | ||
| 7050 | return false; | ||
| 7051 | |||
| 7049 | ref->ref++; | 7052 | ref->ref++; |
| 7053 | return true; | ||
| 7050 | } | 7054 | } |
| 7051 | 7055 | ||
| 7052 | /* Pipe buffer operations for a buffer. */ | 7056 | /* Pipe buffer operations for a buffer. */ |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index f93a56d2db27..fa8fbff736d6 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 314 | struct ring_buffer_event *event; | 314 | struct ring_buffer_event *event; |
| 315 | struct ring_buffer *buffer; | 315 | struct ring_buffer *buffer; |
| 316 | unsigned long irq_flags; | 316 | unsigned long irq_flags; |
| 317 | unsigned long args[6]; | ||
| 317 | int pc; | 318 | int pc; |
| 318 | int syscall_nr; | 319 | int syscall_nr; |
| 319 | int size; | 320 | int size; |
| @@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 347 | 348 | ||
| 348 | entry = ring_buffer_event_data(event); | 349 | entry = ring_buffer_event_data(event); |
| 349 | entry->nr = syscall_nr; | 350 | entry->nr = syscall_nr; |
| 350 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); | 351 | syscall_get_arguments(current, regs, args); |
| 352 | memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args); | ||
| 351 | 353 | ||
| 352 | event_trigger_unlock_commit(trace_file, buffer, event, entry, | 354 | event_trigger_unlock_commit(trace_file, buffer, event, entry, |
| 353 | irq_flags, pc); | 355 | irq_flags, pc); |
| @@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
| 583 | struct syscall_metadata *sys_data; | 585 | struct syscall_metadata *sys_data; |
| 584 | struct syscall_trace_enter *rec; | 586 | struct syscall_trace_enter *rec; |
| 585 | struct hlist_head *head; | 587 | struct hlist_head *head; |
| 588 | unsigned long args[6]; | ||
| 586 | bool valid_prog_array; | 589 | bool valid_prog_array; |
| 587 | int syscall_nr; | 590 | int syscall_nr; |
| 588 | int rctx; | 591 | int rctx; |
| @@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
| 613 | return; | 616 | return; |
| 614 | 617 | ||
| 615 | rec->nr = syscall_nr; | 618 | rec->nr = syscall_nr; |
| 616 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 619 | syscall_get_arguments(current, regs, args); |
| 617 | (unsigned long *)&rec->args); | 620 | memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args); |
| 618 | 621 | ||
| 619 | if ((valid_prog_array && | 622 | if ((valid_prog_array && |
| 620 | !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || | 623 | !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || |
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 71381168dede..247bf0b1582c 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
| @@ -135,7 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event, | |||
| 135 | if (__this_cpu_read(hard_watchdog_warn) == true) | 135 | if (__this_cpu_read(hard_watchdog_warn) == true) |
| 136 | return; | 136 | return; |
| 137 | 137 | ||
| 138 | pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); | 138 | pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", |
| 139 | this_cpu); | ||
| 139 | print_modules(); | 140 | print_modules(); |
| 140 | print_irqtrace_events(current); | 141 | print_irqtrace_events(current); |
| 141 | if (regs) | 142 | if (regs) |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0d9e81779e37..00dbcdbc9a0d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -753,9 +753,9 @@ endmenu # "Memory Debugging" | |||
| 753 | config ARCH_HAS_KCOV | 753 | config ARCH_HAS_KCOV |
| 754 | bool | 754 | bool |
| 755 | help | 755 | help |
| 756 | KCOV does not have any arch-specific code, but currently it is enabled | 756 | An architecture should select this when it can successfully |
| 757 | only for x86_64. KCOV requires testing on other archs, and most likely | 757 | build and run with CONFIG_KCOV. This typically requires |
| 758 | disabling of instrumentation for some early boot code. | 758 | disabling instrumentation for some early boot code. |
| 759 | 759 | ||
| 760 | config CC_HAS_SANCOV_TRACE_PC | 760 | config CC_HAS_SANCOV_TRACE_PC |
| 761 | def_bool $(cc-option,-fsanitize-coverage=trace-pc) | 761 | def_bool $(cc-option,-fsanitize-coverage=trace-pc) |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index ea36dc355da1..b396d328a764 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter); | |||
| 1528 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, | 1528 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
| 1529 | struct iov_iter *i) | 1529 | struct iov_iter *i) |
| 1530 | { | 1530 | { |
| 1531 | #ifdef CONFIG_CRYPTO | ||
| 1531 | struct ahash_request *hash = hashp; | 1532 | struct ahash_request *hash = hashp; |
| 1532 | struct scatterlist sg; | 1533 | struct scatterlist sg; |
| 1533 | size_t copied; | 1534 | size_t copied; |
| @@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, | |||
| 1537 | ahash_request_set_crypt(hash, &sg, NULL, copied); | 1538 | ahash_request_set_crypt(hash, &sg, NULL, copied); |
| 1538 | crypto_ahash_update(hash); | 1539 | crypto_ahash_update(hash); |
| 1539 | return copied; | 1540 | return copied; |
| 1541 | #else | ||
| 1542 | return 0; | ||
| 1543 | #endif | ||
| 1540 | } | 1544 | } |
| 1541 | EXPORT_SYMBOL(hash_and_copy_to_iter); | 1545 | EXPORT_SYMBOL(hash_and_copy_to_iter); |
| 1542 | 1546 | ||
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 4525fb094844..a8ede77afe0d 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c | |||
| @@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 291 | { | 291 | { |
| 292 | const unsigned char *ip = in; | 292 | const unsigned char *ip = in; |
| 293 | unsigned char *op = out; | 293 | unsigned char *op = out; |
| 294 | unsigned char *data_start; | ||
| 294 | size_t l = in_len; | 295 | size_t l = in_len; |
| 295 | size_t t = 0; | 296 | size_t t = 0; |
| 296 | signed char state_offset = -2; | 297 | signed char state_offset = -2; |
| 297 | unsigned int m4_max_offset; | 298 | unsigned int m4_max_offset; |
| 298 | 299 | ||
| 299 | // LZO v0 will never write 17 as first byte, | 300 | // LZO v0 will never write 17 as first byte (except for zero-length |
| 300 | // so this is used to version the bitstream | 301 | // input), so this is used to version the bitstream |
| 301 | if (bitstream_version > 0) { | 302 | if (bitstream_version > 0) { |
| 302 | *op++ = 17; | 303 | *op++ = 17; |
| 303 | *op++ = bitstream_version; | 304 | *op++ = bitstream_version; |
| @@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 306 | m4_max_offset = M4_MAX_OFFSET_V0; | 307 | m4_max_offset = M4_MAX_OFFSET_V0; |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 310 | data_start = op; | ||
| 311 | |||
| 309 | while (l > 20) { | 312 | while (l > 20) { |
| 310 | size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1); | 313 | size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1); |
| 311 | uintptr_t ll_end = (uintptr_t) ip + ll; | 314 | uintptr_t ll_end = (uintptr_t) ip + ll; |
| @@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, | |||
| 324 | if (t > 0) { | 327 | if (t > 0) { |
| 325 | const unsigned char *ii = in + in_len - t; | 328 | const unsigned char *ii = in + in_len - t; |
| 326 | 329 | ||
| 327 | if (op == out && t <= 238) { | 330 | if (op == data_start && t <= 238) { |
| 328 | *op++ = (17 + t); | 331 | *op++ = (17 + t); |
| 329 | } else if (t <= 3) { | 332 | } else if (t <= 3) { |
| 330 | op[state_offset] |= t; | 333 | op[state_offset] |= t; |
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index 6d2600ea3b55..9e07e9ef1aad 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c | |||
| @@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, | |||
| 54 | if (unlikely(in_len < 3)) | 54 | if (unlikely(in_len < 3)) |
| 55 | goto input_overrun; | 55 | goto input_overrun; |
| 56 | 56 | ||
| 57 | if (likely(*ip == 17)) { | 57 | if (likely(in_len >= 5) && likely(*ip == 17)) { |
| 58 | bitstream_version = ip[1]; | 58 | bitstream_version = ip[1]; |
| 59 | ip += 2; | 59 | ip += 2; |
| 60 | if (unlikely(in_len < 5)) | ||
| 61 | goto input_overrun; | ||
| 62 | } else { | 60 | } else { |
| 63 | bitstream_version = 0; | 61 | bitstream_version = 0; |
| 64 | } | 62 | } |
diff --git a/lib/string.c b/lib/string.c index 38e4ca08e757..3ab861c1a857 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count) | |||
| 866 | EXPORT_SYMBOL(memcmp); | 866 | EXPORT_SYMBOL(memcmp); |
| 867 | #endif | 867 | #endif |
| 868 | 868 | ||
| 869 | #ifndef __HAVE_ARCH_BCMP | ||
| 870 | /** | ||
| 871 | * bcmp - returns 0 if and only if the buffers have identical contents. | ||
| 872 | * @a: pointer to first buffer. | ||
| 873 | * @b: pointer to second buffer. | ||
| 874 | * @len: size of buffers. | ||
| 875 | * | ||
| 876 | * The sign or magnitude of a non-zero return value has no particular | ||
| 877 | * meaning, and architectures may implement their own more efficient bcmp(). So | ||
| 878 | * while this particular implementation is a simple (tail) call to memcmp, do | ||
| 879 | * not rely on anything but whether the return value is zero or non-zero. | ||
| 880 | */ | ||
| 881 | #undef bcmp | ||
| 882 | int bcmp(const void *a, const void *b, size_t len) | ||
| 883 | { | ||
| 884 | return memcmp(a, b, len); | ||
| 885 | } | ||
| 886 | EXPORT_SYMBOL(bcmp); | ||
| 887 | #endif | ||
| 888 | |||
| 869 | #ifndef __HAVE_ARCH_MEMSCAN | 889 | #ifndef __HAVE_ARCH_MEMSCAN |
| 870 | /** | 890 | /** |
| 871 | * memscan - Find a character in an area of memory. | 891 | * memscan - Find a character in an area of memory. |
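As the new kernel-doc stresses, only the zero/non-zero distinction of a bcmp() result is meaningful; the generic fallback happens to be a tail call to memcmp(), but callers must not depend on sign or magnitude. A small userspace-style sketch of the intended usage pattern, with memcmp() standing in for the fallback:

#include <string.h>
#include <stdbool.h>

/* Treat the comparison strictly as "equal / not equal"; never order
 * buffers by a bcmp()-style return value. */
static bool buffers_equal(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}

An architecture-specific bcmp() is free to return any non-zero value for differing buffers, which is why callers should only ever test the result against zero.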
diff --git a/lib/syscall.c b/lib/syscall.c index 1a7077f20eae..fb328e7ccb08 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
| @@ -5,16 +5,14 @@ | |||
| 5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
| 6 | #include <asm/syscall.h> | 6 | #include <asm/syscall.h> |
| 7 | 7 | ||
| 8 | static int collect_syscall(struct task_struct *target, long *callno, | 8 | static int collect_syscall(struct task_struct *target, struct syscall_info *info) |
| 9 | unsigned long args[6], unsigned int maxargs, | ||
| 10 | unsigned long *sp, unsigned long *pc) | ||
| 11 | { | 9 | { |
| 12 | struct pt_regs *regs; | 10 | struct pt_regs *regs; |
| 13 | 11 | ||
| 14 | if (!try_get_task_stack(target)) { | 12 | if (!try_get_task_stack(target)) { |
| 15 | /* Task has no stack, so the task isn't in a syscall. */ | 13 | /* Task has no stack, so the task isn't in a syscall. */ |
| 16 | *sp = *pc = 0; | 14 | memset(info, 0, sizeof(*info)); |
| 17 | *callno = -1; | 15 | info->data.nr = -1; |
| 18 | return 0; | 16 | return 0; |
| 19 | } | 17 | } |
| 20 | 18 | ||
| @@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 24 | return -EAGAIN; | 22 | return -EAGAIN; |
| 25 | } | 23 | } |
| 26 | 24 | ||
| 27 | *sp = user_stack_pointer(regs); | 25 | info->sp = user_stack_pointer(regs); |
| 28 | *pc = instruction_pointer(regs); | 26 | info->data.instruction_pointer = instruction_pointer(regs); |
| 29 | 27 | ||
| 30 | *callno = syscall_get_nr(target, regs); | 28 | info->data.nr = syscall_get_nr(target, regs); |
| 31 | if (*callno != -1L && maxargs > 0) | 29 | if (info->data.nr != -1L) |
| 32 | syscall_get_arguments(target, regs, 0, maxargs, args); | 30 | syscall_get_arguments(target, regs, |
| 31 | (unsigned long *)&info->data.args[0]); | ||
| 33 | 32 | ||
| 34 | put_task_stack(target); | 33 | put_task_stack(target); |
| 35 | return 0; | 34 | return 0; |
| @@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 38 | /** | 37 | /** |
| 39 | * task_current_syscall - Discover what a blocked task is doing. | 38 | * task_current_syscall - Discover what a blocked task is doing. |
| 40 | * @target: thread to examine | 39 | * @target: thread to examine |
| 41 | * @callno: filled with system call number or -1 | 40 | * @info: structure with the following fields: |
| 42 | * @args: filled with @maxargs system call arguments | 41 | * .sp - filled with user stack pointer |
| 43 | * @maxargs: number of elements in @args to fill | 42 | * .data.nr - filled with system call number or -1 |
| 44 | * @sp: filled with user stack pointer | 43 | * .data.args - filled with the system call arguments |

| 45 | * @pc: filled with user PC | 44 | * .data.instruction_pointer - filled with user PC |
| 46 | * | 45 | * |
| 47 | * If @target is blocked in a system call, returns zero with *@callno | 46 | * If @target is blocked in a system call, returns zero with @info.data.nr |
| 48 | * set to the call's number and @args filled in with its arguments. | 47 | * set to the call's number and @info.data.args filled in with its |
| 49 | * Registers not used for system call arguments may not be available and | 48 | * arguments. Registers not used for system call arguments may not be available |
| 50 | * it is not kosher to use &struct user_regset calls while the system | 49 | * and it is not kosher to use &struct user_regset calls while the system |
| 51 | * call is still in progress. Note we may get this result if @target | 50 | * call is still in progress. Note we may get this result if @target |
| 52 | * has finished its system call but not yet returned to user mode, such | 51 | * has finished its system call but not yet returned to user mode, such |
| 53 | * as when it's stopped for signal handling or syscall exit tracing. | 52 | * as when it's stopped for signal handling or syscall exit tracing. |
| 54 | * | 53 | * |
| 55 | * If @target is blocked in the kernel during a fault or exception, | 54 | * If @target is blocked in the kernel during a fault or exception, |
| 56 | * returns zero with *@callno set to -1 and does not fill in @args. | 55 | * returns zero with *@info.data.nr set to -1 and does not fill in |
| 57 | * If so, it's now safe to examine @target using &struct user_regset | 56 | * @info.data.args. If so, it's now safe to examine @target using |
| 58 | * get() calls as long as we're sure @target won't return to user mode. | 57 | * &struct user_regset get() calls as long as we're sure @target won't return |
| 58 | * to user mode. | ||
| 59 | * | 59 | * |
| 60 | * Returns -%EAGAIN if @target does not remain blocked. | 60 | * Returns -%EAGAIN if @target does not remain blocked. |
| 61 | * | ||
| 62 | * Returns -%EINVAL if @maxargs is too large (maximum is six). | ||
| 63 | */ | 61 | */ |
| 64 | int task_current_syscall(struct task_struct *target, long *callno, | 62 | int task_current_syscall(struct task_struct *target, struct syscall_info *info) |
| 65 | unsigned long args[6], unsigned int maxargs, | ||
| 66 | unsigned long *sp, unsigned long *pc) | ||
| 67 | { | 63 | { |
| 68 | long state; | 64 | long state; |
| 69 | unsigned long ncsw; | 65 | unsigned long ncsw; |
| 70 | 66 | ||
| 71 | if (unlikely(maxargs > 6)) | ||
| 72 | return -EINVAL; | ||
| 73 | |||
| 74 | if (target == current) | 67 | if (target == current) |
| 75 | return collect_syscall(target, callno, args, maxargs, sp, pc); | 68 | return collect_syscall(target, info); |
| 76 | 69 | ||
| 77 | state = target->state; | 70 | state = target->state; |
| 78 | if (unlikely(!state)) | 71 | if (unlikely(!state)) |
| @@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno, | |||
| 80 | 73 | ||
| 81 | ncsw = wait_task_inactive(target, state); | 74 | ncsw = wait_task_inactive(target, state); |
| 82 | if (unlikely(!ncsw) || | 75 | if (unlikely(!ncsw) || |
| 83 | unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || | 76 | unlikely(collect_syscall(target, info)) || |
| 84 | unlikely(wait_task_inactive(target, state) != ncsw)) | 77 | unlikely(wait_task_inactive(target, state) != ncsw)) |
| 85 | return -EAGAIN; | 78 | return -EAGAIN; |
| 86 | 79 | ||
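With the new signature, callers pass a single struct syscall_info instead of five output parameters. A hypothetical in-kernel caller, sketched only from the kernel-doc above (the field names .sp, .data.nr, .data.args and .data.instruction_pointer are taken from that doc; report_blocked_syscall() itself is illustrative):

static void report_blocked_syscall(struct task_struct *task)
{
	struct syscall_info info;

	if (task_current_syscall(task, &info))
		return;		/* -EAGAIN: task did not stay blocked */

	if (info.data.nr == -1)
		pr_info("blocked in fault/exception, sp=%llx\n",
			(unsigned long long)info.sp);
	else
		pr_info("blocked in syscall %d, arg0=%llx\n",
			(int)info.data.nr,
			(unsigned long long)info.data.args[0]);
}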
diff --git a/mm/compaction.c b/mm/compaction.c index f171a83707ce..3319e0872d01 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 242 | bool check_target) | 242 | bool check_target) |
| 243 | { | 243 | { |
| 244 | struct page *page = pfn_to_online_page(pfn); | 244 | struct page *page = pfn_to_online_page(pfn); |
| 245 | struct page *block_page; | ||
| 245 | struct page *end_page; | 246 | struct page *end_page; |
| 246 | unsigned long block_pfn; | 247 | unsigned long block_pfn; |
| 247 | 248 | ||
| @@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 267 | get_pageblock_migratetype(page) != MIGRATE_MOVABLE) | 268 | get_pageblock_migratetype(page) != MIGRATE_MOVABLE) |
| 268 | return false; | 269 | return false; |
| 269 | 270 | ||
| 271 | /* Ensure the start of the pageblock or zone is online and valid */ | ||
| 272 | block_pfn = pageblock_start_pfn(pfn); | ||
| 273 | block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn)); | ||
| 274 | if (block_page) { | ||
| 275 | page = block_page; | ||
| 276 | pfn = block_pfn; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* Ensure the end of the pageblock or zone is online and valid */ | ||
| 280 | block_pfn += pageblock_nr_pages; | ||
| 281 | block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); | ||
| 282 | end_page = pfn_to_online_page(block_pfn); | ||
| 283 | if (!end_page) | ||
| 284 | return false; | ||
| 285 | |||
| 270 | /* | 286 | /* |
| 271 | * Only clear the hint if a sample indicates there is either a | 287 | * Only clear the hint if a sample indicates there is either a |
| 272 | * free page or an LRU page in the block. One or other condition | 288 | * free page or an LRU page in the block. One or other condition |
| 273 | * is necessary for the block to be a migration source/target. | 289 | * is necessary for the block to be a migration source/target. |
| 274 | */ | 290 | */ |
| 275 | block_pfn = pageblock_start_pfn(pfn); | ||
| 276 | pfn = max(block_pfn, zone->zone_start_pfn); | ||
| 277 | page = pfn_to_page(pfn); | ||
| 278 | if (zone != page_zone(page)) | ||
| 279 | return false; | ||
| 280 | pfn = block_pfn + pageblock_nr_pages; | ||
| 281 | pfn = min(pfn, zone_end_pfn(zone)); | ||
| 282 | end_page = pfn_to_page(pfn); | ||
| 283 | |||
| 284 | do { | 291 | do { |
| 285 | if (pfn_valid_within(pfn)) { | 292 | if (pfn_valid_within(pfn)) { |
| 286 | if (check_source && PageLRU(page)) { | 293 | if (check_source && PageLRU(page)) { |
| @@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, | |||
| 309 | static void __reset_isolation_suitable(struct zone *zone) | 316 | static void __reset_isolation_suitable(struct zone *zone) |
| 310 | { | 317 | { |
| 311 | unsigned long migrate_pfn = zone->zone_start_pfn; | 318 | unsigned long migrate_pfn = zone->zone_start_pfn; |
| 312 | unsigned long free_pfn = zone_end_pfn(zone); | 319 | unsigned long free_pfn = zone_end_pfn(zone) - 1; |
| 313 | unsigned long reset_migrate = free_pfn; | 320 | unsigned long reset_migrate = free_pfn; |
| 314 | unsigned long reset_free = migrate_pfn; | 321 | unsigned long reset_free = migrate_pfn; |
| 315 | bool source_set = false; | 322 | bool source_set = false; |
| @@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc) | |||
| 1363 | count_compact_events(COMPACTISOLATED, nr_isolated); | 1370 | count_compact_events(COMPACTISOLATED, nr_isolated); |
| 1364 | } else { | 1371 | } else { |
| 1365 | /* If isolation fails, abort the search */ | 1372 | /* If isolation fails, abort the search */ |
| 1366 | order = -1; | 1373 | order = cc->search_order + 1; |
| 1367 | page = NULL; | 1374 | page = NULL; |
| 1368 | } | 1375 | } |
| 1369 | } | 1376 | } |
| @@ -160,8 +160,12 @@ retry: | |||
| 160 | goto retry; | 160 | goto retry; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | if (flags & FOLL_GET) | 163 | if (flags & FOLL_GET) { |
| 164 | get_page(page); | 164 | if (unlikely(!try_get_page(page))) { |
| 165 | page = ERR_PTR(-ENOMEM); | ||
| 166 | goto out; | ||
| 167 | } | ||
| 168 | } | ||
| 165 | if (flags & FOLL_TOUCH) { | 169 | if (flags & FOLL_TOUCH) { |
| 166 | if ((flags & FOLL_WRITE) && | 170 | if ((flags & FOLL_WRITE) && |
| 167 | !pte_dirty(pte) && !PageDirty(page)) | 171 | !pte_dirty(pte) && !PageDirty(page)) |
| @@ -298,7 +302,10 @@ retry_locked: | |||
| 298 | if (pmd_trans_unstable(pmd)) | 302 | if (pmd_trans_unstable(pmd)) |
| 299 | ret = -EBUSY; | 303 | ret = -EBUSY; |
| 300 | } else { | 304 | } else { |
| 301 | get_page(page); | 305 | if (unlikely(!try_get_page(page))) { |
| 306 | spin_unlock(ptl); | ||
| 307 | return ERR_PTR(-ENOMEM); | ||
| 308 | } | ||
| 302 | spin_unlock(ptl); | 309 | spin_unlock(ptl); |
| 303 | lock_page(page); | 310 | lock_page(page); |
| 304 | ret = split_huge_page(page); | 311 | ret = split_huge_page(page); |
| @@ -500,7 +507,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, | |||
| 500 | if (is_device_public_page(*page)) | 507 | if (is_device_public_page(*page)) |
| 501 | goto unmap; | 508 | goto unmap; |
| 502 | } | 509 | } |
| 503 | get_page(*page); | 510 | if (unlikely(!try_get_page(*page))) { |
| 511 | ret = -ENOMEM; | ||
| 512 | goto unmap; | ||
| 513 | } | ||
| 504 | out: | 514 | out: |
| 505 | ret = 0; | 515 | ret = 0; |
| 506 | unmap: | 516 | unmap: |
| @@ -1545,6 +1555,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) | |||
| 1545 | } | 1555 | } |
| 1546 | } | 1556 | } |
| 1547 | 1557 | ||
| 1558 | /* | ||
| 1559 | * Return the compound head page with ref appropriately incremented, | ||
| 1560 | * or NULL if that failed. | ||
| 1561 | */ | ||
| 1562 | static inline struct page *try_get_compound_head(struct page *page, int refs) | ||
| 1563 | { | ||
| 1564 | struct page *head = compound_head(page); | ||
| 1565 | if (WARN_ON_ONCE(page_ref_count(head) < 0)) | ||
| 1566 | return NULL; | ||
| 1567 | if (unlikely(!page_cache_add_speculative(head, refs))) | ||
| 1568 | return NULL; | ||
| 1569 | return head; | ||
| 1570 | } | ||
| 1571 | |||
| 1548 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL | 1572 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
| 1549 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | 1573 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
| 1550 | int write, struct page **pages, int *nr) | 1574 | int write, struct page **pages, int *nr) |
| @@ -1579,9 +1603,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | |||
| 1579 | 1603 | ||
| 1580 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 1604 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 1581 | page = pte_page(pte); | 1605 | page = pte_page(pte); |
| 1582 | head = compound_head(page); | ||
| 1583 | 1606 | ||
| 1584 | if (!page_cache_get_speculative(head)) | 1607 | head = try_get_compound_head(page, 1); |
| 1608 | if (!head) | ||
| 1585 | goto pte_unmap; | 1609 | goto pte_unmap; |
| 1586 | 1610 | ||
| 1587 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | 1611 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
| @@ -1720,8 +1744,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, | |||
| 1720 | refs++; | 1744 | refs++; |
| 1721 | } while (addr += PAGE_SIZE, addr != end); | 1745 | } while (addr += PAGE_SIZE, addr != end); |
| 1722 | 1746 | ||
| 1723 | head = compound_head(pmd_page(orig)); | 1747 | head = try_get_compound_head(pmd_page(orig), refs); |
| 1724 | if (!page_cache_add_speculative(head, refs)) { | 1748 | if (!head) { |
| 1725 | *nr -= refs; | 1749 | *nr -= refs; |
| 1726 | return 0; | 1750 | return 0; |
| 1727 | } | 1751 | } |
| @@ -1758,8 +1782,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, | |||
| 1758 | refs++; | 1782 | refs++; |
| 1759 | } while (addr += PAGE_SIZE, addr != end); | 1783 | } while (addr += PAGE_SIZE, addr != end); |
| 1760 | 1784 | ||
| 1761 | head = compound_head(pud_page(orig)); | 1785 | head = try_get_compound_head(pud_page(orig), refs); |
| 1762 | if (!page_cache_add_speculative(head, refs)) { | 1786 | if (!head) { |
| 1763 | *nr -= refs; | 1787 | *nr -= refs; |
| 1764 | return 0; | 1788 | return 0; |
| 1765 | } | 1789 | } |
| @@ -1795,8 +1819,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, | |||
| 1795 | refs++; | 1819 | refs++; |
| 1796 | } while (addr += PAGE_SIZE, addr != end); | 1820 | } while (addr += PAGE_SIZE, addr != end); |
| 1797 | 1821 | ||
| 1798 | head = compound_head(pgd_page(orig)); | 1822 | head = try_get_compound_head(pgd_page(orig), refs); |
| 1799 | if (!page_cache_add_speculative(head, refs)) { | 1823 | if (!head) { |
| 1800 | *nr -= refs; | 1824 | *nr -= refs; |
| 1801 | return 0; | 1825 | return 0; |
| 1802 | } | 1826 | } |
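All of gup_pte_range() and the gup_huge_* paths now funnel through try_get_compound_head(), which refuses to take a reference when the head page's count is already zero (freed) or negative (overflowed). The guard pattern itself, stripped of the page machinery, looks roughly like this toy model; the real code uses page_ref_count() and page_cache_add_speculative():

#include <limits.h>
#include <stdbool.h>

/* Toy model of the guard: never bump a reference count that has gone
 * to zero or negative, and never let the bump itself overflow. */
static bool try_get_ref(int *refcount, int refs)
{
	if (*refcount <= 0)
		return false;		/* freed or corrupted: refuse */
	if (*refcount > INT_MAX - refs)
		return false;		/* bump would overflow: refuse */
	*refcount += refs;
	return true;
}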
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 404acdcd0455..165ea46bf149 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
| 755 | spinlock_t *ptl; | 755 | spinlock_t *ptl; |
| 756 | 756 | ||
| 757 | ptl = pmd_lock(mm, pmd); | 757 | ptl = pmd_lock(mm, pmd); |
| 758 | if (!pmd_none(*pmd)) { | ||
| 759 | if (write) { | ||
| 760 | if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { | ||
| 761 | WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); | ||
| 762 | goto out_unlock; | ||
| 763 | } | ||
| 764 | entry = pmd_mkyoung(*pmd); | ||
| 765 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); | ||
| 766 | if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) | ||
| 767 | update_mmu_cache_pmd(vma, addr, pmd); | ||
| 768 | } | ||
| 769 | |||
| 770 | goto out_unlock; | ||
| 771 | } | ||
| 772 | |||
| 758 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); | 773 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
| 759 | if (pfn_t_devmap(pfn)) | 774 | if (pfn_t_devmap(pfn)) |
| 760 | entry = pmd_mkdevmap(entry); | 775 | entry = pmd_mkdevmap(entry); |
| @@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
| 766 | if (pgtable) { | 781 | if (pgtable) { |
| 767 | pgtable_trans_huge_deposit(mm, pmd, pgtable); | 782 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
| 768 | mm_inc_nr_ptes(mm); | 783 | mm_inc_nr_ptes(mm); |
| 784 | pgtable = NULL; | ||
| 769 | } | 785 | } |
| 770 | 786 | ||
| 771 | set_pmd_at(mm, addr, pmd, entry); | 787 | set_pmd_at(mm, addr, pmd, entry); |
| 772 | update_mmu_cache_pmd(vma, addr, pmd); | 788 | update_mmu_cache_pmd(vma, addr, pmd); |
| 789 | |||
| 790 | out_unlock: | ||
| 773 | spin_unlock(ptl); | 791 | spin_unlock(ptl); |
| 792 | if (pgtable) | ||
| 793 | pte_free(mm, pgtable); | ||
| 774 | } | 794 | } |
| 775 | 795 | ||
| 776 | vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | 796 | vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
| @@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | |||
| 821 | spinlock_t *ptl; | 841 | spinlock_t *ptl; |
| 822 | 842 | ||
| 823 | ptl = pud_lock(mm, pud); | 843 | ptl = pud_lock(mm, pud); |
| 844 | if (!pud_none(*pud)) { | ||
| 845 | if (write) { | ||
| 846 | if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { | ||
| 847 | WARN_ON_ONCE(!is_huge_zero_pud(*pud)); | ||
| 848 | goto out_unlock; | ||
| 849 | } | ||
| 850 | entry = pud_mkyoung(*pud); | ||
| 851 | entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); | ||
| 852 | if (pudp_set_access_flags(vma, addr, pud, entry, 1)) | ||
| 853 | update_mmu_cache_pud(vma, addr, pud); | ||
| 854 | } | ||
| 855 | goto out_unlock; | ||
| 856 | } | ||
| 857 | |||
| 824 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); | 858 | entry = pud_mkhuge(pfn_t_pud(pfn, prot)); |
| 825 | if (pfn_t_devmap(pfn)) | 859 | if (pfn_t_devmap(pfn)) |
| 826 | entry = pud_mkdevmap(entry); | 860 | entry = pud_mkdevmap(entry); |
| @@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, | |||
| 830 | } | 864 | } |
| 831 | set_pud_at(mm, addr, pud, entry); | 865 | set_pud_at(mm, addr, pud, entry); |
| 832 | update_mmu_cache_pud(vma, addr, pud); | 866 | update_mmu_cache_pud(vma, addr, pud); |
| 867 | |||
| 868 | out_unlock: | ||
| 833 | spin_unlock(ptl); | 869 | spin_unlock(ptl); |
| 834 | } | 870 | } |
| 835 | 871 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 97b1e0290c66..6cdc7b2d9100 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 4299 | 4299 | ||
| 4300 | pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; | 4300 | pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; |
| 4301 | page = pte_page(huge_ptep_get(pte)); | 4301 | page = pte_page(huge_ptep_get(pte)); |
| 4302 | |||
| 4303 | /* | ||
| 4304 | * Instead of doing 'try_get_page()' below in the same_page | ||
| 4305 | * loop, just check the count once here. | ||
| 4306 | */ | ||
| 4307 | if (unlikely(page_count(page) <= 0)) { | ||
| 4308 | if (pages) { | ||
| 4309 | spin_unlock(ptl); | ||
| 4310 | remainder = 0; | ||
| 4311 | err = -ENOMEM; | ||
| 4312 | break; | ||
| 4313 | } | ||
| 4314 | } | ||
| 4302 | same_page: | 4315 | same_page: |
| 4303 | if (pages) { | 4316 | if (pages) { |
| 4304 | pages[i] = mem_map_offset(page, pfn_offset); | 4317 | pages[i] = mem_map_offset(page, pfn_offset); |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 707fa5579f66..2e435b8142e5 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -1401,6 +1401,7 @@ static void scan_block(void *_start, void *_end, | |||
| 1401 | /* | 1401 | /* |
| 1402 | * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency. | 1402 | * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency. |
| 1403 | */ | 1403 | */ |
| 1404 | #ifdef CONFIG_SMP | ||
| 1404 | static void scan_large_block(void *start, void *end) | 1405 | static void scan_large_block(void *start, void *end) |
| 1405 | { | 1406 | { |
| 1406 | void *next; | 1407 | void *next; |
| @@ -1412,6 +1413,7 @@ static void scan_large_block(void *start, void *end) | |||
| 1412 | cond_resched(); | 1413 | cond_resched(); |
| 1413 | } | 1414 | } |
| 1414 | } | 1415 | } |
| 1416 | #endif | ||
| 1415 | 1417 | ||
| 1416 | /* | 1418 | /* |
| 1417 | * Scan a memory block corresponding to a kmemleak_object. A condition is | 1419 | * Scan a memory block corresponding to a kmemleak_object. A condition is |
| @@ -1529,11 +1531,6 @@ static void kmemleak_scan(void) | |||
| 1529 | } | 1531 | } |
| 1530 | rcu_read_unlock(); | 1532 | rcu_read_unlock(); |
| 1531 | 1533 | ||
| 1532 | /* data/bss scanning */ | ||
| 1533 | scan_large_block(_sdata, _edata); | ||
| 1534 | scan_large_block(__bss_start, __bss_stop); | ||
| 1535 | scan_large_block(__start_ro_after_init, __end_ro_after_init); | ||
| 1536 | |||
| 1537 | #ifdef CONFIG_SMP | 1534 | #ifdef CONFIG_SMP |
| 1538 | /* per-cpu sections scanning */ | 1535 | /* per-cpu sections scanning */ |
| 1539 | for_each_possible_cpu(i) | 1536 | for_each_possible_cpu(i) |
| @@ -2071,6 +2068,17 @@ void __init kmemleak_init(void) | |||
| 2071 | } | 2068 | } |
| 2072 | local_irq_restore(flags); | 2069 | local_irq_restore(flags); |
| 2073 | 2070 | ||
| 2071 | /* register the data/bss sections */ | ||
| 2072 | create_object((unsigned long)_sdata, _edata - _sdata, | ||
| 2073 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2074 | create_object((unsigned long)__bss_start, __bss_stop - __bss_start, | ||
| 2075 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2076 | /* only register .data..ro_after_init if not within .data */ | ||
| 2077 | if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) | ||
| 2078 | create_object((unsigned long)__start_ro_after_init, | ||
| 2079 | __end_ro_after_init - __start_ro_after_init, | ||
| 2080 | KMEMLEAK_GREY, GFP_ATOMIC); | ||
| 2081 | |||
| 2074 | /* | 2082 | /* |
| 2075 | * This is the point where tracking allocations is safe. Automatic | 2083 | * This is the point where tracking allocations is safe. Automatic |
| 2076 | * scanning is started during the late initcall. Add the early logged | 2084 | * scanning is started during the late initcall. Add the early logged |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 532e0e2a4817..81a0d3914ec9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | |||
| 3882 | return &memcg->cgwb_domain; | 3882 | return &memcg->cgwb_domain; |
| 3883 | } | 3883 | } |
| 3884 | 3884 | ||
| 3885 | /* | ||
| 3886 | * idx can be of type enum memcg_stat_item or node_stat_item. | ||
| 3887 | * Keep in sync with memcg_exact_page(). | ||
| 3888 | */ | ||
| 3889 | static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) | ||
| 3890 | { | ||
| 3891 | long x = atomic_long_read(&memcg->stat[idx]); | ||
| 3892 | int cpu; | ||
| 3893 | |||
| 3894 | for_each_online_cpu(cpu) | ||
| 3895 | x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx]; | ||
| 3896 | if (x < 0) | ||
| 3897 | x = 0; | ||
| 3898 | return x; | ||
| 3899 | } | ||
| 3900 | |||
| 3885 | /** | 3901 | /** |
| 3886 | * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg | 3902 | * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg |
| 3887 | * @wb: bdi_writeback in question | 3903 | * @wb: bdi_writeback in question |
| @@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, | |||
| 3907 | struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); | 3923 | struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); |
| 3908 | struct mem_cgroup *parent; | 3924 | struct mem_cgroup *parent; |
| 3909 | 3925 | ||
| 3910 | *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); | 3926 | *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); |
| 3911 | 3927 | ||
| 3912 | /* this should eventually include NR_UNSTABLE_NFS */ | 3928 | /* this should eventually include NR_UNSTABLE_NFS */ |
| 3913 | *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); | 3929 | *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); |
| 3914 | *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | | 3930 | *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | |
| 3915 | (1 << LRU_ACTIVE_FILE)); | 3931 | (1 << LRU_ACTIVE_FILE)); |
| 3916 | *pheadroom = PAGE_COUNTER_MAX; | 3932 | *pheadroom = PAGE_COUNTER_MAX; |
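memcg_page_state() reads only the batched atomic counter, which can lag behind by up to the per-cpu batch on every CPU; for writeback throttling that error is large enough to matter, so memcg_exact_page_state() folds the unflushed per-cpu deltas back in and clamps transiently negative sums to zero. The readout pattern in isolation, as a sketch with toy globals and a fixed CPU count:

#define SKETCH_NR_CPUS 4

static long global_count;			/* the batched counter */
static long percpu_delta[SKETCH_NR_CPUS];	/* deltas not yet folded in */

static unsigned long exact_count(void)
{
	long x = global_count;
	int cpu;

	for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
		x += percpu_delta[cpu];
	return x < 0 ? 0 : (unsigned long)x;	/* hide transient negatives */
}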
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/moduleparam.h> | 45 | #include <linux/moduleparam.h> |
| 46 | #include <linux/pkeys.h> | 46 | #include <linux/pkeys.h> |
| 47 | #include <linux/oom.h> | 47 | #include <linux/oom.h> |
| 48 | #include <linux/sched/mm.h> | ||
| 48 | 49 | ||
| 49 | #include <linux/uaccess.h> | 50 | #include <linux/uaccess.h> |
| 50 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
| @@ -2525,7 +2526,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) | |||
| 2525 | vma = find_vma_prev(mm, addr, &prev); | 2526 | vma = find_vma_prev(mm, addr, &prev); |
| 2526 | if (vma && (vma->vm_start <= addr)) | 2527 | if (vma && (vma->vm_start <= addr)) |
| 2527 | return vma; | 2528 | return vma; |
| 2528 | if (!prev || expand_stack(prev, addr)) | 2529 | /* don't alter vm_end if the coredump is running */ |
| 2530 | if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr)) | ||
| 2529 | return NULL; | 2531 | return NULL; |
| 2530 | if (prev->vm_flags & VM_LOCKED) | 2532 | if (prev->vm_flags & VM_LOCKED) |
| 2531 | populate_vma_page_range(prev, addr, prev->vm_end, NULL); | 2533 | populate_vma_page_range(prev, addr, prev->vm_end, NULL); |
| @@ -2551,6 +2553,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) | |||
| 2551 | return vma; | 2553 | return vma; |
| 2552 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 2554 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
| 2553 | return NULL; | 2555 | return NULL; |
| 2556 | /* don't alter vm_start if the coredump is running */ | ||
| 2557 | if (!mmget_still_valid(mm)) | ||
| 2558 | return NULL; | ||
| 2554 | start = vma->vm_start; | 2559 | start = vma->vm_start; |
| 2555 | if (expand_stack(vma, addr)) | 2560 | if (expand_stack(vma, addr)) |
| 2556 | return NULL; | 2561 | return NULL; |
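Both find_extend_vma() paths now refuse to grow a stack VMA while a core dump of that mm is in progress, so the dumper keeps seeing a stable layout. The shape of the guard as a sketch; grow_stack_vma() is an illustrative wrapper, not a kernel function, while mmget_still_valid() and expand_stack() are the real helpers used above:

static struct vm_area_struct *grow_stack_vma(struct mm_struct *mm,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	/* don't alter vm_start/vm_end while the coredump is running */
	if (!mmget_still_valid(mm))
		return NULL;
	if (expand_stack(vma, addr))
		return NULL;
	return vma;
}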
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d96ca5bc555b..c6ce20aaf80b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -8005,7 +8005,10 @@ void *__init alloc_large_system_hash(const char *tablename, | |||
| 8005 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | 8005 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count, |
| 8006 | int migratetype, int flags) | 8006 | int migratetype, int flags) |
| 8007 | { | 8007 | { |
| 8008 | unsigned long pfn, iter, found; | 8008 | unsigned long found; |
| 8009 | unsigned long iter = 0; | ||
| 8010 | unsigned long pfn = page_to_pfn(page); | ||
| 8011 | const char *reason = "unmovable page"; | ||
| 8009 | 8012 | ||
| 8010 | /* | 8013 | /* |
| 8011 | * TODO we could make this much more efficient by not checking every | 8014 | * TODO we could make this much more efficient by not checking every |
| @@ -8015,17 +8018,20 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 8015 | * can still lead to having bootmem allocations in zone_movable. | 8018 | * can still lead to having bootmem allocations in zone_movable. |
| 8016 | */ | 8019 | */ |
| 8017 | 8020 | ||
| 8018 | /* | 8021 | if (is_migrate_cma_page(page)) { |
| 8019 | * CMA allocations (alloc_contig_range) really need to mark isolate | 8022 | /* |
| 8020 | * CMA pageblocks even when they are not movable in fact so consider | 8023 | * CMA allocations (alloc_contig_range) really need to mark |
| 8021 | * them movable here. | 8024 | * isolate CMA pageblocks even when they are not movable in fact |
| 8022 | */ | 8025 | * so consider them movable here. |
| 8023 | if (is_migrate_cma(migratetype) && | 8026 | */ |
| 8024 | is_migrate_cma(get_pageblock_migratetype(page))) | 8027 | if (is_migrate_cma(migratetype)) |
| 8025 | return false; | 8028 | return false; |
| 8029 | |||
| 8030 | reason = "CMA page"; | ||
| 8031 | goto unmovable; | ||
| 8032 | } | ||
| 8026 | 8033 | ||
| 8027 | pfn = page_to_pfn(page); | 8034 | for (found = 0; iter < pageblock_nr_pages; iter++) { |
| 8028 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { | ||
| 8029 | unsigned long check = pfn + iter; | 8035 | unsigned long check = pfn + iter; |
| 8030 | 8036 | ||
| 8031 | if (!pfn_valid_within(check)) | 8037 | if (!pfn_valid_within(check)) |
| @@ -8105,7 +8111,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 8105 | unmovable: | 8111 | unmovable: |
| 8106 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); | 8112 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); |
| 8107 | if (flags & REPORT_FAILURE) | 8113 | if (flags & REPORT_FAILURE) |
| 8108 | dump_page(pfn_to_page(pfn+iter), "unmovable page"); | 8114 | dump_page(pfn_to_page(pfn + iter), reason); |
| 8109 | return true; | 8115 | return true; |
| 8110 | } | 8116 | } |
| 8111 | 8117 | ||
diff --git a/mm/percpu.c b/mm/percpu.c index 2e6fc8d552c9..68dd2e7e73b5 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -2567,8 +2567,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, | |||
| 2567 | ai->groups[group].base_offset = areas[group] - base; | 2567 | ai->groups[group].base_offset = areas[group] - base; |
| 2568 | } | 2568 | } |
| 2569 | 2569 | ||
| 2570 | pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", | 2570 | pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", |
| 2571 | PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, | 2571 | PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, |
| 2572 | ai->dyn_size, ai->unit_size); | 2572 | ai->dyn_size, ai->unit_size); |
| 2573 | 2573 | ||
| 2574 | rc = pcpu_setup_first_chunk(ai, base); | 2574 | rc = pcpu_setup_first_chunk(ai, base); |
| @@ -2692,8 +2692,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size, | |||
| 2692 | } | 2692 | } |
| 2693 | 2693 | ||
| 2694 | /* we're ready, commit */ | 2694 | /* we're ready, commit */ |
| 2695 | pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", | 2695 | pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", |
| 2696 | unit_pages, psize_str, vm.addr, ai->static_size, | 2696 | unit_pages, psize_str, ai->static_size, |
| 2697 | ai->reserved_size, ai->dyn_size); | 2697 | ai->reserved_size, ai->dyn_size); |
| 2698 | 2698 | ||
| 2699 | rc = pcpu_setup_first_chunk(ai, vm.addr); | 2699 | rc = pcpu_setup_first_chunk(ai, vm.addr); |
diff --git a/mm/shmem.c b/mm/shmem.c index b3db3779a30a..2275a0ff7c30 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -1081,9 +1081,14 @@ static void shmem_evict_inode(struct inode *inode) | |||
| 1081 | } | 1081 | } |
| 1082 | spin_unlock(&sbinfo->shrinklist_lock); | 1082 | spin_unlock(&sbinfo->shrinklist_lock); |
| 1083 | } | 1083 | } |
| 1084 | if (!list_empty(&info->swaplist)) { | 1084 | while (!list_empty(&info->swaplist)) { |
| 1085 | /* Wait while shmem_unuse() is scanning this inode... */ | ||
| 1086 | wait_var_event(&info->stop_eviction, | ||
| 1087 | !atomic_read(&info->stop_eviction)); | ||
| 1085 | mutex_lock(&shmem_swaplist_mutex); | 1088 | mutex_lock(&shmem_swaplist_mutex); |
| 1086 | list_del_init(&info->swaplist); | 1089 | /* ...but beware of the race if we peeked too early */ |
| 1090 | if (!atomic_read(&info->stop_eviction)) | ||
| 1091 | list_del_init(&info->swaplist); | ||
| 1087 | mutex_unlock(&shmem_swaplist_mutex); | 1092 | mutex_unlock(&shmem_swaplist_mutex); |
| 1088 | } | 1093 | } |
| 1089 | } | 1094 | } |
| @@ -1099,10 +1104,11 @@ extern struct swap_info_struct *swap_info[]; | |||
| 1099 | static int shmem_find_swap_entries(struct address_space *mapping, | 1104 | static int shmem_find_swap_entries(struct address_space *mapping, |
| 1100 | pgoff_t start, unsigned int nr_entries, | 1105 | pgoff_t start, unsigned int nr_entries, |
| 1101 | struct page **entries, pgoff_t *indices, | 1106 | struct page **entries, pgoff_t *indices, |
| 1102 | bool frontswap) | 1107 | unsigned int type, bool frontswap) |
| 1103 | { | 1108 | { |
| 1104 | XA_STATE(xas, &mapping->i_pages, start); | 1109 | XA_STATE(xas, &mapping->i_pages, start); |
| 1105 | struct page *page; | 1110 | struct page *page; |
| 1111 | swp_entry_t entry; | ||
| 1106 | unsigned int ret = 0; | 1112 | unsigned int ret = 0; |
| 1107 | 1113 | ||
| 1108 | if (!nr_entries) | 1114 | if (!nr_entries) |
| @@ -1116,13 +1122,12 @@ static int shmem_find_swap_entries(struct address_space *mapping, | |||
| 1116 | if (!xa_is_value(page)) | 1122 | if (!xa_is_value(page)) |
| 1117 | continue; | 1123 | continue; |
| 1118 | 1124 | ||
| 1119 | if (frontswap) { | 1125 | entry = radix_to_swp_entry(page); |
| 1120 | swp_entry_t entry = radix_to_swp_entry(page); | 1126 | if (swp_type(entry) != type) |
| 1121 | 1127 | continue; | |
| 1122 | if (!frontswap_test(swap_info[swp_type(entry)], | 1128 | if (frontswap && |
| 1123 | swp_offset(entry))) | 1129 | !frontswap_test(swap_info[type], swp_offset(entry))) |
| 1124 | continue; | 1130 | continue; |
| 1125 | } | ||
| 1126 | 1131 | ||
| 1127 | indices[ret] = xas.xa_index; | 1132 | indices[ret] = xas.xa_index; |
| 1128 | entries[ret] = page; | 1133 | entries[ret] = page; |
| @@ -1194,7 +1199,7 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type, | |||
| 1194 | 1199 | ||
| 1195 | pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, | 1200 | pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, |
| 1196 | pvec.pages, indices, | 1201 | pvec.pages, indices, |
| 1197 | frontswap); | 1202 | type, frontswap); |
| 1198 | if (pvec.nr == 0) { | 1203 | if (pvec.nr == 0) { |
| 1199 | ret = 0; | 1204 | ret = 0; |
| 1200 | break; | 1205 | break; |
| @@ -1227,36 +1232,27 @@ int shmem_unuse(unsigned int type, bool frontswap, | |||
| 1227 | unsigned long *fs_pages_to_unuse) | 1232 | unsigned long *fs_pages_to_unuse) |
| 1228 | { | 1233 | { |
| 1229 | struct shmem_inode_info *info, *next; | 1234 | struct shmem_inode_info *info, *next; |
| 1230 | struct inode *inode; | ||
| 1231 | struct inode *prev_inode = NULL; | ||
| 1232 | int error = 0; | 1235 | int error = 0; |
| 1233 | 1236 | ||
| 1234 | if (list_empty(&shmem_swaplist)) | 1237 | if (list_empty(&shmem_swaplist)) |
| 1235 | return 0; | 1238 | return 0; |
| 1236 | 1239 | ||
| 1237 | mutex_lock(&shmem_swaplist_mutex); | 1240 | mutex_lock(&shmem_swaplist_mutex); |
| 1238 | |||
| 1239 | /* | ||
| 1240 | * The extra refcount on the inode is necessary to safely dereference | ||
| 1241 | * p->next after re-acquiring the lock. New shmem inodes with swap | ||
| 1242 | * get added to the end of the list and we will scan them all. | ||
| 1243 | */ | ||
| 1244 | list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { | 1241 | list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { |
| 1245 | if (!info->swapped) { | 1242 | if (!info->swapped) { |
| 1246 | list_del_init(&info->swaplist); | 1243 | list_del_init(&info->swaplist); |
| 1247 | continue; | 1244 | continue; |
| 1248 | } | 1245 | } |
| 1249 | 1246 | /* | |
| 1250 | inode = igrab(&info->vfs_inode); | 1247 | * Drop the swaplist mutex while searching the inode for swap; |
| 1251 | if (!inode) | 1248 | * but before doing so, make sure shmem_evict_inode() will not |
| 1252 | continue; | 1249 | * remove placeholder inode from swaplist, nor let it be freed |
| 1253 | 1250 | * (igrab() would protect from unlink, but not from unmount). | |
| 1251 | */ | ||
| 1252 | atomic_inc(&info->stop_eviction); | ||
| 1254 | mutex_unlock(&shmem_swaplist_mutex); | 1253 | mutex_unlock(&shmem_swaplist_mutex); |
| 1255 | if (prev_inode) | ||
| 1256 | iput(prev_inode); | ||
| 1257 | prev_inode = inode; | ||
| 1258 | 1254 | ||
| 1259 | error = shmem_unuse_inode(inode, type, frontswap, | 1255 | error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, |
| 1260 | fs_pages_to_unuse); | 1256 | fs_pages_to_unuse); |
| 1261 | cond_resched(); | 1257 | cond_resched(); |
| 1262 | 1258 | ||
| @@ -1264,14 +1260,13 @@ int shmem_unuse(unsigned int type, bool frontswap, | |||
| 1264 | next = list_next_entry(info, swaplist); | 1260 | next = list_next_entry(info, swaplist); |
| 1265 | if (!info->swapped) | 1261 | if (!info->swapped) |
| 1266 | list_del_init(&info->swaplist); | 1262 | list_del_init(&info->swaplist); |
| 1263 | if (atomic_dec_and_test(&info->stop_eviction)) | ||
| 1264 | wake_up_var(&info->stop_eviction); | ||
| 1267 | if (error) | 1265 | if (error) |
| 1268 | break; | 1266 | break; |
| 1269 | } | 1267 | } |
| 1270 | mutex_unlock(&shmem_swaplist_mutex); | 1268 | mutex_unlock(&shmem_swaplist_mutex); |
| 1271 | 1269 | ||
| 1272 | if (prev_inode) | ||
| 1273 | iput(prev_inode); | ||
| 1274 | |||
| 1275 | return error; | 1270 | return error; |
| 1276 | } | 1271 | } |
| 1277 | 1272 | ||
| @@ -2238,6 +2233,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode | |||
| 2238 | info = SHMEM_I(inode); | 2233 | info = SHMEM_I(inode); |
| 2239 | memset(info, 0, (char *)inode - (char *)info); | 2234 | memset(info, 0, (char *)inode - (char *)info); |
| 2240 | spin_lock_init(&info->lock); | 2235 | spin_lock_init(&info->lock); |
| 2236 | atomic_set(&info->stop_eviction, 0); | ||
| 2241 | info->seals = F_SEAL_SEAL; | 2237 | info->seals = F_SEAL_SEAL; |
| 2242 | info->flags = flags & VM_NORESERVE; | 2238 | info->flags = flags & VM_NORESERVE; |
| 2243 | INIT_LIST_HEAD(&info->shrinklist); | 2239 | INIT_LIST_HEAD(&info->shrinklist); |
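The swaplist changes replace the igrab()/iput() dance with a counted handshake: shmem_unuse() raises info->stop_eviction before dropping the swaplist mutex, and shmem_evict_inode() waits for the count to fall back to zero before touching the swaplist entry. The generic form of that handshake, using the same wait_var_event()/wake_up_var() primitives (the scanner/destructor names are illustrative):

static atomic_t busy = ATOMIC_INIT(0);

static void scanner(void)
{
	atomic_inc(&busy);		/* pin the object before unlocking */
	/* ... long scan without the list lock held ... */
	if (atomic_dec_and_test(&busy))
		wake_up_var(&busy);	/* last scanner wakes the waiter */
}

static void destructor(void)
{
	wait_var_event(&busy, !atomic_read(&busy));
	/* no scanner can still be using the object; safe to tear down */
}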
| @@ -2374,7 +2374,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, | |||
| 2374 | /* Slab management obj is off-slab. */ | 2374 | /* Slab management obj is off-slab. */ |
| 2375 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, | 2375 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, |
| 2376 | local_flags, nodeid); | 2376 | local_flags, nodeid); |
| 2377 | freelist = kasan_reset_tag(freelist); | ||
| 2378 | if (!freelist) | 2377 | if (!freelist) |
| 2379 | return NULL; | 2378 | return NULL; |
| 2380 | } else { | 2379 | } else { |
| @@ -4308,7 +4307,8 @@ static void show_symbol(struct seq_file *m, unsigned long address) | |||
| 4308 | 4307 | ||
| 4309 | static int leaks_show(struct seq_file *m, void *p) | 4308 | static int leaks_show(struct seq_file *m, void *p) |
| 4310 | { | 4309 | { |
| 4311 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | 4310 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, |
| 4311 | root_caches_node); | ||
| 4312 | struct page *page; | 4312 | struct page *page; |
| 4313 | struct kmem_cache_node *n; | 4313 | struct kmem_cache_node *n; |
| 4314 | const char *name; | 4314 | const char *name; |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 2b8d9c3fbb47..cf63b5f01adf 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -2023,7 +2023,6 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, | |||
| 2023 | * If the boolean frontswap is true, only unuse pages_to_unuse pages; | 2023 | * If the boolean frontswap is true, only unuse pages_to_unuse pages; |
| 2024 | * pages_to_unuse==0 means all pages; ignored if frontswap is false | 2024 | * pages_to_unuse==0 means all pages; ignored if frontswap is false |
| 2025 | */ | 2025 | */ |
| 2026 | #define SWAP_UNUSE_MAX_TRIES 3 | ||
| 2027 | int try_to_unuse(unsigned int type, bool frontswap, | 2026 | int try_to_unuse(unsigned int type, bool frontswap, |
| 2028 | unsigned long pages_to_unuse) | 2027 | unsigned long pages_to_unuse) |
| 2029 | { | 2028 | { |
| @@ -2035,7 +2034,6 @@ int try_to_unuse(unsigned int type, bool frontswap, | |||
| 2035 | struct page *page; | 2034 | struct page *page; |
| 2036 | swp_entry_t entry; | 2035 | swp_entry_t entry; |
| 2037 | unsigned int i; | 2036 | unsigned int i; |
| 2038 | int retries = 0; | ||
| 2039 | 2037 | ||
| 2040 | if (!si->inuse_pages) | 2038 | if (!si->inuse_pages) |
| 2041 | return 0; | 2039 | return 0; |
| @@ -2053,11 +2051,9 @@ retry: | |||
| 2053 | 2051 | ||
| 2054 | spin_lock(&mmlist_lock); | 2052 | spin_lock(&mmlist_lock); |
| 2055 | p = &init_mm.mmlist; | 2053 | p = &init_mm.mmlist; |
| 2056 | while ((p = p->next) != &init_mm.mmlist) { | 2054 | while (si->inuse_pages && |
| 2057 | if (signal_pending(current)) { | 2055 | !signal_pending(current) && |
| 2058 | retval = -EINTR; | 2056 | (p = p->next) != &init_mm.mmlist) { |
| 2059 | break; | ||
| 2060 | } | ||
| 2061 | 2057 | ||
| 2062 | mm = list_entry(p, struct mm_struct, mmlist); | 2058 | mm = list_entry(p, struct mm_struct, mmlist); |
| 2063 | if (!mmget_not_zero(mm)) | 2059 | if (!mmget_not_zero(mm)) |
| @@ -2084,7 +2080,9 @@ retry: | |||
| 2084 | mmput(prev_mm); | 2080 | mmput(prev_mm); |
| 2085 | 2081 | ||
| 2086 | i = 0; | 2082 | i = 0; |
| 2087 | while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { | 2083 | while (si->inuse_pages && |
| 2084 | !signal_pending(current) && | ||
| 2085 | (i = find_next_to_unuse(si, i, frontswap)) != 0) { | ||
| 2088 | 2086 | ||
| 2089 | entry = swp_entry(type, i); | 2087 | entry = swp_entry(type, i); |
| 2090 | page = find_get_page(swap_address_space(entry), i); | 2088 | page = find_get_page(swap_address_space(entry), i); |
| @@ -2117,14 +2115,18 @@ retry: | |||
| 2117 | * If yes, we would need to do retry the unuse logic again. | 2115 | * If yes, we would need to do retry the unuse logic again. |
| 2118 | * Under global memory pressure, swap entries can be reinserted back | 2116 | * Under global memory pressure, swap entries can be reinserted back |
| 2119 | * into process space after the mmlist loop above passes over them. | 2117 | * into process space after the mmlist loop above passes over them. |
| 2120 | * It's not worth continuously retrying to unuse the swap in this case. | 2118 | * |
| 2121 | * So we try SWAP_UNUSE_MAX_TRIES times. | 2119 | * Limit the number of retries? No: when mmget_not_zero() above fails, |
| 2120 | * that mm is likely to be freeing swap from exit_mmap(), which proceeds | ||
| 2121 | * at its own independent pace; and even shmem_writepage() could have | ||
| 2122 | * been preempted after get_swap_page(), temporarily hiding that swap. | ||
| 2123 | * It's easy and robust (though cpu-intensive) just to keep retrying. | ||
| 2122 | */ | 2124 | */ |
| 2123 | if (++retries >= SWAP_UNUSE_MAX_TRIES) | 2125 | if (si->inuse_pages) { |
| 2124 | retval = -EBUSY; | 2126 | if (!signal_pending(current)) |
| 2125 | else if (si->inuse_pages) | 2127 | goto retry; |
| 2126 | goto retry; | 2128 | retval = -EINTR; |
| 2127 | 2129 | } | |
| 2128 | out: | 2130 | out: |
| 2129 | return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval; | 2131 | return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval; |
| 2130 | } | 2132 | } |
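With SWAP_UNUSE_MAX_TRIES gone, try_to_unuse() keeps retrying for as long as si->inuse_pages stays non-zero, and each loop now also bails out on a pending signal so the operation remains interruptible. Reduced to its control flow, with si_still_in_use() and one_pass() as stand-ins for the real scans:

static int drain_swap_sketch(void)
{
	int ret = 0;

	while (si_still_in_use()) {
		if (signal_pending(current))
			return -EINTR;	/* user asked us to stop */
		ret = one_pass();	/* one sweep over mms and the cache */
		if (ret)
			break;
	}
	return ret;
}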
| @@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user); | |||
| 204 | * @s: The string to duplicate | 204 | * @s: The string to duplicate |
| 205 | * @n: Maximum number of bytes to copy, including the trailing NUL. | 205 | * @n: Maximum number of bytes to copy, including the trailing NUL. |
| 206 | * | 206 | * |
| 207 | * Return: newly allocated copy of @s or %NULL in case of error | 207 | * Return: newly allocated copy of @s or an ERR_PTR() in case of error |
| 208 | */ | 208 | */ |
| 209 | char *strndup_user(const char __user *s, long n) | 209 | char *strndup_user(const char __user *s, long n) |
| 210 | { | 210 | { |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a5ad0b35ab8e..a815f73ee4d5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -2176,7 +2176,6 @@ static void shrink_active_list(unsigned long nr_to_scan, | |||
| 2176 | * 10TB 320 32GB | 2176 | * 10TB 320 32GB |
| 2177 | */ | 2177 | */ |
| 2178 | static bool inactive_list_is_low(struct lruvec *lruvec, bool file, | 2178 | static bool inactive_list_is_low(struct lruvec *lruvec, bool file, |
| 2179 | struct mem_cgroup *memcg, | ||
| 2180 | struct scan_control *sc, bool actual_reclaim) | 2179 | struct scan_control *sc, bool actual_reclaim) |
| 2181 | { | 2180 | { |
| 2182 | enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; | 2181 | enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE; |
| @@ -2197,16 +2196,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, | |||
| 2197 | inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); | 2196 | inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); |
| 2198 | active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); | 2197 | active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); |
| 2199 | 2198 | ||
| 2200 | if (memcg) | ||
| 2201 | refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE); | ||
| 2202 | else | ||
| 2203 | refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); | ||
| 2204 | |||
| 2205 | /* | 2199 | /* |
| 2206 | * When refaults are being observed, it means a new workingset | 2200 | * When refaults are being observed, it means a new workingset |
| 2207 | * is being established. Disable active list protection to get | 2201 | * is being established. Disable active list protection to get |
| 2208 | * rid of the stale workingset quickly. | 2202 | * rid of the stale workingset quickly. |
| 2209 | */ | 2203 | */ |
| 2204 | refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE); | ||
| 2210 | if (file && actual_reclaim && lruvec->refaults != refaults) { | 2205 | if (file && actual_reclaim && lruvec->refaults != refaults) { |
| 2211 | inactive_ratio = 0; | 2206 | inactive_ratio = 0; |
| 2212 | } else { | 2207 | } else { |
| @@ -2227,12 +2222,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file, | |||
| 2227 | } | 2222 | } |
| 2228 | 2223 | ||
| 2229 | static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, | 2224 | static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, |
| 2230 | struct lruvec *lruvec, struct mem_cgroup *memcg, | 2225 | struct lruvec *lruvec, struct scan_control *sc) |
| 2231 | struct scan_control *sc) | ||
| 2232 | { | 2226 | { |
| 2233 | if (is_active_lru(lru)) { | 2227 | if (is_active_lru(lru)) { |
| 2234 | if (inactive_list_is_low(lruvec, is_file_lru(lru), | 2228 | if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true)) |
| 2235 | memcg, sc, true)) | ||
| 2236 | shrink_active_list(nr_to_scan, lruvec, sc, lru); | 2229 | shrink_active_list(nr_to_scan, lruvec, sc, lru); |
| 2237 | return 0; | 2230 | return 0; |
| 2238 | } | 2231 | } |
| @@ -2332,7 +2325,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, | |||
| 2332 | * anonymous pages on the LRU in eligible zones. | 2325 | * anonymous pages on the LRU in eligible zones. |
| 2333 | * Otherwise, the small LRU gets thrashed. | 2326 | * Otherwise, the small LRU gets thrashed. |
| 2334 | */ | 2327 | */ |
| 2335 | if (!inactive_list_is_low(lruvec, false, memcg, sc, false) && | 2328 | if (!inactive_list_is_low(lruvec, false, sc, false) && |
| 2336 | lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx) | 2329 | lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx) |
| 2337 | >> sc->priority) { | 2330 | >> sc->priority) { |
| 2338 | scan_balance = SCAN_ANON; | 2331 | scan_balance = SCAN_ANON; |
| @@ -2350,7 +2343,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, | |||
| 2350 | * lruvec even if it has plenty of old anonymous pages unless the | 2343 | * lruvec even if it has plenty of old anonymous pages unless the |
| 2351 | * system is under heavy pressure. | 2344 | * system is under heavy pressure. |
| 2352 | */ | 2345 | */ |
| 2353 | if (!inactive_list_is_low(lruvec, true, memcg, sc, false) && | 2346 | if (!inactive_list_is_low(lruvec, true, sc, false) && |
| 2354 | lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { | 2347 | lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { |
| 2355 | scan_balance = SCAN_FILE; | 2348 | scan_balance = SCAN_FILE; |
| 2356 | goto out; | 2349 | goto out; |
| @@ -2503,7 +2496,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc | |||
| 2503 | nr[lru] -= nr_to_scan; | 2496 | nr[lru] -= nr_to_scan; |
| 2504 | 2497 | ||
| 2505 | nr_reclaimed += shrink_list(lru, nr_to_scan, | 2498 | nr_reclaimed += shrink_list(lru, nr_to_scan, |
| 2506 | lruvec, memcg, sc); | 2499 | lruvec, sc); |
| 2507 | } | 2500 | } |
| 2508 | } | 2501 | } |
| 2509 | 2502 | ||
| @@ -2570,7 +2563,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc | |||
| 2570 | * Even if we did not try to evict anon pages at all, we want to | 2563 | * Even if we did not try to evict anon pages at all, we want to |
| 2571 | * rebalance the anon lru active/inactive ratio. | 2564 | * rebalance the anon lru active/inactive ratio. |
| 2572 | */ | 2565 | */ |
| 2573 | if (inactive_list_is_low(lruvec, false, memcg, sc, true)) | 2566 | if (inactive_list_is_low(lruvec, false, sc, true)) |
| 2574 | shrink_active_list(SWAP_CLUSTER_MAX, lruvec, | 2567 | shrink_active_list(SWAP_CLUSTER_MAX, lruvec, |
| 2575 | sc, LRU_ACTIVE_ANON); | 2568 | sc, LRU_ACTIVE_ANON); |
| 2576 | } | 2569 | } |
| @@ -2969,12 +2962,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat) | |||
| 2969 | unsigned long refaults; | 2962 | unsigned long refaults; |
| 2970 | struct lruvec *lruvec; | 2963 | struct lruvec *lruvec; |
| 2971 | 2964 | ||
| 2972 | if (memcg) | ||
| 2973 | refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE); | ||
| 2974 | else | ||
| 2975 | refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE); | ||
| 2976 | |||
| 2977 | lruvec = mem_cgroup_lruvec(pgdat, memcg); | 2965 | lruvec = mem_cgroup_lruvec(pgdat, memcg); |
| 2966 | refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE); | ||
| 2978 | lruvec->refaults = refaults; | 2967 | lruvec->refaults = refaults; |
| 2979 | } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL))); | 2968 | } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL))); |
| 2980 | } | 2969 | } |
| @@ -3339,7 +3328,7 @@ static void age_active_anon(struct pglist_data *pgdat, | |||
| 3339 | do { | 3328 | do { |
| 3340 | struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); | 3329 | struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); |
| 3341 | 3330 | ||
| 3342 | if (inactive_list_is_low(lruvec, false, memcg, sc, true)) | 3331 | if (inactive_list_is_low(lruvec, false, sc, true)) |
| 3343 | shrink_active_list(SWAP_CLUSTER_MAX, lruvec, | 3332 | shrink_active_list(SWAP_CLUSTER_MAX, lruvec, |
| 3344 | sc, LRU_ACTIVE_ANON); | 3333 | sc, LRU_ACTIVE_ANON); |
| 3345 | 3334 | ||
diff --git a/mm/vmstat.c b/mm/vmstat.c index 36b56f858f0f..a7d493366a65 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = { | |||
| 1274 | #endif | 1274 | #endif |
| 1275 | #endif /* CONFIG_MEMORY_BALLOON */ | 1275 | #endif /* CONFIG_MEMORY_BALLOON */ |
| 1276 | #ifdef CONFIG_DEBUG_TLBFLUSH | 1276 | #ifdef CONFIG_DEBUG_TLBFLUSH |
| 1277 | #ifdef CONFIG_SMP | ||
| 1278 | "nr_tlb_remote_flush", | 1277 | "nr_tlb_remote_flush", |
| 1279 | "nr_tlb_remote_flush_received", | 1278 | "nr_tlb_remote_flush_received", |
| 1280 | #else | ||
| 1281 | "", /* nr_tlb_remote_flush */ | ||
| 1282 | "", /* nr_tlb_remote_flush_received */ | ||
| 1283 | #endif /* CONFIG_SMP */ | ||
| 1284 | "nr_tlb_local_flush_all", | 1279 | "nr_tlb_local_flush_all", |
| 1285 | "nr_tlb_local_flush_one", | 1280 | "nr_tlb_local_flush_one", |
| 1286 | #endif /* CONFIG_DEBUG_TLBFLUSH */ | 1281 | #endif /* CONFIG_DEBUG_TLBFLUSH */ |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 15293c2a5dd8..8d77b6ee4477 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev) | |||
| 443 | return rc; | 443 | return rc; |
| 444 | } | 444 | } |
| 445 | 445 | ||
| 446 | static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) | 446 | static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, |
| 447 | struct scatterlist *sgl, unsigned int sgc) | ||
| 447 | { | 448 | { |
| 448 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; | 449 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; |
| 449 | const struct net_device_ops *ops = real_dev->netdev_ops; | 450 | const struct net_device_ops *ops = real_dev->netdev_ops; |
| 450 | int rc = -EINVAL; | 451 | int rc = 0; |
| 452 | |||
| 453 | if (ops->ndo_fcoe_ddp_target) | ||
| 454 | rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); | ||
| 451 | 455 | ||
| 452 | if (ops->ndo_fcoe_get_wwn) | ||
| 453 | rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); | ||
| 454 | return rc; | 456 | return rc; |
| 455 | } | 457 | } |
| 458 | #endif | ||
| 456 | 459 | ||
| 457 | static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, | 460 | #ifdef NETDEV_FCOE_WWNN |
| 458 | struct scatterlist *sgl, unsigned int sgc) | 461 | static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) |
| 459 | { | 462 | { |
| 460 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; | 463 | struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; |
| 461 | const struct net_device_ops *ops = real_dev->netdev_ops; | 464 | const struct net_device_ops *ops = real_dev->netdev_ops; |
| 462 | int rc = 0; | 465 | int rc = -EINVAL; |
| 463 | |||
| 464 | if (ops->ndo_fcoe_ddp_target) | ||
| 465 | rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); | ||
| 466 | 466 | ||
| 467 | if (ops->ndo_fcoe_get_wwn) | ||
| 468 | rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); | ||
| 467 | return rc; | 469 | return rc; |
| 468 | } | 470 | } |
| 469 | #endif | 471 | #endif |
| @@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = { | |||
| 794 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | 796 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, |
| 795 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | 797 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, |
| 796 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | 798 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, |
| 797 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
| 798 | .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, | 799 | .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, |
| 799 | #endif | 800 | #endif |
| 801 | #ifdef NETDEV_FCOE_WWNN | ||
| 802 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
| 803 | #endif | ||
| 800 | #ifdef CONFIG_NET_POLL_CONTROLLER | 804 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 801 | .ndo_poll_controller = vlan_dev_poll_controller, | 805 | .ndo_poll_controller = vlan_dev_poll_controller, |
| 802 | .ndo_netpoll_setup = vlan_dev_netpoll_setup, | 806 | .ndo_netpoll_setup = vlan_dev_netpoll_setup, |
diff --git a/net/atm/lec.c b/net/atm/lec.c index d7f5cf5b7594..ad4f829193f0 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
| @@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) | |||
| 710 | 710 | ||
| 711 | static int lec_mcast_attach(struct atm_vcc *vcc, int arg) | 711 | static int lec_mcast_attach(struct atm_vcc *vcc, int arg) |
| 712 | { | 712 | { |
| 713 | if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) | 713 | if (arg < 0 || arg >= MAX_LEC_ITF) |
| 714 | return -EINVAL; | ||
| 715 | arg = array_index_nospec(arg, MAX_LEC_ITF); | ||
| 716 | if (!dev_lec[arg]) | ||
| 714 | return -EINVAL; | 717 | return -EINVAL; |
| 715 | vcc->proto_data = dev_lec[arg]; | 718 | vcc->proto_data = dev_lec[arg]; |
| 716 | return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); | 719 | return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); |
| @@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) | |||
| 728 | i = arg; | 731 | i = arg; |
| 729 | if (arg >= MAX_LEC_ITF) | 732 | if (arg >= MAX_LEC_ITF) |
| 730 | return -EINVAL; | 733 | return -EINVAL; |
| 734 | i = array_index_nospec(arg, MAX_LEC_ITF); | ||
| 731 | if (!dev_lec[i]) { | 735 | if (!dev_lec[i]) { |
| 732 | int size; | 736 | int size; |
| 733 | 737 | ||
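The two net/atm/lec.c hunks above follow the standard Spectre-v1 hardening pattern: bounds-check the user-supplied index first, then clamp it with array_index_nospec() before it is used to index dev_lec[], so a mispredicted branch cannot speculatively read out of bounds. A minimal userspace sketch of the same shape (the helper below is a simplified stand-in for the kernel macro, which clamps without a conditional branch):

#include <stdio.h>
#include <stddef.h>

#define MAX_ITF 48

static int table[MAX_ITF];

/* Simplified stand-in for the kernel's array_index_nospec(): the real
 * macro masks the index without a conditional branch so it cannot be
 * used speculatively out of bounds.
 */
static size_t index_nospec(size_t idx, size_t size)
{
        return idx < size ? idx : 0;
}

static int lookup(int arg)
{
        if (arg < 0 || arg >= MAX_ITF)
                return -1;                              /* architectural bounds check */
        arg = (int)index_nospec((size_t)arg, MAX_ITF);  /* clamp the value used for indexing */
        return table[arg];
}

int main(void)
{
        table[3] = 42;
        printf("%d %d\n", lookup(3), lookup(99));       /* 42 -1 */
        return 0;
}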
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index a9b7919c9de5..d5df0114f08a 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
| @@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) | |||
| 104 | 104 | ||
| 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); | 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); |
| 106 | 106 | ||
| 107 | /* free the TID stats immediately */ | 107 | if (!ret) { |
| 108 | cfg80211_sinfo_release_content(&sinfo); | 108 | /* free the TID stats immediately */ |
| 109 | cfg80211_sinfo_release_content(&sinfo); | ||
| 110 | } | ||
| 109 | 111 | ||
| 110 | dev_put(real_netdev); | 112 | dev_put(real_netdev); |
| 111 | if (ret == -ENOENT) { | 113 | if (ret == -ENOENT) { |
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ef39aabdb694..4fb01108e5f5 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
| @@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, | |||
| 803 | const u8 *mac, const unsigned short vid) | 803 | const u8 *mac, const unsigned short vid) |
| 804 | { | 804 | { |
| 805 | struct batadv_bla_claim search_claim, *claim; | 805 | struct batadv_bla_claim search_claim, *claim; |
| 806 | struct batadv_bla_claim *claim_removed_entry; | ||
| 807 | struct hlist_node *claim_removed_node; | ||
| 806 | 808 | ||
| 807 | ether_addr_copy(search_claim.addr, mac); | 809 | ether_addr_copy(search_claim.addr, mac); |
| 808 | search_claim.vid = vid; | 810 | search_claim.vid = vid; |
| @@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, | |||
| 813 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, | 815 | batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, |
| 814 | mac, batadv_print_vid(vid)); | 816 | mac, batadv_print_vid(vid)); |
| 815 | 817 | ||
| 816 | batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, | 818 | claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash, |
| 817 | batadv_choose_claim, claim); | 819 | batadv_compare_claim, |
| 818 | batadv_claim_put(claim); /* reference from the hash is gone */ | 820 | batadv_choose_claim, claim); |
| 821 | if (!claim_removed_node) | ||
| 822 | goto free_claim; | ||
| 819 | 823 | ||
| 824 | /* reference from the hash is gone */ | ||
| 825 | claim_removed_entry = hlist_entry(claim_removed_node, | ||
| 826 | struct batadv_bla_claim, hash_entry); | ||
| 827 | batadv_claim_put(claim_removed_entry); | ||
| 828 | |||
| 829 | free_claim: | ||
| 820 | /* don't need the reference from hash_find() anymore */ | 830 | /* don't need the reference from hash_find() anymore */ |
| 821 | batadv_claim_put(claim); | 831 | batadv_claim_put(claim); |
| 822 | } | 832 | } |
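The bridge_loop_avoidance.c change works because batadv_hash_remove() returns the hlist_node it unlinked (or NULL if nothing matched), and hlist_entry() -- a container_of() wrapper -- converts that node back into the structure that embeds it, so the reference held by the hash is only dropped when the hash really removed an entry. A small userspace sketch of the node-to-container step, with hypothetical names standing in for the batman-adv types:

#include <stdio.h>
#include <stddef.h>

/* Userspace analogue of the kernel's container_of()/hlist_entry(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hnode { struct hnode *next; };

struct claim {
        int vid;
        int refcount;
        struct hnode hash_entry;
};

/* Stand-in for batadv_hash_remove(): returns the unlinked node, or NULL
 * when no matching entry was in the hash.
 */
static struct hnode *hash_remove(struct claim *c, int present)
{
        return present ? &c->hash_entry : NULL;
}

int main(void)
{
        struct claim c = { .vid = 7, .refcount = 2 };
        struct hnode *node = hash_remove(&c, 1);

        if (node) {
                /* drop the hash's reference only if the hash really held one */
                struct claim *removed = container_of(node, struct claim, hash_entry);
                removed->refcount--;
        }
        printf("vid %d refcount %d\n", c.vid, c.refcount);
        return 0;
}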
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 0b4b3fb778a6..208655cf6717 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c | |||
| @@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
| 1116 | struct attribute *attr, | 1116 | struct attribute *attr, |
| 1117 | char *buff, size_t count) | 1117 | char *buff, size_t count) |
| 1118 | { | 1118 | { |
| 1119 | struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); | ||
| 1120 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); | 1119 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); |
| 1121 | struct batadv_hard_iface *hard_iface; | 1120 | struct batadv_hard_iface *hard_iface; |
| 1121 | struct batadv_priv *bat_priv; | ||
| 1122 | u32 tp_override; | 1122 | u32 tp_override; |
| 1123 | u32 old_tp_override; | 1123 | u32 old_tp_override; |
| 1124 | bool ret; | 1124 | bool ret; |
| @@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
| 1147 | 1147 | ||
| 1148 | atomic_set(&hard_iface->bat_v.throughput_override, tp_override); | 1148 | atomic_set(&hard_iface->bat_v.throughput_override, tp_override); |
| 1149 | 1149 | ||
| 1150 | batadv_netlink_notify_hardif(bat_priv, hard_iface); | 1150 | if (hard_iface->soft_iface) { |
| 1151 | bat_priv = netdev_priv(hard_iface->soft_iface); | ||
| 1152 | batadv_netlink_notify_hardif(bat_priv, hard_iface); | ||
| 1153 | } | ||
| 1151 | 1154 | ||
| 1152 | out: | 1155 | out: |
| 1153 | batadv_hardif_put(hard_iface); | 1156 | batadv_hardif_put(hard_iface); |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index f73d79139ae7..26c4e2493ddf 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, | |||
| 616 | struct batadv_tt_global_entry *tt_global, | 616 | struct batadv_tt_global_entry *tt_global, |
| 617 | const char *message) | 617 | const char *message) |
| 618 | { | 618 | { |
| 619 | struct batadv_tt_global_entry *tt_removed_entry; | ||
| 620 | struct hlist_node *tt_removed_node; | ||
| 621 | |||
| 619 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 622 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
| 620 | "Deleting global tt entry %pM (vid: %d): %s\n", | 623 | "Deleting global tt entry %pM (vid: %d): %s\n", |
| 621 | tt_global->common.addr, | 624 | tt_global->common.addr, |
| 622 | batadv_print_vid(tt_global->common.vid), message); | 625 | batadv_print_vid(tt_global->common.vid), message); |
| 623 | 626 | ||
| 624 | batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, | 627 | tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash, |
| 625 | batadv_choose_tt, &tt_global->common); | 628 | batadv_compare_tt, |
| 626 | batadv_tt_global_entry_put(tt_global); | 629 | batadv_choose_tt, |
| 630 | &tt_global->common); | ||
| 631 | if (!tt_removed_node) | ||
| 632 | return; | ||
| 633 | |||
| 634 | /* drop reference of remove hash entry */ | ||
| 635 | tt_removed_entry = hlist_entry(tt_removed_node, | ||
| 636 | struct batadv_tt_global_entry, | ||
| 637 | common.hash_entry); | ||
| 638 | batadv_tt_global_entry_put(tt_removed_entry); | ||
| 627 | } | 639 | } |
| 628 | 640 | ||
| 629 | /** | 641 | /** |
| @@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
| 1337 | unsigned short vid, const char *message, | 1349 | unsigned short vid, const char *message, |
| 1338 | bool roaming) | 1350 | bool roaming) |
| 1339 | { | 1351 | { |
| 1352 | struct batadv_tt_local_entry *tt_removed_entry; | ||
| 1340 | struct batadv_tt_local_entry *tt_local_entry; | 1353 | struct batadv_tt_local_entry *tt_local_entry; |
| 1341 | u16 flags, curr_flags = BATADV_NO_FLAGS; | 1354 | u16 flags, curr_flags = BATADV_NO_FLAGS; |
| 1342 | void *tt_entry_exists; | 1355 | struct hlist_node *tt_removed_node; |
| 1343 | 1356 | ||
| 1344 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); | 1357 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); |
| 1345 | if (!tt_local_entry) | 1358 | if (!tt_local_entry) |
| @@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
| 1368 | */ | 1381 | */ |
| 1369 | batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); | 1382 | batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); |
| 1370 | 1383 | ||
| 1371 | tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, | 1384 | tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash, |
| 1372 | batadv_compare_tt, | 1385 | batadv_compare_tt, |
| 1373 | batadv_choose_tt, | 1386 | batadv_choose_tt, |
| 1374 | &tt_local_entry->common); | 1387 | &tt_local_entry->common); |
| 1375 | if (!tt_entry_exists) | 1388 | if (!tt_removed_node) |
| 1376 | goto out; | 1389 | goto out; |
| 1377 | 1390 | ||
| 1378 | /* extra call to free the local tt entry */ | 1391 | /* drop reference of remove hash entry */ |
| 1379 | batadv_tt_local_entry_put(tt_local_entry); | 1392 | tt_removed_entry = hlist_entry(tt_removed_node, |
| 1393 | struct batadv_tt_local_entry, | ||
| 1394 | common.hash_entry); | ||
| 1395 | batadv_tt_local_entry_put(tt_removed_entry); | ||
| 1380 | 1396 | ||
| 1381 | out: | 1397 | out: |
| 1382 | if (tt_local_entry) | 1398 | if (tt_local_entry) |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 9a580999ca57..d892b7c3cc42 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, | |||
| 523 | struct sock *sk = sock->sk; | 523 | struct sock *sk = sock->sk; |
| 524 | int err = 0; | 524 | int err = 0; |
| 525 | 525 | ||
| 526 | BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); | ||
| 527 | |||
| 528 | if (!addr || addr_len < sizeof(struct sockaddr_sco) || | 526 | if (!addr || addr_len < sizeof(struct sockaddr_sco) || |
| 529 | addr->sa_family != AF_BLUETOOTH) | 527 | addr->sa_family != AF_BLUETOOTH) |
| 530 | return -EINVAL; | 528 | return -EINVAL; |
| 531 | 529 | ||
| 530 | BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); | ||
| 531 | |||
| 532 | lock_sock(sk); | 532 | lock_sock(sk); |
| 533 | 533 | ||
| 534 | if (sk->sk_state != BT_OPEN) { | 534 | if (sk->sk_state != BT_OPEN) { |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5ea7e56119c1..ba303ee99b9b 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
| @@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb) | |||
| 197 | /* note: already called with rcu_read_lock */ | 197 | /* note: already called with rcu_read_lock */ |
| 198 | static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 198 | static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
| 199 | { | 199 | { |
| 200 | struct net_bridge_port *p = br_port_get_rcu(skb->dev); | ||
| 201 | |||
| 202 | __br_handle_local_finish(skb); | 200 | __br_handle_local_finish(skb); |
| 203 | 201 | ||
| 204 | BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; | 202 | /* return 1 to signal the okfn() was called so it's ok to use the skb */ |
| 205 | br_pass_frame_up(skb); | 203 | return 1; |
| 206 | return 0; | ||
| 207 | } | 204 | } |
| 208 | 205 | ||
| 209 | /* | 206 | /* |
| @@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) | |||
| 280 | goto forward; | 277 | goto forward; |
| 281 | } | 278 | } |
| 282 | 279 | ||
| 283 | /* Deliver packet to local host only */ | 280 | /* The else clause should be hit when nf_hook(): |
| 284 | NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev), | 281 | * - returns < 0 (drop/error) |
| 285 | NULL, skb, skb->dev, NULL, br_handle_local_finish); | 282 | * - returns = 0 (stolen/nf_queue) |
| 286 | return RX_HANDLER_CONSUMED; | 283 | * Thus return 1 from the okfn() to signal the skb is ok to pass |
| 284 | */ | ||
| 285 | if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, | ||
| 286 | dev_net(skb->dev), NULL, skb, skb->dev, NULL, | ||
| 287 | br_handle_local_finish) == 1) { | ||
| 288 | return RX_HANDLER_PASS; | ||
| 289 | } else { | ||
| 290 | return RX_HANDLER_CONSUMED; | ||
| 291 | } | ||
| 287 | } | 292 | } |
| 288 | 293 | ||
| 289 | forward: | 294 | forward: |
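The br_input.c rework relies on the NF_HOOK() calling convention: when the hook verdict is ACCEPT, the okfn() is invoked and its return value is propagated, while drop and stolen/queued outcomes come back as a value of 0 or less. Returning 1 from br_handle_local_finish() therefore lets br_handle_frame() distinguish "the okfn ran, the skb is still ours to pass up the stack" from "netfilter consumed the skb". A toy model of that dispatch (names and the simplified hook runner are illustrative, not the kernel implementation):

#include <stdio.h>

enum { VERDICT_DROP = -1, VERDICT_STOLEN = 0, VERDICT_ACCEPT = 1 };

struct pkt { int id; };

/* okfn: returns 1 so the caller knows it ran and the packet is still
 * valid for further processing (mirrors the new br_handle_local_finish()).
 */
static int handle_local_finish(struct pkt *p)
{
        (void)p;
        return 1;
}

/* Toy model of NF_HOOK(): on ACCEPT it calls okfn() and propagates its
 * return value; drop or stolen/queued outcomes come back as <= 0.
 */
static int hook_run(int verdict, struct pkt *p, int (*okfn)(struct pkt *))
{
        if (verdict == VERDICT_ACCEPT)
                return okfn(p);
        return verdict == VERDICT_DROP ? -1 : 0;
}

int main(void)
{
        struct pkt p = { .id = 1 };

        if (hook_run(VERDICT_ACCEPT, &p, handle_local_finish) == 1)
                printf("RX_HANDLER_PASS\n");     /* bridge lets the stack keep the skb */
        else
                printf("RX_HANDLER_CONSUMED\n"); /* netfilter took it */
        return 0;
}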
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index a0e369179f6d..45e7f4173bba 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br, | |||
| 601 | if (ipv4_is_local_multicast(group)) | 601 | if (ipv4_is_local_multicast(group)) |
| 602 | return 0; | 602 | return 0; |
| 603 | 603 | ||
| 604 | memset(&br_group, 0, sizeof(br_group)); | ||
| 604 | br_group.u.ip4 = group; | 605 | br_group.u.ip4 = group; |
| 605 | br_group.proto = htons(ETH_P_IP); | 606 | br_group.proto = htons(ETH_P_IP); |
| 606 | br_group.vid = vid; | 607 | br_group.vid = vid; |
| @@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, | |||
| 1497 | 1498 | ||
| 1498 | own_query = port ? &port->ip4_own_query : &br->ip4_own_query; | 1499 | own_query = port ? &port->ip4_own_query : &br->ip4_own_query; |
| 1499 | 1500 | ||
| 1501 | memset(&br_group, 0, sizeof(br_group)); | ||
| 1500 | br_group.u.ip4 = group; | 1502 | br_group.u.ip4 = group; |
| 1501 | br_group.proto = htons(ETH_P_IP); | 1503 | br_group.proto = htons(ETH_P_IP); |
| 1502 | br_group.vid = vid; | 1504 | br_group.vid = vid; |
| @@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
| 1520 | 1522 | ||
| 1521 | own_query = port ? &port->ip6_own_query : &br->ip6_own_query; | 1523 | own_query = port ? &port->ip6_own_query : &br->ip6_own_query; |
| 1522 | 1524 | ||
| 1525 | memset(&br_group, 0, sizeof(br_group)); | ||
| 1523 | br_group.u.ip6 = *group; | 1526 | br_group.u.ip6 = *group; |
| 1524 | br_group.proto = htons(ETH_P_IPV6); | 1527 | br_group.proto = htons(ETH_P_IPV6); |
| 1525 | br_group.vid = vid; | 1528 | br_group.vid = vid; |
| @@ -2028,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
| 2028 | 2031 | ||
| 2029 | __br_multicast_open(br, query); | 2032 | __br_multicast_open(br, query); |
| 2030 | 2033 | ||
| 2031 | list_for_each_entry(port, &br->port_list, list) { | 2034 | rcu_read_lock(); |
| 2035 | list_for_each_entry_rcu(port, &br->port_list, list) { | ||
| 2032 | if (port->state == BR_STATE_DISABLED || | 2036 | if (port->state == BR_STATE_DISABLED || |
| 2033 | port->state == BR_STATE_BLOCKING) | 2037 | port->state == BR_STATE_BLOCKING) |
| 2034 | continue; | 2038 | continue; |
| @@ -2040,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
| 2040 | br_multicast_enable(&port->ip6_own_query); | 2044 | br_multicast_enable(&port->ip6_own_query); |
| 2041 | #endif | 2045 | #endif |
| 2042 | } | 2046 | } |
| 2047 | rcu_read_unlock(); | ||
| 2043 | } | 2048 | } |
| 2044 | 2049 | ||
| 2045 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) | 2050 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) |
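The three memset() additions in br_multicast.c zero the on-stack br_ip key before only some of its fields are filled in; without that, leftover stack bytes in the unused union members and padding would feed into the hash and compare callbacks and make group lookups unreliable. A userspace sketch of the idea, using a toy key struct rather than the real br_ip layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy multicast group key: a union plus protocol and vid, like br_ip.
 * Only some union members are written for a given lookup, so any bytes
 * left over from the stack would change the hash/compare result.
 */
struct mcast_key {
        union {
                uint32_t ip4;
                uint8_t  ip6[16];
        } u;
        uint16_t proto;
        uint16_t vid;
};

static int key_equal(const struct mcast_key *a, const struct mcast_key *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
        struct mcast_key a, b;

        memset(&a, 0, sizeof(a));       /* like the added memset(&br_group, ...) */
        a.u.ip4 = 0xe0000001u;
        a.proto = 0x0800;
        a.vid = 1;

        memset(&b, 0, sizeof(b));
        b.u.ip4 = 0xe0000001u;
        b.proto = 0x0800;
        b.vid = 1;

        printf("equal: %d\n", key_equal(&a, &b)); /* reliably 1 because both were zeroed */
        return 0;
}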
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 9c07591b0232..7104cf13da84 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
| @@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) | |||
| 1441 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, | 1441 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, |
| 1442 | br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || | 1442 | br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || |
| 1443 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, | 1443 | nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, |
| 1444 | br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT))) | 1444 | br_opt_get(br, BROPT_VLAN_STATS_PER_PORT))) |
| 1445 | return -EMSGSIZE; | 1445 | return -EMSGSIZE; |
| 1446 | #endif | 1446 | #endif |
| 1447 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 1447 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index eb15891f8b9f..3cad01ac64e4 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
| @@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | |||
| 2032 | if (match_kern) | 2032 | if (match_kern) |
| 2033 | match_kern->match_size = ret; | 2033 | match_kern->match_size = ret; |
| 2034 | 2034 | ||
| 2035 | if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) | 2035 | /* rule should have no remaining data after target */ |
| 2036 | if (type == EBT_COMPAT_TARGET && size_left) | ||
| 2036 | return -EINVAL; | 2037 | return -EINVAL; |
| 2037 | 2038 | ||
| 2038 | match32 = (struct compat_ebt_entry_mwt *) buf; | 2039 | match32 = (struct compat_ebt_entry_mwt *) buf; |
diff --git a/net/core/datagram.c b/net/core/datagram.c index b2651bb6d2a3..e657289db4ac 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
| @@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags, | |||
| 279 | break; | 279 | break; |
| 280 | 280 | ||
| 281 | sk_busy_loop(sk, flags & MSG_DONTWAIT); | 281 | sk_busy_loop(sk, flags & MSG_DONTWAIT); |
| 282 | } while (!skb_queue_empty(&sk->sk_receive_queue)); | 282 | } while (sk->sk_receive_queue.prev != *last); |
| 283 | 283 | ||
| 284 | error = -EAGAIN; | 284 | error = -EAGAIN; |
| 285 | 285 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 2b67f2aa59dd..f409406254dd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
| 1184 | BUG_ON(!dev_net(dev)); | 1184 | BUG_ON(!dev_net(dev)); |
| 1185 | 1185 | ||
| 1186 | net = dev_net(dev); | 1186 | net = dev_net(dev); |
| 1187 | if (dev->flags & IFF_UP) | 1187 | |
| 1188 | /* Some auto-enslaved devices e.g. failover slaves are | ||
| 1189 | * special, as userspace might rename the device after | ||
| 1190 | * the interface had been brought up and running since | ||
| 1191 | * the point kernel initiated auto-enslavement. Allow | ||
| 1192 | * live name change even when these slave devices are | ||
| 1193 | * up and running. | ||
| 1194 | * | ||
| 1195 | * Typically, users of these auto-enslaving devices | ||
| 1196 | * don't actually care about slave name change, as | ||
| 1197 | * they are supposed to operate on master interface | ||
| 1198 | * directly. | ||
| 1199 | */ | ||
| 1200 | if (dev->flags & IFF_UP && | ||
| 1201 | likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) | ||
| 1188 | return -EBUSY; | 1202 | return -EBUSY; |
| 1189 | 1203 | ||
| 1190 | write_seqcount_begin(&devnet_rename_seq); | 1204 | write_seqcount_begin(&devnet_rename_seq); |
| @@ -5014,8 +5028,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head, | |||
| 5014 | if (pt_prev->list_func != NULL) | 5028 | if (pt_prev->list_func != NULL) |
| 5015 | pt_prev->list_func(head, pt_prev, orig_dev); | 5029 | pt_prev->list_func(head, pt_prev, orig_dev); |
| 5016 | else | 5030 | else |
| 5017 | list_for_each_entry_safe(skb, next, head, list) | 5031 | list_for_each_entry_safe(skb, next, head, list) { |
| 5032 | skb_list_del_init(skb); | ||
| 5018 | pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 5033 | pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
| 5034 | } | ||
| 5019 | } | 5035 | } |
| 5020 | 5036 | ||
| 5021 | static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) | 5037 | static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) |
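The second dev.c hunk unlinks each skb from the receive list with skb_list_del_init() before handing it to pt_prev->func(), since a per-packet handler expects to own a standalone skb and must not see (or corrupt) the list linkage. The same walk-remember-next-then-detach pattern in plain userspace C, with a toy item type in place of sk_buff:

#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
        struct item *next;
};

/* Per-item handler: like a protocol's ->func(skb, ...), it may keep or
 * free the item and must not see stale list linkage.
 */
static void deliver(struct item *it)
{
        printf("deliver %d (next=%p)\n", it->id, (void *)it->next);
        free(it);
}

int main(void)
{
        struct item *head = NULL, *it, *next;
        int i;

        for (i = 3; i >= 1; i--) {
                it = malloc(sizeof(*it));
                if (!it)
                        return 1;
                it->id = i;
                it->next = head;
                head = it;
        }

        /* "Safe" walk: remember next, then detach the item before handing
         * it off -- the analogue of skb_list_del_init() in the dev.c hunk.
         */
        for (it = head; it; it = next) {
                next = it->next;
                it->next = NULL;        /* handler now owns a standalone item */
                deliver(it);
        }
        return 0;
}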
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index b1eb32419732..36ed619faf36 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) | |||
| 1797 | WARN_ON_ONCE(!ret); | 1797 | WARN_ON_ONCE(!ret); |
| 1798 | 1798 | ||
| 1799 | gstrings.len = ret; | 1799 | gstrings.len = ret; |
| 1800 | data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); | ||
| 1801 | if (gstrings.len && !data) | ||
| 1802 | return -ENOMEM; | ||
| 1803 | 1800 | ||
| 1804 | __ethtool_get_strings(dev, gstrings.string_set, data); | 1801 | if (gstrings.len) { |
| 1802 | data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); | ||
| 1803 | if (!data) | ||
| 1804 | return -ENOMEM; | ||
| 1805 | |||
| 1806 | __ethtool_get_strings(dev, gstrings.string_set, data); | ||
| 1807 | } else { | ||
| 1808 | data = NULL; | ||
| 1809 | } | ||
| 1805 | 1810 | ||
| 1806 | ret = -EFAULT; | 1811 | ret = -EFAULT; |
| 1807 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) | 1812 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) |
| @@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) | |||
| 1897 | return -EFAULT; | 1902 | return -EFAULT; |
| 1898 | 1903 | ||
| 1899 | stats.n_stats = n_stats; | 1904 | stats.n_stats = n_stats; |
| 1900 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1901 | if (n_stats && !data) | ||
| 1902 | return -ENOMEM; | ||
| 1903 | 1905 | ||
| 1904 | ops->get_ethtool_stats(dev, &stats, data); | 1906 | if (n_stats) { |
| 1907 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1908 | if (!data) | ||
| 1909 | return -ENOMEM; | ||
| 1910 | ops->get_ethtool_stats(dev, &stats, data); | ||
| 1911 | } else { | ||
| 1912 | data = NULL; | ||
| 1913 | } | ||
| 1905 | 1914 | ||
| 1906 | ret = -EFAULT; | 1915 | ret = -EFAULT; |
| 1907 | if (copy_to_user(useraddr, &stats, sizeof(stats))) | 1916 | if (copy_to_user(useraddr, &stats, sizeof(stats))) |
| @@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) | |||
| 1941 | return -EFAULT; | 1950 | return -EFAULT; |
| 1942 | 1951 | ||
| 1943 | stats.n_stats = n_stats; | 1952 | stats.n_stats = n_stats; |
| 1944 | data = vzalloc(array_size(n_stats, sizeof(u64))); | ||
| 1945 | if (n_stats && !data) | ||
| 1946 | return -ENOMEM; | ||
| 1947 | 1953 | ||
| 1948 | if (dev->phydev && !ops->get_ethtool_phy_stats) { | 1954 | if (n_stats) { |
| 1949 | ret = phy_ethtool_get_stats(dev->phydev, &stats, data); | 1955 | data = vzalloc(array_size(n_stats, sizeof(u64))); |
| 1950 | if (ret < 0) | 1956 | if (!data) |
| 1951 | return ret; | 1957 | return -ENOMEM; |
| 1958 | |||
| 1959 | if (dev->phydev && !ops->get_ethtool_phy_stats) { | ||
| 1960 | ret = phy_ethtool_get_stats(dev->phydev, &stats, data); | ||
| 1961 | if (ret < 0) | ||
| 1962 | goto out; | ||
| 1963 | } else { | ||
| 1964 | ops->get_ethtool_phy_stats(dev, &stats, data); | ||
| 1965 | } | ||
| 1952 | } else { | 1966 | } else { |
| 1953 | ops->get_ethtool_phy_stats(dev, &stats, data); | 1967 | data = NULL; |
| 1954 | } | 1968 | } |
| 1955 | 1969 | ||
| 1956 | ret = -EFAULT; | 1970 | ret = -EFAULT; |
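All three ethtool.c hunks apply the same rule: only allocate the result buffer and call into the driver when the reported count is non-zero, and otherwise leave the buffer NULL so nothing is copied out. That avoids handing drivers a NULL or zero-length buffer they never expect. A compact userspace sketch of the pattern (the stat layout and names are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

#define STR_LEN 32

/* Fills one fixed-size string per entry; like __ethtool_get_strings(),
 * it must only be called with a real buffer when count > 0.
 */
static void get_strings(char *buf, size_t count)
{
        size_t i;

        for (i = 0; i < count; i++)
                snprintf(buf + i * STR_LEN, STR_LEN, "stat-%zu", i);
}

static int query_strings(size_t count)
{
        char *data = NULL;
        size_t i;

        if (count) {                    /* allocate and fill only if needed */
                data = calloc(count, STR_LEN);
                if (!data)
                        return -1;
                get_strings(data, count);
        }
        /* count == 0: data stays NULL and is simply never dereferenced */

        for (i = 0; i < count; i++)
                printf("%s\n", data + i * STR_LEN);
        free(data);                     /* free(NULL) is a no-op */
        return 0;
}

int main(void)
{
        query_strings(3);
        query_strings(0);               /* no allocation, no callback */
        return 0;
}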
diff --git a/net/core/failover.c b/net/core/failover.c index 4a92a98ccce9..b5cd3c727285 100644 --- a/net/core/failover.c +++ b/net/core/failover.c | |||
| @@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev) | |||
| 80 | goto err_upper_link; | 80 | goto err_upper_link; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | slave_dev->priv_flags |= IFF_FAILOVER_SLAVE; | 83 | slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
| 84 | 84 | ||
| 85 | if (fops && fops->slave_register && | 85 | if (fops && fops->slave_register && |
| 86 | !fops->slave_register(slave_dev, failover_dev)) | 86 | !fops->slave_register(slave_dev, failover_dev)) |
| 87 | return NOTIFY_OK; | 87 | return NOTIFY_OK; |
| 88 | 88 | ||
| 89 | netdev_upper_dev_unlink(slave_dev, failover_dev); | 89 | netdev_upper_dev_unlink(slave_dev, failover_dev); |
| 90 | slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; | 90 | slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
| 91 | err_upper_link: | 91 | err_upper_link: |
| 92 | netdev_rx_handler_unregister(slave_dev); | 92 | netdev_rx_handler_unregister(slave_dev); |
| 93 | done: | 93 | done: |
| @@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev) | |||
| 121 | 121 | ||
| 122 | netdev_rx_handler_unregister(slave_dev); | 122 | netdev_rx_handler_unregister(slave_dev); |
| 123 | netdev_upper_dev_unlink(slave_dev, failover_dev); | 123 | netdev_upper_dev_unlink(slave_dev, failover_dev); |
| 124 | slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; | 124 | slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); |
| 125 | 125 | ||
| 126 | if (fops && fops->slave_unregister && | 126 | if (fops && fops->slave_unregister && |
| 127 | !fops->slave_unregister(slave_dev, failover_dev)) | 127 | !fops->slave_unregister(slave_dev, failover_dev)) |
diff --git a/net/core/filter.c b/net/core/filter.c index 647c63a7b25b..27e61ffd9039 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, | |||
| 4383 | * Only binding to IP is supported. | 4383 | * Only binding to IP is supported. |
| 4384 | */ | 4384 | */ |
| 4385 | err = -EINVAL; | 4385 | err = -EINVAL; |
| 4386 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
| 4387 | return err; | ||
| 4386 | if (addr->sa_family == AF_INET) { | 4388 | if (addr->sa_family == AF_INET) { |
| 4387 | if (addr_len < sizeof(struct sockaddr_in)) | 4389 | if (addr_len < sizeof(struct sockaddr_in)) |
| 4388 | return err; | 4390 | return err; |
| @@ -6613,14 +6615,8 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6613 | const struct bpf_prog *prog, | 6615 | const struct bpf_prog *prog, |
| 6614 | struct bpf_insn_access_aux *info) | 6616 | struct bpf_insn_access_aux *info) |
| 6615 | { | 6617 | { |
| 6616 | if (type == BPF_WRITE) { | 6618 | if (type == BPF_WRITE) |
| 6617 | switch (off) { | 6619 | return false; |
| 6618 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): | ||
| 6619 | break; | ||
| 6620 | default: | ||
| 6621 | return false; | ||
| 6622 | } | ||
| 6623 | } | ||
| 6624 | 6620 | ||
| 6625 | switch (off) { | 6621 | switch (off) { |
| 6626 | case bpf_ctx_range(struct __sk_buff, data): | 6622 | case bpf_ctx_range(struct __sk_buff, data): |
| @@ -6632,11 +6628,7 @@ static bool flow_dissector_is_valid_access(int off, int size, | |||
| 6632 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): | 6628 | case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): |
| 6633 | info->reg_type = PTR_TO_FLOW_KEYS; | 6629 | info->reg_type = PTR_TO_FLOW_KEYS; |
| 6634 | break; | 6630 | break; |
| 6635 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 6631 | default: |
| 6636 | case bpf_ctx_range(struct __sk_buff, data_meta): | ||
| 6637 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | ||
| 6638 | case bpf_ctx_range(struct __sk_buff, tstamp): | ||
| 6639 | case bpf_ctx_range(struct __sk_buff, wire_len): | ||
| 6640 | return false; | 6632 | return false; |
| 6641 | } | 6633 | } |
| 6642 | 6634 | ||
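The bpf_bind() hunk makes sure addr_len covers at least the sa_family field before that field is read; offsetofend() gives exactly the number of bytes needed to reach the end of a member. A userspace version of the check, spelling out offsetofend() the way the kernel defines it:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Userspace spelling of the kernel's offsetofend(): the offset of the
 * first byte past MEMBER.
 */
#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

static int check_bind(const struct sockaddr *addr, socklen_t addr_len)
{
        /* don't read sa_family unless the caller supplied at least that much */
        if (addr_len < offsetofend(struct sockaddr, sa_family))
                return -1;

        if (addr->sa_family == AF_INET)
                return addr_len < sizeof(struct sockaddr_in) ? -1 : 0;
        if (addr->sa_family == AF_INET6)
                return addr_len < sizeof(struct sockaddr_in6) ? -1 : 0;
        return -1;
}

int main(void)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;

        printf("%d\n", check_bind((struct sockaddr *)&sin, sizeof(sin))); /* 0 */
        printf("%d\n", check_bind((struct sockaddr *)&sin, 1));           /* -1 */
        return 0;
}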
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index bb1a54747d64..94a450b2191a 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog, | |||
| 707 | /* Pass parameters to the BPF program */ | 707 | /* Pass parameters to the BPF program */ |
| 708 | memset(flow_keys, 0, sizeof(*flow_keys)); | 708 | memset(flow_keys, 0, sizeof(*flow_keys)); |
| 709 | cb->qdisc_cb.flow_keys = flow_keys; | 709 | cb->qdisc_cb.flow_keys = flow_keys; |
| 710 | flow_keys->n_proto = skb->protocol; | ||
| 710 | flow_keys->nhoff = skb_network_offset(skb); | 711 | flow_keys->nhoff = skb_network_offset(skb); |
| 711 | flow_keys->thoff = flow_keys->nhoff; | 712 | flow_keys->thoff = flow_keys->nhoff; |
| 712 | 713 | ||
| @@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog, | |||
| 716 | /* Restore state */ | 717 | /* Restore state */ |
| 717 | memcpy(cb, &cb_saved, sizeof(cb_saved)); | 718 | memcpy(cb, &cb_saved, sizeof(cb_saved)); |
| 718 | 719 | ||
| 719 | flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len); | 720 | flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, |
| 721 | skb_network_offset(skb), skb->len); | ||
| 720 | flow_keys->thoff = clamp_t(u16, flow_keys->thoff, | 722 | flow_keys->thoff = clamp_t(u16, flow_keys->thoff, |
| 721 | flow_keys->nhoff, skb->len); | 723 | flow_keys->nhoff, skb->len); |
| 722 | 724 | ||
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index f8f94303a1f5..8f8b7b6c2945 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev) | |||
| 1747 | 1747 | ||
| 1748 | error = device_add(dev); | 1748 | error = device_add(dev); |
| 1749 | if (error) | 1749 | if (error) |
| 1750 | goto error_put_device; | 1750 | return error; |
| 1751 | 1751 | ||
| 1752 | error = register_queue_kobjects(ndev); | 1752 | error = register_queue_kobjects(ndev); |
| 1753 | if (error) | 1753 | if (error) { |
| 1754 | goto error_device_del; | 1754 | device_del(dev); |
| 1755 | return error; | ||
| 1756 | } | ||
| 1755 | 1757 | ||
| 1756 | pm_runtime_set_memalloc_noio(dev, true); | 1758 | pm_runtime_set_memalloc_noio(dev, true); |
| 1757 | 1759 | ||
| 1758 | return 0; | ||
| 1759 | |||
| 1760 | error_device_del: | ||
| 1761 | device_del(dev); | ||
| 1762 | error_put_device: | ||
| 1763 | put_device(dev); | ||
| 1764 | return error; | 1760 | return error; |
| 1765 | } | 1761 | } |
| 1766 | 1762 | ||
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 17f36317363d..7e6dcc625701 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) | |||
| 304 | 304 | ||
| 305 | refcount_set(&net->count, 1); | 305 | refcount_set(&net->count, 1); |
| 306 | refcount_set(&net->passive, 1); | 306 | refcount_set(&net->passive, 1); |
| 307 | get_random_bytes(&net->hash_mix, sizeof(u32)); | ||
| 307 | net->dev_base_seq = 1; | 308 | net->dev_base_seq = 1; |
| 308 | net->user_ns = user_ns; | 309 | net->user_ns = user_ns; |
| 309 | idr_init(&net->netns_ids); | 310 | idr_init(&net->netns_ids); |
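The one-line net_namespace.c change seeds a per-namespace hash_mix with random bytes at setup time, so hash tables that mix this value into their bucket computation get an unpredictable, per-netns salt rather than a constant. A rough userspace illustration of why the salt matters (the mixing function below is a toy, not the kernel's net_hash_mix()):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

struct netns {
        uint32_t hash_mix;      /* per-instance salt, like net->hash_mix */
};

/* Toy mixer: the point is only that 'mix' differs per namespace, so the
 * same key lands in different (and unpredictable) buckets.
 */
static uint32_t hash_bucket(uint32_t key, uint32_t mix, uint32_t nbuckets)
{
        key ^= mix;
        key *= 0x9e3779b1u;
        return key % nbuckets;
}

int main(void)
{
        struct netns a, b;

        /* rand() stands in for get_random_bytes(); the kernel uses its CSPRNG */
        srand((unsigned)time(NULL));
        a.hash_mix = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
        b.hash_mix = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

        printf("netns a: bucket %u\n", (unsigned)hash_bucket(0xc0a80001u, a.hash_mix, 256));
        printf("netns b: bucket %u\n", (unsigned)hash_bucket(0xc0a80001u, b.hash_mix, 256));
        return 0;
}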
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c index 703cf76aa7c2..7109c168b5e0 100644 --- a/net/core/ptp_classifier.c +++ b/net/core/ptp_classifier.c | |||
| @@ -185,9 +185,10 @@ void __init ptp_classifier_init(void) | |||
| 185 | { 0x16, 0, 0, 0x00000000 }, | 185 | { 0x16, 0, 0, 0x00000000 }, |
| 186 | { 0x06, 0, 0, 0x00000000 }, | 186 | { 0x06, 0, 0, 0x00000000 }, |
| 187 | }; | 187 | }; |
| 188 | struct sock_fprog_kern ptp_prog = { | 188 | struct sock_fprog_kern ptp_prog; |
| 189 | .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter, | 189 | |
| 190 | }; | 190 | ptp_prog.len = ARRAY_SIZE(ptp_filter); |
| 191 | ptp_prog.filter = ptp_filter; | ||
| 191 | 192 | ||
| 192 | BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); | 193 | BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog)); |
| 193 | } | 194 | } |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a51cab95ba64..220c56e93659 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, | |||
| 4948 | { | 4948 | { |
| 4949 | struct if_stats_msg *ifsm; | 4949 | struct if_stats_msg *ifsm; |
| 4950 | 4950 | ||
| 4951 | if (nlh->nlmsg_len < sizeof(*ifsm)) { | 4951 | if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) { |
| 4952 | NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); | 4952 | NL_SET_ERR_MSG(extack, "Invalid header for stats dump"); |
| 4953 | return -EINVAL; | 4953 | return -EINVAL; |
| 4954 | } | 4954 | } |
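The rtnetlink.c fix matters because nlmsg_len counts the netlink header as well as the family-specific header, so validating it against sizeof(struct if_stats_msg) alone under-checks by the header size; nlmsg_msg_size() adds NLMSG_HDRLEN for you. A standalone sketch of the arithmetic, using layout-compatible stand-in structs:

#include <stdio.h>
#include <stdint.h>

/* Layout-compatible stand-ins so the arithmetic is self-contained;
 * the real definitions live in <linux/netlink.h> and <linux/if_link.h>.
 */
struct nlmsghdr_like {
        uint32_t nlmsg_len;
        uint16_t nlmsg_type;
        uint16_t nlmsg_flags;
        uint32_t nlmsg_seq;
        uint32_t nlmsg_pid;
};                                      /* 16 bytes, like struct nlmsghdr */

struct if_stats_msg_like {
        uint8_t  family;
        uint8_t  pad1;
        uint16_t pad2;
        uint32_t ifindex;
        uint32_t filter_mask;
};                                      /* 12 bytes, like struct if_stats_msg */

#define NLMSG_ALIGNTO           4u
#define NLMSG_ALIGN(len)        (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define NLMSG_HDRLEN            NLMSG_ALIGN(sizeof(struct nlmsghdr_like))
#define nlmsg_msg_size(payload) (NLMSG_HDRLEN + (payload))

int main(void)
{
        /* nlmsg_len counts the netlink header too, so the minimum valid
         * length is header + family header, not the family header alone.
         */
        unsigned long old_minimum = sizeof(struct if_stats_msg_like);
        unsigned long new_minimum = nlmsg_msg_size(sizeof(struct if_stats_msg_like));

        printf("old check accepted >= %lu bytes, correct minimum is %lu\n",
               old_minimum, new_minimum);
        return 0;
}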
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2415d9cb9b89..40796b8bf820 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) | |||
| 3801 | unsigned int delta_truesize; | 3801 | unsigned int delta_truesize; |
| 3802 | struct sk_buff *lp; | 3802 | struct sk_buff *lp; |
| 3803 | 3803 | ||
| 3804 | if (unlikely(p->len + len >= 65536)) | 3804 | if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) |
| 3805 | return -E2BIG; | 3805 | return -E2BIG; |
| 3806 | 3806 | ||
| 3807 | lp = NAPI_GRO_CB(p)->last; | 3807 | lp = NAPI_GRO_CB(p)->last; |
| @@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); | |||
| 5083 | 5083 | ||
| 5084 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | 5084 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
| 5085 | { | 5085 | { |
| 5086 | int mac_len; | 5086 | int mac_len, meta_len; |
| 5087 | void *meta; | ||
| 5087 | 5088 | ||
| 5088 | if (skb_cow(skb, skb_headroom(skb)) < 0) { | 5089 | if (skb_cow(skb, skb_headroom(skb)) < 0) { |
| 5089 | kfree_skb(skb); | 5090 | kfree_skb(skb); |
| @@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | |||
| 5095 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), | 5096 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), |
| 5096 | mac_len - VLAN_HLEN - ETH_TLEN); | 5097 | mac_len - VLAN_HLEN - ETH_TLEN); |
| 5097 | } | 5098 | } |
| 5099 | |||
| 5100 | meta_len = skb_metadata_len(skb); | ||
| 5101 | if (meta_len) { | ||
| 5102 | meta = skb_metadata_end(skb) - meta_len; | ||
| 5103 | memmove(meta + VLAN_HLEN, meta, meta_len); | ||
| 5104 | } | ||
| 5105 | |||
| 5098 | skb->mac_header += VLAN_HLEN; | 5106 | skb->mac_header += VLAN_HLEN; |
| 5099 | return skb; | 5107 | return skb; |
| 5100 | } | 5108 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index 782343bb925b..067878a1e4c5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval) | |||
| 348 | tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; | 348 | tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; |
| 349 | } | 349 | } |
| 350 | 350 | ||
| 351 | if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { | 351 | if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { |
| 352 | struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; | 352 | struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; |
| 353 | *(struct old_timeval32 *)optval = tv32; | 353 | *(struct old_timeval32 *)optval = tv32; |
| 354 | return sizeof(tv32); | 354 | return sizeof(tv32); |
| @@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool | |||
| 372 | { | 372 | { |
| 373 | struct __kernel_sock_timeval tv; | 373 | struct __kernel_sock_timeval tv; |
| 374 | 374 | ||
| 375 | if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { | 375 | if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { |
| 376 | struct old_timeval32 tv32; | 376 | struct old_timeval32 tv32; |
| 377 | 377 | ||
| 378 | if (optlen < sizeof(tv32)) | 378 | if (optlen < sizeof(tv32)) |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index f227f002c73d..db87d9f58019 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
| @@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local, | |||
| 738 | if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) | 738 | if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) |
| 739 | return -ENOMEM; | 739 | return -ENOMEM; |
| 740 | 740 | ||
| 741 | return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval); | 741 | if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) { |
| 742 | kfree(fval.sp.vec); | ||
| 743 | return -ENOMEM; | ||
| 744 | } | ||
| 745 | |||
| 746 | return 0; | ||
| 742 | } | 747 | } |
| 743 | 748 | ||
| 744 | /** | 749 | /** |
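In the dccp/feat.c hunk, dccp_feat_clone_sp_val() allocates fval.sp.vec and dccp_feat_push_change() only takes ownership of it on success, so the failure path previously leaked the clone. The sketch below shows the ownership rule in isolation; the helpers are simplified stand-ins, and "taking ownership" is modelled by freeing the buffer rather than storing it on a list as the kernel does:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sp_val {
        unsigned char *vec;
        size_t len;
};

/* Like dccp_feat_clone_sp_val(): hands the caller a freshly allocated copy. */
static int clone_val(struct sp_val *v, const unsigned char *src, size_t len)
{
        v->vec = malloc(len);
        if (!v->vec)
                return -1;
        memcpy(v->vec, src, len);
        v->len = len;
        return 0;
}

/* Stand-in for dccp_feat_push_change(): consumes v->vec only on success;
 * on failure the caller still owns the buffer.
 */
static int push_change(struct sp_val *v, int simulate_failure)
{
        if (simulate_failure)
                return -1;
        free(v->vec);
        return 0;
}

static int register_feature(const unsigned char *sp, size_t len, int fail)
{
        struct sp_val v;

        if (clone_val(&v, sp, len))
                return -1;

        if (push_change(&v, fail)) {
                free(v.vec);    /* the fix: don't leak the clone on failure */
                return -1;
        }
        return 0;
}

int main(void)
{
        unsigned char sp[] = { 2, 3 };

        printf("%d\n", register_feature(sp, sizeof(sp), 0));    /* 0  */
        printf("%d\n", register_feature(sp, sizeof(sp), 1));    /* -1 */
        return 0;
}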
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index ed4f6dc26365..85c22ada4744 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c | |||
| @@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 98 | return skb; | 98 | return skb; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, | ||
| 102 | int *offset) | ||
| 103 | { | ||
| 104 | *offset = QCA_HDR_LEN; | ||
| 105 | *proto = ((__be16 *)skb->data)[0]; | ||
| 106 | |||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 101 | const struct dsa_device_ops qca_netdev_ops = { | 110 | const struct dsa_device_ops qca_netdev_ops = { |
| 102 | .xmit = qca_tag_xmit, | 111 | .xmit = qca_tag_xmit, |
| 103 | .rcv = qca_tag_rcv, | 112 | .rcv = qca_tag_rcv, |
| 113 | .flow_dissect = qca_tag_flow_dissect, | ||
| 104 | .overhead = QCA_HDR_LEN, | 114 | .overhead = QCA_HDR_LEN, |
| 105 | }; | 115 | }; |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 79e98e21cdd7..12ce6c526d72 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
| @@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
| 121 | struct guehdr *guehdr; | 121 | struct guehdr *guehdr; |
| 122 | void *data; | 122 | void *data; |
| 123 | u16 doffset = 0; | 123 | u16 doffset = 0; |
| 124 | u8 proto_ctype; | ||
| 124 | 125 | ||
| 125 | if (!fou) | 126 | if (!fou) |
| 126 | return 1; | 127 | return 1; |
| @@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) | |||
| 212 | if (unlikely(guehdr->control)) | 213 | if (unlikely(guehdr->control)) |
| 213 | return gue_control_message(skb, guehdr); | 214 | return gue_control_message(skb, guehdr); |
| 214 | 215 | ||
| 216 | proto_ctype = guehdr->proto_ctype; | ||
| 215 | __skb_pull(skb, sizeof(struct udphdr) + hdrlen); | 217 | __skb_pull(skb, sizeof(struct udphdr) + hdrlen); |
| 216 | skb_reset_transport_header(skb); | 218 | skb_reset_transport_header(skb); |
| 217 | 219 | ||
| 218 | if (iptunnel_pull_offloads(skb)) | 220 | if (iptunnel_pull_offloads(skb)) |
| 219 | goto drop; | 221 | goto drop; |
| 220 | 222 | ||
| 221 | return -guehdr->proto_ctype; | 223 | return -proto_ctype; |
| 222 | 224 | ||
| 223 | drop: | 225 | drop: |
| 224 | kfree_skb(skb); | 226 | kfree_skb(skb); |
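The fou.c change copies guehdr->proto_ctype into a local before __skb_pull() and iptunnel_pull_offloads() run, because those can move or reallocate the skb data and leave the old header pointer stale. The same discipline in a self-contained userspace sketch, where the pull helper deliberately reallocates the buffer to make the staleness obvious:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct buf {
        unsigned char *data;
        size_t len;
};

struct gue_hdr_like {
        uint8_t proto_ctype;
        uint8_t hlen;
};

/* Stand-in for __skb_pull()/iptunnel_pull_offloads(): may reallocate the
 * backing storage, so any pointer into the old buffer becomes stale.
 */
static int pull(struct buf *b, size_t n)
{
        size_t remaining = b->len - n;
        unsigned char *fresh = malloc(remaining);

        if (!fresh)
                return -1;
        memcpy(fresh, b->data + n, remaining);
        free(b->data);
        b->data = fresh;
        b->len = remaining;
        return 0;
}

int main(void)
{
        struct buf b;
        struct gue_hdr_like *h;
        uint8_t proto;

        b.len = 32;
        b.data = calloc(1, b.len);
        if (!b.data)
                return 1;

        h = (struct gue_hdr_like *)b.data;
        h->proto_ctype = 4;

        proto = h->proto_ctype;         /* copy the field first ... */
        if (pull(&b, sizeof(*h)))
                return 1;
        /* ... because 'h' must not be dereferenced after the pull */

        printf("proto_ctype %u\n", (unsigned)proto);
        free(b.data);
        return 0;
}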
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index fd219f7bd3ea..4b0526441476 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 259 | struct net *net = dev_net(skb->dev); | 259 | struct net *net = dev_net(skb->dev); |
| 260 | struct metadata_dst *tun_dst = NULL; | 260 | struct metadata_dst *tun_dst = NULL; |
| 261 | struct erspan_base_hdr *ershdr; | 261 | struct erspan_base_hdr *ershdr; |
| 262 | struct erspan_metadata *pkt_md; | ||
| 263 | struct ip_tunnel_net *itn; | 262 | struct ip_tunnel_net *itn; |
| 264 | struct ip_tunnel *tunnel; | 263 | struct ip_tunnel *tunnel; |
| 265 | const struct iphdr *iph; | 264 | const struct iphdr *iph; |
| @@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 282 | if (unlikely(!pskb_may_pull(skb, len))) | 281 | if (unlikely(!pskb_may_pull(skb, len))) |
| 283 | return PACKET_REJECT; | 282 | return PACKET_REJECT; |
| 284 | 283 | ||
| 285 | ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); | ||
| 286 | pkt_md = (struct erspan_metadata *)(ershdr + 1); | ||
| 287 | |||
| 288 | if (__iptunnel_pull_header(skb, | 284 | if (__iptunnel_pull_header(skb, |
| 289 | len, | 285 | len, |
| 290 | htons(ETH_P_TEB), | 286 | htons(ETH_P_TEB), |
| @@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 292 | goto drop; | 288 | goto drop; |
| 293 | 289 | ||
| 294 | if (tunnel->collect_md) { | 290 | if (tunnel->collect_md) { |
| 291 | struct erspan_metadata *pkt_md, *md; | ||
| 295 | struct ip_tunnel_info *info; | 292 | struct ip_tunnel_info *info; |
| 296 | struct erspan_metadata *md; | 293 | unsigned char *gh; |
| 297 | __be64 tun_id; | 294 | __be64 tun_id; |
| 298 | __be16 flags; | 295 | __be16 flags; |
| 299 | 296 | ||
| @@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 306 | if (!tun_dst) | 303 | if (!tun_dst) |
| 307 | return PACKET_REJECT; | 304 | return PACKET_REJECT; |
| 308 | 305 | ||
| 306 | /* skb can be uncloned in __iptunnel_pull_header, so | ||
| 307 | * old pkt_md is no longer valid and we need to reset | ||
| 308 | * it | ||
| 309 | */ | ||
| 310 | gh = skb_network_header(skb) + | ||
| 311 | skb_network_header_len(skb); | ||
| 312 | pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + | ||
| 313 | sizeof(*ershdr)); | ||
| 309 | md = ip_tunnel_info_opts(&tun_dst->u.tun_info); | 314 | md = ip_tunnel_info_opts(&tun_dst->u.tun_info); |
| 310 | md->version = ver; | 315 | md->version = ver; |
| 311 | md2 = &md->u.md2; | 316 | md2 = &md->u.md2; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index ecce2dc78f17..1132d6d1796a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb) | |||
| 257 | ip_local_deliver_finish); | 257 | ip_local_deliver_finish); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static inline bool ip_rcv_options(struct sk_buff *skb) | 260 | static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) |
| 261 | { | 261 | { |
| 262 | struct ip_options *opt; | 262 | struct ip_options *opt; |
| 263 | const struct iphdr *iph; | 263 | const struct iphdr *iph; |
| 264 | struct net_device *dev = skb->dev; | ||
| 265 | 264 | ||
| 266 | /* It looks as overkill, because not all | 265 | /* It looks as overkill, because not all |
| 267 | IP options require packet mangling. | 266 | IP options require packet mangling. |
| @@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb) | |||
| 297 | } | 296 | } |
| 298 | } | 297 | } |
| 299 | 298 | ||
| 300 | if (ip_options_rcv_srr(skb)) | 299 | if (ip_options_rcv_srr(skb, dev)) |
| 301 | goto drop; | 300 | goto drop; |
| 302 | } | 301 | } |
| 303 | 302 | ||
| @@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, | |||
| 353 | } | 352 | } |
| 354 | #endif | 353 | #endif |
| 355 | 354 | ||
| 356 | if (iph->ihl > 5 && ip_rcv_options(skb)) | 355 | if (iph->ihl > 5 && ip_rcv_options(skb, dev)) |
| 357 | goto drop; | 356 | goto drop; |
| 358 | 357 | ||
| 359 | rt = skb_rtable(skb); | 358 | rt = skb_rtable(skb); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 32a35043c9f5..3db31bb9df50 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
| @@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb) | |||
| 612 | } | 612 | } |
| 613 | } | 613 | } |
| 614 | 614 | ||
| 615 | int ip_options_rcv_srr(struct sk_buff *skb) | 615 | int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) |
| 616 | { | 616 | { |
| 617 | struct ip_options *opt = &(IPCB(skb)->opt); | 617 | struct ip_options *opt = &(IPCB(skb)->opt); |
| 618 | int srrspace, srrptr; | 618 | int srrspace, srrptr; |
| @@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
| 647 | 647 | ||
| 648 | orefdst = skb->_skb_refdst; | 648 | orefdst = skb->_skb_refdst; |
| 649 | skb_dst_set(skb, NULL); | 649 | skb_dst_set(skb, NULL); |
| 650 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 650 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); |
| 651 | rt2 = skb_rtable(skb); | 651 | rt2 = skb_rtable(skb); |
| 652 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 652 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
| 653 | skb_dst_drop(skb); | 653 | skb_dst_drop(skb); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a5da63e5faa2..6fdf1c195d8e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1183,11 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | |||
| 1183 | return dst; | 1183 | return dst; |
| 1184 | } | 1184 | } |
| 1185 | 1185 | ||
| 1186 | static void ipv4_send_dest_unreach(struct sk_buff *skb) | ||
| 1187 | { | ||
| 1188 | struct ip_options opt; | ||
| 1189 | int res; | ||
| 1190 | |||
| 1191 | /* Recompile ip options since IPCB may not be valid anymore. | ||
| 1192 | * Also check we have a reasonable ipv4 header. | ||
| 1193 | */ | ||
| 1194 | if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) || | ||
| 1195 | ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5) | ||
| 1196 | return; | ||
| 1197 | |||
| 1198 | memset(&opt, 0, sizeof(opt)); | ||
| 1199 | if (ip_hdr(skb)->ihl > 5) { | ||
| 1200 | if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4)) | ||
| 1201 | return; | ||
| 1202 | opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr); | ||
| 1203 | |||
| 1204 | rcu_read_lock(); | ||
| 1205 | res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); | ||
| 1206 | rcu_read_unlock(); | ||
| 1207 | |||
| 1208 | if (res) | ||
| 1209 | return; | ||
| 1210 | } | ||
| 1211 | __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt); | ||
| 1212 | } | ||
| 1213 | |||
| 1186 | static void ipv4_link_failure(struct sk_buff *skb) | 1214 | static void ipv4_link_failure(struct sk_buff *skb) |
| 1187 | { | 1215 | { |
| 1188 | struct rtable *rt; | 1216 | struct rtable *rt; |
| 1189 | 1217 | ||
| 1190 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); | 1218 | ipv4_send_dest_unreach(skb); |
| 1191 | 1219 | ||
| 1192 | rt = skb_rtable(skb); | 1220 | rt = skb_rtable(skb); |
| 1193 | if (rt) | 1221 | if (rt) |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index ba0fc4b18465..eeb4041fa5f9 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 }; | |||
| 49 | static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; | 49 | static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; |
| 50 | static int comp_sack_nr_max = 255; | 50 | static int comp_sack_nr_max = 255; |
| 51 | static u32 u32_max_div_HZ = UINT_MAX / HZ; | 51 | static u32 u32_max_div_HZ = UINT_MAX / HZ; |
| 52 | static int one_day_secs = 24 * 3600; | ||
| 52 | 53 | ||
| 53 | /* obsolete */ | 54 | /* obsolete */ |
| 54 | static int sysctl_tcp_low_latency __read_mostly; | 55 | static int sysctl_tcp_low_latency __read_mostly; |
| @@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = { | |||
| 1151 | .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen, | 1152 | .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen, |
| 1152 | .maxlen = sizeof(int), | 1153 | .maxlen = sizeof(int), |
| 1153 | .mode = 0644, | 1154 | .mode = 0644, |
| 1154 | .proc_handler = proc_dointvec | 1155 | .proc_handler = proc_dointvec_minmax, |
| 1156 | .extra1 = &zero, | ||
| 1157 | .extra2 = &one_day_secs | ||
| 1155 | }, | 1158 | }, |
| 1156 | { | 1159 | { |
| 1157 | .procname = "tcp_autocorking", | 1160 | .procname = "tcp_autocorking", |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index cd4814f7e962..477cb4aa456c 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
| @@ -49,9 +49,8 @@ | |||
| 49 | #define DCTCP_MAX_ALPHA 1024U | 49 | #define DCTCP_MAX_ALPHA 1024U |
| 50 | 50 | ||
| 51 | struct dctcp { | 51 | struct dctcp { |
| 52 | u32 acked_bytes_ecn; | 52 | u32 old_delivered; |
| 53 | u32 acked_bytes_total; | 53 | u32 old_delivered_ce; |
| 54 | u32 prior_snd_una; | ||
| 55 | u32 prior_rcv_nxt; | 54 | u32 prior_rcv_nxt; |
| 56 | u32 dctcp_alpha; | 55 | u32 dctcp_alpha; |
| 57 | u32 next_seq; | 56 | u32 next_seq; |
| @@ -67,19 +66,14 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA; | |||
| 67 | module_param(dctcp_alpha_on_init, uint, 0644); | 66 | module_param(dctcp_alpha_on_init, uint, 0644); |
| 68 | MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); | 67 | MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); |
| 69 | 68 | ||
| 70 | static unsigned int dctcp_clamp_alpha_on_loss __read_mostly; | ||
| 71 | module_param(dctcp_clamp_alpha_on_loss, uint, 0644); | ||
| 72 | MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss, | ||
| 73 | "parameter for clamping alpha on loss"); | ||
| 74 | |||
| 75 | static struct tcp_congestion_ops dctcp_reno; | 69 | static struct tcp_congestion_ops dctcp_reno; |
| 76 | 70 | ||
| 77 | static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) | 71 | static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) |
| 78 | { | 72 | { |
| 79 | ca->next_seq = tp->snd_nxt; | 73 | ca->next_seq = tp->snd_nxt; |
| 80 | 74 | ||
| 81 | ca->acked_bytes_ecn = 0; | 75 | ca->old_delivered = tp->delivered; |
| 82 | ca->acked_bytes_total = 0; | 76 | ca->old_delivered_ce = tp->delivered_ce; |
| 83 | } | 77 | } |
| 84 | 78 | ||
| 85 | static void dctcp_init(struct sock *sk) | 79 | static void dctcp_init(struct sock *sk) |
| @@ -91,7 +85,6 @@ static void dctcp_init(struct sock *sk) | |||
| 91 | sk->sk_state == TCP_CLOSE)) { | 85 | sk->sk_state == TCP_CLOSE)) { |
| 92 | struct dctcp *ca = inet_csk_ca(sk); | 86 | struct dctcp *ca = inet_csk_ca(sk); |
| 93 | 87 | ||
| 94 | ca->prior_snd_una = tp->snd_una; | ||
| 95 | ca->prior_rcv_nxt = tp->rcv_nxt; | 88 | ca->prior_rcv_nxt = tp->rcv_nxt; |
| 96 | 89 | ||
| 97 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 90 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
| @@ -123,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags) | |||
| 123 | { | 116 | { |
| 124 | const struct tcp_sock *tp = tcp_sk(sk); | 117 | const struct tcp_sock *tp = tcp_sk(sk); |
| 125 | struct dctcp *ca = inet_csk_ca(sk); | 118 | struct dctcp *ca = inet_csk_ca(sk); |
| 126 | u32 acked_bytes = tp->snd_una - ca->prior_snd_una; | ||
| 127 | |||
| 128 | /* If ack did not advance snd_una, count dupack as MSS size. | ||
| 129 | * If ack did update window, do not count it at all. | ||
| 130 | */ | ||
| 131 | if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE)) | ||
| 132 | acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; | ||
| 133 | if (acked_bytes) { | ||
| 134 | ca->acked_bytes_total += acked_bytes; | ||
| 135 | ca->prior_snd_una = tp->snd_una; | ||
| 136 | |||
| 137 | if (flags & CA_ACK_ECE) | ||
| 138 | ca->acked_bytes_ecn += acked_bytes; | ||
| 139 | } | ||
| 140 | 119 | ||
| 141 | /* Expired RTT */ | 120 | /* Expired RTT */ |
| 142 | if (!before(tp->snd_una, ca->next_seq)) { | 121 | if (!before(tp->snd_una, ca->next_seq)) { |
| 143 | u64 bytes_ecn = ca->acked_bytes_ecn; | 122 | u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; |
| 144 | u32 alpha = ca->dctcp_alpha; | 123 | u32 alpha = ca->dctcp_alpha; |
| 145 | 124 | ||
| 146 | /* alpha = (1 - g) * alpha + g * F */ | 125 | /* alpha = (1 - g) * alpha + g * F */ |
| 147 | 126 | ||
| 148 | alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); | 127 | alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); |
| 149 | if (bytes_ecn) { | 128 | if (delivered_ce) { |
| 129 | u32 delivered = tp->delivered - ca->old_delivered; | ||
| 130 | |||
| 150 | /* If dctcp_shift_g == 1, a 32bit value would overflow | 131 | /* If dctcp_shift_g == 1, a 32bit value would overflow |
| 151 | * after 8 Mbytes. | 132 | * after 8 M packets. |
| 152 | */ | 133 | */ |
| 153 | bytes_ecn <<= (10 - dctcp_shift_g); | 134 | delivered_ce <<= (10 - dctcp_shift_g); |
| 154 | do_div(bytes_ecn, max(1U, ca->acked_bytes_total)); | 135 | delivered_ce /= max(1U, delivered); |
| 155 | 136 | ||
| 156 | alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA); | 137 | alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); |
| 157 | } | 138 | } |
| 158 | /* dctcp_alpha can be read from dctcp_get_info() without | 139 | /* dctcp_alpha can be read from dctcp_get_info() without |
| 159 | * synchro, so we ask compiler to not use dctcp_alpha | 140 | * synchro, so we ask compiler to not use dctcp_alpha |
| @@ -164,21 +145,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags) | |||
| 164 | } | 145 | } |
| 165 | } | 146 | } |
| 166 | 147 | ||
| 167 | static void dctcp_state(struct sock *sk, u8 new_state) | 148 | static void dctcp_react_to_loss(struct sock *sk) |
| 168 | { | 149 | { |
| 169 | if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) { | 150 | struct dctcp *ca = inet_csk_ca(sk); |
| 170 | struct dctcp *ca = inet_csk_ca(sk); | 151 | struct tcp_sock *tp = tcp_sk(sk); |
| 171 | 152 | ||
| 172 | /* If this extension is enabled, we clamp dctcp_alpha to | 153 | ca->loss_cwnd = tp->snd_cwnd; |
| 173 | * max on packet loss; the motivation is that dctcp_alpha | 154 | tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); |
| 174 | * is an indicator to the extend of congestion and packet | 155 | } |
| 175 | * loss is an indicator of extreme congestion; setting | 156 | |
| 176 | * this in practice turned out to be beneficial, and | 157 | static void dctcp_state(struct sock *sk, u8 new_state) |
| 177 | * effectively assumes total congestion which reduces the | 158 | { |
| 178 | * window by half. | 159 | if (new_state == TCP_CA_Recovery && |
| 179 | */ | 160 | new_state != inet_csk(sk)->icsk_ca_state) |
| 180 | ca->dctcp_alpha = DCTCP_MAX_ALPHA; | 161 | dctcp_react_to_loss(sk); |
| 181 | } | 162 | /* We handle RTO in dctcp_cwnd_event to ensure that we perform only |
| 163 | * one loss-adjustment per RTT. | ||
| 164 | */ | ||
| 182 | } | 165 | } |
| 183 | 166 | ||
| 184 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | 167 | static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) |
| @@ -190,6 +173,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) | |||
| 190 | case CA_EVENT_ECN_NO_CE: | 173 | case CA_EVENT_ECN_NO_CE: |
| 191 | dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); | 174 | dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); |
| 192 | break; | 175 | break; |
| 176 | case CA_EVENT_LOSS: | ||
| 177 | dctcp_react_to_loss(sk); | ||
| 178 | break; | ||
| 193 | default: | 179 | default: |
| 194 | /* Don't care for the rest. */ | 180 | /* Don't care for the rest. */ |
| 195 | break; | 181 | break; |
| @@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
| 200 | union tcp_cc_info *info) | 186 | union tcp_cc_info *info) |
| 201 | { | 187 | { |
| 202 | const struct dctcp *ca = inet_csk_ca(sk); | 188 | const struct dctcp *ca = inet_csk_ca(sk); |
| 189 | const struct tcp_sock *tp = tcp_sk(sk); | ||
| 203 | 190 | ||
| 204 | /* Fill it also in case of VEGASINFO due to req struct limits. | 191 | /* Fill it also in case of VEGASINFO due to req struct limits. |
| 205 | * We can still correctly retrieve it later. | 192 | * We can still correctly retrieve it later. |
| @@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
| 211 | info->dctcp.dctcp_enabled = 1; | 198 | info->dctcp.dctcp_enabled = 1; |
| 212 | info->dctcp.dctcp_ce_state = (u16) ca->ce_state; | 199 | info->dctcp.dctcp_ce_state = (u16) ca->ce_state; |
| 213 | info->dctcp.dctcp_alpha = ca->dctcp_alpha; | 200 | info->dctcp.dctcp_alpha = ca->dctcp_alpha; |
| 214 | info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn; | 201 | info->dctcp.dctcp_ab_ecn = tp->mss_cache * |
| 215 | info->dctcp.dctcp_ab_tot = ca->acked_bytes_total; | 202 | (tp->delivered_ce - ca->old_delivered_ce); |
| 203 | info->dctcp.dctcp_ab_tot = tp->mss_cache * | ||
| 204 | (tp->delivered - ca->old_delivered); | ||
| 216 | } | 205 | } |
| 217 | 206 | ||
| 218 | *attr = INET_DIAG_DCTCPINFO; | 207 | *attr = INET_DIAG_DCTCPINFO; |
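The tcp_dctcp.c rewrite drops the private byte counters and derives the congestion-marked fraction per RTT from tp->delivered and tp->delivered_ce, then folds it into alpha with the usual EWMA: alpha = (1 - 1/2^g) * alpha + (1/2^g) * F, with F scaled to 0..1024. A small standalone sketch of that update (default g = 4; the sample packet counts are made up):

#include <stdio.h>
#include <stdint.h>

#define DCTCP_MAX_ALPHA 1024u
#define SHIFT_G         4u              /* g = 1/16, the module default */

/* One per-RTT alpha update as in the rewritten dctcp_update_alpha():
 * F = delivered_ce / delivered over the last RTT, scaled to 0..1024.
 */
static uint32_t update_alpha(uint32_t alpha, uint32_t delivered,
                             uint32_t delivered_ce)
{
        uint32_t decay = alpha >> SHIFT_G;

        alpha -= decay ? decay : alpha; /* (1 - g) * alpha, floors at zero */
        if (delivered_ce) {
                uint32_t f = (delivered_ce << (10 - SHIFT_G)) /
                             (delivered ? delivered : 1);
                alpha += f;             /* + g * F, scaled */
                if (alpha > DCTCP_MAX_ALPHA)
                        alpha = DCTCP_MAX_ALPHA;
        }
        return alpha;
}

int main(void)
{
        uint32_t alpha = DCTCP_MAX_ALPHA;       /* dctcp_alpha_on_init default */
        int rtt;

        /* 100 packets delivered per RTT, 25 of them CE-marked */
        for (rtt = 1; rtt <= 5; rtt++) {
                alpha = update_alpha(alpha, 100, 25);
                printf("rtt %d: alpha = %u/1024\n", rtt, (unsigned)alpha);
        }
        return 0;
}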
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5def3c48870e..731d3045b50a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) | |||
| 402 | static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) | 402 | static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) |
| 403 | { | 403 | { |
| 404 | struct tcp_sock *tp = tcp_sk(sk); | 404 | struct tcp_sock *tp = tcp_sk(sk); |
| 405 | int room; | ||
| 406 | |||
| 407 | room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; | ||
| 405 | 408 | ||
| 406 | /* Check #1 */ | 409 | /* Check #1 */ |
| 407 | if (tp->rcv_ssthresh < tp->window_clamp && | 410 | if (room > 0 && !tcp_under_memory_pressure(sk)) { |
| 408 | (int)tp->rcv_ssthresh < tcp_space(sk) && | ||
| 409 | !tcp_under_memory_pressure(sk)) { | ||
| 410 | int incr; | 411 | int incr; |
| 411 | 412 | ||
| 412 | /* Check #2. Increase window, if skb with such overhead | 413 | /* Check #2. Increase window, if skb with such overhead |
| @@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) | |||
| 419 | 420 | ||
| 420 | if (incr) { | 421 | if (incr) { |
| 421 | incr = max_t(int, incr, 2 * skb->len); | 422 | incr = max_t(int, incr, 2 * skb->len); |
| 422 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, | 423 | tp->rcv_ssthresh += min(room, incr); |
| 423 | tp->window_clamp); | ||
| 424 | inet_csk(sk)->icsk_ack.quick |= 1; | 424 | inet_csk(sk)->icsk_ack.quick |= 1; |
| 425 | } | 425 | } |
| 426 | } | 426 | } |
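The tcp_grow_window() change computes the remaining "room" -- min(window_clamp, tcp_space()) minus the current rcv_ssthresh -- up front and then grows rcv_ssthresh by at most that much, so it can no longer overshoot either limit. The arithmetic in isolation, with hypothetical byte values:

#include <stdio.h>

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

/* The reworked tcp_grow_window() check: grow rcv_ssthresh by at most
 * 'room', so it can never overshoot window_clamp or the receive space.
 */
static int grow_window(int rcv_ssthresh, int window_clamp, int space, int incr)
{
        int room = min_int(window_clamp, space) - rcv_ssthresh;

        if (room > 0 && incr > 0)
                rcv_ssthresh += min_int(room, incr);
        return rcv_ssthresh;
}

int main(void)
{
        /* plenty of room: grows by the full increment */
        printf("%d\n", grow_window(65536, 262144, 131072, 4096));  /* 69632 */
        /* near the cap: grows only up to min(clamp, space) */
        printf("%d\n", grow_window(130000, 262144, 131072, 4096)); /* 131072 */
        return 0;
}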
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 277d71239d75..2f8039a26b08 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net) | |||
| 2578 | { | 2578 | { |
| 2579 | int cpu; | 2579 | int cpu; |
| 2580 | 2580 | ||
| 2581 | module_put(net->ipv4.tcp_congestion_control->owner); | 2581 | if (net->ipv4.tcp_congestion_control) |
| 2582 | module_put(net->ipv4.tcp_congestion_control->owner); | ||
| 2582 | 2583 | ||
| 2583 | for_each_possible_cpu(cpu) | 2584 | for_each_possible_cpu(cpu) |
| 2584 | inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); | 2585 | inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index d43d076c98f5..1766325423b5 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
| @@ -476,7 +476,7 @@ static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh, | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | if (nlmsg_attrlen(nlh, sizeof(*ifal))) { | 478 | if (nlmsg_attrlen(nlh, sizeof(*ifal))) { |
| 479 | NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump requewst"); | 479 | NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request"); |
| 480 | return -EINVAL; | 480 | return -EINVAL; |
| 481 | } | 481 | } |
| 482 | 482 | ||
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 79d2e43c05c5..5fc1f4e0c0cf 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c | |||
| @@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info) | |||
| 417 | 417 | ||
| 418 | done: | 418 | done: |
| 419 | rhashtable_walk_stop(&iter); | 419 | rhashtable_walk_stop(&iter); |
| 420 | rhashtable_walk_exit(&iter); | ||
| 420 | return ret; | 421 | return ret; |
| 421 | } | 422 | } |
| 422 | 423 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b32c95f02128..655e46b227f9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) | |||
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | static int ip6erspan_rcv(struct sk_buff *skb, | 527 | static int ip6erspan_rcv(struct sk_buff *skb, |
| 528 | struct tnl_ptk_info *tpi) | 528 | struct tnl_ptk_info *tpi, |
| 529 | int gre_hdr_len) | ||
| 529 | { | 530 | { |
| 530 | struct erspan_base_hdr *ershdr; | 531 | struct erspan_base_hdr *ershdr; |
| 531 | struct erspan_metadata *pkt_md; | ||
| 532 | const struct ipv6hdr *ipv6h; | 532 | const struct ipv6hdr *ipv6h; |
| 533 | struct erspan_md2 *md2; | 533 | struct erspan_md2 *md2; |
| 534 | struct ip6_tnl *tunnel; | 534 | struct ip6_tnl *tunnel; |
| @@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, | |||
| 547 | if (unlikely(!pskb_may_pull(skb, len))) | 547 | if (unlikely(!pskb_may_pull(skb, len))) |
| 548 | return PACKET_REJECT; | 548 | return PACKET_REJECT; |
| 549 | 549 | ||
| 550 | ershdr = (struct erspan_base_hdr *)skb->data; | ||
| 551 | pkt_md = (struct erspan_metadata *)(ershdr + 1); | ||
| 552 | |||
| 553 | if (__iptunnel_pull_header(skb, len, | 550 | if (__iptunnel_pull_header(skb, len, |
| 554 | htons(ETH_P_TEB), | 551 | htons(ETH_P_TEB), |
| 555 | false, false) < 0) | 552 | false, false) < 0) |
| 556 | return PACKET_REJECT; | 553 | return PACKET_REJECT; |
| 557 | 554 | ||
| 558 | if (tunnel->parms.collect_md) { | 555 | if (tunnel->parms.collect_md) { |
| 556 | struct erspan_metadata *pkt_md, *md; | ||
| 559 | struct metadata_dst *tun_dst; | 557 | struct metadata_dst *tun_dst; |
| 560 | struct ip_tunnel_info *info; | 558 | struct ip_tunnel_info *info; |
| 561 | struct erspan_metadata *md; | 559 | unsigned char *gh; |
| 562 | __be64 tun_id; | 560 | __be64 tun_id; |
| 563 | __be16 flags; | 561 | __be16 flags; |
| 564 | 562 | ||
| @@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, | |||
| 571 | if (!tun_dst) | 569 | if (!tun_dst) |
| 572 | return PACKET_REJECT; | 570 | return PACKET_REJECT; |
| 573 | 571 | ||
| 572 | /* skb can be uncloned in __iptunnel_pull_header, so | ||
| 573 | * old pkt_md is no longer valid and we need to reset | ||
| 574 | * it | ||
| 575 | */ | ||
| 576 | gh = skb_network_header(skb) + | ||
| 577 | skb_network_header_len(skb); | ||
| 578 | pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + | ||
| 579 | sizeof(*ershdr)); | ||
| 574 | info = &tun_dst->u.tun_info; | 580 | info = &tun_dst->u.tun_info; |
| 575 | md = ip_tunnel_info_opts(info); | 581 | md = ip_tunnel_info_opts(info); |
| 576 | md->version = ver; | 582 | md->version = ver; |
| @@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb) | |||
| 607 | 613 | ||
| 608 | if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || | 614 | if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || |
| 609 | tpi.proto == htons(ETH_P_ERSPAN2))) { | 615 | tpi.proto == htons(ETH_P_ERSPAN2))) { |
| 610 | if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD) | 616 | if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) |
| 611 | return 0; | 617 | return 0; |
| 612 | goto out; | 618 | goto out; |
| 613 | } | 619 | } |
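The ip6erspan_rcv() change above delays computing pkt_md until after __iptunnel_pull_header(), because pulling may unclone the skb and reallocate its head, leaving pointers taken earlier dangling; the same pattern appears in the ip6_fragment() and ipip6_rcv() hunks further down. The standalone illustration below captures the rule — carry an offset across a call that may reallocate, then re-derive the pointer — using a hypothetical buffer type and helper.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* A pointer into a buffer must be re-derived after any call that may
 * reallocate the buffer, the way __iptunnel_pull_header() may unclone
 * and copy an skb head.  struct buf and maybe_realloc() are stand-ins.
 */
struct buf { unsigned char *data; size_t len; };

static int maybe_realloc(struct buf *b)    /* like uncloning an skb head */
{
    unsigned char *n = malloc(b->len);
    if (!n)
        return -1;
    memcpy(n, b->data, b->len);
    free(b->data);
    b->data = n;
    return 0;
}

int main(void)
{
    struct buf b = { .data = calloc(1, 64), .len = 64 };
    size_t md_off = 16;                  /* remember the offset, not a pointer */

    if (!b.data || maybe_realloc(&b))
        return 1;
    unsigned char *md = b.data + md_off; /* re-derive after the call */
    printf("metadata now at %p\n", (void *)md);
    free(b.data);
    return 0;
}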
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index edbd12067170..e51f3c648b09 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 601 | inet6_sk(skb->sk) : NULL; | 601 | inet6_sk(skb->sk) : NULL; |
| 602 | struct ipv6hdr *tmp_hdr; | 602 | struct ipv6hdr *tmp_hdr; |
| 603 | struct frag_hdr *fh; | 603 | struct frag_hdr *fh; |
| 604 | unsigned int mtu, hlen, left, len; | 604 | unsigned int mtu, hlen, left, len, nexthdr_offset; |
| 605 | int hroom, troom; | 605 | int hroom, troom; |
| 606 | __be32 frag_id; | 606 | __be32 frag_id; |
| 607 | int ptr, offset = 0, err = 0; | 607 | int ptr, offset = 0, err = 0; |
| @@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 612 | goto fail; | 612 | goto fail; |
| 613 | hlen = err; | 613 | hlen = err; |
| 614 | nexthdr = *prevhdr; | 614 | nexthdr = *prevhdr; |
| 615 | nexthdr_offset = prevhdr - skb_network_header(skb); | ||
| 615 | 616 | ||
| 616 | mtu = ip6_skb_dst_mtu(skb); | 617 | mtu = ip6_skb_dst_mtu(skb); |
| 617 | 618 | ||
| @@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 646 | (err = skb_checksum_help(skb))) | 647 | (err = skb_checksum_help(skb))) |
| 647 | goto fail; | 648 | goto fail; |
| 648 | 649 | ||
| 650 | prevhdr = skb_network_header(skb) + nexthdr_offset; | ||
| 649 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | 651 | hroom = LL_RESERVED_SPACE(rt->dst.dev); |
| 650 | if (skb_has_frag_list(skb)) { | 652 | if (skb_has_frag_list(skb)) { |
| 651 | unsigned int first_len = skb_pagelen(skb); | 653 | unsigned int first_len = skb_pagelen(skb); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 0c6403cf8b52..ade1390c6348 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 627 | rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, | 627 | rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, |
| 628 | eiph->daddr, eiph->saddr, 0, 0, | 628 | eiph->daddr, eiph->saddr, 0, 0, |
| 629 | IPPROTO_IPIP, RT_TOS(eiph->tos), 0); | 629 | IPPROTO_IPIP, RT_TOS(eiph->tos), 0); |
| 630 | if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) { | 630 | if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) { |
| 631 | if (!IS_ERR(rt)) | 631 | if (!IS_ERR(rt)) |
| 632 | ip_rt_put(rt); | 632 | ip_rt_put(rt); |
| 633 | goto out; | 633 | goto out; |
| @@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 636 | } else { | 636 | } else { |
| 637 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, | 637 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, |
| 638 | skb2->dev) || | 638 | skb2->dev) || |
| 639 | skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) | 639 | skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) |
| 640 | goto out; | 640 | goto out; |
| 641 | } | 641 | } |
| 642 | 642 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0302e0eb07af..7178e32eb15d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
| 2330 | 2330 | ||
| 2331 | rcu_read_lock(); | 2331 | rcu_read_lock(); |
| 2332 | from = rcu_dereference(rt6->from); | 2332 | from = rcu_dereference(rt6->from); |
| 2333 | if (!from) { | ||
| 2334 | rcu_read_unlock(); | ||
| 2335 | return; | ||
| 2336 | } | ||
| 2333 | nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); | 2337 | nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); |
| 2334 | if (nrt6) { | 2338 | if (nrt6) { |
| 2335 | rt6_do_update_pmtu(nrt6, mtu); | 2339 | rt6_do_update_pmtu(nrt6, mtu); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 07e21a82ce4c..b2109b74857d 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
| 669 | !net_eq(tunnel->net, dev_net(tunnel->dev)))) | 669 | !net_eq(tunnel->net, dev_net(tunnel->dev)))) |
| 670 | goto out; | 670 | goto out; |
| 671 | 671 | ||
| 672 | /* skb can be uncloned in iptunnel_pull_header, so | ||
| 673 | * old iph is no longer valid | ||
| 674 | */ | ||
| 675 | iph = (const struct iphdr *)skb_mac_header(skb); | ||
| 672 | err = IP_ECN_decapsulate(iph, skb); | 676 | err = IP_ECN_decapsulate(iph, skb); |
| 673 | if (unlikely(err)) { | 677 | if (unlikely(err)) { |
| 674 | if (log_ecn_error) | 678 | if (log_ecn_error) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b444483cdb2b..622eeaf5732b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk) | |||
| 1047 | static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, | 1047 | static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, |
| 1048 | int addr_len) | 1048 | int addr_len) |
| 1049 | { | 1049 | { |
| 1050 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
| 1051 | return -EINVAL; | ||
| 1050 | /* The following checks are replicated from __ip6_datagram_connect() | 1052 | /* The following checks are replicated from __ip6_datagram_connect() |
| 1051 | * and intended to prevent BPF program called below from accessing | 1053 | * and intended to prevent BPF program called below from accessing |
| 1052 | * bytes that are out of the bound specified by user in addr_len. | 1054 | * bytes that are out of the bound specified by user in addr_len. |
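The udpv6_pre_connect() guard above rejects any address too short to even hold sa_family before the BPF hook can look at it. Here is a sketch of the same length check in plain C; offsetofend() is a kernel macro, so a local copy is defined, and -1 stands in for -EINVAL.

#include <stddef.h>
#include <sys/socket.h>
#include <stdio.h>

/* Local copy of the kernel's offsetofend(): first byte past a member. */
#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Any sockaddr shorter than this cannot carry a valid address family. */
static int check_addr_len(int addr_len)
{
    if (addr_len < (int)offsetofend(struct sockaddr, sa_family))
        return -1;
    return 0;
}

int main(void)
{
    printf("len 1 -> %d, len 16 -> %d\n",
           check_addr_len(1), check_addr_len(16));   /* -1, 0 */
    return 0;
}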
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index c5c5ab6c5a1c..44fdc641710d 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c | |||
| @@ -2054,14 +2054,14 @@ static int __init kcm_init(void) | |||
| 2054 | if (err) | 2054 | if (err) |
| 2055 | goto fail; | 2055 | goto fail; |
| 2056 | 2056 | ||
| 2057 | err = sock_register(&kcm_family_ops); | ||
| 2058 | if (err) | ||
| 2059 | goto sock_register_fail; | ||
| 2060 | |||
| 2061 | err = register_pernet_device(&kcm_net_ops); | 2057 | err = register_pernet_device(&kcm_net_ops); |
| 2062 | if (err) | 2058 | if (err) |
| 2063 | goto net_ops_fail; | 2059 | goto net_ops_fail; |
| 2064 | 2060 | ||
| 2061 | err = sock_register(&kcm_family_ops); | ||
| 2062 | if (err) | ||
| 2063 | goto sock_register_fail; | ||
| 2064 | |||
| 2065 | err = kcm_proc_init(); | 2065 | err = kcm_proc_init(); |
| 2066 | if (err) | 2066 | if (err) |
| 2067 | goto proc_init_fail; | 2067 | goto proc_init_fail; |
| @@ -2069,12 +2069,12 @@ static int __init kcm_init(void) | |||
| 2069 | return 0; | 2069 | return 0; |
| 2070 | 2070 | ||
| 2071 | proc_init_fail: | 2071 | proc_init_fail: |
| 2072 | unregister_pernet_device(&kcm_net_ops); | ||
| 2073 | |||
| 2074 | net_ops_fail: | ||
| 2075 | sock_unregister(PF_KCM); | 2072 | sock_unregister(PF_KCM); |
| 2076 | 2073 | ||
| 2077 | sock_register_fail: | 2074 | sock_register_fail: |
| 2075 | unregister_pernet_device(&kcm_net_ops); | ||
| 2076 | |||
| 2077 | net_ops_fail: | ||
| 2078 | proto_unregister(&kcm_proto); | 2078 | proto_unregister(&kcm_proto); |
| 2079 | 2079 | ||
| 2080 | fail: | 2080 | fail: |
| @@ -2090,8 +2090,8 @@ fail: | |||
| 2090 | static void __exit kcm_exit(void) | 2090 | static void __exit kcm_exit(void) |
| 2091 | { | 2091 | { |
| 2092 | kcm_proc_exit(); | 2092 | kcm_proc_exit(); |
| 2093 | unregister_pernet_device(&kcm_net_ops); | ||
| 2094 | sock_unregister(PF_KCM); | 2093 | sock_unregister(PF_KCM); |
| 2094 | unregister_pernet_device(&kcm_net_ops); | ||
| 2095 | proto_unregister(&kcm_proto); | 2095 | proto_unregister(&kcm_proto); |
| 2096 | destroy_workqueue(kcm_wq); | 2096 | destroy_workqueue(kcm_wq); |
| 2097 | 2097 | ||
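The kcm_init()/kcm_exit() hunks above register the per-net state before exposing the PF_KCM socket family and tear everything down in the reverse order, restoring the usual LIFO goto-unwind shape. A generic sketch of that pattern follows, with hypothetical step_a/step_b/step_c helpers loosely corresponding to proto_register(), register_pernet_device() and sock_register().

#include <stdio.h>

/* LIFO error unwinding: each setup step has a matching teardown, run in
 * reverse order on failure.  All step/undo names are stand-ins.
 */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }   /* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init(void)
{
    int err;

    err = step_a();
    if (err)
        goto fail;
    err = step_b();          /* e.g. register the per-net state first */
    if (err)
        goto undo_a;
    err = step_c();          /* expose the socket family last */
    if (err)
        goto undo_b;
    return 0;

undo_b:
    undo_b();
undo_a:
    undo_a();
fail:
    return err;
}

int main(void) { return init() ? 1 : 0; }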
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index b99e73a7e7e0..2017b7d780f5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
| @@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) | |||
| 320 | struct llc_sap *sap; | 320 | struct llc_sap *sap; |
| 321 | int rc = -EINVAL; | 321 | int rc = -EINVAL; |
| 322 | 322 | ||
| 323 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); | ||
| 324 | |||
| 325 | lock_sock(sk); | 323 | lock_sock(sk); |
| 326 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) | 324 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) |
| 327 | goto out; | 325 | goto out; |
| 328 | rc = -EAFNOSUPPORT; | 326 | rc = -EAFNOSUPPORT; |
| 329 | if (unlikely(addr->sllc_family != AF_LLC)) | 327 | if (unlikely(addr->sllc_family != AF_LLC)) |
| 330 | goto out; | 328 | goto out; |
| 329 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); | ||
| 331 | rc = -ENODEV; | 330 | rc = -ENODEV; |
| 332 | rcu_read_lock(); | 331 | rcu_read_lock(); |
| 333 | if (sk->sk_bound_dev_if) { | 332 | if (sk->sk_bound_dev_if) { |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 28d022a3eee3..ae4f0be3b393 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
| @@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, | |||
| 1195 | { | 1195 | { |
| 1196 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); | 1196 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); |
| 1197 | 1197 | ||
| 1198 | if (local->in_reconfig) | ||
| 1199 | return; | ||
| 1200 | |||
| 1198 | if (!check_sdata_in_driver(sdata)) | 1201 | if (!check_sdata_in_driver(sdata)) |
| 1199 | return; | 1202 | return; |
| 1200 | 1203 | ||
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 4700718e010f..37e372896230 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
| @@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
| 167 | * The driver doesn't know anything about VLAN interfaces. | 167 | * The driver doesn't know anything about VLAN interfaces. |
| 168 | * Hence, don't send GTKs for VLAN interfaces to the driver. | 168 | * Hence, don't send GTKs for VLAN interfaces to the driver. |
| 169 | */ | 169 | */ |
| 170 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) | 170 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { |
| 171 | ret = 1; | ||
| 171 | goto out_unsupported; | 172 | goto out_unsupported; |
| 173 | } | ||
| 172 | } | 174 | } |
| 173 | 175 | ||
| 174 | ret = drv_set_key(key->local, SET_KEY, sdata, | 176 | ret = drv_set_key(key->local, SET_KEY, sdata, |
| @@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
| 213 | /* all of these we can do in software - if driver can */ | 215 | /* all of these we can do in software - if driver can */ |
| 214 | if (ret == 1) | 216 | if (ret == 1) |
| 215 | return 0; | 217 | return 0; |
| 216 | if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) { | 218 | if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) |
| 217 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
| 218 | return 0; | ||
| 219 | return -EINVAL; | 219 | return -EINVAL; |
| 220 | } | ||
| 221 | return 0; | 220 | return 0; |
| 222 | default: | 221 | default: |
| 223 | return -EINVAL; | 222 | return -EINVAL; |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 95eb5064fa91..b76a2aefa9ec 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
| @@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); | |||
| 23 | static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) | 23 | static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) |
| 24 | { | 24 | { |
| 25 | /* Use last four bytes of hw addr as hash index */ | 25 | /* Use last four bytes of hw addr as hash index */ |
| 26 | return jhash_1word(*(u32 *)(addr+2), seed); | 26 | return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | static const struct rhashtable_params mesh_rht_params = { | 29 | static const struct rhashtable_params mesh_rht_params = { |
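The mesh_table_hash() fix above avoids dereferencing a u32 at addr + 2, which is not 4-byte aligned for a 6-byte MAC, by going through the kernel's unaligned-access helper. A userspace equivalent of such a load uses memcpy, which the compiler lowers to whatever unaligned-capable instruction the target supports; the helper name below is local, not the kernel's.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Alignment-safe 32-bit load, equivalent in spirit to the kernel's
 * __get_unaligned_cpu32() used in the hunk above.
 */
static uint32_t get_unaligned_u32(const void *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void)
{
    uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

    /* last four bytes of the hw addr, as in mesh_table_hash() */
    printf("0x%08x\n", get_unaligned_u32(mac + 2));
    return 0;
}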
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 7f8d93401ce0..bf0b187f994e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta) | |||
| 1568 | return; | 1568 | return; |
| 1569 | 1569 | ||
| 1570 | for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { | 1570 | for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { |
| 1571 | if (txq_has_queue(sta->sta.txq[tid])) | 1571 | struct ieee80211_txq *txq = sta->sta.txq[tid]; |
| 1572 | struct txq_info *txqi = to_txq_info(txq); | ||
| 1573 | |||
| 1574 | spin_lock(&local->active_txq_lock[txq->ac]); | ||
| 1575 | if (!list_empty(&txqi->schedule_order)) | ||
| 1576 | list_del_init(&txqi->schedule_order); | ||
| 1577 | spin_unlock(&local->active_txq_lock[txq->ac]); | ||
| 1578 | |||
| 1579 | if (txq_has_queue(txq)) | ||
| 1572 | set_bit(tid, &sta->txq_buffered_tids); | 1580 | set_bit(tid, &sta->txq_buffered_tids); |
| 1573 | else | 1581 | else |
| 1574 | clear_bit(tid, &sta->txq_buffered_tids); | 1582 | clear_bit(tid, &sta->txq_buffered_tids); |
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h index 366b9e6f043e..40141df09f25 100644 --- a/net/mac80211/trace_msg.h +++ b/net/mac80211/trace_msg.h | |||
| @@ -1,4 +1,9 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | ||
| 3 | * Portions of this file | ||
| 4 | * Copyright (C) 2019 Intel Corporation | ||
| 5 | */ | ||
| 6 | |||
| 2 | #ifdef CONFIG_MAC80211_MESSAGE_TRACING | 7 | #ifdef CONFIG_MAC80211_MESSAGE_TRACING |
| 3 | 8 | ||
| 4 | #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) | 9 | #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) |
| @@ -11,7 +16,7 @@ | |||
| 11 | #undef TRACE_SYSTEM | 16 | #undef TRACE_SYSTEM |
| 12 | #define TRACE_SYSTEM mac80211_msg | 17 | #define TRACE_SYSTEM mac80211_msg |
| 13 | 18 | ||
| 14 | #define MAX_MSG_LEN 100 | 19 | #define MAX_MSG_LEN 120 |
| 15 | 20 | ||
| 16 | DECLARE_EVENT_CLASS(mac80211_msg_event, | 21 | DECLARE_EVENT_CLASS(mac80211_msg_event, |
| 17 | TP_PROTO(struct va_format *vaf), | 22 | TP_PROTO(struct va_format *vaf), |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8a49a74c0a37..2e816dd67be7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
| 3221 | u8 max_subframes = sta->sta.max_amsdu_subframes; | 3221 | u8 max_subframes = sta->sta.max_amsdu_subframes; |
| 3222 | int max_frags = local->hw.max_tx_fragments; | 3222 | int max_frags = local->hw.max_tx_fragments; |
| 3223 | int max_amsdu_len = sta->sta.max_amsdu_len; | 3223 | int max_amsdu_len = sta->sta.max_amsdu_len; |
| 3224 | int orig_truesize; | ||
| 3224 | __be16 len; | 3225 | __be16 len; |
| 3225 | void *data; | 3226 | void *data; |
| 3226 | bool ret = false; | 3227 | bool ret = false; |
| @@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
| 3261 | if (!head || skb_is_gso(head)) | 3262 | if (!head || skb_is_gso(head)) |
| 3262 | goto out; | 3263 | goto out; |
| 3263 | 3264 | ||
| 3265 | orig_truesize = head->truesize; | ||
| 3264 | orig_len = head->len; | 3266 | orig_len = head->len; |
| 3265 | 3267 | ||
| 3266 | if (skb->len + head->len > max_amsdu_len) | 3268 | if (skb->len + head->len > max_amsdu_len) |
| @@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, | |||
| 3318 | *frag_tail = skb; | 3320 | *frag_tail = skb; |
| 3319 | 3321 | ||
| 3320 | out_recalc: | 3322 | out_recalc: |
| 3323 | fq->memory_usage += head->truesize - orig_truesize; | ||
| 3321 | if (head->len != orig_len) { | 3324 | if (head->len != orig_len) { |
| 3322 | flow->backlog += head->len - orig_len; | 3325 | flow->backlog += head->len - orig_len; |
| 3323 | tin->backlog_bytes += head->len - orig_len; | 3326 | tin->backlog_bytes += head->len - orig_len; |
| @@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue); | |||
| 3646 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) | 3649 | struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) |
| 3647 | { | 3650 | { |
| 3648 | struct ieee80211_local *local = hw_to_local(hw); | 3651 | struct ieee80211_local *local = hw_to_local(hw); |
| 3652 | struct ieee80211_txq *ret = NULL; | ||
| 3649 | struct txq_info *txqi = NULL; | 3653 | struct txq_info *txqi = NULL; |
| 3650 | 3654 | ||
| 3651 | lockdep_assert_held(&local->active_txq_lock[ac]); | 3655 | spin_lock_bh(&local->active_txq_lock[ac]); |
| 3652 | 3656 | ||
| 3653 | begin: | 3657 | begin: |
| 3654 | txqi = list_first_entry_or_null(&local->active_txqs[ac], | 3658 | txqi = list_first_entry_or_null(&local->active_txqs[ac], |
| 3655 | struct txq_info, | 3659 | struct txq_info, |
| 3656 | schedule_order); | 3660 | schedule_order); |
| 3657 | if (!txqi) | 3661 | if (!txqi) |
| 3658 | return NULL; | 3662 | goto out; |
| 3659 | 3663 | ||
| 3660 | if (txqi->txq.sta) { | 3664 | if (txqi->txq.sta) { |
| 3661 | struct sta_info *sta = container_of(txqi->txq.sta, | 3665 | struct sta_info *sta = container_of(txqi->txq.sta, |
| @@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) | |||
| 3672 | 3676 | ||
| 3673 | 3677 | ||
| 3674 | if (txqi->schedule_round == local->schedule_round[ac]) | 3678 | if (txqi->schedule_round == local->schedule_round[ac]) |
| 3675 | return NULL; | 3679 | goto out; |
| 3676 | 3680 | ||
| 3677 | list_del_init(&txqi->schedule_order); | 3681 | list_del_init(&txqi->schedule_order); |
| 3678 | txqi->schedule_round = local->schedule_round[ac]; | 3682 | txqi->schedule_round = local->schedule_round[ac]; |
| 3679 | return &txqi->txq; | 3683 | ret = &txqi->txq; |
| 3684 | |||
| 3685 | out: | ||
| 3686 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
| 3687 | return ret; | ||
| 3680 | } | 3688 | } |
| 3681 | EXPORT_SYMBOL(ieee80211_next_txq); | 3689 | EXPORT_SYMBOL(ieee80211_next_txq); |
| 3682 | 3690 | ||
| 3683 | void ieee80211_return_txq(struct ieee80211_hw *hw, | 3691 | void __ieee80211_schedule_txq(struct ieee80211_hw *hw, |
| 3684 | struct ieee80211_txq *txq) | 3692 | struct ieee80211_txq *txq, |
| 3693 | bool force) | ||
| 3685 | { | 3694 | { |
| 3686 | struct ieee80211_local *local = hw_to_local(hw); | 3695 | struct ieee80211_local *local = hw_to_local(hw); |
| 3687 | struct txq_info *txqi = to_txq_info(txq); | 3696 | struct txq_info *txqi = to_txq_info(txq); |
| 3688 | 3697 | ||
| 3689 | lockdep_assert_held(&local->active_txq_lock[txq->ac]); | 3698 | spin_lock_bh(&local->active_txq_lock[txq->ac]); |
| 3690 | 3699 | ||
| 3691 | if (list_empty(&txqi->schedule_order) && | 3700 | if (list_empty(&txqi->schedule_order) && |
| 3692 | (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) { | 3701 | (force || !skb_queue_empty(&txqi->frags) || |
| 3702 | txqi->tin.backlog_packets)) { | ||
| 3693 | /* If airtime accounting is active, always enqueue STAs at the | 3703 | /* If airtime accounting is active, always enqueue STAs at the |
| 3694 | * head of the list to ensure that they only get moved to the | 3704 | * head of the list to ensure that they only get moved to the |
| 3695 | * back by the airtime DRR scheduler once they have a negative | 3705 | * back by the airtime DRR scheduler once they have a negative |
| @@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw, | |||
| 3706 | list_add_tail(&txqi->schedule_order, | 3716 | list_add_tail(&txqi->schedule_order, |
| 3707 | &local->active_txqs[txq->ac]); | 3717 | &local->active_txqs[txq->ac]); |
| 3708 | } | 3718 | } |
| 3709 | } | ||
| 3710 | EXPORT_SYMBOL(ieee80211_return_txq); | ||
| 3711 | 3719 | ||
| 3712 | void ieee80211_schedule_txq(struct ieee80211_hw *hw, | ||
| 3713 | struct ieee80211_txq *txq) | ||
| 3714 | __acquires(txq_lock) __releases(txq_lock) | ||
| 3715 | { | ||
| 3716 | struct ieee80211_local *local = hw_to_local(hw); | ||
| 3717 | |||
| 3718 | spin_lock_bh(&local->active_txq_lock[txq->ac]); | ||
| 3719 | ieee80211_return_txq(hw, txq); | ||
| 3720 | spin_unlock_bh(&local->active_txq_lock[txq->ac]); | 3720 | spin_unlock_bh(&local->active_txq_lock[txq->ac]); |
| 3721 | } | 3721 | } |
| 3722 | EXPORT_SYMBOL(ieee80211_schedule_txq); | 3722 | EXPORT_SYMBOL(__ieee80211_schedule_txq); |
| 3723 | 3723 | ||
| 3724 | bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | 3724 | bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, |
| 3725 | struct ieee80211_txq *txq) | 3725 | struct ieee80211_txq *txq) |
| @@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | |||
| 3729 | struct sta_info *sta; | 3729 | struct sta_info *sta; |
| 3730 | u8 ac = txq->ac; | 3730 | u8 ac = txq->ac; |
| 3731 | 3731 | ||
| 3732 | lockdep_assert_held(&local->active_txq_lock[ac]); | 3732 | spin_lock_bh(&local->active_txq_lock[ac]); |
| 3733 | 3733 | ||
| 3734 | if (!txqi->txq.sta) | 3734 | if (!txqi->txq.sta) |
| 3735 | goto out; | 3735 | goto out; |
| @@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, | |||
| 3759 | 3759 | ||
| 3760 | sta->airtime[ac].deficit += sta->airtime_weight; | 3760 | sta->airtime[ac].deficit += sta->airtime_weight; |
| 3761 | list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); | 3761 | list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); |
| 3762 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
| 3762 | 3763 | ||
| 3763 | return false; | 3764 | return false; |
| 3764 | out: | 3765 | out: |
| 3765 | if (!list_empty(&txqi->schedule_order)) | 3766 | if (!list_empty(&txqi->schedule_order)) |
| 3766 | list_del_init(&txqi->schedule_order); | 3767 | list_del_init(&txqi->schedule_order); |
| 3768 | spin_unlock_bh(&local->active_txq_lock[ac]); | ||
| 3767 | 3769 | ||
| 3768 | return true; | 3770 | return true; |
| 3769 | } | 3771 | } |
| 3770 | EXPORT_SYMBOL(ieee80211_txq_may_transmit); | 3772 | EXPORT_SYMBOL(ieee80211_txq_may_transmit); |
| 3771 | 3773 | ||
| 3772 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) | 3774 | void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) |
| 3773 | __acquires(txq_lock) | ||
| 3774 | { | 3775 | { |
| 3775 | struct ieee80211_local *local = hw_to_local(hw); | 3776 | struct ieee80211_local *local = hw_to_local(hw); |
| 3776 | 3777 | ||
| 3777 | spin_lock_bh(&local->active_txq_lock[ac]); | 3778 | spin_lock_bh(&local->active_txq_lock[ac]); |
| 3778 | local->schedule_round[ac]++; | 3779 | local->schedule_round[ac]++; |
| 3779 | } | ||
| 3780 | EXPORT_SYMBOL(ieee80211_txq_schedule_start); | ||
| 3781 | |||
| 3782 | void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) | ||
| 3783 | __releases(txq_lock) | ||
| 3784 | { | ||
| 3785 | struct ieee80211_local *local = hw_to_local(hw); | ||
| 3786 | |||
| 3787 | spin_unlock_bh(&local->active_txq_lock[ac]); | 3780 | spin_unlock_bh(&local->active_txq_lock[ac]); |
| 3788 | } | 3781 | } |
| 3789 | EXPORT_SYMBOL(ieee80211_txq_schedule_end); | 3782 | EXPORT_SYMBOL(ieee80211_txq_schedule_start); |
| 3790 | 3783 | ||
| 3791 | void __ieee80211_subif_start_xmit(struct sk_buff *skb, | 3784 | void __ieee80211_subif_start_xmit(struct sk_buff *skb, |
| 3792 | struct net_device *dev, | 3785 | struct net_device *dev, |
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index dc07fcc7938e..802db01e3075 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
| 13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
| 14 | #include <linux/etherdevice.h> | ||
| 14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
| 15 | 16 | ||
| 16 | #include <net/ncsi.h> | 17 | #include <net/ncsi.h> |
| @@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr) | |||
| 667 | ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 668 | ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
| 668 | memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN); | 669 | memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN); |
| 669 | /* Increase mac address by 1 for BMC's address */ | 670 | /* Increase mac address by 1 for BMC's address */ |
| 670 | saddr.sa_data[ETH_ALEN - 1]++; | 671 | eth_addr_inc((u8 *)saddr.sa_data); |
| 672 | if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) | ||
| 673 | return -ENXIO; | ||
| 674 | |||
| 671 | ret = ops->ndo_set_mac_address(ndev, &saddr); | 675 | ret = ops->ndo_set_mac_address(ndev, &saddr); |
| 672 | if (ret < 0) | 676 | if (ret < 0) |
| 673 | netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); | 677 | netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); |
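The NCSI handler above now bumps the BMC-supplied MAC with eth_addr_inc() (added to etherdevice.h elsewhere in this series) and bails out if the result is not a valid unicast address. A rough standalone model of both steps follows; mac_addr_inc() and mac_addr_valid() are local stand-ins, and mac_addr_valid() only approximates what is_valid_ether_addr() checks.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Increment a MAC address as a 48-bit big-endian integer, propagating
 * the carry from the last byte upward.
 */
static void mac_addr_inc(uint8_t addr[ETH_ALEN])
{
    for (int i = ETH_ALEN - 1; i >= 0; i--) {
        if (++addr[i] != 0)
            break;              /* no carry into the next byte */
    }
}

/* Minimal validity check: not all-zero and not multicast/broadcast
 * (bit 0 of the first octet set).
 */
static bool mac_addr_valid(const uint8_t addr[ETH_ALEN])
{
    uint8_t or_all = 0;

    for (int i = 0; i < ETH_ALEN; i++)
        or_all |= addr[i];
    return or_all != 0 && !(addr[0] & 0x01);
}

int main(void)
{
    uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };

    mac_addr_inc(mac);          /* 0xff wraps, carry into the 5th byte */
    printf("%02x:%02x:%02x:%02x:%02x:%02x valid=%d\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
           mac_addr_valid(mac));
    return 0;
}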
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 43bbaa32b1d6..14457551bcb4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -1678,7 +1678,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related, | |||
| 1678 | if (!cp) { | 1678 | if (!cp) { |
| 1679 | int v; | 1679 | int v; |
| 1680 | 1680 | ||
| 1681 | if (!sysctl_schedule_icmp(ipvs)) | 1681 | if (ipip || !sysctl_schedule_icmp(ipvs)) |
| 1682 | return NF_ACCEPT; | 1682 | return NF_ACCEPT; |
| 1683 | 1683 | ||
| 1684 | if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph)) | 1684 | if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph)) |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 82bfbeef46af..2a714527cde1 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/random.h> | 26 | #include <linux/random.h> |
| 27 | #include <linux/jhash.h> | 27 | #include <linux/jhash.h> |
| 28 | #include <linux/siphash.h> | ||
| 28 | #include <linux/err.h> | 29 | #include <linux/err.h> |
| 29 | #include <linux/percpu.h> | 30 | #include <linux/percpu.h> |
| 30 | #include <linux/moduleparam.h> | 31 | #include <linux/moduleparam.h> |
| @@ -449,6 +450,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | |||
| 449 | } | 450 | } |
| 450 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); | 451 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); |
| 451 | 452 | ||
| 453 | /* Generate a almost-unique pseudo-id for a given conntrack. | ||
| 454 | * | ||
| 455 | * intentionally doesn't re-use any of the seeds used for hash | ||
| 456 | * table location, we assume id gets exposed to userspace. | ||
| 457 | * | ||
| 458 | * Following nf_conn items do not change throughout lifetime | ||
| 459 | * of the nf_conn after it has been committed to main hash table: | ||
| 460 | * | ||
| 461 | * 1. nf_conn address | ||
| 462 | * 2. nf_conn->ext address | ||
| 463 | * 3. nf_conn->master address (normally NULL) | ||
| 464 | * 4. tuple | ||
| 465 | * 5. the associated net namespace | ||
| 466 | */ | ||
| 467 | u32 nf_ct_get_id(const struct nf_conn *ct) | ||
| 468 | { | ||
| 469 | static __read_mostly siphash_key_t ct_id_seed; | ||
| 470 | unsigned long a, b, c, d; | ||
| 471 | |||
| 472 | net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); | ||
| 473 | |||
| 474 | a = (unsigned long)ct; | ||
| 475 | b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct)); | ||
| 476 | c = (unsigned long)ct->ext; | ||
| 477 | d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash), | ||
| 478 | &ct_id_seed); | ||
| 479 | #ifdef CONFIG_64BIT | ||
| 480 | return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); | ||
| 481 | #else | ||
| 482 | return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed); | ||
| 483 | #endif | ||
| 484 | } | ||
| 485 | EXPORT_SYMBOL_GPL(nf_ct_get_id); | ||
| 486 | |||
| 452 | static void | 487 | static void |
| 453 | clean_from_lists(struct nf_conn *ct) | 488 | clean_from_lists(struct nf_conn *ct) |
| 454 | { | 489 | { |
| @@ -982,12 +1017,9 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
| 982 | 1017 | ||
| 983 | /* set conntrack timestamp, if enabled. */ | 1018 | /* set conntrack timestamp, if enabled. */ |
| 984 | tstamp = nf_conn_tstamp_find(ct); | 1019 | tstamp = nf_conn_tstamp_find(ct); |
| 985 | if (tstamp) { | 1020 | if (tstamp) |
| 986 | if (skb->tstamp == 0) | 1021 | tstamp->start = ktime_get_real_ns(); |
| 987 | __net_timestamp(skb); | ||
| 988 | 1022 | ||
| 989 | tstamp->start = ktime_to_ns(skb->tstamp); | ||
| 990 | } | ||
| 991 | /* Since the lookup is lockless, hash insertion must be done after | 1023 | /* Since the lookup is lockless, hash insertion must be done after |
| 992 | * starting the timer and setting the CONFIRMED bit. The RCU barriers | 1024 | * starting the timer and setting the CONFIRMED bit. The RCU barriers |
| 993 | * guarantee that no other CPU can find the conntrack before the above | 1025 | * guarantee that no other CPU can find the conntrack before the above |
| @@ -1350,6 +1382,7 @@ __nf_conntrack_alloc(struct net *net, | |||
| 1350 | /* save hash for reusing when confirming */ | 1382 | /* save hash for reusing when confirming */ |
| 1351 | *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; | 1383 | *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; |
| 1352 | ct->status = 0; | 1384 | ct->status = 0; |
| 1385 | ct->timeout = 0; | ||
| 1353 | write_pnet(&ct->ct_net, net); | 1386 | write_pnet(&ct->ct_net, net); |
| 1354 | memset(&ct->__nfct_init_offset[0], 0, | 1387 | memset(&ct->__nfct_init_offset[0], 0, |
| 1355 | offsetof(struct nf_conn, proto) - | 1388 | offsetof(struct nf_conn, proto) - |
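nf_ct_get_id() above replaces the old pointer-derived conntrack ID with a keyed hash (siphash) over values that stay constant for the lifetime of a committed entry, so userspace sees a stable ID without learning kernel addresses; nf_expect_get_id() in the next file applies the same idea to expectations. The sketch below captures only the shape of that scheme: mix64() is a non-cryptographic stand-in for siphash, the struct is hypothetical, and rand() merely stands in for the once-initialised random seed.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Derive a userspace-visible ID by keyed-hashing values that never change
 * for the lifetime of the entry, instead of exposing a raw pointer.
 */
static uint64_t mix64(uint64_t x, uint64_t key)
{
    x ^= key;
    x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
    x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
    x ^= x >> 33;
    return x;
}

struct entry { const void *self, *ext, *master; uint64_t tuple_hash; };

static uint32_t entry_get_id(const struct entry *e, uint64_t seed)
{
    uint64_t h = seed;

    h = mix64(h ^ (uintptr_t)e->self,   seed);
    h = mix64(h ^ (uintptr_t)e->ext,    seed);
    h = mix64(h ^ (uintptr_t)e->master, seed);
    h = mix64(h ^ e->tuple_hash,        seed);
    return (uint32_t)h;
}

int main(void)
{
    struct entry e = { .self = &e, .ext = NULL, .master = NULL,
                       .tuple_hash = 0x1234 };
    uint64_t seed = ((uint64_t)rand() << 32) | (uint64_t)rand();

    printf("id=0x%08x\n", entry_get_id(&e, seed));
    return 0;
}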
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 66c596d287a5..d7f61b0547c6 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
| 30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
| 31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 32 | #include <linux/siphash.h> | ||
| 32 | 33 | ||
| 33 | #include <linux/netfilter.h> | 34 | #include <linux/netfilter.h> |
| 34 | #include <net/netlink.h> | 35 | #include <net/netlink.h> |
| @@ -485,7 +486,9 @@ nla_put_failure: | |||
| 485 | 486 | ||
| 486 | static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) | 487 | static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) |
| 487 | { | 488 | { |
| 488 | if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct))) | 489 | __be32 id = (__force __be32)nf_ct_get_id(ct); |
| 490 | |||
| 491 | if (nla_put_be32(skb, CTA_ID, id)) | ||
| 489 | goto nla_put_failure; | 492 | goto nla_put_failure; |
| 490 | return 0; | 493 | return 0; |
| 491 | 494 | ||
| @@ -1286,8 +1289,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, | |||
| 1286 | } | 1289 | } |
| 1287 | 1290 | ||
| 1288 | if (cda[CTA_ID]) { | 1291 | if (cda[CTA_ID]) { |
| 1289 | u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID])); | 1292 | __be32 id = nla_get_be32(cda[CTA_ID]); |
| 1290 | if (id != (u32)(unsigned long)ct) { | 1293 | |
| 1294 | if (id != (__force __be32)nf_ct_get_id(ct)) { | ||
| 1291 | nf_ct_put(ct); | 1295 | nf_ct_put(ct); |
| 1292 | return -ENOENT; | 1296 | return -ENOENT; |
| 1293 | } | 1297 | } |
| @@ -2692,6 +2696,25 @@ nla_put_failure: | |||
| 2692 | 2696 | ||
| 2693 | static const union nf_inet_addr any_addr; | 2697 | static const union nf_inet_addr any_addr; |
| 2694 | 2698 | ||
| 2699 | static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) | ||
| 2700 | { | ||
| 2701 | static __read_mostly siphash_key_t exp_id_seed; | ||
| 2702 | unsigned long a, b, c, d; | ||
| 2703 | |||
| 2704 | net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); | ||
| 2705 | |||
| 2706 | a = (unsigned long)exp; | ||
| 2707 | b = (unsigned long)exp->helper; | ||
| 2708 | c = (unsigned long)exp->master; | ||
| 2709 | d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed); | ||
| 2710 | |||
| 2711 | #ifdef CONFIG_64BIT | ||
| 2712 | return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); | ||
| 2713 | #else | ||
| 2714 | return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); | ||
| 2715 | #endif | ||
| 2716 | } | ||
| 2717 | |||
| 2695 | static int | 2718 | static int |
| 2696 | ctnetlink_exp_dump_expect(struct sk_buff *skb, | 2719 | ctnetlink_exp_dump_expect(struct sk_buff *skb, |
| 2697 | const struct nf_conntrack_expect *exp) | 2720 | const struct nf_conntrack_expect *exp) |
| @@ -2739,7 +2762,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, | |||
| 2739 | } | 2762 | } |
| 2740 | #endif | 2763 | #endif |
| 2741 | if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || | 2764 | if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || |
| 2742 | nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) || | 2765 | nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) || |
| 2743 | nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || | 2766 | nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || |
| 2744 | nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) | 2767 | nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) |
| 2745 | goto nla_put_failure; | 2768 | goto nla_put_failure; |
| @@ -3044,7 +3067,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl, | |||
| 3044 | 3067 | ||
| 3045 | if (cda[CTA_EXPECT_ID]) { | 3068 | if (cda[CTA_EXPECT_ID]) { |
| 3046 | __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); | 3069 | __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); |
| 3047 | if (ntohl(id) != (u32)(unsigned long)exp) { | 3070 | |
| 3071 | if (id != nf_expect_get_id(exp)) { | ||
| 3048 | nf_ct_expect_put(exp); | 3072 | nf_ct_expect_put(exp); |
| 3049 | return -ENOENT; | 3073 | return -ENOENT; |
| 3050 | } | 3074 | } |
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index b9403a266a2e..37bb530d848f 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
| @@ -55,7 +55,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb, | |||
| 55 | struct va_format vaf; | 55 | struct va_format vaf; |
| 56 | va_list args; | 56 | va_list args; |
| 57 | 57 | ||
| 58 | if (net->ct.sysctl_log_invalid != protonum || | 58 | if (net->ct.sysctl_log_invalid != protonum && |
| 59 | net->ct.sysctl_log_invalid != IPPROTO_RAW) | 59 | net->ct.sysctl_log_invalid != IPPROTO_RAW) |
| 60 | return; | 60 | return; |
| 61 | 61 | ||
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index 7df477996b16..9becac953587 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c | |||
| @@ -103,49 +103,94 @@ int nf_conntrack_icmp_packet(struct nf_conn *ct, | |||
| 103 | return NF_ACCEPT; | 103 | return NF_ACCEPT; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ | 106 | /* Check inner header is related to any of the existing connections */ |
| 107 | static int | 107 | int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb, |
| 108 | icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb, | 108 | unsigned int dataoff, |
| 109 | const struct nf_hook_state *state) | 109 | const struct nf_hook_state *state, |
| 110 | u8 l4proto, union nf_inet_addr *outer_daddr) | ||
| 110 | { | 111 | { |
| 111 | struct nf_conntrack_tuple innertuple, origtuple; | 112 | struct nf_conntrack_tuple innertuple, origtuple; |
| 112 | const struct nf_conntrack_tuple_hash *h; | 113 | const struct nf_conntrack_tuple_hash *h; |
| 113 | const struct nf_conntrack_zone *zone; | 114 | const struct nf_conntrack_zone *zone; |
| 114 | enum ip_conntrack_info ctinfo; | 115 | enum ip_conntrack_info ctinfo; |
| 115 | struct nf_conntrack_zone tmp; | 116 | struct nf_conntrack_zone tmp; |
| 117 | union nf_inet_addr *ct_daddr; | ||
| 118 | enum ip_conntrack_dir dir; | ||
| 119 | struct nf_conn *ct; | ||
| 116 | 120 | ||
| 117 | WARN_ON(skb_nfct(skb)); | 121 | WARN_ON(skb_nfct(skb)); |
| 118 | zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); | 122 | zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); |
| 119 | 123 | ||
| 120 | /* Are they talking about one of our connections? */ | 124 | /* Are they talking about one of our connections? */ |
| 121 | if (!nf_ct_get_tuplepr(skb, | 125 | if (!nf_ct_get_tuplepr(skb, dataoff, |
| 122 | skb_network_offset(skb) + ip_hdrlen(skb) | 126 | state->pf, state->net, &origtuple)) |
| 123 | + sizeof(struct icmphdr), | ||
| 124 | PF_INET, state->net, &origtuple)) { | ||
| 125 | pr_debug("icmp_error_message: failed to get tuple\n"); | ||
| 126 | return -NF_ACCEPT; | 127 | return -NF_ACCEPT; |
| 127 | } | ||
| 128 | 128 | ||
| 129 | /* Ordinarily, we'd expect the inverted tupleproto, but it's | 129 | /* Ordinarily, we'd expect the inverted tupleproto, but it's |
| 130 | been preserved inside the ICMP. */ | 130 | been preserved inside the ICMP. */ |
| 131 | if (!nf_ct_invert_tuple(&innertuple, &origtuple)) { | 131 | if (!nf_ct_invert_tuple(&innertuple, &origtuple)) |
| 132 | pr_debug("icmp_error_message: no match\n"); | ||
| 133 | return -NF_ACCEPT; | 132 | return -NF_ACCEPT; |
| 134 | } | ||
| 135 | |||
| 136 | ctinfo = IP_CT_RELATED; | ||
| 137 | 133 | ||
| 138 | h = nf_conntrack_find_get(state->net, zone, &innertuple); | 134 | h = nf_conntrack_find_get(state->net, zone, &innertuple); |
| 139 | if (!h) { | 135 | if (!h) |
| 140 | pr_debug("icmp_error_message: no match\n"); | 136 | return -NF_ACCEPT; |
| 137 | |||
| 138 | /* Consider: A -> T (=This machine) -> B | ||
| 139 | * Conntrack entry will look like this: | ||
| 140 | * Original: A->B | ||
| 141 | * Reply: B->T (SNAT case) OR A | ||
| 142 | * | ||
| 143 | * When this function runs, we got packet that looks like this: | ||
| 144 | * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..). | ||
| 145 | * | ||
| 146 | * Above nf_conntrack_find_get() makes lookup based on inner_hdr, | ||
| 147 | * so we should expect that destination of the found connection | ||
| 148 | * matches outer header destination address. | ||
| 149 | * | ||
| 150 | * In above example, we can consider these two cases: | ||
| 151 | * 1. Error coming in reply direction from B or M (middle box) to | ||
| 152 | * T (SNAT case) or A. | ||
| 153 | * Inner saddr will be B, dst will be T or A. | ||
| 154 | * The found conntrack will be reply tuple (B->T/A). | ||
| 155 | * 2. Error coming in original direction from A or M to B. | ||
| 156 | * Inner saddr will be A, inner daddr will be B. | ||
| 157 | * The found conntrack will be original tuple (A->B). | ||
| 158 | * | ||
| 159 | * In both cases, conntrack[dir].dst == inner.dst. | ||
| 160 | * | ||
| 161 | * A bogus packet could look like this: | ||
| 162 | * Inner: B->T | ||
| 163 | * Outer: B->X (other machine reachable by T). | ||
| 164 | * | ||
| 165 | * In this case, lookup yields connection A->B and will | ||
| 166 | * set packet from B->X as *RELATED*, even though no connection | ||
| 167 | * from X was ever seen. | ||
| 168 | */ | ||
| 169 | ct = nf_ct_tuplehash_to_ctrack(h); | ||
| 170 | dir = NF_CT_DIRECTION(h); | ||
| 171 | ct_daddr = &ct->tuplehash[dir].tuple.dst.u3; | ||
| 172 | if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) { | ||
| 173 | if (state->pf == AF_INET) { | ||
| 174 | nf_l4proto_log_invalid(skb, state->net, state->pf, | ||
| 175 | l4proto, | ||
| 176 | "outer daddr %pI4 != inner %pI4", | ||
| 177 | &outer_daddr->ip, &ct_daddr->ip); | ||
| 178 | } else if (state->pf == AF_INET6) { | ||
| 179 | nf_l4proto_log_invalid(skb, state->net, state->pf, | ||
| 180 | l4proto, | ||
| 181 | "outer daddr %pI6 != inner %pI6", | ||
| 182 | &outer_daddr->ip6, &ct_daddr->ip6); | ||
| 183 | } | ||
| 184 | nf_ct_put(ct); | ||
| 141 | return -NF_ACCEPT; | 185 | return -NF_ACCEPT; |
| 142 | } | 186 | } |
| 143 | 187 | ||
| 144 | if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) | 188 | ctinfo = IP_CT_RELATED; |
| 189 | if (dir == IP_CT_DIR_REPLY) | ||
| 145 | ctinfo += IP_CT_IS_REPLY; | 190 | ctinfo += IP_CT_IS_REPLY; |
| 146 | 191 | ||
| 147 | /* Update skb to refer to this connection */ | 192 | /* Update skb to refer to this connection */ |
| 148 | nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); | 193 | nf_ct_set(skb, ct, ctinfo); |
| 149 | return NF_ACCEPT; | 194 | return NF_ACCEPT; |
| 150 | } | 195 | } |
| 151 | 196 | ||
| @@ -162,11 +207,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, | |||
| 162 | struct sk_buff *skb, unsigned int dataoff, | 207 | struct sk_buff *skb, unsigned int dataoff, |
| 163 | const struct nf_hook_state *state) | 208 | const struct nf_hook_state *state) |
| 164 | { | 209 | { |
| 210 | union nf_inet_addr outer_daddr; | ||
| 165 | const struct icmphdr *icmph; | 211 | const struct icmphdr *icmph; |
| 166 | struct icmphdr _ih; | 212 | struct icmphdr _ih; |
| 167 | 213 | ||
| 168 | /* Not enough header? */ | 214 | /* Not enough header? */ |
| 169 | icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); | 215 | icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); |
| 170 | if (icmph == NULL) { | 216 | if (icmph == NULL) { |
| 171 | icmp_error_log(skb, state, "short packet"); | 217 | icmp_error_log(skb, state, "short packet"); |
| 172 | return -NF_ACCEPT; | 218 | return -NF_ACCEPT; |
| @@ -199,7 +245,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, | |||
| 199 | icmph->type != ICMP_REDIRECT) | 245 | icmph->type != ICMP_REDIRECT) |
| 200 | return NF_ACCEPT; | 246 | return NF_ACCEPT; |
| 201 | 247 | ||
| 202 | return icmp_error_message(tmpl, skb, state); | 248 | memset(&outer_daddr, 0, sizeof(outer_daddr)); |
| 249 | outer_daddr.ip = ip_hdr(skb)->daddr; | ||
| 250 | |||
| 251 | dataoff += sizeof(*icmph); | ||
| 252 | return nf_conntrack_inet_error(tmpl, skb, dataoff, state, | ||
| 253 | IPPROTO_ICMP, &outer_daddr); | ||
| 203 | } | 254 | } |
| 204 | 255 | ||
| 205 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | 256 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
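The long comment added to nf_conntrack_inet_error() above explains the key check: after the lookup keyed on the inner (embedded) header, the destination of the matched tuple direction must equal the outer IP destination, otherwise the ICMP error cannot belong to that flow and must not be marked RELATED. A compact IPv4-only model of that comparison, with hypothetical types and example addresses:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* The ICMP error is only RELATED if the outer header's destination
 * matches the destination of the conntrack tuple in the direction the
 * lookup matched.
 */
struct tuple_v4 { uint32_t saddr, daddr; };

static bool icmp_error_is_related(uint32_t outer_daddr,
                                  const struct tuple_v4 *matched_dir_tuple)
{
    return outer_daddr == matched_dir_tuple->daddr;
}

int main(void)
{
    struct tuple_v4 reply = { .saddr = 0x0B000001 /* B */,
                              .daddr = 0x0A000001 /* T */ };

    /* error sent B -> T: outer daddr equals tuple daddr -> RELATED */
    printf("%d\n", icmp_error_is_related(0x0A000001, &reply));
    /* spoofed error B -> X: mismatch -> not treated as RELATED */
    printf("%d\n", icmp_error_is_related(0x0C000002, &reply));
    return 0;
}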
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index bec4a3211658..c63ee3612855 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c | |||
| @@ -123,51 +123,6 @@ int nf_conntrack_icmpv6_packet(struct nf_conn *ct, | |||
| 123 | return NF_ACCEPT; | 123 | return NF_ACCEPT; |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | static int | ||
| 127 | icmpv6_error_message(struct net *net, struct nf_conn *tmpl, | ||
| 128 | struct sk_buff *skb, | ||
| 129 | unsigned int icmp6off) | ||
| 130 | { | ||
| 131 | struct nf_conntrack_tuple intuple, origtuple; | ||
| 132 | const struct nf_conntrack_tuple_hash *h; | ||
| 133 | enum ip_conntrack_info ctinfo; | ||
| 134 | struct nf_conntrack_zone tmp; | ||
| 135 | |||
| 136 | WARN_ON(skb_nfct(skb)); | ||
| 137 | |||
| 138 | /* Are they talking about one of our connections? */ | ||
| 139 | if (!nf_ct_get_tuplepr(skb, | ||
| 140 | skb_network_offset(skb) | ||
| 141 | + sizeof(struct ipv6hdr) | ||
| 142 | + sizeof(struct icmp6hdr), | ||
| 143 | PF_INET6, net, &origtuple)) { | ||
| 144 | pr_debug("icmpv6_error: Can't get tuple\n"); | ||
| 145 | return -NF_ACCEPT; | ||
| 146 | } | ||
| 147 | |||
| 148 | /* Ordinarily, we'd expect the inverted tupleproto, but it's | ||
| 149 | been preserved inside the ICMP. */ | ||
| 150 | if (!nf_ct_invert_tuple(&intuple, &origtuple)) { | ||
| 151 | pr_debug("icmpv6_error: Can't invert tuple\n"); | ||
| 152 | return -NF_ACCEPT; | ||
| 153 | } | ||
| 154 | |||
| 155 | ctinfo = IP_CT_RELATED; | ||
| 156 | |||
| 157 | h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp), | ||
| 158 | &intuple); | ||
| 159 | if (!h) { | ||
| 160 | pr_debug("icmpv6_error: no match\n"); | ||
| 161 | return -NF_ACCEPT; | ||
| 162 | } else { | ||
| 163 | if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) | ||
| 164 | ctinfo += IP_CT_IS_REPLY; | ||
| 165 | } | ||
| 166 | |||
| 167 | /* Update skb to refer to this connection */ | ||
| 168 | nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); | ||
| 169 | return NF_ACCEPT; | ||
| 170 | } | ||
| 171 | 126 | ||
| 172 | static void icmpv6_error_log(const struct sk_buff *skb, | 127 | static void icmpv6_error_log(const struct sk_buff *skb, |
| 173 | const struct nf_hook_state *state, | 128 | const struct nf_hook_state *state, |
| @@ -182,6 +137,7 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, | |||
| 182 | unsigned int dataoff, | 137 | unsigned int dataoff, |
| 183 | const struct nf_hook_state *state) | 138 | const struct nf_hook_state *state) |
| 184 | { | 139 | { |
| 140 | union nf_inet_addr outer_daddr; | ||
| 185 | const struct icmp6hdr *icmp6h; | 141 | const struct icmp6hdr *icmp6h; |
| 186 | struct icmp6hdr _ih; | 142 | struct icmp6hdr _ih; |
| 187 | int type; | 143 | int type; |
| @@ -210,7 +166,11 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, | |||
| 210 | if (icmp6h->icmp6_type >= 128) | 166 | if (icmp6h->icmp6_type >= 128) |
| 211 | return NF_ACCEPT; | 167 | return NF_ACCEPT; |
| 212 | 168 | ||
| 213 | return icmpv6_error_message(state->net, tmpl, skb, dataoff); | 169 | memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr, |
| 170 | sizeof(outer_daddr.ip6)); | ||
| 171 | dataoff += sizeof(*icmp6h); | ||
| 172 | return nf_conntrack_inet_error(tmpl, skb, dataoff, state, | ||
| 173 | IPPROTO_ICMPV6, &outer_daddr); | ||
| 214 | } | 174 | } |
| 215 | 175 | ||
| 216 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | 176 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index af7dc6537758..000952719adf 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
| @@ -415,9 +415,14 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
| 415 | case IPPROTO_ICMPV6: | 415 | case IPPROTO_ICMPV6: |
| 416 | /* id is same for either direction... */ | 416 | /* id is same for either direction... */ |
| 417 | keyptr = &tuple->src.u.icmp.id; | 417 | keyptr = &tuple->src.u.icmp.id; |
| 418 | min = range->min_proto.icmp.id; | 418 | if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { |
| 419 | range_size = ntohs(range->max_proto.icmp.id) - | 419 | min = 0; |
| 420 | ntohs(range->min_proto.icmp.id) + 1; | 420 | range_size = 65536; |
| 421 | } else { | ||
| 422 | min = ntohs(range->min_proto.icmp.id); | ||
| 423 | range_size = ntohs(range->max_proto.icmp.id) - | ||
| 424 | ntohs(range->min_proto.icmp.id) + 1; | ||
| 425 | } | ||
| 421 | goto find_free_id; | 426 | goto find_free_id; |
| 422 | #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE) | 427 | #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE) |
| 423 | case IPPROTO_GRE: | 428 | case IPPROTO_GRE: |
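The nf_nat_l4proto_unique_tuple() hunk above selects the full 16-bit ICMP id space when no NF_NAT_RANGE_PROTO_SPECIFIED range was configured, instead of deriving a degenerate range from zeroed min/max, and converts the configured bounds from network order. A standalone sketch of that selection logic; the flag constant and function name are local stand-ins.

#include <stdint.h>
#include <stdio.h>

#define RANGE_PROTO_SPECIFIED 0x1   /* stand-in for NF_NAT_RANGE_PROTO_SPECIFIED */

/* Without an explicit proto range, ICMP ids may be rewritten anywhere in
 * 0..65535; with one, the configured [min, max] (host order) is used.
 */
static void icmp_id_range(unsigned int flags,
                          uint16_t cfg_min, uint16_t cfg_max,
                          unsigned int *min, unsigned int *range_size)
{
    if (!(flags & RANGE_PROTO_SPECIFIED)) {
        *min = 0;
        *range_size = 65536;
    } else {
        *min = cfg_min;
        *range_size = cfg_max - cfg_min + 1;
    }
}

int main(void)
{
    unsigned int min, size;

    icmp_id_range(0, 0, 0, &min, &size);
    printf("unspecified: min=%u size=%u\n", min, size);   /* 0, 65536 */
    icmp_id_range(RANGE_PROTO_SPECIFIED, 100, 199, &min, &size);
    printf("specified:   min=%u size=%u\n", min, size);   /* 100, 100 */
    return 0;
}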
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ef7772e976cc..1606eaa5ae0d 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -1545,7 +1545,7 @@ static int nft_chain_parse_hook(struct net *net, | |||
| 1545 | if (IS_ERR(type)) | 1545 | if (IS_ERR(type)) |
| 1546 | return PTR_ERR(type); | 1546 | return PTR_ERR(type); |
| 1547 | } | 1547 | } |
| 1548 | if (!(type->hook_mask & (1 << hook->num))) | 1548 | if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num))) |
| 1549 | return -EOPNOTSUPP; | 1549 | return -EOPNOTSUPP; |
| 1550 | 1550 | ||
| 1551 | if (type->type == NFT_CHAIN_T_NAT && | 1551 | if (type->type == NFT_CHAIN_T_NAT && |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b1f9c5303f02..0b3347570265 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -540,7 +540,7 @@ __build_packet_message(struct nfnl_log_net *log, | |||
| 540 | goto nla_put_failure; | 540 | goto nla_put_failure; |
| 541 | } | 541 | } |
| 542 | 542 | ||
| 543 | if (skb->tstamp) { | 543 | if (hooknum <= NF_INET_FORWARD && skb->tstamp) { |
| 544 | struct nfulnl_msg_packet_timestamp ts; | 544 | struct nfulnl_msg_packet_timestamp ts; |
| 545 | struct timespec64 kts = ktime_to_timespec64(skb->tstamp); | 545 | struct timespec64 kts = ktime_to_timespec64(skb->tstamp); |
| 546 | ts.sec = cpu_to_be64(kts.tv_sec); | 546 | ts.sec = cpu_to_be64(kts.tv_sec); |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 0dcc3592d053..e057b2961d31 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -582,7 +582,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 582 | if (nfqnl_put_bridge(entry, skb) < 0) | 582 | if (nfqnl_put_bridge(entry, skb) < 0) |
| 583 | goto nla_put_failure; | 583 | goto nla_put_failure; |
| 584 | 584 | ||
| 585 | if (entskb->tstamp) { | 585 | if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) { |
| 586 | struct nfqnl_msg_packet_timestamp ts; | 586 | struct nfqnl_msg_packet_timestamp ts; |
| 587 | struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); | 587 | struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); |
| 588 | 588 | ||
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index c13bcd0ab491..8dbb4d48f2ed 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
| @@ -163,19 +163,24 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 163 | s64 stamp; | 163 | s64 stamp; |
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * We cannot use get_seconds() instead of __net_timestamp() here. | 166 | * We need real time here, but we can neither use skb->tstamp |
| 167 | * nor __net_timestamp(). | ||
| 168 | * | ||
| 169 | * skb->tstamp and skb->skb_mstamp_ns overlap, however, they | ||
| 170 | * use different clock types (real vs monotonic). | ||
| 171 | * | ||
| 167 | * Suppose you have two rules: | 172 | * Suppose you have two rules: |
| 168 | * 1. match before 13:00 | 173 | * 1. match before 13:00 |
| 169 | * 2. match after 13:00 | 174 | * 2. match after 13:00 |
| 175 | * | ||
| 170 | * If you match against processing time (get_seconds) it | 176 | * If you match against processing time (get_seconds) it |
| 171 | * may happen that the same packet matches both rules if | 177 | * may happen that the same packet matches both rules if |
| 172 | * it arrived at the right moment before 13:00. | 178 | * it arrived at the right moment before 13:00, so it would be |
| 179 | * better to check skb->tstamp and set it via __net_timestamp() | ||
| 180 | * if needed. This however breaks outgoing packets tx timestamp, | ||
| 181 | * and causes them to get delayed forever by fq packet scheduler. | ||
| 173 | */ | 182 | */ |
| 174 | if (skb->tstamp == 0) | 183 | stamp = get_seconds(); |
| 175 | __net_timestamp((struct sk_buff *)skb); | ||
| 176 | |||
| 177 | stamp = ktime_to_ns(skb->tstamp); | ||
| 178 | stamp = div_s64(stamp, NSEC_PER_SEC); | ||
| 179 | 184 | ||
| 180 | if (info->flags & XT_TIME_LOCAL_TZ) | 185 | if (info->flags & XT_TIME_LOCAL_TZ) |
| 181 | /* Adjust for local timezone */ | 186 | /* Adjust for local timezone */ |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f28e937320a3..216ab915dd54 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
| 988 | struct netlink_sock *nlk = nlk_sk(sk); | 988 | struct netlink_sock *nlk = nlk_sk(sk); |
| 989 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 989 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
| 990 | int err = 0; | 990 | int err = 0; |
| 991 | unsigned long groups = nladdr->nl_groups; | 991 | unsigned long groups; |
| 992 | bool bound; | 992 | bool bound; |
| 993 | 993 | ||
| 994 | if (addr_len < sizeof(struct sockaddr_nl)) | 994 | if (addr_len < sizeof(struct sockaddr_nl)) |
| @@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
| 996 | 996 | ||
| 997 | if (nladdr->nl_family != AF_NETLINK) | 997 | if (nladdr->nl_family != AF_NETLINK) |
| 998 | return -EINVAL; | 998 | return -EINVAL; |
| 999 | groups = nladdr->nl_groups; | ||
| 999 | 1000 | ||
| 1000 | /* Only superuser is allowed to listen multicasts */ | 1001 | /* Only superuser is allowed to listen multicasts */ |
| 1001 | if (groups) { | 1002 | if (groups) { |
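The netlink_bind() change above only reads nladdr->nl_groups once the length and family checks have passed, so a short sockaddr can no longer be dereferenced past its end. The standalone sketch below shows the same validate-before-read ordering with a simplified stand-in structure; fake_sockaddr_nl and fake_bind() are illustrative, and the literal 16 is used in place of AF_NETLINK.

/* Standalone sketch of the validate-before-read ordering enforced above:
 * optional sockaddr fields are dereferenced only after the length and
 * family checks succeed.  Simplified types, not the kernel structures. */
#include <errno.h>
#include <stddef.h>

struct fake_sockaddr_nl {
        unsigned short nl_family;
        unsigned short nl_pad;
        unsigned int   nl_pid;
        unsigned int   nl_groups;
};

static int fake_bind(const void *addr, size_t addr_len)
{
        const struct fake_sockaddr_nl *nladdr = addr;
        unsigned long groups;

        if (addr_len < sizeof(*nladdr))
                return -EINVAL;         /* too short: nl_groups may not exist */
        if (nladdr->nl_family != 16)    /* AF_NETLINK on Linux */
                return -EINVAL;

        groups = nladdr->nl_groups;     /* safe to read only now */
        (void)groups;
        return 0;
}

int main(void)
{
        struct fake_sockaddr_nl a = { .nl_family = 16, .nl_groups = 1 };

        return fake_bind(&a, sizeof(a)) ? 1 : 0;
}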
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 1d3144d19903..71ffd1a6dc7c 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void) | |||
| 1392 | int i; | 1392 | int i; |
| 1393 | int rc = proto_register(&nr_proto, 0); | 1393 | int rc = proto_register(&nr_proto, 0); |
| 1394 | 1394 | ||
| 1395 | if (rc != 0) | 1395 | if (rc) |
| 1396 | goto out; | 1396 | return rc; |
| 1397 | 1397 | ||
| 1398 | if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { | 1398 | if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { |
| 1399 | printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); | 1399 | pr_err("NET/ROM: %s - nr_ndevs parameter too large\n", |
| 1400 | return -1; | 1400 | __func__); |
| 1401 | rc = -EINVAL; | ||
| 1402 | goto unregister_proto; | ||
| 1401 | } | 1403 | } |
| 1402 | 1404 | ||
| 1403 | dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); | 1405 | dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); |
| 1404 | if (dev_nr == NULL) { | 1406 | if (!dev_nr) { |
| 1405 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); | 1407 | pr_err("NET/ROM: %s - unable to allocate device array\n", |
| 1406 | return -1; | 1408 | __func__); |
| 1409 | rc = -ENOMEM; | ||
| 1410 | goto unregister_proto; | ||
| 1407 | } | 1411 | } |
| 1408 | 1412 | ||
| 1409 | for (i = 0; i < nr_ndevs; i++) { | 1413 | for (i = 0; i < nr_ndevs; i++) { |
| @@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void) | |||
| 1413 | sprintf(name, "nr%d", i); | 1417 | sprintf(name, "nr%d", i); |
| 1414 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); | 1418 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); |
| 1415 | if (!dev) { | 1419 | if (!dev) { |
| 1416 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); | 1420 | rc = -ENOMEM; |
| 1417 | goto fail; | 1421 | goto fail; |
| 1418 | } | 1422 | } |
| 1419 | 1423 | ||
| 1420 | dev->base_addr = i; | 1424 | dev->base_addr = i; |
| 1421 | if (register_netdev(dev)) { | 1425 | rc = register_netdev(dev); |
| 1422 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); | 1426 | if (rc) { |
| 1423 | free_netdev(dev); | 1427 | free_netdev(dev); |
| 1424 | goto fail; | 1428 | goto fail; |
| 1425 | } | 1429 | } |
| @@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void) | |||
| 1427 | dev_nr[i] = dev; | 1431 | dev_nr[i] = dev; |
| 1428 | } | 1432 | } |
| 1429 | 1433 | ||
| 1430 | if (sock_register(&nr_family_ops)) { | 1434 | rc = sock_register(&nr_family_ops); |
| 1431 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n"); | 1435 | if (rc) |
| 1432 | goto fail; | 1436 | goto fail; |
| 1433 | } | ||
| 1434 | 1437 | ||
| 1435 | register_netdevice_notifier(&nr_dev_notifier); | 1438 | rc = register_netdevice_notifier(&nr_dev_notifier); |
| 1439 | if (rc) | ||
| 1440 | goto out_sock; | ||
| 1436 | 1441 | ||
| 1437 | ax25_register_pid(&nr_pid); | 1442 | ax25_register_pid(&nr_pid); |
| 1438 | ax25_linkfail_register(&nr_linkfail_notifier); | 1443 | ax25_linkfail_register(&nr_linkfail_notifier); |
| 1439 | 1444 | ||
| 1440 | #ifdef CONFIG_SYSCTL | 1445 | #ifdef CONFIG_SYSCTL |
| 1441 | nr_register_sysctl(); | 1446 | rc = nr_register_sysctl(); |
| 1447 | if (rc) | ||
| 1448 | goto out_sysctl; | ||
| 1442 | #endif | 1449 | #endif |
| 1443 | 1450 | ||
| 1444 | nr_loopback_init(); | 1451 | nr_loopback_init(); |
| 1445 | 1452 | ||
| 1446 | proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops); | 1453 | rc = -ENOMEM; |
| 1447 | proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops); | 1454 | if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops)) |
| 1448 | proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops); | 1455 | goto proc_remove1; |
| 1449 | out: | 1456 | if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net, |
| 1450 | return rc; | 1457 | &nr_neigh_seqops)) |
| 1458 | goto proc_remove2; | ||
| 1459 | if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net, | ||
| 1460 | &nr_node_seqops)) | ||
| 1461 | goto proc_remove3; | ||
| 1462 | |||
| 1463 | return 0; | ||
| 1464 | |||
| 1465 | proc_remove3: | ||
| 1466 | remove_proc_entry("nr_neigh", init_net.proc_net); | ||
| 1467 | proc_remove2: | ||
| 1468 | remove_proc_entry("nr", init_net.proc_net); | ||
| 1469 | proc_remove1: | ||
| 1470 | |||
| 1471 | nr_loopback_clear(); | ||
| 1472 | nr_rt_free(); | ||
| 1473 | |||
| 1474 | #ifdef CONFIG_SYSCTL | ||
| 1475 | nr_unregister_sysctl(); | ||
| 1476 | out_sysctl: | ||
| 1477 | #endif | ||
| 1478 | ax25_linkfail_release(&nr_linkfail_notifier); | ||
| 1479 | ax25_protocol_release(AX25_P_NETROM); | ||
| 1480 | unregister_netdevice_notifier(&nr_dev_notifier); | ||
| 1481 | out_sock: | ||
| 1482 | sock_unregister(PF_NETROM); | ||
| 1451 | fail: | 1483 | fail: |
| 1452 | while (--i >= 0) { | 1484 | while (--i >= 0) { |
| 1453 | unregister_netdev(dev_nr[i]); | 1485 | unregister_netdev(dev_nr[i]); |
| 1454 | free_netdev(dev_nr[i]); | 1486 | free_netdev(dev_nr[i]); |
| 1455 | } | 1487 | } |
| 1456 | kfree(dev_nr); | 1488 | kfree(dev_nr); |
| 1489 | unregister_proto: | ||
| 1457 | proto_unregister(&nr_proto); | 1490 | proto_unregister(&nr_proto); |
| 1458 | rc = -1; | 1491 | return rc; |
| 1459 | goto out; | ||
| 1460 | } | 1492 | } |
| 1461 | 1493 | ||
| 1462 | module_init(nr_proto_init); | 1494 | module_init(nr_proto_init); |
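The nr_proto_init() rework above replaces the bare return -1 paths with errno-style codes and a chain of unwind labels, so each failing step releases exactly what the earlier steps set up. A minimal userspace sketch of that staged-unwind shape, with malloc() standing in for the registration calls, follows; subsystem_init() and the res_* names are illustrative only.

/* Minimal sketch of the staged-unwind pattern adopted above: each setup
 * step gets a label that undoes everything after it, and the real
 * errno-style code is propagated instead of -1. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *res_a, *res_b, *res_c;

static int subsystem_init(void)
{
        int rc;

        res_a = malloc(16);
        if (!res_a)
                return -ENOMEM;

        res_b = malloc(16);
        if (!res_b) {
                rc = -ENOMEM;
                goto free_a;
        }

        res_c = malloc(16);
        if (!res_c) {
                rc = -ENOMEM;
                goto free_b;
        }

        return 0;               /* success: resources stay live, as in module init */

free_b:
        free(res_b);
free_a:
        free(res_a);
        return rc;
}

int main(void)
{
        printf("init: %d\n", subsystem_init());
        return 0;
}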
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c index 215ad22a9647..93d13f019981 100644 --- a/net/netrom/nr_loopback.c +++ b/net/netrom/nr_loopback.c | |||
| @@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused) | |||
| 70 | } | 70 | } |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | void __exit nr_loopback_clear(void) | 73 | void nr_loopback_clear(void) |
| 74 | { | 74 | { |
| 75 | del_timer_sync(&loopback_timer); | 75 | del_timer_sync(&loopback_timer); |
| 76 | skb_queue_purge(&loopback_queue); | 76 | skb_queue_purge(&loopback_queue); |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 6485f593e2f0..b76aa668a94b 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
| @@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = { | |||
| 953 | /* | 953 | /* |
| 954 | * Free all memory associated with the nodes and routes lists. | 954 | * Free all memory associated with the nodes and routes lists. |
| 955 | */ | 955 | */ |
| 956 | void __exit nr_rt_free(void) | 956 | void nr_rt_free(void) |
| 957 | { | 957 | { |
| 958 | struct nr_neigh *s = NULL; | 958 | struct nr_neigh *s = NULL; |
| 959 | struct nr_node *t = NULL; | 959 | struct nr_node *t = NULL; |
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c index ba1c368b3f18..771011b84270 100644 --- a/net/netrom/sysctl_net_netrom.c +++ b/net/netrom/sysctl_net_netrom.c | |||
| @@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = { | |||
| 146 | { } | 146 | { } |
| 147 | }; | 147 | }; |
| 148 | 148 | ||
| 149 | void __init nr_register_sysctl(void) | 149 | int __init nr_register_sysctl(void) |
| 150 | { | 150 | { |
| 151 | nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); | 151 | nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); |
| 152 | if (!nr_table_header) | ||
| 153 | return -ENOMEM; | ||
| 154 | return 0; | ||
| 152 | } | 155 | } |
| 153 | 156 | ||
| 154 | void nr_unregister_sysctl(void) | 157 | void nr_unregister_sysctl(void) |
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index ddfc52ac1f9b..c0d323b58e73 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c | |||
| @@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, | |||
| 312 | create_info = (struct nci_hci_create_pipe_resp *)skb->data; | 312 | create_info = (struct nci_hci_create_pipe_resp *)skb->data; |
| 313 | dest_gate = create_info->dest_gate; | 313 | dest_gate = create_info->dest_gate; |
| 314 | new_pipe = create_info->pipe; | 314 | new_pipe = create_info->pipe; |
| 315 | if (new_pipe >= NCI_HCI_MAX_PIPES) { | ||
| 316 | status = NCI_HCI_ANY_E_NOK; | ||
| 317 | goto exit; | ||
| 318 | } | ||
| 315 | 319 | ||
| 316 | /* Save the new created pipe and bind with local gate, | 320 | /* Save the new created pipe and bind with local gate, |
| 317 | * the description for skb->data[3] is destination gate id | 321 | * the description for skb->data[3] is destination gate id |
| @@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, | |||
| 336 | goto exit; | 340 | goto exit; |
| 337 | } | 341 | } |
| 338 | delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; | 342 | delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; |
| 343 | if (delete_info->pipe >= NCI_HCI_MAX_PIPES) { | ||
| 344 | status = NCI_HCI_ANY_E_NOK; | ||
| 345 | goto exit; | ||
| 346 | } | ||
| 339 | 347 | ||
| 340 | ndev->hci_dev->pipes[delete_info->pipe].gate = | 348 | ndev->hci_dev->pipes[delete_info->pipe].gate = |
| 341 | NCI_HCI_INVALID_GATE; | 349 | NCI_HCI_INVALID_GATE; |
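Both HCI hunks above add the same guard: a pipe index taken from the wire is checked against NCI_HCI_MAX_PIPES before it indexes the pipes[] table. The sketch below shows that bounds check in isolation; MAX_PIPES, pipe_entry and set_pipe_gate() are illustrative names and the error code is just an example.

/* Sketch of the bounds check added above: an index received from the
 * wire must be validated against the table size before use. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 128

struct pipe_entry { uint8_t gate; uint8_t host; };
static struct pipe_entry pipes[MAX_PIPES];

static int set_pipe_gate(uint8_t pipe_from_wire, uint8_t gate)
{
        if (pipe_from_wire >= MAX_PIPES)
                return -EPROTO;         /* reject instead of writing out of bounds */
        pipes[pipe_from_wire].gate = gate;
        return 0;
}

int main(void)
{
        printf("%d\n", set_pipe_gate(200, 1));  /* rejected */
        printf("%d\n", set_pipe_gate(5, 1));    /* ok */
        return 0;
}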
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 691da853bef5..4bdf5e3ac208 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, | |||
| 2306 | 2306 | ||
| 2307 | struct sw_flow_actions *acts; | 2307 | struct sw_flow_actions *acts; |
| 2308 | int new_acts_size; | 2308 | int new_acts_size; |
| 2309 | int req_size = NLA_ALIGN(attr_len); | 2309 | size_t req_size = NLA_ALIGN(attr_len); |
| 2310 | int next_offset = offsetof(struct sw_flow_actions, actions) + | 2310 | int next_offset = offsetof(struct sw_flow_actions, actions) + |
| 2311 | (*sfa)->actions_len; | 2311 | (*sfa)->actions_len; |
| 2312 | 2312 | ||
| 2313 | if (req_size <= (ksize(*sfa) - next_offset)) | 2313 | if (req_size <= (ksize(*sfa) - next_offset)) |
| 2314 | goto out; | 2314 | goto out; |
| 2315 | 2315 | ||
| 2316 | new_acts_size = ksize(*sfa) * 2; | 2316 | new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); |
| 2317 | 2317 | ||
| 2318 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | 2318 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { |
| 2319 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { | 2319 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { |
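The reserve_sfa_size() hunk above changes the growth rule so the resized buffer always covers the pending request, not merely twice the old size, which matters when a single attribute is larger than the current allocation. The arithmetic is shown below as a standalone helper; grow_to_fit() is an illustrative name.

/* Sketch of the resize rule applied above: the new size must cover the
 * pending request even when doubling the old size would not. */
#include <stddef.h>
#include <stdio.h>

static size_t grow_to_fit(size_t cur_size, size_t next_offset, size_t req_size)
{
        size_t doubled = cur_size * 2;
        size_t needed = next_offset + req_size;

        return needed > doubled ? needed : doubled;     /* max(needed, doubled) */
}

int main(void)
{
        /* doubling alone (2 * 256 = 512) would not fit 200 + 400 = 600 bytes */
        printf("%zu\n", grow_to_fit(256, 200, 400));    /* prints 600 */
        return 0;
}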
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index d6cc97fbbbb0..2b969f99ef13 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
| @@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 543 | struct rds_sock *rs = rds_sk_to_rs(sk); | 543 | struct rds_sock *rs = rds_sk_to_rs(sk); |
| 544 | int ret = 0; | 544 | int ret = 0; |
| 545 | 545 | ||
| 546 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
| 547 | return -EINVAL; | ||
| 548 | |||
| 546 | lock_sock(sk); | 549 | lock_sock(sk); |
| 547 | 550 | ||
| 548 | switch (uaddr->sa_family) { | 551 | switch (uaddr->sa_family) { |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 17c9d9f0c848..0f4398e7f2a7 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
| @@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 173 | /* We allow an RDS socket to be bound to either IPv4 or IPv6 | 173 | /* We allow an RDS socket to be bound to either IPv4 or IPv6 |
| 174 | * address. | 174 | * address. |
| 175 | */ | 175 | */ |
| 176 | if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
| 177 | return -EINVAL; | ||
| 176 | if (uaddr->sa_family == AF_INET) { | 178 | if (uaddr->sa_family == AF_INET) { |
| 177 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | 179 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; |
| 178 | 180 | ||
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c index 31cf37da4510..93c0437e6a5f 100644 --- a/net/rds/ib_fmr.c +++ b/net/rds/ib_fmr.c | |||
| @@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages) | |||
| 44 | else | 44 | else |
| 45 | pool = rds_ibdev->mr_1m_pool; | 45 | pool = rds_ibdev->mr_1m_pool; |
| 46 | 46 | ||
| 47 | if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) | ||
| 48 | queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); | ||
| 49 | |||
| 50 | /* Switch pools if one of the pool is reaching upper limit */ | ||
| 51 | if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) { | ||
| 52 | if (pool->pool_type == RDS_IB_MR_8K_POOL) | ||
| 53 | pool = rds_ibdev->mr_1m_pool; | ||
| 54 | else | ||
| 55 | pool = rds_ibdev->mr_8k_pool; | ||
| 56 | } | ||
| 57 | |||
| 47 | ibmr = rds_ib_try_reuse_ibmr(pool); | 58 | ibmr = rds_ib_try_reuse_ibmr(pool); |
| 48 | if (ibmr) | 59 | if (ibmr) |
| 49 | return ibmr; | 60 | return ibmr; |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 63c8d107adcf..d664e9ade74d 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
| @@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool) | |||
| 454 | struct rds_ib_mr *ibmr = NULL; | 454 | struct rds_ib_mr *ibmr = NULL; |
| 455 | int iter = 0; | 455 | int iter = 0; |
| 456 | 456 | ||
| 457 | if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10) | ||
| 458 | queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); | ||
| 459 | |||
| 460 | while (1) { | 457 | while (1) { |
| 461 | ibmr = rds_ib_reuse_mr(pool); | 458 | ibmr = rds_ib_reuse_mr(pool); |
| 462 | if (ibmr) | 459 | if (ibmr) |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index fd2694174607..faf726e00e27 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
| @@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net) | |||
| 608 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { | 608 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { |
| 609 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); | 609 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); |
| 610 | 610 | ||
| 611 | if (net != c_net || !tc->t_sock) | 611 | if (net != c_net) |
| 612 | continue; | 612 | continue; |
| 613 | if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { | 613 | if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { |
| 614 | list_move_tail(&tc->t_tcp_node, &tmp_list); | 614 | list_move_tail(&tc->t_tcp_node, &tmp_list); |
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 7af4f99c4a93..094a6621f8e8 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | 17 | ||
| 18 | static struct sk_buff_head loopback_queue; | 18 | static struct sk_buff_head loopback_queue; |
| 19 | #define ROSE_LOOPBACK_LIMIT 1000 | ||
| 19 | static struct timer_list loopback_timer; | 20 | static struct timer_list loopback_timer; |
| 20 | 21 | ||
| 21 | static void rose_set_loopback_timer(void); | 22 | static void rose_set_loopback_timer(void); |
| @@ -35,29 +36,27 @@ static int rose_loopback_running(void) | |||
| 35 | 36 | ||
| 36 | int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) | 37 | int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) |
| 37 | { | 38 | { |
| 38 | struct sk_buff *skbn; | 39 | struct sk_buff *skbn = NULL; |
| 39 | 40 | ||
| 40 | skbn = skb_clone(skb, GFP_ATOMIC); | 41 | if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT) |
| 42 | skbn = skb_clone(skb, GFP_ATOMIC); | ||
| 41 | 43 | ||
| 42 | kfree_skb(skb); | 44 | if (skbn) { |
| 43 | 45 | consume_skb(skb); | |
| 44 | if (skbn != NULL) { | ||
| 45 | skb_queue_tail(&loopback_queue, skbn); | 46 | skb_queue_tail(&loopback_queue, skbn); |
| 46 | 47 | ||
| 47 | if (!rose_loopback_running()) | 48 | if (!rose_loopback_running()) |
| 48 | rose_set_loopback_timer(); | 49 | rose_set_loopback_timer(); |
| 50 | } else { | ||
| 51 | kfree_skb(skb); | ||
| 49 | } | 52 | } |
| 50 | 53 | ||
| 51 | return 1; | 54 | return 1; |
| 52 | } | 55 | } |
| 53 | 56 | ||
| 54 | |||
| 55 | static void rose_set_loopback_timer(void) | 57 | static void rose_set_loopback_timer(void) |
| 56 | { | 58 | { |
| 57 | del_timer(&loopback_timer); | 59 | mod_timer(&loopback_timer, jiffies + 10); |
| 58 | |||
| 59 | loopback_timer.expires = jiffies + 10; | ||
| 60 | add_timer(&loopback_timer); | ||
| 61 | } | 60 | } |
| 62 | 61 | ||
| 63 | static void rose_loopback_timer(struct timer_list *unused) | 62 | static void rose_loopback_timer(struct timer_list *unused) |
| @@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused) | |||
| 68 | struct sock *sk; | 67 | struct sock *sk; |
| 69 | unsigned short frametype; | 68 | unsigned short frametype; |
| 70 | unsigned int lci_i, lci_o; | 69 | unsigned int lci_i, lci_o; |
| 70 | int count; | ||
| 71 | 71 | ||
| 72 | while ((skb = skb_dequeue(&loopback_queue)) != NULL) { | 72 | for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) { |
| 73 | skb = skb_dequeue(&loopback_queue); | ||
| 74 | if (!skb) | ||
| 75 | return; | ||
| 73 | if (skb->len < ROSE_MIN_LEN) { | 76 | if (skb->len < ROSE_MIN_LEN) { |
| 74 | kfree_skb(skb); | 77 | kfree_skb(skb); |
| 75 | continue; | 78 | continue; |
| @@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused) | |||
| 106 | kfree_skb(skb); | 109 | kfree_skb(skb); |
| 107 | } | 110 | } |
| 108 | } | 111 | } |
| 112 | if (!skb_queue_empty(&loopback_queue)) | ||
| 113 | mod_timer(&loopback_timer, jiffies + 1); | ||
| 109 | } | 114 | } |
| 110 | 115 | ||
| 111 | void __exit rose_loopback_clear(void) | 116 | void __exit rose_loopback_clear(void) |
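The rose_loopback changes above cap the loopback queue at ROSE_LOOPBACK_LIMIT, drain at most that many frames per timer run, and re-arm the timer when work remains. The sketch below mirrors that shape with a plain counter instead of an sk_buff queue; loopback_enqueue(), loopback_drain() and LOOPBACK_LIMIT are illustrative stand-ins.

/* Sketch of the bounded-queue behaviour introduced above: enqueue only
 * while under a fixed limit, drain in batches, re-arm if work remains. */
#include <stdbool.h>
#include <stdio.h>

#define LOOPBACK_LIMIT 1000

static unsigned int queue_len;
static bool timer_armed;

static bool loopback_enqueue(void)
{
        if (queue_len >= LOOPBACK_LIMIT)
                return false;           /* drop instead of growing without bound */
        queue_len++;
        timer_armed = true;             /* mod_timer(..., jiffies + 10) analogue */
        return true;
}

static void loopback_drain(void)
{
        unsigned int count;

        timer_armed = false;
        for (count = 0; count < LOOPBACK_LIMIT && queue_len; count++)
                queue_len--;            /* process one queued frame */
        if (queue_len)
                timer_armed = true;     /* more work: fire again next tick */
}

int main(void)
{
        for (int i = 0; i < 1500; i++)
                loopback_enqueue();
        printf("queued %u (capped)\n", queue_len);
        loopback_drain();
        printf("left %u, timer %d\n", queue_len, timer_armed);
        return 0;
}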
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 96f2952bbdfd..ae8c5d7f3bf1 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
| @@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) | |||
| 135 | struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; | 135 | struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; |
| 136 | struct rxrpc_local *local; | 136 | struct rxrpc_local *local; |
| 137 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | 137 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); |
| 138 | u16 service_id = srx->srx_service; | 138 | u16 service_id; |
| 139 | int ret; | 139 | int ret; |
| 140 | 140 | ||
| 141 | _enter("%p,%p,%d", rx, saddr, len); | 141 | _enter("%p,%p,%d", rx, saddr, len); |
| @@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) | |||
| 143 | ret = rxrpc_validate_address(rx, srx, len); | 143 | ret = rxrpc_validate_address(rx, srx, len); |
| 144 | if (ret < 0) | 144 | if (ret < 0) |
| 145 | goto error; | 145 | goto error; |
| 146 | service_id = srx->srx_service; | ||
| 146 | 147 | ||
| 147 | lock_sock(&rx->sk); | 148 | lock_sock(&rx->sk); |
| 148 | 149 | ||
| @@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call); | |||
| 370 | * rxrpc_kernel_check_life - Check to see whether a call is still alive | 371 | * rxrpc_kernel_check_life - Check to see whether a call is still alive |
| 371 | * @sock: The socket the call is on | 372 | * @sock: The socket the call is on |
| 372 | * @call: The call to check | 373 | * @call: The call to check |
| 374 | * @_life: Where to store the life value | ||
| 373 | * | 375 | * |
| 374 | * Allow a kernel service to find out whether a call is still alive - ie. we're | 376 | * Allow a kernel service to find out whether a call is still alive - ie. we're |
| 375 | * getting ACKs from the server. Returns a number representing the life state | 377 | * getting ACKs from the server. Passes back in *_life a number representing |
| 376 | * which can be compared to that returned by a previous call. | 378 | * the life state which can be compared to that returned by a previous call and |
| 379 | * return true if the call is still alive. | ||
| 377 | * | 380 | * |
| 378 | * If the life state stalls, rxrpc_kernel_probe_life() should be called and | 381 | * If the life state stalls, rxrpc_kernel_probe_life() should be called and |
| 379 | * then 2RTT waited. | 382 | * then 2RTT waited. |
| 380 | */ | 383 | */ |
| 381 | u32 rxrpc_kernel_check_life(const struct socket *sock, | 384 | bool rxrpc_kernel_check_life(const struct socket *sock, |
| 382 | const struct rxrpc_call *call) | 385 | const struct rxrpc_call *call, |
| 386 | u32 *_life) | ||
| 383 | { | 387 | { |
| 384 | return call->acks_latest; | 388 | *_life = call->acks_latest; |
| 389 | return call->state != RXRPC_CALL_COMPLETE; | ||
| 385 | } | 390 | } |
| 386 | EXPORT_SYMBOL(rxrpc_kernel_check_life); | 391 | EXPORT_SYMBOL(rxrpc_kernel_check_life); |
| 387 | 392 | ||
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 4b1a534d290a..062ca9dc29b8 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
| @@ -654,6 +654,7 @@ struct rxrpc_call { | |||
| 654 | u8 ackr_reason; /* reason to ACK */ | 654 | u8 ackr_reason; /* reason to ACK */ |
| 655 | u16 ackr_skew; /* skew on packet being ACK'd */ | 655 | u16 ackr_skew; /* skew on packet being ACK'd */ |
| 656 | rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ | 656 | rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ |
| 657 | rxrpc_serial_t ackr_first_seq; /* first sequence number received */ | ||
| 657 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ | 658 | rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ |
| 658 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ | 659 | rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ |
| 659 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ | 660 | rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ |
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index b6fca8ebb117..8d31fb4c51e1 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
| @@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
| 153 | * pass a connection-level abort onto all calls on that connection | 153 | * pass a connection-level abort onto all calls on that connection |
| 154 | */ | 154 | */ |
| 155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, | 155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, |
| 156 | enum rxrpc_call_completion compl) | 156 | enum rxrpc_call_completion compl, |
| 157 | rxrpc_serial_t serial) | ||
| 157 | { | 158 | { |
| 158 | struct rxrpc_call *call; | 159 | struct rxrpc_call *call; |
| 159 | int i; | 160 | int i; |
| @@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, | |||
| 173 | call->call_id, 0, | 174 | call->call_id, 0, |
| 174 | conn->abort_code, | 175 | conn->abort_code, |
| 175 | conn->error); | 176 | conn->error); |
| 177 | else | ||
| 178 | trace_rxrpc_rx_abort(call, serial, | ||
| 179 | conn->abort_code); | ||
| 176 | if (rxrpc_set_call_completion(call, compl, | 180 | if (rxrpc_set_call_completion(call, compl, |
| 177 | conn->abort_code, | 181 | conn->abort_code, |
| 178 | conn->error)) | 182 | conn->error)) |
| @@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
| 213 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; | 217 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; |
| 214 | spin_unlock_bh(&conn->state_lock); | 218 | spin_unlock_bh(&conn->state_lock); |
| 215 | 219 | ||
| 216 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED); | ||
| 217 | |||
| 218 | msg.msg_name = &conn->params.peer->srx.transport; | 220 | msg.msg_name = &conn->params.peer->srx.transport; |
| 219 | msg.msg_namelen = conn->params.peer->srx.transport_len; | 221 | msg.msg_namelen = conn->params.peer->srx.transport_len; |
| 220 | msg.msg_control = NULL; | 222 | msg.msg_control = NULL; |
| @@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
| 242 | len = iov[0].iov_len + iov[1].iov_len; | 244 | len = iov[0].iov_len + iov[1].iov_len; |
| 243 | 245 | ||
| 244 | serial = atomic_inc_return(&conn->serial); | 246 | serial = atomic_inc_return(&conn->serial); |
| 247 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial); | ||
| 245 | whdr.serial = htonl(serial); | 248 | whdr.serial = htonl(serial); |
| 246 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); | 249 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); |
| 247 | 250 | ||
| @@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, | |||
| 321 | conn->error = -ECONNABORTED; | 324 | conn->error = -ECONNABORTED; |
| 322 | conn->abort_code = abort_code; | 325 | conn->abort_code = abort_code; |
| 323 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; | 326 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; |
| 324 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED); | 327 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial); |
| 325 | return -ECONNABORTED; | 328 | return -ECONNABORTED; |
| 326 | 329 | ||
| 327 | case RXRPC_PACKET_TYPE_CHALLENGE: | 330 | case RXRPC_PACKET_TYPE_CHALLENGE: |
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9128aa0e40aa..c2c35cf4e308 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
| @@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 837 | u8 acks[RXRPC_MAXACKS]; | 837 | u8 acks[RXRPC_MAXACKS]; |
| 838 | } buf; | 838 | } buf; |
| 839 | rxrpc_serial_t acked_serial; | 839 | rxrpc_serial_t acked_serial; |
| 840 | rxrpc_seq_t first_soft_ack, hard_ack; | 840 | rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; |
| 841 | int nr_acks, offset, ioffset; | 841 | int nr_acks, offset, ioffset; |
| 842 | 842 | ||
| 843 | _enter(""); | 843 | _enter(""); |
| @@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 851 | 851 | ||
| 852 | acked_serial = ntohl(buf.ack.serial); | 852 | acked_serial = ntohl(buf.ack.serial); |
| 853 | first_soft_ack = ntohl(buf.ack.firstPacket); | 853 | first_soft_ack = ntohl(buf.ack.firstPacket); |
| 854 | prev_pkt = ntohl(buf.ack.previousPacket); | ||
| 854 | hard_ack = first_soft_ack - 1; | 855 | hard_ack = first_soft_ack - 1; |
| 855 | nr_acks = buf.ack.nAcks; | 856 | nr_acks = buf.ack.nAcks; |
| 856 | summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? | 857 | summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? |
| 857 | buf.ack.reason : RXRPC_ACK__INVALID); | 858 | buf.ack.reason : RXRPC_ACK__INVALID); |
| 858 | 859 | ||
| 859 | trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, | 860 | trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, |
| 860 | first_soft_ack, ntohl(buf.ack.previousPacket), | 861 | first_soft_ack, prev_pkt, |
| 861 | summary.ack_reason, nr_acks); | 862 | summary.ack_reason, nr_acks); |
| 862 | 863 | ||
| 863 | if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) | 864 | if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) |
| @@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 878 | rxrpc_propose_ack_respond_to_ack); | 879 | rxrpc_propose_ack_respond_to_ack); |
| 879 | } | 880 | } |
| 880 | 881 | ||
| 881 | /* Discard any out-of-order or duplicate ACKs. */ | 882 | /* Discard any out-of-order or duplicate ACKs (outside lock). */ |
| 882 | if (before_eq(sp->hdr.serial, call->acks_latest)) | 883 | if (before(first_soft_ack, call->ackr_first_seq) || |
| 884 | before(prev_pkt, call->ackr_prev_seq)) | ||
| 883 | return; | 885 | return; |
| 884 | 886 | ||
| 885 | buf.info.rxMTU = 0; | 887 | buf.info.rxMTU = 0; |
| @@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 890 | 892 | ||
| 891 | spin_lock(&call->input_lock); | 893 | spin_lock(&call->input_lock); |
| 892 | 894 | ||
| 893 | /* Discard any out-of-order or duplicate ACKs. */ | 895 | /* Discard any out-of-order or duplicate ACKs (inside lock). */ |
| 894 | if (before_eq(sp->hdr.serial, call->acks_latest)) | 896 | if (before(first_soft_ack, call->ackr_first_seq) || |
| 897 | before(prev_pkt, call->ackr_prev_seq)) | ||
| 895 | goto out; | 898 | goto out; |
| 896 | call->acks_latest_ts = skb->tstamp; | 899 | call->acks_latest_ts = skb->tstamp; |
| 897 | call->acks_latest = sp->hdr.serial; | 900 | call->acks_latest = sp->hdr.serial; |
| 898 | 901 | ||
| 902 | call->ackr_first_seq = first_soft_ack; | ||
| 903 | call->ackr_prev_seq = prev_pkt; | ||
| 904 | |||
| 899 | /* Parse rwind and mtu sizes if provided. */ | 905 | /* Parse rwind and mtu sizes if provided. */ |
| 900 | if (buf.info.rxMTU) | 906 | if (buf.info.rxMTU) |
| 901 | rxrpc_input_ackinfo(call, skb, &buf.info); | 907 | rxrpc_input_ackinfo(call, skb, &buf.info); |
| @@ -1155,19 +1161,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) | |||
| 1155 | * handle data received on the local endpoint | 1161 | * handle data received on the local endpoint |
| 1156 | * - may be called in interrupt context | 1162 | * - may be called in interrupt context |
| 1157 | * | 1163 | * |
| 1158 | * The socket is locked by the caller and this prevents the socket from being | 1164 | * [!] Note that as this is called from the encap_rcv hook, the socket is not |
| 1159 | * shut down and the local endpoint from going away, thus sk_user_data will not | 1165 | * held locked by the caller and nothing prevents sk_user_data on the UDP from |
| 1160 | * be cleared until this function returns. | 1166 | * being cleared in the middle of processing this function. |
| 1161 | * | 1167 | * |
| 1162 | * Called with the RCU read lock held from the IP layer via UDP. | 1168 | * Called with the RCU read lock held from the IP layer via UDP. |
| 1163 | */ | 1169 | */ |
| 1164 | int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) | 1170 | int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) |
| 1165 | { | 1171 | { |
| 1172 | struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk); | ||
| 1166 | struct rxrpc_connection *conn; | 1173 | struct rxrpc_connection *conn; |
| 1167 | struct rxrpc_channel *chan; | 1174 | struct rxrpc_channel *chan; |
| 1168 | struct rxrpc_call *call = NULL; | 1175 | struct rxrpc_call *call = NULL; |
| 1169 | struct rxrpc_skb_priv *sp; | 1176 | struct rxrpc_skb_priv *sp; |
| 1170 | struct rxrpc_local *local = udp_sk->sk_user_data; | ||
| 1171 | struct rxrpc_peer *peer = NULL; | 1177 | struct rxrpc_peer *peer = NULL; |
| 1172 | struct rxrpc_sock *rx = NULL; | 1178 | struct rxrpc_sock *rx = NULL; |
| 1173 | unsigned int channel; | 1179 | unsigned int channel; |
| @@ -1175,6 +1181,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) | |||
| 1175 | 1181 | ||
| 1176 | _enter("%p", udp_sk); | 1182 | _enter("%p", udp_sk); |
| 1177 | 1183 | ||
| 1184 | if (unlikely(!local)) { | ||
| 1185 | kfree_skb(skb); | ||
| 1186 | return 0; | ||
| 1187 | } | ||
| 1178 | if (skb->tstamp == 0) | 1188 | if (skb->tstamp == 0) |
| 1179 | skb->tstamp = ktime_get_real(); | 1189 | skb->tstamp = ktime_get_real(); |
| 1180 | 1190 | ||
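The rxrpc_input_ack() hunks above discard an ACK when either firstPacket or previousPacket moves backwards relative to the recorded ackr_first_seq/ackr_prev_seq, using the kernel's wrap-safe before() comparison. The standalone sketch below reproduces that test with the usual signed-difference idiom; seq_before() and ack_is_stale() are illustrative names, not the rxrpc functions.

/* Sketch of the wrap-safe sequence comparison behind the ACK discard
 * checks: seq_before(a, b) is true when a precedes b even across 32-bit
 * wrap-around.  This mirrors the (s32)(a - b) < 0 idiom, not the source. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

static bool ack_is_stale(uint32_t first_soft_ack, uint32_t prev_pkt,
                         uint32_t ackr_first_seq, uint32_t ackr_prev_seq)
{
        /* discard if either field moves backwards relative to saved state */
        return seq_before(first_soft_ack, ackr_first_seq) ||
               seq_before(prev_pkt, ackr_prev_seq);
}

int main(void)
{
        printf("%d\n", ack_is_stale(5, 9, 7, 9));                   /* stale: 1 */
        printf("%d\n", ack_is_stale(0x00000002, 3, 0xfffffffe, 3)); /* wrapped, fresh: 0 */
        return 0;
}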
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 15cf42d5b53a..01959db51445 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
| @@ -304,7 +304,8 @@ nomem: | |||
| 304 | ret = -ENOMEM; | 304 | ret = -ENOMEM; |
| 305 | sock_error: | 305 | sock_error: |
| 306 | mutex_unlock(&rxnet->local_mutex); | 306 | mutex_unlock(&rxnet->local_mutex); |
| 307 | kfree(local); | 307 | if (local) |
| 308 | call_rcu(&local->rcu, rxrpc_local_rcu); | ||
| 308 | _leave(" = %d", ret); | 309 | _leave(" = %d", ret); |
| 309 | return ERR_PTR(ret); | 310 | return ERR_PTR(ret); |
| 310 | 311 | ||
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index bc05af89fc38..6e84d878053c 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
| @@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk) | |||
| 157 | 157 | ||
| 158 | _enter("%p{%d}", sk, local->debug_id); | 158 | _enter("%p{%d}", sk, local->debug_id); |
| 159 | 159 | ||
| 160 | /* Clear the outstanding error value on the socket so that it doesn't | ||
| 161 | * cause kernel_sendmsg() to return it later. | ||
| 162 | */ | ||
| 163 | sock_error(sk); | ||
| 164 | |||
| 160 | skb = sock_dequeue_err_skb(sk); | 165 | skb = sock_dequeue_err_skb(sk); |
| 161 | if (!skb) { | 166 | if (!skb) { |
| 162 | _leave("UDP socket errqueue empty"); | 167 | _leave("UDP socket errqueue empty"); |
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 46c9312085b1..bec64deb7b0a 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
| @@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /* | 154 | /* |
| 155 | * Queue a DATA packet for transmission, set the resend timeout and send the | 155 | * Queue a DATA packet for transmission, set the resend timeout and send |
| 156 | * packet immediately | 156 | * the packet immediately. Returns the error from rxrpc_send_data_packet() |
| 157 | * in case the caller wants to do something with it. | ||
| 157 | */ | 158 | */ |
| 158 | static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | 159 | static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, |
| 159 | struct sk_buff *skb, bool last, | 160 | struct sk_buff *skb, bool last, |
| 160 | rxrpc_notify_end_tx_t notify_end_tx) | 161 | rxrpc_notify_end_tx_t notify_end_tx) |
| 161 | { | 162 | { |
| 162 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 163 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
| 163 | unsigned long now; | 164 | unsigned long now; |
| @@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
| 250 | 251 | ||
| 251 | out: | 252 | out: |
| 252 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 253 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); |
| 253 | _leave(""); | 254 | _leave(" = %d", ret); |
| 255 | return ret; | ||
| 254 | } | 256 | } |
| 255 | 257 | ||
| 256 | /* | 258 | /* |
| @@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
| 423 | if (ret < 0) | 425 | if (ret < 0) |
| 424 | goto out; | 426 | goto out; |
| 425 | 427 | ||
| 426 | rxrpc_queue_packet(rx, call, skb, | 428 | ret = rxrpc_queue_packet(rx, call, skb, |
| 427 | !msg_data_left(msg) && !more, | 429 | !msg_data_left(msg) && !more, |
| 428 | notify_end_tx); | 430 | notify_end_tx); |
| 431 | /* Should check for failure here */ | ||
| 429 | skb = NULL; | 432 | skb = NULL; |
| 430 | } | 433 | } |
| 431 | } while (msg_data_left(msg) > 0); | 434 | } while (msg_data_left(msg) > 0); |
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 4060b0955c97..0f82d50ea232 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
| @@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
| 45 | struct nlattr *tb[TCA_SAMPLE_MAX + 1]; | 45 | struct nlattr *tb[TCA_SAMPLE_MAX + 1]; |
| 46 | struct psample_group *psample_group; | 46 | struct psample_group *psample_group; |
| 47 | struct tcf_chain *goto_ch = NULL; | 47 | struct tcf_chain *goto_ch = NULL; |
| 48 | u32 psample_group_num, rate; | ||
| 48 | struct tc_sample *parm; | 49 | struct tc_sample *parm; |
| 49 | u32 psample_group_num; | ||
| 50 | struct tcf_sample *s; | 50 | struct tcf_sample *s; |
| 51 | bool exists = false; | 51 | bool exists = false; |
| 52 | int ret, err; | 52 | int ret, err; |
| @@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
| 85 | if (err < 0) | 85 | if (err < 0) |
| 86 | goto release_idr; | 86 | goto release_idr; |
| 87 | 87 | ||
| 88 | rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); | ||
| 89 | if (!rate) { | ||
| 90 | NL_SET_ERR_MSG(extack, "invalid sample rate"); | ||
| 91 | err = -EINVAL; | ||
| 92 | goto put_chain; | ||
| 93 | } | ||
| 88 | psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); | 94 | psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); |
| 89 | psample_group = psample_group_get(net, psample_group_num); | 95 | psample_group = psample_group_get(net, psample_group_num); |
| 90 | if (!psample_group) { | 96 | if (!psample_group) { |
| @@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, | |||
| 96 | 102 | ||
| 97 | spin_lock_bh(&s->tcf_lock); | 103 | spin_lock_bh(&s->tcf_lock); |
| 98 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); | 104 | goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); |
| 99 | s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); | 105 | s->rate = rate; |
| 100 | s->psample_group_num = psample_group_num; | 106 | s->psample_group_num = psample_group_num; |
| 101 | RCU_INIT_POINTER(s->psample_group, psample_group); | 107 | RCU_INIT_POINTER(s->psample_group, psample_group); |
| 102 | 108 | ||
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 459921bd3d87..a13bc351a414 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
| @@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held, | |||
| 130 | 130 | ||
| 131 | static void *mall_get(struct tcf_proto *tp, u32 handle) | 131 | static void *mall_get(struct tcf_proto *tp, u32 handle) |
| 132 | { | 132 | { |
| 133 | struct cls_mall_head *head = rtnl_dereference(tp->root); | ||
| 134 | |||
| 135 | if (head && head->handle == handle) | ||
| 136 | return head; | ||
| 137 | |||
| 133 | return NULL; | 138 | return NULL; |
| 134 | } | 139 | } |
| 135 | 140 | ||
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index acc9b9da985f..259d97bc2abd 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
| @@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) | |||
| 1517 | 1517 | ||
| 1518 | static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) | 1518 | static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) |
| 1519 | { | 1519 | { |
| 1520 | int wlen = skb_network_offset(skb); | ||
| 1520 | u8 dscp; | 1521 | u8 dscp; |
| 1521 | 1522 | ||
| 1522 | switch (skb->protocol) { | 1523 | switch (tc_skb_protocol(skb)) { |
| 1523 | case htons(ETH_P_IP): | 1524 | case htons(ETH_P_IP): |
| 1525 | wlen += sizeof(struct iphdr); | ||
| 1526 | if (!pskb_may_pull(skb, wlen) || | ||
| 1527 | skb_try_make_writable(skb, wlen)) | ||
| 1528 | return 0; | ||
| 1529 | |||
| 1524 | dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; | 1530 | dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; |
| 1525 | if (wash && dscp) | 1531 | if (wash && dscp) |
| 1526 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); | 1532 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); |
| 1527 | return dscp; | 1533 | return dscp; |
| 1528 | 1534 | ||
| 1529 | case htons(ETH_P_IPV6): | 1535 | case htons(ETH_P_IPV6): |
| 1536 | wlen += sizeof(struct ipv6hdr); | ||
| 1537 | if (!pskb_may_pull(skb, wlen) || | ||
| 1538 | skb_try_make_writable(skb, wlen)) | ||
| 1539 | return 0; | ||
| 1540 | |||
| 1530 | dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; | 1541 | dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; |
| 1531 | if (wash && dscp) | 1542 | if (wash && dscp) |
| 1532 | ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); | 1543 | ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 4dc05409e3fb..114b9048ea7e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1358 | { | 1358 | { |
| 1359 | struct cbq_sched_data *q = qdisc_priv(sch); | 1359 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1360 | struct cbq_class *cl = (struct cbq_class *)arg; | 1360 | struct cbq_class *cl = (struct cbq_class *)arg; |
| 1361 | __u32 qlen; | ||
| 1361 | 1362 | ||
| 1362 | cl->xstats.avgidle = cl->avgidle; | 1363 | cl->xstats.avgidle = cl->avgidle; |
| 1363 | cl->xstats.undertime = 0; | 1364 | cl->xstats.undertime = 0; |
| 1365 | qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog); | ||
| 1364 | 1366 | ||
| 1365 | if (cl->undertime != PSCHED_PASTPERFECT) | 1367 | if (cl->undertime != PSCHED_PASTPERFECT) |
| 1366 | cl->xstats.undertime = cl->undertime - q->now; | 1368 | cl->xstats.undertime = cl->undertime - q->now; |
| @@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1368 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 1370 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 1369 | d, NULL, &cl->bstats) < 0 || | 1371 | d, NULL, &cl->bstats) < 0 || |
| 1370 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1372 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 1371 | gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) | 1373 | gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) |
| 1372 | return -1; | 1374 | return -1; |
| 1373 | 1375 | ||
| 1374 | return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); | 1376 | return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); |
| @@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) | |||
| 1665 | { | 1667 | { |
| 1666 | struct cbq_sched_data *q = qdisc_priv(sch); | 1668 | struct cbq_sched_data *q = qdisc_priv(sch); |
| 1667 | struct cbq_class *cl = (struct cbq_class *)arg; | 1669 | struct cbq_class *cl = (struct cbq_class *)arg; |
| 1668 | unsigned int qlen, backlog; | ||
| 1669 | 1670 | ||
| 1670 | if (cl->filters || cl->children || cl == &q->link) | 1671 | if (cl->filters || cl->children || cl == &q->link) |
| 1671 | return -EBUSY; | 1672 | return -EBUSY; |
| 1672 | 1673 | ||
| 1673 | sch_tree_lock(sch); | 1674 | sch_tree_lock(sch); |
| 1674 | 1675 | ||
| 1675 | qlen = cl->q->q.qlen; | 1676 | qdisc_purge_queue(cl->q); |
| 1676 | backlog = cl->q->qstats.backlog; | ||
| 1677 | qdisc_reset(cl->q); | ||
| 1678 | qdisc_tree_reduce_backlog(cl->q, qlen, backlog); | ||
| 1679 | 1677 | ||
| 1680 | if (cl->next_alive) | 1678 | if (cl->next_alive) |
| 1681 | cbq_deactivate_class(cl); | 1679 | cbq_deactivate_class(cl); |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 09b800991065..430df9a55ec4 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) | |||
| 50 | return container_of(clc, struct drr_class, common); | 50 | return container_of(clc, struct drr_class, common); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | static void drr_purge_queue(struct drr_class *cl) | ||
| 54 | { | ||
| 55 | unsigned int len = cl->qdisc->q.qlen; | ||
| 56 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 57 | |||
| 58 | qdisc_reset(cl->qdisc); | ||
| 59 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 60 | } | ||
| 61 | |||
| 62 | static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { | 53 | static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = { |
| 63 | [TCA_DRR_QUANTUM] = { .type = NLA_U32 }, | 54 | [TCA_DRR_QUANTUM] = { .type = NLA_U32 }, |
| 64 | }; | 55 | }; |
| @@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 167 | 158 | ||
| 168 | sch_tree_lock(sch); | 159 | sch_tree_lock(sch); |
| 169 | 160 | ||
| 170 | drr_purge_queue(cl); | 161 | qdisc_purge_queue(cl->qdisc); |
| 171 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 162 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| 172 | 163 | ||
| 173 | sch_tree_unlock(sch); | 164 | sch_tree_unlock(sch); |
| @@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 269 | struct gnet_dump *d) | 260 | struct gnet_dump *d) |
| 270 | { | 261 | { |
| 271 | struct drr_class *cl = (struct drr_class *)arg; | 262 | struct drr_class *cl = (struct drr_class *)arg; |
| 272 | __u32 qlen = cl->qdisc->q.qlen; | 263 | __u32 qlen = qdisc_qlen_sum(cl->qdisc); |
| 264 | struct Qdisc *cl_q = cl->qdisc; | ||
| 273 | struct tc_drr_stats xstats; | 265 | struct tc_drr_stats xstats; |
| 274 | 266 | ||
| 275 | memset(&xstats, 0, sizeof(xstats)); | 267 | memset(&xstats, 0, sizeof(xstats)); |
| @@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 279 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 271 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 280 | d, NULL, &cl->bstats) < 0 || | 272 | d, NULL, &cl->bstats) < 0 || |
| 281 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 273 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 282 | gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0) | 274 | gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0) |
| 283 | return -1; | 275 | return -1; |
| 284 | 276 | ||
| 285 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 277 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
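Across the sch_cbq, sch_drr, sch_hfsc, sch_htb and sch_qfq hunks the open-coded snapshot/reset/reduce-backlog sequence is replaced by qdisc_purge_queue(). The sketch below restates the pattern the removed helpers followed with simplified counters; fake_qdisc and fake_purge_queue() are stand-ins, and the shared helper is assumed to be equivalent to the code being removed.

/* Standalone sketch of what drr_purge_queue(), hfsc_purge_queue() and
 * qfq_purge_queue() open-coded: snapshot the counters, reset the child
 * queue, then report the removed packets/bytes up the tree. */
#include <stdio.h>

struct fake_qdisc {
        unsigned int qlen;
        unsigned int backlog;
        unsigned int parent_qlen;       /* stands in for the ancestor counters */
        unsigned int parent_backlog;
};

static void fake_purge_queue(struct fake_qdisc *q)
{
        unsigned int len = q->qlen;
        unsigned int backlog = q->backlog;

        q->qlen = 0;                    /* qdisc_reset() analogue */
        q->backlog = 0;
        q->parent_qlen -= len;          /* qdisc_tree_reduce_backlog() analogue */
        q->parent_backlog -= backlog;
}

int main(void)
{
        struct fake_qdisc q = { 3, 4500, 10, 12000 };

        fake_purge_queue(&q);
        printf("parent now %u pkts / %u bytes\n", q.parent_qlen, q.parent_backlog);
        return 0;
}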
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 24cc220a3218..d2ab463f22ae 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -845,16 +845,6 @@ qdisc_peek_len(struct Qdisc *sch) | |||
| 845 | } | 845 | } |
| 846 | 846 | ||
| 847 | static void | 847 | static void |
| 848 | hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) | ||
| 849 | { | ||
| 850 | unsigned int len = cl->qdisc->q.qlen; | ||
| 851 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 852 | |||
| 853 | qdisc_reset(cl->qdisc); | ||
| 854 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 855 | } | ||
| 856 | |||
| 857 | static void | ||
| 858 | hfsc_adjust_levels(struct hfsc_class *cl) | 848 | hfsc_adjust_levels(struct hfsc_class *cl) |
| 859 | { | 849 | { |
| 860 | struct hfsc_class *p; | 850 | struct hfsc_class *p; |
| @@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
| 1076 | qdisc_class_hash_insert(&q->clhash, &cl->cl_common); | 1066 | qdisc_class_hash_insert(&q->clhash, &cl->cl_common); |
| 1077 | list_add_tail(&cl->siblings, &parent->children); | 1067 | list_add_tail(&cl->siblings, &parent->children); |
| 1078 | if (parent->level == 0) | 1068 | if (parent->level == 0) |
| 1079 | hfsc_purge_queue(sch, parent); | 1069 | qdisc_purge_queue(parent->qdisc); |
| 1080 | hfsc_adjust_levels(parent); | 1070 | hfsc_adjust_levels(parent); |
| 1081 | sch_tree_unlock(sch); | 1071 | sch_tree_unlock(sch); |
| 1082 | 1072 | ||
| @@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 1112 | list_del(&cl->siblings); | 1102 | list_del(&cl->siblings); |
| 1113 | hfsc_adjust_levels(cl->cl_parent); | 1103 | hfsc_adjust_levels(cl->cl_parent); |
| 1114 | 1104 | ||
| 1115 | hfsc_purge_queue(sch, cl); | 1105 | qdisc_purge_queue(cl->qdisc); |
| 1116 | qdisc_class_hash_remove(&q->clhash, &cl->cl_common); | 1106 | qdisc_class_hash_remove(&q->clhash, &cl->cl_common); |
| 1117 | 1107 | ||
| 1118 | sch_tree_unlock(sch); | 1108 | sch_tree_unlock(sch); |
| @@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1328 | { | 1318 | { |
| 1329 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1319 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
| 1330 | struct tc_hfsc_stats xstats; | 1320 | struct tc_hfsc_stats xstats; |
| 1321 | __u32 qlen; | ||
| 1331 | 1322 | ||
| 1332 | cl->qstats.backlog = cl->qdisc->qstats.backlog; | 1323 | qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog); |
| 1333 | xstats.level = cl->level; | 1324 | xstats.level = cl->level; |
| 1334 | xstats.period = cl->cl_vtperiod; | 1325 | xstats.period = cl->cl_vtperiod; |
| 1335 | xstats.work = cl->cl_total; | 1326 | xstats.work = cl->cl_total; |
| @@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 1337 | 1328 | ||
| 1338 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || | 1329 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || |
| 1339 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 1330 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 1340 | gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0) | 1331 | gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) |
| 1341 | return -1; | 1332 | return -1; |
| 1342 | 1333 | ||
| 1343 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 1334 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 30f9da7e1076..2f9883b196e8 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) | |||
| 1127 | }; | 1127 | }; |
| 1128 | __u32 qlen = 0; | 1128 | __u32 qlen = 0; |
| 1129 | 1129 | ||
| 1130 | if (!cl->level && cl->leaf.q) { | 1130 | if (!cl->level && cl->leaf.q) |
| 1131 | qlen = cl->leaf.q->q.qlen; | 1131 | qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); |
| 1132 | qs.backlog = cl->leaf.q->qstats.backlog; | 1132 | |
| 1133 | } | ||
| 1134 | cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), | 1133 | cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), |
| 1135 | INT_MIN, INT_MAX); | 1134 | INT_MIN, INT_MAX); |
| 1136 | cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), | 1135 | cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), |
| @@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
| 1270 | 1269 | ||
| 1271 | sch_tree_lock(sch); | 1270 | sch_tree_lock(sch); |
| 1272 | 1271 | ||
| 1273 | if (!cl->level) { | 1272 | if (!cl->level) |
| 1274 | unsigned int qlen = cl->leaf.q->q.qlen; | 1273 | qdisc_purge_queue(cl->leaf.q); |
| 1275 | unsigned int backlog = cl->leaf.q->qstats.backlog; | ||
| 1276 | |||
| 1277 | qdisc_reset(cl->leaf.q); | ||
| 1278 | qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog); | ||
| 1279 | } | ||
| 1280 | 1274 | ||
| 1281 | /* delete from hash and active; remainder in destroy_class */ | 1275 | /* delete from hash and active; remainder in destroy_class */ |
| 1282 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 1276 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| @@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
| 1404 | classid, NULL); | 1398 | classid, NULL); |
| 1405 | sch_tree_lock(sch); | 1399 | sch_tree_lock(sch); |
| 1406 | if (parent && !parent->level) { | 1400 | if (parent && !parent->level) { |
| 1407 | unsigned int qlen = parent->leaf.q->q.qlen; | ||
| 1408 | unsigned int backlog = parent->leaf.q->qstats.backlog; | ||
| 1409 | |||
| 1410 | /* turn parent into inner node */ | 1401 | /* turn parent into inner node */ |
| 1411 | qdisc_reset(parent->leaf.q); | 1402 | qdisc_purge_queue(parent->leaf.q); |
| 1412 | qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog); | ||
| 1413 | qdisc_put(parent->leaf.q); | 1403 | qdisc_put(parent->leaf.q); |
| 1414 | if (parent->prio_activity) | 1404 | if (parent->prio_activity) |
| 1415 | htb_deactivate(q, parent); | 1405 | htb_deactivate(q, parent); |
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 203659bc3906..3a3312467692 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
| @@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 249 | 249 | ||
| 250 | sch = dev_queue->qdisc_sleeping; | 250 | sch = dev_queue->qdisc_sleeping; |
| 251 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || | 251 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || |
| 252 | gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) | 252 | qdisc_qstats_copy(d, sch) < 0) |
| 253 | return -1; | 253 | return -1; |
| 254 | return 0; | 254 | return 0; |
| 255 | } | 255 | } |
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index d364e63c396d..ea0dc112b38d 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c | |||
| @@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 561 | sch = dev_queue->qdisc_sleeping; | 561 | sch = dev_queue->qdisc_sleeping; |
| 562 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 562 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 563 | d, NULL, &sch->bstats) < 0 || | 563 | d, NULL, &sch->bstats) < 0 || |
| 564 | gnet_stats_copy_queue(d, NULL, | 564 | qdisc_qstats_copy(d, sch) < 0) |
| 565 | &sch->qstats, sch->q.qlen) < 0) | ||
| 566 | return -1; | 565 | return -1; |
| 567 | } | 566 | } |
| 568 | return 0; | 567 | return 0; |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 7410ce4d0321..35b03ae08e0f 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
| @@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 201 | for (i = q->bands; i < q->max_bands; i++) { | 201 | for (i = q->bands; i < q->max_bands; i++) { |
| 202 | if (q->queues[i] != &noop_qdisc) { | 202 | if (q->queues[i] != &noop_qdisc) { |
| 203 | struct Qdisc *child = q->queues[i]; | 203 | struct Qdisc *child = q->queues[i]; |
| 204 | |||
| 204 | q->queues[i] = &noop_qdisc; | 205 | q->queues[i] = &noop_qdisc; |
| 205 | qdisc_tree_reduce_backlog(child, child->q.qlen, | 206 | qdisc_tree_flush_backlog(child); |
| 206 | child->qstats.backlog); | ||
| 207 | qdisc_put(child); | 207 | qdisc_put(child); |
| 208 | } | 208 | } |
| 209 | } | 209 | } |
| @@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 225 | qdisc_hash_add(child, true); | 225 | qdisc_hash_add(child, true); |
| 226 | 226 | ||
| 227 | if (old != &noop_qdisc) { | 227 | if (old != &noop_qdisc) { |
| 228 | qdisc_tree_reduce_backlog(old, | 228 | qdisc_tree_flush_backlog(old); |
| 229 | old->q.qlen, | ||
| 230 | old->qstats.backlog); | ||
| 231 | qdisc_put(old); | 229 | qdisc_put(old); |
| 232 | } | 230 | } |
| 233 | sch_tree_unlock(sch); | 231 | sch_tree_unlock(sch); |
| @@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 344 | cl_q = q->queues[cl - 1]; | 342 | cl_q = q->queues[cl - 1]; |
| 345 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 343 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 346 | d, NULL, &cl_q->bstats) < 0 || | 344 | d, NULL, &cl_q->bstats) < 0 || |
| 347 | gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) | 345 | qdisc_qstats_copy(d, cl_q) < 0) |
| 348 | return -1; | 346 | return -1; |
| 349 | 347 | ||
| 350 | return 0; | 348 | return 0; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 847141cd900f..d519b21535b3 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, | |||
| 216 | q->bands = qopt->bands; | 216 | q->bands = qopt->bands; |
| 217 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); | 217 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
| 218 | 218 | ||
| 219 | for (i = q->bands; i < oldbands; i++) { | 219 | for (i = q->bands; i < oldbands; i++) |
| 220 | struct Qdisc *child = q->queues[i]; | 220 | qdisc_tree_flush_backlog(q->queues[i]); |
| 221 | |||
| 222 | qdisc_tree_reduce_backlog(child, child->q.qlen, | ||
| 223 | child->qstats.backlog); | ||
| 224 | } | ||
| 225 | 221 | ||
| 226 | for (i = oldbands; i < q->bands; i++) { | 222 | for (i = oldbands; i < q->bands; i++) { |
| 227 | q->queues[i] = queues[i]; | 223 | q->queues[i] = queues[i]; |
| @@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 365 | cl_q = q->queues[cl - 1]; | 361 | cl_q = q->queues[cl - 1]; |
| 366 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 362 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 367 | d, NULL, &cl_q->bstats) < 0 || | 363 | d, NULL, &cl_q->bstats) < 0 || |
| 368 | gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) | 364 | qdisc_qstats_copy(d, cl_q) < 0) |
| 369 | return -1; | 365 | return -1; |
| 370 | 366 | ||
| 371 | return 0; | 367 | return 0; |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 29f5c4a24688..1589364b54da 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) | |||
| 217 | return container_of(clc, struct qfq_class, common); | 217 | return container_of(clc, struct qfq_class, common); |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | static void qfq_purge_queue(struct qfq_class *cl) | ||
| 221 | { | ||
| 222 | unsigned int len = cl->qdisc->q.qlen; | ||
| 223 | unsigned int backlog = cl->qdisc->qstats.backlog; | ||
| 224 | |||
| 225 | qdisc_reset(cl->qdisc); | ||
| 226 | qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); | ||
| 227 | } | ||
| 228 | |||
| 229 | static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { | 220 | static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { |
| 230 | [TCA_QFQ_WEIGHT] = { .type = NLA_U32 }, | 221 | [TCA_QFQ_WEIGHT] = { .type = NLA_U32 }, |
| 231 | [TCA_QFQ_LMAX] = { .type = NLA_U32 }, | 222 | [TCA_QFQ_LMAX] = { .type = NLA_U32 }, |
| @@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg) | |||
| 551 | 542 | ||
| 552 | sch_tree_lock(sch); | 543 | sch_tree_lock(sch); |
| 553 | 544 | ||
| 554 | qfq_purge_queue(cl); | 545 | qdisc_purge_queue(cl->qdisc); |
| 555 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 546 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
| 556 | 547 | ||
| 557 | sch_tree_unlock(sch); | 548 | sch_tree_unlock(sch); |
| @@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
| 655 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), | 646 | if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), |
| 656 | d, NULL, &cl->bstats) < 0 || | 647 | d, NULL, &cl->bstats) < 0 || |
| 657 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 648 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || |
| 658 | gnet_stats_copy_queue(d, NULL, | 649 | qdisc_qstats_copy(d, cl->qdisc) < 0) |
| 659 | &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0) | ||
| 660 | return -1; | 650 | return -1; |
| 661 | 651 | ||
| 662 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 652 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
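The sch_qfq hunk above drops the local qfq_purge_queue() in favour of a generic qdisc_purge_queue(), whose definition is likewise outside the hunks shown. Judging from the removed qfq_purge_queue() body, the generic helper presumably performs the same reset-plus-backlog adjustment, roughly as in this sketch (not the definition from this series):

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	unsigned int qlen = sch->q.qlen;
	unsigned int backlog = sch->qstats.backlog;

	/* sketch: mirror of the removed qfq_purge_queue() */
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}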
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 9df9942340ea..4e8c0abf6194 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 233 | q->flags = ctl->flags; | 233 | q->flags = ctl->flags; |
| 234 | q->limit = ctl->limit; | 234 | q->limit = ctl->limit; |
| 235 | if (child) { | 235 | if (child) { |
| 236 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 236 | qdisc_tree_flush_backlog(q->qdisc); |
| 237 | q->qdisc->qstats.backlog); | ||
| 238 | old_child = q->qdisc; | 237 | old_child = q->qdisc; |
| 239 | q->qdisc = child; | 238 | q->qdisc = child; |
| 240 | } | 239 | } |
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index bab506b01a32..2419fdb75966 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c | |||
| @@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 521 | qdisc_hash_add(child, true); | 521 | qdisc_hash_add(child, true); |
| 522 | sch_tree_lock(sch); | 522 | sch_tree_lock(sch); |
| 523 | 523 | ||
| 524 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 524 | qdisc_tree_flush_backlog(q->qdisc); |
| 525 | q->qdisc->qstats.backlog); | ||
| 526 | qdisc_put(q->qdisc); | 525 | qdisc_put(q->qdisc); |
| 527 | q->qdisc = child; | 526 | q->qdisc = child; |
| 528 | 527 | ||
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 206e4dbed12f..c7041999eb5d 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c | |||
| @@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 895 | 895 | ||
| 896 | sch = dev_queue->qdisc_sleeping; | 896 | sch = dev_queue->qdisc_sleeping; |
| 897 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || | 897 | if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 || |
| 898 | gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) | 898 | qdisc_qstats_copy(d, sch) < 0) |
| 899 | return -1; | 899 | return -1; |
| 900 | return 0; | 900 | return 0; |
| 901 | } | 901 | } |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 7f272a9070c5..f71578dbb9e3 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 391 | 391 | ||
| 392 | sch_tree_lock(sch); | 392 | sch_tree_lock(sch); |
| 393 | if (child) { | 393 | if (child) { |
| 394 | qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, | 394 | qdisc_tree_flush_backlog(q->qdisc); |
| 395 | q->qdisc->qstats.backlog); | ||
| 396 | qdisc_put(q->qdisc); | 395 | qdisc_put(q->qdisc); |
| 397 | q->qdisc = child; | 396 | q->qdisc = child; |
| 398 | } | 397 | } |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 6abc8b274270..951afdeea5e9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -600,6 +600,7 @@ out: | |||
| 600 | static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) | 600 | static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) |
| 601 | { | 601 | { |
| 602 | /* No address mapping for V4 sockets */ | 602 | /* No address mapping for V4 sockets */ |
| 603 | memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); | ||
| 603 | return sizeof(struct sockaddr_in); | 604 | return sizeof(struct sockaddr_in); |
| 604 | } | 605 | } |
| 605 | 606 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9874e60c9b0d..4583fa914e62 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -4847,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr, | |||
| 4847 | } | 4847 | } |
| 4848 | 4848 | ||
| 4849 | /* Validate addr_len before calling common connect/connectx routine. */ | 4849 | /* Validate addr_len before calling common connect/connectx routine. */ |
| 4850 | af = sctp_get_af_specific(addr->sa_family); | 4850 | af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL : |
| 4851 | sctp_get_af_specific(addr->sa_family); | ||
| 4851 | if (!af || addr_len < af->sockaddr_len) { | 4852 | if (!af || addr_len < af->sockaddr_len) { |
| 4852 | err = -EINVAL; | 4853 | err = -EINVAL; |
| 4853 | } else { | 4854 | } else { |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 77ef53596d18..6f869ef49b32 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -167,10 +167,9 @@ static int smc_release(struct socket *sock) | |||
| 167 | 167 | ||
| 168 | if (sk->sk_state == SMC_CLOSED) { | 168 | if (sk->sk_state == SMC_CLOSED) { |
| 169 | if (smc->clcsock) { | 169 | if (smc->clcsock) { |
| 170 | mutex_lock(&smc->clcsock_release_lock); | 170 | release_sock(sk); |
| 171 | sock_release(smc->clcsock); | 171 | smc_clcsock_release(smc); |
| 172 | smc->clcsock = NULL; | 172 | lock_sock(sk); |
| 173 | mutex_unlock(&smc->clcsock_release_lock); | ||
| 174 | } | 173 | } |
| 175 | if (!smc->use_fallback) | 174 | if (!smc->use_fallback) |
| 176 | smc_conn_free(&smc->conn); | 175 | smc_conn_free(&smc->conn); |
| @@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link, | |||
| 446 | link->peer_mtu = clc->qp_mtu; | 445 | link->peer_mtu = clc->qp_mtu; |
| 447 | } | 446 | } |
| 448 | 447 | ||
| 448 | static void smc_switch_to_fallback(struct smc_sock *smc) | ||
| 449 | { | ||
| 450 | smc->use_fallback = true; | ||
| 451 | if (smc->sk.sk_socket && smc->sk.sk_socket->file) { | ||
| 452 | smc->clcsock->file = smc->sk.sk_socket->file; | ||
| 453 | smc->clcsock->file->private_data = smc->clcsock; | ||
| 454 | } | ||
| 455 | } | ||
| 456 | |||
| 449 | /* fall back during connect */ | 457 | /* fall back during connect */ |
| 450 | static int smc_connect_fallback(struct smc_sock *smc, int reason_code) | 458 | static int smc_connect_fallback(struct smc_sock *smc, int reason_code) |
| 451 | { | 459 | { |
| 452 | smc->use_fallback = true; | 460 | smc_switch_to_fallback(smc); |
| 453 | smc->fallback_rsn = reason_code; | 461 | smc->fallback_rsn = reason_code; |
| 454 | smc_copy_sock_settings_to_clc(smc); | 462 | smc_copy_sock_settings_to_clc(smc); |
| 455 | if (smc->sk.sk_state == SMC_INIT) | 463 | if (smc->sk.sk_state == SMC_INIT) |
| @@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work) | |||
| 775 | smc->sk.sk_err = -rc; | 783 | smc->sk.sk_err = -rc; |
| 776 | 784 | ||
| 777 | out: | 785 | out: |
| 778 | if (smc->sk.sk_err) | 786 | if (!sock_flag(&smc->sk, SOCK_DEAD)) { |
| 779 | smc->sk.sk_state_change(&smc->sk); | 787 | if (smc->sk.sk_err) { |
| 780 | else | 788 | smc->sk.sk_state_change(&smc->sk); |
| 781 | smc->sk.sk_write_space(&smc->sk); | 789 | } else { /* allow polling before and after fallback decision */ |
| 790 | smc->clcsock->sk->sk_write_space(smc->clcsock->sk); | ||
| 791 | smc->sk.sk_write_space(&smc->sk); | ||
| 792 | } | ||
| 793 | } | ||
| 782 | kfree(smc->connect_info); | 794 | kfree(smc->connect_info); |
| 783 | smc->connect_info = NULL; | 795 | smc->connect_info = NULL; |
| 784 | release_sock(&smc->sk); | 796 | release_sock(&smc->sk); |
| @@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) | |||
| 872 | if (rc < 0) | 884 | if (rc < 0) |
| 873 | lsk->sk_err = -rc; | 885 | lsk->sk_err = -rc; |
| 874 | if (rc < 0 || lsk->sk_state == SMC_CLOSED) { | 886 | if (rc < 0 || lsk->sk_state == SMC_CLOSED) { |
| 887 | new_sk->sk_prot->unhash(new_sk); | ||
| 875 | if (new_clcsock) | 888 | if (new_clcsock) |
| 876 | sock_release(new_clcsock); | 889 | sock_release(new_clcsock); |
| 877 | new_sk->sk_state = SMC_CLOSED; | 890 | new_sk->sk_state = SMC_CLOSED; |
| 878 | sock_set_flag(new_sk, SOCK_DEAD); | 891 | sock_set_flag(new_sk, SOCK_DEAD); |
| 879 | new_sk->sk_prot->unhash(new_sk); | ||
| 880 | sock_put(new_sk); /* final */ | 892 | sock_put(new_sk); /* final */ |
| 881 | *new_smc = NULL; | 893 | *new_smc = NULL; |
| 882 | goto out; | 894 | goto out; |
| @@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent, | |||
| 927 | 939 | ||
| 928 | smc_accept_unlink(new_sk); | 940 | smc_accept_unlink(new_sk); |
| 929 | if (new_sk->sk_state == SMC_CLOSED) { | 941 | if (new_sk->sk_state == SMC_CLOSED) { |
| 942 | new_sk->sk_prot->unhash(new_sk); | ||
| 930 | if (isk->clcsock) { | 943 | if (isk->clcsock) { |
| 931 | sock_release(isk->clcsock); | 944 | sock_release(isk->clcsock); |
| 932 | isk->clcsock = NULL; | 945 | isk->clcsock = NULL; |
| 933 | } | 946 | } |
| 934 | new_sk->sk_prot->unhash(new_sk); | ||
| 935 | sock_put(new_sk); /* final */ | 947 | sock_put(new_sk); /* final */ |
| 936 | continue; | 948 | continue; |
| 937 | } | 949 | } |
| 938 | if (new_sock) | 950 | if (new_sock) { |
| 939 | sock_graft(new_sk, new_sock); | 951 | sock_graft(new_sk, new_sock); |
| 952 | if (isk->use_fallback) { | ||
| 953 | smc_sk(new_sk)->clcsock->file = new_sock->file; | ||
| 954 | isk->clcsock->file->private_data = isk->clcsock; | ||
| 955 | } | ||
| 956 | } | ||
| 940 | return new_sk; | 957 | return new_sk; |
| 941 | } | 958 | } |
| 942 | return NULL; | 959 | return NULL; |
| @@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk) | |||
| 956 | sock_set_flag(sk, SOCK_DEAD); | 973 | sock_set_flag(sk, SOCK_DEAD); |
| 957 | sk->sk_shutdown |= SHUTDOWN_MASK; | 974 | sk->sk_shutdown |= SHUTDOWN_MASK; |
| 958 | } | 975 | } |
| 976 | sk->sk_prot->unhash(sk); | ||
| 959 | if (smc->clcsock) { | 977 | if (smc->clcsock) { |
| 960 | struct socket *tcp; | 978 | struct socket *tcp; |
| 961 | 979 | ||
| @@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk) | |||
| 971 | smc_conn_free(&smc->conn); | 989 | smc_conn_free(&smc->conn); |
| 972 | } | 990 | } |
| 973 | release_sock(sk); | 991 | release_sock(sk); |
| 974 | sk->sk_prot->unhash(sk); | ||
| 975 | sock_put(sk); /* final sock_put */ | 992 | sock_put(sk); /* final sock_put */ |
| 976 | } | 993 | } |
| 977 | 994 | ||
| @@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc) | |||
| 1037 | struct smc_sock *lsmc = new_smc->listen_smc; | 1054 | struct smc_sock *lsmc = new_smc->listen_smc; |
| 1038 | struct sock *newsmcsk = &new_smc->sk; | 1055 | struct sock *newsmcsk = &new_smc->sk; |
| 1039 | 1056 | ||
| 1040 | lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); | ||
| 1041 | if (lsmc->sk.sk_state == SMC_LISTEN) { | 1057 | if (lsmc->sk.sk_state == SMC_LISTEN) { |
| 1058 | lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); | ||
| 1042 | smc_accept_enqueue(&lsmc->sk, newsmcsk); | 1059 | smc_accept_enqueue(&lsmc->sk, newsmcsk); |
| 1060 | release_sock(&lsmc->sk); | ||
| 1043 | } else { /* no longer listening */ | 1061 | } else { /* no longer listening */ |
| 1044 | smc_close_non_accepted(newsmcsk); | 1062 | smc_close_non_accepted(newsmcsk); |
| 1045 | } | 1063 | } |
| 1046 | release_sock(&lsmc->sk); | ||
| 1047 | 1064 | ||
| 1048 | /* Wake up accept */ | 1065 | /* Wake up accept */ |
| 1049 | lsmc->sk.sk_data_ready(&lsmc->sk); | 1066 | lsmc->sk.sk_data_ready(&lsmc->sk); |
| @@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code, | |||
| 1087 | return; | 1104 | return; |
| 1088 | } | 1105 | } |
| 1089 | smc_conn_free(&new_smc->conn); | 1106 | smc_conn_free(&new_smc->conn); |
| 1090 | new_smc->use_fallback = true; | 1107 | smc_switch_to_fallback(new_smc); |
| 1091 | new_smc->fallback_rsn = reason_code; | 1108 | new_smc->fallback_rsn = reason_code; |
| 1092 | if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { | 1109 | if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { |
| 1093 | if (smc_clc_send_decline(new_smc, reason_code) < 0) { | 1110 | if (smc_clc_send_decline(new_smc, reason_code) < 0) { |
| @@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work) | |||
| 1237 | int rc = 0; | 1254 | int rc = 0; |
| 1238 | u8 ibport; | 1255 | u8 ibport; |
| 1239 | 1256 | ||
| 1257 | if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) | ||
| 1258 | return smc_listen_out_err(new_smc); | ||
| 1259 | |||
| 1240 | if (new_smc->use_fallback) { | 1260 | if (new_smc->use_fallback) { |
| 1241 | smc_listen_out_connected(new_smc); | 1261 | smc_listen_out_connected(new_smc); |
| 1242 | return; | 1262 | return; |
| @@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work) | |||
| 1244 | 1264 | ||
| 1245 | /* check if peer is smc capable */ | 1265 | /* check if peer is smc capable */ |
| 1246 | if (!tcp_sk(newclcsock->sk)->syn_smc) { | 1266 | if (!tcp_sk(newclcsock->sk)->syn_smc) { |
| 1247 | new_smc->use_fallback = true; | 1267 | smc_switch_to_fallback(new_smc); |
| 1248 | new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; | 1268 | new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; |
| 1249 | smc_listen_out_connected(new_smc); | 1269 | smc_listen_out_connected(new_smc); |
| 1250 | return; | 1270 | return; |
| @@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 1501 | 1521 | ||
| 1502 | if (msg->msg_flags & MSG_FASTOPEN) { | 1522 | if (msg->msg_flags & MSG_FASTOPEN) { |
| 1503 | if (sk->sk_state == SMC_INIT) { | 1523 | if (sk->sk_state == SMC_INIT) { |
| 1504 | smc->use_fallback = true; | 1524 | smc_switch_to_fallback(smc); |
| 1505 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; | 1525 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; |
| 1506 | } else { | 1526 | } else { |
| 1507 | rc = -EINVAL; | 1527 | rc = -EINVAL; |
| @@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, | |||
| 1703 | case TCP_FASTOPEN_NO_COOKIE: | 1723 | case TCP_FASTOPEN_NO_COOKIE: |
| 1704 | /* option not supported by SMC */ | 1724 | /* option not supported by SMC */ |
| 1705 | if (sk->sk_state == SMC_INIT) { | 1725 | if (sk->sk_state == SMC_INIT) { |
| 1706 | smc->use_fallback = true; | 1726 | smc_switch_to_fallback(smc); |
| 1707 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; | 1727 | smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; |
| 1708 | } else { | 1728 | } else { |
| 1709 | if (!smc->use_fallback) | 1729 | if (!smc->use_fallback) |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 2ad37e998509..fc06720b53c1 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
| @@ -21,6 +21,22 @@ | |||
| 21 | 21 | ||
| 22 | #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ) | 22 | #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ) |
| 23 | 23 | ||
| 24 | /* release the clcsock that is assigned to the smc_sock */ | ||
| 25 | void smc_clcsock_release(struct smc_sock *smc) | ||
| 26 | { | ||
| 27 | struct socket *tcp; | ||
| 28 | |||
| 29 | if (smc->listen_smc && current_work() != &smc->smc_listen_work) | ||
| 30 | cancel_work_sync(&smc->smc_listen_work); | ||
| 31 | mutex_lock(&smc->clcsock_release_lock); | ||
| 32 | if (smc->clcsock) { | ||
| 33 | tcp = smc->clcsock; | ||
| 34 | smc->clcsock = NULL; | ||
| 35 | sock_release(tcp); | ||
| 36 | } | ||
| 37 | mutex_unlock(&smc->clcsock_release_lock); | ||
| 38 | } | ||
| 39 | |||
| 24 | static void smc_close_cleanup_listen(struct sock *parent) | 40 | static void smc_close_cleanup_listen(struct sock *parent) |
| 25 | { | 41 | { |
| 26 | struct sock *sk; | 42 | struct sock *sk; |
| @@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work) | |||
| 321 | close_work); | 337 | close_work); |
| 322 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); | 338 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); |
| 323 | struct smc_cdc_conn_state_flags *rxflags; | 339 | struct smc_cdc_conn_state_flags *rxflags; |
| 340 | bool release_clcsock = false; | ||
| 324 | struct sock *sk = &smc->sk; | 341 | struct sock *sk = &smc->sk; |
| 325 | int old_state; | 342 | int old_state; |
| 326 | 343 | ||
| @@ -400,13 +417,13 @@ wakeup: | |||
| 400 | if ((sk->sk_state == SMC_CLOSED) && | 417 | if ((sk->sk_state == SMC_CLOSED) && |
| 401 | (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { | 418 | (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { |
| 402 | smc_conn_free(conn); | 419 | smc_conn_free(conn); |
| 403 | if (smc->clcsock) { | 420 | if (smc->clcsock) |
| 404 | sock_release(smc->clcsock); | 421 | release_clcsock = true; |
| 405 | smc->clcsock = NULL; | ||
| 406 | } | ||
| 407 | } | 422 | } |
| 408 | } | 423 | } |
| 409 | release_sock(sk); | 424 | release_sock(sk); |
| 425 | if (release_clcsock) | ||
| 426 | smc_clcsock_release(smc); | ||
| 410 | sock_put(sk); /* sock_hold done by schedulers of close_work */ | 427 | sock_put(sk); /* sock_hold done by schedulers of close_work */ |
| 411 | } | 428 | } |
| 412 | 429 | ||
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h index 19eb6a211c23..e0e3b5df25d2 100644 --- a/net/smc/smc_close.h +++ b/net/smc/smc_close.h | |||
| @@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc); | |||
| 23 | int smc_close_active(struct smc_sock *smc); | 23 | int smc_close_active(struct smc_sock *smc); |
| 24 | int smc_close_shutdown_write(struct smc_sock *smc); | 24 | int smc_close_shutdown_write(struct smc_sock *smc); |
| 25 | void smc_close_init(struct smc_sock *smc); | 25 | void smc_close_init(struct smc_sock *smc); |
| 26 | void smc_clcsock_release(struct smc_sock *smc); | ||
| 26 | 27 | ||
| 27 | #endif /* SMC_CLOSE_H */ | 28 | #endif /* SMC_CLOSE_H */ |
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index 2fff79db1a59..e89e918b88e0 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c | |||
| @@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, | |||
| 289 | INIT_LIST_HEAD(&smcd->vlan); | 289 | INIT_LIST_HEAD(&smcd->vlan); |
| 290 | smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", | 290 | smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", |
| 291 | WQ_MEM_RECLAIM, name); | 291 | WQ_MEM_RECLAIM, name); |
| 292 | if (!smcd->event_wq) { | ||
| 293 | kfree(smcd->conn); | ||
| 294 | kfree(smcd); | ||
| 295 | return NULL; | ||
| 296 | } | ||
| 292 | return smcd; | 297 | return smcd; |
| 293 | } | 298 | } |
| 294 | EXPORT_SYMBOL_GPL(smcd_alloc_dev); | 299 | EXPORT_SYMBOL_GPL(smcd_alloc_dev); |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 8d2f6296279c..0285c7f9e79b 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
| @@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) | |||
| 603 | { | 603 | { |
| 604 | struct net *net = genl_info_net(info); | 604 | struct net *net = genl_info_net(info); |
| 605 | 605 | ||
| 606 | return smc_pnet_remove_by_pnetid(net, NULL); | 606 | smc_pnet_remove_by_pnetid(net, NULL); |
| 607 | return 0; | ||
| 607 | } | 608 | } |
| 608 | 609 | ||
| 609 | /* SMC_PNETID generic netlink operation definition */ | 610 | /* SMC_PNETID generic netlink operation definition */ |
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 860dcfb95ee4..fa6c977b4c41 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
| @@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
| 140 | /* We are going to append to the frags_list of head. | 140 | /* We are going to append to the frags_list of head. |
| 141 | * Need to unshare the frag_list. | 141 | * Need to unshare the frag_list. |
| 142 | */ | 142 | */ |
| 143 | if (skb_has_frag_list(head)) { | 143 | err = skb_unclone(head, GFP_ATOMIC); |
| 144 | err = skb_unclone(head, GFP_ATOMIC); | 144 | if (err) { |
| 145 | if (err) { | 145 | STRP_STATS_INCR(strp->stats.mem_fail); |
| 146 | STRP_STATS_INCR(strp->stats.mem_fail); | 146 | desc->error = err; |
| 147 | desc->error = err; | 147 | return 0; |
| 148 | return 0; | ||
| 149 | } | ||
| 150 | } | 148 | } |
| 151 | 149 | ||
| 152 | if (unlikely(skb_shinfo(head)->frag_list)) { | 150 | if (unlikely(skb_shinfo(head)->frag_list)) { |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 12bb23b8e0c5..261131dfa1f1 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
| @@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail) | |||
| 54 | h->last_refresh = now; | 54 | h->last_refresh = now; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static inline int cache_is_valid(struct cache_head *h); | ||
| 57 | static void cache_fresh_locked(struct cache_head *head, time_t expiry, | 58 | static void cache_fresh_locked(struct cache_head *head, time_t expiry, |
| 58 | struct cache_detail *detail); | 59 | struct cache_detail *detail); |
| 59 | static void cache_fresh_unlocked(struct cache_head *head, | 60 | static void cache_fresh_unlocked(struct cache_head *head, |
| @@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, | |||
| 105 | if (cache_is_expired(detail, tmp)) { | 106 | if (cache_is_expired(detail, tmp)) { |
| 106 | hlist_del_init_rcu(&tmp->cache_list); | 107 | hlist_del_init_rcu(&tmp->cache_list); |
| 107 | detail->entries --; | 108 | detail->entries --; |
| 109 | if (cache_is_valid(tmp) == -EAGAIN) | ||
| 110 | set_bit(CACHE_NEGATIVE, &tmp->flags); | ||
| 108 | cache_fresh_locked(tmp, 0, detail); | 111 | cache_fresh_locked(tmp, 0, detail); |
| 109 | freeme = tmp; | 112 | freeme = tmp; |
| 110 | break; | 113 | break; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 187d10443a15..8ff11dc98d7f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -1540,7 +1540,6 @@ call_start(struct rpc_task *task) | |||
| 1540 | clnt->cl_stats->rpccnt++; | 1540 | clnt->cl_stats->rpccnt++; |
| 1541 | task->tk_action = call_reserve; | 1541 | task->tk_action = call_reserve; |
| 1542 | rpc_task_set_transport(task, clnt); | 1542 | rpc_task_set_transport(task, clnt); |
| 1543 | call_reserve(task); | ||
| 1544 | } | 1543 | } |
| 1545 | 1544 | ||
| 1546 | /* | 1545 | /* |
| @@ -1554,9 +1553,6 @@ call_reserve(struct rpc_task *task) | |||
| 1554 | task->tk_status = 0; | 1553 | task->tk_status = 0; |
| 1555 | task->tk_action = call_reserveresult; | 1554 | task->tk_action = call_reserveresult; |
| 1556 | xprt_reserve(task); | 1555 | xprt_reserve(task); |
| 1557 | if (rpc_task_need_resched(task)) | ||
| 1558 | return; | ||
| 1559 | call_reserveresult(task); | ||
| 1560 | } | 1556 | } |
| 1561 | 1557 | ||
| 1562 | static void call_retry_reserve(struct rpc_task *task); | 1558 | static void call_retry_reserve(struct rpc_task *task); |
| @@ -1579,7 +1575,6 @@ call_reserveresult(struct rpc_task *task) | |||
| 1579 | if (status >= 0) { | 1575 | if (status >= 0) { |
| 1580 | if (task->tk_rqstp) { | 1576 | if (task->tk_rqstp) { |
| 1581 | task->tk_action = call_refresh; | 1577 | task->tk_action = call_refresh; |
| 1582 | call_refresh(task); | ||
| 1583 | return; | 1578 | return; |
| 1584 | } | 1579 | } |
| 1585 | 1580 | ||
| @@ -1605,7 +1600,6 @@ call_reserveresult(struct rpc_task *task) | |||
| 1605 | /* fall through */ | 1600 | /* fall through */ |
| 1606 | case -EAGAIN: /* woken up; retry */ | 1601 | case -EAGAIN: /* woken up; retry */ |
| 1607 | task->tk_action = call_retry_reserve; | 1602 | task->tk_action = call_retry_reserve; |
| 1608 | call_retry_reserve(task); | ||
| 1609 | return; | 1603 | return; |
| 1610 | case -EIO: /* probably a shutdown */ | 1604 | case -EIO: /* probably a shutdown */ |
| 1611 | break; | 1605 | break; |
| @@ -1628,9 +1622,6 @@ call_retry_reserve(struct rpc_task *task) | |||
| 1628 | task->tk_status = 0; | 1622 | task->tk_status = 0; |
| 1629 | task->tk_action = call_reserveresult; | 1623 | task->tk_action = call_reserveresult; |
| 1630 | xprt_retry_reserve(task); | 1624 | xprt_retry_reserve(task); |
| 1631 | if (rpc_task_need_resched(task)) | ||
| 1632 | return; | ||
| 1633 | call_reserveresult(task); | ||
| 1634 | } | 1625 | } |
| 1635 | 1626 | ||
| 1636 | /* | 1627 | /* |
| @@ -1645,9 +1636,6 @@ call_refresh(struct rpc_task *task) | |||
| 1645 | task->tk_status = 0; | 1636 | task->tk_status = 0; |
| 1646 | task->tk_client->cl_stats->rpcauthrefresh++; | 1637 | task->tk_client->cl_stats->rpcauthrefresh++; |
| 1647 | rpcauth_refreshcred(task); | 1638 | rpcauth_refreshcred(task); |
| 1648 | if (rpc_task_need_resched(task)) | ||
| 1649 | return; | ||
| 1650 | call_refreshresult(task); | ||
| 1651 | } | 1639 | } |
| 1652 | 1640 | ||
| 1653 | /* | 1641 | /* |
| @@ -1666,7 +1654,6 @@ call_refreshresult(struct rpc_task *task) | |||
| 1666 | case 0: | 1654 | case 0: |
| 1667 | if (rpcauth_uptodatecred(task)) { | 1655 | if (rpcauth_uptodatecred(task)) { |
| 1668 | task->tk_action = call_allocate; | 1656 | task->tk_action = call_allocate; |
| 1669 | call_allocate(task); | ||
| 1670 | return; | 1657 | return; |
| 1671 | } | 1658 | } |
| 1672 | /* Use rate-limiting and a max number of retries if refresh | 1659 | /* Use rate-limiting and a max number of retries if refresh |
| @@ -1685,7 +1672,6 @@ call_refreshresult(struct rpc_task *task) | |||
| 1685 | task->tk_cred_retry--; | 1672 | task->tk_cred_retry--; |
| 1686 | dprintk("RPC: %5u %s: retry refresh creds\n", | 1673 | dprintk("RPC: %5u %s: retry refresh creds\n", |
| 1687 | task->tk_pid, __func__); | 1674 | task->tk_pid, __func__); |
| 1688 | call_refresh(task); | ||
| 1689 | return; | 1675 | return; |
| 1690 | } | 1676 | } |
| 1691 | dprintk("RPC: %5u %s: refresh creds failed with error %d\n", | 1677 | dprintk("RPC: %5u %s: refresh creds failed with error %d\n", |
| @@ -1711,10 +1697,8 @@ call_allocate(struct rpc_task *task) | |||
| 1711 | task->tk_status = 0; | 1697 | task->tk_status = 0; |
| 1712 | task->tk_action = call_encode; | 1698 | task->tk_action = call_encode; |
| 1713 | 1699 | ||
| 1714 | if (req->rq_buffer) { | 1700 | if (req->rq_buffer) |
| 1715 | call_encode(task); | ||
| 1716 | return; | 1701 | return; |
| 1717 | } | ||
| 1718 | 1702 | ||
| 1719 | if (proc->p_proc != 0) { | 1703 | if (proc->p_proc != 0) { |
| 1720 | BUG_ON(proc->p_arglen == 0); | 1704 | BUG_ON(proc->p_arglen == 0); |
| @@ -1740,12 +1724,8 @@ call_allocate(struct rpc_task *task) | |||
| 1740 | 1724 | ||
| 1741 | status = xprt->ops->buf_alloc(task); | 1725 | status = xprt->ops->buf_alloc(task); |
| 1742 | xprt_inject_disconnect(xprt); | 1726 | xprt_inject_disconnect(xprt); |
| 1743 | if (status == 0) { | 1727 | if (status == 0) |
| 1744 | if (rpc_task_need_resched(task)) | ||
| 1745 | return; | ||
| 1746 | call_encode(task); | ||
| 1747 | return; | 1728 | return; |
| 1748 | } | ||
| 1749 | if (status != -ENOMEM) { | 1729 | if (status != -ENOMEM) { |
| 1750 | rpc_exit(task, status); | 1730 | rpc_exit(task, status); |
| 1751 | return; | 1731 | return; |
| @@ -1828,8 +1808,12 @@ call_encode(struct rpc_task *task) | |||
| 1828 | xprt_request_enqueue_receive(task); | 1808 | xprt_request_enqueue_receive(task); |
| 1829 | xprt_request_enqueue_transmit(task); | 1809 | xprt_request_enqueue_transmit(task); |
| 1830 | out: | 1810 | out: |
| 1831 | task->tk_action = call_bind; | 1811 | task->tk_action = call_transmit; |
| 1832 | call_bind(task); | 1812 | /* Check that the connection is OK */ |
| 1813 | if (!xprt_bound(task->tk_xprt)) | ||
| 1814 | task->tk_action = call_bind; | ||
| 1815 | else if (!xprt_connected(task->tk_xprt)) | ||
| 1816 | task->tk_action = call_connect; | ||
| 1833 | } | 1817 | } |
| 1834 | 1818 | ||
| 1835 | /* | 1819 | /* |
| @@ -1847,7 +1831,6 @@ rpc_task_handle_transmitted(struct rpc_task *task) | |||
| 1847 | { | 1831 | { |
| 1848 | xprt_end_transmit(task); | 1832 | xprt_end_transmit(task); |
| 1849 | task->tk_action = call_transmit_status; | 1833 | task->tk_action = call_transmit_status; |
| 1850 | call_transmit_status(task); | ||
| 1851 | } | 1834 | } |
| 1852 | 1835 | ||
| 1853 | /* | 1836 | /* |
| @@ -1865,7 +1848,6 @@ call_bind(struct rpc_task *task) | |||
| 1865 | 1848 | ||
| 1866 | if (xprt_bound(xprt)) { | 1849 | if (xprt_bound(xprt)) { |
| 1867 | task->tk_action = call_connect; | 1850 | task->tk_action = call_connect; |
| 1868 | call_connect(task); | ||
| 1869 | return; | 1851 | return; |
| 1870 | } | 1852 | } |
| 1871 | 1853 | ||
| @@ -1896,7 +1878,6 @@ call_bind_status(struct rpc_task *task) | |||
| 1896 | dprint_status(task); | 1878 | dprint_status(task); |
| 1897 | task->tk_status = 0; | 1879 | task->tk_status = 0; |
| 1898 | task->tk_action = call_connect; | 1880 | task->tk_action = call_connect; |
| 1899 | call_connect(task); | ||
| 1900 | return; | 1881 | return; |
| 1901 | } | 1882 | } |
| 1902 | 1883 | ||
| @@ -1981,7 +1962,6 @@ call_connect(struct rpc_task *task) | |||
| 1981 | 1962 | ||
| 1982 | if (xprt_connected(xprt)) { | 1963 | if (xprt_connected(xprt)) { |
| 1983 | task->tk_action = call_transmit; | 1964 | task->tk_action = call_transmit; |
| 1984 | call_transmit(task); | ||
| 1985 | return; | 1965 | return; |
| 1986 | } | 1966 | } |
| 1987 | 1967 | ||
| @@ -2051,7 +2031,6 @@ call_connect_status(struct rpc_task *task) | |||
| 2051 | case 0: | 2031 | case 0: |
| 2052 | clnt->cl_stats->netreconn++; | 2032 | clnt->cl_stats->netreconn++; |
| 2053 | task->tk_action = call_transmit; | 2033 | task->tk_action = call_transmit; |
| 2054 | call_transmit(task); | ||
| 2055 | return; | 2034 | return; |
| 2056 | } | 2035 | } |
| 2057 | rpc_exit(task, status); | 2036 | rpc_exit(task, status); |
| @@ -2087,9 +2066,6 @@ call_transmit(struct rpc_task *task) | |||
| 2087 | xprt_transmit(task); | 2066 | xprt_transmit(task); |
| 2088 | } | 2067 | } |
| 2089 | xprt_end_transmit(task); | 2068 | xprt_end_transmit(task); |
| 2090 | if (rpc_task_need_resched(task)) | ||
| 2091 | return; | ||
| 2092 | call_transmit_status(task); | ||
| 2093 | } | 2069 | } |
| 2094 | 2070 | ||
| 2095 | /* | 2071 | /* |
| @@ -2105,11 +2081,8 @@ call_transmit_status(struct rpc_task *task) | |||
| 2105 | * test first. | 2081 | * test first. |
| 2106 | */ | 2082 | */ |
| 2107 | if (rpc_task_transmitted(task)) { | 2083 | if (rpc_task_transmitted(task)) { |
| 2108 | if (task->tk_status == 0) | 2084 | task->tk_status = 0; |
| 2109 | xprt_request_wait_receive(task); | 2085 | xprt_request_wait_receive(task); |
| 2110 | if (rpc_task_need_resched(task)) | ||
| 2111 | return; | ||
| 2112 | call_status(task); | ||
| 2113 | return; | 2086 | return; |
| 2114 | } | 2087 | } |
| 2115 | 2088 | ||
| @@ -2170,7 +2143,6 @@ call_bc_encode(struct rpc_task *task) | |||
| 2170 | { | 2143 | { |
| 2171 | xprt_request_enqueue_transmit(task); | 2144 | xprt_request_enqueue_transmit(task); |
| 2172 | task->tk_action = call_bc_transmit; | 2145 | task->tk_action = call_bc_transmit; |
| 2173 | call_bc_transmit(task); | ||
| 2174 | } | 2146 | } |
| 2175 | 2147 | ||
| 2176 | /* | 2148 | /* |
| @@ -2195,6 +2167,9 @@ call_bc_transmit_status(struct rpc_task *task) | |||
| 2195 | { | 2167 | { |
| 2196 | struct rpc_rqst *req = task->tk_rqstp; | 2168 | struct rpc_rqst *req = task->tk_rqstp; |
| 2197 | 2169 | ||
| 2170 | if (rpc_task_transmitted(task)) | ||
| 2171 | task->tk_status = 0; | ||
| 2172 | |||
| 2198 | dprint_status(task); | 2173 | dprint_status(task); |
| 2199 | 2174 | ||
| 2200 | switch (task->tk_status) { | 2175 | switch (task->tk_status) { |
| @@ -2261,7 +2236,6 @@ call_status(struct rpc_task *task) | |||
| 2261 | status = task->tk_status; | 2236 | status = task->tk_status; |
| 2262 | if (status >= 0) { | 2237 | if (status >= 0) { |
| 2263 | task->tk_action = call_decode; | 2238 | task->tk_action = call_decode; |
| 2264 | call_decode(task); | ||
| 2265 | return; | 2239 | return; |
| 2266 | } | 2240 | } |
| 2267 | 2241 | ||
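The sunrpc/clnt.c hunks above remove the direct calls into the next state handler (call_connect(task), call_transmit(task), call_decode(task), ...) along with the rpc_task_need_resched() short-circuits. After this change each handler only records the next step in task->tk_action and returns, and the RPC scheduler loop (__rpc_execute()) is expected to invoke that step on its next iteration. The pattern, with placeholder function names that are not taken from the diff:

static void example_state(struct rpc_task *task)
{
	task->tk_status = 0;
	/* hand the next step back to the scheduler instead of calling it directly */
	task->tk_action = example_next_state;
}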
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 89a63391d4d4..30cfc0efe699 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) | |||
| 90 | /* Flush Receives, then wait for deferred Reply work | 90 | /* Flush Receives, then wait for deferred Reply work |
| 91 | * to complete. | 91 | * to complete. |
| 92 | */ | 92 | */ |
| 93 | ib_drain_qp(ia->ri_id->qp); | 93 | ib_drain_rq(ia->ri_id->qp); |
| 94 | drain_workqueue(buf->rb_completion_wq); | 94 | drain_workqueue(buf->rb_completion_wq); |
| 95 | 95 | ||
| 96 | /* Deferred Reply processing might have scheduled | 96 | /* Deferred Reply processing might have scheduled |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 341ecd796aa4..131aa2f0fd27 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l) | |||
| 869 | __skb_queue_head_init(&list); | 869 | __skb_queue_head_init(&list); |
| 870 | 870 | ||
| 871 | l->in_session = false; | 871 | l->in_session = false; |
| 872 | /* Force re-synch of peer session number before establishing */ | ||
| 873 | l->peer_session--; | ||
| 872 | l->session++; | 874 | l->session++; |
| 873 | l->mtu = l->advertised_mtu; | 875 | l->mtu = l->advertised_mtu; |
| 874 | 876 | ||
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index bff241f03525..89993afe0fbd 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg, | |||
| 909 | for (; i < TIPC_NAMETBL_SIZE; i++) { | 909 | for (; i < TIPC_NAMETBL_SIZE; i++) { |
| 910 | head = &tn->nametbl->services[i]; | 910 | head = &tn->nametbl->services[i]; |
| 911 | 911 | ||
| 912 | if (*last_type) { | 912 | if (*last_type || |
| 913 | (!i && *last_key && (*last_lower == *last_key))) { | ||
| 913 | service = tipc_service_find(net, *last_type); | 914 | service = tipc_service_find(net, *last_type); |
| 914 | if (!service) | 915 | if (!service) |
| 915 | return -EPIPE; | 916 | return -EPIPE; |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 4ad3586da8f0..340a6e7c43a7 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | |||
| 267 | if (msg->rep_type) | 267 | if (msg->rep_type) |
| 268 | tipc_tlv_init(msg->rep, msg->rep_type); | 268 | tipc_tlv_init(msg->rep, msg->rep_type); |
| 269 | 269 | ||
| 270 | if (cmd->header) | 270 | if (cmd->header) { |
| 271 | (*cmd->header)(msg); | 271 | err = (*cmd->header)(msg); |
| 272 | if (err) { | ||
| 273 | kfree_skb(msg->rep); | ||
| 274 | msg->rep = NULL; | ||
| 275 | return err; | ||
| 276 | } | ||
| 277 | } | ||
| 272 | 278 | ||
| 273 | arg = nlmsg_new(0, GFP_KERNEL); | 279 | arg = nlmsg_new(0, GFP_KERNEL); |
| 274 | if (!arg) { | 280 | if (!arg) { |
| @@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 397 | if (!bearer) | 403 | if (!bearer) |
| 398 | return -EMSGSIZE; | 404 | return -EMSGSIZE; |
| 399 | 405 | ||
| 400 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | 406 | len = TLV_GET_DATA_LEN(msg->req); |
| 407 | len -= offsetof(struct tipc_bearer_config, name); | ||
| 408 | if (len <= 0) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 411 | len = min_t(int, len, TIPC_MAX_BEARER_NAME); | ||
| 401 | if (!string_is_valid(b->name, len)) | 412 | if (!string_is_valid(b->name, len)) |
| 402 | return -EINVAL; | 413 | return -EINVAL; |
| 403 | 414 | ||
| @@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 766 | 777 | ||
| 767 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 778 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
| 768 | 779 | ||
| 769 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | 780 | len = TLV_GET_DATA_LEN(msg->req); |
| 781 | len -= offsetof(struct tipc_link_config, name); | ||
| 782 | if (len <= 0) | ||
| 783 | return -EINVAL; | ||
| 784 | |||
| 785 | len = min_t(int, len, TIPC_MAX_LINK_NAME); | ||
| 770 | if (!string_is_valid(lc->name, len)) | 786 | if (!string_is_valid(lc->name, len)) |
| 771 | return -EINVAL; | 787 | return -EINVAL; |
| 772 | 788 | ||
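In the tipc netlink_compat hunks above, TLV_GET_DATA_LEN() returns the length of the whole TLV payload, while string_is_valid() is applied only to the name field that follows the fixed-size members of struct tipc_bearer_config / struct tipc_link_config. Subtracting offsetof(..., name) therefore yields the bytes actually available for the name, and a TLV shorter than that fixed header is rejected instead of passing a bogus length to the string check. Condensed into a hypothetical helper (not part of the diff) that mirrors both hunks:

static int name_len_or_error(int tlv_data_len, size_t name_off, int name_max)
{
	int len = tlv_data_len - (int)name_off;	/* bytes left for the name */

	if (len <= 0)
		return -EINVAL;			/* TLV too short to hold a name */
	return min_t(int, len, name_max);	/* cap at the protocol maximum */
}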
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c index 3481e4906bd6..9df82a573aa7 100644 --- a/net/tipc/sysctl.c +++ b/net/tipc/sysctl.c | |||
| @@ -38,6 +38,8 @@ | |||
| 38 | 38 | ||
| 39 | #include <linux/sysctl.h> | 39 | #include <linux/sysctl.h> |
| 40 | 40 | ||
| 41 | static int zero; | ||
| 42 | static int one = 1; | ||
| 41 | static struct ctl_table_header *tipc_ctl_hdr; | 43 | static struct ctl_table_header *tipc_ctl_hdr; |
| 42 | 44 | ||
| 43 | static struct ctl_table tipc_table[] = { | 45 | static struct ctl_table tipc_table[] = { |
| @@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = { | |||
| 46 | .data = &sysctl_tipc_rmem, | 48 | .data = &sysctl_tipc_rmem, |
| 47 | .maxlen = sizeof(sysctl_tipc_rmem), | 49 | .maxlen = sizeof(sysctl_tipc_rmem), |
| 48 | .mode = 0644, | 50 | .mode = 0644, |
| 49 | .proc_handler = proc_dointvec, | 51 | .proc_handler = proc_dointvec_minmax, |
| 52 | .extra1 = &one, | ||
| 50 | }, | 53 | }, |
| 51 | { | 54 | { |
| 52 | .procname = "named_timeout", | 55 | .procname = "named_timeout", |
| 53 | .data = &sysctl_tipc_named_timeout, | 56 | .data = &sysctl_tipc_named_timeout, |
| 54 | .maxlen = sizeof(sysctl_tipc_named_timeout), | 57 | .maxlen = sizeof(sysctl_tipc_named_timeout), |
| 55 | .mode = 0644, | 58 | .mode = 0644, |
| 56 | .proc_handler = proc_dointvec, | 59 | .proc_handler = proc_dointvec_minmax, |
| 60 | .extra1 = &zero, | ||
| 57 | }, | 61 | }, |
| 58 | { | 62 | { |
| 59 | .procname = "sk_filter", | 63 | .procname = "sk_filter", |
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 135a7ee9db03..cc0256939eb6 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c | |||
| @@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock); | |||
| 52 | 52 | ||
| 53 | static void tls_device_free_ctx(struct tls_context *ctx) | 53 | static void tls_device_free_ctx(struct tls_context *ctx) |
| 54 | { | 54 | { |
| 55 | if (ctx->tx_conf == TLS_HW) | 55 | if (ctx->tx_conf == TLS_HW) { |
| 56 | kfree(tls_offload_ctx_tx(ctx)); | 56 | kfree(tls_offload_ctx_tx(ctx)); |
| 57 | kfree(ctx->tx.rec_seq); | ||
| 58 | kfree(ctx->tx.iv); | ||
| 59 | } | ||
| 57 | 60 | ||
| 58 | if (ctx->rx_conf == TLS_HW) | 61 | if (ctx->rx_conf == TLS_HW) |
| 59 | kfree(tls_offload_ctx_rx(ctx)); | 62 | kfree(tls_offload_ctx_rx(ctx)); |
| @@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk) | |||
| 216 | } | 219 | } |
| 217 | EXPORT_SYMBOL(tls_device_sk_destruct); | 220 | EXPORT_SYMBOL(tls_device_sk_destruct); |
| 218 | 221 | ||
| 222 | void tls_device_free_resources_tx(struct sock *sk) | ||
| 223 | { | ||
| 224 | struct tls_context *tls_ctx = tls_get_ctx(sk); | ||
| 225 | |||
| 226 | tls_free_partial_record(sk, tls_ctx); | ||
| 227 | } | ||
| 228 | |||
| 219 | static void tls_append_frag(struct tls_record_info *record, | 229 | static void tls_append_frag(struct tls_record_info *record, |
| 220 | struct page_frag *pfrag, | 230 | struct page_frag *pfrag, |
| 221 | int size) | 231 | int size) |
| @@ -894,7 +904,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) | |||
| 894 | goto release_netdev; | 904 | goto release_netdev; |
| 895 | 905 | ||
| 896 | free_sw_resources: | 906 | free_sw_resources: |
| 907 | up_read(&device_offload_lock); | ||
| 897 | tls_sw_free_resources_rx(sk); | 908 | tls_sw_free_resources_rx(sk); |
| 909 | down_read(&device_offload_lock); | ||
| 898 | release_ctx: | 910 | release_ctx: |
| 899 | ctx->priv_ctx_rx = NULL; | 911 | ctx->priv_ctx_rx = NULL; |
| 900 | release_netdev: | 912 | release_netdev: |
| @@ -929,8 +941,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk) | |||
| 929 | } | 941 | } |
| 930 | out: | 942 | out: |
| 931 | up_read(&device_offload_lock); | 943 | up_read(&device_offload_lock); |
| 932 | kfree(tls_ctx->rx.rec_seq); | ||
| 933 | kfree(tls_ctx->rx.iv); | ||
| 934 | tls_sw_release_resources_rx(sk); | 944 | tls_sw_release_resources_rx(sk); |
| 935 | } | 945 | } |
| 936 | 946 | ||
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 54c3a758f2a7..a3ebd4b02714 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c | |||
| @@ -194,6 +194,9 @@ static void update_chksum(struct sk_buff *skb, int headln) | |||
| 194 | 194 | ||
| 195 | static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) | 195 | static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) |
| 196 | { | 196 | { |
| 197 | struct sock *sk = skb->sk; | ||
| 198 | int delta; | ||
| 199 | |||
| 197 | skb_copy_header(nskb, skb); | 200 | skb_copy_header(nskb, skb); |
| 198 | 201 | ||
| 199 | skb_put(nskb, skb->len); | 202 | skb_put(nskb, skb->len); |
| @@ -201,11 +204,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) | |||
| 201 | update_chksum(nskb, headln); | 204 | update_chksum(nskb, headln); |
| 202 | 205 | ||
| 203 | nskb->destructor = skb->destructor; | 206 | nskb->destructor = skb->destructor; |
| 204 | nskb->sk = skb->sk; | 207 | nskb->sk = sk; |
| 205 | skb->destructor = NULL; | 208 | skb->destructor = NULL; |
| 206 | skb->sk = NULL; | 209 | skb->sk = NULL; |
| 207 | refcount_add(nskb->truesize - skb->truesize, | 210 | |
| 208 | &nskb->sk->sk_wmem_alloc); | 211 | delta = nskb->truesize - skb->truesize; |
| 212 | if (likely(delta < 0)) | ||
| 213 | WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc)); | ||
| 214 | else if (delta) | ||
| 215 | refcount_add(delta, &sk->sk_wmem_alloc); | ||
| 209 | } | 216 | } |
| 210 | 217 | ||
| 211 | /* This function may be called after the user socket is already | 218 | /* This function may be called after the user socket is already |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index df921a2904b9..478603f43964 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
| @@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, | |||
| 208 | return tls_push_sg(sk, ctx, sg, offset, flags); | 208 | return tls_push_sg(sk, ctx, sg, offset, flags); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx) | ||
| 212 | { | ||
| 213 | struct scatterlist *sg; | ||
| 214 | |||
| 215 | sg = ctx->partially_sent_record; | ||
| 216 | if (!sg) | ||
| 217 | return false; | ||
| 218 | |||
| 219 | while (1) { | ||
| 220 | put_page(sg_page(sg)); | ||
| 221 | sk_mem_uncharge(sk, sg->length); | ||
| 222 | |||
| 223 | if (sg_is_last(sg)) | ||
| 224 | break; | ||
| 225 | sg++; | ||
| 226 | } | ||
| 227 | ctx->partially_sent_record = NULL; | ||
| 228 | return true; | ||
| 229 | } | ||
| 230 | |||
| 211 | static void tls_write_space(struct sock *sk) | 231 | static void tls_write_space(struct sock *sk) |
| 212 | { | 232 | { |
| 213 | struct tls_context *ctx = tls_get_ctx(sk); | 233 | struct tls_context *ctx = tls_get_ctx(sk); |
| @@ -267,13 +287,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | |||
| 267 | kfree(ctx->tx.rec_seq); | 287 | kfree(ctx->tx.rec_seq); |
| 268 | kfree(ctx->tx.iv); | 288 | kfree(ctx->tx.iv); |
| 269 | tls_sw_free_resources_tx(sk); | 289 | tls_sw_free_resources_tx(sk); |
| 290 | #ifdef CONFIG_TLS_DEVICE | ||
| 291 | } else if (ctx->tx_conf == TLS_HW) { | ||
| 292 | tls_device_free_resources_tx(sk); | ||
| 293 | #endif | ||
| 270 | } | 294 | } |
| 271 | 295 | ||
| 272 | if (ctx->rx_conf == TLS_SW) { | 296 | if (ctx->rx_conf == TLS_SW) |
| 273 | kfree(ctx->rx.rec_seq); | ||
| 274 | kfree(ctx->rx.iv); | ||
| 275 | tls_sw_free_resources_rx(sk); | 297 | tls_sw_free_resources_rx(sk); |
| 276 | } | ||
| 277 | 298 | ||
| 278 | #ifdef CONFIG_TLS_DEVICE | 299 | #ifdef CONFIG_TLS_DEVICE |
| 279 | if (ctx->rx_conf == TLS_HW) | 300 | if (ctx->rx_conf == TLS_HW) |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 425351ac2a9b..29d6af43dd24 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, | |||
| 1484 | 1484 | ||
| 1485 | return err; | 1485 | return err; |
| 1486 | } | 1486 | } |
| 1487 | } else { | ||
| 1488 | *zc = false; | ||
| 1487 | } | 1489 | } |
| 1488 | 1490 | ||
| 1489 | rxm->full_len -= padding_length(ctx, tls_ctx, skb); | 1491 | rxm->full_len -= padding_length(ctx, tls_ctx, skb); |
| @@ -2050,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk) | |||
| 2050 | /* Free up un-sent records in tx_list. First, free | 2052 | /* Free up un-sent records in tx_list. First, free |
| 2051 | * the partially sent record if any at head of tx_list. | 2053 | * the partially sent record if any at head of tx_list. |
| 2052 | */ | 2054 | */ |
| 2053 | if (tls_ctx->partially_sent_record) { | 2055 | if (tls_free_partial_record(sk, tls_ctx)) { |
| 2054 | struct scatterlist *sg = tls_ctx->partially_sent_record; | ||
| 2055 | |||
| 2056 | while (1) { | ||
| 2057 | put_page(sg_page(sg)); | ||
| 2058 | sk_mem_uncharge(sk, sg->length); | ||
| 2059 | |||
| 2060 | if (sg_is_last(sg)) | ||
| 2061 | break; | ||
| 2062 | sg++; | ||
| 2063 | } | ||
| 2064 | |||
| 2065 | tls_ctx->partially_sent_record = NULL; | ||
| 2066 | |||
| 2067 | rec = list_first_entry(&ctx->tx_list, | 2056 | rec = list_first_entry(&ctx->tx_list, |
| 2068 | struct tls_rec, list); | 2057 | struct tls_rec, list); |
| 2069 | list_del(&rec->list); | 2058 | list_del(&rec->list); |
| @@ -2089,6 +2078,9 @@ void tls_sw_release_resources_rx(struct sock *sk) | |||
| 2089 | struct tls_context *tls_ctx = tls_get_ctx(sk); | 2078 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 2090 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); | 2079 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); |
| 2091 | 2080 | ||
| 2081 | kfree(tls_ctx->rx.rec_seq); | ||
| 2082 | kfree(tls_ctx->rx.iv); | ||
| 2083 | |||
| 2092 | if (ctx->aead_recv) { | 2084 | if (ctx->aead_recv) { |
| 2093 | kfree_skb(ctx->recv_pkt); | 2085 | kfree_skb(ctx->recv_pkt); |
| 2094 | ctx->recv_pkt = NULL; | 2086 | ctx->recv_pkt = NULL; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25a9e3b5c154..47e30a58566c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 13650 | .policy = nl80211_policy, | 13650 | .policy = nl80211_policy, |
| 13651 | .flags = GENL_UNS_ADMIN_PERM, | 13651 | .flags = GENL_UNS_ADMIN_PERM, |
| 13652 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13652 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
| 13653 | NL80211_FLAG_NEED_RTNL, | 13653 | NL80211_FLAG_NEED_RTNL | |
| 13654 | NL80211_FLAG_CLEAR_SKB, | ||
| 13654 | }, | 13655 | }, |
| 13655 | { | 13656 | { |
| 13656 | .cmd = NL80211_CMD_DEAUTHENTICATE, | 13657 | .cmd = NL80211_CMD_DEAUTHENTICATE, |
| @@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 13701 | .policy = nl80211_policy, | 13702 | .policy = nl80211_policy, |
| 13702 | .flags = GENL_UNS_ADMIN_PERM, | 13703 | .flags = GENL_UNS_ADMIN_PERM, |
| 13703 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13704 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
| 13704 | NL80211_FLAG_NEED_RTNL, | 13705 | NL80211_FLAG_NEED_RTNL | |
| 13706 | NL80211_FLAG_CLEAR_SKB, | ||
| 13705 | }, | 13707 | }, |
| 13706 | { | 13708 | { |
| 13707 | .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, | 13709 | .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, |
| @@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 13709 | .policy = nl80211_policy, | 13711 | .policy = nl80211_policy, |
| 13710 | .flags = GENL_ADMIN_PERM, | 13712 | .flags = GENL_ADMIN_PERM, |
| 13711 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13713 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
| 13712 | NL80211_FLAG_NEED_RTNL, | 13714 | NL80211_FLAG_NEED_RTNL | |
| 13715 | NL80211_FLAG_CLEAR_SKB, | ||
| 13713 | }, | 13716 | }, |
| 13714 | { | 13717 | { |
| 13715 | .cmd = NL80211_CMD_DISCONNECT, | 13718 | .cmd = NL80211_CMD_DISCONNECT, |
| @@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 13738 | .policy = nl80211_policy, | 13741 | .policy = nl80211_policy, |
| 13739 | .flags = GENL_UNS_ADMIN_PERM, | 13742 | .flags = GENL_UNS_ADMIN_PERM, |
| 13740 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 13743 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
| 13741 | NL80211_FLAG_NEED_RTNL, | 13744 | NL80211_FLAG_NEED_RTNL | |
| 13745 | NL80211_FLAG_CLEAR_SKB, | ||
| 13742 | }, | 13746 | }, |
| 13743 | { | 13747 | { |
| 13744 | .cmd = NL80211_CMD_DEL_PMKSA, | 13748 | .cmd = NL80211_CMD_DEL_PMKSA, |
| @@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 14090 | .policy = nl80211_policy, | 14094 | .policy = nl80211_policy, |
| 14091 | .flags = GENL_UNS_ADMIN_PERM, | 14095 | .flags = GENL_UNS_ADMIN_PERM, |
| 14092 | .internal_flags = NL80211_FLAG_NEED_WIPHY | | 14096 | .internal_flags = NL80211_FLAG_NEED_WIPHY | |
| 14093 | NL80211_FLAG_NEED_RTNL, | 14097 | NL80211_FLAG_NEED_RTNL | |
| 14098 | NL80211_FLAG_CLEAR_SKB, | ||
| 14094 | }, | 14099 | }, |
| 14095 | { | 14100 | { |
| 14096 | .cmd = NL80211_CMD_SET_QOS_MAP, | 14101 | .cmd = NL80211_CMD_SET_QOS_MAP, |
| @@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = { | |||
| 14145 | .doit = nl80211_set_pmk, | 14150 | .doit = nl80211_set_pmk, |
| 14146 | .policy = nl80211_policy, | 14151 | .policy = nl80211_policy, |
| 14147 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | | 14152 | .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | |
| 14148 | NL80211_FLAG_NEED_RTNL, | 14153 | NL80211_FLAG_NEED_RTNL | |
| 14154 | NL80211_FLAG_CLEAR_SKB, | ||
| 14149 | }, | 14155 | }, |
| 14150 | { | 14156 | { |
| 14151 | .cmd = NL80211_CMD_DEL_PMK, | 14157 | .cmd = NL80211_CMD_DEL_PMK, |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2f1bf91eb226..0ba778f371cb 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1, | |||
| 1309 | return dfs_region1; | 1309 | return dfs_region1; |
| 1310 | } | 1310 | } |
| 1311 | 1311 | ||
| 1312 | static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1, | ||
| 1313 | const struct ieee80211_wmm_ac *wmm_ac2, | ||
| 1314 | struct ieee80211_wmm_ac *intersect) | ||
| 1315 | { | ||
| 1316 | intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min); | ||
| 1317 | intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max); | ||
| 1318 | intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot); | ||
| 1319 | intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn); | ||
| 1320 | } | ||
| 1321 | |||
| 1312 | /* | 1322 | /* |
| 1313 | * Helper for regdom_intersect(), this does the real | 1323 | * Helper for regdom_intersect(), this does the real |
| 1314 | * mathematical intersection fun | 1324 | * mathematical intersection fun |
| @@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
| 1323 | struct ieee80211_freq_range *freq_range; | 1333 | struct ieee80211_freq_range *freq_range; |
| 1324 | const struct ieee80211_power_rule *power_rule1, *power_rule2; | 1334 | const struct ieee80211_power_rule *power_rule1, *power_rule2; |
| 1325 | struct ieee80211_power_rule *power_rule; | 1335 | struct ieee80211_power_rule *power_rule; |
| 1336 | const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2; | ||
| 1337 | struct ieee80211_wmm_rule *wmm_rule; | ||
| 1326 | u32 freq_diff, max_bandwidth1, max_bandwidth2; | 1338 | u32 freq_diff, max_bandwidth1, max_bandwidth2; |
| 1327 | 1339 | ||
| 1328 | freq_range1 = &rule1->freq_range; | 1340 | freq_range1 = &rule1->freq_range; |
| @@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
| 1333 | power_rule2 = &rule2->power_rule; | 1345 | power_rule2 = &rule2->power_rule; |
| 1334 | power_rule = &intersected_rule->power_rule; | 1346 | power_rule = &intersected_rule->power_rule; |
| 1335 | 1347 | ||
| 1348 | wmm_rule1 = &rule1->wmm_rule; | ||
| 1349 | wmm_rule2 = &rule2->wmm_rule; | ||
| 1350 | wmm_rule = &intersected_rule->wmm_rule; | ||
| 1351 | |||
| 1336 | freq_range->start_freq_khz = max(freq_range1->start_freq_khz, | 1352 | freq_range->start_freq_khz = max(freq_range1->start_freq_khz, |
| 1337 | freq_range2->start_freq_khz); | 1353 | freq_range2->start_freq_khz); |
| 1338 | freq_range->end_freq_khz = min(freq_range1->end_freq_khz, | 1354 | freq_range->end_freq_khz = min(freq_range1->end_freq_khz, |
| @@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, | |||
| 1376 | intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, | 1392 | intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, |
| 1377 | rule2->dfs_cac_ms); | 1393 | rule2->dfs_cac_ms); |
| 1378 | 1394 | ||
| 1395 | if (rule1->has_wmm && rule2->has_wmm) { | ||
| 1396 | u8 ac; | ||
| 1397 | |||
| 1398 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | ||
| 1399 | reg_wmm_rules_intersect(&wmm_rule1->client[ac], | ||
| 1400 | &wmm_rule2->client[ac], | ||
| 1401 | &wmm_rule->client[ac]); | ||
| 1402 | reg_wmm_rules_intersect(&wmm_rule1->ap[ac], | ||
| 1403 | &wmm_rule2->ap[ac], | ||
| 1404 | &wmm_rule->ap[ac]); | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | intersected_rule->has_wmm = true; | ||
| 1408 | } else if (rule1->has_wmm) { | ||
| 1409 | *wmm_rule = *wmm_rule1; | ||
| 1410 | intersected_rule->has_wmm = true; | ||
| 1411 | } else if (rule2->has_wmm) { | ||
| 1412 | *wmm_rule = *wmm_rule2; | ||
| 1413 | intersected_rule->has_wmm = true; | ||
| 1414 | } else { | ||
| 1415 | intersected_rule->has_wmm = false; | ||
| 1416 | } | ||
| 1417 | |||
| 1379 | if (!is_valid_reg_rule(intersected_rule)) | 1418 | if (!is_valid_reg_rule(intersected_rule)) |
| 1380 | return -EINVAL; | 1419 | return -EINVAL; |
| 1381 | 1420 | ||
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 287518c6caa4..04d888628f29 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
| @@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, | |||
| 190 | /* copy subelement as we need to change its content to | 190 | /* copy subelement as we need to change its content to |
| 191 | * mark an ie after it is processed. | 191 | * mark an ie after it is processed. |
| 192 | */ | 192 | */ |
| 193 | sub_copy = kmalloc(subie_len, gfp); | 193 | sub_copy = kmemdup(subelement, subie_len, gfp); |
| 194 | if (!sub_copy) | 194 | if (!sub_copy) |
| 195 | return 0; | 195 | return 0; |
| 196 | memcpy(sub_copy, subelement, subie_len); | ||
| 197 | 196 | ||
| 198 | pos = &new_ie[0]; | 197 | pos = &new_ie[0]; |
| 199 | 198 | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index e4b8db5e81ec..75899b62bdc9 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) | |||
| 1220 | else if (rate->bw == RATE_INFO_BW_HE_RU && | 1220 | else if (rate->bw == RATE_INFO_BW_HE_RU && |
| 1221 | rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) | 1221 | rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) |
| 1222 | result = rates_26[rate->he_gi]; | 1222 | result = rates_26[rate->he_gi]; |
| 1223 | else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", | 1223 | else { |
| 1224 | rate->bw, rate->he_ru_alloc)) | 1224 | WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", |
| 1225 | rate->bw, rate->he_ru_alloc); | ||
| 1225 | return 0; | 1226 | return 0; |
| 1227 | } | ||
| 1226 | 1228 | ||
| 1227 | /* now scale to the appropriate MCS */ | 1229 | /* now scale to the appropriate MCS */ |
| 1228 | tmp = result; | 1230 | tmp = result; |
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh index 27400b0cd732..000dc6437893 100644 --- a/scripts/atomic/gen-atomics.sh +++ b/scripts/atomic/gen-atomics.sh | |||
| @@ -13,7 +13,7 @@ gen-atomic-long.sh asm-generic/atomic-long.h | |||
| 13 | gen-atomic-fallback.sh linux/atomic-fallback.h | 13 | gen-atomic-fallback.sh linux/atomic-fallback.h |
| 14 | EOF | 14 | EOF |
| 15 | while read script header; do | 15 | while read script header; do |
| 16 | ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header} | 16 | /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header} |
| 17 | HASH="$(sha1sum ${LINUXDIR}/include/${header})" | 17 | HASH="$(sha1sum ${LINUXDIR}/include/${header})" |
| 18 | HASH="${HASH%% *}" | 18 | HASH="${HASH%% *}" |
| 19 | printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header} | 19 | printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header} |
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci new file mode 100644 index 000000000000..350145da7669 --- /dev/null +++ b/scripts/coccinelle/api/stream_open.cocci | |||
| @@ -0,0 +1,363 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Author: Kirill Smelkov (kirr@nexedi.com) | ||
| 3 | // | ||
| 4 | // Search for stream-like files that are using nonseekable_open and convert | ||
| 5 | // them to stream_open. A stream-like file is a file that does not use ppos in | ||
| 6 | // its read and write. Rationale for the conversion is to avoid deadlock in | ||
| 7 | // between read and write. | ||
| 8 | |||
| 9 | virtual report | ||
| 10 | virtual patch | ||
| 11 | virtual explain // explain decisions in the patch (SPFLAGS="-D explain") | ||
| 12 | |||
| 13 | // stream-like reader & writer - ones that do not depend on f_pos. | ||
| 14 | @ stream_reader @ | ||
| 15 | identifier readstream, ppos; | ||
| 16 | identifier f, buf, len; | ||
| 17 | type loff_t; | ||
| 18 | @@ | ||
| 19 | ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos) | ||
| 20 | { | ||
| 21 | ... when != ppos | ||
| 22 | } | ||
| 23 | |||
| 24 | @ stream_writer @ | ||
| 25 | identifier writestream, ppos; | ||
| 26 | identifier f, buf, len; | ||
| 27 | type loff_t; | ||
| 28 | @@ | ||
| 29 | ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos) | ||
| 30 | { | ||
| 31 | ... when != ppos | ||
| 32 | } | ||
| 33 | |||
| 34 | |||
| 35 | // a function that blocks | ||
| 36 | @ blocks @ | ||
| 37 | identifier block_f; | ||
| 38 | identifier wait_event =~ "^wait_event_.*"; | ||
| 39 | @@ | ||
| 40 | block_f(...) { | ||
| 41 | ... when exists | ||
| 42 | wait_event(...) | ||
| 43 | ... when exists | ||
| 44 | } | ||
| 45 | |||
| 46 | // stream_reader that can block inside. | ||
| 47 | // | ||
| 48 | // XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait()) | ||
| 49 | // XXX currently reader_blocks supports only direct and 1-level indirect cases. | ||
| 50 | @ reader_blocks_direct @ | ||
| 51 | identifier stream_reader.readstream; | ||
| 52 | identifier wait_event =~ "^wait_event_.*"; | ||
| 53 | @@ | ||
| 54 | readstream(...) | ||
| 55 | { | ||
| 56 | ... when exists | ||
| 57 | wait_event(...) | ||
| 58 | ... when exists | ||
| 59 | } | ||
| 60 | |||
| 61 | @ reader_blocks_1 @ | ||
| 62 | identifier stream_reader.readstream; | ||
| 63 | identifier blocks.block_f; | ||
| 64 | @@ | ||
| 65 | readstream(...) | ||
| 66 | { | ||
| 67 | ... when exists | ||
| 68 | block_f(...) | ||
| 69 | ... when exists | ||
| 70 | } | ||
| 71 | |||
| 72 | @ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @ | ||
| 73 | identifier stream_reader.readstream; | ||
| 74 | @@ | ||
| 75 | readstream(...) { | ||
| 76 | ... | ||
| 77 | } | ||
| 78 | |||
| 79 | |||
| 80 | // file_operations + whether they have _any_ .read, .write, .llseek ... at all. | ||
| 81 | // | ||
| 82 | // XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c) | ||
| 83 | @ fops0 @ | ||
| 84 | identifier fops; | ||
| 85 | @@ | ||
| 86 | struct file_operations fops = { | ||
| 87 | ... | ||
| 88 | }; | ||
| 89 | |||
| 90 | @ has_read @ | ||
| 91 | identifier fops0.fops; | ||
| 92 | identifier read_f; | ||
| 93 | @@ | ||
| 94 | struct file_operations fops = { | ||
| 95 | .read = read_f, | ||
| 96 | }; | ||
| 97 | |||
| 98 | @ has_read_iter @ | ||
| 99 | identifier fops0.fops; | ||
| 100 | identifier read_iter_f; | ||
| 101 | @@ | ||
| 102 | struct file_operations fops = { | ||
| 103 | .read_iter = read_iter_f, | ||
| 104 | }; | ||
| 105 | |||
| 106 | @ has_write @ | ||
| 107 | identifier fops0.fops; | ||
| 108 | identifier write_f; | ||
| 109 | @@ | ||
| 110 | struct file_operations fops = { | ||
| 111 | .write = write_f, | ||
| 112 | }; | ||
| 113 | |||
| 114 | @ has_write_iter @ | ||
| 115 | identifier fops0.fops; | ||
| 116 | identifier write_iter_f; | ||
| 117 | @@ | ||
| 118 | struct file_operations fops = { | ||
| 119 | .write_iter = write_iter_f, | ||
| 120 | }; | ||
| 121 | |||
| 122 | @ has_llseek @ | ||
| 123 | identifier fops0.fops; | ||
| 124 | identifier llseek_f; | ||
| 125 | @@ | ||
| 126 | struct file_operations fops = { | ||
| 127 | .llseek = llseek_f, | ||
| 128 | }; | ||
| 129 | |||
| 130 | @ has_no_llseek @ | ||
| 131 | identifier fops0.fops; | ||
| 132 | @@ | ||
| 133 | struct file_operations fops = { | ||
| 134 | .llseek = no_llseek, | ||
| 135 | }; | ||
| 136 | |||
| 137 | @ has_mmap @ | ||
| 138 | identifier fops0.fops; | ||
| 139 | identifier mmap_f; | ||
| 140 | @@ | ||
| 141 | struct file_operations fops = { | ||
| 142 | .mmap = mmap_f, | ||
| 143 | }; | ||
| 144 | |||
| 145 | @ has_copy_file_range @ | ||
| 146 | identifier fops0.fops; | ||
| 147 | identifier copy_file_range_f; | ||
| 148 | @@ | ||
| 149 | struct file_operations fops = { | ||
| 150 | .copy_file_range = copy_file_range_f, | ||
| 151 | }; | ||
| 152 | |||
| 153 | @ has_remap_file_range @ | ||
| 154 | identifier fops0.fops; | ||
| 155 | identifier remap_file_range_f; | ||
| 156 | @@ | ||
| 157 | struct file_operations fops = { | ||
| 158 | .remap_file_range = remap_file_range_f, | ||
| 159 | }; | ||
| 160 | |||
| 161 | @ has_splice_read @ | ||
| 162 | identifier fops0.fops; | ||
| 163 | identifier splice_read_f; | ||
| 164 | @@ | ||
| 165 | struct file_operations fops = { | ||
| 166 | .splice_read = splice_read_f, | ||
| 167 | }; | ||
| 168 | |||
| 169 | @ has_splice_write @ | ||
| 170 | identifier fops0.fops; | ||
| 171 | identifier splice_write_f; | ||
| 172 | @@ | ||
| 173 | struct file_operations fops = { | ||
| 174 | .splice_write = splice_write_f, | ||
| 175 | }; | ||
| 176 | |||
| 177 | |||
| 178 | // file_operations that is candidate for stream_open conversion - it does not | ||
| 179 | // use mmap and other methods that assume @offset access to file. | ||
| 180 | // | ||
| 181 | // XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now. | ||
| 182 | // XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops". | ||
| 183 | @ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @ | ||
| 184 | identifier fops0.fops; | ||
| 185 | @@ | ||
| 186 | struct file_operations fops = { | ||
| 187 | }; | ||
| 188 | |||
| 189 | |||
| 190 | // ---- conversions ---- | ||
| 191 | |||
| 192 | // XXX .open = nonseekable_open -> .open = stream_open | ||
| 193 | // XXX .open = func -> openfunc -> nonseekable_open | ||
| 194 | |||
| 195 | // read & write | ||
| 196 | // | ||
| 197 | // if both are used in the same file_operations together with an opener - | ||
| 198 | // under those conditions we can use stream_open instead of nonseekable_open. | ||
| 199 | @ fops_rw depends on maybe_stream @ | ||
| 200 | identifier fops0.fops, openfunc; | ||
| 201 | identifier stream_reader.readstream; | ||
| 202 | identifier stream_writer.writestream; | ||
| 203 | @@ | ||
| 204 | struct file_operations fops = { | ||
| 205 | .open = openfunc, | ||
| 206 | .read = readstream, | ||
| 207 | .write = writestream, | ||
| 208 | }; | ||
| 209 | |||
| 210 | @ report_rw depends on report @ | ||
| 211 | identifier fops_rw.openfunc; | ||
| 212 | position p1; | ||
| 213 | @@ | ||
| 214 | openfunc(...) { | ||
| 215 | <... | ||
| 216 | nonseekable_open@p1 | ||
| 217 | ...> | ||
| 218 | } | ||
| 219 | |||
| 220 | @ script:python depends on report && reader_blocks @ | ||
| 221 | fops << fops0.fops; | ||
| 222 | p << report_rw.p1; | ||
| 223 | @@ | ||
| 224 | coccilib.report.print_report(p[0], | ||
| 225 | "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,)) | ||
| 226 | |||
| 227 | @ script:python depends on report && !reader_blocks @ | ||
| 228 | fops << fops0.fops; | ||
| 229 | p << report_rw.p1; | ||
| 230 | @@ | ||
| 231 | coccilib.report.print_report(p[0], | ||
| 232 | "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 233 | |||
| 234 | |||
| 235 | @ explain_rw_deadlocked depends on explain && reader_blocks @ | ||
| 236 | identifier fops_rw.openfunc; | ||
| 237 | @@ | ||
| 238 | openfunc(...) { | ||
| 239 | <... | ||
| 240 | - nonseekable_open | ||
| 241 | + nonseekable_open /* read & write (was deadlock) */ | ||
| 242 | ...> | ||
| 243 | } | ||
| 244 | |||
| 245 | |||
| 246 | @ explain_rw_nodeadlock depends on explain && !reader_blocks @ | ||
| 247 | identifier fops_rw.openfunc; | ||
| 248 | @@ | ||
| 249 | openfunc(...) { | ||
| 250 | <... | ||
| 251 | - nonseekable_open | ||
| 252 | + nonseekable_open /* read & write (no direct deadlock) */ | ||
| 253 | ...> | ||
| 254 | } | ||
| 255 | |||
| 256 | @ patch_rw depends on patch @ | ||
| 257 | identifier fops_rw.openfunc; | ||
| 258 | @@ | ||
| 259 | openfunc(...) { | ||
| 260 | <... | ||
| 261 | - nonseekable_open | ||
| 262 | + stream_open | ||
| 263 | ...> | ||
| 264 | } | ||
| 265 | |||
| 266 | |||
| 267 | // read, but not write | ||
| 268 | @ fops_r depends on maybe_stream && !has_write @ | ||
| 269 | identifier fops0.fops, openfunc; | ||
| 270 | identifier stream_reader.readstream; | ||
| 271 | @@ | ||
| 272 | struct file_operations fops = { | ||
| 273 | .open = openfunc, | ||
| 274 | .read = readstream, | ||
| 275 | }; | ||
| 276 | |||
| 277 | @ report_r depends on report @ | ||
| 278 | identifier fops_r.openfunc; | ||
| 279 | position p1; | ||
| 280 | @@ | ||
| 281 | openfunc(...) { | ||
| 282 | <... | ||
| 283 | nonseekable_open@p1 | ||
| 284 | ...> | ||
| 285 | } | ||
| 286 | |||
| 287 | @ script:python depends on report @ | ||
| 288 | fops << fops0.fops; | ||
| 289 | p << report_r.p1; | ||
| 290 | @@ | ||
| 291 | coccilib.report.print_report(p[0], | ||
| 292 | "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 293 | |||
| 294 | @ explain_r depends on explain @ | ||
| 295 | identifier fops_r.openfunc; | ||
| 296 | @@ | ||
| 297 | openfunc(...) { | ||
| 298 | <... | ||
| 299 | - nonseekable_open | ||
| 300 | + nonseekable_open /* read only */ | ||
| 301 | ...> | ||
| 302 | } | ||
| 303 | |||
| 304 | @ patch_r depends on patch @ | ||
| 305 | identifier fops_r.openfunc; | ||
| 306 | @@ | ||
| 307 | openfunc(...) { | ||
| 308 | <... | ||
| 309 | - nonseekable_open | ||
| 310 | + stream_open | ||
| 311 | ...> | ||
| 312 | } | ||
| 313 | |||
| 314 | |||
| 315 | // write, but not read | ||
| 316 | @ fops_w depends on maybe_stream && !has_read @ | ||
| 317 | identifier fops0.fops, openfunc; | ||
| 318 | identifier stream_writer.writestream; | ||
| 319 | @@ | ||
| 320 | struct file_operations fops = { | ||
| 321 | .open = openfunc, | ||
| 322 | .write = writestream, | ||
| 323 | }; | ||
| 324 | |||
| 325 | @ report_w depends on report @ | ||
| 326 | identifier fops_w.openfunc; | ||
| 327 | position p1; | ||
| 328 | @@ | ||
| 329 | openfunc(...) { | ||
| 330 | <... | ||
| 331 | nonseekable_open@p1 | ||
| 332 | ...> | ||
| 333 | } | ||
| 334 | |||
| 335 | @ script:python depends on report @ | ||
| 336 | fops << fops0.fops; | ||
| 337 | p << report_w.p1; | ||
| 338 | @@ | ||
| 339 | coccilib.report.print_report(p[0], | ||
| 340 | "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) | ||
| 341 | |||
| 342 | @ explain_w depends on explain @ | ||
| 343 | identifier fops_w.openfunc; | ||
| 344 | @@ | ||
| 345 | openfunc(...) { | ||
| 346 | <... | ||
| 347 | - nonseekable_open | ||
| 348 | + nonseekable_open /* write only */ | ||
| 349 | ...> | ||
| 350 | } | ||
| 351 | |||
| 352 | @ patch_w depends on patch @ | ||
| 353 | identifier fops_w.openfunc; | ||
| 354 | @@ | ||
| 355 | openfunc(...) { | ||
| 356 | <... | ||
| 357 | - nonseekable_open | ||
| 358 | + stream_open | ||
| 359 | ...> | ||
| 360 | } | ||
| 361 | |||
| 362 | |||
| 363 | // no read, no write - don't change anything | ||
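The new semantic patch looks for file_operations whose .read/.write never touch *ppos and whose other methods do not imply positioned access, and reports (or applies) the nonseekable_open() -> stream_open() switch; stream_open() marks the file as a stream so the VFS no longer serializes reads and writes on f_pos, which removes the read-vs-write deadlock the script's header refers to. A minimal sketch of the kind of driver the script targets, with hypothetical demo_* names:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical stream-like char device: read/write ignore *ppos entirely. */
static ssize_t demo_read(struct file *f, char __user *buf,
                         size_t len, loff_t *ppos)
{
        return 0;       /* would block on / copy out of an internal FIFO */
}

static ssize_t demo_write(struct file *f, const char __user *buf,
                          size_t len, loff_t *ppos)
{
        return len;     /* would queue the data; the file position is meaningless */
}

static int demo_open(struct inode *inode, struct file *f)
{
        return stream_open(inode, f);   /* was: nonseekable_open(inode, f) */
}

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .open   = demo_open,
        .read   = demo_read,
        .write  = demo_write,
        .llseek = no_llseek,
};

The script deliberately skips anything with mmap, splice, copy_file_range, remap_file_range or a real llseek handler, since those genuinely depend on a file position.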
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 49d664ddff44..87500bde5a92 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
| @@ -1336,9 +1336,16 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR); | |||
| 1336 | bool aa_g_paranoid_load = true; | 1336 | bool aa_g_paranoid_load = true; |
| 1337 | module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO); | 1337 | module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO); |
| 1338 | 1338 | ||
| 1339 | static int param_get_aaintbool(char *buffer, const struct kernel_param *kp); | ||
| 1340 | static int param_set_aaintbool(const char *val, const struct kernel_param *kp); | ||
| 1341 | #define param_check_aaintbool param_check_int | ||
| 1342 | static const struct kernel_param_ops param_ops_aaintbool = { | ||
| 1343 | .set = param_set_aaintbool, | ||
| 1344 | .get = param_get_aaintbool | ||
| 1345 | }; | ||
| 1339 | /* Boot time disable flag */ | 1346 | /* Boot time disable flag */ |
| 1340 | static int apparmor_enabled __lsm_ro_after_init = 1; | 1347 | static int apparmor_enabled __lsm_ro_after_init = 1; |
| 1341 | module_param_named(enabled, apparmor_enabled, int, 0444); | 1348 | module_param_named(enabled, apparmor_enabled, aaintbool, 0444); |
| 1342 | 1349 | ||
| 1343 | static int __init apparmor_enabled_setup(char *str) | 1350 | static int __init apparmor_enabled_setup(char *str) |
| 1344 | { | 1351 | { |
| @@ -1413,6 +1420,46 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp) | |||
| 1413 | return param_get_uint(buffer, kp); | 1420 | return param_get_uint(buffer, kp); |
| 1414 | } | 1421 | } |
| 1415 | 1422 | ||
| 1423 | /* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */ | ||
| 1424 | static int param_set_aaintbool(const char *val, const struct kernel_param *kp) | ||
| 1425 | { | ||
| 1426 | struct kernel_param kp_local; | ||
| 1427 | bool value; | ||
| 1428 | int error; | ||
| 1429 | |||
| 1430 | if (apparmor_initialized) | ||
| 1431 | return -EPERM; | ||
| 1432 | |||
| 1433 | /* Create local copy, with arg pointing to bool type. */ | ||
| 1434 | value = !!*((int *)kp->arg); | ||
| 1435 | memcpy(&kp_local, kp, sizeof(kp_local)); | ||
| 1436 | kp_local.arg = &value; | ||
| 1437 | |||
| 1438 | error = param_set_bool(val, &kp_local); | ||
| 1439 | if (!error) | ||
| 1440 | *((int *)kp->arg) = *((bool *)kp_local.arg); | ||
| 1441 | return error; | ||
| 1442 | } | ||
| 1443 | |||
| 1444 | /* | ||
| 1445 | * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to | ||
| 1446 | * 1/0, this converts the "int that is actually bool" back to bool for | ||
| 1447 | * display in the /sys filesystem, while keeping it "int" for the LSM | ||
| 1448 | * infrastructure. | ||
| 1449 | */ | ||
| 1450 | static int param_get_aaintbool(char *buffer, const struct kernel_param *kp) | ||
| 1451 | { | ||
| 1452 | struct kernel_param kp_local; | ||
| 1453 | bool value; | ||
| 1454 | |||
| 1455 | /* Create local copy, with arg pointing to bool type. */ | ||
| 1456 | value = !!*((int *)kp->arg); | ||
| 1457 | memcpy(&kp_local, kp, sizeof(kp_local)); | ||
| 1458 | kp_local.arg = &value; | ||
| 1459 | |||
| 1460 | return param_get_bool(buffer, &kp_local); | ||
| 1461 | } | ||
| 1462 | |||
| 1416 | static int param_get_audit(char *buffer, const struct kernel_param *kp) | 1463 | static int param_get_audit(char *buffer, const struct kernel_param *kp) |
| 1417 | { | 1464 | { |
| 1418 | if (!apparmor_enabled) | 1465 | if (!apparmor_enabled) |
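The new aaintbool parameter type keeps apparmor_enabled as an int for the LSM infrastructure while /sys/module/apparmor/parameters/enabled keeps its Y/N presentation: the handlers copy the kernel_param, point ->arg at a local bool and delegate to param_set_bool()/param_get_bool(). The reason the declarations above are enough for module_param_named(enabled, apparmor_enabled, aaintbool, 0444) is the naming convention that macro relies on; here is a sketch with a hypothetical "myintbool" type (not the AppArmor code itself):

#include <linux/moduleparam.h>
#include <linux/string.h>

/* module_param_named(name, var, TYPE, perm) expands to references to
 * param_check_##TYPE and param_ops_##TYPE, so a custom parameter type
 * only has to provide those two names.
 */
static int my_flag = 1;                 /* stored as int, presented as Y/N */

static int param_set_myintbool(const char *val, const struct kernel_param *kp)
{
        struct kernel_param kp_local;
        bool value = !!*(int *)kp->arg;
        int err;

        memcpy(&kp_local, kp, sizeof(kp_local));  /* struct has const members */
        kp_local.arg = &value;
        err = param_set_bool(val, &kp_local);
        if (!err)
                *(int *)kp->arg = value;
        return err;
}

static int param_get_myintbool(char *buffer, const struct kernel_param *kp)
{
        struct kernel_param kp_local;
        bool value = !!*(int *)kp->arg;

        memcpy(&kp_local, kp, sizeof(kp_local));
        kp_local.arg = &value;
        return param_get_bool(buffer, &kp_local);
}

#define param_check_myintbool param_check_int   /* type-check the int storage */

static const struct kernel_param_ops param_ops_myintbool = {
        .set = param_set_myintbool,
        .get = param_get_myintbool,
};

module_param_named(my_flag_param, my_flag, myintbool, 0444);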
diff --git a/security/device_cgroup.c b/security/device_cgroup.c index cd97929fac66..dc28914fa72e 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c | |||
| @@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root, | |||
| 560 | devcg->behavior == DEVCG_DEFAULT_ALLOW) { | 560 | devcg->behavior == DEVCG_DEFAULT_ALLOW) { |
| 561 | rc = dev_exception_add(devcg, ex); | 561 | rc = dev_exception_add(devcg, ex); |
| 562 | if (rc) | 562 | if (rc) |
| 563 | break; | 563 | return rc; |
| 564 | } else { | 564 | } else { |
| 565 | /* | 565 | /* |
| 566 | * in the other possible cases: | 566 | * in the other possible cases: |
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index bcc9c6ead7fd..efdbf17f3915 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
| @@ -125,7 +125,7 @@ out: | |||
| 125 | */ | 125 | */ |
| 126 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, | 126 | int TSS_authhmac(unsigned char *digest, const unsigned char *key, |
| 127 | unsigned int keylen, unsigned char *h1, | 127 | unsigned int keylen, unsigned char *h1, |
| 128 | unsigned char *h2, unsigned char h3, ...) | 128 | unsigned char *h2, unsigned int h3, ...) |
| 129 | { | 129 | { |
| 130 | unsigned char paramdigest[SHA1_DIGEST_SIZE]; | 130 | unsigned char paramdigest[SHA1_DIGEST_SIZE]; |
| 131 | struct sdesc *sdesc; | 131 | struct sdesc *sdesc; |
| @@ -135,13 +135,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key, | |||
| 135 | int ret; | 135 | int ret; |
| 136 | va_list argp; | 136 | va_list argp; |
| 137 | 137 | ||
| 138 | if (!chip) | ||
| 139 | return -ENODEV; | ||
| 140 | |||
| 138 | sdesc = init_sdesc(hashalg); | 141 | sdesc = init_sdesc(hashalg); |
| 139 | if (IS_ERR(sdesc)) { | 142 | if (IS_ERR(sdesc)) { |
| 140 | pr_info("trusted_key: can't alloc %s\n", hash_alg); | 143 | pr_info("trusted_key: can't alloc %s\n", hash_alg); |
| 141 | return PTR_ERR(sdesc); | 144 | return PTR_ERR(sdesc); |
| 142 | } | 145 | } |
| 143 | 146 | ||
| 144 | c = h3; | 147 | c = !!h3; |
| 145 | ret = crypto_shash_init(&sdesc->shash); | 148 | ret = crypto_shash_init(&sdesc->shash); |
| 146 | if (ret < 0) | 149 | if (ret < 0) |
| 147 | goto out; | 150 | goto out; |
| @@ -196,6 +199,9 @@ int TSS_checkhmac1(unsigned char *buffer, | |||
| 196 | va_list argp; | 199 | va_list argp; |
| 197 | int ret; | 200 | int ret; |
| 198 | 201 | ||
| 202 | if (!chip) | ||
| 203 | return -ENODEV; | ||
| 204 | |||
| 199 | bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); | 205 | bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); |
| 200 | tag = LOAD16(buffer, 0); | 206 | tag = LOAD16(buffer, 0); |
| 201 | ordinal = command; | 207 | ordinal = command; |
| @@ -363,6 +369,9 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen) | |||
| 363 | { | 369 | { |
| 364 | int rc; | 370 | int rc; |
| 365 | 371 | ||
| 372 | if (!chip) | ||
| 373 | return -ENODEV; | ||
| 374 | |||
| 366 | dump_tpm_buf(cmd); | 375 | dump_tpm_buf(cmd); |
| 367 | rc = tpm_send(chip, cmd, buflen); | 376 | rc = tpm_send(chip, cmd, buflen); |
| 368 | dump_tpm_buf(cmd); | 377 | dump_tpm_buf(cmd); |
| @@ -429,6 +438,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) | |||
| 429 | { | 438 | { |
| 430 | int ret; | 439 | int ret; |
| 431 | 440 | ||
| 441 | if (!chip) | ||
| 442 | return -ENODEV; | ||
| 443 | |||
| 432 | INIT_BUF(tb); | 444 | INIT_BUF(tb); |
| 433 | store16(tb, TPM_TAG_RQU_COMMAND); | 445 | store16(tb, TPM_TAG_RQU_COMMAND); |
| 434 | store32(tb, TPM_OIAP_SIZE); | 446 | store32(tb, TPM_OIAP_SIZE); |
| @@ -1245,9 +1257,13 @@ static int __init init_trusted(void) | |||
| 1245 | { | 1257 | { |
| 1246 | int ret; | 1258 | int ret; |
| 1247 | 1259 | ||
| 1260 | /* encrypted_keys.ko depends on successful load of this module even if | ||
| 1261 | * TPM is not used. | ||
| 1262 | */ | ||
| 1248 | chip = tpm_default_chip(); | 1263 | chip = tpm_default_chip(); |
| 1249 | if (!chip) | 1264 | if (!chip) |
| 1250 | return -ENOENT; | 1265 | return 0; |
| 1266 | |||
| 1251 | ret = init_digests(); | 1267 | ret = init_digests(); |
| 1252 | if (ret < 0) | 1268 | if (ret < 0) |
| 1253 | goto err_put; | 1269 | goto err_put; |
| @@ -1269,10 +1285,12 @@ err_put: | |||
| 1269 | 1285 | ||
| 1270 | static void __exit cleanup_trusted(void) | 1286 | static void __exit cleanup_trusted(void) |
| 1271 | { | 1287 | { |
| 1272 | put_device(&chip->dev); | 1288 | if (chip) { |
| 1273 | kfree(digests); | 1289 | put_device(&chip->dev); |
| 1274 | trusted_shash_release(); | 1290 | kfree(digests); |
| 1275 | unregister_key_type(&key_type_trusted); | 1291 | trusted_shash_release(); |
| 1292 | unregister_key_type(&key_type_trusted); | ||
| 1293 | } | ||
| 1276 | } | 1294 | } |
| 1277 | 1295 | ||
| 1278 | late_initcall(init_trusted); | 1296 | late_initcall(init_trusted); |
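The trusted-keys changes all follow from one decision: init_trusted() now succeeds without a TPM (returning 0 instead of -ENOENT) because encrypted_keys.ko must remain loadable on TPM-less systems, so every path that actually talks to the chip gains an "if (!chip) return -ENODEV;" guard and cleanup_trusted() only releases what was really acquired. A condensed sketch of that pattern with hypothetical demo_* names (the real module uses late_initcall()):

#include <linux/module.h>
#include <linux/device.h>
#include <linux/tpm.h>

static struct tpm_chip *chip;   /* NULL when no TPM is present */

static int demo_hardware_op(void)
{
        if (!chip)              /* every TPM-touching entry point checks first */
                return -ENODEV;
        /* ... talk to the TPM ... */
        return 0;
}

static int __init demo_init(void)
{
        chip = tpm_default_chip();
        if (!chip)
                return 0;       /* succeed anyway so dependent modules can load */
        /* ... register the key type, allocate digests, ... */
        return 0;
}

static void __exit demo_exit(void)
{
        if (chip) {             /* only undo what init actually did */
                put_device(&chip->dev);
                /* ... free digests, unregister the key type, ... */
        }
}

module_init(demo_init);
module_exit(demo_exit);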
diff --git a/sound/core/info.c b/sound/core/info.c index 96a074019c33..0eb169acc850 100644 --- a/sound/core/info.c +++ b/sound/core/info.c | |||
| @@ -713,8 +713,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent, | |||
| 713 | INIT_LIST_HEAD(&entry->list); | 713 | INIT_LIST_HEAD(&entry->list); |
| 714 | entry->parent = parent; | 714 | entry->parent = parent; |
| 715 | entry->module = module; | 715 | entry->module = module; |
| 716 | if (parent) | 716 | if (parent) { |
| 717 | mutex_lock(&parent->access); | ||
| 717 | list_add_tail(&entry->list, &parent->children); | 718 | list_add_tail(&entry->list, &parent->children); |
| 719 | mutex_unlock(&parent->access); | ||
| 720 | } | ||
| 718 | return entry; | 721 | return entry; |
| 719 | } | 722 | } |
| 720 | 723 | ||
| @@ -792,7 +795,12 @@ void snd_info_free_entry(struct snd_info_entry * entry) | |||
| 792 | list_for_each_entry_safe(p, n, &entry->children, list) | 795 | list_for_each_entry_safe(p, n, &entry->children, list) |
| 793 | snd_info_free_entry(p); | 796 | snd_info_free_entry(p); |
| 794 | 797 | ||
| 795 | list_del(&entry->list); | 798 | p = entry->parent; |
| 799 | if (p) { | ||
| 800 | mutex_lock(&p->access); | ||
| 801 | list_del(&entry->list); | ||
| 802 | mutex_unlock(&p->access); | ||
| 803 | } | ||
| 796 | kfree(entry->name); | 804 | kfree(entry->name); |
| 797 | if (entry->private_free) | 805 | if (entry->private_free) |
| 798 | entry->private_free(entry); | 806 | entry->private_free(entry); |
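Both info.c hunks close the same race: the parent's children list is now only modified under the parent's access mutex, so creating an entry on one CPU cannot corrupt the list while another CPU frees a sibling. A self-contained sketch of the pattern, using a hypothetical struct demo_node (mutexes assumed already initialised with mutex_init()):

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_node {
        struct list_head list;          /* links us into parent->children */
        struct list_head children;      /* protected by ->access */
        struct mutex access;
        struct demo_node *parent;
};

static void demo_node_attach(struct demo_node *child, struct demo_node *parent)
{
        child->parent = parent;
        mutex_lock(&parent->access);
        list_add_tail(&child->list, &parent->children);
        mutex_unlock(&parent->access);
}

static void demo_node_detach(struct demo_node *child)
{
        struct demo_node *parent = child->parent;

        if (parent) {                   /* lock the owner of the list, not the element */
                mutex_lock(&parent->access);
                list_del(&child->list);
                mutex_unlock(&parent->access);
        }
}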
diff --git a/sound/core/init.c b/sound/core/init.c index 0c4dc40376a7..079c12d64b0e 100644 --- a/sound/core/init.c +++ b/sound/core/init.c | |||
| @@ -382,14 +382,7 @@ int snd_card_disconnect(struct snd_card *card) | |||
| 382 | card->shutdown = 1; | 382 | card->shutdown = 1; |
| 383 | spin_unlock(&card->files_lock); | 383 | spin_unlock(&card->files_lock); |
| 384 | 384 | ||
| 385 | /* phase 1: disable fops (user space) operations for ALSA API */ | 385 | /* replace file->f_op with special dummy operations */ |
| 386 | mutex_lock(&snd_card_mutex); | ||
| 387 | snd_cards[card->number] = NULL; | ||
| 388 | clear_bit(card->number, snd_cards_lock); | ||
| 389 | mutex_unlock(&snd_card_mutex); | ||
| 390 | |||
| 391 | /* phase 2: replace file->f_op with special dummy operations */ | ||
| 392 | |||
| 393 | spin_lock(&card->files_lock); | 386 | spin_lock(&card->files_lock); |
| 394 | list_for_each_entry(mfile, &card->files_list, list) { | 387 | list_for_each_entry(mfile, &card->files_list, list) { |
| 395 | /* it's critical part, use endless loop */ | 388 | /* it's critical part, use endless loop */ |
| @@ -405,7 +398,7 @@ int snd_card_disconnect(struct snd_card *card) | |||
| 405 | } | 398 | } |
| 406 | spin_unlock(&card->files_lock); | 399 | spin_unlock(&card->files_lock); |
| 407 | 400 | ||
| 408 | /* phase 3: notify all connected devices about disconnection */ | 401 | /* notify all connected devices about disconnection */ |
| 409 | /* at this point, they cannot respond to any calls except release() */ | 402 | /* at this point, they cannot respond to any calls except release() */ |
| 410 | 403 | ||
| 411 | #if IS_ENABLED(CONFIG_SND_MIXER_OSS) | 404 | #if IS_ENABLED(CONFIG_SND_MIXER_OSS) |
| @@ -421,6 +414,13 @@ int snd_card_disconnect(struct snd_card *card) | |||
| 421 | device_del(&card->card_dev); | 414 | device_del(&card->card_dev); |
| 422 | card->registered = false; | 415 | card->registered = false; |
| 423 | } | 416 | } |
| 417 | |||
| 418 | /* disable fops (user space) operations for ALSA API */ | ||
| 419 | mutex_lock(&snd_card_mutex); | ||
| 420 | snd_cards[card->number] = NULL; | ||
| 421 | clear_bit(card->number, snd_cards_lock); | ||
| 422 | mutex_unlock(&snd_card_mutex); | ||
| 423 | |||
| 424 | #ifdef CONFIG_PM | 424 | #ifdef CONFIG_PM |
| 425 | wake_up(&card->power_sleep); | 425 | wake_up(&card->power_sleep); |
| 426 | #endif | 426 | #endif |
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 7d4640d1fe9f..38e7deab6384 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
| @@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client, | |||
| 1252 | 1252 | ||
| 1253 | /* fill the info fields */ | 1253 | /* fill the info fields */ |
| 1254 | if (client_info->name[0]) | 1254 | if (client_info->name[0]) |
| 1255 | strlcpy(client->name, client_info->name, sizeof(client->name)); | 1255 | strscpy(client->name, client_info->name, sizeof(client->name)); |
| 1256 | 1256 | ||
| 1257 | client->filter = client_info->filter; | 1257 | client->filter = client_info->filter; |
| 1258 | client->event_lost = client_info->event_lost; | 1258 | client->event_lost = client_info->event_lost; |
| @@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg) | |||
| 1530 | /* set queue name */ | 1530 | /* set queue name */ |
| 1531 | if (!info->name[0]) | 1531 | if (!info->name[0]) |
| 1532 | snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); | 1532 | snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); |
| 1533 | strlcpy(q->name, info->name, sizeof(q->name)); | 1533 | strscpy(q->name, info->name, sizeof(q->name)); |
| 1534 | snd_use_lock_free(&q->use_lock); | 1534 | snd_use_lock_free(&q->use_lock); |
| 1535 | 1535 | ||
| 1536 | return 0; | 1536 | return 0; |
| @@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client, | |||
| 1592 | queuefree(q); | 1592 | queuefree(q); |
| 1593 | return -EPERM; | 1593 | return -EPERM; |
| 1594 | } | 1594 | } |
| 1595 | strlcpy(q->name, info->name, sizeof(q->name)); | 1595 | strscpy(q->name, info->name, sizeof(q->name)); |
| 1596 | queuefree(q); | 1596 | queuefree(q); |
| 1597 | 1597 | ||
| 1598 | return 0; | 1598 | return 0; |
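The seq_clientmgr.c hunks are a mechanical strlcpy() -> strscpy() conversion: both NUL-terminate and never overflow the destination, but strscpy() returns the number of bytes copied or -E2BIG on truncation, whereas strlcpy() returns strlen(src) and therefore has to scan the whole (possibly unterminated) source string. A small illustration with a hypothetical copy_name() helper:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_size);

        if (n == -E2BIG)
                /* dst holds a truncated, still NUL-terminated copy */
                pr_debug("name truncated to %zu bytes\n", dst_size - 1);

        /* strlcpy(dst, src, dst_size) would instead return strlen(src),
         * which requires walking the entire source string.
         */
}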
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c index 9c37d9af3023..ec7715c6b0c0 100644 --- a/sound/hda/ext/hdac_ext_bus.c +++ b/sound/hda/ext/hdac_ext_bus.c | |||
| @@ -107,7 +107,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev, | |||
| 107 | INIT_LIST_HEAD(&bus->hlink_list); | 107 | INIT_LIST_HEAD(&bus->hlink_list); |
| 108 | bus->idx = idx++; | 108 | bus->idx = idx++; |
| 109 | 109 | ||
| 110 | mutex_init(&bus->lock); | ||
| 111 | bus->cmd_dma_state = true; | 110 | bus->cmd_dma_state = true; |
| 112 | 111 | ||
| 113 | return 0; | 112 | return 0; |
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 012305177f68..ad8eee08013f 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c | |||
| @@ -38,6 +38,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, | |||
| 38 | INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events); | 38 | INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events); |
| 39 | spin_lock_init(&bus->reg_lock); | 39 | spin_lock_init(&bus->reg_lock); |
| 40 | mutex_init(&bus->cmd_mutex); | 40 | mutex_init(&bus->cmd_mutex); |
| 41 | mutex_init(&bus->lock); | ||
| 41 | bus->irq = -1; | 42 | bus->irq = -1; |
| 42 | return 0; | 43 | return 0; |
| 43 | } | 44 | } |
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c index 5c95933e739a..1ea51e3b942a 100644 --- a/sound/hda/hdac_component.c +++ b/sound/hda/hdac_component.c | |||
| @@ -69,13 +69,15 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable) | |||
| 69 | 69 | ||
| 70 | dev_dbg(bus->dev, "display power %s\n", | 70 | dev_dbg(bus->dev, "display power %s\n", |
| 71 | enable ? "enable" : "disable"); | 71 | enable ? "enable" : "disable"); |
| 72 | |||
| 73 | mutex_lock(&bus->lock); | ||
| 72 | if (enable) | 74 | if (enable) |
| 73 | set_bit(idx, &bus->display_power_status); | 75 | set_bit(idx, &bus->display_power_status); |
| 74 | else | 76 | else |
| 75 | clear_bit(idx, &bus->display_power_status); | 77 | clear_bit(idx, &bus->display_power_status); |
| 76 | 78 | ||
| 77 | if (!acomp || !acomp->ops) | 79 | if (!acomp || !acomp->ops) |
| 78 | return; | 80 | goto unlock; |
| 79 | 81 | ||
| 80 | if (bus->display_power_status) { | 82 | if (bus->display_power_status) { |
| 81 | if (!bus->display_power_active) { | 83 | if (!bus->display_power_active) { |
| @@ -92,6 +94,8 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable) | |||
| 92 | bus->display_power_active = false; | 94 | bus->display_power_active = false; |
| 93 | } | 95 | } |
| 94 | } | 96 | } |
| 97 | unlock: | ||
| 98 | mutex_unlock(&bus->lock); | ||
| 95 | } | 99 | } |
| 96 | EXPORT_SYMBOL_GPL(snd_hdac_display_power); | 100 | EXPORT_SYMBOL_GPL(snd_hdac_display_power); |
| 97 | 101 | ||
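The three HDA-core hunks belong together: mutex_init(&bus->lock) moves from the ext-bus code into snd_hdac_bus_init() so every bus has the lock, and snd_hdac_display_power() then takes it around the read-modify-write of display_power_status and the power-domain handling, with the early return turned into "goto unlock" so the lock is always dropped. A condensed sketch of that shape, with a hypothetical demo_bus and the audio-component check reduced to a flag:

#include <linux/mutex.h>
#include <linux/bitops.h>

struct demo_bus {
        struct mutex lock;
        unsigned long display_power_status;
        bool display_power_active;
};

static void demo_display_power(struct demo_bus *bus, unsigned int idx,
                               bool enable, bool have_component)
{
        mutex_lock(&bus->lock);

        if (enable)
                set_bit(idx, &bus->display_power_status);
        else
                clear_bit(idx, &bus->display_power_status);

        if (!have_component)
                goto unlock;            /* early exits must still drop the lock */

        bus->display_power_active = bus->display_power_status != 0;

unlock:
        mutex_unlock(&bus->lock);
}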
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ec0b8595eb4d..701a69d856f5 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
| @@ -969,6 +969,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card, | |||
| 969 | 969 | ||
| 970 | /* power-up all before initialization */ | 970 | /* power-up all before initialization */ |
| 971 | hda_set_power_state(codec, AC_PWRST_D0); | 971 | hda_set_power_state(codec, AC_PWRST_D0); |
| 972 | codec->core.dev.power.power_state = PMSG_ON; | ||
| 972 | 973 | ||
| 973 | snd_hda_codec_proc_new(codec); | 974 | snd_hda_codec_proc_new(codec); |
| 974 | 975 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index ece256a3b48f..2ec91085fa3e 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { | |||
| 2142 | SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0), | 2142 | SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0), |
| 2143 | /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */ | 2143 | /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */ |
| 2144 | SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0), | 2144 | SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0), |
| 2145 | /* https://bugs.launchpad.net/bugs/1821663 */ | ||
| 2146 | SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0), | ||
| 2145 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */ | 2147 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */ |
| 2146 | SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0), | 2148 | SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0), |
| 2147 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ | 2149 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ |
| @@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { | |||
| 2150 | SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0), | 2152 | SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0), |
| 2151 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ | 2153 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ |
| 2152 | SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), | 2154 | SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), |
| 2155 | /* https://bugs.launchpad.net/bugs/1821663 */ | ||
| 2156 | SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0), | ||
| 2153 | {} | 2157 | {} |
| 2154 | }; | 2158 | }; |
| 2155 | #endif /* CONFIG_PM */ | 2159 | #endif /* CONFIG_PM */ |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a3fb3d4c5730..f5b510f119ed 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -1864,8 +1864,8 @@ enum { | |||
| 1864 | ALC887_FIXUP_BASS_CHMAP, | 1864 | ALC887_FIXUP_BASS_CHMAP, |
| 1865 | ALC1220_FIXUP_GB_DUAL_CODECS, | 1865 | ALC1220_FIXUP_GB_DUAL_CODECS, |
| 1866 | ALC1220_FIXUP_CLEVO_P950, | 1866 | ALC1220_FIXUP_CLEVO_P950, |
| 1867 | ALC1220_FIXUP_SYSTEM76_ORYP5, | 1867 | ALC1220_FIXUP_CLEVO_PB51ED, |
| 1868 | ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, | 1868 | ALC1220_FIXUP_CLEVO_PB51ED_PINS, |
| 1869 | }; | 1869 | }; |
| 1870 | 1870 | ||
| 1871 | static void alc889_fixup_coef(struct hda_codec *codec, | 1871 | static void alc889_fixup_coef(struct hda_codec *codec, |
| @@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, | |||
| 2070 | static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, | 2070 | static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, |
| 2071 | const struct hda_fixup *fix, int action); | 2071 | const struct hda_fixup *fix, int action); |
| 2072 | 2072 | ||
| 2073 | static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, | 2073 | static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec, |
| 2074 | const struct hda_fixup *fix, | 2074 | const struct hda_fixup *fix, |
| 2075 | int action) | 2075 | int action) |
| 2076 | { | 2076 | { |
| @@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = { | |||
| 2322 | .type = HDA_FIXUP_FUNC, | 2322 | .type = HDA_FIXUP_FUNC, |
| 2323 | .v.func = alc1220_fixup_clevo_p950, | 2323 | .v.func = alc1220_fixup_clevo_p950, |
| 2324 | }, | 2324 | }, |
| 2325 | [ALC1220_FIXUP_SYSTEM76_ORYP5] = { | 2325 | [ALC1220_FIXUP_CLEVO_PB51ED] = { |
| 2326 | .type = HDA_FIXUP_FUNC, | 2326 | .type = HDA_FIXUP_FUNC, |
| 2327 | .v.func = alc1220_fixup_system76_oryp5, | 2327 | .v.func = alc1220_fixup_clevo_pb51ed, |
| 2328 | }, | 2328 | }, |
| 2329 | [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { | 2329 | [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = { |
| 2330 | .type = HDA_FIXUP_PINS, | 2330 | .type = HDA_FIXUP_PINS, |
| 2331 | .v.pins = (const struct hda_pintbl[]) { | 2331 | .v.pins = (const struct hda_pintbl[]) { |
| 2332 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | 2332 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ |
| 2333 | {} | 2333 | {} |
| 2334 | }, | 2334 | }, |
| 2335 | .chained = true, | 2335 | .chained = true, |
| 2336 | .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, | 2336 | .chain_id = ALC1220_FIXUP_CLEVO_PB51ED, |
| 2337 | }, | 2337 | }, |
| 2338 | }; | 2338 | }; |
| 2339 | 2339 | ||
| @@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
| 2411 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2411 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
| 2412 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), | 2412 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), |
| 2413 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | 2413 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), |
| 2414 | SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | 2414 | SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
| 2415 | SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | 2415 | SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), |
| 2416 | SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS), | ||
| 2416 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2417 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
| 2417 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2418 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
| 2418 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), | 2419 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), |
| @@ -5491,7 +5492,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec, | |||
| 5491 | jack->jack->button_state = report; | 5492 | jack->jack->button_state = report; |
| 5492 | } | 5493 | } |
| 5493 | 5494 | ||
| 5494 | static void alc295_fixup_chromebook(struct hda_codec *codec, | 5495 | static void alc_fixup_headset_jack(struct hda_codec *codec, |
| 5495 | const struct hda_fixup *fix, int action) | 5496 | const struct hda_fixup *fix, int action) |
| 5496 | { | 5497 | { |
| 5497 | 5498 | ||
| @@ -5501,16 +5502,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec, | |||
| 5501 | alc_headset_btn_callback); | 5502 | alc_headset_btn_callback); |
| 5502 | snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, | 5503 | snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, |
| 5503 | SND_JACK_HEADSET, alc_headset_btn_keymap); | 5504 | SND_JACK_HEADSET, alc_headset_btn_keymap); |
| 5504 | switch (codec->core.vendor_id) { | ||
| 5505 | case 0x10ec0295: | ||
| 5506 | alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ | ||
| 5507 | alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); | ||
| 5508 | break; | ||
| 5509 | case 0x10ec0236: | ||
| 5510 | alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ | ||
| 5511 | alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); | ||
| 5512 | break; | ||
| 5513 | } | ||
| 5514 | break; | 5505 | break; |
| 5515 | case HDA_FIXUP_ACT_INIT: | 5506 | case HDA_FIXUP_ACT_INIT: |
| 5516 | switch (codec->core.vendor_id) { | 5507 | switch (codec->core.vendor_id) { |
| @@ -5531,6 +5522,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec, | |||
| 5531 | } | 5522 | } |
| 5532 | } | 5523 | } |
| 5533 | 5524 | ||
| 5525 | static void alc295_fixup_chromebook(struct hda_codec *codec, | ||
| 5526 | const struct hda_fixup *fix, int action) | ||
| 5527 | { | ||
| 5528 | switch (action) { | ||
| 5529 | case HDA_FIXUP_ACT_INIT: | ||
| 5530 | switch (codec->core.vendor_id) { | ||
| 5531 | case 0x10ec0295: | ||
| 5532 | alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ | ||
| 5533 | alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); | ||
| 5534 | break; | ||
| 5535 | case 0x10ec0236: | ||
| 5536 | alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ | ||
| 5537 | alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); | ||
| 5538 | break; | ||
| 5539 | } | ||
| 5540 | break; | ||
| 5541 | } | ||
| 5542 | } | ||
| 5543 | |||
| 5534 | static void alc_fixup_disable_mic_vref(struct hda_codec *codec, | 5544 | static void alc_fixup_disable_mic_vref(struct hda_codec *codec, |
| 5535 | const struct hda_fixup *fix, int action) | 5545 | const struct hda_fixup *fix, int action) |
| 5536 | { | 5546 | { |
| @@ -5663,6 +5673,7 @@ enum { | |||
| 5663 | ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, | 5673 | ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, |
| 5664 | ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, | 5674 | ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, |
| 5665 | ALC233_FIXUP_LENOVO_MULTI_CODECS, | 5675 | ALC233_FIXUP_LENOVO_MULTI_CODECS, |
| 5676 | ALC233_FIXUP_ACER_HEADSET_MIC, | ||
| 5666 | ALC294_FIXUP_LENOVO_MIC_LOCATION, | 5677 | ALC294_FIXUP_LENOVO_MIC_LOCATION, |
| 5667 | ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, | 5678 | ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, |
| 5668 | ALC700_FIXUP_INTEL_REFERENCE, | 5679 | ALC700_FIXUP_INTEL_REFERENCE, |
| @@ -5684,6 +5695,7 @@ enum { | |||
| 5684 | ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, | 5695 | ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, |
| 5685 | ALC255_FIXUP_ACER_HEADSET_MIC, | 5696 | ALC255_FIXUP_ACER_HEADSET_MIC, |
| 5686 | ALC295_FIXUP_CHROME_BOOK, | 5697 | ALC295_FIXUP_CHROME_BOOK, |
| 5698 | ALC225_FIXUP_HEADSET_JACK, | ||
| 5687 | ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE, | 5699 | ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE, |
| 5688 | ALC225_FIXUP_WYSE_AUTO_MUTE, | 5700 | ALC225_FIXUP_WYSE_AUTO_MUTE, |
| 5689 | ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, | 5701 | ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, |
| @@ -6490,6 +6502,16 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 6490 | .type = HDA_FIXUP_FUNC, | 6502 | .type = HDA_FIXUP_FUNC, |
| 6491 | .v.func = alc233_alc662_fixup_lenovo_dual_codecs, | 6503 | .v.func = alc233_alc662_fixup_lenovo_dual_codecs, |
| 6492 | }, | 6504 | }, |
| 6505 | [ALC233_FIXUP_ACER_HEADSET_MIC] = { | ||
| 6506 | .type = HDA_FIXUP_VERBS, | ||
| 6507 | .v.verbs = (const struct hda_verb[]) { | ||
| 6508 | { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, | ||
| 6509 | { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, | ||
| 6510 | { } | ||
| 6511 | }, | ||
| 6512 | .chained = true, | ||
| 6513 | .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE | ||
| 6514 | }, | ||
| 6493 | [ALC294_FIXUP_LENOVO_MIC_LOCATION] = { | 6515 | [ALC294_FIXUP_LENOVO_MIC_LOCATION] = { |
| 6494 | .type = HDA_FIXUP_PINS, | 6516 | .type = HDA_FIXUP_PINS, |
| 6495 | .v.pins = (const struct hda_pintbl[]) { | 6517 | .v.pins = (const struct hda_pintbl[]) { |
| @@ -6635,6 +6657,12 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 6635 | [ALC295_FIXUP_CHROME_BOOK] = { | 6657 | [ALC295_FIXUP_CHROME_BOOK] = { |
| 6636 | .type = HDA_FIXUP_FUNC, | 6658 | .type = HDA_FIXUP_FUNC, |
| 6637 | .v.func = alc295_fixup_chromebook, | 6659 | .v.func = alc295_fixup_chromebook, |
| 6660 | .chained = true, | ||
| 6661 | .chain_id = ALC225_FIXUP_HEADSET_JACK | ||
| 6662 | }, | ||
| 6663 | [ALC225_FIXUP_HEADSET_JACK] = { | ||
| 6664 | .type = HDA_FIXUP_FUNC, | ||
| 6665 | .v.func = alc_fixup_headset_jack, | ||
| 6638 | }, | 6666 | }, |
| 6639 | [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { | 6667 | [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { |
| 6640 | .type = HDA_FIXUP_PINS, | 6668 | .type = HDA_FIXUP_PINS, |
| @@ -6737,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6737 | SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6765 | SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6738 | SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6766 | SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6739 | SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), | 6767 | SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), |
| 6768 | SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), | ||
| 6740 | SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), | 6769 | SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), |
| 6741 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), | 6770 | SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), |
| 6742 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), | 6771 | SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), |
| @@ -7132,7 +7161,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
| 7132 | {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"}, | 7161 | {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"}, |
| 7133 | {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, | 7162 | {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, |
| 7134 | {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, | 7163 | {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, |
| 7135 | {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"}, | 7164 | {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"}, |
| 7165 | {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"}, | ||
| 7136 | {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, | 7166 | {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, |
| 7137 | {} | 7167 | {} |
| 7138 | }; | 7168 | }; |
| @@ -7236,6 +7266,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 7236 | {0x12, 0x90a60140}, | 7266 | {0x12, 0x90a60140}, |
| 7237 | {0x14, 0x90170150}, | 7267 | {0x14, 0x90170150}, |
| 7238 | {0x21, 0x02211020}), | 7268 | {0x21, 0x02211020}), |
| 7269 | SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 7270 | {0x21, 0x02211020}), | ||
| 7239 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, | 7271 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, |
| 7240 | {0x14, 0x90170110}, | 7272 | {0x14, 0x90170110}, |
| 7241 | {0x21, 0x02211020}), | 7273 | {0x21, 0x02211020}), |
| @@ -7346,6 +7378,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 7346 | {0x21, 0x0221101f}), | 7378 | {0x21, 0x0221101f}), |
| 7347 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 7379 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 7348 | ALC256_STANDARD_PINS), | 7380 | ALC256_STANDARD_PINS), |
| 7381 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 7382 | {0x14, 0x90170110}, | ||
| 7383 | {0x1b, 0x01011020}, | ||
| 7384 | {0x21, 0x0221101f}), | ||
| 7349 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, | 7385 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, |
| 7350 | {0x14, 0x90170110}, | 7386 | {0x14, 0x90170110}, |
| 7351 | {0x1b, 0x90a70130}, | 7387 | {0x1b, 0x90a70130}, |
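The patch_realtek.c changes split alc295_fixup_chromebook(): the generic headset-button/jack setup becomes alc_fixup_headset_jack() behind the new ALC225_FIXUP_HEADSET_JACK entry (model name "alc-headset-jack"), while the Chromebook-specific HP jack-detect coefficient reset stays in the old function, which now chains onto the generic fixup. A minimal sketch of that chaining idiom with hypothetical IDs and empty fixup bodies (assumes the HDA driver's struct hda_fixup and HDA_FIXUP_FUNC from hda_local.h):

#include <sound/hda_codec.h>
#include "hda_local.h"          /* struct hda_fixup, HDA_FIXUP_FUNC (driver-internal) */

static void my_generic_fixup(struct hda_codec *codec,
                             const struct hda_fixup *fix, int action)
{
        /* e.g. register jack kctls / headset-button callbacks */
}

static void my_specific_fixup(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
{
        /* e.g. board-specific coefficient writes */
}

enum { MY_FIXUP_GENERIC, MY_FIXUP_SPECIFIC };

static const struct hda_fixup my_fixups[] = {
        [MY_FIXUP_GENERIC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = my_generic_fixup,
        },
        [MY_FIXUP_SPECIFIC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = my_specific_fixup,
                .chained = true,
                .chain_id = MY_FIXUP_GENERIC,   /* applying SPECIFIC also runs GENERIC */
        },
};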
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 419114edfd57..667fc1d59e18 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig | |||
| @@ -1151,6 +1151,7 @@ config SND_SOC_WCD9335 | |||
| 1151 | tristate "WCD9335 Codec" | 1151 | tristate "WCD9335 Codec" |
| 1152 | depends on SLIMBUS | 1152 | depends on SLIMBUS |
| 1153 | select REGMAP_SLIMBUS | 1153 | select REGMAP_SLIMBUS |
| 1154 | select REGMAP_IRQ | ||
| 1154 | help | 1155 | help |
| 1155 | The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports | 1156 | The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports |
| 1156 | Qualcomm Technologies, Inc. (QTI) multimedia solutions, | 1157 | Qualcomm Technologies, Inc. (QTI) multimedia solutions, |
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c index 03bbbcd3b6c1..87616b126018 100644 --- a/sound/soc/codecs/ab8500-codec.c +++ b/sound/soc/codecs/ab8500-codec.c | |||
| @@ -2129,6 +2129,7 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) | |||
| 2129 | dev_err(dai->component->dev, | 2129 | dev_err(dai->component->dev, |
| 2130 | "%s: ERROR: The device is either a master or a slave.\n", | 2130 | "%s: ERROR: The device is either a master or a slave.\n", |
| 2131 | __func__); | 2131 | __func__); |
| 2132 | /* fall through */ | ||
| 2132 | default: | 2133 | default: |
| 2133 | dev_err(dai->component->dev, | 2134 | dev_err(dai->component->dev, |
| 2134 | "%s: ERROR: Unsupporter master mask 0x%x\n", | 2135 | "%s: ERROR: Unsupporter master mask 0x%x\n", |
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c index 9f4a59871cee..c71696146c5e 100644 --- a/sound/soc/codecs/cs35l35.c +++ b/sound/soc/codecs/cs35l35.c | |||
| @@ -1635,6 +1635,16 @@ err: | |||
| 1635 | return ret; | 1635 | return ret; |
| 1636 | } | 1636 | } |
| 1637 | 1637 | ||
| 1638 | static int cs35l35_i2c_remove(struct i2c_client *i2c_client) | ||
| 1639 | { | ||
| 1640 | struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client); | ||
| 1641 | |||
| 1642 | regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies); | ||
| 1643 | gpiod_set_value_cansleep(cs35l35->reset_gpio, 0); | ||
| 1644 | |||
| 1645 | return 0; | ||
| 1646 | } | ||
| 1647 | |||
| 1638 | static const struct of_device_id cs35l35_of_match[] = { | 1648 | static const struct of_device_id cs35l35_of_match[] = { |
| 1639 | {.compatible = "cirrus,cs35l35"}, | 1649 | {.compatible = "cirrus,cs35l35"}, |
| 1640 | {}, | 1650 | {}, |
| @@ -1655,6 +1665,7 @@ static struct i2c_driver cs35l35_i2c_driver = { | |||
| 1655 | }, | 1665 | }, |
| 1656 | .id_table = cs35l35_id, | 1666 | .id_table = cs35l35_id, |
| 1657 | .probe = cs35l35_i2c_probe, | 1667 | .probe = cs35l35_i2c_probe, |
| 1668 | .remove = cs35l35_i2c_remove, | ||
| 1658 | }; | 1669 | }; |
| 1659 | 1670 | ||
| 1660 | module_i2c_driver(cs35l35_i2c_driver); | 1671 | module_i2c_driver(cs35l35_i2c_driver); |
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index 33d74f163bd7..793a14d58667 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c | |||
| @@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = { | |||
| 642 | .reg_defaults = cs4270_reg_defaults, | 642 | .reg_defaults = cs4270_reg_defaults, |
| 643 | .num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults), | 643 | .num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults), |
| 644 | .cache_type = REGCACHE_RBTREE, | 644 | .cache_type = REGCACHE_RBTREE, |
| 645 | .write_flag_mask = CS4270_I2C_INCR, | ||
| 645 | 646 | ||
| 646 | .readable_reg = cs4270_reg_is_readable, | 647 | .readable_reg = cs4270_reg_is_readable, |
| 647 | .volatile_reg = cs4270_reg_is_volatile, | 648 | .volatile_reg = cs4270_reg_is_volatile, |
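The cs4270 change sets regmap's write_flag_mask, a bitmask that the regmap core ORs into the register-address byte on every write; here it carries the codec's I2C auto-increment bit, which is what multi-register writes (such as a cache sync) typically need to land in consecutive registers. A sketch of the configuration field, with a hypothetical device and mask value:

#include <linux/regmap.h>

#define DEMO_I2C_INCR   0x80    /* hypothetical auto-increment bit */

static const struct regmap_config demo_regmap = {
        .reg_bits        = 8,
        .val_bits        = 8,
        .max_register    = 0x0f,
        .cache_type      = REGCACHE_RBTREE,
        .write_flag_mask = DEMO_I2C_INCR,       /* applied by regmap, not the driver */
};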
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index ffecdaaa8cf2..f889d94c8e3c 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c | |||
| @@ -38,6 +38,9 @@ static void hdac_hda_dai_close(struct snd_pcm_substream *substream, | |||
| 38 | struct snd_soc_dai *dai); | 38 | struct snd_soc_dai *dai); |
| 39 | static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream, | 39 | static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream, |
| 40 | struct snd_soc_dai *dai); | 40 | struct snd_soc_dai *dai); |
| 41 | static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream, | ||
| 42 | struct snd_pcm_hw_params *params, | ||
| 43 | struct snd_soc_dai *dai); | ||
| 41 | static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, | 44 | static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, |
| 42 | struct snd_soc_dai *dai); | 45 | struct snd_soc_dai *dai); |
| 43 | static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, | 46 | static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, |
| @@ -50,6 +53,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = { | |||
| 50 | .startup = hdac_hda_dai_open, | 53 | .startup = hdac_hda_dai_open, |
| 51 | .shutdown = hdac_hda_dai_close, | 54 | .shutdown = hdac_hda_dai_close, |
| 52 | .prepare = hdac_hda_dai_prepare, | 55 | .prepare = hdac_hda_dai_prepare, |
| 56 | .hw_params = hdac_hda_dai_hw_params, | ||
| 53 | .hw_free = hdac_hda_dai_hw_free, | 57 | .hw_free = hdac_hda_dai_hw_free, |
| 54 | .set_tdm_slot = hdac_hda_dai_set_tdm_slot, | 58 | .set_tdm_slot = hdac_hda_dai_set_tdm_slot, |
| 55 | }; | 59 | }; |
| @@ -139,6 +143,39 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai, | |||
| 139 | return 0; | 143 | return 0; |
| 140 | } | 144 | } |
| 141 | 145 | ||
| 146 | static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream, | ||
| 147 | struct snd_pcm_hw_params *params, | ||
| 148 | struct snd_soc_dai *dai) | ||
| 149 | { | ||
| 150 | struct snd_soc_component *component = dai->component; | ||
| 151 | struct hdac_hda_priv *hda_pvt; | ||
| 152 | unsigned int format_val; | ||
| 153 | unsigned int maxbps; | ||
| 154 | |||
| 155 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | ||
| 156 | maxbps = dai->driver->playback.sig_bits; | ||
| 157 | else | ||
| 158 | maxbps = dai->driver->capture.sig_bits; | ||
| 159 | |||
| 160 | hda_pvt = snd_soc_component_get_drvdata(component); | ||
| 161 | format_val = snd_hdac_calc_stream_format(params_rate(params), | ||
| 162 | params_channels(params), | ||
| 163 | params_format(params), | ||
| 164 | maxbps, | ||
| 165 | 0); | ||
| 166 | if (!format_val) { | ||
| 167 | dev_err(dai->dev, | ||
| 168 | "invalid format_val, rate=%d, ch=%d, format=%d, maxbps=%d\n", | ||
| 169 | params_rate(params), params_channels(params), | ||
| 170 | params_format(params), maxbps); | ||
| 171 | |||
| 172 | return -EINVAL; | ||
| 173 | } | ||
| 174 | |||
| 175 | hda_pvt->pcm[dai->id].format_val[substream->stream] = format_val; | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 142 | static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, | 179 | static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream, |
| 143 | struct snd_soc_dai *dai) | 180 | struct snd_soc_dai *dai) |
| 144 | { | 181 | { |
| @@ -162,10 +199,9 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream, | |||
| 162 | struct snd_soc_dai *dai) | 199 | struct snd_soc_dai *dai) |
| 163 | { | 200 | { |
| 164 | struct snd_soc_component *component = dai->component; | 201 | struct snd_soc_component *component = dai->component; |
| 202 | struct hda_pcm_stream *hda_stream; | ||
| 165 | struct hdac_hda_priv *hda_pvt; | 203 | struct hdac_hda_priv *hda_pvt; |
| 166 | struct snd_pcm_runtime *runtime = substream->runtime; | ||
| 167 | struct hdac_device *hdev; | 204 | struct hdac_device *hdev; |
| 168 | struct hda_pcm_stream *hda_stream; | ||
| 169 | unsigned int format_val; | 205 | unsigned int format_val; |
| 170 | struct hda_pcm *pcm; | 206 | struct hda_pcm *pcm; |
| 171 | unsigned int stream; | 207 | unsigned int stream; |
| @@ -179,19 +215,8 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream, | |||
| 179 | 215 | ||
| 180 | hda_stream = &pcm->stream[substream->stream]; | 216 | hda_stream = &pcm->stream[substream->stream]; |
| 181 | 217 | ||
| 182 | format_val = snd_hdac_calc_stream_format(runtime->rate, | ||
| 183 | runtime->channels, | ||
| 184 | runtime->format, | ||
| 185 | hda_stream->maxbps, | ||
| 186 | 0); | ||
| 187 | if (!format_val) { | ||
| 188 | dev_err(&hdev->dev, | ||
| 189 | "invalid format_val, rate=%d, ch=%d, format=%d\n", | ||
| 190 | runtime->rate, runtime->channels, runtime->format); | ||
| 191 | return -EINVAL; | ||
| 192 | } | ||
| 193 | |||
| 194 | stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream]; | 218 | stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream]; |
| 219 | format_val = hda_pvt->pcm[dai->id].format_val[substream->stream]; | ||
| 195 | 220 | ||
| 196 | ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream, | 221 | ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream, |
| 197 | stream, format_val, substream); | 222 | stream, format_val, substream); |
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h index e444ef593360..6b1bd4f428e7 100644 --- a/sound/soc/codecs/hdac_hda.h +++ b/sound/soc/codecs/hdac_hda.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | struct hdac_hda_pcm { | 9 | struct hdac_hda_pcm { |
| 10 | int stream_tag[2]; | 10 | int stream_tag[2]; |
| 11 | unsigned int format_val[2]; | ||
| 11 | }; | 12 | }; |
| 12 | 13 | ||
| 13 | struct hdac_hda_priv { | 14 | struct hdac_hda_priv { |
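The hdac_hda change moves stream-format calculation from .prepare to .hw_params: the value is computed from the params_*() accessors plus the DAI's sig_bits as maxbps, cached per direction in the new format_val[2] field, and .prepare then only pairs it with the stored stream tag. A condensed sketch of the "compute in hw_params, consume in prepare" split, using a hypothetical demo_pcm state instead of the driver's private data:

#include <sound/pcm_params.h>
#include <sound/hdaudio.h>

struct demo_pcm {
        int stream_tag[2];
        unsigned int format_val[2];     /* indexed by substream->stream */
};

static int demo_hw_params(struct snd_pcm_substream *substream,
                          struct snd_pcm_hw_params *params,
                          struct demo_pcm *pcm, unsigned int maxbps)
{
        unsigned int format_val;

        format_val = snd_hdac_calc_stream_format(params_rate(params),
                                                 params_channels(params),
                                                 params_format(params),
                                                 maxbps, 0);
        if (!format_val)
                return -EINVAL;

        pcm->format_val[substream->stream] = format_val;        /* cache it */
        return 0;
}

static int demo_prepare(struct snd_pcm_substream *substream,
                        struct demo_pcm *pcm)
{
        unsigned int format_val = pcm->format_val[substream->stream];
        int stream_tag = pcm->stream_tag[substream->stream];

        /* program the link/codec with stream_tag + format_val here */
        return (stream_tag && format_val) ? 0 : -EINVAL;
}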
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index e5b6769b9797..35df73e42cbc 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
| @@ -484,9 +484,6 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream, | |||
| 484 | params_width(params), params_rate(params), | 484 | params_width(params), params_rate(params), |
| 485 | params_channels(params)); | 485 | params_channels(params)); |
| 486 | 486 | ||
| 487 | if (params_width(params) > 24) | ||
| 488 | params->msbits = 24; | ||
| 489 | |||
| 490 | ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status, | 487 | ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status, |
| 491 | sizeof(hp.iec.status)); | 488 | sizeof(hp.iec.status)); |
| 492 | if (ret < 0) { | 489 | if (ret < 0) { |
| @@ -529,73 +526,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai, | |||
| 529 | { | 526 | { |
| 530 | struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); | 527 | struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); |
| 531 | struct hdmi_codec_daifmt cf = { 0 }; | 528 | struct hdmi_codec_daifmt cf = { 0 }; |
| 532 | int ret = 0; | ||
| 533 | 529 | ||
| 534 | dev_dbg(dai->dev, "%s()\n", __func__); | 530 | dev_dbg(dai->dev, "%s()\n", __func__); |
| 535 | 531 | ||
| 536 | if (dai->id == DAI_ID_SPDIF) { | 532 | if (dai->id == DAI_ID_SPDIF) |
| 537 | cf.fmt = HDMI_SPDIF; | 533 | return 0; |
| 538 | } else { | 534 | |
| 539 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { | 535 | switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { |
| 540 | case SND_SOC_DAIFMT_CBM_CFM: | 536 | case SND_SOC_DAIFMT_CBM_CFM: |
| 541 | cf.bit_clk_master = 1; | 537 | cf.bit_clk_master = 1; |
| 542 | cf.frame_clk_master = 1; | 538 | cf.frame_clk_master = 1; |
| 543 | break; | 539 | break; |
| 544 | case SND_SOC_DAIFMT_CBS_CFM: | 540 | case SND_SOC_DAIFMT_CBS_CFM: |
| 545 | cf.frame_clk_master = 1; | 541 | cf.frame_clk_master = 1; |
| 546 | break; | 542 | break; |
| 547 | case SND_SOC_DAIFMT_CBM_CFS: | 543 | case SND_SOC_DAIFMT_CBM_CFS: |
| 548 | cf.bit_clk_master = 1; | 544 | cf.bit_clk_master = 1; |
| 549 | break; | 545 | break; |
| 550 | case SND_SOC_DAIFMT_CBS_CFS: | 546 | case SND_SOC_DAIFMT_CBS_CFS: |
| 551 | break; | 547 | break; |
| 552 | default: | 548 | default: |
| 553 | return -EINVAL; | 549 | return -EINVAL; |
| 554 | } | 550 | } |
| 555 | 551 | ||
| 556 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { | 552 | switch (fmt & SND_SOC_DAIFMT_INV_MASK) { |
| 557 | case SND_SOC_DAIFMT_NB_NF: | 553 | case SND_SOC_DAIFMT_NB_NF: |
| 558 | break; | 554 | break; |
| 559 | case SND_SOC_DAIFMT_NB_IF: | 555 | case SND_SOC_DAIFMT_NB_IF: |
| 560 | cf.frame_clk_inv = 1; | 556 | cf.frame_clk_inv = 1; |
| 561 | break; | 557 | break; |
| 562 | case SND_SOC_DAIFMT_IB_NF: | 558 | case SND_SOC_DAIFMT_IB_NF: |
| 563 | cf.bit_clk_inv = 1; | 559 | cf.bit_clk_inv = 1; |
| 564 | break; | 560 | break; |
| 565 | case SND_SOC_DAIFMT_IB_IF: | 561 | case SND_SOC_DAIFMT_IB_IF: |
| 566 | cf.frame_clk_inv = 1; | 562 | cf.frame_clk_inv = 1; |
| 567 | cf.bit_clk_inv = 1; | 563 | cf.bit_clk_inv = 1; |
| 568 | break; | 564 | break; |
| 569 | } | 565 | } |
| 570 | 566 | ||
| 571 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { | 567 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { |
| 572 | case SND_SOC_DAIFMT_I2S: | 568 | case SND_SOC_DAIFMT_I2S: |
| 573 | cf.fmt = HDMI_I2S; | 569 | cf.fmt = HDMI_I2S; |
| 574 | break; | 570 | break; |
| 575 | case SND_SOC_DAIFMT_DSP_A: | 571 | case SND_SOC_DAIFMT_DSP_A: |
| 576 | cf.fmt = HDMI_DSP_A; | 572 | cf.fmt = HDMI_DSP_A; |
| 577 | break; | 573 | break; |
| 578 | case SND_SOC_DAIFMT_DSP_B: | 574 | case SND_SOC_DAIFMT_DSP_B: |
| 579 | cf.fmt = HDMI_DSP_B; | 575 | cf.fmt = HDMI_DSP_B; |
| 580 | break; | 576 | break; |
| 581 | case SND_SOC_DAIFMT_RIGHT_J: | 577 | case SND_SOC_DAIFMT_RIGHT_J: |
| 582 | cf.fmt = HDMI_RIGHT_J; | 578 | cf.fmt = HDMI_RIGHT_J; |
| 583 | break; | 579 | break; |
| 584 | case SND_SOC_DAIFMT_LEFT_J: | 580 | case SND_SOC_DAIFMT_LEFT_J: |
| 585 | cf.fmt = HDMI_LEFT_J; | 581 | cf.fmt = HDMI_LEFT_J; |
| 586 | break; | 582 | break; |
| 587 | case SND_SOC_DAIFMT_AC97: | 583 | case SND_SOC_DAIFMT_AC97: |
| 588 | cf.fmt = HDMI_AC97; | 584 | cf.fmt = HDMI_AC97; |
| 589 | break; | 585 | break; |
| 590 | default: | 586 | default: |
| 591 | dev_err(dai->dev, "Invalid DAI interface format\n"); | 587 | dev_err(dai->dev, "Invalid DAI interface format\n"); |
| 592 | return -EINVAL; | 588 | return -EINVAL; |
| 593 | } | ||
| 594 | } | 589 | } |
| 595 | 590 | ||
| 596 | hcp->daifmt[dai->id] = cf; | 591 | hcp->daifmt[dai->id] = cf; |
| 597 | 592 | ||
| 598 | return ret; | 593 | return 0; |
| 599 | } | 594 | } |
| 600 | 595 | ||
| 601 | static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute) | 596 | static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute) |
| @@ -792,8 +787,10 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
| 792 | i++; | 787 | i++; |
| 793 | } | 788 | } |
| 794 | 789 | ||
| 795 | if (hcd->spdif) | 790 | if (hcd->spdif) { |
| 796 | hcp->daidrv[i] = hdmi_spdif_dai; | 791 | hcp->daidrv[i] = hdmi_spdif_dai; |
| 792 | hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF; | ||
| 793 | } | ||
| 797 | 794 | ||
| 798 | dev_set_drvdata(dev, hcp); | 795 | dev_set_drvdata(dev, hcp); |
| 799 | 796 | ||
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c index bfd74b86c9d2..645aa0794123 100644 --- a/sound/soc/codecs/nau8810.c +++ b/sound/soc/codecs/nau8810.c | |||
| @@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = { | |||
| 411 | SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3, | 411 | SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3, |
| 412 | NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0], | 412 | NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0], |
| 413 | ARRAY_SIZE(nau8810_mono_mixer_controls)), | 413 | ARRAY_SIZE(nau8810_mono_mixer_controls)), |
| 414 | SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3, | 414 | SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3, |
| 415 | NAU8810_DAC_EN_SFT, 0), | 415 | NAU8810_DAC_EN_SFT, 0), |
| 416 | SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2, | 416 | SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2, |
| 417 | NAU8810_ADC_EN_SFT, 0), | 417 | NAU8810_ADC_EN_SFT, 0), |
| 418 | SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3, | 418 | SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3, |
| 419 | NAU8810_NSPK_EN_SFT, 0, NULL, 0), | 419 | NAU8810_NSPK_EN_SFT, 0, NULL, 0), |
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c index 87ed3dc496dc..5ab05e75edea 100644 --- a/sound/soc/codecs/nau8824.c +++ b/sound/soc/codecs/nau8824.c | |||
| @@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = { | |||
| 681 | SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2, | 681 | SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2, |
| 682 | NAU8824_ADCR_EN_SFT, 0), | 682 | NAU8824_ADCR_EN_SFT, 0), |
| 683 | 683 | ||
| 684 | SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0), | 684 | SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0), |
| 685 | SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0), | 685 | SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0), |
| 686 | 686 | ||
| 687 | SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC, | 687 | SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC, |
| 688 | NAU8824_DACL_EN_SFT, 0), | 688 | NAU8824_DACL_EN_SFT, 0), |
| @@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap) | |||
| 831 | } | 831 | } |
| 832 | } | 832 | } |
| 833 | 833 | ||
| 834 | static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin) | ||
| 835 | { | ||
| 836 | struct snd_soc_dapm_context *dapm = nau8824->dapm; | ||
| 837 | const char *prefix = dapm->component->name_prefix; | ||
| 838 | char prefixed_pin[80]; | ||
| 839 | |||
| 840 | if (prefix) { | ||
| 841 | snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s", | ||
| 842 | prefix, pin); | ||
| 843 | snd_soc_dapm_disable_pin(dapm, prefixed_pin); | ||
| 844 | } else { | ||
| 845 | snd_soc_dapm_disable_pin(dapm, pin); | ||
| 846 | } | ||
| 847 | } | ||
| 848 | |||
| 849 | static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin) | ||
| 850 | { | ||
| 851 | struct snd_soc_dapm_context *dapm = nau8824->dapm; | ||
| 852 | const char *prefix = dapm->component->name_prefix; | ||
| 853 | char prefixed_pin[80]; | ||
| 854 | |||
| 855 | if (prefix) { | ||
| 856 | snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s", | ||
| 857 | prefix, pin); | ||
| 858 | snd_soc_dapm_force_enable_pin(dapm, prefixed_pin); | ||
| 859 | } else { | ||
| 860 | snd_soc_dapm_force_enable_pin(dapm, pin); | ||
| 861 | } | ||
| 862 | } | ||
| 863 | |||
| 834 | static void nau8824_eject_jack(struct nau8824 *nau8824) | 864 | static void nau8824_eject_jack(struct nau8824 *nau8824) |
| 835 | { | 865 | { |
| 836 | struct snd_soc_dapm_context *dapm = nau8824->dapm; | 866 | struct snd_soc_dapm_context *dapm = nau8824->dapm; |
| @@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824) | |||
| 839 | /* Clear all interruption status */ | 869 | /* Clear all interruption status */ |
| 840 | nau8824_int_status_clear_all(regmap); | 870 | nau8824_int_status_clear_all(regmap); |
| 841 | 871 | ||
| 842 | snd_soc_dapm_disable_pin(dapm, "SAR"); | 872 | nau8824_dapm_disable_pin(nau8824, "SAR"); |
| 843 | snd_soc_dapm_disable_pin(dapm, "MICBIAS"); | 873 | nau8824_dapm_disable_pin(nau8824, "MICBIAS"); |
| 844 | snd_soc_dapm_sync(dapm); | 874 | snd_soc_dapm_sync(dapm); |
| 845 | 875 | ||
| 846 | /* Enable the insertion interruption, disable the ejection | 876 | /* Enable the insertion interruption, disable the ejection |
| @@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work) | |||
| 870 | struct regmap *regmap = nau8824->regmap; | 900 | struct regmap *regmap = nau8824->regmap; |
| 871 | int adc_value, event = 0, event_mask = 0; | 901 | int adc_value, event = 0, event_mask = 0; |
| 872 | 902 | ||
| 873 | snd_soc_dapm_force_enable_pin(dapm, "MICBIAS"); | 903 | nau8824_dapm_enable_pin(nau8824, "MICBIAS"); |
| 874 | snd_soc_dapm_force_enable_pin(dapm, "SAR"); | 904 | nau8824_dapm_enable_pin(nau8824, "SAR"); |
| 875 | snd_soc_dapm_sync(dapm); | 905 | snd_soc_dapm_sync(dapm); |
| 876 | 906 | ||
| 877 | msleep(100); | 907 | msleep(100); |
| @@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work) | |||
| 882 | if (adc_value < HEADSET_SARADC_THD) { | 912 | if (adc_value < HEADSET_SARADC_THD) { |
| 883 | event |= SND_JACK_HEADPHONE; | 913 | event |= SND_JACK_HEADPHONE; |
| 884 | 914 | ||
| 885 | snd_soc_dapm_disable_pin(dapm, "SAR"); | 915 | nau8824_dapm_disable_pin(nau8824, "SAR"); |
| 886 | snd_soc_dapm_disable_pin(dapm, "MICBIAS"); | 916 | nau8824_dapm_disable_pin(nau8824, "MICBIAS"); |
| 887 | snd_soc_dapm_sync(dapm); | 917 | snd_soc_dapm_sync(dapm); |
| 888 | } else { | 918 | } else { |
| 889 | event |= SND_JACK_HEADSET; | 919 | event |= SND_JACK_HEADSET; |
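The nau8824 helpers above exist because a machine driver can attach a name_prefix to the codec component, in which case DAPM registers every widget under that prefix and lookups by the bare pin name fail. A minimal sketch of the same prefix-aware pin handling (the buffer size and pin names mirror the patch, the rest is illustrative):

/* Minimal sketch of prefix-aware DAPM pin control; not part of the patch.
 * With name_prefix "Headset", pin "MICBIAS" must be addressed as
 * "Headset MICBIAS".
 */
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static void example_disable_pin(struct snd_soc_dapm_context *dapm,
				const char *pin)
{
	const char *prefix = dapm->component->name_prefix;
	char prefixed_pin[80];

	if (prefix) {
		snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
			 prefix, pin);
		snd_soc_dapm_disable_pin(dapm, prefixed_pin);
	} else {
		snd_soc_dapm_disable_pin(dapm, pin);
	}
	snd_soc_dapm_sync(dapm);
}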
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 9d5acd2d04ab..86a7fa31c294 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
| @@ -910,13 +910,21 @@ static int rt5682_headset_detect(struct snd_soc_component *component, | |||
| 910 | int jack_insert) | 910 | int jack_insert) |
| 911 | { | 911 | { |
| 912 | struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); | 912 | struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); |
| 913 | struct snd_soc_dapm_context *dapm = | ||
| 914 | snd_soc_component_get_dapm(component); | ||
| 915 | unsigned int val, count; | 913 | unsigned int val, count; |
| 916 | 914 | ||
| 917 | if (jack_insert) { | 915 | if (jack_insert) { |
| 918 | snd_soc_dapm_force_enable_pin(dapm, "CBJ Power"); | 916 | |
| 919 | snd_soc_dapm_sync(dapm); | 917 | snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, |
| 918 | RT5682_PWR_VREF2 | RT5682_PWR_MB, | ||
| 919 | RT5682_PWR_VREF2 | RT5682_PWR_MB); | ||
| 920 | snd_soc_component_update_bits(component, | ||
| 921 | RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0); | ||
| 922 | usleep_range(15000, 20000); | ||
| 923 | snd_soc_component_update_bits(component, | ||
| 924 | RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2); | ||
| 925 | snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, | ||
| 926 | RT5682_PWR_CBJ, RT5682_PWR_CBJ); | ||
| 927 | |||
| 920 | snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, | 928 | snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, |
| 921 | RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH); | 929 | RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH); |
| 922 | 930 | ||
| @@ -944,8 +952,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component, | |||
| 944 | rt5682_enable_push_button_irq(component, false); | 952 | rt5682_enable_push_button_irq(component, false); |
| 945 | snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, | 953 | snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, |
| 946 | RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW); | 954 | RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW); |
| 947 | snd_soc_dapm_disable_pin(dapm, "CBJ Power"); | 955 | snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, |
| 948 | snd_soc_dapm_sync(dapm); | 956 | RT5682_PWR_VREF2 | RT5682_PWR_MB, 0); |
| 957 | snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, | ||
| 958 | RT5682_PWR_CBJ, 0); | ||
| 949 | 959 | ||
| 950 | rt5682->jack_type = 0; | 960 | rt5682->jack_type = 0; |
| 951 | } | 961 | } |
| @@ -1198,7 +1208,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w, | |||
| 1198 | struct snd_soc_component *component = | 1208 | struct snd_soc_component *component = |
| 1199 | snd_soc_dapm_to_component(w->dapm); | 1209 | snd_soc_dapm_to_component(w->dapm); |
| 1200 | struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); | 1210 | struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); |
| 1201 | int ref, val, reg, sft, mask, idx = -EINVAL; | 1211 | int ref, val, reg, idx = -EINVAL; |
| 1202 | static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48}; | 1212 | static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48}; |
| 1203 | static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48}; | 1213 | static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48}; |
| 1204 | 1214 | ||
| @@ -1212,15 +1222,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w, | |||
| 1212 | 1222 | ||
| 1213 | idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f)); | 1223 | idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f)); |
| 1214 | 1224 | ||
| 1215 | if (w->shift == RT5682_PWR_ADC_S1F_BIT) { | 1225 | if (w->shift == RT5682_PWR_ADC_S1F_BIT) |
| 1216 | reg = RT5682_PLL_TRACK_3; | 1226 | reg = RT5682_PLL_TRACK_3; |
| 1217 | sft = RT5682_ADC_OSR_SFT; | 1227 | else |
| 1218 | mask = RT5682_ADC_OSR_MASK; | ||
| 1219 | } else { | ||
| 1220 | reg = RT5682_PLL_TRACK_2; | 1228 | reg = RT5682_PLL_TRACK_2; |
| 1221 | sft = RT5682_DAC_OSR_SFT; | ||
| 1222 | mask = RT5682_DAC_OSR_MASK; | ||
| 1223 | } | ||
| 1224 | 1229 | ||
| 1225 | snd_soc_component_update_bits(component, reg, | 1230 | snd_soc_component_update_bits(component, reg, |
| 1226 | RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT); | 1231 | RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT); |
| @@ -1232,7 +1237,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w, | |||
| 1232 | } | 1237 | } |
| 1233 | 1238 | ||
| 1234 | snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1, | 1239 | snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1, |
| 1235 | mask, idx << sft); | 1240 | RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK, |
| 1241 | (idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT)); | ||
| 1236 | 1242 | ||
| 1237 | return 0; | 1243 | return 0; |
| 1238 | } | 1244 | } |
| @@ -1591,8 +1597,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = { | |||
| 1591 | 0, NULL, 0), | 1597 | 0, NULL, 0), |
| 1592 | SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0, | 1598 | SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0, |
| 1593 | rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), | 1599 | rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), |
| 1594 | SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0, | ||
| 1595 | rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), | ||
| 1596 | 1600 | ||
| 1597 | /* ASRC */ | 1601 | /* ASRC */ |
| 1598 | SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1, | 1602 | SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1, |
| @@ -1627,9 +1631,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = { | |||
| 1627 | SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, | 1631 | SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, |
| 1628 | 0, 0, NULL, 0), | 1632 | 0, 0, NULL, 0), |
| 1629 | 1633 | ||
| 1630 | SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3, | ||
| 1631 | RT5682_PWR_CBJ_BIT, 0, NULL, 0), | ||
| 1632 | |||
| 1633 | /* REC Mixer */ | 1634 | /* REC Mixer */ |
| 1634 | SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix, | 1635 | SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix, |
| 1635 | ARRAY_SIZE(rt5682_rec1_l_mix)), | 1636 | ARRAY_SIZE(rt5682_rec1_l_mix)), |
| @@ -1792,17 +1793,13 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = { | |||
| 1792 | 1793 | ||
| 1793 | /*Vref*/ | 1794 | /*Vref*/ |
| 1794 | {"MICBIAS1", NULL, "Vref1"}, | 1795 | {"MICBIAS1", NULL, "Vref1"}, |
| 1795 | {"MICBIAS1", NULL, "Vref2"}, | ||
| 1796 | {"MICBIAS2", NULL, "Vref1"}, | 1796 | {"MICBIAS2", NULL, "Vref1"}, |
| 1797 | {"MICBIAS2", NULL, "Vref2"}, | ||
| 1798 | 1797 | ||
| 1799 | {"CLKDET SYS", NULL, "CLKDET"}, | 1798 | {"CLKDET SYS", NULL, "CLKDET"}, |
| 1800 | 1799 | ||
| 1801 | {"IN1P", NULL, "LDO2"}, | 1800 | {"IN1P", NULL, "LDO2"}, |
| 1802 | 1801 | ||
| 1803 | {"BST1 CBJ", NULL, "IN1P"}, | 1802 | {"BST1 CBJ", NULL, "IN1P"}, |
| 1804 | {"BST1 CBJ", NULL, "CBJ Power"}, | ||
| 1805 | {"CBJ Power", NULL, "Vref2"}, | ||
| 1806 | 1803 | ||
| 1807 | {"RECMIX1L", "CBJ Switch", "BST1 CBJ"}, | 1804 | {"RECMIX1L", "CBJ Switch", "BST1 CBJ"}, |
| 1808 | {"RECMIX1L", NULL, "RECMIX1L Power"}, | 1805 | {"RECMIX1L", NULL, "RECMIX1L Power"}, |
| @@ -1912,9 +1909,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = { | |||
| 1912 | {"HP Amp", NULL, "Capless"}, | 1909 | {"HP Amp", NULL, "Capless"}, |
| 1913 | {"HP Amp", NULL, "Charge Pump"}, | 1910 | {"HP Amp", NULL, "Charge Pump"}, |
| 1914 | {"HP Amp", NULL, "CLKDET SYS"}, | 1911 | {"HP Amp", NULL, "CLKDET SYS"}, |
| 1915 | {"HP Amp", NULL, "CBJ Power"}, | ||
| 1916 | {"HP Amp", NULL, "Vref1"}, | 1912 | {"HP Amp", NULL, "Vref1"}, |
| 1917 | {"HP Amp", NULL, "Vref2"}, | ||
| 1918 | {"HPOL Playback", "Switch", "HP Amp"}, | 1913 | {"HPOL Playback", "Switch", "HP Amp"}, |
| 1919 | {"HPOR Playback", "Switch", "HP Amp"}, | 1914 | {"HPOR Playback", "Switch", "HP Amp"}, |
| 1920 | {"HPOL", NULL, "HPOL Playback"}, | 1915 | {"HPOL", NULL, "HPOL Playback"}, |
| @@ -2303,16 +2298,13 @@ static int rt5682_set_bias_level(struct snd_soc_component *component, | |||
| 2303 | switch (level) { | 2298 | switch (level) { |
| 2304 | case SND_SOC_BIAS_PREPARE: | 2299 | case SND_SOC_BIAS_PREPARE: |
| 2305 | regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1, | 2300 | regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1, |
| 2306 | RT5682_PWR_MB | RT5682_PWR_BG, | 2301 | RT5682_PWR_BG, RT5682_PWR_BG); |
| 2307 | RT5682_PWR_MB | RT5682_PWR_BG); | ||
| 2308 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, | 2302 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, |
| 2309 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, | 2303 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, |
| 2310 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO); | 2304 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO); |
| 2311 | break; | 2305 | break; |
| 2312 | 2306 | ||
| 2313 | case SND_SOC_BIAS_STANDBY: | 2307 | case SND_SOC_BIAS_STANDBY: |
| 2314 | regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1, | ||
| 2315 | RT5682_PWR_MB, RT5682_PWR_MB); | ||
| 2316 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, | 2308 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, |
| 2317 | RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL); | 2309 | RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL); |
| 2318 | break; | 2310 | break; |
| @@ -2320,7 +2312,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component, | |||
| 2320 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, | 2312 | regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1, |
| 2321 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0); | 2313 | RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0); |
| 2322 | regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1, | 2314 | regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1, |
| 2323 | RT5682_PWR_MB | RT5682_PWR_BG, 0); | 2315 | RT5682_PWR_BG, 0); |
| 2324 | break; | 2316 | break; |
| 2325 | 2317 | ||
| 2326 | default: | 2318 | default: |
| @@ -2363,6 +2355,8 @@ static int rt5682_resume(struct snd_soc_component *component) | |||
| 2363 | regcache_cache_only(rt5682->regmap, false); | 2355 | regcache_cache_only(rt5682->regmap, false); |
| 2364 | regcache_sync(rt5682->regmap); | 2356 | regcache_sync(rt5682->regmap); |
| 2365 | 2357 | ||
| 2358 | rt5682_irq(0, rt5682); | ||
| 2359 | |||
| 2366 | return 0; | 2360 | return 0; |
| 2367 | } | 2361 | } |
| 2368 | #else | 2362 | #else |
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c index 385fa2e9525a..22c3a6bc0b6c 100644 --- a/sound/soc/codecs/tlv320aic32x4-i2c.c +++ b/sound/soc/codecs/tlv320aic32x4-i2c.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright 2011 NW Digital Radio | 4 | * Copyright 2011 NW Digital Radio |
| 5 | * | 5 | * |
| 6 | * Author: Jeremy McDermond <nh6z@nh6z.net> | 6 | * Author: Annaliese McDermond <nh6z@nh6z.net> |
| 7 | * | 7 | * |
| 8 | * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27. | 8 | * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27. |
| 9 | * | 9 | * |
| @@ -72,5 +72,5 @@ static struct i2c_driver aic32x4_i2c_driver = { | |||
| 72 | module_i2c_driver(aic32x4_i2c_driver); | 72 | module_i2c_driver(aic32x4_i2c_driver); |
| 73 | 73 | ||
| 74 | MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C"); | 74 | MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C"); |
| 75 | MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>"); | 75 | MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>"); |
| 76 | MODULE_LICENSE("GPL"); | 76 | MODULE_LICENSE("GPL"); |
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c index 07d78ae51e05..aa5b7ba0254b 100644 --- a/sound/soc/codecs/tlv320aic32x4-spi.c +++ b/sound/soc/codecs/tlv320aic32x4-spi.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright 2011 NW Digital Radio | 4 | * Copyright 2011 NW Digital Radio |
| 5 | * | 5 | * |
| 6 | * Author: Jeremy McDermond <nh6z@nh6z.net> | 6 | * Author: Annaliese McDermond <nh6z@nh6z.net> |
| 7 | * | 7 | * |
| 8 | * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27. | 8 | * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27. |
| 9 | * | 9 | * |
| @@ -74,5 +74,5 @@ static struct spi_driver aic32x4_spi_driver = { | |||
| 74 | module_spi_driver(aic32x4_spi_driver); | 74 | module_spi_driver(aic32x4_spi_driver); |
| 75 | 75 | ||
| 76 | MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI"); | 76 | MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI"); |
| 77 | MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>"); | 77 | MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>"); |
| 78 | MODULE_LICENSE("GPL"); | 78 | MODULE_LICENSE("GPL"); |
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index 96f1526cb258..5520044929f4 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c | |||
| @@ -490,6 +490,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = { | |||
| 490 | SND_SOC_DAPM_INPUT("IN2_R"), | 490 | SND_SOC_DAPM_INPUT("IN2_R"), |
| 491 | SND_SOC_DAPM_INPUT("IN3_L"), | 491 | SND_SOC_DAPM_INPUT("IN3_L"), |
| 492 | SND_SOC_DAPM_INPUT("IN3_R"), | 492 | SND_SOC_DAPM_INPUT("IN3_R"), |
| 493 | SND_SOC_DAPM_INPUT("CM_L"), | ||
| 494 | SND_SOC_DAPM_INPUT("CM_R"), | ||
| 493 | }; | 495 | }; |
| 494 | 496 | ||
| 495 | static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = { | 497 | static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = { |
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 283583d1db60..516d17cb2182 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c | |||
| @@ -1609,7 +1609,6 @@ static int aic3x_probe(struct snd_soc_component *component) | |||
| 1609 | struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component); | 1609 | struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component); |
| 1610 | int ret, i; | 1610 | int ret, i; |
| 1611 | 1611 | ||
| 1612 | INIT_LIST_HEAD(&aic3x->list); | ||
| 1613 | aic3x->component = component; | 1612 | aic3x->component = component; |
| 1614 | 1613 | ||
| 1615 | for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) { | 1614 | for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) { |
| @@ -1873,6 +1872,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c, | |||
| 1873 | if (ret != 0) | 1872 | if (ret != 0) |
| 1874 | goto err_gpio; | 1873 | goto err_gpio; |
| 1875 | 1874 | ||
| 1875 | INIT_LIST_HEAD(&aic3x->list); | ||
| 1876 | list_add(&aic3x->list, &reset_list); | 1876 | list_add(&aic3x->list, &reset_list); |
| 1877 | 1877 | ||
| 1878 | return 0; | 1878 | return 0; |
| @@ -1889,6 +1889,8 @@ static int aic3x_i2c_remove(struct i2c_client *client) | |||
| 1889 | { | 1889 | { |
| 1890 | struct aic3x_priv *aic3x = i2c_get_clientdata(client); | 1890 | struct aic3x_priv *aic3x = i2c_get_clientdata(client); |
| 1891 | 1891 | ||
| 1892 | list_del(&aic3x->list); | ||
| 1893 | |||
| 1892 | if (gpio_is_valid(aic3x->gpio_reset) && | 1894 | if (gpio_is_valid(aic3x->gpio_reset) && |
| 1893 | !aic3x_is_shared_reset(aic3x)) { | 1895 | !aic3x_is_shared_reset(aic3x)) { |
| 1894 | gpio_set_value(aic3x->gpio_reset, 0); | 1896 | gpio_set_value(aic3x->gpio_reset, 0); |
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index b93fdc8d2d6f..b0b48eb9c7c9 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
| @@ -2905,6 +2905,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w, | |||
| 2905 | if (wm_adsp_fw[dsp->fw].num_caps != 0) | 2905 | if (wm_adsp_fw[dsp->fw].num_caps != 0) |
| 2906 | wm_adsp_buffer_free(dsp); | 2906 | wm_adsp_buffer_free(dsp); |
| 2907 | 2907 | ||
| 2908 | dsp->fatal_error = false; | ||
| 2909 | |||
| 2908 | mutex_unlock(&dsp->pwr_lock); | 2910 | mutex_unlock(&dsp->pwr_lock); |
| 2909 | 2911 | ||
| 2910 | adsp_dbg(dsp, "Execution stopped\n"); | 2912 | adsp_dbg(dsp, "Execution stopped\n"); |
| @@ -3000,6 +3002,9 @@ static int wm_adsp_compr_attach(struct wm_adsp_compr *compr) | |||
| 3000 | { | 3002 | { |
| 3001 | struct wm_adsp_compr_buf *buf = NULL, *tmp; | 3003 | struct wm_adsp_compr_buf *buf = NULL, *tmp; |
| 3002 | 3004 | ||
| 3005 | if (compr->dsp->fatal_error) | ||
| 3006 | return -EINVAL; | ||
| 3007 | |||
| 3003 | list_for_each_entry(tmp, &compr->dsp->buffer_list, list) { | 3008 | list_for_each_entry(tmp, &compr->dsp->buffer_list, list) { |
| 3004 | if (!tmp->name || !strcmp(compr->name, tmp->name)) { | 3009 | if (!tmp->name || !strcmp(compr->name, tmp->name)) { |
| 3005 | buf = tmp; | 3010 | buf = tmp; |
| @@ -3535,11 +3540,11 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf) | |||
| 3535 | 3540 | ||
| 3536 | ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error); | 3541 | ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error); |
| 3537 | if (ret < 0) { | 3542 | if (ret < 0) { |
| 3538 | adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret); | 3543 | compr_err(buf, "Failed to check buffer error: %d\n", ret); |
| 3539 | return ret; | 3544 | return ret; |
| 3540 | } | 3545 | } |
| 3541 | if (buf->error != 0) { | 3546 | if (buf->error != 0) { |
| 3542 | adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error); | 3547 | compr_err(buf, "Buffer error occurred: %d\n", buf->error); |
| 3543 | return -EIO; | 3548 | return -EIO; |
| 3544 | } | 3549 | } |
| 3545 | 3550 | ||
| @@ -3571,8 +3576,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd) | |||
| 3571 | if (ret < 0) | 3576 | if (ret < 0) |
| 3572 | break; | 3577 | break; |
| 3573 | 3578 | ||
| 3574 | wm_adsp_buffer_clear(compr->buf); | ||
| 3575 | |||
| 3576 | /* Trigger the IRQ at one fragment of data */ | 3579 | /* Trigger the IRQ at one fragment of data */ |
| 3577 | ret = wm_adsp_buffer_write(compr->buf, | 3580 | ret = wm_adsp_buffer_write(compr->buf, |
| 3578 | HOST_BUFFER_FIELD(high_water_mark), | 3581 | HOST_BUFFER_FIELD(high_water_mark), |
| @@ -3584,6 +3587,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd) | |||
| 3584 | } | 3587 | } |
| 3585 | break; | 3588 | break; |
| 3586 | case SNDRV_PCM_TRIGGER_STOP: | 3589 | case SNDRV_PCM_TRIGGER_STOP: |
| 3590 | if (wm_adsp_compr_attached(compr)) | ||
| 3591 | wm_adsp_buffer_clear(compr->buf); | ||
| 3587 | break; | 3592 | break; |
| 3588 | default: | 3593 | default: |
| 3589 | ret = -EINVAL; | 3594 | ret = -EINVAL; |
| @@ -3917,22 +3922,40 @@ int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions) | |||
| 3917 | } | 3922 | } |
| 3918 | EXPORT_SYMBOL_GPL(wm_adsp2_lock); | 3923 | EXPORT_SYMBOL_GPL(wm_adsp2_lock); |
| 3919 | 3924 | ||
| 3925 | static void wm_adsp_fatal_error(struct wm_adsp *dsp) | ||
| 3926 | { | ||
| 3927 | struct wm_adsp_compr *compr; | ||
| 3928 | |||
| 3929 | dsp->fatal_error = true; | ||
| 3930 | |||
| 3931 | list_for_each_entry(compr, &dsp->compr_list, list) { | ||
| 3932 | if (compr->stream) { | ||
| 3933 | snd_compr_stop_error(compr->stream, | ||
| 3934 | SNDRV_PCM_STATE_XRUN); | ||
| 3935 | snd_compr_fragment_elapsed(compr->stream); | ||
| 3936 | } | ||
| 3937 | } | ||
| 3938 | } | ||
| 3939 | |||
| 3920 | irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp) | 3940 | irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp) |
| 3921 | { | 3941 | { |
| 3922 | unsigned int val; | 3942 | unsigned int val; |
| 3923 | struct regmap *regmap = dsp->regmap; | 3943 | struct regmap *regmap = dsp->regmap; |
| 3924 | int ret = 0; | 3944 | int ret = 0; |
| 3925 | 3945 | ||
| 3946 | mutex_lock(&dsp->pwr_lock); | ||
| 3947 | |||
| 3926 | ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val); | 3948 | ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val); |
| 3927 | if (ret) { | 3949 | if (ret) { |
| 3928 | adsp_err(dsp, | 3950 | adsp_err(dsp, |
| 3929 | "Failed to read Region Lock Ctrl register: %d\n", ret); | 3951 | "Failed to read Region Lock Ctrl register: %d\n", ret); |
| 3930 | return IRQ_HANDLED; | 3952 | goto error; |
| 3931 | } | 3953 | } |
| 3932 | 3954 | ||
| 3933 | if (val & ADSP2_WDT_TIMEOUT_STS_MASK) { | 3955 | if (val & ADSP2_WDT_TIMEOUT_STS_MASK) { |
| 3934 | adsp_err(dsp, "watchdog timeout error\n"); | 3956 | adsp_err(dsp, "watchdog timeout error\n"); |
| 3935 | wm_adsp_stop_watchdog(dsp); | 3957 | wm_adsp_stop_watchdog(dsp); |
| 3958 | wm_adsp_fatal_error(dsp); | ||
| 3936 | } | 3959 | } |
| 3937 | 3960 | ||
| 3938 | if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) { | 3961 | if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) { |
| @@ -3946,7 +3969,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp) | |||
| 3946 | adsp_err(dsp, | 3969 | adsp_err(dsp, |
| 3947 | "Failed to read Bus Err Addr register: %d\n", | 3970 | "Failed to read Bus Err Addr register: %d\n", |
| 3948 | ret); | 3971 | ret); |
| 3949 | return IRQ_HANDLED; | 3972 | goto error; |
| 3950 | } | 3973 | } |
| 3951 | 3974 | ||
| 3952 | adsp_err(dsp, "bus error address = 0x%x\n", | 3975 | adsp_err(dsp, "bus error address = 0x%x\n", |
| @@ -3959,7 +3982,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp) | |||
| 3959 | adsp_err(dsp, | 3982 | adsp_err(dsp, |
| 3960 | "Failed to read Pmem Xmem Err Addr register: %d\n", | 3983 | "Failed to read Pmem Xmem Err Addr register: %d\n", |
| 3961 | ret); | 3984 | ret); |
| 3962 | return IRQ_HANDLED; | 3985 | goto error; |
| 3963 | } | 3986 | } |
| 3964 | 3987 | ||
| 3965 | adsp_err(dsp, "xmem error address = 0x%x\n", | 3988 | adsp_err(dsp, "xmem error address = 0x%x\n", |
| @@ -3972,6 +3995,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp) | |||
| 3972 | regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, | 3995 | regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, |
| 3973 | ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT); | 3996 | ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT); |
| 3974 | 3997 | ||
| 3998 | error: | ||
| 3999 | mutex_unlock(&dsp->pwr_lock); | ||
| 4000 | |||
| 3975 | return IRQ_HANDLED; | 4001 | return IRQ_HANDLED; |
| 3976 | } | 4002 | } |
| 3977 | EXPORT_SYMBOL_GPL(wm_adsp2_bus_error); | 4003 | EXPORT_SYMBOL_GPL(wm_adsp2_bus_error); |
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h index 59e07ad16329..8f09b4419a91 100644 --- a/sound/soc/codecs/wm_adsp.h +++ b/sound/soc/codecs/wm_adsp.h | |||
| @@ -85,6 +85,7 @@ struct wm_adsp { | |||
| 85 | bool preloaded; | 85 | bool preloaded; |
| 86 | bool booted; | 86 | bool booted; |
| 87 | bool running; | 87 | bool running; |
| 88 | bool fatal_error; | ||
| 88 | 89 | ||
| 89 | struct list_head ctl_list; | 90 | struct list_head ctl_list; |
| 90 | 91 | ||
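The wm_adsp hunks above latch a fatal_error flag (now under pwr_lock) when the bus-error handler sees a watchdog timeout, push any open compressed streams into the XRUN state so userspace wakes up, and make later compressed-stream attaches fail until the DSP is stopped again, which clears the flag. A rough sketch of that pattern with simplified names:

/* Rough sketch of latching a fatal error and refusing further use;
 * not part of the patch. Structure and field names are simplified
 * (the driver keeps a list of streams and clears the flag on DSP stop).
 */
#include <linux/mutex.h>
#include <sound/compress_driver.h>

struct example_dsp {
	struct mutex pwr_lock;
	bool fatal_error;
	struct snd_compr_stream *stream;	/* one stream for brevity */
};

static void example_fatal_error(struct example_dsp *dsp)
{
	/* called with pwr_lock held from the bus-error handler */
	dsp->fatal_error = true;

	if (dsp->stream) {
		/* report the stream as broken and wake any waiter */
		snd_compr_stop_error(dsp->stream, SNDRV_PCM_STATE_XRUN);
		snd_compr_fragment_elapsed(dsp->stream);
	}
}

static int example_compr_attach(struct example_dsp *dsp)
{
	/* new users are refused until the DSP has been power-cycled */
	if (dsp->fatal_error)
		return -EINVAL;
	return 0;
}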
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 528e8b108422..0b937924d2e4 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c | |||
| @@ -445,6 +445,19 @@ struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir) | |||
| 445 | } | 445 | } |
| 446 | EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel); | 446 | EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel); |
| 447 | 447 | ||
| 448 | static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream, | ||
| 449 | struct snd_soc_dai *dai) | ||
| 450 | { | ||
| 451 | struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai); | ||
| 452 | |||
| 453 | /* Odd channel number is not valid for older ASRC (channel_bits==3) */ | ||
| 454 | if (asrc_priv->channel_bits == 3) | ||
| 455 | snd_pcm_hw_constraint_step(substream->runtime, 0, | ||
| 456 | SNDRV_PCM_HW_PARAM_CHANNELS, 2); | ||
| 457 | |||
| 458 | return 0; | ||
| 459 | } | ||
| 460 | |||
| 448 | static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream, | 461 | static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream, |
| 449 | struct snd_pcm_hw_params *params, | 462 | struct snd_pcm_hw_params *params, |
| 450 | struct snd_soc_dai *dai) | 463 | struct snd_soc_dai *dai) |
| @@ -539,6 +552,7 @@ static int fsl_asrc_dai_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 539 | } | 552 | } |
| 540 | 553 | ||
| 541 | static const struct snd_soc_dai_ops fsl_asrc_dai_ops = { | 554 | static const struct snd_soc_dai_ops fsl_asrc_dai_ops = { |
| 555 | .startup = fsl_asrc_dai_startup, | ||
| 542 | .hw_params = fsl_asrc_dai_hw_params, | 556 | .hw_params = fsl_asrc_dai_hw_params, |
| 543 | .hw_free = fsl_asrc_dai_hw_free, | 557 | .hw_free = fsl_asrc_dai_hw_free, |
| 544 | .trigger = fsl_asrc_dai_trigger, | 558 | .trigger = fsl_asrc_dai_trigger, |
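The new fsl_asrc startup callback above rejects odd channel counts on older ASRC blocks (channel_bits == 3) by constraining the PCM channels parameter up front rather than failing later in hw_params. The generic mechanism is snd_pcm_hw_constraint_step(); a short sketch:

/* Sketch: restrict the channel count to even values from a DAI startup
 * callback. Not part of the patch; the gating condition is assumed.
 */
#include <sound/pcm.h>
#include <sound/soc.h>

static int example_startup(struct snd_pcm_substream *substream,
			   struct snd_soc_dai *dai)
{
	/* allow only 2, 4, 6, ... channels: step CHANNELS by 2 */
	return snd_pcm_hw_constraint_step(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_CHANNELS, 2);
}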
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index afe67c865330..3623aa9a6f2e 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c | |||
| @@ -54,6 +54,8 @@ struct fsl_esai { | |||
| 54 | u32 fifo_depth; | 54 | u32 fifo_depth; |
| 55 | u32 slot_width; | 55 | u32 slot_width; |
| 56 | u32 slots; | 56 | u32 slots; |
| 57 | u32 tx_mask; | ||
| 58 | u32 rx_mask; | ||
| 57 | u32 hck_rate[2]; | 59 | u32 hck_rate[2]; |
| 58 | u32 sck_rate[2]; | 60 | u32 sck_rate[2]; |
| 59 | bool hck_dir[2]; | 61 | bool hck_dir[2]; |
| @@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, | |||
| 361 | regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, | 363 | regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, |
| 362 | ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); | 364 | ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); |
| 363 | 365 | ||
| 364 | regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA, | ||
| 365 | ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask)); | ||
| 366 | regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB, | ||
| 367 | ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask)); | ||
| 368 | |||
| 369 | regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, | 366 | regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, |
| 370 | ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); | 367 | ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); |
| 371 | 368 | ||
| 372 | regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA, | ||
| 373 | ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask)); | ||
| 374 | regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB, | ||
| 375 | ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask)); | ||
| 376 | |||
| 377 | esai_priv->slot_width = slot_width; | 369 | esai_priv->slot_width = slot_width; |
| 378 | esai_priv->slots = slots; | 370 | esai_priv->slots = slots; |
| 371 | esai_priv->tx_mask = tx_mask; | ||
| 372 | esai_priv->rx_mask = rx_mask; | ||
| 379 | 373 | ||
| 380 | return 0; | 374 | return 0; |
| 381 | } | 375 | } |
| @@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 596 | bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; | 590 | bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; |
| 597 | u8 i, channels = substream->runtime->channels; | 591 | u8 i, channels = substream->runtime->channels; |
| 598 | u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); | 592 | u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); |
| 593 | u32 mask; | ||
| 599 | 594 | ||
| 600 | switch (cmd) { | 595 | switch (cmd) { |
| 601 | case SNDRV_PCM_TRIGGER_START: | 596 | case SNDRV_PCM_TRIGGER_START: |
| @@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 608 | for (i = 0; tx && i < channels; i++) | 603 | for (i = 0; tx && i < channels; i++) |
| 609 | regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0); | 604 | regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0); |
| 610 | 605 | ||
| 606 | /* | ||
| 607 | * Setting TE/RE at the end of the enablement flow causes a | ||
| 608 | * channel swap issue on multi-data-line setups. To work | ||
| 609 | * around this, the enablement sequence is reordered as | ||
| 610 | * follows: | ||
| 611 | * 1) clear xSMB & xSMA: done in probe and in the stop | ||
| 612 | * state. | ||
| 613 | * 2) set TE/RE | ||
| 614 | * 3) set xSMB | ||
| 615 | * 4) set xSMA: xSMA is set last, which triggers the ESAI | ||
| 616 | * to start. | ||
| 617 | */ | ||
| 611 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), | 618 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), |
| 612 | tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, | 619 | tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, |
| 613 | tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins)); | 620 | tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins)); |
| 621 | mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask; | ||
| 622 | |||
| 623 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx), | ||
| 624 | ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask)); | ||
| 625 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx), | ||
| 626 | ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask)); | ||
| 627 | |||
| 614 | break; | 628 | break; |
| 615 | case SNDRV_PCM_TRIGGER_SUSPEND: | 629 | case SNDRV_PCM_TRIGGER_SUSPEND: |
| 616 | case SNDRV_PCM_TRIGGER_STOP: | 630 | case SNDRV_PCM_TRIGGER_STOP: |
| 617 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 631 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
| 618 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), | 632 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), |
| 619 | tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0); | 633 | tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0); |
| 634 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx), | ||
| 635 | ESAI_xSMA_xS_MASK, 0); | ||
| 636 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx), | ||
| 637 | ESAI_xSMB_xS_MASK, 0); | ||
| 620 | 638 | ||
| 621 | /* Disable and reset FIFO */ | 639 | /* Disable and reset FIFO */ |
| 622 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), | 640 | regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), |
| @@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev) | |||
| 906 | return ret; | 924 | return ret; |
| 907 | } | 925 | } |
| 908 | 926 | ||
| 927 | esai_priv->tx_mask = 0xFFFFFFFF; | ||
| 928 | esai_priv->rx_mask = 0xFFFFFFFF; | ||
| 929 | |||
| 930 | /* Clear the TSMA, TSMB, RSMA, RSMB */ | ||
| 931 | regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0); | ||
| 932 | regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0); | ||
| 933 | regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0); | ||
| 934 | regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0); | ||
| 935 | |||
| 909 | ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component, | 936 | ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component, |
| 910 | &fsl_esai_dai, 1); | 937 | &fsl_esai_dai, 1); |
| 911 | if (ret) { | 938 | if (ret) { |
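The comment in the trigger hunk above spells out the reordered enable sequence that avoids the channel swap: TE/RE are set first, then xSMB, and xSMA is written last so the ESAI only starts once everything else is programmed. Reduced to its core, the start path looks roughly like the outline below; it assumes the driver's register macros and the tx_mask/rx_mask fields added by the patch, and omits FIFO setup and error handling.

/* Outline of the reordered ESAI start sequence described above; assumes
 * the driver's fsl_esai.h register macros and the tx_mask/rx_mask fields
 * added by the patch. Not a complete trigger handler.
 */
static void example_esai_start(struct fsl_esai *esai_priv, bool tx, u32 pins)
{
	u32 mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;

	/* step 2: enable the transmitters/receivers first */
	regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
			   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
			   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));

	/* step 3: program the upper slot mask */
	regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));

	/* step 4: the lower slot mask goes last and actually starts the ESAI */
	regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
}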
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index bb12351330e8..69bc4848d787 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
| 21 | #include <sound/simple_card_utils.h> | 21 | #include <sound/simple_card_utils.h> |
| 22 | 22 | ||
| 23 | #define DPCM_SELECTABLE 1 | ||
| 24 | |||
| 23 | struct graph_priv { | 25 | struct graph_priv { |
| 24 | struct snd_soc_card snd_card; | 26 | struct snd_soc_card snd_card; |
| 25 | struct graph_dai_props { | 27 | struct graph_dai_props { |
| @@ -440,6 +442,7 @@ static int graph_for_each_link(struct graph_priv *priv, | |||
| 440 | struct device_node *codec_port; | 442 | struct device_node *codec_port; |
| 441 | struct device_node *codec_port_old = NULL; | 443 | struct device_node *codec_port_old = NULL; |
| 442 | struct asoc_simple_card_data adata; | 444 | struct asoc_simple_card_data adata; |
| 445 | uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev); | ||
| 443 | int rc, ret; | 446 | int rc, ret; |
| 444 | 447 | ||
| 445 | /* loop for all listed CPU port */ | 448 | /* loop for all listed CPU port */ |
| @@ -470,8 +473,9 @@ static int graph_for_each_link(struct graph_priv *priv, | |||
| 470 | * if Codec port has many endpoints, | 473 | * if Codec port has many endpoints, |
| 471 | * or has convert-xxx property | 474 | * or has convert-xxx property |
| 472 | */ | 475 | */ |
| 473 | if ((of_get_child_count(codec_port) > 1) || | 476 | if (dpcm_selectable && |
| 474 | adata.convert_rate || adata.convert_channels) | 477 | ((of_get_child_count(codec_port) > 1) || |
| 478 | adata.convert_rate || adata.convert_channels)) | ||
| 475 | ret = func_dpcm(priv, cpu_ep, codec_ep, li, | 479 | ret = func_dpcm(priv, cpu_ep, codec_ep, li, |
| 476 | (codec_port_old == codec_port)); | 480 | (codec_port_old == codec_port)); |
| 477 | /* else normal sound */ | 481 | /* else normal sound */ |
| @@ -732,7 +736,8 @@ static int graph_remove(struct platform_device *pdev) | |||
| 732 | 736 | ||
| 733 | static const struct of_device_id graph_of_match[] = { | 737 | static const struct of_device_id graph_of_match[] = { |
| 734 | { .compatible = "audio-graph-card", }, | 738 | { .compatible = "audio-graph-card", }, |
| 735 | { .compatible = "audio-graph-scu-card", }, | 739 | { .compatible = "audio-graph-scu-card", |
| 740 | .data = (void *)DPCM_SELECTABLE }, | ||
| 736 | {}, | 741 | {}, |
| 737 | }; | 742 | }; |
| 738 | MODULE_DEVICE_TABLE(of, graph_of_match); | 743 | MODULE_DEVICE_TABLE(of, graph_of_match); |
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 7147bba45a2a..34de32efc4c4 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
| @@ -9,12 +9,15 @@ | |||
| 9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
| 12 | #include <linux/of_device.h> | ||
| 12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
| 13 | #include <linux/string.h> | 14 | #include <linux/string.h> |
| 14 | #include <sound/simple_card.h> | 15 | #include <sound/simple_card.h> |
| 15 | #include <sound/soc-dai.h> | 16 | #include <sound/soc-dai.h> |
| 16 | #include <sound/soc.h> | 17 | #include <sound/soc.h> |
| 17 | 18 | ||
| 19 | #define DPCM_SELECTABLE 1 | ||
| 20 | |||
| 18 | struct simple_priv { | 21 | struct simple_priv { |
| 19 | struct snd_soc_card snd_card; | 22 | struct snd_soc_card snd_card; |
| 20 | struct simple_dai_props { | 23 | struct simple_dai_props { |
| @@ -441,6 +444,7 @@ static int simple_for_each_link(struct simple_priv *priv, | |||
| 441 | struct device *dev = simple_priv_to_dev(priv); | 444 | struct device *dev = simple_priv_to_dev(priv); |
| 442 | struct device_node *top = dev->of_node; | 445 | struct device_node *top = dev->of_node; |
| 443 | struct device_node *node; | 446 | struct device_node *node; |
| 447 | uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev); | ||
| 444 | bool is_top = 0; | 448 | bool is_top = 0; |
| 445 | int ret = 0; | 449 | int ret = 0; |
| 446 | 450 | ||
| @@ -480,8 +484,9 @@ static int simple_for_each_link(struct simple_priv *priv, | |||
| 480 | * if it has many CPUs, | 484 | * if it has many CPUs, |
| 481 | * or has convert-xxx property | 485 | * or has convert-xxx property |
| 482 | */ | 486 | */ |
| 483 | if (num > 2 || | 487 | if (dpcm_selectable && |
| 484 | adata.convert_rate || adata.convert_channels) | 488 | (num > 2 || |
| 489 | adata.convert_rate || adata.convert_channels)) | ||
| 485 | ret = func_dpcm(priv, np, codec, li, is_top); | 490 | ret = func_dpcm(priv, np, codec, li, is_top); |
| 486 | /* else normal sound */ | 491 | /* else normal sound */ |
| 487 | else | 492 | else |
| @@ -822,7 +827,8 @@ static int simple_remove(struct platform_device *pdev) | |||
| 822 | 827 | ||
| 823 | static const struct of_device_id simple_of_match[] = { | 828 | static const struct of_device_id simple_of_match[] = { |
| 824 | { .compatible = "simple-audio-card", }, | 829 | { .compatible = "simple-audio-card", }, |
| 825 | { .compatible = "simple-scu-audio-card", }, | 830 | { .compatible = "simple-scu-audio-card", |
| 831 | .data = (void *)DPCM_SELECTABLE }, | ||
| 826 | {}, | 832 | {}, |
| 827 | }; | 833 | }; |
| 828 | MODULE_DEVICE_TABLE(of, simple_of_match); | 834 | MODULE_DEVICE_TABLE(of, simple_of_match); |
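Both generic card drivers above gate the DPCM link handling on per-compatible match data instead of on the compatible string itself: only the "scu" compatibles carry DPCM_SELECTABLE in .data, and the link loop reads it back with of_device_get_match_data(). A condensed sketch of the pattern, with illustrative names:

/* Condensed sketch of selecting behaviour via OF match data; not part of
 * the patch, names are illustrative.
 */
#include <linux/device.h>
#include <linux/of_device.h>

#define EXAMPLE_DPCM_SELECTABLE	1

static const struct of_device_id example_of_match[] = {
	{ .compatible = "example-audio-card", },
	{ .compatible = "example-scu-audio-card",
	  .data = (void *)EXAMPLE_DPCM_SELECTABLE },
	{},
};

static bool example_wants_dpcm(struct device *dev)
{
	/* NULL/0 for the plain compatible, non-zero for the "scu" one */
	uintptr_t selectable = (uintptr_t)of_device_get_match_data(dev);

	return selectable == EXAMPLE_DPCM_SELECTABLE;
}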
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 08cea5b5cda9..0e8b1c5eec88 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c | |||
| @@ -706,9 +706,17 @@ static int sst_soc_probe(struct snd_soc_component *component) | |||
| 706 | return sst_dsp_init_v2_dpcm(component); | 706 | return sst_dsp_init_v2_dpcm(component); |
| 707 | } | 707 | } |
| 708 | 708 | ||
| 709 | static void sst_soc_remove(struct snd_soc_component *component) | ||
| 710 | { | ||
| 711 | struct sst_data *drv = dev_get_drvdata(component->dev); | ||
| 712 | |||
| 713 | drv->soc_card = NULL; | ||
| 714 | } | ||
| 715 | |||
| 709 | static const struct snd_soc_component_driver sst_soc_platform_drv = { | 716 | static const struct snd_soc_component_driver sst_soc_platform_drv = { |
| 710 | .name = DRV_NAME, | 717 | .name = DRV_NAME, |
| 711 | .probe = sst_soc_probe, | 718 | .probe = sst_soc_probe, |
| 719 | .remove = sst_soc_remove, | ||
| 712 | .ops = &sst_platform_ops, | 720 | .ops = &sst_platform_ops, |
| 713 | .compr_ops = &sst_platform_compr_ops, | 721 | .compr_ops = &sst_platform_compr_ops, |
| 714 | .pcm_new = sst_pcm_new, | 722 | .pcm_new = sst_pcm_new, |
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c index 3263b0495853..c0e0844f75b9 100644 --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c | |||
| @@ -43,6 +43,7 @@ struct cht_mc_private { | |||
| 43 | struct clk *mclk; | 43 | struct clk *mclk; |
| 44 | struct snd_soc_jack jack; | 44 | struct snd_soc_jack jack; |
| 45 | bool ts3a227e_present; | 45 | bool ts3a227e_present; |
| 46 | int quirks; | ||
| 46 | }; | 47 | }; |
| 47 | 48 | ||
| 48 | static int platform_clock_control(struct snd_soc_dapm_widget *w, | 49 | static int platform_clock_control(struct snd_soc_dapm_widget *w, |
| @@ -54,6 +55,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w, | |||
| 54 | struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); | 55 | struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); |
| 55 | int ret; | 56 | int ret; |
| 56 | 57 | ||
| 58 | /* See the comment in snd_cht_mc_probe() */ | ||
| 59 | if (ctx->quirks & QUIRK_PMC_PLT_CLK_0) | ||
| 60 | return 0; | ||
| 61 | |||
| 57 | codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI); | 62 | codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI); |
| 58 | if (!codec_dai) { | 63 | if (!codec_dai) { |
| 59 | dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n"); | 64 | dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n"); |
| @@ -223,6 +228,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime) | |||
| 223 | "jack detection gpios not added, error %d\n", ret); | 228 | "jack detection gpios not added, error %d\n", ret); |
| 224 | } | 229 | } |
| 225 | 230 | ||
| 231 | /* See the comment in snd_cht_mc_probe() */ | ||
| 232 | if (ctx->quirks & QUIRK_PMC_PLT_CLK_0) | ||
| 233 | return 0; | ||
| 234 | |||
| 226 | /* | 235 | /* |
| 227 | * The firmware might enable the clock at | 236 | * The firmware might enable the clock at |
| 228 | * boot (this information may or may not | 237 | * boot (this information may or may not |
| @@ -423,16 +432,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev) | |||
| 423 | const char *mclk_name; | 432 | const char *mclk_name; |
| 424 | struct snd_soc_acpi_mach *mach; | 433 | struct snd_soc_acpi_mach *mach; |
| 425 | const char *platform_name; | 434 | const char *platform_name; |
| 426 | int quirks = 0; | ||
| 427 | |||
| 428 | dmi_id = dmi_first_match(cht_max98090_quirk_table); | ||
| 429 | if (dmi_id) | ||
| 430 | quirks = (unsigned long)dmi_id->driver_data; | ||
| 431 | 435 | ||
| 432 | drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); | 436 | drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); |
| 433 | if (!drv) | 437 | if (!drv) |
| 434 | return -ENOMEM; | 438 | return -ENOMEM; |
| 435 | 439 | ||
| 440 | dmi_id = dmi_first_match(cht_max98090_quirk_table); | ||
| 441 | if (dmi_id) | ||
| 442 | drv->quirks = (unsigned long)dmi_id->driver_data; | ||
| 443 | |||
| 436 | drv->ts3a227e_present = acpi_dev_found("104C227E"); | 444 | drv->ts3a227e_present = acpi_dev_found("104C227E"); |
| 437 | if (!drv->ts3a227e_present) { | 445 | if (!drv->ts3a227e_present) { |
| 438 | /* no need probe TI jack detection chip */ | 446 | /* no need probe TI jack detection chip */ |
| @@ -458,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev) | |||
| 458 | snd_soc_card_cht.dev = &pdev->dev; | 466 | snd_soc_card_cht.dev = &pdev->dev; |
| 459 | snd_soc_card_set_drvdata(&snd_soc_card_cht, drv); | 467 | snd_soc_card_set_drvdata(&snd_soc_card_cht, drv); |
| 460 | 468 | ||
| 461 | if (quirks & QUIRK_PMC_PLT_CLK_0) | 469 | if (drv->quirks & QUIRK_PMC_PLT_CLK_0) |
| 462 | mclk_name = "pmc_plt_clk_0"; | 470 | mclk_name = "pmc_plt_clk_0"; |
| 463 | else | 471 | else |
| 464 | mclk_name = "pmc_plt_clk_3"; | 472 | mclk_name = "pmc_plt_clk_3"; |
| @@ -471,6 +479,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev) | |||
| 471 | return PTR_ERR(drv->mclk); | 479 | return PTR_ERR(drv->mclk); |
| 472 | } | 480 | } |
| 473 | 481 | ||
| 482 | /* | ||
| 483 | * Boards which have the MAX98090's clk connected to clk_0 do not seem | ||
| 484 | * to like it if we muck with the clock. If we disable the clock when | ||
| 485 | * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors | ||
| 486 | * and the PLL never seems to lock again. | ||
| 487 | * So for these boards we enable it here once and leave it at that. | ||
| 488 | */ | ||
| 489 | if (drv->quirks & QUIRK_PMC_PLT_CLK_0) { | ||
| 490 | ret_val = clk_prepare_enable(drv->mclk); | ||
| 491 | if (ret_val < 0) { | ||
| 492 | dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val); | ||
| 493 | return ret_val; | ||
| 494 | } | ||
| 495 | } | ||
| 496 | |||
| 474 | ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht); | 497 | ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht); |
| 475 | if (ret_val) { | 498 | if (ret_val) { |
| 476 | dev_err(&pdev->dev, | 499 | dev_err(&pdev->dev, |
| @@ -481,11 +504,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev) | |||
| 481 | return ret_val; | 504 | return ret_val; |
| 482 | } | 505 | } |
| 483 | 506 | ||
| 507 | static int snd_cht_mc_remove(struct platform_device *pdev) | ||
| 508 | { | ||
| 509 | struct snd_soc_card *card = platform_get_drvdata(pdev); | ||
| 510 | struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card); | ||
| 511 | |||
| 512 | if (ctx->quirks & QUIRK_PMC_PLT_CLK_0) | ||
| 513 | clk_disable_unprepare(ctx->mclk); | ||
| 514 | |||
| 515 | return 0; | ||
| 516 | } | ||
| 517 | |||
| 484 | static struct platform_driver snd_cht_mc_driver = { | 518 | static struct platform_driver snd_cht_mc_driver = { |
| 485 | .driver = { | 519 | .driver = { |
| 486 | .name = "cht-bsw-max98090", | 520 | .name = "cht-bsw-max98090", |
| 487 | }, | 521 | }, |
| 488 | .probe = snd_cht_mc_probe, | 522 | .probe = snd_cht_mc_probe, |
| 523 | .remove = snd_cht_mc_remove, | ||
| 489 | }; | 524 | }; |
| 490 | 525 | ||
| 491 | module_platform_driver(snd_cht_mc_driver) | 526 | module_platform_driver(snd_cht_mc_driver) |
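The cht-bsw-max98090 changes above store the DMI quirk in the driver data, and on QUIRK_PMC_PLT_CLK_0 boards enable the MCLK once at probe and leave it running (the codec PLL never relocks if that clock is gated), while platform_clock_control() and cht_codec_init() skip their normal clock handling; the new remove() drops the reference again. A stripped-down sketch of that probe/remove pairing with assumed names:

/* Stripped-down sketch: keep a clock running for the card's lifetime when
 * a quirk is set, release it in remove(). Not part of the patch above.
 */
#include <linux/clk.h>

#define EXAMPLE_QUIRK_KEEP_MCLK_ON	1

struct example_priv {
	struct clk *mclk;
	int quirks;
};

static int example_enable_mclk(struct example_priv *priv)
{
	if (!(priv->quirks & EXAMPLE_QUIRK_KEEP_MCLK_ON))
		return 0;	/* normal path: DAPM gates the clock per stream */

	/* enable once here; the DAPM event handlers skip this board */
	return clk_prepare_enable(priv->mclk);
}

static void example_disable_mclk(struct example_priv *priv)
{
	if (priv->quirks & EXAMPLE_QUIRK_KEEP_MCLK_ON)
		clk_disable_unprepare(priv->mclk);
}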
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c index 7044d8c2b187..879f14257a3e 100644 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c | |||
| @@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = { | |||
| 405 | }; | 405 | }; |
| 406 | 406 | ||
| 407 | static const unsigned int dmic_2ch[] = { | 407 | static const unsigned int dmic_2ch[] = { |
| 408 | 4, | 408 | 2, |
| 409 | }; | 409 | }; |
| 410 | 410 | ||
| 411 | static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = { | 411 | static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = { |
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c index 28c4806b196a..4bf70b4429f0 100644 --- a/sound/soc/intel/skylake/skl-messages.c +++ b/sound/soc/intel/skylake/skl-messages.c | |||
| @@ -483,6 +483,7 @@ static void skl_set_base_module_format(struct skl_sst *ctx, | |||
| 483 | base_cfg->audio_fmt.bit_depth = format->bit_depth; | 483 | base_cfg->audio_fmt.bit_depth = format->bit_depth; |
| 484 | base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth; | 484 | base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth; |
| 485 | base_cfg->audio_fmt.ch_cfg = format->ch_cfg; | 485 | base_cfg->audio_fmt.ch_cfg = format->ch_cfg; |
| 486 | base_cfg->audio_fmt.sample_type = format->sample_type; | ||
| 486 | 487 | ||
| 487 | dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n", | 488 | dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n", |
| 488 | format->bit_depth, format->valid_bit_depth, | 489 | format->bit_depth, format->valid_bit_depth, |
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index 1ae83f4ccc36..9735e2412251 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c | |||
| @@ -181,6 +181,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params) | |||
| 181 | struct hdac_stream *hstream; | 181 | struct hdac_stream *hstream; |
| 182 | struct hdac_ext_stream *stream; | 182 | struct hdac_ext_stream *stream; |
| 183 | struct hdac_ext_link *link; | 183 | struct hdac_ext_link *link; |
| 184 | unsigned char stream_tag; | ||
| 184 | 185 | ||
| 185 | hstream = snd_hdac_get_stream(bus, params->stream, | 186 | hstream = snd_hdac_get_stream(bus, params->stream, |
| 186 | params->link_dma_id + 1); | 187 | params->link_dma_id + 1); |
| @@ -199,10 +200,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params) | |||
| 199 | 200 | ||
| 200 | snd_hdac_ext_link_stream_setup(stream, format_val); | 201 | snd_hdac_ext_link_stream_setup(stream, format_val); |
| 201 | 202 | ||
| 202 | list_for_each_entry(link, &bus->hlink_list, list) { | 203 | stream_tag = hstream->stream_tag; |
| 203 | if (link->index == params->link_index) | 204 | if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) { |
| 204 | snd_hdac_ext_link_set_stream_id(link, | 205 | list_for_each_entry(link, &bus->hlink_list, list) { |
| 205 | hstream->stream_tag); | 206 | if (link->index == params->link_index) |
| 207 | snd_hdac_ext_link_set_stream_id(link, | ||
| 208 | stream_tag); | ||
| 209 | } | ||
| 206 | } | 210 | } |
| 207 | 211 | ||
| 208 | stream->link_prepared = 1; | 212 | stream->link_prepared = 1; |
| @@ -645,6 +649,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream, | |||
| 645 | struct hdac_ext_stream *link_dev = | 649 | struct hdac_ext_stream *link_dev = |
| 646 | snd_soc_dai_get_dma_data(dai, substream); | 650 | snd_soc_dai_get_dma_data(dai, substream); |
| 647 | struct hdac_ext_link *link; | 651 | struct hdac_ext_link *link; |
| 652 | unsigned char stream_tag; | ||
| 648 | 653 | ||
| 649 | dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name); | 654 | dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name); |
| 650 | 655 | ||
| @@ -654,7 +659,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream, | |||
| 654 | if (!link) | 659 | if (!link) |
| 655 | return -EINVAL; | 660 | return -EINVAL; |
| 656 | 661 | ||
| 657 | snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag); | 662 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
| 663 | stream_tag = hdac_stream(link_dev)->stream_tag; | ||
| 664 | snd_hdac_ext_link_clear_stream_id(link, stream_tag); | ||
| 665 | } | ||
| 666 | |||
| 658 | snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK); | 667 | snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK); |
| 659 | return 0; | 668 | return 0; |
| 660 | } | 669 | } |
| @@ -1453,13 +1462,20 @@ static int skl_platform_soc_probe(struct snd_soc_component *component) | |||
| 1453 | return 0; | 1462 | return 0; |
| 1454 | } | 1463 | } |
| 1455 | 1464 | ||
| 1465 | static void skl_pcm_remove(struct snd_soc_component *component) | ||
| 1466 | { | ||
| 1467 | /* remove topology */ | ||
| 1468 | snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL); | ||
| 1469 | } | ||
| 1470 | |||
| 1456 | static const struct snd_soc_component_driver skl_component = { | 1471 | static const struct snd_soc_component_driver skl_component = { |
| 1457 | .name = "pcm", | 1472 | .name = "pcm", |
| 1458 | .probe = skl_platform_soc_probe, | 1473 | .probe = skl_platform_soc_probe, |
| 1474 | .remove = skl_pcm_remove, | ||
| 1459 | .ops = &skl_platform_ops, | 1475 | .ops = &skl_platform_ops, |
| 1460 | .pcm_new = skl_pcm_new, | 1476 | .pcm_new = skl_pcm_new, |
| 1461 | .pcm_free = skl_pcm_free, | 1477 | .pcm_free = skl_pcm_free, |
| 1462 | .ignore_module_refcount = 1, /* do not increase the refcount in core */ | 1478 | .module_get_upon_open = 1, /* increment refcount when a pcm is opened */ |
| 1463 | }; | 1479 | }; |
| 1464 | 1480 | ||
| 1465 | int skl_platform_register(struct device *dev) | 1481 | int skl_platform_register(struct device *dev) |
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c index 1b8bcdaf02d1..9a163d7064d1 100644 --- a/sound/soc/mediatek/common/mtk-btcvsd.c +++ b/sound/soc/mediatek/common/mtk-btcvsd.c | |||
| @@ -49,6 +49,7 @@ enum bt_sco_state { | |||
| 49 | BT_SCO_STATE_IDLE, | 49 | BT_SCO_STATE_IDLE, |
| 50 | BT_SCO_STATE_RUNNING, | 50 | BT_SCO_STATE_RUNNING, |
| 51 | BT_SCO_STATE_ENDING, | 51 | BT_SCO_STATE_ENDING, |
| 52 | BT_SCO_STATE_LOOPBACK, | ||
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 54 | enum bt_sco_direct { | 55 | enum bt_sco_direct { |
| @@ -486,7 +487,8 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev) | |||
| 486 | if (bt->rx->state != BT_SCO_STATE_RUNNING && | 487 | if (bt->rx->state != BT_SCO_STATE_RUNNING && |
| 487 | bt->rx->state != BT_SCO_STATE_ENDING && | 488 | bt->rx->state != BT_SCO_STATE_ENDING && |
| 488 | bt->tx->state != BT_SCO_STATE_RUNNING && | 489 | bt->tx->state != BT_SCO_STATE_RUNNING && |
| 489 | bt->tx->state != BT_SCO_STATE_ENDING) { | 490 | bt->tx->state != BT_SCO_STATE_ENDING && |
| 491 | bt->tx->state != BT_SCO_STATE_LOOPBACK) { | ||
| 490 | dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n", | 492 | dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n", |
| 491 | __func__, bt->rx->state, bt->tx->state); | 493 | __func__, bt->rx->state, bt->tx->state); |
| 492 | goto irq_handler_exit; | 494 | goto irq_handler_exit; |
| @@ -512,6 +514,42 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev) | |||
| 512 | buf_cnt_tx = btsco_packet_info[packet_type][2]; | 514 | buf_cnt_tx = btsco_packet_info[packet_type][2]; |
| 513 | buf_cnt_rx = btsco_packet_info[packet_type][3]; | 515 | buf_cnt_rx = btsco_packet_info[packet_type][3]; |
| 514 | 516 | ||
| 517 | if (bt->tx->state == BT_SCO_STATE_LOOPBACK) { | ||
| 518 | u8 *src, *dst; | ||
| 519 | unsigned long connsys_addr_rx, ap_addr_rx; | ||
| 520 | unsigned long connsys_addr_tx, ap_addr_tx; | ||
| 521 | |||
| 522 | connsys_addr_rx = *bt->bt_reg_pkt_r; | ||
| 523 | ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base + | ||
| 524 | (connsys_addr_rx & 0xFFFF); | ||
| 525 | |||
| 526 | connsys_addr_tx = *bt->bt_reg_pkt_w; | ||
| 527 | ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base + | ||
| 528 | (connsys_addr_tx & 0xFFFF); | ||
| 529 | |||
| 530 | if (connsys_addr_tx == 0xdeadfeed || | ||
| 531 | connsys_addr_rx == 0xdeadfeed) { | ||
| 532 | /* bt return 0xdeadfeed if read reg during bt sleep */ | ||
| 533 | dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n", | ||
| 534 | __func__); | ||
| 535 | goto irq_handler_exit; | ||
| 536 | } | ||
| 537 | |||
| 538 | src = (u8 *)ap_addr_rx; | ||
| 539 | dst = (u8 *)ap_addr_tx; | ||
| 540 | |||
| 541 | mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src, | ||
| 542 | bt->tx->temp_packet_buf, | ||
| 543 | packet_length, | ||
| 544 | packet_num); | ||
| 545 | mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT, | ||
| 546 | bt->tx->temp_packet_buf, dst, | ||
| 547 | packet_length, | ||
| 548 | packet_num); | ||
| 549 | bt->rx->rw_cnt++; | ||
| 550 | bt->tx->rw_cnt++; | ||
| 551 | } | ||
| 552 | |||
| 515 | if (bt->rx->state == BT_SCO_STATE_RUNNING || | 553 | if (bt->rx->state == BT_SCO_STATE_RUNNING || |
| 516 | bt->rx->state == BT_SCO_STATE_ENDING) { | 554 | bt->rx->state == BT_SCO_STATE_ENDING) { |
| 517 | if (bt->rx->xrun) { | 555 | if (bt->rx->xrun) { |
| @@ -1067,6 +1105,33 @@ static int btcvsd_band_set(struct snd_kcontrol *kcontrol, | |||
| 1067 | return 0; | 1105 | return 0; |
| 1068 | } | 1106 | } |
| 1069 | 1107 | ||
| 1108 | static int btcvsd_loopback_get(struct snd_kcontrol *kcontrol, | ||
| 1109 | struct snd_ctl_elem_value *ucontrol) | ||
| 1110 | { | ||
| 1111 | struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); | ||
| 1112 | struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt); | ||
| 1113 | bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK; | ||
| 1114 | |||
| 1115 | ucontrol->value.integer.value[0] = lpbk_en; | ||
| 1116 | return 0; | ||
| 1117 | } | ||
| 1118 | |||
| 1119 | static int btcvsd_loopback_set(struct snd_kcontrol *kcontrol, | ||
| 1120 | struct snd_ctl_elem_value *ucontrol) | ||
| 1121 | { | ||
| 1122 | struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); | ||
| 1123 | struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt); | ||
| 1124 | |||
| 1125 | if (ucontrol->value.integer.value[0]) { | ||
| 1126 | mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK); | ||
| 1127 | mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK); | ||
| 1128 | } else { | ||
| 1129 | mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING); | ||
| 1130 | mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING); | ||
| 1131 | } | ||
| 1132 | return 0; | ||
| 1133 | } | ||
| 1134 | |||
| 1070 | static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol, | 1135 | static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol, |
| 1071 | struct snd_ctl_elem_value *ucontrol) | 1136 | struct snd_ctl_elem_value *ucontrol) |
| 1072 | { | 1137 | { |
| @@ -1202,6 +1267,8 @@ static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol, | |||
| 1202 | static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = { | 1267 | static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = { |
| 1203 | SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0], | 1268 | SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0], |
| 1204 | btcvsd_band_get, btcvsd_band_set), | 1269 | btcvsd_band_get, btcvsd_band_set), |
| 1270 | SOC_SINGLE_BOOL_EXT("BTCVSD Loopback Switch", 0, | ||
| 1271 | btcvsd_loopback_get, btcvsd_loopback_set), | ||
| 1205 | SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0, | 1272 | SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0, |
| 1206 | btcvsd_tx_mute_get, btcvsd_tx_mute_set), | 1273 | btcvsd_tx_mute_get, btcvsd_tx_mute_set), |
| 1207 | SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0, | 1274 | SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0, |
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c index f523ad103acc..48e81c5d52fc 100644 --- a/sound/soc/mediatek/mt8183/mt8183-afe-clk.c +++ b/sound/soc/mediatek/mt8183/mt8183-afe-clk.c | |||
| @@ -605,6 +605,10 @@ void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id) | |||
| 605 | int m_sel_id = mck_div[mck_id].m_sel_id; | 605 | int m_sel_id = mck_div[mck_id].m_sel_id; |
| 606 | int div_clk_id = mck_div[mck_id].div_clk_id; | 606 | int div_clk_id = mck_div[mck_id].div_clk_id; |
| 607 | 607 | ||
| 608 | /* i2s5 mck not support */ | ||
| 609 | if (mck_id == MT8183_I2S5_MCK) | ||
| 610 | return; | ||
| 611 | |||
| 608 | clk_disable_unprepare(afe_priv->clk[div_clk_id]); | 612 | clk_disable_unprepare(afe_priv->clk[div_clk_id]); |
| 609 | if (m_sel_id >= 0) | 613 | if (m_sel_id >= 0) |
| 610 | clk_disable_unprepare(afe_priv->clk[m_sel_id]); | 614 | clk_disable_unprepare(afe_priv->clk[m_sel_id]); |
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 400e29edb1c9..d0b403a0e27b 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include "rockchip_pdm.h" | 25 | #include "rockchip_pdm.h" |
| 26 | 26 | ||
| 27 | #define PDM_DMA_BURST_SIZE (16) /* size * width: 16*4 = 64 bytes */ | 27 | #define PDM_DMA_BURST_SIZE (8) /* size * width: 8*4 = 32 bytes */ |
| 28 | 28 | ||
| 29 | struct rk_pdm_dev { | 29 | struct rk_pdm_dev { |
| 30 | struct device *dev; | 30 | struct device *dev; |
| @@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai, | |||
| 208 | return -EINVAL; | 208 | return -EINVAL; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | pm_runtime_get_sync(cpu_dai->dev); | ||
| 211 | regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val); | 212 | regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val); |
| 213 | pm_runtime_put(cpu_dai->dev); | ||
| 212 | 214 | ||
| 213 | return 0; | 215 | return 0; |
| 214 | } | 216 | } |
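Note: two independent fixes here. The DMA burst size drops from 16 to 8 words (64 to 32 bytes), and the PDM_CLK_CTRL update in set_fmt is now bracketed with runtime-PM calls so the regmap access only happens while the controller is guaranteed to be powered and clocked. The bracketing pattern in isolation, with invented names:

#include <linux/pm_runtime.h>
#include <linux/regmap.h>

/*
 * Sketch: guard a register update with runtime PM so the access cannot
 * hit a gated or powered-down block.
 */
static int demo_update_clk_ctrl(struct device *dev, struct regmap *regmap,
				unsigned int reg, unsigned int mask,
				unsigned int val)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume (or keep) the device powered */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* get_sync bumped the count even on error */
		return ret;
	}

	ret = regmap_update_bits(regmap, reg, mask, val);

	pm_runtime_put(dev);		/* drop the reference, allow autosuspend */
	return ret;
}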
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 4231001226f4..ab471d550d17 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
| @@ -1130,11 +1130,11 @@ static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = { | |||
| 1130 | }; | 1130 | }; |
| 1131 | 1131 | ||
| 1132 | static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = { | 1132 | static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = { |
| 1133 | { "Playback Mixer", NULL, "Primary" }, | 1133 | { "Playback Mixer", NULL, "Primary Playback" }, |
| 1134 | { "Playback Mixer", NULL, "Secondary" }, | 1134 | { "Playback Mixer", NULL, "Secondary Playback" }, |
| 1135 | 1135 | ||
| 1136 | { "Mixer DAI TX", NULL, "Playback Mixer" }, | 1136 | { "Mixer DAI TX", NULL, "Playback Mixer" }, |
| 1137 | { "Playback Mixer", NULL, "Mixer DAI RX" }, | 1137 | { "Primary Capture", NULL, "Mixer DAI RX" }, |
| 1138 | }; | 1138 | }; |
| 1139 | 1139 | ||
| 1140 | static const struct snd_soc_component_driver samsung_i2s_component = { | 1140 | static const struct snd_soc_component_driver samsung_i2s_component = { |
| @@ -1155,7 +1155,8 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv, | |||
| 1155 | int num_dais) | 1155 | int num_dais) |
| 1156 | { | 1156 | { |
| 1157 | static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" }; | 1157 | static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" }; |
| 1158 | static const char *stream_names[] = { "Primary", "Secondary" }; | 1158 | static const char *stream_names[] = { "Primary Playback", |
| 1159 | "Secondary Playback" }; | ||
| 1159 | struct snd_soc_dai_driver *dai_drv; | 1160 | struct snd_soc_dai_driver *dai_drv; |
| 1160 | struct i2s_dai *dai; | 1161 | struct i2s_dai *dai; |
| 1161 | int i; | 1162 | int i; |
| @@ -1201,6 +1202,7 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv, | |||
| 1201 | dai_drv->capture.channels_max = 2; | 1202 | dai_drv->capture.channels_max = 2; |
| 1202 | dai_drv->capture.rates = i2s_dai_data->pcm_rates; | 1203 | dai_drv->capture.rates = i2s_dai_data->pcm_rates; |
| 1203 | dai_drv->capture.formats = SAMSUNG_I2S_FMTS; | 1204 | dai_drv->capture.formats = SAMSUNG_I2S_FMTS; |
| 1205 | dai_drv->capture.stream_name = "Primary Capture"; | ||
| 1204 | 1206 | ||
| 1205 | return 0; | 1207 | return 0; |
| 1206 | } | 1208 | } |
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c index 694512f980fd..1dc54c4206f0 100644 --- a/sound/soc/samsung/odroid.c +++ b/sound/soc/samsung/odroid.c | |||
| @@ -91,11 +91,11 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream, | |||
| 91 | return ret; | 91 | return ret; |
| 92 | 92 | ||
| 93 | /* | 93 | /* |
| 94 | * We add 1 to the rclk_freq value in order to avoid too low clock | 94 | * We add 2 to the rclk_freq value in order to avoid too low clock |
| 95 | * frequency values due to the EPLL output frequency not being exact | 95 | * frequency values due to the EPLL output frequency not being exact |
| 96 | * multiple of the audio sampling rate. | 96 | * multiple of the audio sampling rate. |
| 97 | */ | 97 | */ |
| 98 | rclk_freq = params_rate(params) * rfs + 1; | 98 | rclk_freq = params_rate(params) * rfs + 2; |
| 99 | 99 | ||
| 100 | ret = clk_set_rate(priv->sclk_i2s, rclk_freq); | 100 | ret = clk_set_rate(priv->sclk_i2s, rclk_freq); |
| 101 | if (ret < 0) | 101 | if (ret < 0) |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 022996d2db13..4fe83e611c01 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
| @@ -110,6 +110,8 @@ static const struct of_device_id rsnd_of_match[] = { | |||
| 110 | { .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 }, | 110 | { .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 }, |
| 111 | { .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 }, | 111 | { .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 }, |
| 112 | { .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 }, | 112 | { .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 }, |
| 113 | /* Special Handling */ | ||
| 114 | { .compatible = "renesas,rcar_sound-r8a77990", .data = (void *)(RSND_GEN3 | RSND_SOC_E) }, | ||
| 113 | {}, | 115 | {}, |
| 114 | }; | 116 | }; |
| 115 | MODULE_DEVICE_TABLE(of, rsnd_of_match); | 117 | MODULE_DEVICE_TABLE(of, rsnd_of_match); |
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 90625c57847b..0e6ef4e18400 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h | |||
| @@ -607,6 +607,8 @@ struct rsnd_priv { | |||
| 607 | #define RSND_GEN1 (1 << 0) | 607 | #define RSND_GEN1 (1 << 0) |
| 608 | #define RSND_GEN2 (2 << 0) | 608 | #define RSND_GEN2 (2 << 0) |
| 609 | #define RSND_GEN3 (3 << 0) | 609 | #define RSND_GEN3 (3 << 0) |
| 610 | #define RSND_SOC_MASK (0xFF << 4) | ||
| 611 | #define RSND_SOC_E (1 << 4) /* E1/E2/E3 */ | ||
| 610 | 612 | ||
| 611 | /* | 613 | /* |
| 612 | * below value will be filled on rsnd_gen_probe() | 614 | * below value will be filled on rsnd_gen_probe() |
| @@ -679,6 +681,9 @@ struct rsnd_priv { | |||
| 679 | #define rsnd_is_gen1(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1) | 681 | #define rsnd_is_gen1(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1) |
| 680 | #define rsnd_is_gen2(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2) | 682 | #define rsnd_is_gen2(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2) |
| 681 | #define rsnd_is_gen3(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3) | 683 | #define rsnd_is_gen3(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3) |
| 684 | #define rsnd_is_e3(priv) (((priv)->flags & \ | ||
| 685 | (RSND_GEN_MASK | RSND_SOC_MASK)) == \ | ||
| 686 | (RSND_GEN3 | RSND_SOC_E)) | ||
| 682 | 687 | ||
| 683 | #define rsnd_flags_has(p, f) ((p)->flags & (f)) | 688 | #define rsnd_flags_has(p, f) ((p)->flags & (f)) |
| 684 | #define rsnd_flags_set(p, f) ((p)->flags |= (f)) | 689 | #define rsnd_flags_set(p, f) ((p)->flags |= (f)) |
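Note: priv->flags now carries two packed fields, the SoC generation in the low bits and a SoC "flavor" in bits 11:4, so E3 (r8a77990) can be told apart from other Gen3 parts without a soc_device_match() lookup at stream time (see the src.c hunk below). A tiny standalone check of the mask arithmetic, assuming RSND_GEN_MASK covers the low nibble (its definition is not shown in this hunk):

#include <assert.h>
#include <stdio.h>

#define RSND_GEN_MASK  (0xF << 0)	/* assumed, not part of this hunk */
#define RSND_GEN3      (3 << 0)
#define RSND_SOC_MASK  (0xFF << 4)
#define RSND_SOC_E     (1 << 4)

int main(void)
{
	unsigned int e3_flags   = RSND_GEN3 | RSND_SOC_E;	/* r8a77990 OF entry */
	unsigned int gen3_flags = RSND_GEN3;			/* plain Gen3 */

	/* rsnd_is_e3(): both the generation and the flavor field must match */
	assert((e3_flags & (RSND_GEN_MASK | RSND_SOC_MASK)) ==
	       (RSND_GEN3 | RSND_SOC_E));
	assert((gen3_flags & (RSND_GEN_MASK | RSND_SOC_MASK)) !=
	       (RSND_GEN3 | RSND_SOC_E));

	printf("e3=0x%x gen3=0x%x\n", e3_flags, gen3_flags);
	return 0;
}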
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index db81e066b92e..585ffba0244b 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include "rsnd.h" | 16 | #include "rsnd.h" |
| 17 | #include <linux/sys_soc.h> | ||
| 18 | 17 | ||
| 19 | #define SRC_NAME "src" | 18 | #define SRC_NAME "src" |
| 20 | 19 | ||
| @@ -135,7 +134,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv, | |||
| 135 | return rate; | 134 | return rate; |
| 136 | } | 135 | } |
| 137 | 136 | ||
| 138 | const static u32 bsdsr_table_pattern1[] = { | 137 | static const u32 bsdsr_table_pattern1[] = { |
| 139 | 0x01800000, /* 6 - 1/6 */ | 138 | 0x01800000, /* 6 - 1/6 */ |
| 140 | 0x01000000, /* 6 - 1/4 */ | 139 | 0x01000000, /* 6 - 1/4 */ |
| 141 | 0x00c00000, /* 6 - 1/3 */ | 140 | 0x00c00000, /* 6 - 1/3 */ |
| @@ -144,7 +143,7 @@ const static u32 bsdsr_table_pattern1[] = { | |||
| 144 | 0x00400000, /* 6 - 1 */ | 143 | 0x00400000, /* 6 - 1 */ |
| 145 | }; | 144 | }; |
| 146 | 145 | ||
| 147 | const static u32 bsdsr_table_pattern2[] = { | 146 | static const u32 bsdsr_table_pattern2[] = { |
| 148 | 0x02400000, /* 6 - 1/6 */ | 147 | 0x02400000, /* 6 - 1/6 */ |
| 149 | 0x01800000, /* 6 - 1/4 */ | 148 | 0x01800000, /* 6 - 1/4 */ |
| 150 | 0x01200000, /* 6 - 1/3 */ | 149 | 0x01200000, /* 6 - 1/3 */ |
| @@ -153,7 +152,7 @@ const static u32 bsdsr_table_pattern2[] = { | |||
| 153 | 0x00600000, /* 6 - 1 */ | 152 | 0x00600000, /* 6 - 1 */ |
| 154 | }; | 153 | }; |
| 155 | 154 | ||
| 156 | const static u32 bsisr_table[] = { | 155 | static const u32 bsisr_table[] = { |
| 157 | 0x00100060, /* 6 - 1/6 */ | 156 | 0x00100060, /* 6 - 1/6 */ |
| 158 | 0x00100040, /* 6 - 1/4 */ | 157 | 0x00100040, /* 6 - 1/4 */ |
| 159 | 0x00100030, /* 6 - 1/3 */ | 158 | 0x00100030, /* 6 - 1/3 */ |
| @@ -162,7 +161,7 @@ const static u32 bsisr_table[] = { | |||
| 162 | 0x00100020, /* 6 - 1 */ | 161 | 0x00100020, /* 6 - 1 */ |
| 163 | }; | 162 | }; |
| 164 | 163 | ||
| 165 | const static u32 chan288888[] = { | 164 | static const u32 chan288888[] = { |
| 166 | 0x00000006, /* 1 to 2 */ | 165 | 0x00000006, /* 1 to 2 */ |
| 167 | 0x000001fe, /* 1 to 8 */ | 166 | 0x000001fe, /* 1 to 8 */ |
| 168 | 0x000001fe, /* 1 to 8 */ | 167 | 0x000001fe, /* 1 to 8 */ |
| @@ -171,7 +170,7 @@ const static u32 chan288888[] = { | |||
| 171 | 0x000001fe, /* 1 to 8 */ | 170 | 0x000001fe, /* 1 to 8 */ |
| 172 | }; | 171 | }; |
| 173 | 172 | ||
| 174 | const static u32 chan244888[] = { | 173 | static const u32 chan244888[] = { |
| 175 | 0x00000006, /* 1 to 2 */ | 174 | 0x00000006, /* 1 to 2 */ |
| 176 | 0x0000001e, /* 1 to 4 */ | 175 | 0x0000001e, /* 1 to 4 */ |
| 177 | 0x0000001e, /* 1 to 4 */ | 176 | 0x0000001e, /* 1 to 4 */ |
| @@ -180,7 +179,7 @@ const static u32 chan244888[] = { | |||
| 180 | 0x000001fe, /* 1 to 8 */ | 179 | 0x000001fe, /* 1 to 8 */ |
| 181 | }; | 180 | }; |
| 182 | 181 | ||
| 183 | const static u32 chan222222[] = { | 182 | static const u32 chan222222[] = { |
| 184 | 0x00000006, /* 1 to 2 */ | 183 | 0x00000006, /* 1 to 2 */ |
| 185 | 0x00000006, /* 1 to 2 */ | 184 | 0x00000006, /* 1 to 2 */ |
| 186 | 0x00000006, /* 1 to 2 */ | 185 | 0x00000006, /* 1 to 2 */ |
| @@ -189,18 +188,12 @@ const static u32 chan222222[] = { | |||
| 189 | 0x00000006, /* 1 to 2 */ | 188 | 0x00000006, /* 1 to 2 */ |
| 190 | }; | 189 | }; |
| 191 | 190 | ||
| 192 | static const struct soc_device_attribute ov_soc[] = { | ||
| 193 | { .soc_id = "r8a77990" }, /* E3 */ | ||
| 194 | { /* sentinel */ } | ||
| 195 | }; | ||
| 196 | |||
| 197 | static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, | 191 | static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, |
| 198 | struct rsnd_mod *mod) | 192 | struct rsnd_mod *mod) |
| 199 | { | 193 | { |
| 200 | struct rsnd_priv *priv = rsnd_mod_to_priv(mod); | 194 | struct rsnd_priv *priv = rsnd_mod_to_priv(mod); |
| 201 | struct device *dev = rsnd_priv_to_dev(priv); | 195 | struct device *dev = rsnd_priv_to_dev(priv); |
| 202 | struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); | 196 | struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); |
| 203 | const struct soc_device_attribute *soc = soc_device_match(ov_soc); | ||
| 204 | int is_play = rsnd_io_is_play(io); | 197 | int is_play = rsnd_io_is_play(io); |
| 205 | int use_src = 0; | 198 | int use_src = 0; |
| 206 | u32 fin, fout; | 199 | u32 fin, fout; |
| @@ -307,7 +300,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, | |||
| 307 | /* | 300 | /* |
| 308 | * E3 need to overwrite | 301 | * E3 need to overwrite |
| 309 | */ | 302 | */ |
| 310 | if (soc) | 303 | if (rsnd_is_e3(priv)) |
| 311 | switch (rsnd_mod_id(mod)) { | 304 | switch (rsnd_mod_id(mod)) { |
| 312 | case 0: | 305 | case 0: |
| 313 | case 4: | 306 | case 4: |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 93d316d5bf8e..46e3ab0fced4 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
| @@ -947,7 +947,7 @@ static void soc_cleanup_component(struct snd_soc_component *component) | |||
| 947 | snd_soc_dapm_free(snd_soc_component_get_dapm(component)); | 947 | snd_soc_dapm_free(snd_soc_component_get_dapm(component)); |
| 948 | soc_cleanup_component_debugfs(component); | 948 | soc_cleanup_component_debugfs(component); |
| 949 | component->card = NULL; | 949 | component->card = NULL; |
| 950 | if (!component->driver->ignore_module_refcount) | 950 | if (!component->driver->module_get_upon_open) |
| 951 | module_put(component->dev->driver->owner); | 951 | module_put(component->dev->driver->owner); |
| 952 | } | 952 | } |
| 953 | 953 | ||
| @@ -1381,7 +1381,7 @@ static int soc_probe_component(struct snd_soc_card *card, | |||
| 1381 | return 0; | 1381 | return 0; |
| 1382 | } | 1382 | } |
| 1383 | 1383 | ||
| 1384 | if (!component->driver->ignore_module_refcount && | 1384 | if (!component->driver->module_get_upon_open && |
| 1385 | !try_module_get(component->dev->driver->owner)) | 1385 | !try_module_get(component->dev->driver->owner)) |
| 1386 | return -ENODEV; | 1386 | return -ENODEV; |
| 1387 | 1387 | ||
| @@ -2797,6 +2797,7 @@ int snd_soc_register_card(struct snd_soc_card *card) | |||
| 2797 | 2797 | ||
| 2798 | ret = soc_init_dai_link(card, link); | 2798 | ret = soc_init_dai_link(card, link); |
| 2799 | if (ret) { | 2799 | if (ret) { |
| 2800 | soc_cleanup_platform(card); | ||
| 2800 | dev_err(card->dev, "ASoC: failed to init link %s\n", | 2801 | dev_err(card->dev, "ASoC: failed to init link %s\n", |
| 2801 | link->name); | 2802 | link->name); |
| 2802 | mutex_unlock(&client_mutex); | 2803 | mutex_unlock(&client_mutex); |
| @@ -2819,6 +2820,7 @@ int snd_soc_register_card(struct snd_soc_card *card) | |||
| 2819 | card->instantiated = 0; | 2820 | card->instantiated = 0; |
| 2820 | mutex_init(&card->mutex); | 2821 | mutex_init(&card->mutex); |
| 2821 | mutex_init(&card->dapm_mutex); | 2822 | mutex_init(&card->dapm_mutex); |
| 2823 | spin_lock_init(&card->dpcm_lock); | ||
| 2822 | 2824 | ||
| 2823 | return snd_soc_bind_card(card); | 2825 | return snd_soc_bind_card(card); |
| 2824 | } | 2826 | } |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1ec06ef6d161..0382a47b30bd 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
| @@ -3650,6 +3650,13 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm, | |||
| 3650 | case snd_soc_dapm_dac: | 3650 | case snd_soc_dapm_dac: |
| 3651 | case snd_soc_dapm_aif_in: | 3651 | case snd_soc_dapm_aif_in: |
| 3652 | case snd_soc_dapm_pga: | 3652 | case snd_soc_dapm_pga: |
| 3653 | case snd_soc_dapm_buffer: | ||
| 3654 | case snd_soc_dapm_scheduler: | ||
| 3655 | case snd_soc_dapm_effect: | ||
| 3656 | case snd_soc_dapm_src: | ||
| 3657 | case snd_soc_dapm_asrc: | ||
| 3658 | case snd_soc_dapm_encoder: | ||
| 3659 | case snd_soc_dapm_decoder: | ||
| 3653 | case snd_soc_dapm_out_drv: | 3660 | case snd_soc_dapm_out_drv: |
| 3654 | case snd_soc_dapm_micbias: | 3661 | case snd_soc_dapm_micbias: |
| 3655 | case snd_soc_dapm_line: | 3662 | case snd_soc_dapm_line: |
| @@ -3957,6 +3964,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card, | |||
| 3957 | int count; | 3964 | int count; |
| 3958 | 3965 | ||
| 3959 | devm_kfree(card->dev, (void *)*private_value); | 3966 | devm_kfree(card->dev, (void *)*private_value); |
| 3967 | |||
| 3968 | if (!w_param_text) | ||
| 3969 | return; | ||
| 3970 | |||
| 3960 | for (count = 0 ; count < num_params; count++) | 3971 | for (count = 0 ; count < num_params; count++) |
| 3961 | devm_kfree(card->dev, (void *)w_param_text[count]); | 3972 | devm_kfree(card->dev, (void *)w_param_text[count]); |
| 3962 | devm_kfree(card->dev, w_param_text); | 3973 | devm_kfree(card->dev, w_param_text); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 0d5ec68a1e50..be80a12fba27 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
| 16 | #include <linux/pinctrl/consumer.h> | 16 | #include <linux/pinctrl/consumer.h> |
| 17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
| 18 | #include <linux/module.h> | ||
| 18 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 19 | #include <linux/workqueue.h> | 20 | #include <linux/workqueue.h> |
| 20 | #include <linux/export.h> | 21 | #include <linux/export.h> |
| @@ -463,6 +464,9 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream, | |||
| 463 | continue; | 464 | continue; |
| 464 | 465 | ||
| 465 | component->driver->ops->close(substream); | 466 | component->driver->ops->close(substream); |
| 467 | |||
| 468 | if (component->driver->module_get_upon_open) | ||
| 469 | module_put(component->dev->driver->owner); | ||
| 466 | } | 470 | } |
| 467 | 471 | ||
| 468 | return 0; | 472 | return 0; |
| @@ -513,6 +517,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream) | |||
| 513 | !component->driver->ops->open) | 517 | !component->driver->ops->open) |
| 514 | continue; | 518 | continue; |
| 515 | 519 | ||
| 520 | if (component->driver->module_get_upon_open && | ||
| 521 | !try_module_get(component->dev->driver->owner)) { | ||
| 522 | ret = -ENODEV; | ||
| 523 | goto module_err; | ||
| 524 | } | ||
| 525 | |||
| 516 | ret = component->driver->ops->open(substream); | 526 | ret = component->driver->ops->open(substream); |
| 517 | if (ret < 0) { | 527 | if (ret < 0) { |
| 518 | dev_err(component->dev, | 528 | dev_err(component->dev, |
| @@ -628,7 +638,7 @@ codec_dai_err: | |||
| 628 | 638 | ||
| 629 | component_err: | 639 | component_err: |
| 630 | soc_pcm_components_close(substream, component); | 640 | soc_pcm_components_close(substream, component); |
| 631 | 641 | module_err: | |
| 632 | if (cpu_dai->driver->ops->shutdown) | 642 | if (cpu_dai->driver->ops->shutdown) |
| 633 | cpu_dai->driver->ops->shutdown(substream, cpu_dai); | 643 | cpu_dai->driver->ops->shutdown(substream, cpu_dai); |
| 634 | out: | 644 | out: |
| @@ -954,10 +964,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, | |||
| 954 | codec_params = *params; | 964 | codec_params = *params; |
| 955 | 965 | ||
| 956 | /* fixup params based on TDM slot masks */ | 966 | /* fixup params based on TDM slot masks */ |
| 957 | if (codec_dai->tx_mask) | 967 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && |
| 968 | codec_dai->tx_mask) | ||
| 958 | soc_pcm_codec_params_fixup(&codec_params, | 969 | soc_pcm_codec_params_fixup(&codec_params, |
| 959 | codec_dai->tx_mask); | 970 | codec_dai->tx_mask); |
| 960 | if (codec_dai->rx_mask) | 971 | |
| 972 | if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && | ||
| 973 | codec_dai->rx_mask) | ||
| 961 | soc_pcm_codec_params_fixup(&codec_params, | 974 | soc_pcm_codec_params_fixup(&codec_params, |
| 962 | codec_dai->rx_mask); | 975 | codec_dai->rx_mask); |
| 963 | 976 | ||
| @@ -1213,6 +1226,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe, | |||
| 1213 | struct snd_soc_pcm_runtime *be, int stream) | 1226 | struct snd_soc_pcm_runtime *be, int stream) |
| 1214 | { | 1227 | { |
| 1215 | struct snd_soc_dpcm *dpcm; | 1228 | struct snd_soc_dpcm *dpcm; |
| 1229 | unsigned long flags; | ||
| 1216 | 1230 | ||
| 1217 | /* only add new dpcms */ | 1231 | /* only add new dpcms */ |
| 1218 | for_each_dpcm_be(fe, stream, dpcm) { | 1232 | for_each_dpcm_be(fe, stream, dpcm) { |
| @@ -1228,8 +1242,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe, | |||
| 1228 | dpcm->fe = fe; | 1242 | dpcm->fe = fe; |
| 1229 | be->dpcm[stream].runtime = fe->dpcm[stream].runtime; | 1243 | be->dpcm[stream].runtime = fe->dpcm[stream].runtime; |
| 1230 | dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW; | 1244 | dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW; |
| 1245 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 1231 | list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients); | 1246 | list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients); |
| 1232 | list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients); | 1247 | list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients); |
| 1248 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 1233 | 1249 | ||
| 1234 | dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n", | 1250 | dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n", |
| 1235 | stream ? "capture" : "playback", fe->dai_link->name, | 1251 | stream ? "capture" : "playback", fe->dai_link->name, |
| @@ -1275,6 +1291,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe, | |||
| 1275 | void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) | 1291 | void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) |
| 1276 | { | 1292 | { |
| 1277 | struct snd_soc_dpcm *dpcm, *d; | 1293 | struct snd_soc_dpcm *dpcm, *d; |
| 1294 | unsigned long flags; | ||
| 1278 | 1295 | ||
| 1279 | for_each_dpcm_be_safe(fe, stream, dpcm, d) { | 1296 | for_each_dpcm_be_safe(fe, stream, dpcm, d) { |
| 1280 | dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n", | 1297 | dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n", |
| @@ -1294,8 +1311,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) | |||
| 1294 | #ifdef CONFIG_DEBUG_FS | 1311 | #ifdef CONFIG_DEBUG_FS |
| 1295 | debugfs_remove(dpcm->debugfs_state); | 1312 | debugfs_remove(dpcm->debugfs_state); |
| 1296 | #endif | 1313 | #endif |
| 1314 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 1297 | list_del(&dpcm->list_be); | 1315 | list_del(&dpcm->list_be); |
| 1298 | list_del(&dpcm->list_fe); | 1316 | list_del(&dpcm->list_fe); |
| 1317 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 1299 | kfree(dpcm); | 1318 | kfree(dpcm); |
| 1300 | } | 1319 | } |
| 1301 | } | 1320 | } |
| @@ -1547,10 +1566,13 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe, | |||
| 1547 | void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream) | 1566 | void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream) |
| 1548 | { | 1567 | { |
| 1549 | struct snd_soc_dpcm *dpcm; | 1568 | struct snd_soc_dpcm *dpcm; |
| 1569 | unsigned long flags; | ||
| 1550 | 1570 | ||
| 1571 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 1551 | for_each_dpcm_be(fe, stream, dpcm) | 1572 | for_each_dpcm_be(fe, stream, dpcm) |
| 1552 | dpcm->be->dpcm[stream].runtime_update = | 1573 | dpcm->be->dpcm[stream].runtime_update = |
| 1553 | SND_SOC_DPCM_UPDATE_NO; | 1574 | SND_SOC_DPCM_UPDATE_NO; |
| 1575 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 1554 | } | 1576 | } |
| 1555 | 1577 | ||
| 1556 | static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe, | 1578 | static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe, |
| @@ -1899,10 +1921,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream, | |||
| 1899 | struct snd_soc_pcm_runtime *be = dpcm->be; | 1921 | struct snd_soc_pcm_runtime *be = dpcm->be; |
| 1900 | struct snd_pcm_substream *be_substream = | 1922 | struct snd_pcm_substream *be_substream = |
| 1901 | snd_soc_dpcm_get_substream(be, stream); | 1923 | snd_soc_dpcm_get_substream(be, stream); |
| 1902 | struct snd_soc_pcm_runtime *rtd = be_substream->private_data; | 1924 | struct snd_soc_pcm_runtime *rtd; |
| 1903 | struct snd_soc_dai *codec_dai; | 1925 | struct snd_soc_dai *codec_dai; |
| 1904 | int i; | 1926 | int i; |
| 1905 | 1927 | ||
| 1928 | /* A backend may not have the requested substream */ | ||
| 1929 | if (!be_substream) | ||
| 1930 | continue; | ||
| 1931 | |||
| 1932 | rtd = be_substream->private_data; | ||
| 1906 | if (rtd->dai_link->be_hw_params_fixup) | 1933 | if (rtd->dai_link->be_hw_params_fixup) |
| 1907 | continue; | 1934 | continue; |
| 1908 | 1935 | ||
| @@ -2571,6 +2598,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream) | |||
| 2571 | struct snd_soc_dpcm *dpcm; | 2598 | struct snd_soc_dpcm *dpcm; |
| 2572 | enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; | 2599 | enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; |
| 2573 | int ret; | 2600 | int ret; |
| 2601 | unsigned long flags; | ||
| 2574 | 2602 | ||
| 2575 | dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n", | 2603 | dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n", |
| 2576 | stream ? "capture" : "playback", fe->dai_link->name); | 2604 | stream ? "capture" : "playback", fe->dai_link->name); |
| @@ -2640,11 +2668,13 @@ close: | |||
| 2640 | dpcm_be_dai_shutdown(fe, stream); | 2668 | dpcm_be_dai_shutdown(fe, stream); |
| 2641 | disconnect: | 2669 | disconnect: |
| 2642 | /* disconnect any non started BEs */ | 2670 | /* disconnect any non started BEs */ |
| 2671 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 2643 | for_each_dpcm_be(fe, stream, dpcm) { | 2672 | for_each_dpcm_be(fe, stream, dpcm) { |
| 2644 | struct snd_soc_pcm_runtime *be = dpcm->be; | 2673 | struct snd_soc_pcm_runtime *be = dpcm->be; |
| 2645 | if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) | 2674 | if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) |
| 2646 | dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; | 2675 | dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; |
| 2647 | } | 2676 | } |
| 2677 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 2648 | 2678 | ||
| 2649 | return ret; | 2679 | return ret; |
| 2650 | } | 2680 | } |
| @@ -3221,7 +3251,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe, | |||
| 3221 | { | 3251 | { |
| 3222 | struct snd_soc_dpcm *dpcm; | 3252 | struct snd_soc_dpcm *dpcm; |
| 3223 | int state; | 3253 | int state; |
| 3254 | int ret = 1; | ||
| 3255 | unsigned long flags; | ||
| 3224 | 3256 | ||
| 3257 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 3225 | for_each_dpcm_fe(be, stream, dpcm) { | 3258 | for_each_dpcm_fe(be, stream, dpcm) { |
| 3226 | 3259 | ||
| 3227 | if (dpcm->fe == fe) | 3260 | if (dpcm->fe == fe) |
| @@ -3230,12 +3263,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe, | |||
| 3230 | state = dpcm->fe->dpcm[stream].state; | 3263 | state = dpcm->fe->dpcm[stream].state; |
| 3231 | if (state == SND_SOC_DPCM_STATE_START || | 3264 | if (state == SND_SOC_DPCM_STATE_START || |
| 3232 | state == SND_SOC_DPCM_STATE_PAUSED || | 3265 | state == SND_SOC_DPCM_STATE_PAUSED || |
| 3233 | state == SND_SOC_DPCM_STATE_SUSPEND) | 3266 | state == SND_SOC_DPCM_STATE_SUSPEND) { |
| 3234 | return 0; | 3267 | ret = 0; |
| 3268 | break; | ||
| 3269 | } | ||
| 3235 | } | 3270 | } |
| 3271 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 3236 | 3272 | ||
| 3237 | /* it's safe to free/stop this BE DAI */ | 3273 | /* it's safe to free/stop this BE DAI */ |
| 3238 | return 1; | 3274 | return ret; |
| 3239 | } | 3275 | } |
| 3240 | EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop); | 3276 | EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop); |
| 3241 | 3277 | ||
| @@ -3248,7 +3284,10 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe, | |||
| 3248 | { | 3284 | { |
| 3249 | struct snd_soc_dpcm *dpcm; | 3285 | struct snd_soc_dpcm *dpcm; |
| 3250 | int state; | 3286 | int state; |
| 3287 | int ret = 1; | ||
| 3288 | unsigned long flags; | ||
| 3251 | 3289 | ||
| 3290 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 3252 | for_each_dpcm_fe(be, stream, dpcm) { | 3291 | for_each_dpcm_fe(be, stream, dpcm) { |
| 3253 | 3292 | ||
| 3254 | if (dpcm->fe == fe) | 3293 | if (dpcm->fe == fe) |
| @@ -3258,12 +3297,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe, | |||
| 3258 | if (state == SND_SOC_DPCM_STATE_START || | 3297 | if (state == SND_SOC_DPCM_STATE_START || |
| 3259 | state == SND_SOC_DPCM_STATE_PAUSED || | 3298 | state == SND_SOC_DPCM_STATE_PAUSED || |
| 3260 | state == SND_SOC_DPCM_STATE_SUSPEND || | 3299 | state == SND_SOC_DPCM_STATE_SUSPEND || |
| 3261 | state == SND_SOC_DPCM_STATE_PREPARE) | 3300 | state == SND_SOC_DPCM_STATE_PREPARE) { |
| 3262 | return 0; | 3301 | ret = 0; |
| 3302 | break; | ||
| 3303 | } | ||
| 3263 | } | 3304 | } |
| 3305 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | ||
| 3264 | 3306 | ||
| 3265 | /* it's safe to change hw_params */ | 3307 | /* it's safe to change hw_params */ |
| 3266 | return 1; | 3308 | return ret; |
| 3267 | } | 3309 | } |
| 3268 | EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params); | 3310 | EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params); |
| 3269 | 3311 | ||
| @@ -3302,6 +3344,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, | |||
| 3302 | struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params; | 3344 | struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params; |
| 3303 | struct snd_soc_dpcm *dpcm; | 3345 | struct snd_soc_dpcm *dpcm; |
| 3304 | ssize_t offset = 0; | 3346 | ssize_t offset = 0; |
| 3347 | unsigned long flags; | ||
| 3305 | 3348 | ||
| 3306 | /* FE state */ | 3349 | /* FE state */ |
| 3307 | offset += snprintf(buf + offset, size - offset, | 3350 | offset += snprintf(buf + offset, size - offset, |
| @@ -3329,6 +3372,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, | |||
| 3329 | goto out; | 3372 | goto out; |
| 3330 | } | 3373 | } |
| 3331 | 3374 | ||
| 3375 | spin_lock_irqsave(&fe->card->dpcm_lock, flags); | ||
| 3332 | for_each_dpcm_be(fe, stream, dpcm) { | 3376 | for_each_dpcm_be(fe, stream, dpcm) { |
| 3333 | struct snd_soc_pcm_runtime *be = dpcm->be; | 3377 | struct snd_soc_pcm_runtime *be = dpcm->be; |
| 3334 | params = &dpcm->hw_params; | 3378 | params = &dpcm->hw_params; |
| @@ -3349,7 +3393,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, | |||
| 3349 | params_channels(params), | 3393 | params_channels(params), |
| 3350 | params_rate(params)); | 3394 | params_rate(params)); |
| 3351 | } | 3395 | } |
| 3352 | 3396 | spin_unlock_irqrestore(&fe->card->dpcm_lock, flags); | |
| 3353 | out: | 3397 | out: |
| 3354 | return offset; | 3398 | return offset; |
| 3355 | } | 3399 | } |
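Note: card->dpcm_lock, initialised in snd_soc_register_card() in the soc-core.c hunk above, closes races between the FE/BE connect and disconnect paths and the readers that walk the dpcm lists from trigger or debugfs context: every list_add()/list_del() and every for_each_dpcm_be()/for_each_dpcm_fe() walk that can run concurrently now sits under the same spinlock, with the irqsave variant since some of those paths presumably run in atomic context. Reduced to its essentials (invented names; assumes spin_lock_init() and INIT_LIST_HEAD() ran at setup, as in the hunk above):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head node;
	int state;
};

struct demo_card {
	spinlock_t lock;		/* protects @clients */
	struct list_head clients;
};

/* writer: modify the list only under the lock */
static void demo_connect(struct demo_card *card, struct demo_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_add(&e->node, &card->clients);
	spin_unlock_irqrestore(&card->lock, flags);
}

/* reader: never return out of the walk while still holding the lock */
static int demo_all_idle(struct demo_card *card)
{
	struct demo_entry *e;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(e, &card->clients, node) {
		if (e->state != 0) {
			ret = 0;	/* record the answer ... */
			break;		/* ... then fall through to the unlock */
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	return ret;
}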
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 25fca7055464..96852d250619 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
| @@ -482,10 +482,11 @@ static void remove_widget(struct snd_soc_component *comp, | |||
| 482 | 482 | ||
| 483 | snd_ctl_remove(card, kcontrol); | 483 | snd_ctl_remove(card, kcontrol); |
| 484 | 484 | ||
| 485 | kfree(dobj->control.dvalues); | 485 | /* free enum kcontrol's dvalues and dtexts */ |
| 486 | kfree(se->dobj.control.dvalues); | ||
| 486 | for (j = 0; j < se->items; j++) | 487 | for (j = 0; j < se->items; j++) |
| 487 | kfree(dobj->control.dtexts[j]); | 488 | kfree(se->dobj.control.dtexts[j]); |
| 488 | kfree(dobj->control.dtexts); | 489 | kfree(se->dobj.control.dtexts); |
| 489 | 490 | ||
| 490 | kfree(se); | 491 | kfree(se); |
| 491 | kfree(w->kcontrol_news[i].name); | 492 | kfree(w->kcontrol_news[i].name); |
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c index 47901983a6ff..78bed9734713 100644 --- a/sound/soc/stm/stm32_adfsdm.c +++ b/sound/soc/stm/stm32_adfsdm.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
| 12 | #include <linux/mutex.h> | ||
| 12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
| 13 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 14 | 15 | ||
| @@ -37,6 +38,8 @@ struct stm32_adfsdm_priv { | |||
| 37 | /* PCM buffer */ | 38 | /* PCM buffer */ |
| 38 | unsigned char *pcm_buff; | 39 | unsigned char *pcm_buff; |
| 39 | unsigned int pos; | 40 | unsigned int pos; |
| 41 | |||
| 42 | struct mutex lock; /* protect against race condition on iio state */ | ||
| 40 | }; | 43 | }; |
| 41 | 44 | ||
| 42 | static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = { | 45 | static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = { |
| @@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream, | |||
| 62 | { | 65 | { |
| 63 | struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai); | 66 | struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai); |
| 64 | 67 | ||
| 68 | mutex_lock(&priv->lock); | ||
| 65 | if (priv->iio_active) { | 69 | if (priv->iio_active) { |
| 66 | iio_channel_stop_all_cb(priv->iio_cb); | 70 | iio_channel_stop_all_cb(priv->iio_cb); |
| 67 | priv->iio_active = false; | 71 | priv->iio_active = false; |
| 68 | } | 72 | } |
| 73 | mutex_unlock(&priv->lock); | ||
| 69 | } | 74 | } |
| 70 | 75 | ||
| 71 | static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream, | 76 | static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream, |
| @@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream, | |||
| 74 | struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai); | 79 | struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai); |
| 75 | int ret; | 80 | int ret; |
| 76 | 81 | ||
| 82 | mutex_lock(&priv->lock); | ||
| 83 | if (priv->iio_active) { | ||
| 84 | iio_channel_stop_all_cb(priv->iio_cb); | ||
| 85 | priv->iio_active = false; | ||
| 86 | } | ||
| 87 | |||
| 77 | ret = iio_write_channel_attribute(priv->iio_ch, | 88 | ret = iio_write_channel_attribute(priv->iio_ch, |
| 78 | substream->runtime->rate, 0, | 89 | substream->runtime->rate, 0, |
| 79 | IIO_CHAN_INFO_SAMP_FREQ); | 90 | IIO_CHAN_INFO_SAMP_FREQ); |
| 80 | if (ret < 0) { | 91 | if (ret < 0) { |
| 81 | dev_err(dai->dev, "%s: Failed to set %d sampling rate\n", | 92 | dev_err(dai->dev, "%s: Failed to set %d sampling rate\n", |
| 82 | __func__, substream->runtime->rate); | 93 | __func__, substream->runtime->rate); |
| 83 | return ret; | 94 | goto out; |
| 84 | } | 95 | } |
| 85 | 96 | ||
| 86 | if (!priv->iio_active) { | 97 | if (!priv->iio_active) { |
| @@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream, | |||
| 92 | __func__, ret); | 103 | __func__, ret); |
| 93 | } | 104 | } |
| 94 | 105 | ||
| 106 | out: | ||
| 107 | mutex_unlock(&priv->lock); | ||
| 108 | |||
| 95 | return ret; | 109 | return ret; |
| 96 | } | 110 | } |
| 97 | 111 | ||
| @@ -291,6 +305,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match); | |||
| 291 | static int stm32_adfsdm_probe(struct platform_device *pdev) | 305 | static int stm32_adfsdm_probe(struct platform_device *pdev) |
| 292 | { | 306 | { |
| 293 | struct stm32_adfsdm_priv *priv; | 307 | struct stm32_adfsdm_priv *priv; |
| 308 | struct snd_soc_component *component; | ||
| 294 | int ret; | 309 | int ret; |
| 295 | 310 | ||
| 296 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 311 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
| @@ -299,6 +314,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev) | |||
| 299 | 314 | ||
| 300 | priv->dev = &pdev->dev; | 315 | priv->dev = &pdev->dev; |
| 301 | priv->dai_drv = stm32_adfsdm_dai; | 316 | priv->dai_drv = stm32_adfsdm_dai; |
| 317 | mutex_init(&priv->lock); | ||
| 302 | 318 | ||
| 303 | dev_set_drvdata(&pdev->dev, priv); | 319 | dev_set_drvdata(&pdev->dev, priv); |
| 304 | 320 | ||
| @@ -317,9 +333,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev) | |||
| 317 | if (IS_ERR(priv->iio_cb)) | 333 | if (IS_ERR(priv->iio_cb)) |
| 318 | return PTR_ERR(priv->iio_cb); | 334 | return PTR_ERR(priv->iio_cb); |
| 319 | 335 | ||
| 320 | ret = devm_snd_soc_register_component(&pdev->dev, | 336 | component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL); |
| 321 | &stm32_adfsdm_soc_platform, | 337 | if (!component) |
| 322 | NULL, 0); | 338 | return -ENOMEM; |
| 339 | #ifdef CONFIG_DEBUG_FS | ||
| 340 | component->debugfs_prefix = "pcm"; | ||
| 341 | #endif | ||
| 342 | |||
| 343 | ret = snd_soc_add_component(&pdev->dev, component, | ||
| 344 | &stm32_adfsdm_soc_platform, NULL, 0); | ||
| 323 | if (ret < 0) | 345 | if (ret < 0) |
| 324 | dev_err(&pdev->dev, "%s: Failed to register PCM platform\n", | 346 | dev_err(&pdev->dev, "%s: Failed to register PCM platform\n", |
| 325 | __func__); | 347 | __func__); |
| @@ -327,12 +349,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev) | |||
| 327 | return ret; | 349 | return ret; |
| 328 | } | 350 | } |
| 329 | 351 | ||
| 352 | static int stm32_adfsdm_remove(struct platform_device *pdev) | ||
| 353 | { | ||
| 354 | snd_soc_unregister_component(&pdev->dev); | ||
| 355 | |||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 330 | static struct platform_driver stm32_adfsdm_driver = { | 359 | static struct platform_driver stm32_adfsdm_driver = { |
| 331 | .driver = { | 360 | .driver = { |
| 332 | .name = STM32_ADFSDM_DRV_NAME, | 361 | .name = STM32_ADFSDM_DRV_NAME, |
| 333 | .of_match_table = stm32_adfsdm_of_match, | 362 | .of_match_table = stm32_adfsdm_of_match, |
| 334 | }, | 363 | }, |
| 335 | .probe = stm32_adfsdm_probe, | 364 | .probe = stm32_adfsdm_probe, |
| 365 | .remove = stm32_adfsdm_remove, | ||
| 336 | }; | 366 | }; |
| 337 | 367 | ||
| 338 | module_platform_driver(stm32_adfsdm_driver); | 368 | module_platform_driver(stm32_adfsdm_driver); |
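Note: switching from devm_snd_soc_register_component() to a locally allocated struct snd_soc_component plus snd_soc_add_component() lets the driver set component->debugfs_prefix before registration, so the DFSDM DAI component and this PCM component of the same device get distinct debugfs directories; the matching snd_soc_unregister_component() then lands in the new .remove. Condensed sketch (invented names; the component driver is a placeholder assumed to exist):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver demo_platform;	/* placeholder */

static int demo_register_pcm(struct platform_device *pdev)
{
	struct snd_soc_component *component;

	component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
	if (!component)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	component->debugfs_prefix = "pcm";	/* keep it apart from the DAI component */
#endif

	return snd_soc_add_component(&pdev->dev, component, &demo_platform,
				     NULL, 0);
}

static int demo_remove_pcm(struct platform_device *pdev)
{
	snd_soc_unregister_component(&pdev->dev);
	return 0;
}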
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c index 47c334de6b09..8968458eec62 100644 --- a/sound/soc/stm/stm32_i2s.c +++ b/sound/soc/stm/stm32_i2s.c | |||
| @@ -281,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg) | |||
| 281 | case STM32_I2S_CFG2_REG: | 281 | case STM32_I2S_CFG2_REG: |
| 282 | case STM32_I2S_IER_REG: | 282 | case STM32_I2S_IER_REG: |
| 283 | case STM32_I2S_SR_REG: | 283 | case STM32_I2S_SR_REG: |
| 284 | case STM32_I2S_TXDR_REG: | ||
| 285 | case STM32_I2S_RXDR_REG: | 284 | case STM32_I2S_RXDR_REG: |
| 286 | case STM32_I2S_CGFR_REG: | 285 | case STM32_I2S_CGFR_REG: |
| 287 | return true; | 286 | return true; |
| @@ -293,7 +292,7 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg) | |||
| 293 | static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg) | 292 | static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg) |
| 294 | { | 293 | { |
| 295 | switch (reg) { | 294 | switch (reg) { |
| 296 | case STM32_I2S_TXDR_REG: | 295 | case STM32_I2S_SR_REG: |
| 297 | case STM32_I2S_RXDR_REG: | 296 | case STM32_I2S_RXDR_REG: |
| 298 | return true; | 297 | return true; |
| 299 | default: | 298 | default: |
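Note: TXDR is removed from the readable set (it appears to be a write-only data register), and the status register SR takes its place in the volatile set so the regcache never serves stale interrupt/FIFO status; RXDR stays both readable and volatile. These predicates plug into a regmap_config as sketched below (register offsets and everything else invented, only the intent mirrors the hunk):

#include <linux/regmap.h>

#define DEMO_SR_REG	0x14	/* invented offsets, for illustration only */
#define DEMO_TXDR_REG	0x20
#define DEMO_RXDR_REG	0x30

static bool demo_readable_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DEMO_SR_REG:
	case DEMO_RXDR_REG:	/* TXDR intentionally absent: write-only */
		return true;
	default:
		return false;
	}
}

static bool demo_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case DEMO_SR_REG:	/* hardware-updated status: never cache */
	case DEMO_RXDR_REG:	/* data FIFO: every read must hit hardware */
		return true;
	default:
		return false;
	}
}

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.readable_reg = demo_readable_reg,
	.volatile_reg = demo_volatile_reg,
	.cache_type = REGCACHE_FLAT,
};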
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c index 14c9591aae42..d68d62f12df5 100644 --- a/sound/soc/stm/stm32_sai.c +++ b/sound/soc/stm/stm32_sai.c | |||
| @@ -105,6 +105,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client, | |||
| 105 | if (!pdev) { | 105 | if (!pdev) { |
| 106 | dev_err(&sai_client->pdev->dev, | 106 | dev_err(&sai_client->pdev->dev, |
| 107 | "Device not found for node %pOFn\n", np_provider); | 107 | "Device not found for node %pOFn\n", np_provider); |
| 108 | of_node_put(np_provider); | ||
| 108 | return -ENODEV; | 109 | return -ENODEV; |
| 109 | } | 110 | } |
| 110 | 111 | ||
| @@ -113,19 +114,20 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client, | |||
| 113 | dev_err(&sai_client->pdev->dev, | 114 | dev_err(&sai_client->pdev->dev, |
| 114 | "SAI sync provider data not found\n"); | 115 | "SAI sync provider data not found\n"); |
| 115 | ret = -EINVAL; | 116 | ret = -EINVAL; |
| 116 | goto out_put_dev; | 117 | goto error; |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | /* Configure sync client */ | 120 | /* Configure sync client */ |
| 120 | ret = stm32_sai_sync_conf_client(sai_client, synci); | 121 | ret = stm32_sai_sync_conf_client(sai_client, synci); |
| 121 | if (ret < 0) | 122 | if (ret < 0) |
| 122 | goto out_put_dev; | 123 | goto error; |
| 123 | 124 | ||
| 124 | /* Configure sync provider */ | 125 | /* Configure sync provider */ |
| 125 | ret = stm32_sai_sync_conf_provider(sai_provider, synco); | 126 | ret = stm32_sai_sync_conf_provider(sai_provider, synco); |
| 126 | 127 | ||
| 127 | out_put_dev: | 128 | error: |
| 128 | put_device(&pdev->dev); | 129 | put_device(&pdev->dev); |
| 130 | of_node_put(np_provider); | ||
| 129 | return ret; | 131 | return ret; |
| 130 | } | 132 | } |
| 131 | 133 | ||
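Note: both exit paths of the sync-provider lookup now drop the references they took, of_node_put() on the provider node (including the early "device not found" return) and put_device() on its platform device. The general reference-count hygiene for this kind of lookup, in sketch form (invented names; assumes np came from something like of_parse_phandle(), which hands over a node reference):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int demo_use_provider(struct device_node *np)
{
	struct platform_device *pdev;
	int ret;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	if (!pdev) {
		of_node_put(np);		/* still own the node ref: drop it */
		return -ENODEV;
	}

	/* ... use pdev or its drvdata here ... */
	ret = 0;

	put_device(&pdev->dev);			/* balance of_find_device_by_node() */
	of_node_put(np);			/* balance the phandle lookup */
	return ret;
}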
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index f9297228c41c..d7045aa520de 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c | |||
| @@ -70,6 +70,7 @@ | |||
| 70 | #define SAI_IEC60958_STATUS_BYTES 24 | 70 | #define SAI_IEC60958_STATUS_BYTES 24 |
| 71 | 71 | ||
| 72 | #define SAI_MCLK_NAME_LEN 32 | 72 | #define SAI_MCLK_NAME_LEN 32 |
| 73 | #define SAI_RATE_11K 11025 | ||
| 73 | 74 | ||
| 74 | /** | 75 | /** |
| 75 | * struct stm32_sai_sub_data - private data of SAI sub block (block A or B) | 76 | * struct stm32_sai_sub_data - private data of SAI sub block (block A or B) |
| @@ -100,8 +101,9 @@ | |||
| 100 | * @slot_mask: rx or tx active slots mask. set at init or at runtime | 101 | * @slot_mask: rx or tx active slots mask. set at init or at runtime |
| 101 | * @data_size: PCM data width. corresponds to PCM substream width. | 102 | * @data_size: PCM data width. corresponds to PCM substream width. |
| 102 | * @spdif_frm_cnt: S/PDIF playback frame counter | 103 | * @spdif_frm_cnt: S/PDIF playback frame counter |
| 103 | * @snd_aes_iec958: iec958 data | 104 | * @iec958: iec958 data |
| 104 | * @ctrl_lock: control lock | 105 | * @ctrl_lock: control lock |
| 106 | * @irq_lock: prevent race condition with IRQ | ||
| 105 | */ | 107 | */ |
| 106 | struct stm32_sai_sub_data { | 108 | struct stm32_sai_sub_data { |
| 107 | struct platform_device *pdev; | 109 | struct platform_device *pdev; |
| @@ -133,6 +135,7 @@ struct stm32_sai_sub_data { | |||
| 133 | unsigned int spdif_frm_cnt; | 135 | unsigned int spdif_frm_cnt; |
| 134 | struct snd_aes_iec958 iec958; | 136 | struct snd_aes_iec958 iec958; |
| 135 | struct mutex ctrl_lock; /* protect resources accessed by controls */ | 137 | struct mutex ctrl_lock; /* protect resources accessed by controls */ |
| 138 | spinlock_t irq_lock; /* used to prevent race condition with IRQ */ | ||
| 136 | }; | 139 | }; |
| 137 | 140 | ||
| 138 | enum stm32_sai_fifo_th { | 141 | enum stm32_sai_fifo_th { |
| @@ -307,6 +310,25 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai, | |||
| 307 | return ret; | 310 | return ret; |
| 308 | } | 311 | } |
| 309 | 312 | ||
| 313 | static int stm32_sai_set_parent_clock(struct stm32_sai_sub_data *sai, | ||
| 314 | unsigned int rate) | ||
| 315 | { | ||
| 316 | struct platform_device *pdev = sai->pdev; | ||
| 317 | struct clk *parent_clk = sai->pdata->clk_x8k; | ||
| 318 | int ret; | ||
| 319 | |||
| 320 | if (!(rate % SAI_RATE_11K)) | ||
| 321 | parent_clk = sai->pdata->clk_x11k; | ||
| 322 | |||
| 323 | ret = clk_set_parent(sai->sai_ck, parent_clk); | ||
| 324 | if (ret) | ||
| 325 | dev_err(&pdev->dev, " Error %d setting sai_ck parent clock. %s", | ||
| 326 | ret, ret == -EBUSY ? | ||
| 327 | "Active stream rates conflict\n" : "\n"); | ||
| 328 | |||
| 329 | return ret; | ||
| 330 | } | ||
| 331 | |||
| 310 | static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate, | 332 | static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate, |
| 311 | unsigned long *prate) | 333 | unsigned long *prate) |
| 312 | { | 334 | { |
| @@ -474,8 +496,10 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid) | |||
| 474 | status = SNDRV_PCM_STATE_XRUN; | 496 | status = SNDRV_PCM_STATE_XRUN; |
| 475 | } | 497 | } |
| 476 | 498 | ||
| 477 | if (status != SNDRV_PCM_STATE_RUNNING) | 499 | spin_lock(&sai->irq_lock); |
| 500 | if (status != SNDRV_PCM_STATE_RUNNING && sai->substream) | ||
| 478 | snd_pcm_stop_xrun(sai->substream); | 501 | snd_pcm_stop_xrun(sai->substream); |
| 502 | spin_unlock(&sai->irq_lock); | ||
| 479 | 503 | ||
| 480 | return IRQ_HANDLED; | 504 | return IRQ_HANDLED; |
| 481 | } | 505 | } |
| @@ -486,25 +510,29 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai, | |||
| 486 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); | 510 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); |
| 487 | int ret; | 511 | int ret; |
| 488 | 512 | ||
| 489 | if (dir == SND_SOC_CLOCK_OUT) { | 513 | if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) { |
| 490 | ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, | 514 | ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, |
| 491 | SAI_XCR1_NODIV, | 515 | SAI_XCR1_NODIV, |
| 492 | (unsigned int)~SAI_XCR1_NODIV); | 516 | (unsigned int)~SAI_XCR1_NODIV); |
| 493 | if (ret < 0) | 517 | if (ret < 0) |
| 494 | return ret; | 518 | return ret; |
| 495 | 519 | ||
| 496 | dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq); | 520 | /* If master clock is used, set parent clock now */ |
| 497 | sai->mclk_rate = freq; | 521 | ret = stm32_sai_set_parent_clock(sai, freq); |
| 522 | if (ret) | ||
| 523 | return ret; | ||
| 498 | 524 | ||
| 499 | if (sai->sai_mclk) { | 525 | ret = clk_set_rate_exclusive(sai->sai_mclk, freq); |
| 500 | ret = clk_set_rate_exclusive(sai->sai_mclk, | 526 | if (ret) { |
| 501 | sai->mclk_rate); | 527 | dev_err(cpu_dai->dev, |
| 502 | if (ret) { | 528 | ret == -EBUSY ? |
| 503 | dev_err(cpu_dai->dev, | 529 | "Active streams have incompatible rates" : |
| 504 | "Could not set mclk rate\n"); | 530 | "Could not set mclk rate\n"); |
| 505 | return ret; | 531 | return ret; |
| 506 | } | ||
| 507 | } | 532 | } |
| 533 | |||
| 534 | dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq); | ||
| 535 | sai->mclk_rate = freq; | ||
| 508 | } | 536 | } |
| 509 | 537 | ||
| 510 | return 0; | 538 | return 0; |
| @@ -679,8 +707,19 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream, | |||
| 679 | { | 707 | { |
| 680 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); | 708 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); |
| 681 | int imr, cr2, ret; | 709 | int imr, cr2, ret; |
| 710 | unsigned long flags; | ||
| 682 | 711 | ||
| 712 | spin_lock_irqsave(&sai->irq_lock, flags); | ||
| 683 | sai->substream = substream; | 713 | sai->substream = substream; |
| 714 | spin_unlock_irqrestore(&sai->irq_lock, flags); | ||
| 715 | |||
| 716 | if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) { | ||
| 717 | snd_pcm_hw_constraint_mask64(substream->runtime, | ||
| 718 | SNDRV_PCM_HW_PARAM_FORMAT, | ||
| 719 | SNDRV_PCM_FMTBIT_S32_LE); | ||
| 720 | snd_pcm_hw_constraint_single(substream->runtime, | ||
| 721 | SNDRV_PCM_HW_PARAM_CHANNELS, 2); | ||
| 722 | } | ||
| 684 | 723 | ||
| 685 | ret = clk_prepare_enable(sai->sai_ck); | 724 | ret = clk_prepare_enable(sai->sai_ck); |
| 686 | if (ret < 0) { | 725 | if (ret < 0) { |
| @@ -898,14 +937,16 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, | |||
| 898 | struct snd_pcm_hw_params *params) | 937 | struct snd_pcm_hw_params *params) |
| 899 | { | 938 | { |
| 900 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); | 939 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); |
| 901 | int div = 0; | 940 | int div = 0, cr1 = 0; |
| 902 | int sai_clk_rate, mclk_ratio, den; | 941 | int sai_clk_rate, mclk_ratio, den; |
| 903 | unsigned int rate = params_rate(params); | 942 | unsigned int rate = params_rate(params); |
| 943 | int ret; | ||
| 904 | 944 | ||
| 905 | if (!(rate % 11025)) | 945 | if (!sai->sai_mclk) { |
| 906 | clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k); | 946 | ret = stm32_sai_set_parent_clock(sai, rate); |
| 907 | else | 947 | if (ret) |
| 908 | clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k); | 948 | return ret; |
| 949 | } | ||
| 909 | sai_clk_rate = clk_get_rate(sai->sai_ck); | 950 | sai_clk_rate = clk_get_rate(sai->sai_ck); |
| 910 | 951 | ||
| 911 | if (STM_SAI_IS_F4(sai->pdata)) { | 952 | if (STM_SAI_IS_F4(sai->pdata)) { |
| @@ -943,13 +984,19 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai, | |||
| 943 | } else { | 984 | } else { |
| 944 | if (sai->mclk_rate) { | 985 | if (sai->mclk_rate) { |
| 945 | mclk_ratio = sai->mclk_rate / rate; | 986 | mclk_ratio = sai->mclk_rate / rate; |
| 946 | if ((mclk_ratio != 512) && | 987 | if (mclk_ratio == 512) { |
| 947 | (mclk_ratio != 256)) { | 988 | cr1 = SAI_XCR1_OSR; |
| 989 | } else if (mclk_ratio != 256) { | ||
| 948 | dev_err(cpu_dai->dev, | 990 | dev_err(cpu_dai->dev, |
| 949 | "Wrong mclk ratio %d\n", | 991 | "Wrong mclk ratio %d\n", |
| 950 | mclk_ratio); | 992 | mclk_ratio); |
| 951 | return -EINVAL; | 993 | return -EINVAL; |
| 952 | } | 994 | } |
| 995 | |||
| 996 | regmap_update_bits(sai->regmap, | ||
| 997 | STM_SAI_CR1_REGX, | ||
| 998 | SAI_XCR1_OSR, cr1); | ||
| 999 | |||
| 953 | div = stm32_sai_get_clk_div(sai, sai_clk_rate, | 1000 | div = stm32_sai_get_clk_div(sai, sai_clk_rate, |
| 954 | sai->mclk_rate); | 1001 | sai->mclk_rate); |
| 955 | if (div < 0) | 1002 | if (div < 0) |
| @@ -1051,28 +1098,36 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream, | |||
| 1051 | struct snd_soc_dai *cpu_dai) | 1098 | struct snd_soc_dai *cpu_dai) |
| 1052 | { | 1099 | { |
| 1053 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); | 1100 | struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai); |
| 1101 | unsigned long flags; | ||
| 1054 | 1102 | ||
| 1055 | regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); | 1103 | regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0); |
| 1056 | 1104 | ||
| 1057 | regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV, | 1105 | regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV, |
| 1058 | SAI_XCR1_NODIV); | 1106 | SAI_XCR1_NODIV); |
| 1059 | 1107 | ||
| 1060 | clk_disable_unprepare(sai->sai_ck); | 1108 | /* Release mclk rate only if rate was actually set */ |
| 1109 | if (sai->mclk_rate) { | ||
| 1110 | clk_rate_exclusive_put(sai->sai_mclk); | ||
| 1111 | sai->mclk_rate = 0; | ||
| 1112 | } | ||
| 1061 | 1113 | ||
| 1062 | clk_rate_exclusive_put(sai->sai_mclk); | 1114 | clk_disable_unprepare(sai->sai_ck); |
| 1063 | 1115 | ||
| 1116 | spin_lock_irqsave(&sai->irq_lock, flags); | ||
| 1064 | sai->substream = NULL; | 1117 | sai->substream = NULL; |
| 1118 | spin_unlock_irqrestore(&sai->irq_lock, flags); | ||
| 1065 | } | 1119 | } |
| 1066 | 1120 | ||
| 1067 | static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd, | 1121 | static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd, |
| 1068 | struct snd_soc_dai *cpu_dai) | 1122 | struct snd_soc_dai *cpu_dai) |
| 1069 | { | 1123 | { |
| 1070 | struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev); | 1124 | struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev); |
| 1125 | struct snd_kcontrol_new knew = iec958_ctls; | ||
| 1071 | 1126 | ||
| 1072 | if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) { | 1127 | if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) { |
| 1073 | dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__); | 1128 | dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__); |
| 1074 | return snd_ctl_add(rtd->pcm->card, | 1129 | knew.device = rtd->pcm->device; |
| 1075 | snd_ctl_new1(&iec958_ctls, sai)); | 1130 | return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai)); |
| 1076 | } | 1131 | } |
| 1077 | 1132 | ||
| 1078 | return 0; | 1133 | return 0; |
| @@ -1081,7 +1136,7 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd, | |||
| 1081 | static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai) | 1136 | static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai) |
| 1082 | { | 1137 | { |
| 1083 | struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev); | 1138 | struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev); |
| 1084 | int cr1 = 0, cr1_mask; | 1139 | int cr1 = 0, cr1_mask, ret; |
| 1085 | 1140 | ||
| 1086 | sai->cpu_dai = cpu_dai; | 1141 | sai->cpu_dai = cpu_dai; |
| 1087 | 1142 | ||
| @@ -1111,8 +1166,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai) | |||
| 1111 | /* Configure synchronization */ | 1166 | /* Configure synchronization */ |
| 1112 | if (sai->sync == SAI_SYNC_EXTERNAL) { | 1167 | if (sai->sync == SAI_SYNC_EXTERNAL) { |
| 1113 | /* Configure synchro client and provider */ | 1168 | /* Configure synchro client and provider */ |
| 1114 | sai->pdata->set_sync(sai->pdata, sai->np_sync_provider, | 1169 | ret = sai->pdata->set_sync(sai->pdata, sai->np_sync_provider, |
| 1115 | sai->synco, sai->synci); | 1170 | sai->synco, sai->synci); |
| 1171 | if (ret) | ||
| 1172 | return ret; | ||
| 1116 | } | 1173 | } |
| 1117 | 1174 | ||
| 1118 | cr1_mask |= SAI_XCR1_SYNCEN_MASK; | 1175 | cr1_mask |= SAI_XCR1_SYNCEN_MASK; |
| @@ -1392,7 +1449,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev, | |||
| 1392 | if (!sai->cpu_dai_drv) | 1449 | if (!sai->cpu_dai_drv) |
| 1393 | return -ENOMEM; | 1450 | return -ENOMEM; |
| 1394 | 1451 | ||
| 1395 | sai->cpu_dai_drv->name = dev_name(&pdev->dev); | ||
| 1396 | if (STM_SAI_IS_PLAYBACK(sai)) { | 1452 | if (STM_SAI_IS_PLAYBACK(sai)) { |
| 1397 | memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai, | 1453 | memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai, |
| 1398 | sizeof(stm32_sai_playback_dai)); | 1454 | sizeof(stm32_sai_playback_dai)); |
| @@ -1402,6 +1458,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev, | |||
| 1402 | sizeof(stm32_sai_capture_dai)); | 1458 | sizeof(stm32_sai_capture_dai)); |
| 1403 | sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name; | 1459 | sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name; |
| 1404 | } | 1460 | } |
| 1461 | sai->cpu_dai_drv->name = dev_name(&pdev->dev); | ||
| 1405 | 1462 | ||
| 1406 | return 0; | 1463 | return 0; |
| 1407 | } | 1464 | } |
| @@ -1424,6 +1481,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) | |||
| 1424 | 1481 | ||
| 1425 | sai->pdev = pdev; | 1482 | sai->pdev = pdev; |
| 1426 | mutex_init(&sai->ctrl_lock); | 1483 | mutex_init(&sai->ctrl_lock); |
| 1484 | spin_lock_init(&sai->irq_lock); | ||
| 1427 | platform_set_drvdata(pdev, sai); | 1485 | platform_set_drvdata(pdev, sai); |
| 1428 | 1486 | ||
| 1429 | sai->pdata = dev_get_drvdata(pdev->dev.parent); | 1487 | sai->pdata = dev_get_drvdata(pdev->dev.parent); |
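The stm32_sai hunks above close a race between the SAI interrupt handler and stream startup/shutdown: the ISR now takes sai->irq_lock and re-checks sai->substream before calling snd_pcm_stop_xrun(), while the startup and shutdown paths update the pointer under the same lock. A minimal user-space analogue of that guard-the-pointer pattern (pthreads, invented names, not the driver code) could look like:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver's stream object. */
    struct stream { const char *name; };

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct stream *active;          /* plays the role of sai->substream */

    /* "Interrupt" path: only touch the stream under the lock,
     * and only if it is still registered. */
    static void fake_isr(void)
    {
        pthread_mutex_lock(&irq_lock);
        if (active)
            printf("stopping xrun on %s\n", active->name);
        pthread_mutex_unlock(&irq_lock);
    }

    /* "startup": publish the pointer under the lock. */
    static void startup(struct stream *s)
    {
        pthread_mutex_lock(&irq_lock);
        active = s;
        pthread_mutex_unlock(&irq_lock);
    }

    /* "shutdown": clear the pointer under the same lock, so the ISR
     * can never dereference a stream that is being torn down. */
    static void shutdown_stream(void)
    {
        pthread_mutex_lock(&irq_lock);
        active = NULL;
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        struct stream s = { "playback" };

        startup(&s);
        fake_isr();          /* prints: stopping xrun on playback */
        shutdown_stream();
        fake_isr();          /* pointer already cleared: does nothing */
        return 0;
    }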
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c index a7f413cb704d..b14ab512c2ce 100644 --- a/sound/xen/xen_snd_front_alsa.c +++ b/sound/xen/xen_snd_front_alsa.c | |||
| @@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream, | |||
| 441 | { | 441 | { |
| 442 | int i; | 442 | int i; |
| 443 | 443 | ||
| 444 | stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL); | 444 | stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL); |
| 445 | if (!stream->buffer) | 445 | if (!stream->buffer) |
| 446 | return -ENOMEM; | 446 | return -ENOMEM; |
| 447 | 447 | ||
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 404d4b9ffe76..df1153cea0b7 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | #ifndef __KERNEL__ | 33 | #ifndef __KERNEL__ |
| 34 | #include <stdlib.h> | 34 | #include <stdlib.h> |
| 35 | #include <time.h> | ||
| 35 | #endif | 36 | #endif |
| 36 | 37 | ||
| 37 | /* | 38 | /* |
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c index 512306a37531..0f257139b003 100644 --- a/tools/io_uring/io_uring-bench.c +++ b/tools/io_uring/io_uring-bench.c | |||
| @@ -32,10 +32,6 @@ | |||
| 32 | #include "liburing.h" | 32 | #include "liburing.h" |
| 33 | #include "barrier.h" | 33 | #include "barrier.h" |
| 34 | 34 | ||
| 35 | #ifndef IOCQE_FLAG_CACHEHIT | ||
| 36 | #define IOCQE_FLAG_CACHEHIT (1U << 0) | ||
| 37 | #endif | ||
| 38 | |||
| 39 | #define min(a, b) ((a < b) ? (a) : (b)) | 35 | #define min(a, b) ((a < b) ? (a) : (b)) |
| 40 | 36 | ||
| 41 | struct io_sq_ring { | 37 | struct io_sq_ring { |
| @@ -85,7 +81,6 @@ struct submitter { | |||
| 85 | unsigned long reaps; | 81 | unsigned long reaps; |
| 86 | unsigned long done; | 82 | unsigned long done; |
| 87 | unsigned long calls; | 83 | unsigned long calls; |
| 88 | unsigned long cachehit, cachemiss; | ||
| 89 | volatile int finish; | 84 | volatile int finish; |
| 90 | 85 | ||
| 91 | __s32 *fds; | 86 | __s32 *fds; |
| @@ -270,10 +265,6 @@ static int reap_events(struct submitter *s) | |||
| 270 | return -1; | 265 | return -1; |
| 271 | } | 266 | } |
| 272 | } | 267 | } |
| 273 | if (cqe->flags & IOCQE_FLAG_CACHEHIT) | ||
| 274 | s->cachehit++; | ||
| 275 | else | ||
| 276 | s->cachemiss++; | ||
| 277 | reaped++; | 268 | reaped++; |
| 278 | head++; | 269 | head++; |
| 279 | } while (1); | 270 | } while (1); |
| @@ -489,7 +480,7 @@ static void file_depths(char *buf) | |||
| 489 | int main(int argc, char *argv[]) | 480 | int main(int argc, char *argv[]) |
| 490 | { | 481 | { |
| 491 | struct submitter *s = &submitters[0]; | 482 | struct submitter *s = &submitters[0]; |
| 492 | unsigned long done, calls, reap, cache_hit, cache_miss; | 483 | unsigned long done, calls, reap; |
| 493 | int err, i, flags, fd; | 484 | int err, i, flags, fd; |
| 494 | char *fdepths; | 485 | char *fdepths; |
| 495 | void *ret; | 486 | void *ret; |
| @@ -569,44 +560,29 @@ int main(int argc, char *argv[]) | |||
| 569 | pthread_create(&s->thread, NULL, submitter_fn, s); | 560 | pthread_create(&s->thread, NULL, submitter_fn, s); |
| 570 | 561 | ||
| 571 | fdepths = malloc(8 * s->nr_files); | 562 | fdepths = malloc(8 * s->nr_files); |
| 572 | cache_hit = cache_miss = reap = calls = done = 0; | 563 | reap = calls = done = 0; |
| 573 | do { | 564 | do { |
| 574 | unsigned long this_done = 0; | 565 | unsigned long this_done = 0; |
| 575 | unsigned long this_reap = 0; | 566 | unsigned long this_reap = 0; |
| 576 | unsigned long this_call = 0; | 567 | unsigned long this_call = 0; |
| 577 | unsigned long this_cache_hit = 0; | ||
| 578 | unsigned long this_cache_miss = 0; | ||
| 579 | unsigned long rpc = 0, ipc = 0; | 568 | unsigned long rpc = 0, ipc = 0; |
| 580 | double hit = 0.0; | ||
| 581 | 569 | ||
| 582 | sleep(1); | 570 | sleep(1); |
| 583 | this_done += s->done; | 571 | this_done += s->done; |
| 584 | this_call += s->calls; | 572 | this_call += s->calls; |
| 585 | this_reap += s->reaps; | 573 | this_reap += s->reaps; |
| 586 | this_cache_hit += s->cachehit; | ||
| 587 | this_cache_miss += s->cachemiss; | ||
| 588 | if (this_cache_hit && this_cache_miss) { | ||
| 589 | unsigned long hits, total; | ||
| 590 | |||
| 591 | hits = this_cache_hit - cache_hit; | ||
| 592 | total = hits + this_cache_miss - cache_miss; | ||
| 593 | hit = (double) hits / (double) total; | ||
| 594 | hit *= 100.0; | ||
| 595 | } | ||
| 596 | if (this_call - calls) { | 574 | if (this_call - calls) { |
| 597 | rpc = (this_done - done) / (this_call - calls); | 575 | rpc = (this_done - done) / (this_call - calls); |
| 598 | ipc = (this_reap - reap) / (this_call - calls); | 576 | ipc = (this_reap - reap) / (this_call - calls); |
| 599 | } else | 577 | } else |
| 600 | rpc = ipc = -1; | 578 | rpc = ipc = -1; |
| 601 | file_depths(fdepths); | 579 | file_depths(fdepths); |
| 602 | printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n", | 580 | printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n", |
| 603 | this_done - done, rpc, ipc, s->inflight, | 581 | this_done - done, rpc, ipc, s->inflight, |
| 604 | fdepths, hit); | 582 | fdepths); |
| 605 | done = this_done; | 583 | done = this_done; |
| 606 | calls = this_call; | 584 | calls = this_call; |
| 607 | reap = this_reap; | 585 | reap = this_reap; |
| 608 | cache_hit = s->cachehit; | ||
| 609 | cache_miss = s->cachemiss; | ||
| 610 | } while (!finish); | 586 | } while (!finish); |
| 611 | 587 | ||
| 612 | pthread_join(s->thread, &ret); | 588 | pthread_join(s->thread, &ret); |
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 5bf8e52c41fc..8e7c56e9590f 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile | |||
| @@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) | |||
| 177 | 177 | ||
| 178 | $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) | 178 | $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) |
| 179 | $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ | 179 | $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ |
| 180 | -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@ | 180 | -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@ |
| 181 | @ln -sf $(@F) $(OUTPUT)libbpf.so | 181 | @ln -sf $(@F) $(OUTPUT)libbpf.so |
| 182 | @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) | 182 | @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) |
| 183 | 183 | ||
| @@ -220,8 +220,9 @@ install_lib: all_cmd | |||
| 220 | install_headers: | 220 | install_headers: |
| 221 | $(call QUIET_INSTALL, headers) \ | 221 | $(call QUIET_INSTALL, headers) \ |
| 222 | $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ | 222 | $(call do_install,bpf.h,$(prefix)/include/bpf,644); \ |
| 223 | $(call do_install,libbpf.h,$(prefix)/include/bpf,644); | 223 | $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \ |
| 224 | $(call do_install,btf.h,$(prefix)/include/bpf,644); | 224 | $(call do_install,btf.h,$(prefix)/include/bpf,644); \ |
| 225 | $(call do_install,xsk.h,$(prefix)/include/bpf,644); | ||
| 225 | 226 | ||
| 226 | install: install_lib | 227 | install: install_lib |
| 227 | 228 | ||
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 87e3020ac1bc..cf119c9b6f27 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c | |||
| @@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, | |||
| 2107 | return fwd_kind == real_kind; | 2107 | return fwd_kind == real_kind; |
| 2108 | } | 2108 | } |
| 2109 | 2109 | ||
| 2110 | if (cand_kind != canon_kind) | ||
| 2111 | return 0; | ||
| 2112 | |||
| 2110 | switch (cand_kind) { | 2113 | switch (cand_kind) { |
| 2111 | case BTF_KIND_INT: | 2114 | case BTF_KIND_INT: |
| 2112 | return btf_equal_int(cand_type, canon_type); | 2115 | return btf_equal_int(cand_type, canon_type); |
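The btf.c change adds an early bail-out when the candidate and canonical types have different kinds, so the per-kind equality switch can no longer declare two differently-kinded types equivalent. The same check-the-tag-first shape on a tagged union, sketched with made-up types rather than the real BTF structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical tagged union standing in for struct btf_type. */
    enum kind { KIND_INT, KIND_STRUCT };

    struct type {
        enum kind kind;
        union {
            int bits;          /* KIND_INT */
            const char *name;  /* KIND_STRUCT */
        };
    };

    static bool types_equal(const struct type *a, const struct type *b)
    {
        /* Different kinds can never be equivalent: check this first,
         * before dispatching on the kind-specific layout. */
        if (a->kind != b->kind)
            return false;

        switch (a->kind) {
        case KIND_INT:
            return a->bits == b->bits;
        case KIND_STRUCT:
            return strcmp(a->name, b->name) == 0;
        }
        return false;
    }

    int main(void)
    {
        struct type i32 = { .kind = KIND_INT, .bits = 32 };
        struct type s   = { .kind = KIND_STRUCT, .name = "task" };

        printf("%d\n", types_equal(&i32, &s));   /* 0: kinds differ */
        printf("%d\n", types_equal(&i32, &i32)); /* 1 */
        return 0;
    }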
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 87494c7c619d..981c6ce2da2c 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
| @@ -2233,7 +2233,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer) | |||
| 2233 | return val & 0xffffffff; | 2233 | return val & 0xffffffff; |
| 2234 | 2234 | ||
| 2235 | if (strcmp(type, "u64") == 0 || | 2235 | if (strcmp(type, "u64") == 0 || |
| 2236 | strcmp(type, "s64")) | 2236 | strcmp(type, "s64") == 0) |
| 2237 | return val; | 2237 | return val; |
| 2238 | 2238 | ||
| 2239 | if (strcmp(type, "s8") == 0) | 2239 | if (strcmp(type, "s8") == 0) |
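The event-parse.c one-liner fixes a classic strcmp() pitfall: strcmp() returns 0 on a match, so the bare `strcmp(type, "s64")` was true for every type name except "s64", and the old condition treated almost any type as a 64-bit value. A tiny standalone demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *type = "s8";

        /* Buggy form: true whenever type is NOT "s64". */
        if (strcmp(type, "u64") == 0 || strcmp(type, "s64"))
            printf("buggy check matches \"%s\"\n", type);

        /* Fixed form: only true for the two intended names. */
        if (strcmp(type, "u64") == 0 || strcmp(type, "s64") == 0)
            printf("fixed check matches \"%s\"\n", type);
        else
            printf("fixed check rejects \"%s\"\n", type);

        return 0;
    }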
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5dde107083c6..479196aeb409 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
| @@ -165,6 +165,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, | |||
| 165 | "fortify_panic", | 165 | "fortify_panic", |
| 166 | "usercopy_abort", | 166 | "usercopy_abort", |
| 167 | "machine_real_restart", | 167 | "machine_real_restart", |
| 168 | "rewind_stack_do_exit", | ||
| 168 | }; | 169 | }; |
| 169 | 170 | ||
| 170 | if (func->bind == STB_WEAK) | 171 | if (func->bind == STB_WEAK) |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 49ee3c2033ec..c3625ec374e0 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
| @@ -1308,6 +1308,7 @@ static void init_features(struct perf_session *session) | |||
| 1308 | for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) | 1308 | for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) |
| 1309 | perf_header__set_feat(&session->header, feat); | 1309 | perf_header__set_feat(&session->header, feat); |
| 1310 | 1310 | ||
| 1311 | perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT); | ||
| 1311 | perf_header__clear_feat(&session->header, HEADER_BUILD_ID); | 1312 | perf_header__clear_feat(&session->header, HEADER_BUILD_ID); |
| 1312 | perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); | 1313 | perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); |
| 1313 | perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); | 1314 | perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1999d6533d12..fbbb0da43abb 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
| @@ -1377,6 +1377,7 @@ int cmd_top(int argc, const char **argv) | |||
| 1377 | * */ | 1377 | * */ |
| 1378 | .overwrite = 0, | 1378 | .overwrite = 0, |
| 1379 | .sample_time = true, | 1379 | .sample_time = true, |
| 1380 | .sample_time_set = true, | ||
| 1380 | }, | 1381 | }, |
| 1381 | .max_stack = sysctl__max_stack(), | 1382 | .max_stack = sysctl__max_stack(), |
| 1382 | .annotation_opts = annotation__default_options, | 1383 | .annotation_opts = annotation__default_options, |
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py index 3b71902a5a21..bf271fbc3a88 100644 --- a/tools/perf/scripts/python/export-to-sqlite.py +++ b/tools/perf/scripts/python/export-to-sqlite.py | |||
| @@ -331,7 +331,7 @@ if perf_db_export_calls: | |||
| 331 | 'return_id,' | 331 | 'return_id,' |
| 332 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' | 332 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' |
| 333 | 'parent_call_path_id,' | 333 | 'parent_call_path_id,' |
| 334 | 'parent_id' | 334 | 'calls.parent_id' |
| 335 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') | 335 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') |
| 336 | 336 | ||
| 337 | do_query(query, 'CREATE VIEW samples_view AS ' | 337 | do_query(query, 'CREATE VIEW samples_view AS ' |
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index c6351b557bb0..9494f9dc61ec 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c | |||
| @@ -57,9 +57,11 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, | |||
| 57 | else if (prog_id > node->info_linear->info.id) | 57 | else if (prog_id > node->info_linear->info.id) |
| 58 | n = n->rb_right; | 58 | n = n->rb_right; |
| 59 | else | 59 | else |
| 60 | break; | 60 | goto out; |
| 61 | } | 61 | } |
| 62 | node = NULL; | ||
| 62 | 63 | ||
| 64 | out: | ||
| 63 | up_read(&env->bpf_progs.lock); | 65 | up_read(&env->bpf_progs.lock); |
| 64 | return node; | 66 | return node; |
| 65 | } | 67 | } |
| @@ -109,10 +111,12 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) | |||
| 109 | else if (btf_id > node->id) | 111 | else if (btf_id > node->id) |
| 110 | n = n->rb_right; | 112 | n = n->rb_right; |
| 111 | else | 113 | else |
| 112 | break; | 114 | goto out; |
| 113 | } | 115 | } |
| 116 | node = NULL; | ||
| 114 | 117 | ||
| 115 | up_read(&env->bpf_progs.lock); | 118 | up_read(&env->bpf_progs.lock); |
| 119 | out: | ||
| 116 | return node; | 120 | return node; |
| 117 | } | 121 | } |
| 118 | 122 | ||
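Both env.c hunks fix the same lookup bug: the rb-tree walk used to `break` on a match, but when the loop fell through without finding the key, `node` still pointed at the last node visited, so callers received a bogus entry instead of NULL. The fix skips the `node = NULL` assignment only on a genuine hit. The shape of the bug and the fix, reduced to a plain array search:

    #include <stdio.h>
    #include <stddef.h>

    struct prog { unsigned id; const char *name; };

    static struct prog progs[] = {
        { 1, "one" }, { 4, "four" }, { 9, "nine" },
    };

    static struct prog *find_prog(unsigned id)
    {
        struct prog *node = NULL;
        size_t i;

        for (i = 0; i < sizeof(progs) / sizeof(progs[0]); i++) {
            node = &progs[i];
            if (node->id == id)
                goto out;            /* real match: keep node */
        }
        node = NULL;                 /* fell through: no match */
    out:
        return node;
    }

    int main(void)
    {
        printf("%s\n", find_prog(4) ? find_prog(4)->name : "(null)");
        printf("%s\n", find_prog(7) ? "unexpected hit" : "(null)");
        return 0;
    }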
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 6689378ee577..51ead577533f 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
| @@ -1868,12 +1868,12 @@ static void *perf_evlist__poll_thread(void *arg) | |||
| 1868 | { | 1868 | { |
| 1869 | struct perf_evlist *evlist = arg; | 1869 | struct perf_evlist *evlist = arg; |
| 1870 | bool draining = false; | 1870 | bool draining = false; |
| 1871 | int i; | 1871 | int i, done = 0; |
| 1872 | |||
| 1873 | while (!done) { | ||
| 1874 | bool got_data = false; | ||
| 1872 | 1875 | ||
| 1873 | while (draining || !(evlist->thread.done)) { | 1876 | if (evlist->thread.done) |
| 1874 | if (draining) | ||
| 1875 | draining = false; | ||
| 1876 | else if (evlist->thread.done) | ||
| 1877 | draining = true; | 1877 | draining = true; |
| 1878 | 1878 | ||
| 1879 | if (!draining) | 1879 | if (!draining) |
| @@ -1894,9 +1894,13 @@ static void *perf_evlist__poll_thread(void *arg) | |||
| 1894 | pr_warning("cannot locate proper evsel for the side band event\n"); | 1894 | pr_warning("cannot locate proper evsel for the side band event\n"); |
| 1895 | 1895 | ||
| 1896 | perf_mmap__consume(map); | 1896 | perf_mmap__consume(map); |
| 1897 | got_data = true; | ||
| 1897 | } | 1898 | } |
| 1898 | perf_mmap__read_done(map); | 1899 | perf_mmap__read_done(map); |
| 1899 | } | 1900 | } |
| 1901 | |||
| 1902 | if (draining && !got_data) | ||
| 1903 | break; | ||
| 1900 | } | 1904 | } |
| 1901 | return NULL; | 1905 | return NULL; |
| 1902 | } | 1906 | } |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 66d066f18b5b..966360844fff 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
| @@ -2368,7 +2368,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
| 2368 | if (data->user_regs.abi) { | 2368 | if (data->user_regs.abi) { |
| 2369 | u64 mask = evsel->attr.sample_regs_user; | 2369 | u64 mask = evsel->attr.sample_regs_user; |
| 2370 | 2370 | ||
| 2371 | sz = hweight_long(mask) * sizeof(u64); | 2371 | sz = hweight64(mask) * sizeof(u64); |
| 2372 | OVERFLOW_CHECK(array, sz, max_size); | 2372 | OVERFLOW_CHECK(array, sz, max_size); |
| 2373 | data->user_regs.mask = mask; | 2373 | data->user_regs.mask = mask; |
| 2374 | data->user_regs.regs = (u64 *)array; | 2374 | data->user_regs.regs = (u64 *)array; |
| @@ -2424,7 +2424,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | |||
| 2424 | if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { | 2424 | if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { |
| 2425 | u64 mask = evsel->attr.sample_regs_intr; | 2425 | u64 mask = evsel->attr.sample_regs_intr; |
| 2426 | 2426 | ||
| 2427 | sz = hweight_long(mask) * sizeof(u64); | 2427 | sz = hweight64(mask) * sizeof(u64); |
| 2428 | OVERFLOW_CHECK(array, sz, max_size); | 2428 | OVERFLOW_CHECK(array, sz, max_size); |
| 2429 | data->intr_regs.mask = mask; | 2429 | data->intr_regs.mask = mask; |
| 2430 | data->intr_regs.regs = (u64 *)array; | 2430 | data->intr_regs.regs = (u64 *)array; |
| @@ -2552,7 +2552,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, | |||
| 2552 | if (type & PERF_SAMPLE_REGS_USER) { | 2552 | if (type & PERF_SAMPLE_REGS_USER) { |
| 2553 | if (sample->user_regs.abi) { | 2553 | if (sample->user_regs.abi) { |
| 2554 | result += sizeof(u64); | 2554 | result += sizeof(u64); |
| 2555 | sz = hweight_long(sample->user_regs.mask) * sizeof(u64); | 2555 | sz = hweight64(sample->user_regs.mask) * sizeof(u64); |
| 2556 | result += sz; | 2556 | result += sz; |
| 2557 | } else { | 2557 | } else { |
| 2558 | result += sizeof(u64); | 2558 | result += sizeof(u64); |
| @@ -2580,7 +2580,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, | |||
| 2580 | if (type & PERF_SAMPLE_REGS_INTR) { | 2580 | if (type & PERF_SAMPLE_REGS_INTR) { |
| 2581 | if (sample->intr_regs.abi) { | 2581 | if (sample->intr_regs.abi) { |
| 2582 | result += sizeof(u64); | 2582 | result += sizeof(u64); |
| 2583 | sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); | 2583 | sz = hweight64(sample->intr_regs.mask) * sizeof(u64); |
| 2584 | result += sz; | 2584 | result += sz; |
| 2585 | } else { | 2585 | } else { |
| 2586 | result += sizeof(u64); | 2586 | result += sizeof(u64); |
| @@ -2710,7 +2710,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
| 2710 | if (type & PERF_SAMPLE_REGS_USER) { | 2710 | if (type & PERF_SAMPLE_REGS_USER) { |
| 2711 | if (sample->user_regs.abi) { | 2711 | if (sample->user_regs.abi) { |
| 2712 | *array++ = sample->user_regs.abi; | 2712 | *array++ = sample->user_regs.abi; |
| 2713 | sz = hweight_long(sample->user_regs.mask) * sizeof(u64); | 2713 | sz = hweight64(sample->user_regs.mask) * sizeof(u64); |
| 2714 | memcpy(array, sample->user_regs.regs, sz); | 2714 | memcpy(array, sample->user_regs.regs, sz); |
| 2715 | array = (void *)array + sz; | 2715 | array = (void *)array + sz; |
| 2716 | } else { | 2716 | } else { |
| @@ -2746,7 +2746,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, | |||
| 2746 | if (type & PERF_SAMPLE_REGS_INTR) { | 2746 | if (type & PERF_SAMPLE_REGS_INTR) { |
| 2747 | if (sample->intr_regs.abi) { | 2747 | if (sample->intr_regs.abi) { |
| 2748 | *array++ = sample->intr_regs.abi; | 2748 | *array++ = sample->intr_regs.abi; |
| 2749 | sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); | 2749 | sz = hweight64(sample->intr_regs.mask) * sizeof(u64); |
| 2750 | memcpy(array, sample->intr_regs.regs, sz); | 2750 | memcpy(array, sample->intr_regs.regs, sz); |
| 2751 | array = (void *)array + sz; | 2751 | array = (void *)array + sz; |
| 2752 | } else { | 2752 | } else { |
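The evsel.c hunks switch from hweight_long() to hweight64() when sizing sampled register blocks: the register mask is a u64, and on a 32-bit perf build hweight_long() counts only the low 32 bits, undersizing the copy whenever a register above bit 31 is selected. A portable popcount over the full 64-bit mask (my own helper, not the kernel's hweight64) shows the difference:

    #include <stdint.h>
    #include <stdio.h>

    /* Count set bits in a 64-bit mask, independent of sizeof(long). */
    static unsigned popcount64(uint64_t v)
    {
        unsigned n = 0;

        while (v) {
            v &= v - 1;   /* clear the lowest set bit */
            n++;
        }
        return n;
    }

    int main(void)
    {
        /* e.g. a sample_regs mask selecting registers 0, 3 and 35 */
        uint64_t mask = (1ULL << 0) | (1ULL << 3) | (1ULL << 35);

        unsigned full = popcount64(mask);                /* 3 */
        /* What a popcount over a 32-bit long would have seen: */
        unsigned low = popcount64(mask & 0xffffffffULL); /* 2 */

        printf("regs=%u bytes=%zu (low-32 only: %u)\n",
               full, (size_t)full * sizeof(uint64_t), low);
        return 0;
    }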
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index b9e693825873..2d2af2ac2b1e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
| @@ -2606,6 +2606,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) | |||
| 2606 | perf_env__insert_bpf_prog_info(env, info_node); | 2606 | perf_env__insert_bpf_prog_info(env, info_node); |
| 2607 | } | 2607 | } |
| 2608 | 2608 | ||
| 2609 | up_write(&env->bpf_progs.lock); | ||
| 2609 | return 0; | 2610 | return 0; |
| 2610 | out: | 2611 | out: |
| 2611 | free(info_linear); | 2612 | free(info_linear); |
| @@ -2623,7 +2624,9 @@ static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data _ | |||
| 2623 | static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) | 2624 | static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) |
| 2624 | { | 2625 | { |
| 2625 | struct perf_env *env = &ff->ph->env; | 2626 | struct perf_env *env = &ff->ph->env; |
| 2627 | struct btf_node *node = NULL; | ||
| 2626 | u32 count, i; | 2628 | u32 count, i; |
| 2629 | int err = -1; | ||
| 2627 | 2630 | ||
| 2628 | if (ff->ph->needs_swap) { | 2631 | if (ff->ph->needs_swap) { |
| 2629 | pr_warning("interpreting btf from systems with endianity is not yet supported\n"); | 2632 | pr_warning("interpreting btf from systems with endianity is not yet supported\n"); |
| @@ -2636,31 +2639,32 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) | |||
| 2636 | down_write(&env->bpf_progs.lock); | 2639 | down_write(&env->bpf_progs.lock); |
| 2637 | 2640 | ||
| 2638 | for (i = 0; i < count; ++i) { | 2641 | for (i = 0; i < count; ++i) { |
| 2639 | struct btf_node *node; | ||
| 2640 | u32 id, data_size; | 2642 | u32 id, data_size; |
| 2641 | 2643 | ||
| 2642 | if (do_read_u32(ff, &id)) | 2644 | if (do_read_u32(ff, &id)) |
| 2643 | return -1; | 2645 | goto out; |
| 2644 | if (do_read_u32(ff, &data_size)) | 2646 | if (do_read_u32(ff, &data_size)) |
| 2645 | return -1; | 2647 | goto out; |
| 2646 | 2648 | ||
| 2647 | node = malloc(sizeof(struct btf_node) + data_size); | 2649 | node = malloc(sizeof(struct btf_node) + data_size); |
| 2648 | if (!node) | 2650 | if (!node) |
| 2649 | return -1; | 2651 | goto out; |
| 2650 | 2652 | ||
| 2651 | node->id = id; | 2653 | node->id = id; |
| 2652 | node->data_size = data_size; | 2654 | node->data_size = data_size; |
| 2653 | 2655 | ||
| 2654 | if (__do_read(ff, node->data, data_size)) { | 2656 | if (__do_read(ff, node->data, data_size)) |
| 2655 | free(node); | 2657 | goto out; |
| 2656 | return -1; | ||
| 2657 | } | ||
| 2658 | 2658 | ||
| 2659 | perf_env__insert_btf(env, node); | 2659 | perf_env__insert_btf(env, node); |
| 2660 | node = NULL; | ||
| 2660 | } | 2661 | } |
| 2661 | 2662 | ||
| 2663 | err = 0; | ||
| 2664 | out: | ||
| 2662 | up_write(&env->bpf_progs.lock); | 2665 | up_write(&env->bpf_progs.lock); |
| 2663 | return 0; | 2666 | free(node); |
| 2667 | return err; | ||
| 2664 | } | 2668 | } |
| 2665 | 2669 | ||
| 2666 | struct feature_ops { | 2670 | struct feature_ops { |
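The process_bpf_btf() rework in header.c turns several early `return -1` paths, which leaked the half-built node and returned with env->bpf_progs.lock still held, into a single `goto out` exit that releases the lock and frees any node that was never inserted. A user-space sketch of that single-exit cleanup idiom, with invented names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pretend "read a record from the input"; fails for i == 2. */
    static int read_record(int i, char *buf, size_t len)
    {
        if (i == 2)
            return -1;
        snprintf(buf, len, "record-%d", i);
        return 0;
    }

    static int load_records(int count)
    {
        char *node = NULL;
        int err = -1;
        int i;

        pthread_mutex_lock(&lock);

        for (i = 0; i < count; i++) {
            node = malloc(32);
            if (!node)
                goto out;            /* no leak, lock released below */
            if (read_record(i, node, 32))
                goto out;            /* ditto on a read failure */

            printf("inserted %s\n", node);
            node = NULL;             /* ownership handed off */
        }

        err = 0;
    out:
        pthread_mutex_unlock(&lock);
        free(node);                  /* NULL-safe */
        return err;
    }

    int main(void)
    {
        printf("load_records -> %d\n", load_records(4));
        return 0;
    }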
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index e32628cd20a7..ee71efb9db62 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
| @@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map) | |||
| 261 | return kmap && kmap->name[0]; | 261 | return kmap && kmap->name[0]; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | bool __map__is_bpf_prog(const struct map *map) | ||
| 265 | { | ||
| 266 | const char *name; | ||
| 267 | |||
| 268 | if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) | ||
| 269 | return true; | ||
| 270 | |||
| 271 | /* | ||
| 272 | * If PERF_RECORD_BPF_EVENT is not included, the dso will not have | ||
| 273 | * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can | ||
| 274 | * guess the type based on name. | ||
| 275 | */ | ||
| 276 | name = map->dso->short_name; | ||
| 277 | return name && (strstr(name, "bpf_prog_") == name); | ||
| 278 | } | ||
| 279 | |||
| 264 | bool map__has_symbols(const struct map *map) | 280 | bool map__has_symbols(const struct map *map) |
| 265 | { | 281 | { |
| 266 | return dso__has_symbols(map->dso); | 282 | return dso__has_symbols(map->dso); |
| @@ -910,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map) | |||
| 910 | rc = strcmp(m->dso->short_name, map->dso->short_name); | 926 | rc = strcmp(m->dso->short_name, map->dso->short_name); |
| 911 | if (rc < 0) | 927 | if (rc < 0) |
| 912 | p = &(*p)->rb_left; | 928 | p = &(*p)->rb_left; |
| 913 | else if (rc > 0) | ||
| 914 | p = &(*p)->rb_right; | ||
| 915 | else | 929 | else |
| 916 | return; | 930 | p = &(*p)->rb_right; |
| 917 | } | 931 | } |
| 918 | rb_link_node(&map->rb_node_name, parent, p); | 932 | rb_link_node(&map->rb_node_name, parent, p); |
| 919 | rb_insert_color(&map->rb_node_name, &maps->names); | 933 | rb_insert_color(&map->rb_node_name, &maps->names); |
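The new __map__is_bpf_prog() falls back to a name test when the dso is not tagged as DSO_BINARY_TYPE__BPF_PROG_INFO; `strstr(name, "bpf_prog_") == name` is the usual C idiom for "starts with this prefix" (a strncmp() against the prefix length works equally well). For example, with hypothetical dso names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool has_prefix(const char *name, const char *prefix)
    {
        /* strstr() returns the first occurrence; it is a prefix
         * match only if that occurrence is the start of the string. */
        return name && strstr(name, prefix) == name;
    }

    int main(void)
    {
        printf("%d\n", has_prefix("bpf_prog_1234abcd", "bpf_prog_")); /* 1 */
        printf("%d\n", has_prefix("libc-2.28.so", "bpf_prog_"));      /* 0 */
        printf("%d\n", has_prefix("my_bpf_prog_tool", "bpf_prog_"));  /* 0 */
        return 0;
    }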
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 0e20749f2c55..dc93787c74f0 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
| @@ -159,10 +159,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, | |||
| 159 | 159 | ||
| 160 | bool __map__is_kernel(const struct map *map); | 160 | bool __map__is_kernel(const struct map *map); |
| 161 | bool __map__is_extra_kernel_map(const struct map *map); | 161 | bool __map__is_extra_kernel_map(const struct map *map); |
| 162 | bool __map__is_bpf_prog(const struct map *map); | ||
| 162 | 163 | ||
| 163 | static inline bool __map__is_kmodule(const struct map *map) | 164 | static inline bool __map__is_kmodule(const struct map *map) |
| 164 | { | 165 | { |
| 165 | return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map); | 166 | return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) && |
| 167 | !__map__is_bpf_prog(map); | ||
| 166 | } | 168 | } |
| 167 | 169 | ||
| 168 | bool map__has_symbols(const struct map *map); | 170 | bool map__has_symbols(const struct map *map); |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index c3fad065c89c..c7727be9719f 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <cpuid.h> | 44 | #include <cpuid.h> |
| 45 | #include <linux/capability.h> | 45 | #include <linux/capability.h> |
| 46 | #include <errno.h> | 46 | #include <errno.h> |
| 47 | #include <math.h> | ||
| 47 | 48 | ||
| 48 | char *proc_stat = "/proc/stat"; | 49 | char *proc_stat = "/proc/stat"; |
| 49 | FILE *outf; | 50 | FILE *outf; |
| @@ -63,7 +64,6 @@ unsigned int dump_only; | |||
| 63 | unsigned int do_snb_cstates; | 64 | unsigned int do_snb_cstates; |
| 64 | unsigned int do_knl_cstates; | 65 | unsigned int do_knl_cstates; |
| 65 | unsigned int do_slm_cstates; | 66 | unsigned int do_slm_cstates; |
| 66 | unsigned int do_cnl_cstates; | ||
| 67 | unsigned int use_c1_residency_msr; | 67 | unsigned int use_c1_residency_msr; |
| 68 | unsigned int has_aperf; | 68 | unsigned int has_aperf; |
| 69 | unsigned int has_epb; | 69 | unsigned int has_epb; |
| @@ -141,9 +141,21 @@ unsigned int first_counter_read = 1; | |||
| 141 | 141 | ||
| 142 | #define RAPL_CORES_ENERGY_STATUS (1 << 9) | 142 | #define RAPL_CORES_ENERGY_STATUS (1 << 9) |
| 143 | /* 0x639 MSR_PP0_ENERGY_STATUS */ | 143 | /* 0x639 MSR_PP0_ENERGY_STATUS */ |
| 144 | #define RAPL_PER_CORE_ENERGY (1 << 10) | ||
| 145 | /* Indicates cores energy collection is per-core, | ||
| 146 | * not per-package. */ | ||
| 147 | #define RAPL_AMD_F17H (1 << 11) | ||
| 148 | /* 0xc0010299 MSR_RAPL_PWR_UNIT */ | ||
| 149 | /* 0xc001029a MSR_CORE_ENERGY_STAT */ | ||
| 150 | /* 0xc001029b MSR_PKG_ENERGY_STAT */ | ||
| 144 | #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) | 151 | #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) |
| 145 | #define TJMAX_DEFAULT 100 | 152 | #define TJMAX_DEFAULT 100 |
| 146 | 153 | ||
| 154 | /* MSRs that are not yet in the kernel-provided header. */ | ||
| 155 | #define MSR_RAPL_PWR_UNIT 0xc0010299 | ||
| 156 | #define MSR_CORE_ENERGY_STAT 0xc001029a | ||
| 157 | #define MSR_PKG_ENERGY_STAT 0xc001029b | ||
| 158 | |||
| 147 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) | 159 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) |
| 148 | 160 | ||
| 149 | /* | 161 | /* |
| @@ -187,6 +199,7 @@ struct core_data { | |||
| 187 | unsigned long long c7; | 199 | unsigned long long c7; |
| 188 | unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ | 200 | unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ |
| 189 | unsigned int core_temp_c; | 201 | unsigned int core_temp_c; |
| 202 | unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */ | ||
| 190 | unsigned int core_id; | 203 | unsigned int core_id; |
| 191 | unsigned long long counter[MAX_ADDED_COUNTERS]; | 204 | unsigned long long counter[MAX_ADDED_COUNTERS]; |
| 192 | } *core_even, *core_odd; | 205 | } *core_even, *core_odd; |
| @@ -273,6 +286,7 @@ struct system_summary { | |||
| 273 | 286 | ||
| 274 | struct cpu_topology { | 287 | struct cpu_topology { |
| 275 | int physical_package_id; | 288 | int physical_package_id; |
| 289 | int die_id; | ||
| 276 | int logical_cpu_id; | 290 | int logical_cpu_id; |
| 277 | int physical_node_id; | 291 | int physical_node_id; |
| 278 | int logical_node_id; /* 0-based count within the package */ | 292 | int logical_node_id; /* 0-based count within the package */ |
| @@ -283,6 +297,7 @@ struct cpu_topology { | |||
| 283 | 297 | ||
| 284 | struct topo_params { | 298 | struct topo_params { |
| 285 | int num_packages; | 299 | int num_packages; |
| 300 | int num_die; | ||
| 286 | int num_cpus; | 301 | int num_cpus; |
| 287 | int num_cores; | 302 | int num_cores; |
| 288 | int max_cpu_num; | 303 | int max_cpu_num; |
| @@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg | |||
| 314 | int retval, pkg_no, core_no, thread_no, node_no; | 329 | int retval, pkg_no, core_no, thread_no, node_no; |
| 315 | 330 | ||
| 316 | for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { | 331 | for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { |
| 317 | for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { | 332 | for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) { |
| 318 | for (node_no = 0; node_no < topo.nodes_per_pkg; | 333 | for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { |
| 319 | node_no++) { | ||
| 320 | for (thread_no = 0; thread_no < | 334 | for (thread_no = 0; thread_no < |
| 321 | topo.threads_per_core; ++thread_no) { | 335 | topo.threads_per_core; ++thread_no) { |
| 322 | struct thread_data *t; | 336 | struct thread_data *t; |
| @@ -442,6 +456,7 @@ struct msr_counter bic[] = { | |||
| 442 | { 0x0, "CPU" }, | 456 | { 0x0, "CPU" }, |
| 443 | { 0x0, "APIC" }, | 457 | { 0x0, "APIC" }, |
| 444 | { 0x0, "X2APIC" }, | 458 | { 0x0, "X2APIC" }, |
| 459 | { 0x0, "Die" }, | ||
| 445 | }; | 460 | }; |
| 446 | 461 | ||
| 447 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) | 462 | #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) |
| @@ -495,6 +510,7 @@ struct msr_counter bic[] = { | |||
| 495 | #define BIC_CPU (1ULL << 47) | 510 | #define BIC_CPU (1ULL << 47) |
| 496 | #define BIC_APIC (1ULL << 48) | 511 | #define BIC_APIC (1ULL << 48) |
| 497 | #define BIC_X2APIC (1ULL << 49) | 512 | #define BIC_X2APIC (1ULL << 49) |
| 513 | #define BIC_Die (1ULL << 50) | ||
| 498 | 514 | ||
| 499 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) | 515 | #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) |
| 500 | 516 | ||
| @@ -621,6 +637,8 @@ void print_header(char *delim) | |||
| 621 | outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); | 637 | outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); |
| 622 | if (DO_BIC(BIC_Package)) | 638 | if (DO_BIC(BIC_Package)) |
| 623 | outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); | 639 | outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); |
| 640 | if (DO_BIC(BIC_Die)) | ||
| 641 | outp += sprintf(outp, "%sDie", (printed++ ? delim : "")); | ||
| 624 | if (DO_BIC(BIC_Node)) | 642 | if (DO_BIC(BIC_Node)) |
| 625 | outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); | 643 | outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); |
| 626 | if (DO_BIC(BIC_Core)) | 644 | if (DO_BIC(BIC_Core)) |
| @@ -667,7 +685,7 @@ void print_header(char *delim) | |||
| 667 | 685 | ||
| 668 | if (DO_BIC(BIC_CPU_c1)) | 686 | if (DO_BIC(BIC_CPU_c1)) |
| 669 | outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); | 687 | outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); |
| 670 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) | 688 | if (DO_BIC(BIC_CPU_c3)) |
| 671 | outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); | 689 | outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); |
| 672 | if (DO_BIC(BIC_CPU_c6)) | 690 | if (DO_BIC(BIC_CPU_c6)) |
| 673 | outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); | 691 | outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); |
| @@ -680,6 +698,14 @@ void print_header(char *delim) | |||
| 680 | if (DO_BIC(BIC_CoreTmp)) | 698 | if (DO_BIC(BIC_CoreTmp)) |
| 681 | outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); | 699 | outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); |
| 682 | 700 | ||
| 701 | if (do_rapl && !rapl_joules) { | ||
| 702 | if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 703 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); | ||
| 704 | } else if (do_rapl && rapl_joules) { | ||
| 705 | if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 706 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); | ||
| 707 | } | ||
| 708 | |||
| 683 | for (mp = sys.cp; mp; mp = mp->next) { | 709 | for (mp = sys.cp; mp; mp = mp->next) { |
| 684 | if (mp->format == FORMAT_RAW) { | 710 | if (mp->format == FORMAT_RAW) { |
| 685 | if (mp->width == 64) | 711 | if (mp->width == 64) |
| @@ -734,7 +760,7 @@ void print_header(char *delim) | |||
| 734 | if (do_rapl && !rapl_joules) { | 760 | if (do_rapl && !rapl_joules) { |
| 735 | if (DO_BIC(BIC_PkgWatt)) | 761 | if (DO_BIC(BIC_PkgWatt)) |
| 736 | outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); | 762 | outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); |
| 737 | if (DO_BIC(BIC_CorWatt)) | 763 | if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 738 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); | 764 | outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); |
| 739 | if (DO_BIC(BIC_GFXWatt)) | 765 | if (DO_BIC(BIC_GFXWatt)) |
| 740 | outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); | 766 | outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); |
| @@ -747,7 +773,7 @@ void print_header(char *delim) | |||
| 747 | } else if (do_rapl && rapl_joules) { | 773 | } else if (do_rapl && rapl_joules) { |
| 748 | if (DO_BIC(BIC_Pkg_J)) | 774 | if (DO_BIC(BIC_Pkg_J)) |
| 749 | outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); | 775 | outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); |
| 750 | if (DO_BIC(BIC_Cor_J)) | 776 | if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 751 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); | 777 | outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); |
| 752 | if (DO_BIC(BIC_GFX_J)) | 778 | if (DO_BIC(BIC_GFX_J)) |
| 753 | outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); | 779 | outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); |
| @@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, | |||
| 808 | outp += sprintf(outp, "c6: %016llX\n", c->c6); | 834 | outp += sprintf(outp, "c6: %016llX\n", c->c6); |
| 809 | outp += sprintf(outp, "c7: %016llX\n", c->c7); | 835 | outp += sprintf(outp, "c7: %016llX\n", c->c7); |
| 810 | outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); | 836 | outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); |
| 837 | outp += sprintf(outp, "Joules: %0X\n", c->core_energy); | ||
| 811 | 838 | ||
| 812 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 839 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 813 | outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", | 840 | outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", |
| @@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 904 | if (t == &average.threads) { | 931 | if (t == &average.threads) { |
| 905 | if (DO_BIC(BIC_Package)) | 932 | if (DO_BIC(BIC_Package)) |
| 906 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 933 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 934 | if (DO_BIC(BIC_Die)) | ||
| 935 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 907 | if (DO_BIC(BIC_Node)) | 936 | if (DO_BIC(BIC_Node)) |
| 908 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 937 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 909 | if (DO_BIC(BIC_Core)) | 938 | if (DO_BIC(BIC_Core)) |
| @@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 921 | else | 950 | else |
| 922 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | 951 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); |
| 923 | } | 952 | } |
| 953 | if (DO_BIC(BIC_Die)) { | ||
| 954 | if (c) | ||
| 955 | outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id); | ||
| 956 | else | ||
| 957 | outp += sprintf(outp, "%s-", (printed++ ? delim : "")); | ||
| 958 | } | ||
| 924 | if (DO_BIC(BIC_Node)) { | 959 | if (DO_BIC(BIC_Node)) { |
| 925 | if (t) | 960 | if (t) |
| 926 | outp += sprintf(outp, "%s%d", | 961 | outp += sprintf(outp, "%s%d", |
| @@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1003 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 1038 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 1004 | goto done; | 1039 | goto done; |
| 1005 | 1040 | ||
| 1006 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) | 1041 | if (DO_BIC(BIC_CPU_c3)) |
| 1007 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); | 1042 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); |
| 1008 | if (DO_BIC(BIC_CPU_c6)) | 1043 | if (DO_BIC(BIC_CPU_c6)) |
| 1009 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); | 1044 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); |
| @@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1033 | } | 1068 | } |
| 1034 | } | 1069 | } |
| 1035 | 1070 | ||
| 1071 | /* | ||
| 1072 | * If measurement interval exceeds minimum RAPL Joule Counter range, | ||
| 1073 | * indicate that results are suspect by printing "**" in fraction place. | ||
| 1074 | */ | ||
| 1075 | if (interval_float < rapl_joule_counter_range) | ||
| 1076 | fmt8 = "%s%.2f"; | ||
| 1077 | else | ||
| 1078 | fmt8 = "%6.0f**"; | ||
| 1079 | |||
| 1080 | if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 1081 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); | ||
| 1082 | if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) | ||
| 1083 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units); | ||
| 1084 | |||
| 1036 | /* print per-package data only for 1st core in package */ | 1085 | /* print per-package data only for 1st core in package */ |
| 1037 | if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) | 1086 | if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) |
| 1038 | goto done; | 1087 | goto done; |
| @@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1085 | if (DO_BIC(BIC_SYS_LPI)) | 1134 | if (DO_BIC(BIC_SYS_LPI)) |
| 1086 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); | 1135 | outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); |
| 1087 | 1136 | ||
| 1088 | /* | ||
| 1089 | * If measurement interval exceeds minimum RAPL Joule Counter range, | ||
| 1090 | * indicate that results are suspect by printing "**" in fraction place. | ||
| 1091 | */ | ||
| 1092 | if (interval_float < rapl_joule_counter_range) | ||
| 1093 | fmt8 = "%s%.2f"; | ||
| 1094 | else | ||
| 1095 | fmt8 = "%6.0f**"; | ||
| 1096 | |||
| 1097 | if (DO_BIC(BIC_PkgWatt)) | 1137 | if (DO_BIC(BIC_PkgWatt)) |
| 1098 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); | 1138 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); |
| 1099 | if (DO_BIC(BIC_CorWatt)) | 1139 | if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 1100 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); | 1140 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); |
| 1101 | if (DO_BIC(BIC_GFXWatt)) | 1141 | if (DO_BIC(BIC_GFXWatt)) |
| 1102 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); | 1142 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); |
| @@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
| 1104 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); | 1144 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); |
| 1105 | if (DO_BIC(BIC_Pkg_J)) | 1145 | if (DO_BIC(BIC_Pkg_J)) |
| 1106 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); | 1146 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); |
| 1107 | if (DO_BIC(BIC_Cor_J)) | 1147 | if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) |
| 1108 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); | 1148 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); |
| 1109 | if (DO_BIC(BIC_GFX_J)) | 1149 | if (DO_BIC(BIC_GFX_J)) |
| 1110 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); | 1150 | outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); |
| @@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old) | |||
| 1249 | old->core_temp_c = new->core_temp_c; | 1289 | old->core_temp_c = new->core_temp_c; |
| 1250 | old->mc6_us = new->mc6_us - old->mc6_us; | 1290 | old->mc6_us = new->mc6_us - old->mc6_us; |
| 1251 | 1291 | ||
| 1292 | DELTA_WRAP32(new->core_energy, old->core_energy); | ||
| 1293 | |||
| 1252 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1294 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1253 | if (mp->format == FORMAT_RAW) | 1295 | if (mp->format == FORMAT_RAW) |
| 1254 | old->counter[i] = new->counter[i]; | 1296 | old->counter[i] = new->counter[i]; |
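delta_core() now runs the new per-core energy counter through DELTA_WRAP32(), since MSR_CORE_ENERGY_STAT is a free-running 32-bit counter that wraps. The idea behind the macro, computing new minus old modulo 2^32, can be checked with a standalone snippet (my own helper, not turbostat's macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Delta of a free-running 32-bit counter; wrap-around is handled
     * naturally by unsigned modular arithmetic. */
    static uint32_t delta_wrap32(uint32_t new, uint32_t old)
    {
        return new - old;
    }

    int main(void)
    {
        uint32_t old = 0xfffffff0u;   /* just before the wrap */
        uint32_t new = 0x00000010u;   /* just after it */

        /* 0x10 - 0xfffffff0 == 0x20 modulo 2^32: 32 ticks elapsed. */
        printf("delta = %u\n", delta_wrap32(new, old));
        return 0;
    }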
| @@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data | |||
| 1391 | c->c7 = 0; | 1433 | c->c7 = 0; |
| 1392 | c->mc6_us = 0; | 1434 | c->mc6_us = 0; |
| 1393 | c->core_temp_c = 0; | 1435 | c->core_temp_c = 0; |
| 1436 | c->core_energy = 0; | ||
| 1394 | 1437 | ||
| 1395 | p->pkg_wtd_core_c0 = 0; | 1438 | p->pkg_wtd_core_c0 = 0; |
| 1396 | p->pkg_any_core_c0 = 0; | 1439 | p->pkg_any_core_c0 = 0; |
| @@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c, | |||
| 1473 | 1516 | ||
| 1474 | average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); | 1517 | average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); |
| 1475 | 1518 | ||
| 1519 | average.cores.core_energy += c->core_energy; | ||
| 1520 | |||
| 1476 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1521 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1477 | if (mp->format == FORMAT_RAW) | 1522 | if (mp->format == FORMAT_RAW) |
| 1478 | continue; | 1523 | continue; |
| @@ -1818,7 +1863,7 @@ retry: | |||
| 1818 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) | 1863 | if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) |
| 1819 | goto done; | 1864 | goto done; |
| 1820 | 1865 | ||
| 1821 | if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) { | 1866 | if (DO_BIC(BIC_CPU_c3)) { |
| 1822 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) | 1867 | if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) |
| 1823 | return -6; | 1868 | return -6; |
| 1824 | } | 1869 | } |
| @@ -1845,6 +1890,12 @@ retry: | |||
| 1845 | c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); | 1890 | c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); |
| 1846 | } | 1891 | } |
| 1847 | 1892 | ||
| 1893 | if (do_rapl & RAPL_AMD_F17H) { | ||
| 1894 | if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr)) | ||
| 1895 | return -14; | ||
| 1896 | c->core_energy = msr & 0xFFFFFFFF; | ||
| 1897 | } | ||
| 1898 | |||
| 1848 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { | 1899 | for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { |
| 1849 | if (get_mp(cpu, mp, &c->counter[i])) | 1900 | if (get_mp(cpu, mp, &c->counter[i])) |
| 1850 | return -10; | 1901 | return -10; |
| @@ -1934,6 +1985,11 @@ retry: | |||
| 1934 | return -16; | 1985 | return -16; |
| 1935 | p->rapl_dram_perf_status = msr & 0xFFFFFFFF; | 1986 | p->rapl_dram_perf_status = msr & 0xFFFFFFFF; |
| 1936 | } | 1987 | } |
| 1988 | if (do_rapl & RAPL_AMD_F17H) { | ||
| 1989 | if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr)) | ||
| 1990 | return -13; | ||
| 1991 | p->energy_pkg = msr & 0xFFFFFFFF; | ||
| 1992 | } | ||
| 1937 | if (DO_BIC(BIC_PkgTmp)) { | 1993 | if (DO_BIC(BIC_PkgTmp)) { |
| 1938 | if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) | 1994 | if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) |
| 1939 | return -17; | 1995 | return -17; |
| @@ -2456,6 +2512,8 @@ void free_all_buffers(void) | |||
| 2456 | 2512 | ||
| 2457 | /* | 2513 | /* |
| 2458 | * Parse a file containing a single int. | 2514 | * Parse a file containing a single int. |
| 2515 | * Return 0 if file can not be opened | ||
| 2516 | * Exit if file can be opened, but can not be parsed | ||
| 2459 | */ | 2517 | */ |
| 2460 | int parse_int_file(const char *fmt, ...) | 2518 | int parse_int_file(const char *fmt, ...) |
| 2461 | { | 2519 | { |
| @@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...) | |||
| 2467 | va_start(args, fmt); | 2525 | va_start(args, fmt); |
| 2468 | vsnprintf(path, sizeof(path), fmt, args); | 2526 | vsnprintf(path, sizeof(path), fmt, args); |
| 2469 | va_end(args); | 2527 | va_end(args); |
| 2470 | filep = fopen_or_die(path, "r"); | 2528 | filep = fopen(path, "r"); |
| 2529 | if (!filep) | ||
| 2530 | return 0; | ||
| 2471 | if (fscanf(filep, "%d", &value) != 1) | 2531 | if (fscanf(filep, "%d", &value) != 1) |
| 2472 | err(1, "%s: failed to parse number from file", path); | 2532 | err(1, "%s: failed to parse number from file", path); |
| 2473 | fclose(filep); | 2533 | fclose(filep); |
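parse_int_file() now returns 0 when the sysfs file cannot be opened instead of dying inside fopen_or_die(), which lets the new get_die_id() degrade gracefully on kernels whose topology directory has no die_id entry; a parse failure on a file that does exist is still fatal. The same read-with-fallback pattern in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    /* Read a single integer from a file; return def if the file does
     * not exist, abort if it exists but cannot be parsed. */
    static int read_int_or_default(const char *path, int def)
    {
        FILE *fp = fopen(path, "r");
        int value;

        if (!fp)
            return def;              /* e.g. older kernel: no die_id file */
        if (fscanf(fp, "%d", &value) != 1) {
            fprintf(stderr, "%s: failed to parse number\n", path);
            exit(1);
        }
        fclose(fp);
        return value;
    }

    int main(void)
    {
        /* Hypothetical sysfs path; on systems without it, 0 is used. */
        int die = read_int_or_default(
            "/sys/devices/system/cpu/cpu0/topology/die_id", 0);

        printf("die_id = %d\n", die);
        return 0;
    }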
| @@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu) | |||
| 2488 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); | 2548 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); |
| 2489 | } | 2549 | } |
| 2490 | 2550 | ||
| 2551 | int get_die_id(int cpu) | ||
| 2552 | { | ||
| 2553 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu); | ||
| 2554 | } | ||
| 2555 | |||
| 2491 | int get_core_id(int cpu) | 2556 | int get_core_id(int cpu) |
| 2492 | { | 2557 | { |
| 2493 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); | 2558 | return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); |
| @@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu) | |||
| 2578 | filep = fopen_or_die(path, "r"); | 2643 | filep = fopen_or_die(path, "r"); |
| 2579 | do { | 2644 | do { |
| 2580 | offset -= BITMASK_SIZE; | 2645 | offset -= BITMASK_SIZE; |
| 2581 | fscanf(filep, "%lx%c", &map, &character); | 2646 | if (fscanf(filep, "%lx%c", &map, &character) != 2) |
| 2647 | err(1, "%s: failed to parse file", path); | ||
| 2582 | for (shift = 0; shift < BITMASK_SIZE; shift++) { | 2648 | for (shift = 0; shift < BITMASK_SIZE; shift++) { |
| 2583 | if ((map >> shift) & 0x1) { | 2649 | if ((map >> shift) & 0x1) { |
| 2584 | so = shift + offset; | 2650 | so = shift + offset; |
| @@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void) | |||
| 2855 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); | 2921 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); |
| 2856 | 2922 | ||
| 2857 | retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); | 2923 | retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); |
| 2858 | if (retval != 1) | 2924 | if (retval != 1) { |
| 2859 | err(1, "CPU LPI"); | 2925 | fprintf(stderr, "Disabling Low Power Idle CPU output\n"); |
| 2926 | BIC_NOT_PRESENT(BIC_CPU_LPI); | ||
| 2927 | return -1; | ||
| 2928 | } | ||
| 2860 | 2929 | ||
| 2861 | fclose(fp); | 2930 | fclose(fp); |
| 2862 | 2931 | ||
| @@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void) | |||
| 2878 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); | 2947 | fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); |
| 2879 | 2948 | ||
| 2880 | retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); | 2949 | retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); |
| 2881 | if (retval != 1) | 2950 | if (retval != 1) { |
| 2882 | err(1, "SYS LPI"); | 2951 | fprintf(stderr, "Disabling Low Power Idle System output\n"); |
| 2883 | 2952 | BIC_NOT_PRESENT(BIC_SYS_LPI); | |
| 2953 | return -1; | ||
| 2954 | } | ||
| 2884 | fclose(fp); | 2955 | fclose(fp); |
| 2885 | 2956 | ||
| 2886 | return 0; | 2957 | return 0; |
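
Both LPI snapshot helpers above now degrade gracefully: a failed read of the residency file prints a warning, hides the corresponding column via BIC_NOT_PRESENT(), and returns -1 instead of exiting the tool. A small sketch of that pattern, with hypothetical names standing in for turbostat's BIC_* column bookkeeping:

#include <stdio.h>

static int cpu_lpi_column_present = 1;          /* stand-in for the BIC_* flags */
static long long cpuidle_cur_cpu_lpi_us;

static int snapshot_cpu_lpi_us_sketch(void)
{
        FILE *fp = fopen("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");

        if (!fp)
                return -1;
        if (fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us) != 1) {
                fprintf(stderr, "Disabling Low Power Idle CPU output\n");
                cpu_lpi_column_present = 0;     /* stands in for BIC_NOT_PRESENT(BIC_CPU_LPI) */
                fclose(fp);
                return -1;
        }
        fclose(fp);
        return 0;
}

int main(void)
{
        return snapshot_cpu_lpi_us_sketch() ? 1 : 0;
}
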
| @@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void) | |||
| 3410 | input = fopen(path, "r"); | 3481 | input = fopen(path, "r"); |
| 3411 | if (input == NULL) | 3482 | if (input == NULL) |
| 3412 | continue; | 3483 | continue; |
| 3413 | fgets(name_buf, sizeof(name_buf), input); | 3484 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 3485 | err(1, "%s: failed to read file", path); | ||
| 3414 | 3486 | ||
| 3415 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 3487 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 3416 | sp = strchr(name_buf, '-'); | 3488 | sp = strchr(name_buf, '-'); |
| 3417 | if (!sp) | 3489 | if (!sp) |
| 3418 | sp = strchrnul(name_buf, '\n'); | 3490 | sp = strchrnul(name_buf, '\n'); |
| 3419 | *sp = '\0'; | 3491 | *sp = '\0'; |
| 3420 | |||
| 3421 | fclose(input); | 3492 | fclose(input); |
| 3422 | 3493 | ||
| 3423 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", | 3494 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", |
| @@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void) | |||
| 3425 | input = fopen(path, "r"); | 3496 | input = fopen(path, "r"); |
| 3426 | if (input == NULL) | 3497 | if (input == NULL) |
| 3427 | continue; | 3498 | continue; |
| 3428 | fgets(desc, sizeof(desc), input); | 3499 | if (!fgets(desc, sizeof(desc), input)) |
| 3500 | err(1, "%s: failed to read file", path); | ||
| 3429 | 3501 | ||
| 3430 | fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); | 3502 | fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); |
| 3431 | fclose(input); | 3503 | fclose(input); |
| @@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void) | |||
| 3444 | base_cpu); | 3516 | base_cpu); |
| 3445 | input = fopen(path, "r"); | 3517 | input = fopen(path, "r"); |
| 3446 | if (input == NULL) { | 3518 | if (input == NULL) { |
| 3447 | fprintf(stderr, "NSFOD %s\n", path); | 3519 | fprintf(outf, "NSFOD %s\n", path); |
| 3448 | return; | 3520 | return; |
| 3449 | } | 3521 | } |
| 3450 | fgets(driver_buf, sizeof(driver_buf), input); | 3522 | if (!fgets(driver_buf, sizeof(driver_buf), input)) |
| 3523 | err(1, "%s: failed to read file", path); | ||
| 3451 | fclose(input); | 3524 | fclose(input); |
| 3452 | 3525 | ||
| 3453 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", | 3526 | sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", |
| 3454 | base_cpu); | 3527 | base_cpu); |
| 3455 | input = fopen(path, "r"); | 3528 | input = fopen(path, "r"); |
| 3456 | if (input == NULL) { | 3529 | if (input == NULL) { |
| 3457 | fprintf(stderr, "NSFOD %s\n", path); | 3530 | fprintf(outf, "NSFOD %s\n", path); |
| 3458 | return; | 3531 | return; |
| 3459 | } | 3532 | } |
| 3460 | fgets(governor_buf, sizeof(governor_buf), input); | 3533 | if (!fgets(governor_buf, sizeof(governor_buf), input)) |
| 3534 | err(1, "%s: failed to read file", path); | ||
| 3461 | fclose(input); | 3535 | fclose(input); |
| 3462 | 3536 | ||
| 3463 | fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); | 3537 | fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); |
| @@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void) | |||
| 3466 | sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); | 3540 | sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); |
| 3467 | input = fopen(path, "r"); | 3541 | input = fopen(path, "r"); |
| 3468 | if (input != NULL) { | 3542 | if (input != NULL) { |
| 3469 | fscanf(input, "%d", &turbo); | 3543 | if (fscanf(input, "%d", &turbo) != 1) |
| 3544 | err(1, "%s: failed to parse number from file", path); | ||
| 3470 | fprintf(outf, "cpufreq boost: %d\n", turbo); | 3545 | fprintf(outf, "cpufreq boost: %d\n", turbo); |
| 3471 | fclose(input); | 3546 | fclose(input); |
| 3472 | } | 3547 | } |
| @@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void) | |||
| 3474 | sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); | 3549 | sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); |
| 3475 | input = fopen(path, "r"); | 3550 | input = fopen(path, "r"); |
| 3476 | if (input != NULL) { | 3551 | if (input != NULL) { |
| 3477 | fscanf(input, "%d", &turbo); | 3552 | if (fscanf(input, "%d", &turbo) != 1) |
| 3553 | err(1, "%s: failed to parse number from file", path); | ||
| 3478 | fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); | 3554 | fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); |
| 3479 | fclose(input); | 3555 | fclose(input); |
| 3480 | } | 3556 | } |
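
The fgets()/fscanf() return values in the dump_sysfs_*_config() functions are now checked, so a short read is reported via err() instead of silently reusing stale buffer contents. A self-contained sketch of the same sysfs string handling, including the name truncation the code above performs (the path below is only an example):

#define _GNU_SOURCE             /* for strchrnul() */
#include <err.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/cpu0/cpuidle/state1/name";
        char name_buf[64];
        FILE *input = fopen(path, "r");
        char *sp;

        if (!input)
                return 0;       /* state may not exist; nothing to report */
        if (!fgets(name_buf, sizeof(name_buf), input))
                err(1, "%s: failed to read file", path);
        fclose(input);

        /* truncate "C1-HSW\n" to "C1", or "C1\n" to "C1" */
        sp = strchr(name_buf, '-');
        if (!sp)
                sp = strchrnul(name_buf, '\n');
        *sp = '\0';

        printf("%s\n", name_buf);
        return 0;
}
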
| @@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data | |||
| 3718 | #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ | 3794 | #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ |
| 3719 | #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ | 3795 | #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ |
| 3720 | 3796 | ||
| 3721 | double get_tdp(unsigned int model) | 3797 | double get_tdp_intel(unsigned int model) |
| 3722 | { | 3798 | { |
| 3723 | unsigned long long msr; | 3799 | unsigned long long msr; |
| 3724 | 3800 | ||
| @@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model) | |||
| 3735 | } | 3811 | } |
| 3736 | } | 3812 | } |
| 3737 | 3813 | ||
| 3814 | double get_tdp_amd(unsigned int family) | ||
| 3815 | { | ||
| 3816 | switch (family) { | ||
| 3817 | case 0x17: | ||
| 3818 | default: | ||
| 3819 | /* This is the max stock TDP of HEDT/Server Fam17h chips */ | ||
| 3820 | return 250.0; | ||
| 3821 | } | ||
| 3822 | } | ||
| 3823 | |||
| 3738 | /* | 3824 | /* |
| 3739 | * rapl_dram_energy_units_probe() | 3825 | * rapl_dram_energy_units_probe() |
| 3740 | * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. | 3826 | * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. |
| @@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) | |||
| 3754 | } | 3840 | } |
| 3755 | } | 3841 | } |
| 3756 | 3842 | ||
| 3757 | 3843 | void rapl_probe_intel(unsigned int family, unsigned int model) | |
| 3758 | /* | ||
| 3759 | * rapl_probe() | ||
| 3760 | * | ||
| 3761 | * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units | ||
| 3762 | */ | ||
| 3763 | void rapl_probe(unsigned int family, unsigned int model) | ||
| 3764 | { | 3844 | { |
| 3765 | unsigned long long msr; | 3845 | unsigned long long msr; |
| 3766 | unsigned int time_unit; | 3846 | unsigned int time_unit; |
| 3767 | double tdp; | 3847 | double tdp; |
| 3768 | 3848 | ||
| 3769 | if (!genuine_intel) | ||
| 3770 | return; | ||
| 3771 | |||
| 3772 | if (family != 6) | 3849 | if (family != 6) |
| 3773 | return; | 3850 | return; |
| 3774 | 3851 | ||
| @@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model) | |||
| 3892 | 3969 | ||
| 3893 | rapl_time_units = 1.0 / (1 << (time_unit)); | 3970 | rapl_time_units = 1.0 / (1 << (time_unit)); |
| 3894 | 3971 | ||
| 3895 | tdp = get_tdp(model); | 3972 | tdp = get_tdp_intel(model); |
| 3896 | 3973 | ||
| 3897 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; | 3974 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; |
| 3898 | if (!quiet) | 3975 | if (!quiet) |
| 3899 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); | 3976 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); |
| 3977 | } | ||
| 3900 | 3978 | ||
| 3901 | return; | 3979 | void rapl_probe_amd(unsigned int family, unsigned int model) |
| 3980 | { | ||
| 3981 | unsigned long long msr; | ||
| 3982 | unsigned int eax, ebx, ecx, edx; | ||
| 3983 | unsigned int has_rapl = 0; | ||
| 3984 | double tdp; | ||
| 3985 | |||
| 3986 | if (max_extended_level >= 0x80000007) { | ||
| 3987 | __cpuid(0x80000007, eax, ebx, ecx, edx); | ||
| 3988 | /* RAPL (Fam 17h) */ | ||
| 3989 | has_rapl = edx & (1 << 14); | ||
| 3990 | } | ||
| 3991 | |||
| 3992 | if (!has_rapl) | ||
| 3993 | return; | ||
| 3994 | |||
| 3995 | switch (family) { | ||
| 3996 | case 0x17: /* Zen, Zen+ */ | ||
| 3997 | do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; | ||
| 3998 | if (rapl_joules) { | ||
| 3999 | BIC_PRESENT(BIC_Pkg_J); | ||
| 4000 | BIC_PRESENT(BIC_Cor_J); | ||
| 4001 | } else { | ||
| 4002 | BIC_PRESENT(BIC_PkgWatt); | ||
| 4003 | BIC_PRESENT(BIC_CorWatt); | ||
| 4004 | } | ||
| 4005 | break; | ||
| 4006 | default: | ||
| 4007 | return; | ||
| 4008 | } | ||
| 4009 | |||
| 4010 | if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr)) | ||
| 4011 | return; | ||
| 4012 | |||
| 4013 | rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf)); | ||
| 4014 | rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); | ||
| 4015 | rapl_power_units = ldexp(1.0, -(msr & 0xf)); | ||
| 4016 | |||
| 4017 | tdp = get_tdp_amd(model); | ||
| 4018 | |||
| 4019 | rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; | ||
| 4020 | if (!quiet) | ||
| 4021 | fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); | ||
| 4022 | } | ||
| 4023 | |||
| 4024 | /* | ||
| 4025 | * rapl_probe() | ||
| 4026 | * | ||
| 4027 | * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units | ||
| 4028 | */ | ||
| 4029 | void rapl_probe(unsigned int family, unsigned int model) | ||
| 4030 | { | ||
| 4031 | if (genuine_intel) | ||
| 4032 | rapl_probe_intel(family, model); | ||
| 4033 | if (authentic_amd) | ||
| 4034 | rapl_probe_amd(family, model); | ||
| 3902 | } | 4035 | } |
| 3903 | 4036 | ||
| 3904 | void perf_limit_reasons_probe(unsigned int family, unsigned int model) | 4037 | void perf_limit_reasons_probe(unsigned int family, unsigned int model) |
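
For the AMD Fam17h path added above, the unit fields of MSR_RAPL_PWR_UNIT are decoded with ldexp(), i.e. a field value of n simply means 2^-n. A worked, standalone check of that decoding and of the resulting Joule-counter range (the raw MSR value below is made up; the 250 W TDP is what get_tdp_amd() returns for family 0x17):

#include <math.h>
#include <stdio.h>

int main(void)
{
        unsigned long long msr = 0x000a1003;    /* hypothetical MSR_RAPL_PWR_UNIT value */
        double rapl_time_units   = ldexp(1.0, -(int)((msr >> 16) & 0xf));  /* 2^-10 s */
        double rapl_energy_units = ldexp(1.0, -(int)((msr >> 8) & 0x1f));  /* 2^-16 J */
        double rapl_power_units  = ldexp(1.0, -(int)(msr & 0xf));          /* 2^-3  W */
        double tdp = 250.0;                     /* get_tdp_amd(0x17) */

        /* Same formula as rapl_probe_amd(): how long the 32-bit energy
         * counter lasts at the assumed TDP before it wraps. */
        double range = 0xFFFFFFFF * rapl_energy_units / tdp;

        printf("%g s, %g J, %g W per LSB; counter range %.0f sec at %.0f W\n",
               rapl_time_units, rapl_energy_units, rapl_power_units, range, tdp);
        return 0;
}
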
| @@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label) | |||
| 4003 | int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) | 4136 | int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) |
| 4004 | { | 4137 | { |
| 4005 | unsigned long long msr; | 4138 | unsigned long long msr; |
| 4139 | const char *msr_name; | ||
| 4006 | int cpu; | 4140 | int cpu; |
| 4007 | 4141 | ||
| 4008 | if (!do_rapl) | 4142 | if (!do_rapl) |
| @@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
| 4018 | return -1; | 4152 | return -1; |
| 4019 | } | 4153 | } |
| 4020 | 4154 | ||
| 4021 | if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) | 4155 | if (do_rapl & RAPL_AMD_F17H) { |
| 4022 | return -1; | 4156 | msr_name = "MSR_RAPL_PWR_UNIT"; |
| 4157 | if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr)) | ||
| 4158 | return -1; | ||
| 4159 | } else { | ||
| 4160 | msr_name = "MSR_RAPL_POWER_UNIT"; | ||
| 4161 | if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) | ||
| 4162 | return -1; | ||
| 4163 | } | ||
| 4023 | 4164 | ||
| 4024 | fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, | 4165 | fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr, |
| 4025 | rapl_power_units, rapl_energy_units, rapl_time_units); | 4166 | rapl_power_units, rapl_energy_units, rapl_time_units); |
| 4026 | 4167 | ||
| 4027 | if (do_rapl & RAPL_PKG_POWER_INFO) { | 4168 | if (do_rapl & RAPL_PKG_POWER_INFO) { |
| @@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model) | |||
| 4451 | case INTEL_FAM6_KABYLAKE_MOBILE: | 4592 | case INTEL_FAM6_KABYLAKE_MOBILE: |
| 4452 | case INTEL_FAM6_KABYLAKE_DESKTOP: | 4593 | case INTEL_FAM6_KABYLAKE_DESKTOP: |
| 4453 | return INTEL_FAM6_SKYLAKE_MOBILE; | 4594 | return INTEL_FAM6_SKYLAKE_MOBILE; |
| 4595 | |||
| 4596 | case INTEL_FAM6_ICELAKE_MOBILE: | ||
| 4597 | return INTEL_FAM6_CANNONLAKE_MOBILE; | ||
| 4454 | } | 4598 | } |
| 4455 | return model; | 4599 | return model; |
| 4456 | } | 4600 | } |
| @@ -4702,7 +4846,9 @@ void process_cpuid() | |||
| 4702 | } | 4846 | } |
| 4703 | do_slm_cstates = is_slm(family, model); | 4847 | do_slm_cstates = is_slm(family, model); |
| 4704 | do_knl_cstates = is_knl(family, model); | 4848 | do_knl_cstates = is_knl(family, model); |
| 4705 | do_cnl_cstates = is_cnl(family, model); | 4849 | |
| 4850 | if (do_slm_cstates || do_knl_cstates || is_cnl(family, model)) | ||
| 4851 | BIC_NOT_PRESENT(BIC_CPU_c3); | ||
| 4706 | 4852 | ||
| 4707 | if (!quiet) | 4853 | if (!quiet) |
| 4708 | decode_misc_pwr_mgmt_msr(); | 4854 | decode_misc_pwr_mgmt_msr(); |
| @@ -4769,6 +4915,7 @@ void topology_probe() | |||
| 4769 | int i; | 4915 | int i; |
| 4770 | int max_core_id = 0; | 4916 | int max_core_id = 0; |
| 4771 | int max_package_id = 0; | 4917 | int max_package_id = 0; |
| 4918 | int max_die_id = 0; | ||
| 4772 | int max_siblings = 0; | 4919 | int max_siblings = 0; |
| 4773 | 4920 | ||
| 4774 | /* Initialize num_cpus, max_cpu_num */ | 4921 | /* Initialize num_cpus, max_cpu_num */ |
| @@ -4835,6 +4982,11 @@ void topology_probe() | |||
| 4835 | if (cpus[i].physical_package_id > max_package_id) | 4982 | if (cpus[i].physical_package_id > max_package_id) |
| 4836 | max_package_id = cpus[i].physical_package_id; | 4983 | max_package_id = cpus[i].physical_package_id; |
| 4837 | 4984 | ||
| 4985 | /* get die information */ | ||
| 4986 | cpus[i].die_id = get_die_id(i); | ||
| 4987 | if (cpus[i].die_id > max_die_id) | ||
| 4988 | max_die_id = cpus[i].die_id; | ||
| 4989 | |||
| 4838 | /* get numa node information */ | 4990 | /* get numa node information */ |
| 4839 | cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); | 4991 | cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); |
| 4840 | if (cpus[i].physical_node_id > topo.max_node_num) | 4992 | if (cpus[i].physical_node_id > topo.max_node_num) |
| @@ -4860,6 +5012,13 @@ void topology_probe() | |||
| 4860 | if (!summary_only && topo.cores_per_node > 1) | 5012 | if (!summary_only && topo.cores_per_node > 1) |
| 4861 | BIC_PRESENT(BIC_Core); | 5013 | BIC_PRESENT(BIC_Core); |
| 4862 | 5014 | ||
| 5015 | topo.num_die = max_die_id + 1; | ||
| 5016 | if (debug > 1) | ||
| 5017 | fprintf(outf, "max_die_id %d, sizing for %d die\n", | ||
| 5018 | max_die_id, topo.num_die); | ||
| 5019 | if (!summary_only && topo.num_die > 1) | ||
| 5020 | BIC_PRESENT(BIC_Die); | ||
| 5021 | |||
| 4863 | topo.num_packages = max_package_id + 1; | 5022 | topo.num_packages = max_package_id + 1; |
| 4864 | if (debug > 1) | 5023 | if (debug > 1) |
| 4865 | fprintf(outf, "max_package_id %d, sizing for %d packages\n", | 5024 | fprintf(outf, "max_package_id %d, sizing for %d packages\n", |
| @@ -4884,8 +5043,8 @@ void topology_probe() | |||
| 4884 | if (cpu_is_not_present(i)) | 5043 | if (cpu_is_not_present(i)) |
| 4885 | continue; | 5044 | continue; |
| 4886 | fprintf(outf, | 5045 | fprintf(outf, |
| 4887 | "cpu %d pkg %d node %d lnode %d core %d thread %d\n", | 5046 | "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n", |
| 4888 | i, cpus[i].physical_package_id, | 5047 | i, cpus[i].physical_package_id, cpus[i].die_id, |
| 4889 | cpus[i].physical_node_id, | 5048 | cpus[i].physical_node_id, |
| 4890 | cpus[i].logical_node_id, | 5049 | cpus[i].logical_node_id, |
| 4891 | cpus[i].physical_core_id, | 5050 | cpus[i].physical_core_id, |
| @@ -5122,7 +5281,7 @@ int get_and_dump_counters(void) | |||
| 5122 | } | 5281 | } |
| 5123 | 5282 | ||
| 5124 | void print_version() { | 5283 | void print_version() { |
| 5125 | fprintf(outf, "turbostat version 18.07.27" | 5284 | fprintf(outf, "turbostat version 19.03.20" |
| 5126 | " - Len Brown <lenb@kernel.org>\n"); | 5285 | " - Len Brown <lenb@kernel.org>\n"); |
| 5127 | } | 5286 | } |
| 5128 | 5287 | ||
| @@ -5319,7 +5478,8 @@ void probe_sysfs(void) | |||
| 5319 | input = fopen(path, "r"); | 5478 | input = fopen(path, "r"); |
| 5320 | if (input == NULL) | 5479 | if (input == NULL) |
| 5321 | continue; | 5480 | continue; |
| 5322 | fgets(name_buf, sizeof(name_buf), input); | 5481 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 5482 | err(1, "%s: failed to read file", path); | ||
| 5323 | 5483 | ||
| 5324 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 5484 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 5325 | sp = strchr(name_buf, '-'); | 5485 | sp = strchr(name_buf, '-'); |
| @@ -5346,7 +5506,8 @@ void probe_sysfs(void) | |||
| 5346 | input = fopen(path, "r"); | 5506 | input = fopen(path, "r"); |
| 5347 | if (input == NULL) | 5507 | if (input == NULL) |
| 5348 | continue; | 5508 | continue; |
| 5349 | fgets(name_buf, sizeof(name_buf), input); | 5509 | if (!fgets(name_buf, sizeof(name_buf), input)) |
| 5510 | err(1, "%s: failed to read file", path); | ||
| 5350 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ | 5511 | /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ |
| 5351 | sp = strchr(name_buf, '-'); | 5512 | sp = strchr(name_buf, '-'); |
| 5352 | if (!sp) | 5513 | if (!sp) |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index b579f962451d..85ffdcfa596b 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
| @@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)]; | |||
| 146 | struct nfit_test_sec { | 146 | struct nfit_test_sec { |
| 147 | u8 state; | 147 | u8 state; |
| 148 | u8 ext_state; | 148 | u8 ext_state; |
| 149 | u8 old_state; | ||
| 149 | u8 passphrase[32]; | 150 | u8 passphrase[32]; |
| 150 | u8 master_passphrase[32]; | 151 | u8 master_passphrase[32]; |
| 151 | u64 overwrite_end_time; | 152 | u64 overwrite_end_time; |
| @@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq; | |||
| 225 | 226 | ||
| 226 | static struct gen_pool *nfit_pool; | 227 | static struct gen_pool *nfit_pool; |
| 227 | 228 | ||
| 229 | static const char zero_key[NVDIMM_PASSPHRASE_LEN]; | ||
| 230 | |||
| 228 | static struct nfit_test *to_nfit_test(struct device *dev) | 231 | static struct nfit_test *to_nfit_test(struct device *dev) |
| 229 | { | 232 | { |
| 230 | struct platform_device *pdev = to_platform_device(dev); | 233 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t, | |||
| 1059 | struct device *dev = &t->pdev.dev; | 1062 | struct device *dev = &t->pdev.dev; |
| 1060 | struct nfit_test_sec *sec = &dimm_sec_info[dimm]; | 1063 | struct nfit_test_sec *sec = &dimm_sec_info[dimm]; |
| 1061 | 1064 | ||
| 1062 | if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) || | 1065 | if (sec->state & ND_INTEL_SEC_STATE_FROZEN) { |
| 1063 | (sec->state & ND_INTEL_SEC_STATE_FROZEN)) { | ||
| 1064 | nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE; | 1066 | nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE; |
| 1065 | dev_dbg(dev, "secure erase: wrong security state\n"); | 1067 | dev_dbg(dev, "secure erase: wrong security state\n"); |
| 1066 | } else if (memcmp(nd_cmd->passphrase, sec->passphrase, | 1068 | } else if (memcmp(nd_cmd->passphrase, sec->passphrase, |
| @@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t, | |||
| 1068 | nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS; | 1070 | nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS; |
| 1069 | dev_dbg(dev, "secure erase: wrong passphrase\n"); | 1071 | dev_dbg(dev, "secure erase: wrong passphrase\n"); |
| 1070 | } else { | 1072 | } else { |
| 1073 | if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) | ||
| 1074 | && (memcmp(nd_cmd->passphrase, zero_key, | ||
| 1075 | ND_INTEL_PASSPHRASE_SIZE) != 0)) { | ||
| 1076 | dev_dbg(dev, "invalid zero key\n"); | ||
| 1077 | return 0; | ||
| 1078 | } | ||
| 1071 | memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE); | 1079 | memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE); |
| 1072 | memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE); | 1080 | memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE); |
| 1073 | sec->state = 0; | 1081 | sec->state = 0; |
| @@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t, | |||
| 1093 | return 0; | 1101 | return 0; |
| 1094 | } | 1102 | } |
| 1095 | 1103 | ||
| 1096 | memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE); | 1104 | sec->old_state = sec->state; |
| 1097 | sec->state = ND_INTEL_SEC_STATE_OVERWRITE; | 1105 | sec->state = ND_INTEL_SEC_STATE_OVERWRITE; |
| 1098 | dev_dbg(dev, "overwrite progressing.\n"); | 1106 | dev_dbg(dev, "overwrite progressing.\n"); |
| 1099 | sec->overwrite_end_time = get_jiffies_64() + 5 * HZ; | 1107 | sec->overwrite_end_time = get_jiffies_64() + 5 * HZ; |
| @@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t, | |||
| 1115 | 1123 | ||
| 1116 | if (time_is_before_jiffies64(sec->overwrite_end_time)) { | 1124 | if (time_is_before_jiffies64(sec->overwrite_end_time)) { |
| 1117 | sec->overwrite_end_time = 0; | 1125 | sec->overwrite_end_time = 0; |
| 1118 | sec->state = 0; | 1126 | sec->state = sec->old_state; |
| 1127 | sec->old_state = 0; | ||
| 1119 | sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED; | 1128 | sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED; |
| 1120 | dev_dbg(dev, "overwrite is complete\n"); | 1129 | dev_dbg(dev, "overwrite is complete\n"); |
| 1121 | } else | 1130 | } else |
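
The nfit test-harness change above permits secure erase while security is disabled, but only when the caller supplies the all-zero key; anything else is rejected before the stored passphrases are wiped. A hedged standalone sketch of just that check (sizes and names are placeholders, not the nfit ones):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PASSPHRASE_SIZE 32

static const unsigned char zero_key[PASSPHRASE_SIZE];  /* implicitly all zeroes */

/* Mirrors the new branch in nd_intel_test_cmd_secure_erase(): with security
 * disabled, only the zero key is an acceptable "passphrase" for an erase. */
static bool erase_key_acceptable(bool security_enabled, const unsigned char *pass)
{
        if (security_enabled)
                return true;    /* the real code compares against the stored passphrase earlier */
        return memcmp(pass, zero_key, PASSPHRASE_SIZE) == 0;
}

int main(void)
{
        unsigned char key[PASSPHRASE_SIZE] = { 0 };

        printf("disabled + zero key: %d\n", erase_key_acceptable(false, key));  /* 1 */
        key[0] = 0xaa;
        printf("disabled + junk key: %d\n", erase_key_acceptable(false, key));  /* 0 */
        return 0;
}
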
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index bcbd928c96ab..fc818bc1d729 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c | |||
| @@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = { | |||
| 39 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), | 39 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | #define VLAN_HLEN 4 | ||
| 43 | |||
| 44 | static struct { | ||
| 45 | struct ethhdr eth; | ||
| 46 | __u16 vlan_tci; | ||
| 47 | __u16 vlan_proto; | ||
| 48 | struct iphdr iph; | ||
| 49 | struct tcphdr tcp; | ||
| 50 | } __packed pkt_vlan_v4 = { | ||
| 51 | .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q), | ||
| 52 | .vlan_proto = __bpf_constant_htons(ETH_P_IP), | ||
| 53 | .iph.ihl = 5, | ||
| 54 | .iph.protocol = IPPROTO_TCP, | ||
| 55 | .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), | ||
| 56 | .tcp.urg_ptr = 123, | ||
| 57 | .tcp.doff = 5, | ||
| 58 | }; | ||
| 59 | |||
| 60 | static struct bpf_flow_keys pkt_vlan_v4_flow_keys = { | ||
| 61 | .nhoff = VLAN_HLEN, | ||
| 62 | .thoff = VLAN_HLEN + sizeof(struct iphdr), | ||
| 63 | .addr_proto = ETH_P_IP, | ||
| 64 | .ip_proto = IPPROTO_TCP, | ||
| 65 | .n_proto = __bpf_constant_htons(ETH_P_IP), | ||
| 66 | }; | ||
| 67 | |||
| 68 | static struct { | ||
| 69 | struct ethhdr eth; | ||
| 70 | __u16 vlan_tci; | ||
| 71 | __u16 vlan_proto; | ||
| 72 | __u16 vlan_tci2; | ||
| 73 | __u16 vlan_proto2; | ||
| 74 | struct ipv6hdr iph; | ||
| 75 | struct tcphdr tcp; | ||
| 76 | } __packed pkt_vlan_v6 = { | ||
| 77 | .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD), | ||
| 78 | .vlan_proto = __bpf_constant_htons(ETH_P_8021Q), | ||
| 79 | .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6), | ||
| 80 | .iph.nexthdr = IPPROTO_TCP, | ||
| 81 | .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), | ||
| 82 | .tcp.urg_ptr = 123, | ||
| 83 | .tcp.doff = 5, | ||
| 84 | }; | ||
| 85 | |||
| 86 | static struct bpf_flow_keys pkt_vlan_v6_flow_keys = { | ||
| 87 | .nhoff = VLAN_HLEN * 2, | ||
| 88 | .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr), | ||
| 89 | .addr_proto = ETH_P_IPV6, | ||
| 90 | .ip_proto = IPPROTO_TCP, | ||
| 91 | .n_proto = __bpf_constant_htons(ETH_P_IPV6), | ||
| 92 | }; | ||
| 93 | |||
| 42 | void test_flow_dissector(void) | 94 | void test_flow_dissector(void) |
| 43 | { | 95 | { |
| 44 | struct bpf_flow_keys flow_keys; | 96 | struct bpf_flow_keys flow_keys; |
| @@ -68,5 +120,21 @@ void test_flow_dissector(void) | |||
| 68 | err, errno, retval, duration, size, sizeof(flow_keys)); | 120 | err, errno, retval, duration, size, sizeof(flow_keys)); |
| 69 | CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); | 121 | CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); |
| 70 | 122 | ||
| 123 | err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4), | ||
| 124 | &flow_keys, &size, &retval, &duration); | ||
| 125 | CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4", | ||
| 126 | "err %d errno %d retval %d duration %d size %u/%lu\n", | ||
| 127 | err, errno, retval, duration, size, sizeof(flow_keys)); | ||
| 128 | CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys, | ||
| 129 | pkt_vlan_v4_flow_keys); | ||
| 130 | |||
| 131 | err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6), | ||
| 132 | &flow_keys, &size, &retval, &duration); | ||
| 133 | CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6", | ||
| 134 | "err %d errno %d retval %d duration %d size %u/%lu\n", | ||
| 135 | err, errno, retval, duration, size, sizeof(flow_keys)); | ||
| 136 | CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys, | ||
| 137 | pkt_vlan_v6_flow_keys); | ||
| 138 | |||
| 71 | bpf_object__close(obj); | 139 | bpf_object__close(obj); |
| 72 | } | 140 | } |
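
The expected nhoff/thoff values in the new VLAN test vectors above are plain header-size arithmetic: one 802.1Q tag pushes the network header out by VLAN_HLEN, an 802.1AD + 802.1Q pair pushes it out by two tags, and thoff adds the IP header on top. A quick standalone check of those numbers, using the userspace struct iphdr/ip6_hdr, which have the same sizes as the kernel headers the test packets use:

#include <stdio.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#define VLAN_HLEN 4

int main(void)
{
        printf("vlan_ipv4: nhoff=%d thoff=%zu\n",
               VLAN_HLEN, VLAN_HLEN + sizeof(struct iphdr));           /* 4, 24 */
        printf("vlan_ipv6: nhoff=%d thoff=%zu\n",
               2 * VLAN_HLEN, 2 * VLAN_HLEN + sizeof(struct ip6_hdr)); /* 8, 48 */
        return 0;
}
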
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index 284660f5aa95..75b17cada539 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c | |||
| @@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto) | |||
| 92 | { | 92 | { |
| 93 | struct bpf_flow_keys *keys = skb->flow_keys; | 93 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 94 | 94 | ||
| 95 | keys->n_proto = proto; | ||
| 96 | switch (proto) { | 95 | switch (proto) { |
| 97 | case bpf_htons(ETH_P_IP): | 96 | case bpf_htons(ETH_P_IP): |
| 98 | bpf_tail_call(skb, &jmp_table, IP); | 97 | bpf_tail_call(skb, &jmp_table, IP); |
| @@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto) | |||
| 119 | SEC("flow_dissector") | 118 | SEC("flow_dissector") |
| 120 | int _dissect(struct __sk_buff *skb) | 119 | int _dissect(struct __sk_buff *skb) |
| 121 | { | 120 | { |
| 122 | if (!skb->vlan_present) | 121 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 123 | return parse_eth_proto(skb, skb->protocol); | 122 | |
| 124 | else | 123 | return parse_eth_proto(skb, keys->n_proto); |
| 125 | return parse_eth_proto(skb, skb->vlan_proto); | ||
| 126 | } | 124 | } |
| 127 | 125 | ||
| 128 | /* Parses on IPPROTO_* */ | 126 | /* Parses on IPPROTO_* */ |
| @@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 336 | { | 334 | { |
| 337 | struct bpf_flow_keys *keys = skb->flow_keys; | 335 | struct bpf_flow_keys *keys = skb->flow_keys; |
| 338 | struct vlan_hdr *vlan, _vlan; | 336 | struct vlan_hdr *vlan, _vlan; |
| 339 | __be16 proto; | ||
| 340 | |||
| 341 | /* Peek back to see if single or double-tagging */ | ||
| 342 | if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto, | ||
| 343 | sizeof(proto))) | ||
| 344 | return BPF_DROP; | ||
| 345 | 337 | ||
| 346 | /* Account for double-tagging */ | 338 | /* Account for double-tagging */ |
| 347 | if (proto == bpf_htons(ETH_P_8021AD)) { | 339 | if (keys->n_proto == bpf_htons(ETH_P_8021AD)) { |
| 348 | vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); | 340 | vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); |
| 349 | if (!vlan) | 341 | if (!vlan) |
| 350 | return BPF_DROP; | 342 | return BPF_DROP; |
| @@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 352 | if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) | 344 | if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) |
| 353 | return BPF_DROP; | 345 | return BPF_DROP; |
| 354 | 346 | ||
| 347 | keys->nhoff += sizeof(*vlan); | ||
| 355 | keys->thoff += sizeof(*vlan); | 348 | keys->thoff += sizeof(*vlan); |
| 356 | } | 349 | } |
| 357 | 350 | ||
| @@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb) | |||
| 359 | if (!vlan) | 352 | if (!vlan) |
| 360 | return BPF_DROP; | 353 | return BPF_DROP; |
| 361 | 354 | ||
| 355 | keys->nhoff += sizeof(*vlan); | ||
| 362 | keys->thoff += sizeof(*vlan); | 356 | keys->thoff += sizeof(*vlan); |
| 363 | /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ | 357 | /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ |
| 364 | if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || | 358 | if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || |
| 365 | vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) | 359 | vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) |
| 366 | return BPF_DROP; | 360 | return BPF_DROP; |
| 367 | 361 | ||
| 362 | keys->n_proto = vlan->h_vlan_encapsulated_proto; | ||
| 368 | return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); | 363 | return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); |
| 369 | } | 364 | } |
| 370 | 365 | ||
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 23e3b314ca60..ec5794e4205b 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
| @@ -5777,6 +5777,53 @@ const struct btf_dedup_test dedup_tests[] = { | |||
| 5777 | }, | 5777 | }, |
| 5778 | }, | 5778 | }, |
| 5779 | { | 5779 | { |
| 5780 | .descr = "dedup: void equiv check", | ||
| 5781 | /* | ||
| 5782 | * // CU 1: | ||
| 5783 | * struct s { | ||
| 5784 | * struct {} *x; | ||
| 5785 | * }; | ||
| 5786 | * // CU 2: | ||
| 5787 | * struct s { | ||
| 5788 | * int *x; | ||
| 5789 | * }; | ||
| 5790 | */ | ||
| 5791 | .input = { | ||
| 5792 | .raw_types = { | ||
| 5793 | /* CU 1 */ | ||
| 5794 | BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ | ||
| 5795 | BTF_PTR_ENC(1), /* [2] ptr -> [1] */ | ||
| 5796 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ | ||
| 5797 | BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), | ||
| 5798 | /* CU 2 */ | ||
| 5799 | BTF_PTR_ENC(0), /* [4] ptr -> void */ | ||
| 5800 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ | ||
| 5801 | BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), | ||
| 5802 | BTF_END_RAW, | ||
| 5803 | }, | ||
| 5804 | BTF_STR_SEC("\0s\0x"), | ||
| 5805 | }, | ||
| 5806 | .expect = { | ||
| 5807 | .raw_types = { | ||
| 5808 | /* CU 1 */ | ||
| 5809 | BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ | ||
| 5810 | BTF_PTR_ENC(1), /* [2] ptr -> [1] */ | ||
| 5811 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ | ||
| 5812 | BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), | ||
| 5813 | /* CU 2 */ | ||
| 5814 | BTF_PTR_ENC(0), /* [4] ptr -> void */ | ||
| 5815 | BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ | ||
| 5816 | BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), | ||
| 5817 | BTF_END_RAW, | ||
| 5818 | }, | ||
| 5819 | BTF_STR_SEC("\0s\0x"), | ||
| 5820 | }, | ||
| 5821 | .opts = { | ||
| 5822 | .dont_resolve_fwds = false, | ||
| 5823 | .dedup_table_size = 1, /* force hash collisions */ | ||
| 5824 | }, | ||
| 5825 | }, | ||
| 5826 | { | ||
| 5780 | .descr = "dedup: all possible kinds (no duplicates)", | 5827 | .descr = "dedup: all possible kinds (no duplicates)", |
| 5781 | .input = { | 5828 | .input = { |
| 5782 | .raw_types = { | 5829 | .raw_types = { |
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index f2ccae39ee66..fb11240b758b 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c | |||
| @@ -908,6 +908,44 @@ | |||
| 908 | .result = REJECT, | 908 | .result = REJECT, |
| 909 | }, | 909 | }, |
| 910 | { | 910 | { |
| 911 | "calls: stack depth check in dead code", | ||
| 912 | .insns = { | ||
| 913 | /* main */ | ||
| 914 | BPF_MOV64_IMM(BPF_REG_1, 0), | ||
| 915 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ | ||
| 916 | BPF_EXIT_INSN(), | ||
| 917 | /* A */ | ||
| 918 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), | ||
| 919 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */ | ||
| 920 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 921 | BPF_EXIT_INSN(), | ||
| 922 | /* B */ | ||
| 923 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ | ||
| 924 | BPF_EXIT_INSN(), | ||
| 925 | /* C */ | ||
| 926 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ | ||
| 927 | BPF_EXIT_INSN(), | ||
| 928 | /* D */ | ||
| 929 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ | ||
| 930 | BPF_EXIT_INSN(), | ||
| 931 | /* E */ | ||
| 932 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ | ||
| 933 | BPF_EXIT_INSN(), | ||
| 934 | /* F */ | ||
| 935 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ | ||
| 936 | BPF_EXIT_INSN(), | ||
| 937 | /* G */ | ||
| 938 | BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ | ||
| 939 | BPF_EXIT_INSN(), | ||
| 940 | /* H */ | ||
| 941 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 942 | BPF_EXIT_INSN(), | ||
| 943 | }, | ||
| 944 | .prog_type = BPF_PROG_TYPE_XDP, | ||
| 945 | .errstr = "call stack", | ||
| 946 | .result = REJECT, | ||
| 947 | }, | ||
| 948 | { | ||
| 911 | "calls: spill into caller stack frame", | 949 | "calls: spill into caller stack frame", |
| 912 | .insns = { | 950 | .insns = { |
| 913 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | 951 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index c4cf6e6d800e..a6c196c8534c 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | |||
| @@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding | |||
| 11 | 11 | ||
| 12 | ALL_TESTS=" | 12 | ALL_TESTS=" |
| 13 | rif_set_addr_test | 13 | rif_set_addr_test |
| 14 | rif_vrf_set_addr_test | ||
| 14 | rif_inherit_bridge_addr_test | 15 | rif_inherit_bridge_addr_test |
| 15 | rif_non_inherit_bridge_addr_test | 16 | rif_non_inherit_bridge_addr_test |
| 16 | vlan_interface_deletion_test | 17 | vlan_interface_deletion_test |
| @@ -98,6 +99,25 @@ rif_set_addr_test() | |||
| 98 | ip link set dev $swp1 addr $swp1_mac | 99 | ip link set dev $swp1 addr $swp1_mac |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 102 | rif_vrf_set_addr_test() | ||
| 103 | { | ||
| 104 | # Test that it is possible to set an IP address on a VRF upper despite | ||
| 105 | # its random MAC address. | ||
| 106 | RET=0 | ||
| 107 | |||
| 108 | ip link add name vrf-test type vrf table 10 | ||
| 109 | ip link set dev $swp1 master vrf-test | ||
| 110 | |||
| 111 | ip -4 address add 192.0.2.1/24 dev vrf-test | ||
| 112 | check_err $? "failed to set IPv4 address on VRF" | ||
| 113 | ip -6 address add 2001:db8:1::1/64 dev vrf-test | ||
| 114 | check_err $? "failed to set IPv6 address on VRF" | ||
| 115 | |||
| 116 | log_test "RIF - setting IP address on VRF" | ||
| 117 | |||
| 118 | ip link del dev vrf-test | ||
| 119 | } | ||
| 120 | |||
| 101 | rif_inherit_bridge_addr_test() | 121 | rif_inherit_bridge_addr_test() |
| 102 | { | 122 | { |
| 103 | RET=0 | 123 | RET=0 |
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 7514fcea91a7..f8588cca2bef 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
| @@ -1,3 +1,5 @@ | |||
| 1 | include ../../../../scripts/Kbuild.include | ||
| 2 | |||
| 1 | all: | 3 | all: |
| 2 | 4 | ||
| 3 | top_srcdir = ../../../.. | 5 | top_srcdir = ../../../.. |
| @@ -17,6 +19,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test | |||
| 17 | TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test | 19 | TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test |
| 18 | TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid | 20 | TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid |
| 19 | TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test | 21 | TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test |
| 22 | TEST_GEN_PROGS_x86_64 += x86_64/smm_test | ||
| 20 | TEST_GEN_PROGS_x86_64 += dirty_log_test | 23 | TEST_GEN_PROGS_x86_64 += dirty_log_test |
| 21 | TEST_GEN_PROGS_x86_64 += clear_dirty_log_test | 24 | TEST_GEN_PROGS_x86_64 += clear_dirty_log_test |
| 22 | 25 | ||
| @@ -30,7 +33,11 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr | |||
| 30 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 33 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
| 31 | LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include | 34 | LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include |
| 32 | CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. | 35 | CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. |
| 33 | LDFLAGS += -pthread -no-pie | 36 | |
| 37 | no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \ | ||
| 38 | $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) | ||
| 39 | |||
| 40 | LDFLAGS += -pthread $(no-pie-option) | ||
| 34 | 41 | ||
| 35 | # After inclusion, $(OUTPUT) is defined and | 42 | # After inclusion, $(OUTPUT) is defined and |
| 36 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 43 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index e2884c2b81ff..6063d5b2f356 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h | |||
| @@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, | |||
| 778 | #define MSR_IA32_APICBASE_ENABLE (1<<11) | 778 | #define MSR_IA32_APICBASE_ENABLE (1<<11) |
| 779 | #define MSR_IA32_APICBASE_BASE (0xfffff<<12) | 779 | #define MSR_IA32_APICBASE_BASE (0xfffff<<12) |
| 780 | 780 | ||
| 781 | #define APIC_BASE_MSR 0x800 | ||
| 782 | #define X2APIC_ENABLE (1UL << 10) | ||
| 783 | #define APIC_ICR 0x300 | ||
| 784 | #define APIC_DEST_SELF 0x40000 | ||
| 785 | #define APIC_DEST_ALLINC 0x80000 | ||
| 786 | #define APIC_DEST_ALLBUT 0xC0000 | ||
| 787 | #define APIC_ICR_RR_MASK 0x30000 | ||
| 788 | #define APIC_ICR_RR_INVALID 0x00000 | ||
| 789 | #define APIC_ICR_RR_INPROG 0x10000 | ||
| 790 | #define APIC_ICR_RR_VALID 0x20000 | ||
| 791 | #define APIC_INT_LEVELTRIG 0x08000 | ||
| 792 | #define APIC_INT_ASSERT 0x04000 | ||
| 793 | #define APIC_ICR_BUSY 0x01000 | ||
| 794 | #define APIC_DEST_LOGICAL 0x00800 | ||
| 795 | #define APIC_DEST_PHYSICAL 0x00000 | ||
| 796 | #define APIC_DM_FIXED 0x00000 | ||
| 797 | #define APIC_DM_FIXED_MASK 0x00700 | ||
| 798 | #define APIC_DM_LOWEST 0x00100 | ||
| 799 | #define APIC_DM_SMI 0x00200 | ||
| 800 | #define APIC_DM_REMRD 0x00300 | ||
| 801 | #define APIC_DM_NMI 0x00400 | ||
| 802 | #define APIC_DM_INIT 0x00500 | ||
| 803 | #define APIC_DM_STARTUP 0x00600 | ||
| 804 | #define APIC_DM_EXTINT 0x00700 | ||
| 805 | #define APIC_VECTOR_MASK 0x000FF | ||
| 806 | #define APIC_ICR2 0x310 | ||
| 807 | |||
| 781 | #define MSR_IA32_TSCDEADLINE 0x000006e0 | 808 | #define MSR_IA32_TSCDEADLINE 0x000006e0 |
| 782 | 809 | ||
| 783 | #define MSR_IA32_UCODE_WRITE 0x00000079 | 810 | #define MSR_IA32_UCODE_WRITE 0x00000079 |
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index efa0aad8b3c6..4ca96b228e46 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
| @@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type) | |||
| 91 | if (vm->kvm_fd < 0) | 91 | if (vm->kvm_fd < 0) |
| 92 | exit(KSFT_SKIP); | 92 | exit(KSFT_SKIP); |
| 93 | 93 | ||
| 94 | if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { | ||
| 95 | fprintf(stderr, "immediate_exit not available, skipping test\n"); | ||
| 96 | exit(KSFT_SKIP); | ||
| 97 | } | ||
| 98 | |||
| 94 | vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type); | 99 | vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type); |
| 95 | TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " | 100 | TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " |
| 96 | "rc: %i errno: %i", vm->fd, errno); | 101 | "rc: %i errno: %i", vm->fd, errno); |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index f28127f4a3af..dc7fae9fa424 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c | |||
| @@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) | |||
| 1030 | nested_size, sizeof(state->nested_)); | 1030 | nested_size, sizeof(state->nested_)); |
| 1031 | } | 1031 | } |
| 1032 | 1032 | ||
| 1033 | /* | ||
| 1034 | * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees | ||
| 1035 | * guest state is consistent only after userspace re-enters the | ||
| 1036 | * kernel with KVM_RUN. Complete IO prior to migrating state | ||
| 1037 | * to a new VM. | ||
| 1038 | */ | ||
| 1039 | vcpu_run_complete_io(vm, vcpuid); | ||
| 1040 | |||
| 1033 | nmsrs = kvm_get_num_msrs(vm); | 1041 | nmsrs = kvm_get_num_msrs(vm); |
| 1034 | list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); | 1042 | list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); |
| 1035 | list->nmsrs = nmsrs; | 1043 | list->nmsrs = nmsrs; |
| @@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s | |||
| 1093 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | 1101 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); |
| 1094 | int r; | 1102 | int r; |
| 1095 | 1103 | ||
| 1096 | if (state->nested.size) { | ||
| 1097 | r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested); | ||
| 1098 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i", | ||
| 1099 | r); | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave); | 1104 | r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave); |
| 1103 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", | 1105 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", |
| 1104 | r); | 1106 | r); |
| @@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s | |||
| 1130 | r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs); | 1132 | r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs); |
| 1131 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i", | 1133 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i", |
| 1132 | r); | 1134 | r); |
| 1135 | |||
| 1136 | if (state->nested.size) { | ||
| 1137 | r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested); | ||
| 1138 | TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i", | ||
| 1139 | r); | ||
| 1140 | } | ||
| 1133 | } | 1141 | } |
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index c49c2a28b0eb..36669684eca5 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c | |||
| @@ -123,8 +123,6 @@ int main(int argc, char *argv[]) | |||
| 123 | stage, run->exit_reason, | 123 | stage, run->exit_reason, |
| 124 | exit_reason_str(run->exit_reason)); | 124 | exit_reason_str(run->exit_reason)); |
| 125 | 125 | ||
| 126 | memset(®s1, 0, sizeof(regs1)); | ||
| 127 | vcpu_regs_get(vm, VCPU_ID, ®s1); | ||
| 128 | switch (get_ucall(vm, VCPU_ID, &uc)) { | 126 | switch (get_ucall(vm, VCPU_ID, &uc)) { |
| 129 | case UCALL_ABORT: | 127 | case UCALL_ABORT: |
| 130 | TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], | 128 | TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], |
| @@ -144,6 +142,9 @@ int main(int argc, char *argv[]) | |||
| 144 | stage, (ulong)uc.args[1]); | 142 | stage, (ulong)uc.args[1]); |
| 145 | 143 | ||
| 146 | state = vcpu_save_state(vm, VCPU_ID); | 144 | state = vcpu_save_state(vm, VCPU_ID); |
| 145 | memset(®s1, 0, sizeof(regs1)); | ||
| 146 | vcpu_regs_get(vm, VCPU_ID, ®s1); | ||
| 147 | |||
| 147 | kvm_vm_release(vm); | 148 | kvm_vm_release(vm); |
| 148 | 149 | ||
| 149 | /* Restore state in a new VM. */ | 150 | /* Restore state in a new VM. */ |
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c new file mode 100644 index 000000000000..fb8086964d83 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/smm_test.c | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2018, Red Hat, Inc. | ||
| 4 | * | ||
| 5 | * Tests for SMM. | ||
| 6 | */ | ||
| 7 | #define _GNU_SOURCE /* for program_invocation_short_name */ | ||
| 8 | #include <fcntl.h> | ||
| 9 | #include <stdio.h> | ||
| 10 | #include <stdlib.h> | ||
| 11 | #include <stdint.h> | ||
| 12 | #include <string.h> | ||
| 13 | #include <sys/ioctl.h> | ||
| 14 | |||
| 15 | #include "test_util.h" | ||
| 16 | |||
| 17 | #include "kvm_util.h" | ||
| 18 | |||
| 19 | #include "vmx.h" | ||
| 20 | |||
| 21 | #define VCPU_ID 1 | ||
| 22 | |||
| 23 | #define PAGE_SIZE 4096 | ||
| 24 | |||
| 25 | #define SMRAM_SIZE 65536 | ||
| 26 | #define SMRAM_MEMSLOT ((1 << 16) | 1) | ||
| 27 | #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) | ||
| 28 | #define SMRAM_GPA 0x1000000 | ||
| 29 | #define SMRAM_STAGE 0xfe | ||
| 30 | |||
| 31 | #define STR(x) #x | ||
| 32 | #define XSTR(s) STR(s) | ||
| 33 | |||
| 34 | #define SYNC_PORT 0xe | ||
| 35 | #define DONE 0xff | ||
| 36 | |||
| 37 | /* | ||
| 38 | * This is compiled as normal 64-bit code, however, SMI handler is executed | ||
| 39 | * in real-address mode. To stay simple we're limiting ourselves to a mode | ||
| 40 | * independent subset of asm here. | ||
| 41 | * SMI handler always report back fixed stage SMRAM_STAGE. | ||
| 42 | */ | ||
| 43 | uint8_t smi_handler[] = { | ||
| 44 | 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ | ||
| 45 | 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ | ||
| 46 | 0x0f, 0xaa, /* rsm */ | ||
| 47 | }; | ||
| 48 | |||
| 49 | void sync_with_host(uint64_t phase) | ||
| 50 | { | ||
| 51 | asm volatile("in $" XSTR(SYNC_PORT)", %%al \n" | ||
| 52 | : : "a" (phase)); | ||
| 53 | } | ||
| 54 | |||
| 55 | void self_smi(void) | ||
| 56 | { | ||
| 57 | wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4), | ||
| 58 | APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI); | ||
| 59 | } | ||
| 60 | |||
| 61 | void guest_code(struct vmx_pages *vmx_pages) | ||
| 62 | { | ||
| 63 | uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); | ||
| 64 | |||
| 65 | sync_with_host(1); | ||
| 66 | |||
| 67 | wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE); | ||
| 68 | |||
| 69 | sync_with_host(2); | ||
| 70 | |||
| 71 | self_smi(); | ||
| 72 | |||
| 73 | sync_with_host(4); | ||
| 74 | |||
| 75 | if (vmx_pages) { | ||
| 76 | GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); | ||
| 77 | |||
| 78 | sync_with_host(5); | ||
| 79 | |||
| 80 | self_smi(); | ||
| 81 | |||
| 82 | sync_with_host(7); | ||
| 83 | } | ||
| 84 | |||
| 85 | sync_with_host(DONE); | ||
| 86 | } | ||
| 87 | |||
| 88 | int main(int argc, char *argv[]) | ||
| 89 | { | ||
| 90 | struct vmx_pages *vmx_pages = NULL; | ||
| 91 | vm_vaddr_t vmx_pages_gva = 0; | ||
| 92 | |||
| 93 | struct kvm_regs regs; | ||
| 94 | struct kvm_vm *vm; | ||
| 95 | struct kvm_run *run; | ||
| 96 | struct kvm_x86_state *state; | ||
| 97 | int stage, stage_reported; | ||
| 98 | |||
| 99 | /* Create VM */ | ||
| 100 | vm = vm_create_default(VCPU_ID, 0, guest_code); | ||
| 101 | |||
| 102 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); | ||
| 103 | |||
| 104 | run = vcpu_state(vm, VCPU_ID); | ||
| 105 | |||
| 106 | vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, | ||
| 107 | SMRAM_MEMSLOT, SMRAM_PAGES, 0); | ||
| 108 | TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT) | ||
| 109 | == SMRAM_GPA, "could not allocate guest physical addresses?"); | ||
| 110 | |||
| 111 | memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE); | ||
| 112 | memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, | ||
| 113 | sizeof(smi_handler)); | ||
| 114 | |||
| 115 | vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA); | ||
| 116 | |||
| 117 | if (kvm_check_cap(KVM_CAP_NESTED_STATE)) { | ||
| 118 | vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva); | ||
| 119 | vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); | ||
| 120 | } else { | ||
| 121 | printf("will skip SMM test with VMX enabled\n"); | ||
| 122 | vcpu_args_set(vm, VCPU_ID, 1, 0); | ||
| 123 | } | ||
| 124 | |||
| 125 | for (stage = 1;; stage++) { | ||
| 126 | _vcpu_run(vm, VCPU_ID); | ||
| 127 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | ||
| 128 | "Stage %d: unexpected exit reason: %u (%s),\n", | ||
| 129 | stage, run->exit_reason, | ||
| 130 | exit_reason_str(run->exit_reason)); | ||
| 131 | |||
| 132 | memset(®s, 0, sizeof(regs)); | ||
| 133 | vcpu_regs_get(vm, VCPU_ID, ®s); | ||
| 134 | |||
| 135 | stage_reported = regs.rax & 0xff; | ||
| 136 | |||
| 137 | if (stage_reported == DONE) | ||
| 138 | goto done; | ||
| 139 | |||
| 140 | TEST_ASSERT(stage_reported == stage || | ||
| 141 | stage_reported == SMRAM_STAGE, | ||
| 142 | "Unexpected stage: #%x, got %x", | ||
| 143 | stage, stage_reported); | ||
| 144 | |||
| 145 | state = vcpu_save_state(vm, VCPU_ID); | ||
| 146 | kvm_vm_release(vm); | ||
| 147 | kvm_vm_restart(vm, O_RDWR); | ||
| 148 | vm_vcpu_add(vm, VCPU_ID, 0, 0); | ||
| 149 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); | ||
| 150 | vcpu_load_state(vm, VCPU_ID, state); | ||
| 151 | run = vcpu_state(vm, VCPU_ID); | ||
| 152 | free(state); | ||
| 153 | } | ||
| 154 | |||
| 155 | done: | ||
| 156 | kvm_vm_free(vm); | ||
| 157 | } | ||
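
self_smi() in the new smm_test.c works by writing the x2APIC ICR through its MSR alias: in x2APIC mode the MMIO offset is shifted right by 4 and added to APIC_BASE_MSR, and the written value requests a fixed-assert SMI delivered to self. A worked check of the two constants that result, using the defines the patch adds to processor.h:

#include <stdio.h>

#define APIC_BASE_MSR   0x800
#define APIC_ICR        0x300
#define APIC_DEST_SELF  0x40000
#define APIC_INT_ASSERT 0x04000
#define APIC_DM_SMI     0x00200

int main(void)
{
        printf("ICR MSR index: 0x%x\n", APIC_BASE_MSR + (APIC_ICR >> 4));       /* 0x830 */
        printf("ICR value:     0x%x\n",
               APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);                 /* 0x44200 */
        return 0;
}
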
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c index 30f75856cf39..e0a3c0204b7c 100644 --- a/tools/testing/selftests/kvm/x86_64/state_test.c +++ b/tools/testing/selftests/kvm/x86_64/state_test.c | |||
| @@ -134,11 +134,6 @@ int main(int argc, char *argv[]) | |||
| 134 | 134 | ||
| 135 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | 135 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); |
| 136 | 136 | ||
| 137 | if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { | ||
| 138 | fprintf(stderr, "immediate_exit not available, skipping test\n"); | ||
| 139 | exit(KSFT_SKIP); | ||
| 140 | } | ||
| 141 | |||
| 142 | /* Create VM */ | 137 | /* Create VM */ |
| 143 | vm = vm_create_default(VCPU_ID, 0, guest_code); | 138 | vm = vm_create_default(VCPU_ID, 0, guest_code); |
| 144 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); | 139 | vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); |
| @@ -179,18 +174,10 @@ int main(int argc, char *argv[]) | |||
| 179 | uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", | 174 | uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", |
| 180 | stage, (ulong)uc.args[1]); | 175 | stage, (ulong)uc.args[1]); |
| 181 | 176 | ||
| 182 | /* | 177 | state = vcpu_save_state(vm, VCPU_ID); |
| 183 | * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees | ||
| 184 | * guest state is consistent only after userspace re-enters the | ||
| 185 | * kernel with KVM_RUN. Complete IO prior to migrating state | ||
| 186 | * to a new VM. | ||
| 187 | */ | ||
| 188 | vcpu_run_complete_io(vm, VCPU_ID); | ||
| 189 | |||
| 190 | memset(®s1, 0, sizeof(regs1)); | 178 | memset(®s1, 0, sizeof(regs1)); |
| 191 | vcpu_regs_get(vm, VCPU_ID, ®s1); | 179 | vcpu_regs_get(vm, VCPU_ID, ®s1); |
| 192 | 180 | ||
| 193 | state = vcpu_save_state(vm, VCPU_ID); | ||
| 194 | kvm_vm_release(vm); | 181 | kvm_vm_release(vm); |
| 195 | 182 | ||
| 196 | /* Restore state in a new VM. */ | 183 | /* Restore state in a new VM. */ |
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 1080ff55a788..0d2a5f4f1e63 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||
| @@ -605,6 +605,39 @@ run_cmd() | |||
| 605 | return $rc | 605 | return $rc |
| 606 | } | 606 | } |
| 607 | 607 | ||
| 608 | check_expected() | ||
| 609 | { | ||
| 610 | local out="$1" | ||
| 611 | local expected="$2" | ||
| 612 | local rc=0 | ||
| 613 | |||
| 614 | [ "${out}" = "${expected}" ] && return 0 | ||
| 615 | |||
| 616 | if [ -z "${out}" ]; then | ||
| 617 | if [ "$VERBOSE" = "1" ]; then | ||
| 618 | printf "\nNo route entry found\n" | ||
| 619 | printf "Expected:\n" | ||
| 620 | printf " ${expected}\n" | ||
| 621 | fi | ||
| 622 | return 1 | ||
| 623 | fi | ||
| 624 | |||
| 625 | # tricky way to convert output to 1-line without ip's | ||
| 626 | # messy '\'; this drops all extra white space | ||
| 627 | out=$(echo ${out}) | ||
| 628 | if [ "${out}" != "${expected}" ]; then | ||
| 629 | rc=1 | ||
| 630 | if [ "${VERBOSE}" = "1" ]; then | ||
| 631 | printf " Unexpected route entry. Have:\n" | ||
| 632 | printf " ${out}\n" | ||
| 633 | printf " Expected:\n" | ||
| 634 | printf " ${expected}\n\n" | ||
| 635 | fi | ||
| 636 | fi | ||
| 637 | |||
| 638 | return $rc | ||
| 639 | } | ||
| 640 | |||
| 608 | # add route for a prefix, flushing any existing routes first | 641 | # add route for a prefix, flushing any existing routes first |
| 609 | # expected to be the first step of a test | 642 | # expected to be the first step of a test |
| 610 | add_route6() | 643 | add_route6() |
| @@ -652,31 +685,7 @@ check_route6() | |||
| 652 | pfx=$1 | 685 | pfx=$1 |
| 653 | 686 | ||
| 654 | out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//') | 687 | out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//') |
| 655 | [ "${out}" = "${expected}" ] && return 0 | 688 | check_expected "${out}" "${expected}" |
| 656 | |||
| 657 | if [ -z "${out}" ]; then | ||
| 658 | if [ "$VERBOSE" = "1" ]; then | ||
| 659 | printf "\nNo route entry found\n" | ||
| 660 | printf "Expected:\n" | ||
| 661 | printf " ${expected}\n" | ||
| 662 | fi | ||
| 663 | return 1 | ||
| 664 | fi | ||
| 665 | |||
| 666 | # tricky way to convert output to 1-line without ip's | ||
| 667 | # messy '\'; this drops all extra white space | ||
| 668 | out=$(echo ${out}) | ||
| 669 | if [ "${out}" != "${expected}" ]; then | ||
| 670 | rc=1 | ||
| 671 | if [ "${VERBOSE}" = "1" ]; then | ||
| 672 | printf " Unexpected route entry. Have:\n" | ||
| 673 | printf " ${out}\n" | ||
| 674 | printf " Expected:\n" | ||
| 675 | printf " ${expected}\n\n" | ||
| 676 | fi | ||
| 677 | fi | ||
| 678 | |||
| 679 | return $rc | ||
| 680 | } | 689 | } |
| 681 | 690 | ||
| 682 | route_cleanup() | 691 | route_cleanup() |
| @@ -725,7 +734,7 @@ route_setup() | |||
| 725 | ip -netns ns2 addr add 172.16.103.2/24 dev veth4 | 734 | ip -netns ns2 addr add 172.16.103.2/24 dev veth4 |
| 726 | ip -netns ns2 addr add 172.16.104.1/24 dev dummy1 | 735 | ip -netns ns2 addr add 172.16.104.1/24 dev dummy1 |
| 727 | 736 | ||
| 728 | set +ex | 737 | set +e |
| 729 | } | 738 | } |
| 730 | 739 | ||
| 731 | # assumption is that basic add of a single path route works | 740 | # assumption is that basic add of a single path route works |
| @@ -960,7 +969,8 @@ ipv6_addr_metric_test() | |||
| 960 | run_cmd "$IP li set dev dummy2 down" | 969 | run_cmd "$IP li set dev dummy2 down" |
| 961 | rc=$? | 970 | rc=$? |
| 962 | if [ $rc -eq 0 ]; then | 971 | if [ $rc -eq 0 ]; then |
| 963 | check_route6 "" | 972 | out=$($IP -6 ro ls match 2001:db8:104::/64) |
| 973 | check_expected "${out}" "" | ||
| 964 | rc=$? | 974 | rc=$? |
| 965 | fi | 975 | fi |
| 966 | log_test $rc 0 "Prefix route removed on link down" | 976 | log_test $rc 0 "Prefix route removed on link down" |
| @@ -1091,38 +1101,13 @@ check_route() | |||
| 1091 | local pfx | 1101 | local pfx |
| 1092 | local expected="$1" | 1102 | local expected="$1" |
| 1093 | local out | 1103 | local out |
| 1094 | local rc=0 | ||
| 1095 | 1104 | ||
| 1096 | set -- $expected | 1105 | set -- $expected |
| 1097 | pfx=$1 | 1106 | pfx=$1 |
| 1098 | [ "${pfx}" = "unreachable" ] && pfx=$2 | 1107 | [ "${pfx}" = "unreachable" ] && pfx=$2 |
| 1099 | 1108 | ||
| 1100 | out=$($IP ro ls match ${pfx}) | 1109 | out=$($IP ro ls match ${pfx}) |
| 1101 | [ "${out}" = "${expected}" ] && return 0 | 1110 | check_expected "${out}" "${expected}" |
| 1102 | |||
| 1103 | if [ -z "${out}" ]; then | ||
| 1104 | if [ "$VERBOSE" = "1" ]; then | ||
| 1105 | printf "\nNo route entry found\n" | ||
| 1106 | printf "Expected:\n" | ||
| 1107 | printf " ${expected}\n" | ||
| 1108 | fi | ||
| 1109 | return 1 | ||
| 1110 | fi | ||
| 1111 | |||
| 1112 | # tricky way to convert output to 1-line without ip's | ||
| 1113 | # messy '\'; this drops all extra white space | ||
| 1114 | out=$(echo ${out}) | ||
| 1115 | if [ "${out}" != "${expected}" ]; then | ||
| 1116 | rc=1 | ||
| 1117 | if [ "${VERBOSE}" = "1" ]; then | ||
| 1118 | printf " Unexpected route entry. Have:\n" | ||
| 1119 | printf " ${out}\n" | ||
| 1120 | printf " Expected:\n" | ||
| 1121 | printf " ${expected}\n\n" | ||
| 1122 | fi | ||
| 1123 | fi | ||
| 1124 | |||
| 1125 | return $rc | ||
| 1126 | } | 1111 | } |
| 1127 | 1112 | ||
| 1128 | # assumption is that basic add of a single path route works | 1113 | # assumption is that basic add of a single path route works |
| @@ -1387,7 +1372,8 @@ ipv4_addr_metric_test() | |||
| 1387 | run_cmd "$IP li set dev dummy2 down" | 1372 | run_cmd "$IP li set dev dummy2 down" |
| 1388 | rc=$? | 1373 | rc=$? |
| 1389 | if [ $rc -eq 0 ]; then | 1374 | if [ $rc -eq 0 ]; then |
| 1390 | check_route "" | 1375 | out=$($IP ro ls match 172.16.104.0/24) |
| 1376 | check_expected "${out}" "" | ||
| 1391 | rc=$? | 1377 | rc=$? |
| 1392 | fi | 1378 | fi |
| 1393 | log_test $rc 0 "Prefix route removed on link down" | 1379 | log_test $rc 0 "Prefix route removed on link down" |
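The fib_tests.sh hunks above fold three copies of the route-comparison logic into one check_expected() helper; its unquoted echo is what collapses ip's multi-line output into a single whitespace-normalized string before comparing. A minimal standalone sketch of that pattern, with an illustrative function name, prefix and expected string that are not taken from the kernel script:

#!/bin/sh
# Compare command output against a one-line expected string, ignoring
# line breaks and repeated whitespace (same trick as check_expected).
compare_normalized()
{
	local out="$1"
	local expected="$2"

	# Unquoted expansion word-splits the output and echo rejoins it
	# with single spaces, flattening it to one line.
	out=$(echo ${out})

	[ "${out}" = "${expected}" ]
}

out=$(ip -6 route list match 2001:db8:104::/64 | sed -e 's/ pref medium//')
if compare_normalized "${out}" "2001:db8:104::/64 dev dummy1 proto kernel metric 256"; then
	echo "route matches"
else
	echo "route differs"
fi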
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests index 2dc95fda7ef7..ea5938ec009a 100755 --- a/tools/testing/selftests/net/run_afpackettests +++ b/tools/testing/selftests/net/run_afpackettests | |||
| @@ -6,12 +6,14 @@ if [ $(id -u) != 0 ]; then | |||
| 6 | exit 0 | 6 | exit 0 |
| 7 | fi | 7 | fi |
| 8 | 8 | ||
| 9 | ret=0 | ||
| 9 | echo "--------------------" | 10 | echo "--------------------" |
| 10 | echo "running psock_fanout test" | 11 | echo "running psock_fanout test" |
| 11 | echo "--------------------" | 12 | echo "--------------------" |
| 12 | ./in_netns.sh ./psock_fanout | 13 | ./in_netns.sh ./psock_fanout |
| 13 | if [ $? -ne 0 ]; then | 14 | if [ $? -ne 0 ]; then |
| 14 | echo "[FAIL]" | 15 | echo "[FAIL]" |
| 16 | ret=1 | ||
| 15 | else | 17 | else |
| 16 | echo "[PASS]" | 18 | echo "[PASS]" |
| 17 | fi | 19 | fi |
| @@ -22,6 +24,7 @@ echo "--------------------" | |||
| 22 | ./in_netns.sh ./psock_tpacket | 24 | ./in_netns.sh ./psock_tpacket |
| 23 | if [ $? -ne 0 ]; then | 25 | if [ $? -ne 0 ]; then |
| 24 | echo "[FAIL]" | 26 | echo "[FAIL]" |
| 27 | ret=1 | ||
| 25 | else | 28 | else |
| 26 | echo "[PASS]" | 29 | echo "[PASS]" |
| 27 | fi | 30 | fi |
| @@ -32,6 +35,8 @@ echo "--------------------" | |||
| 32 | ./in_netns.sh ./txring_overwrite | 35 | ./in_netns.sh ./txring_overwrite |
| 33 | if [ $? -ne 0 ]; then | 36 | if [ $? -ne 0 ]; then |
| 34 | echo "[FAIL]" | 37 | echo "[FAIL]" |
| 38 | ret=1 | ||
| 35 | else | 39 | else |
| 36 | echo "[PASS]" | 40 | echo "[PASS]" |
| 37 | fi | 41 | fi |
| 42 | exit $ret | ||
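The run_afpackettests change replaces the unconditional success exit with a ret flag that is set on any sub-test failure and propagated at the end, so the kselftest runner can see the script fail. A sketch of that pattern with placeholder sub-test binaries:

#!/bin/sh
ret=0

run_subtest()
{
	echo "running $1"
	"./$1"
	if [ $? -ne 0 ]; then
		echo "[FAIL]"
		ret=1		# remember the failure but keep running the rest
	else
		echo "[PASS]"
	fi
}

run_subtest example_subtest_a	# placeholder names, not real selftests
run_subtest example_subtest_b

exit $ret			# non-zero if any sub-test failed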
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests index b093f39c298c..14e41faf2c57 100755 --- a/tools/testing/selftests/net/run_netsocktests +++ b/tools/testing/selftests/net/run_netsocktests | |||
| @@ -7,7 +7,7 @@ echo "--------------------" | |||
| 7 | ./socket | 7 | ./socket |
| 8 | if [ $? -ne 0 ]; then | 8 | if [ $? -ne 0 ]; then |
| 9 | echo "[FAIL]" | 9 | echo "[FAIL]" |
| 10 | exit 1 | ||
| 10 | else | 11 | else |
| 11 | echo "[PASS]" | 12 | echo "[PASS]" |
| 12 | fi | 13 | fi |
| 13 | |||
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index c9ff2b47bd1c..a37cb1192c6a 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | # Makefile for netfilter selftests | 2 | # Makefile for netfilter selftests |
| 3 | 3 | ||
| 4 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh | 4 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh |
| 5 | 5 | ||
| 6 | include ../lib.mk | 6 | include ../lib.mk |
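Listing conntrack_icmp_related.sh in TEST_PROGS is what lets the shared lib.mk install and run it. Assuming a kernel source tree with nft and iproute2 available, the new test can then be exercised through the usual kselftest entry point or invoked directly:

# Run the netfilter selftests, which now include the new script
make -C tools/testing/selftests TARGETS=netfilter run_tests

# Or run it by hand (requires root)
cd tools/testing/selftests/netfilter
sudo ./conntrack_icmp_related.sh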
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh new file mode 100755 index 000000000000..b48e1833bc89 --- /dev/null +++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh | |||
| @@ -0,0 +1,283 @@ | |||
| 1 | #!/bin/bash | ||
| 2 | # | ||
| 3 | # check that ICMP df-needed/pkttoobig icmp errors are set as related | ||
| 4 | # state | ||
| 5 | # | ||
| 6 | # Setup is: | ||
| 7 | # | ||
| 8 | # nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2 | ||
| 9 | # MTU 1500, except for nsrouter2 <-> nsclient2 link (1280). | ||
| 10 | # ping nsclient2 from nsclient1, checking that conntrack did set RELATED state on the | ||
| 11 | # 'fragmentation needed' icmp packet. | ||
| 12 | # | ||
| 13 | # In addition, nsrouter1 will perform IP masquerading, i.e. also | ||
| 14 | # check the icmp errors are propagated to the correct host as per | ||
| 15 | # nat of "established" icmp-echo "connection". | ||
| 16 | |||
| 17 | # Kselftest framework requirement - SKIP code is 4. | ||
| 18 | ksft_skip=4 | ||
| 19 | ret=0 | ||
| 20 | |||
| 21 | nft --version > /dev/null 2>&1 | ||
| 22 | if [ $? -ne 0 ];then | ||
| 23 | echo "SKIP: Could not run test without nft tool" | ||
| 24 | exit $ksft_skip | ||
| 25 | fi | ||
| 26 | |||
| 27 | ip -Version > /dev/null 2>&1 | ||
| 28 | if [ $? -ne 0 ];then | ||
| 29 | echo "SKIP: Could not run test without ip tool" | ||
| 30 | exit $ksft_skip | ||
| 31 | fi | ||
| 32 | |||
| 33 | cleanup() { | ||
| 34 | for i in 1 2;do ip netns del nsclient$i;done | ||
| 35 | for i in 1 2;do ip netns del nsrouter$i;done | ||
| 36 | } | ||
| 37 | |||
| 38 | ipv4() { | ||
| 39 | echo -n 192.168.$1.2 | ||
| 40 | } | ||
| 41 | |||
| 42 | ipv6 () { | ||
| 43 | echo -n dead:$1::2 | ||
| 44 | } | ||
| 45 | |||
| 46 | check_counter() | ||
| 47 | { | ||
| 48 | ns=$1 | ||
| 49 | name=$2 | ||
| 50 | expect=$3 | ||
| 51 | local lret=0 | ||
| 52 | |||
| 53 | cnt=$(ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect") | ||
| 54 | if [ $? -ne 0 ]; then | ||
| 55 | echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2 | ||
| 56 | ip netns exec $ns nft list counter inet filter "$name" 1>&2 | ||
| 57 | lret=1 | ||
| 58 | fi | ||
| 59 | |||
| 60 | return $lret | ||
| 61 | } | ||
| 62 | |||
| 63 | check_unknown() | ||
| 64 | { | ||
| 65 | expect="packets 0 bytes 0" | ||
| 66 | for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do | ||
| 67 | check_counter $n "unknown" "$expect" | ||
| 68 | if [ $? -ne 0 ] ;then | ||
| 69 | return 1 | ||
| 70 | fi | ||
| 71 | done | ||
| 72 | |||
| 73 | return 0 | ||
| 74 | } | ||
| 75 | |||
| 76 | for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do | ||
| 77 | ip netns add $n | ||
| 78 | ip -net $n link set lo up | ||
| 79 | done | ||
| 80 | |||
| 81 | DEV=veth0 | ||
| 82 | ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1 | ||
| 83 | DEV=veth0 | ||
| 84 | ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2 | ||
| 85 | |||
| 86 | DEV=veth0 | ||
| 87 | ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2 | ||
| 88 | |||
| 89 | DEV=veth0 | ||
| 90 | for i in 1 2; do | ||
| 91 | ip -net nsclient$i link set $DEV up | ||
| 92 | ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV | ||
| 93 | ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV | ||
| 94 | done | ||
| 95 | |||
| 96 | ip -net nsrouter1 link set eth1 up | ||
| 97 | ip -net nsrouter1 link set veth0 up | ||
| 98 | |||
| 99 | ip -net nsrouter2 link set eth1 up | ||
| 100 | ip -net nsrouter2 link set eth2 up | ||
| 101 | |||
| 102 | ip -net nsclient1 route add default via 192.168.1.1 | ||
| 103 | ip -net nsclient1 -6 route add default via dead:1::1 | ||
| 104 | |||
| 105 | ip -net nsclient2 route add default via 192.168.2.1 | ||
| 106 | ip -net nsclient2 -6 route add default via dead:2::1 | ||
| 107 | |||
| 108 | i=3 | ||
| 109 | ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1 | ||
| 110 | ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0 | ||
| 111 | ip -net nsrouter1 addr add dead:1::1/64 dev eth1 | ||
| 112 | ip -net nsrouter1 addr add dead:3::1/64 dev veth0 | ||
| 113 | ip -net nsrouter1 route add default via 192.168.3.10 | ||
| 114 | ip -net nsrouter1 -6 route add default via dead:3::10 | ||
| 115 | |||
| 116 | ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1 | ||
| 117 | ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2 | ||
| 118 | ip -net nsrouter2 addr add dead:2::1/64 dev eth1 | ||
| 119 | ip -net nsrouter2 addr add dead:3::10/64 dev eth2 | ||
| 120 | ip -net nsrouter2 route add default via 192.168.3.1 | ||
| 121 | ip -net nsrouter2 -6 route add default via dead:3::1 | ||
| 122 | |||
| 123 | sleep 2 | ||
| 124 | for i in 4 6; do | ||
| 125 | ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1 | ||
| 126 | ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1 | ||
| 127 | done | ||
| 128 | |||
| 129 | for netns in nsrouter1 nsrouter2; do | ||
| 130 | ip netns exec $netns nft -f - <<EOF | ||
| 131 | table inet filter { | ||
| 132 | counter unknown { } | ||
| 133 | counter related { } | ||
| 134 | chain forward { | ||
| 135 | type filter hook forward priority 0; policy accept; | ||
| 136 | meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept | ||
| 137 | meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept | ||
| 138 | meta l4proto { icmp, icmpv6 } ct state new,established accept | ||
| 139 | counter name "unknown" drop | ||
| 140 | } | ||
| 141 | } | ||
| 142 | EOF | ||
| 143 | done | ||
| 144 | |||
| 145 | ip netns exec nsclient1 nft -f - <<EOF | ||
| 146 | table inet filter { | ||
| 147 | counter unknown { } | ||
| 148 | counter related { } | ||
| 149 | chain input { | ||
| 150 | type filter hook input priority 0; policy accept; | ||
| 151 | meta l4proto { icmp, icmpv6 } ct state established,untracked accept | ||
| 152 | |||
| 153 | meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept | ||
| 154 | counter name "unknown" drop | ||
| 155 | } | ||
| 156 | } | ||
| 157 | EOF | ||
| 158 | |||
| 159 | ip netns exec nsclient2 nft -f - <<EOF | ||
| 160 | table inet filter { | ||
| 161 | counter unknown { } | ||
| 162 | counter new { } | ||
| 163 | counter established { } | ||
| 164 | |||
| 165 | chain input { | ||
| 166 | type filter hook input priority 0; policy accept; | ||
| 167 | meta l4proto { icmp, icmpv6 } ct state established,untracked accept | ||
| 168 | |||
| 169 | meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept | ||
| 170 | meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept | ||
| 171 | counter name "unknown" drop | ||
| 172 | } | ||
| 173 | chain output { | ||
| 174 | type filter hook output priority 0; policy accept; | ||
| 175 | meta l4proto { icmp, icmpv6 } ct state established,untracked accept | ||
| 176 | |||
| 177 | meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" | ||
| 178 | meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" | ||
| 179 | counter name "unknown" drop | ||
| 180 | } | ||
| 181 | } | ||
| 182 | EOF | ||
| 183 | |||
| 184 | |||
| 185 | # make sure NAT core rewrites address of icmp error if nat is used according to | ||
| 186 | # conntrack nat information (icmp error will be directed at nsrouter1 address, | ||
| 187 | # but it needs to be routed to nsclient1 address). | ||
| 188 | ip netns exec nsrouter1 nft -f - <<EOF | ||
| 189 | table ip nat { | ||
| 190 | chain postrouting { | ||
| 191 | type nat hook postrouting priority 0; policy accept; | ||
| 192 | ip protocol icmp oifname "veth0" counter masquerade | ||
| 193 | } | ||
| 194 | } | ||
| 195 | table ip6 nat { | ||
| 196 | chain postrouting { | ||
| 197 | type nat hook postrouting priority 0; policy accept; | ||
| 198 | ip6 nexthdr icmpv6 oifname "veth0" counter masquerade | ||
| 199 | } | ||
| 200 | } | ||
| 201 | EOF | ||
| 202 | |||
| 203 | ip netns exec nsrouter2 ip link set eth1 mtu 1280 | ||
| 204 | ip netns exec nsclient2 ip link set veth0 mtu 1280 | ||
| 205 | sleep 1 | ||
| 206 | |||
| 207 | ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null | ||
| 208 | if [ $? -ne 0 ]; then | ||
| 209 | echo "ERROR: netns ip routing/connectivity broken" 1>&2 | ||
| 210 | cleanup | ||
| 211 | exit 1 | ||
| 212 | fi | ||
| 213 | ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null | ||
| 214 | if [ $? -ne 0 ]; then | ||
| 215 | echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2 | ||
| 216 | cleanup | ||
| 217 | exit 1 | ||
| 218 | fi | ||
| 219 | |||
| 220 | check_unknown | ||
| 221 | if [ $? -ne 0 ]; then | ||
| 222 | ret=1 | ||
| 223 | fi | ||
| 224 | |||
| 225 | expect="packets 0 bytes 0" | ||
| 226 | for netns in nsrouter1 nsrouter2 nsclient1;do | ||
| 227 | check_counter "$netns" "related" "$expect" | ||
| 228 | if [ $? -ne 0 ]; then | ||
| 229 | ret=1 | ||
| 230 | fi | ||
| 231 | done | ||
| 232 | |||
| 233 | expect="packets 2 bytes 2076" | ||
| 234 | check_counter nsclient2 "new" "$expect" | ||
| 235 | if [ $? -ne 0 ]; then | ||
| 236 | ret=1 | ||
| 237 | fi | ||
| 238 | |||
| 239 | ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null | ||
| 240 | if [ $? -eq 0 ]; then | ||
| 241 | echo "ERROR: ping should have failed with PMTU too big error" 1>&2 | ||
| 242 | ret=1 | ||
| 243 | fi | ||
| 244 | |||
| 245 | # nsrouter2 should have generated the icmp error, so | ||
| 246 | # related counter should be 0 (it's in forward). | ||
| 247 | expect="packets 0 bytes 0" | ||
| 248 | check_counter "nsrouter2" "related" "$expect" | ||
| 249 | if [ $? -ne 0 ]; then | ||
| 250 | ret=1 | ||
| 251 | fi | ||
| 252 | |||
| 253 | # but nsrouter1 should have seen it, same for nsclient1. | ||
| 254 | expect="packets 1 bytes 576" | ||
| 255 | for netns in nsrouter1 nsclient1;do | ||
| 256 | check_counter "$netns" "related" "$expect" | ||
| 257 | if [ $? -ne 0 ]; then | ||
| 258 | ret=1 | ||
| 259 | fi | ||
| 260 | done | ||
| 261 | |||
| 262 | ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null | ||
| 263 | if [ $? -eq 0 ]; then | ||
| 264 | echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2 | ||
| 265 | ret=1 | ||
| 266 | fi | ||
| 267 | |||
| 268 | expect="packets 2 bytes 1856" | ||
| 269 | for netns in nsrouter1 nsclient1;do | ||
| 270 | check_counter "$netns" "related" "$expect" | ||
| 271 | if [ $? -ne 0 ]; then | ||
| 272 | ret=1 | ||
| 273 | fi | ||
| 274 | done | ||
| 275 | |||
| 276 | if [ $ret -eq 0 ];then | ||
| 277 | echo "PASS: icmp mtu error had RELATED state" | ||
| 278 | else | ||
| 279 | echo "ERROR: icmp error RELATED state test has failed" | ||
| 280 | fi | ||
| 281 | |||
| 282 | cleanup | ||
| 283 | exit $ret | ||
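The script validates everything through nft counters, but the same RELATED behaviour can be spot-checked by hand while the namespaces it creates are still up; the commands below reuse only addresses and counter names defined in the script:

# A 1300-byte ping with DF set cannot cross the 1280-MTU link, so
# nsrouter2 emits an ICMP 'fragmentation needed' error (the ping fails).
ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2

# The error crosses nsrouter1's forward hook as RELATED, so its
# counter should have moved.
ip netns exec nsrouter1 nft list counter inet filter related

# nsclient1 sees it in its input hook as RELATED as well.
ip netns exec nsclient1 nft list counter inet filter related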
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 8ec76681605c..3194007cf8d1 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh | |||
| @@ -321,6 +321,7 @@ EOF | |||
| 321 | 321 | ||
| 322 | test_masquerade6() | 322 | test_masquerade6() |
| 323 | { | 323 | { |
| 324 | local natflags=$1 | ||
| 324 | local lret=0 | 325 | local lret=0 |
| 325 | 326 | ||
| 326 | ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null | 327 | ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null |
| @@ -354,13 +355,13 @@ ip netns exec ns0 nft -f - <<EOF | |||
| 354 | table ip6 nat { | 355 | table ip6 nat { |
| 355 | chain postrouting { | 356 | chain postrouting { |
| 356 | type nat hook postrouting priority 0; policy accept; | 357 | type nat hook postrouting priority 0; policy accept; |
| 357 | meta oif veth0 masquerade | 358 | meta oif veth0 masquerade $natflags |
| 358 | } | 359 | } |
| 359 | } | 360 | } |
| 360 | EOF | 361 | EOF |
| 361 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | 362 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 |
| 362 | if [ $? -ne 0 ] ; then | 363 | if [ $? -ne 0 ] ; then |
| 363 | echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading" | 364 | echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags" |
| 364 | lret=1 | 365 | lret=1 |
| 365 | fi | 366 | fi |
| 366 | 367 | ||
| @@ -397,19 +398,26 @@ EOF | |||
| 397 | fi | 398 | fi |
| 398 | done | 399 | done |
| 399 | 400 | ||
| 401 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
| 402 | if [ $? -ne 0 ] ; then | ||
| 403 | echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)" | ||
| 404 | lret=1 | ||
| 405 | fi | ||
| 406 | |||
| 400 | ip netns exec ns0 nft flush chain ip6 nat postrouting | 407 | ip netns exec ns0 nft flush chain ip6 nat postrouting |
| 401 | if [ $? -ne 0 ]; then | 408 | if [ $? -ne 0 ]; then |
| 402 | echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 | 409 | echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 |
| 403 | lret=1 | 410 | lret=1 |
| 404 | fi | 411 | fi |
| 405 | 412 | ||
| 406 | test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2" | 413 | test $lret -eq 0 && echo "PASS: IPv6 masquerade $natflags for ns2" |
| 407 | 414 | ||
| 408 | return $lret | 415 | return $lret |
| 409 | } | 416 | } |
| 410 | 417 | ||
| 411 | test_masquerade() | 418 | test_masquerade() |
| 412 | { | 419 | { |
| 420 | local natflags=$1 | ||
| 413 | local lret=0 | 421 | local lret=0 |
| 414 | 422 | ||
| 415 | ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null | 423 | ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null |
| @@ -417,7 +425,7 @@ test_masquerade() | |||
| 417 | 425 | ||
| 418 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | 426 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 |
| 419 | if [ $? -ne 0 ] ; then | 427 | if [ $? -ne 0 ] ; then |
| 420 | echo "ERROR: canot ping ns1 from ns2" | 428 | echo "ERROR: cannot ping ns1 from ns2 $natflags" |
| 421 | lret=1 | 429 | lret=1 |
| 422 | fi | 430 | fi |
| 423 | 431 | ||
| @@ -443,13 +451,13 @@ ip netns exec ns0 nft -f - <<EOF | |||
| 443 | table ip nat { | 451 | table ip nat { |
| 444 | chain postrouting { | 452 | chain postrouting { |
| 445 | type nat hook postrouting priority 0; policy accept; | 453 | type nat hook postrouting priority 0; policy accept; |
| 446 | meta oif veth0 masquerade | 454 | meta oif veth0 masquerade $natflags |
| 447 | } | 455 | } |
| 448 | } | 456 | } |
| 449 | EOF | 457 | EOF |
| 450 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | 458 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 |
| 451 | if [ $? -ne 0 ] ; then | 459 | if [ $? -ne 0 ] ; then |
| 452 | echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading" | 460 | echo "ERROR: cannot ping ns1 from ns2 with active ip masquere $natflags" |
| 453 | lret=1 | 461 | lret=1 |
| 454 | fi | 462 | fi |
| 455 | 463 | ||
| @@ -485,13 +493,19 @@ EOF | |||
| 485 | fi | 493 | fi |
| 486 | done | 494 | done |
| 487 | 495 | ||
| 496 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | ||
| 497 | if [ $? -ne 0 ] ; then | ||
| 498 | echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)" | ||
| 499 | lret=1 | ||
| 500 | fi | ||
| 501 | |||
| 488 | ip netns exec ns0 nft flush chain ip nat postrouting | 502 | ip netns exec ns0 nft flush chain ip nat postrouting |
| 489 | if [ $? -ne 0 ]; then | 503 | if [ $? -ne 0 ]; then |
| 490 | echo "ERROR: Could not flush nat postrouting" 1>&2 | 504 | echo "ERROR: Could not flush nat postrouting" 1>&2 |
| 491 | lret=1 | 505 | lret=1 |
| 492 | fi | 506 | fi |
| 493 | 507 | ||
| 494 | test $lret -eq 0 && echo "PASS: IP masquerade for ns2" | 508 | test $lret -eq 0 && echo "PASS: IP masquerade $natflags for ns2" |
| 495 | 509 | ||
| 496 | return $lret | 510 | return $lret |
| 497 | } | 511 | } |
| @@ -750,8 +764,12 @@ test_local_dnat | |||
| 750 | test_local_dnat6 | 764 | test_local_dnat6 |
| 751 | 765 | ||
| 752 | reset_counters | 766 | reset_counters |
| 753 | test_masquerade | 767 | test_masquerade "" |
| 754 | test_masquerade6 | 768 | test_masquerade6 "" |
| 769 | |||
| 770 | reset_counters | ||
| 771 | test_masquerade "fully-random" | ||
| 772 | test_masquerade6 "fully-random" | ||
| 755 | 773 | ||
| 756 | reset_counters | 774 | reset_counters |
| 757 | test_redirect | 775 | test_redirect |
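With the new natflags parameter, test_masquerade and test_masquerade6 now run twice, once with an empty flag string and once with fully-random source-port allocation. On the second pass the generated postrouting chain would look roughly like this (namespace, table and interface names taken from the test itself):

ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		# "$natflags" expands to "fully-random" on the second pass
		meta oif veth0 masquerade fully-random
	}
}
EOF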
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c index 7202bbac976e..853aa164a401 100644 --- a/tools/testing/selftests/proc/proc-pid-vm.c +++ b/tools/testing/selftests/proc/proc-pid-vm.c | |||
| @@ -187,8 +187,8 @@ static int make_exe(const uint8_t *payload, size_t len) | |||
| 187 | ph.p_offset = 0; | 187 | ph.p_offset = 0; |
| 188 | ph.p_vaddr = VADDR; | 188 | ph.p_vaddr = VADDR; |
| 189 | ph.p_paddr = 0; | 189 | ph.p_paddr = 0; |
| 190 | ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload); | 190 | ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len; |
| 191 | ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload); | 191 | ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len; |
| 192 | ph.p_align = 4096; | 192 | ph.p_align = 4096; |
| 193 | 193 | ||
| 194 | fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700); | 194 | fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700); |
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c index 762cb01f2ca7..47b7473dedef 100644 --- a/tools/testing/selftests/proc/proc-self-map-files-002.c +++ b/tools/testing/selftests/proc/proc-self-map-files-002.c | |||
| @@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b) | |||
| 46 | 46 | ||
| 47 | int main(void) | 47 | int main(void) |
| 48 | { | 48 | { |
| 49 | const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE); | 49 | const int PAGE_SIZE = sysconf(_SC_PAGESIZE); |
| 50 | #ifdef __arm__ | 50 | const unsigned long va_max = 1UL << 32; |
| 51 | unsigned long va = 2 * PAGE_SIZE; | 51 | unsigned long va; |
| 52 | #else | ||
| 53 | unsigned long va = 0; | ||
| 54 | #endif | ||
| 55 | void *p; | 52 | void *p; |
| 56 | int fd; | 53 | int fd; |
| 57 | unsigned long a, b; | 54 | unsigned long a, b; |
| @@ -60,10 +57,13 @@ int main(void) | |||
| 60 | if (fd == -1) | 57 | if (fd == -1) |
| 61 | return 1; | 58 | return 1; |
| 62 | 59 | ||
| 63 | p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0); | 60 | for (va = 0; va < va_max; va += PAGE_SIZE) { |
| 64 | if (p == MAP_FAILED) { | 61 | p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0); |
| 65 | if (errno == EPERM) | 62 | if (p == (void *)va) |
| 66 | return 4; | 63 | break; |
| 64 | } | ||
| 65 | if (va == va_max) { | ||
| 66 | fprintf(stderr, "error: mmap doesn't like you\n"); | ||
| 67 | return 1; | 67 | return 1; |
| 68 | } | 68 | } |
| 69 | 69 | ||
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json index 27f0acaed880..ddabb160a11b 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json | |||
| @@ -144,6 +144,30 @@ | |||
| 144 | ] | 144 | ] |
| 145 | }, | 145 | }, |
| 146 | { | 146 | { |
| 147 | "id": "7571", | ||
| 148 | "name": "Add sample action with invalid rate", | ||
| 149 | "category": [ | ||
| 150 | "actions", | ||
| 151 | "sample" | ||
| 152 | ], | ||
| 153 | "setup": [ | ||
| 154 | [ | ||
| 155 | "$TC actions flush action sample", | ||
| 156 | 0, | ||
| 157 | 1, | ||
| 158 | 255 | ||
| 159 | ] | ||
| 160 | ], | ||
| 161 | "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2", | ||
| 162 | "expExitCode": "255", | ||
| 163 | "verifyCmd": "$TC actions get action sample index 2", | ||
| 164 | "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref", | ||
| 165 | "matchCount": "0", | ||
| 166 | "teardown": [ | ||
| 167 | "$TC actions flush action sample" | ||
| 168 | ] | ||
| 169 | }, | ||
| 170 | { | ||
| 147 | "id": "b6d4", | 171 | "id": "b6d4", |
| 148 | "name": "Add sample action with mandatory arguments and invalid control action", | 172 | "name": "Add sample action with mandatory arguments and invalid control action", |
| 149 | "category": [ | 173 | "category": [ |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index 99a5ffca1088..2d096b2abf2c 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |||
| @@ -19,6 +19,26 @@ | |||
| 19 | ] | 19 | ] |
| 20 | }, | 20 | }, |
| 21 | { | 21 | { |
| 22 | "id": "2638", | ||
| 23 | "name": "Add matchall and try to get it", | ||
| 24 | "category": [ | ||
| 25 | "filter", | ||
| 26 | "matchall" | ||
| 27 | ], | ||
| 28 | "setup": [ | ||
| 29 | "$TC qdisc add dev $DEV1 clsact", | ||
| 30 | "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok" | ||
| 31 | ], | ||
| 32 | "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall", | ||
| 33 | "expExitCode": "0", | ||
| 34 | "verifyCmd": "$TC filter show dev $DEV1 ingress", | ||
| 35 | "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234", | ||
| 36 | "matchCount": "1", | ||
| 37 | "teardown": [ | ||
| 38 | "$TC qdisc del dev $DEV1 clsact" | ||
| 39 | ] | ||
| 40 | }, | ||
| 41 | { | ||
| 22 | "id": "d052", | 42 | "id": "d052", |
| 23 | "name": "Add 1M filters with the same action", | 43 | "name": "Add 1M filters with the same action", |
| 24 | "category": [ | 44 | "category": [ |
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py index 40ea95ce2ead..828c18584624 100644 --- a/tools/testing/selftests/tpm2/tpm2.py +++ b/tools/testing/selftests/tpm2/tpm2.py | |||
| @@ -22,6 +22,7 @@ TPM2_CC_UNSEAL = 0x015E | |||
| 22 | TPM2_CC_FLUSH_CONTEXT = 0x0165 | 22 | TPM2_CC_FLUSH_CONTEXT = 0x0165 |
| 23 | TPM2_CC_START_AUTH_SESSION = 0x0176 | 23 | TPM2_CC_START_AUTH_SESSION = 0x0176 |
| 24 | TPM2_CC_GET_CAPABILITY = 0x017A | 24 | TPM2_CC_GET_CAPABILITY = 0x017A |
| 25 | TPM2_CC_GET_RANDOM = 0x017B | ||
| 25 | TPM2_CC_PCR_READ = 0x017E | 26 | TPM2_CC_PCR_READ = 0x017E |
| 26 | TPM2_CC_POLICY_PCR = 0x017F | 27 | TPM2_CC_POLICY_PCR = 0x017F |
| 27 | TPM2_CC_PCR_EXTEND = 0x0182 | 28 | TPM2_CC_PCR_EXTEND = 0x0182 |
| @@ -357,9 +358,9 @@ class Client: | |||
| 357 | self.flags = flags | 358 | self.flags = flags |
| 358 | 359 | ||
| 359 | if (self.flags & Client.FLAG_SPACE) == 0: | 360 | if (self.flags & Client.FLAG_SPACE) == 0: |
| 360 | self.tpm = open('/dev/tpm0', 'r+b') | 361 | self.tpm = open('/dev/tpm0', 'r+b', buffering=0) |
| 361 | else: | 362 | else: |
| 362 | self.tpm = open('/dev/tpmrm0', 'r+b') | 363 | self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0) |
| 363 | 364 | ||
| 364 | def close(self): | 365 | def close(self): |
| 365 | self.tpm.close() | 366 | self.tpm.close() |
diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py index 3bb066fea4a0..d4973be53493 100644 --- a/tools/testing/selftests/tpm2/tpm2_tests.py +++ b/tools/testing/selftests/tpm2/tpm2_tests.py | |||
| @@ -158,6 +158,69 @@ class SmokeTest(unittest.TestCase): | |||
| 158 | pass | 158 | pass |
| 159 | self.assertEqual(rejected, True) | 159 | self.assertEqual(rejected, True) |
| 160 | 160 | ||
| 161 | def test_read_partial_resp(self): | ||
| 162 | try: | ||
| 163 | fmt = '>HIIH' | ||
| 164 | cmd = struct.pack(fmt, | ||
| 165 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 166 | struct.calcsize(fmt), | ||
| 167 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 168 | 0x20) | ||
| 169 | self.client.tpm.write(cmd) | ||
| 170 | hdr = self.client.tpm.read(10) | ||
| 171 | sz = struct.unpack('>I', hdr[2:6])[0] | ||
| 172 | rsp = self.client.tpm.read() | ||
| 173 | except: | ||
| 174 | pass | ||
| 175 | self.assertEqual(sz, 10 + 2 + 32) | ||
| 176 | self.assertEqual(len(rsp), 2 + 32) | ||
| 177 | |||
| 178 | def test_read_partial_overwrite(self): | ||
| 179 | try: | ||
| 180 | fmt = '>HIIH' | ||
| 181 | cmd = struct.pack(fmt, | ||
| 182 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 183 | struct.calcsize(fmt), | ||
| 184 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 185 | 0x20) | ||
| 186 | self.client.tpm.write(cmd) | ||
| 187 | # Read part of the response | ||
| 188 | rsp1 = self.client.tpm.read(15) | ||
| 189 | |||
| 190 | # Send a new cmd | ||
| 191 | self.client.tpm.write(cmd) | ||
| 192 | |||
| 193 | # Read the whole response | ||
| 194 | rsp2 = self.client.tpm.read() | ||
| 195 | except: | ||
| 196 | pass | ||
| 197 | self.assertEqual(len(rsp1), 15) | ||
| 198 | self.assertEqual(len(rsp2), 10 + 2 + 32) | ||
| 199 | |||
| 200 | def test_send_two_cmds(self): | ||
| 201 | rejected = False | ||
| 202 | try: | ||
| 203 | fmt = '>HIIH' | ||
| 204 | cmd = struct.pack(fmt, | ||
| 205 | tpm2.TPM2_ST_NO_SESSIONS, | ||
| 206 | struct.calcsize(fmt), | ||
| 207 | tpm2.TPM2_CC_GET_RANDOM, | ||
| 208 | 0x20) | ||
| 209 | self.client.tpm.write(cmd) | ||
| 210 | |||
| 211 | # expect the second one to raise -EBUSY error | ||
| 212 | self.client.tpm.write(cmd) | ||
| 213 | rsp = self.client.tpm.read() | ||
| 214 | |||
| 215 | except IOError as e: | ||
| 216 | # read the response | ||
| 217 | rsp = self.client.tpm.read() | ||
| 218 | rejected = True | ||
| 219 | pass | ||
| 220 | except: | ||
| 221 | pass | ||
| 222 | self.assertEqual(rejected, True) | ||
| 223 | |||
| 161 | class SpaceTest(unittest.TestCase): | 224 | class SpaceTest(unittest.TestCase): |
| 162 | def setUp(self): | 225 | def setUp(self): |
| 163 | logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG) | 226 | logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG) |
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index 3547b0d8c91e..79e59e4fa3dc 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c | |||
| @@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm, | |||
| 144 | { | 144 | { |
| 145 | struct kvm_kernel_irq_routing_entry *ei; | 145 | struct kvm_kernel_irq_routing_entry *ei; |
| 146 | int r; | 146 | int r; |
| 147 | u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES); | ||
| 147 | 148 | ||
| 148 | /* | 149 | /* |
| 149 | * Do not allow GSI to be mapped to the same irqchip more than once. | 150 | * Do not allow GSI to be mapped to the same irqchip more than once. |
| 150 | * Allow only one to one mapping between GSI and non-irqchip routing. | 151 | * Allow only one to one mapping between GSI and non-irqchip routing. |
| 151 | */ | 152 | */ |
| 152 | hlist_for_each_entry(ei, &rt->map[ue->gsi], link) | 153 | hlist_for_each_entry(ei, &rt->map[gsi], link) |
| 153 | if (ei->type != KVM_IRQ_ROUTING_IRQCHIP || | 154 | if (ei->type != KVM_IRQ_ROUTING_IRQCHIP || |
| 154 | ue->type != KVM_IRQ_ROUTING_IRQCHIP || | 155 | ue->type != KVM_IRQ_ROUTING_IRQCHIP || |
| 155 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | 156 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) |
| 156 | return -EINVAL; | 157 | return -EINVAL; |
| 157 | 158 | ||
| 158 | e->gsi = ue->gsi; | 159 | e->gsi = gsi; |
| 159 | e->type = ue->type; | 160 | e->type = ue->type; |
| 160 | r = kvm_set_routing_entry(kvm, e, ue); | 161 | r = kvm_set_routing_entry(kvm, e, ue); |
| 161 | if (r) | 162 | if (r) |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 55fe8e20d8fd..dc8edc97ba85 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -2977,12 +2977,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
| 2977 | struct kvm_device_ops *ops = NULL; | 2977 | struct kvm_device_ops *ops = NULL; |
| 2978 | struct kvm_device *dev; | 2978 | struct kvm_device *dev; |
| 2979 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; | 2979 | bool test = cd->flags & KVM_CREATE_DEVICE_TEST; |
| 2980 | int type; | ||
| 2980 | int ret; | 2981 | int ret; |
| 2981 | 2982 | ||
| 2982 | if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) | 2983 | if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) |
| 2983 | return -ENODEV; | 2984 | return -ENODEV; |
| 2984 | 2985 | ||
| 2985 | ops = kvm_device_ops_table[cd->type]; | 2986 | type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table)); |
| 2987 | ops = kvm_device_ops_table[type]; | ||
| 2986 | if (ops == NULL) | 2988 | if (ops == NULL) |
| 2987 | return -ENODEV; | 2989 | return -ENODEV; |
| 2988 | 2990 | ||
| @@ -2997,7 +2999,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
| 2997 | dev->kvm = kvm; | 2999 | dev->kvm = kvm; |
| 2998 | 3000 | ||
| 2999 | mutex_lock(&kvm->lock); | 3001 | mutex_lock(&kvm->lock); |
| 3000 | ret = ops->create(dev, cd->type); | 3002 | ret = ops->create(dev, type); |
| 3001 | if (ret < 0) { | 3003 | if (ret < 0) { |
| 3002 | mutex_unlock(&kvm->lock); | 3004 | mutex_unlock(&kvm->lock); |
| 3003 | kfree(dev); | 3005 | kfree(dev); |
