author     David S. Miller <davem@davemloft.net>   2019-01-21 17:41:32 -0500
committer  David S. Miller <davem@davemloft.net>   2019-01-21 17:41:32 -0500
commit     fa7f3a8d56b38a3ed1880a3780afba82387da277 (patch)
tree       a4628ee966f21963e5e97a6d1a227a3e8138183e
parent     28f9d1a3d4fecdb2352d3984ddeec88146385885 (diff)
parent     49a57857aeea06ca831043acbb0fa5e0f50602fd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Completely minor snmp doc conflict.
Signed-off-by: David S. Miller <davem@davemloft.net>
331 files changed, 3353 insertions, 1443 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros: | |||
72 | - 'apei_estatus_for_each_section' | 72 | - 'apei_estatus_for_each_section' |
73 | - 'ata_for_each_dev' | 73 | - 'ata_for_each_dev' |
74 | - 'ata_for_each_link' | 74 | - 'ata_for_each_link' |
75 | - '__ata_qc_for_each' | ||
76 | - 'ata_qc_for_each' | ||
77 | - 'ata_qc_for_each_raw' | ||
78 | - 'ata_qc_for_each_with_internal' | ||
75 | - 'ax25_for_each' | 79 | - 'ax25_for_each' |
76 | - 'ax25_uid_for_each' | 80 | - 'ax25_uid_for_each' |
77 | - 'bio_for_each_integrity_vec' | 81 | - 'bio_for_each_integrity_vec' |
@@ -85,6 +89,7 @@ ForEachMacros: | |||
85 | - 'blk_queue_for_each_rl' | 89 | - 'blk_queue_for_each_rl' |
86 | - 'bond_for_each_slave' | 90 | - 'bond_for_each_slave' |
87 | - 'bond_for_each_slave_rcu' | 91 | - 'bond_for_each_slave_rcu' |
92 | - 'bpf_for_each_spilled_reg' | ||
88 | - 'btree_for_each_safe128' | 93 | - 'btree_for_each_safe128' |
89 | - 'btree_for_each_safe32' | 94 | - 'btree_for_each_safe32' |
90 | - 'btree_for_each_safe64' | 95 | - 'btree_for_each_safe64' |
@@ -103,6 +108,8 @@ ForEachMacros: | |||
103 | - 'drm_atomic_crtc_for_each_plane' | 108 | - 'drm_atomic_crtc_for_each_plane' |
104 | - 'drm_atomic_crtc_state_for_each_plane' | 109 | - 'drm_atomic_crtc_state_for_each_plane' |
105 | - 'drm_atomic_crtc_state_for_each_plane_state' | 110 | - 'drm_atomic_crtc_state_for_each_plane_state' |
111 | - 'drm_atomic_for_each_plane_damage' | ||
112 | - 'drm_connector_for_each_possible_encoder' | ||
106 | - 'drm_for_each_connector_iter' | 113 | - 'drm_for_each_connector_iter' |
107 | - 'drm_for_each_crtc' | 114 | - 'drm_for_each_crtc' |
108 | - 'drm_for_each_encoder' | 115 | - 'drm_for_each_encoder' |
@@ -121,11 +128,21 @@ ForEachMacros: | |||
121 | - 'for_each_bio' | 128 | - 'for_each_bio' |
122 | - 'for_each_board_func_rsrc' | 129 | - 'for_each_board_func_rsrc' |
123 | - 'for_each_bvec' | 130 | - 'for_each_bvec' |
131 | - 'for_each_card_components' | ||
132 | - 'for_each_card_links' | ||
133 | - 'for_each_card_links_safe' | ||
134 | - 'for_each_card_prelinks' | ||
135 | - 'for_each_card_rtds' | ||
136 | - 'for_each_card_rtds_safe' | ||
137 | - 'for_each_cgroup_storage_type' | ||
124 | - 'for_each_child_of_node' | 138 | - 'for_each_child_of_node' |
125 | - 'for_each_clear_bit' | 139 | - 'for_each_clear_bit' |
126 | - 'for_each_clear_bit_from' | 140 | - 'for_each_clear_bit_from' |
127 | - 'for_each_cmsghdr' | 141 | - 'for_each_cmsghdr' |
128 | - 'for_each_compatible_node' | 142 | - 'for_each_compatible_node' |
143 | - 'for_each_component_dais' | ||
144 | - 'for_each_component_dais_safe' | ||
145 | - 'for_each_comp_order' | ||
129 | - 'for_each_console' | 146 | - 'for_each_console' |
130 | - 'for_each_cpu' | 147 | - 'for_each_cpu' |
131 | - 'for_each_cpu_and' | 148 | - 'for_each_cpu_and' |
@@ -133,6 +150,10 @@ ForEachMacros: | |||
133 | - 'for_each_cpu_wrap' | 150 | - 'for_each_cpu_wrap' |
134 | - 'for_each_dev_addr' | 151 | - 'for_each_dev_addr' |
135 | - 'for_each_dma_cap_mask' | 152 | - 'for_each_dma_cap_mask' |
153 | - 'for_each_dpcm_be' | ||
154 | - 'for_each_dpcm_be_rollback' | ||
155 | - 'for_each_dpcm_be_safe' | ||
156 | - 'for_each_dpcm_fe' | ||
136 | - 'for_each_drhd_unit' | 157 | - 'for_each_drhd_unit' |
137 | - 'for_each_dss_dev' | 158 | - 'for_each_dss_dev' |
138 | - 'for_each_efi_memory_desc' | 159 | - 'for_each_efi_memory_desc' |
@@ -149,6 +170,7 @@ ForEachMacros: | |||
149 | - 'for_each_iommu' | 170 | - 'for_each_iommu' |
150 | - 'for_each_ip_tunnel_rcu' | 171 | - 'for_each_ip_tunnel_rcu' |
151 | - 'for_each_irq_nr' | 172 | - 'for_each_irq_nr' |
173 | - 'for_each_link_codecs' | ||
152 | - 'for_each_lru' | 174 | - 'for_each_lru' |
153 | - 'for_each_matching_node' | 175 | - 'for_each_matching_node' |
154 | - 'for_each_matching_node_and_match' | 176 | - 'for_each_matching_node_and_match' |
@@ -160,6 +182,7 @@ ForEachMacros: | |||
160 | - 'for_each_mem_range_rev' | 182 | - 'for_each_mem_range_rev' |
161 | - 'for_each_migratetype_order' | 183 | - 'for_each_migratetype_order' |
162 | - 'for_each_msi_entry' | 184 | - 'for_each_msi_entry' |
185 | - 'for_each_msi_entry_safe' | ||
163 | - 'for_each_net' | 186 | - 'for_each_net' |
164 | - 'for_each_netdev' | 187 | - 'for_each_netdev' |
165 | - 'for_each_netdev_continue' | 188 | - 'for_each_netdev_continue' |
@@ -183,12 +206,14 @@ ForEachMacros: | |||
183 | - 'for_each_node_with_property' | 206 | - 'for_each_node_with_property' |
184 | - 'for_each_of_allnodes' | 207 | - 'for_each_of_allnodes' |
185 | - 'for_each_of_allnodes_from' | 208 | - 'for_each_of_allnodes_from' |
209 | - 'for_each_of_cpu_node' | ||
186 | - 'for_each_of_pci_range' | 210 | - 'for_each_of_pci_range' |
187 | - 'for_each_old_connector_in_state' | 211 | - 'for_each_old_connector_in_state' |
188 | - 'for_each_old_crtc_in_state' | 212 | - 'for_each_old_crtc_in_state' |
189 | - 'for_each_oldnew_connector_in_state' | 213 | - 'for_each_oldnew_connector_in_state' |
190 | - 'for_each_oldnew_crtc_in_state' | 214 | - 'for_each_oldnew_crtc_in_state' |
191 | - 'for_each_oldnew_plane_in_state' | 215 | - 'for_each_oldnew_plane_in_state' |
216 | - 'for_each_oldnew_plane_in_state_reverse' | ||
192 | - 'for_each_oldnew_private_obj_in_state' | 217 | - 'for_each_oldnew_private_obj_in_state' |
193 | - 'for_each_old_plane_in_state' | 218 | - 'for_each_old_plane_in_state' |
194 | - 'for_each_old_private_obj_in_state' | 219 | - 'for_each_old_private_obj_in_state' |
@@ -206,14 +231,17 @@ ForEachMacros: | |||
206 | - 'for_each_process' | 231 | - 'for_each_process' |
207 | - 'for_each_process_thread' | 232 | - 'for_each_process_thread' |
208 | - 'for_each_property_of_node' | 233 | - 'for_each_property_of_node' |
234 | - 'for_each_registered_fb' | ||
209 | - 'for_each_reserved_mem_region' | 235 | - 'for_each_reserved_mem_region' |
210 | - 'for_each_resv_unavail_range' | 236 | - 'for_each_rtd_codec_dai' |
237 | - 'for_each_rtd_codec_dai_rollback' | ||
211 | - 'for_each_rtdcom' | 238 | - 'for_each_rtdcom' |
212 | - 'for_each_rtdcom_safe' | 239 | - 'for_each_rtdcom_safe' |
213 | - 'for_each_set_bit' | 240 | - 'for_each_set_bit' |
214 | - 'for_each_set_bit_from' | 241 | - 'for_each_set_bit_from' |
215 | - 'for_each_sg' | 242 | - 'for_each_sg' |
216 | - 'for_each_sg_page' | 243 | - 'for_each_sg_page' |
244 | - 'for_each_sibling_event' | ||
217 | - '__for_each_thread' | 245 | - '__for_each_thread' |
218 | - 'for_each_thread' | 246 | - 'for_each_thread' |
219 | - 'for_each_zone' | 247 | - 'for_each_zone' |
@@ -251,6 +279,8 @@ ForEachMacros: | |||
251 | - 'hlist_nulls_for_each_entry_from' | 279 | - 'hlist_nulls_for_each_entry_from' |
252 | - 'hlist_nulls_for_each_entry_rcu' | 280 | - 'hlist_nulls_for_each_entry_rcu' |
253 | - 'hlist_nulls_for_each_entry_safe' | 281 | - 'hlist_nulls_for_each_entry_safe' |
282 | - 'i3c_bus_for_each_i2cdev' | ||
283 | - 'i3c_bus_for_each_i3cdev' | ||
254 | - 'ide_host_for_each_port' | 284 | - 'ide_host_for_each_port' |
255 | - 'ide_port_for_each_dev' | 285 | - 'ide_port_for_each_dev' |
256 | - 'ide_port_for_each_present_dev' | 286 | - 'ide_port_for_each_present_dev' |
@@ -267,11 +297,14 @@ ForEachMacros: | |||
267 | - 'kvm_for_each_memslot' | 297 | - 'kvm_for_each_memslot' |
268 | - 'kvm_for_each_vcpu' | 298 | - 'kvm_for_each_vcpu' |
269 | - 'list_for_each' | 299 | - 'list_for_each' |
300 | - 'list_for_each_codec' | ||
301 | - 'list_for_each_codec_safe' | ||
270 | - 'list_for_each_entry' | 302 | - 'list_for_each_entry' |
271 | - 'list_for_each_entry_continue' | 303 | - 'list_for_each_entry_continue' |
272 | - 'list_for_each_entry_continue_rcu' | 304 | - 'list_for_each_entry_continue_rcu' |
273 | - 'list_for_each_entry_continue_reverse' | 305 | - 'list_for_each_entry_continue_reverse' |
274 | - 'list_for_each_entry_from' | 306 | - 'list_for_each_entry_from' |
307 | - 'list_for_each_entry_from_rcu' | ||
275 | - 'list_for_each_entry_from_reverse' | 308 | - 'list_for_each_entry_from_reverse' |
276 | - 'list_for_each_entry_lockless' | 309 | - 'list_for_each_entry_lockless' |
277 | - 'list_for_each_entry_rcu' | 310 | - 'list_for_each_entry_rcu' |
@@ -291,6 +324,7 @@ ForEachMacros: | |||
291 | - 'media_device_for_each_intf' | 324 | - 'media_device_for_each_intf' |
292 | - 'media_device_for_each_link' | 325 | - 'media_device_for_each_link' |
293 | - 'media_device_for_each_pad' | 326 | - 'media_device_for_each_pad' |
327 | - 'nanddev_io_for_each_page' | ||
294 | - 'netdev_for_each_lower_dev' | 328 | - 'netdev_for_each_lower_dev' |
295 | - 'netdev_for_each_lower_private' | 329 | - 'netdev_for_each_lower_private' |
296 | - 'netdev_for_each_lower_private_rcu' | 330 | - 'netdev_for_each_lower_private_rcu' |
@@ -357,12 +391,14 @@ ForEachMacros: | |||
357 | - 'sk_nulls_for_each' | 391 | - 'sk_nulls_for_each' |
358 | - 'sk_nulls_for_each_from' | 392 | - 'sk_nulls_for_each_from' |
359 | - 'sk_nulls_for_each_rcu' | 393 | - 'sk_nulls_for_each_rcu' |
394 | - 'snd_array_for_each' | ||
360 | - 'snd_pcm_group_for_each_entry' | 395 | - 'snd_pcm_group_for_each_entry' |
361 | - 'snd_soc_dapm_widget_for_each_path' | 396 | - 'snd_soc_dapm_widget_for_each_path' |
362 | - 'snd_soc_dapm_widget_for_each_path_safe' | 397 | - 'snd_soc_dapm_widget_for_each_path_safe' |
363 | - 'snd_soc_dapm_widget_for_each_sink_path' | 398 | - 'snd_soc_dapm_widget_for_each_sink_path' |
364 | - 'snd_soc_dapm_widget_for_each_source_path' | 399 | - 'snd_soc_dapm_widget_for_each_source_path' |
365 | - 'tb_property_for_each' | 400 | - 'tb_property_for_each' |
401 | - 'tcf_exts_for_each_action' | ||
366 | - 'udp_portaddr_for_each_entry' | 402 | - 'udp_portaddr_for_each_entry' |
367 | - 'udp_portaddr_for_each_entry_rcu' | 403 | - 'udp_portaddr_for_each_entry_rcu' |
368 | - 'usb_hub_for_each_child' | 404 | - 'usb_hub_for_each_child' |
@@ -371,6 +407,11 @@ ForEachMacros: | |||
371 | - 'v4l2_m2m_for_each_dst_buf_safe' | 407 | - 'v4l2_m2m_for_each_dst_buf_safe' |
372 | - 'v4l2_m2m_for_each_src_buf' | 408 | - 'v4l2_m2m_for_each_src_buf' |
373 | - 'v4l2_m2m_for_each_src_buf_safe' | 409 | - 'v4l2_m2m_for_each_src_buf_safe' |
410 | - 'virtio_device_for_each_vq' | ||
411 | - 'xa_for_each' | ||
412 | - 'xas_for_each' | ||
413 | - 'xas_for_each_conflict' | ||
414 | - 'xas_for_each_marked' | ||
374 | - 'zorro_for_each_dev' | 415 | - 'zorro_for_each_dev' |
375 | 416 | ||
376 | #IncludeBlocks: Preserve # Unknown to clang-format-5.0 | 417 | #IncludeBlocks: Preserve # Unknown to clang-format-5.0 |
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus { | |||
235 | =========================================== | 235 | =========================================== |
236 | 236 | ||
237 | [1] ARM Linux Kernel documentation - CPUs bindings | 237 | [1] ARM Linux Kernel documentation - CPUs bindings |
238 | Documentation/devicetree/bindings/arm/cpus.txt | 238 | Documentation/devicetree/bindings/arm/cpus.yaml |
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus { | |||
684 | =========================================== | 684 | =========================================== |
685 | 685 | ||
686 | [1] ARM Linux Kernel documentation - CPUs bindings | 686 | [1] ARM Linux Kernel documentation - CPUs bindings |
687 | Documentation/devicetree/bindings/arm/cpus.txt | 687 | Documentation/devicetree/bindings/arm/cpus.yaml |
688 | 688 | ||
689 | [2] ARM Linux Kernel documentation - PSCI bindings | 689 | [2] ARM Linux Kernel documentation - PSCI bindings |
690 | Documentation/devicetree/bindings/arm/psci.txt | 690 | Documentation/devicetree/bindings/arm/psci.txt |
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller | |||
4 | Required properties: | 4 | Required properties: |
5 | 5 | ||
6 | - compatible: standard compatible string for a Primecell peripheral, | 6 | - compatible: standard compatible string for a Primecell peripheral, |
7 | see Documentation/devicetree/bindings/arm/primecell.txt | 7 | see Documentation/devicetree/bindings/arm/primecell.yaml |
8 | for more details | 8 | for more details |
9 | should be: "arm,sp810", "arm,primecell" | 9 | should be: "arm,sp810", "arm,primecell" |
10 | 10 | ||
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus { | |||
472 | 472 | ||
473 | =============================================================================== | 473 | =============================================================================== |
474 | [1] ARM Linux kernel documentation | 474 | [1] ARM Linux kernel documentation |
475 | Documentation/devicetree/bindings/arm/cpus.txt | 475 | Documentation/devicetree/bindings/arm/cpus.yaml |
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties: | |||
18 | Each clock is assigned an identifier and client nodes use this identifier | 18 | Each clock is assigned an identifier and client nodes use this identifier |
19 | to specify the clock which they consume. | 19 | to specify the clock which they consume. |
20 | 20 | ||
21 | All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. | 21 | All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>. |
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@ | |||
1 | * ARM PrimeCell Color LCD Controller PL110/PL111 | 1 | * ARM PrimeCell Color LCD Controller PL110/PL111 |
2 | 2 | ||
3 | See also Documentation/devicetree/bindings/arm/primecell.txt | 3 | See also Documentation/devicetree/bindings/arm/primecell.yaml |
4 | 4 | ||
5 | Required properties: | 5 | Required properties: |
6 | 6 | ||
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties: | |||
14 | 14 | ||
15 | "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K | 15 | "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K |
16 | SoCs (either from AP or CP), see | 16 | SoCs (either from AP or CP), see |
17 | Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt | ||
18 | and | ||
19 | Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt | 17 | Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt |
20 | for specific details about the offset property. | 18 | for specific details about the offset property. |
21 | 19 | ||
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes: | |||
78 | PPI affinity can be expressed as a single "ppi-partitions" node, | 78 | PPI affinity can be expressed as a single "ppi-partitions" node, |
79 | containing a set of sub-nodes, each with the following property: | 79 | containing a set of sub-nodes, each with the following property: |
80 | - affinity: Should be a list of phandles to CPU nodes (as described in | 80 | - affinity: Should be a list of phandles to CPU nodes (as described in |
81 | Documentation/devicetree/bindings/arm/cpus.txt). | 81 | Documentation/devicetree/bindings/arm/cpus.yaml). |
82 | 82 | ||
83 | GICv3 has one or more Interrupt Translation Services (ITS) that are | 83 | GICv3 has one or more Interrupt Translation Services (ITS) that are |
84 | used to route Message Signalled Interrupts (MSI) to the CPUs. | 84 | used to route Message Signalled Interrupts (MSI) to the CPUs. |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function | |||
55 | = EXAMPLE | 55 | = EXAMPLE |
56 | The following example represents the GLINK RPM node on a MSM8996 device, with | 56 | The following example represents the GLINK RPM node on a MSM8996 device, with |
57 | the function for the "rpm_request" channel defined, which is used for | 57 | the function for the "rpm_request" channel defined, which is used for |
58 | regualtors and root clocks. | 58 | regulators and root clocks. |
59 | 59 | ||
60 | apcs_glb: mailbox@9820000 { | 60 | apcs_glb: mailbox@9820000 { |
61 | compatible = "qcom,msm8996-apcs-hmss-global"; | 61 | compatible = "qcom,msm8996-apcs-hmss-global"; |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier. | |||
41 | - qcom,local-pid: | 41 | - qcom,local-pid: |
42 | Usage: required | 42 | Usage: required |
43 | Value type: <u32> | 43 | Value type: <u32> |
44 | Definition: specifies the identfier of the local endpoint of this edge | 44 | Definition: specifies the identifier of the local endpoint of this edge |
45 | 45 | ||
46 | - qcom,remote-pid: | 46 | - qcom,remote-pid: |
47 | Usage: required | 47 | Usage: required |
48 | Value type: <u32> | 48 | Value type: <u32> |
49 | Definition: specifies the identfier of the remote endpoint of this edge | 49 | Definition: specifies the identifier of the remote endpoint of this edge |
50 | 50 | ||
51 | = SUBNODES | 51 | = SUBNODES |
52 | Each SMP2P pair contain a set of inbound and outbound entries, these are | 52 | Each SMP2P pair contain a set of inbound and outbound entries, these are |
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options | |||
163 | be preserved until there actually is some text is output to the console. | 163 | be preserved until there actually is some text is output to the console. |
164 | This option causes fbcon to bind immediately to the fbdev device. | 164 | This option causes fbcon to bind immediately to the fbdev device. |
165 | 165 | ||
166 | 7. fbcon=logo-pos:<location> | ||
167 | |||
168 | The only possible 'location' is 'center' (without quotes), and when | ||
169 | given, the bootup logo is moved from the default top-left corner | ||
170 | location to the center of the framebuffer. If more than one logo is | ||
171 | displayed due to multiple CPUs, the collected line of logos is moved | ||
172 | as a whole. | ||
173 | |||
166 | C. Attaching, Detaching and Unloading | 174 | C. Attaching, Detaching and Unloading |
167 | 175 | ||
168 | Before going on to how to attach, detach and unload the framebuffer console, an | 176 | Before going on to how to attach, detach and unload the framebuffer console, an |
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..59e86de662cd 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,19 +11,19 @@ Contents: | |||
11 | batman-adv | 11 | batman-adv |
12 | can | 12 | can |
13 | can_ucan_protocol | 13 | can_ucan_protocol |
14 | dpaa2/index | 14 | device_drivers/freescale/dpaa2/index |
15 | e100 | 15 | device_drivers/intel/e100 |
16 | e1000 | 16 | device_drivers/intel/e1000 |
17 | e1000e | 17 | device_drivers/intel/e1000e |
18 | fm10k | 18 | device_drivers/intel/fm10k |
19 | igb | 19 | device_drivers/intel/igb |
20 | igbvf | 20 | device_drivers/intel/igbvf |
21 | ixgb | 21 | device_drivers/intel/ixgb |
22 | ixgbe | 22 | device_drivers/intel/ixgbe |
23 | ixgbevf | 23 | device_drivers/intel/ixgbevf |
24 | i40e | 24 | device_drivers/intel/i40e |
25 | iavf | 25 | device_drivers/intel/iavf |
26 | ice | 26 | device_drivers/intel/ice |
27 | kapi | 27 | kapi |
28 | z8530book | 28 | z8530book |
29 | msg_zerocopy | 29 | msg_zerocopy |
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows: | |||
1000 | size should be set when the call is begun. tx_total_len may not be less | 1000 | size should be set when the call is begun. tx_total_len may not be less |
1001 | than zero. | 1001 | than zero. |
1002 | 1002 | ||
1003 | (*) Check to see the completion state of a call so that the caller can assess | ||
1004 | whether it needs to be retried. | ||
1005 | |||
1006 | enum rxrpc_call_completion { | ||
1007 | RXRPC_CALL_SUCCEEDED, | ||
1008 | RXRPC_CALL_REMOTELY_ABORTED, | ||
1009 | RXRPC_CALL_LOCALLY_ABORTED, | ||
1010 | RXRPC_CALL_LOCAL_ERROR, | ||
1011 | RXRPC_CALL_NETWORK_ERROR, | ||
1012 | }; | ||
1013 | |||
1014 | int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, | ||
1015 | enum rxrpc_call_completion *_compl, | ||
1016 | u32 *_abort_code); | ||
1017 | |||
1018 | On return, -EINPROGRESS will be returned if the call is still ongoing; if | ||
1019 | it is finished, *_compl will be set to indicate the manner of completion, | ||
1020 | *_abort_code will be set to any abort code that occurred. 0 will be | ||
1021 | returned on a successful completion, -ECONNABORTED will be returned if the | ||
1022 | client failed due to a remote abort and anything else will return an | ||
1023 | appropriate error code. | ||
1024 | |||
1025 | The caller should look at this information to decide if it's worth | ||
1026 | retrying the call. | ||
1027 | |||
1028 | (*) Retry a client call. | ||
1029 | |||
1030 | int rxrpc_kernel_retry_call(struct socket *sock, | ||
1031 | struct rxrpc_call *call, | ||
1032 | struct sockaddr_rxrpc *srx, | ||
1033 | struct key *key); | ||
1034 | |||
1035 | This attempts to partially reinitialise a call and submit it again while | ||
1036 | reusing the original call's Tx queue to avoid the need to repackage and | ||
1037 | re-encrypt the data to be sent. call indicates the call to retry, srx the | ||
1038 | new address to send it to and key the encryption key to use for signing or | ||
1039 | encrypting the packets. | ||
1040 | |||
1041 | For this to work, the first Tx data packet must still be in the transmit | ||
1042 | queue, and currently this is only permitted for local and network errors | ||
1043 | and the call must not have been aborted. Any partially constructed Tx | ||
1044 | packet is left as is and can continue being filled afterwards. | ||
1045 | |||
1046 | It returns 0 if the call was requeued and an error otherwise. | ||
1047 | |||
1048 | (*) Get call RTT. | 1003 | (*) Get call RTT. |
1049 | 1004 | ||
1050 | u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call); | 1005 | u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call); |
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 486ab33acc3a..c5642f430d2e 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -366,6 +366,27 @@ to the accept queue. | |||
366 | 366 | ||
367 | TCP Fast Open | 367 | TCP Fast Open |
368 | ============= | 368 | ============= |
369 | * TcpEstabResets | ||
370 | Defined in `RFC1213 tcpEstabResets`_. | ||
371 | |||
372 | .. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48 | ||
373 | |||
374 | * TcpAttemptFails | ||
375 | Defined in `RFC1213 tcpAttemptFails`_. | ||
376 | |||
377 | .. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48 | ||
378 | |||
379 | * TcpOutRsts | ||
380 | Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates | ||
381 | the 'segments sent containing the RST flag', but in linux kernel, this | ||
382 | couner indicates the segments kerenl tried to send. The sending | ||
383 | process might be failed due to some errors (e.g. memory alloc failed). | ||
384 | |||
385 | .. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52 | ||
386 | |||
387 | |||
388 | TCP Fast Path | ||
389 | ============ | ||
369 | When kernel receives a TCP packet, it has two paths to handler the | 390 | When kernel receives a TCP packet, it has two paths to handler the |
370 | packet, one is fast path, another is slow path. The comment in kernel | 391 | packet, one is fast path, another is slow path. The comment in kernel |
371 | code provides a good explanation of them, I pasted them below:: | 392 | code provides a good explanation of them, I pasted them below:: |
@@ -413,7 +434,6 @@ increase 1. | |||
413 | 434 | ||
414 | TCP abort | 435 | TCP abort |
415 | ========= | 436 | ========= |
416 | |||
417 | * TcpExtTCPAbortOnData | 437 | * TcpExtTCPAbortOnData |
418 | 438 | ||
419 | It means TCP layer has data in flight, but need to close the | 439 | It means TCP layer has data in flight, but need to close the |
@@ -589,7 +609,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP | |||
589 | stack of kernel will increase TcpExtTCPSACKReorder for both of the | 609 | stack of kernel will increase TcpExtTCPSACKReorder for both of the |
590 | above scenarios. | 610 | above scenarios. |
591 | 611 | ||
592 | |||
593 | DSACK | 612 | DSACK |
594 | ===== | 613 | ===== |
595 | The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report | 614 | The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report |
@@ -612,8 +631,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a | |||
612 | DSACK to the sender. | 631 | DSACK to the sender. |
613 | 632 | ||
614 | * TcpExtTCPDSACKRecv | 633 | * TcpExtTCPDSACKRecv |
615 | 634 | The TCP stack receives a DSACK, which indicates an acknowledged | |
616 | The TCP stack receives a DSACK, which indicate an acknowledged | ||
617 | duplicate packet is received. | 635 | duplicate packet is received. |
618 | 636 | ||
619 | * TcpExtTCPDSACKOfoRecv | 637 | * TcpExtTCPDSACKOfoRecv |
@@ -621,6 +639,56 @@ duplicate packet is received. | |||
621 | The TCP stack receives a DSACK, which indicate an out of order | 639 | The TCP stack receives a DSACK, which indicate an out of order |
622 | duplicate packet is received. | 640 | duplicate packet is received. |
623 | 641 | ||
642 | invalid SACK and DSACK | ||
643 | ==================== | ||
644 | When a SACK (or DSACK) block is invalid, a corresponding counter would | ||
645 | be updated. The validation method is base on the start/end sequence | ||
646 | number of the SACK block. For more details, please refer the comment | ||
647 | of the function tcp_is_sackblock_valid in the kernel source code. A | ||
648 | SACK option could have up to 4 blocks, they are checked | ||
649 | individually. E.g., if 3 blocks of a SACk is invalid, the | ||
650 | corresponding counter would be updated 3 times. The comment of the | ||
651 | `Add counters for discarded SACK blocks`_ patch has additional | ||
652 | explaination: | ||
653 | |||
654 | .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 | ||
655 | |||
656 | * TcpExtTCPSACKDiscard | ||
657 | This counter indicates how many SACK blocks are invalid. If the invalid | ||
658 | SACK block is caused by ACK recording, the TCP stack will only ignore | ||
659 | it and won't update this counter. | ||
660 | |||
661 | * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo | ||
662 | When a DSACK block is invalid, one of these two counters would be | ||
663 | updated. Which counter will be updated depends on the undo_marker flag | ||
664 | of the TCP socket. If the undo_marker is not set, the TCP stack isn't | ||
665 | likely to re-transmit any packets, and we still receive an invalid | ||
666 | DSACK block, the reason might be that the packet is duplicated in the | ||
667 | middle of the network. In such scenario, TcpExtTCPDSACKIgnoredNoUndo | ||
668 | will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld | ||
669 | will be updated. As implied in its name, it might be an old packet. | ||
670 | |||
671 | SACK shift | ||
672 | ========= | ||
673 | The linux networking stack stores data in sk_buff struct (skb for | ||
674 | short). If a SACK block acrosses multiple skb, the TCP stack will try | ||
675 | to re-arrange data in these skb. E.g. if a SACK block acknowledges seq | ||
676 | 10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and | ||
677 | 15 in skb2 would be moved to skb1. This operation is 'shift'. If a | ||
678 | SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has | ||
679 | seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be | ||
680 | discard, this operation is 'merge'. | ||
681 | |||
682 | * TcpExtTCPSackShifted | ||
683 | A skb is shifted | ||
684 | |||
685 | * TcpExtTCPSackMerged | ||
686 | A skb is merged | ||
687 | |||
688 | * TcpExtTCPSackShiftFallback | ||
689 | A skb should be shifted or merged, but the TCP stack doesn't do it for | ||
690 | some reasons. | ||
691 | |||
624 | TCP out of order | 692 | TCP out of order |
625 | ================ | 693 | ================ |
626 | * TcpExtTCPOFOQueue | 694 | * TcpExtTCPOFOQueue |
@@ -721,6 +789,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_). | |||
721 | .. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 | 789 | .. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 |
722 | .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 | 790 | .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 |
723 | 791 | ||
792 | TCP receive window | ||
793 | ================= | ||
794 | * TcpExtTCPWantZeroWindowAdv | ||
795 | Depending on current memory usage, the TCP stack tries to set receive | ||
796 | window to zero. But the receive window might still be a no-zero | ||
797 | value. For example, if the previous window size is 10, and the TCP | ||
798 | stack receives 3 bytes, the current window size would be 7 even if the | ||
799 | window size calculated by the memory usage is zero. | ||
800 | |||
801 | * TcpExtTCPToZeroWindowAdv | ||
802 | The TCP receive window is set to zero from a no-zero value. | ||
803 | |||
804 | * TcpExtTCPFromZeroWindowAdv | ||
805 | The TCP receive window is set to no-zero value from zero. | ||
806 | |||
807 | |||
808 | Delayed ACK | ||
809 | ========== | ||
810 | The TCP Delayed ACK is a technique which is used for reducing the | ||
811 | packet count in the network. For more details, please refer the | ||
812 | `Delayed ACK wiki`_ | ||
813 | |||
814 | .. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment | ||
815 | |||
816 | * TcpExtDelayedACKs | ||
817 | A delayed ACK timer expires. The TCP stack will send a pure ACK packet | ||
818 | and exit the delayed ACK mode. | ||
819 | |||
820 | * TcpExtDelayedACKLocked | ||
821 | A delayed ACK timer expires, but the TCP stack can't send an ACK | ||
822 | immediately due to the socket is locked by a userspace program. The | ||
823 | TCP stack will send a pure ACK later (after the userspace program | ||
824 | unlock the socket). When the TCP stack sends the pure ACK later, the | ||
825 | TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK | ||
826 | mode. | ||
827 | |||
828 | * TcpExtDelayedACKLost | ||
829 | It will be updated when the TCP stack receives a packet which has been | ||
830 | ACKed. A Delayed ACK loss might cause this issue, but it would also be | ||
831 | triggered by other reasons, such as a packet is duplicated in the | ||
832 | network. | ||
833 | |||
834 | Tail Loss Probe (TLP) | ||
835 | =================== | ||
836 | TLP is an algorithm which is used to detect TCP packet loss. For more | ||
837 | details, please refer the `TLP paper`_. | ||
838 | |||
839 | .. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01 | ||
840 | |||
841 | * TcpExtTCPLossProbes | ||
842 | A TLP probe packet is sent. | ||
843 | |||
844 | * TcpExtTCPLossProbeRecovery | ||
845 | A packet loss is detected and recovered by TLP. | ||
724 | 846 | ||
725 | examples | 847 | examples |
726 | ======== | 848 | ======== |
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..9d1432e0aaa8 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set. | |||
417 | 417 | ||
418 | Hardware time stamping must also be initialized for each device driver | 418 | Hardware time stamping must also be initialized for each device driver |
419 | that is expected to do hardware time stamping. The parameter is defined in | 419 | that is expected to do hardware time stamping. The parameter is defined in |
420 | /include/linux/net_tstamp.h as: | 420 | include/uapi/linux/net_tstamp.h as: |
421 | 421 | ||
422 | struct hwtstamp_config { | 422 | struct hwtstamp_config { |
423 | int flags; /* no flags defined right now, must be zero */ | 423 | int flags; /* no flags defined right now, must be zero */ |
@@ -487,7 +487,7 @@ enum { | |||
487 | HWTSTAMP_FILTER_PTP_V1_L4_EVENT, | 487 | HWTSTAMP_FILTER_PTP_V1_L4_EVENT, |
488 | 488 | ||
489 | /* for the complete list of values, please check | 489 | /* for the complete list of values, please check |
490 | * the include file /include/linux/net_tstamp.h | 490 | * the include file include/uapi/linux/net_tstamp.h |
491 | */ | 491 | */ |
492 | }; | 492 | }; |
493 | 493 | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index a592b9992b46..8e2c82f4c72f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon* | |||
3471 | F: drivers/i2c/busses/i2c-thunderx* | 3471 | F: drivers/i2c/busses/i2c-thunderx* |
3472 | 3472 | ||
3473 | CAVIUM LIQUIDIO NETWORK DRIVER | 3473 | CAVIUM LIQUIDIO NETWORK DRIVER |
3474 | M: Derek Chickles <derek.chickles@caviumnetworks.com> | 3474 | M: Derek Chickles <dchickles@marvell.com> |
3475 | M: Satanand Burla <satananda.burla@caviumnetworks.com> | 3475 | M: Satanand Burla <sburla@marvell.com> |
3476 | M: Felix Manlunas <felix.manlunas@caviumnetworks.com> | 3476 | M: Felix Manlunas <fmanlunas@marvell.com> |
3477 | M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com> | ||
3478 | L: netdev@vger.kernel.org | 3477 | L: netdev@vger.kernel.org |
3479 | W: http://www.cavium.com | 3478 | W: http://www.cavium.com |
3480 | S: Supported | 3479 | S: Supported |
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 5 | 2 | VERSION = 5 |
3 | PATCHLEVEL = 0 | 3 | PATCHLEVEL = 0 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc2 | 5 | EXTRAVERSION = -rc3 |
6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION | |||
955 | endif | 955 | endif |
956 | endif | 956 | endif |
957 | 957 | ||
958 | PHONY += prepare0 | ||
958 | 959 | ||
959 | ifeq ($(KBUILD_EXTMOD),) | 960 | ifeq ($(KBUILD_EXTMOD),) |
960 | core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ | 961 | core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ |
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc | |||
1061 | # archprepare is used in arch Makefiles and when processed asm symlink, | 1062 | # archprepare is used in arch Makefiles and when processed asm symlink, |
1062 | # version.h and scripts_basic is processed / created. | 1063 | # version.h and scripts_basic is processed / created. |
1063 | 1064 | ||
1064 | # Listed in dependency order | 1065 | PHONY += prepare archprepare prepare1 prepare2 prepare3 |
1065 | PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 | ||
1066 | 1066 | ||
1067 | # prepare3 is used to check if we are building in a separate output directory, | 1067 | # prepare3 is used to check if we are building in a separate output directory, |
1068 | # and if so do: | 1068 | # and if so do: |
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) | |||
1360 | mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) | 1360 | mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) |
1361 | mrproper-dirs := $(addprefix _mrproper_,scripts) | 1361 | mrproper-dirs := $(addprefix _mrproper_,scripts) |
1362 | 1362 | ||
1363 | PHONY += $(mrproper-dirs) mrproper archmrproper | 1363 | PHONY += $(mrproper-dirs) mrproper |
1364 | $(mrproper-dirs): | 1364 | $(mrproper-dirs): |
1365 | $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) | 1365 | $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) |
1366 | 1366 | ||
1367 | mrproper: clean archmrproper $(mrproper-dirs) | 1367 | mrproper: clean $(mrproper-dirs) |
1368 | $(call cmd,rmdirs) | 1368 | $(call cmd,rmdirs) |
1369 | $(call cmd,rmfiles) | 1369 | $(call cmd,rmfiles) |
1370 | 1370 | ||
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index eb43e09c1980..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -60,8 +60,6 @@ | |||
60 | 60 | ||
61 | #ifdef CONFIG_KASAN_SW_TAGS | 61 | #ifdef CONFIG_KASAN_SW_TAGS |
62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) | 62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) |
63 | #else | ||
64 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
65 | #endif | 63 | #endif |
66 | 64 | ||
67 | #ifndef __ASSEMBLY__ | 65 | #ifndef __ASSEMBLY__ |
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index ac352accb3d9..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void) | |||
60 | * later determine that kpti is required, then | 60 | * later determine that kpti is required, then |
61 | * kpti_install_ng_mappings() will make them non-global. | 61 | * kpti_install_ng_mappings() will make them non-global. |
62 | */ | 62 | */ |
63 | if (arm64_kernel_unmapped_at_el0()) | ||
64 | return true; | ||
65 | |||
63 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) | 66 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) |
64 | return arm64_kernel_unmapped_at_el0(); | 67 | return false; |
65 | 68 | ||
66 | /* | 69 | /* |
67 | * KASLR is enabled so we're going to be enabling kpti on non-broken | 70 | * KASLR is enabled so we're going to be enabling kpti on non-broken |
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..ba6b41790fcd 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | 16 | ||
17 | #include <asm/cacheflush.h> | ||
17 | #include <asm/fixmap.h> | 18 | #include <asm/fixmap.h> |
18 | #include <asm/kernel-pgtable.h> | 19 | #include <asm/kernel-pgtable.h> |
19 | #include <asm/memory.h> | 20 | #include <asm/memory.h> |
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) | |||
43 | return ret; | 44 | return ret; |
44 | } | 45 | } |
45 | 46 | ||
46 | static __init const u8 *get_cmdline(void *fdt) | 47 | static __init const u8 *kaslr_get_cmdline(void *fdt) |
47 | { | 48 | { |
48 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; | 49 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; |
49 | 50 | ||
@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
109 | * Check if 'nokaslr' appears on the command line, and | 110 | * Check if 'nokaslr' appears on the command line, and |
110 | * return 0 if that is the case. | 111 | * return 0 if that is the case. |
111 | */ | 112 | */ |
112 | cmdline = get_cmdline(fdt); | 113 | cmdline = kaslr_get_cmdline(fdt); |
113 | str = strstr(cmdline, "nokaslr"); | 114 | str = strstr(cmdline, "nokaslr"); |
114 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) | 115 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) |
115 | return 0; | 116 | return 0; |
@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
169 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; | 170 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; |
170 | module_alloc_base &= PAGE_MASK; | 171 | module_alloc_base &= PAGE_MASK; |
171 | 172 | ||
173 | __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); | ||
174 | __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); | ||
175 | |||
172 | return offset; | 176 | return offset; |
173 | } | 177 | } |
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/ | |||
37 | 37 | ||
38 | boot := arch/h8300/boot | 38 | boot := arch/h8300/boot |
39 | 39 | ||
40 | archmrproper: | ||
41 | |||
42 | archclean: | 40 | archclean: |
43 | $(Q)$(MAKE) $(clean)=$(boot) | 41 | $(Q)$(MAKE) $(clean)=$(boot) |
44 | 42 | ||
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig | |||
16 | NM := $(CROSS_COMPILE)nm -B | 16 | NM := $(CROSS_COMPILE)nm -B |
17 | READELF := $(CROSS_COMPILE)readelf | 17 | READELF := $(CROSS_COMPILE)readelf |
18 | 18 | ||
19 | export AWK | ||
20 | |||
21 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ | 19 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ |
22 | 20 | ||
23 | OBJCOPYFLAGS := --strip-all | 21 | OBJCOPYFLAGS := --strip-all |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..0d14f51d0002 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3155,6 +3155,7 @@ config MIPS32_O32 | |||
3155 | config MIPS32_N32 | 3155 | config MIPS32_N32 |
3156 | bool "Kernel support for n32 binaries" | 3156 | bool "Kernel support for n32 binaries" |
3157 | depends on 64BIT | 3157 | depends on 64BIT |
3158 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | ||
3158 | select COMPAT | 3159 | select COMPAT |
3159 | select MIPS32_COMPAT | 3160 | select MIPS32_COMPAT |
3160 | select SYSVIPC_COMPAT if SYSVIPC | 3161 | select SYSVIPC_COMPAT if SYSVIPC |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void) | |||
173 | pm_power_off = bcm47xx_machine_halt; | 173 | pm_power_off = bcm47xx_machine_halt; |
174 | } | 174 | } |
175 | 175 | ||
176 | #ifdef CONFIG_BCM47XX_BCMA | ||
177 | static struct device * __init bcm47xx_setup_device(void) | ||
178 | { | ||
179 | struct device *dev; | ||
180 | int err; | ||
181 | |||
182 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
183 | if (!dev) | ||
184 | return NULL; | ||
185 | |||
186 | err = dev_set_name(dev, "bcm47xx_soc"); | ||
187 | if (err) { | ||
188 | pr_err("Failed to set SoC device name: %d\n", err); | ||
189 | kfree(dev); | ||
190 | return NULL; | ||
191 | } | ||
192 | |||
193 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
194 | if (err) | ||
195 | pr_err("Failed to set SoC DMA mask: %d\n", err); | ||
196 | |||
197 | return dev; | ||
198 | } | ||
199 | #endif | ||
200 | |||
176 | /* | 201 | /* |
177 | * This finishes bus initialization doing things that were not possible without | 202 | * This finishes bus initialization doing things that were not possible without |
178 | * kmalloc. Make sure to call it late enough (after mm_init). | 203 | * kmalloc. Make sure to call it late enough (after mm_init). |
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) | |||
183 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { | 208 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { |
184 | int err; | 209 | int err; |
185 | 210 | ||
211 | bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); | ||
212 | if (!bcm47xx_bus.bcma.dev) | ||
213 | panic("Failed to setup SoC device\n"); | ||
214 | |||
186 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); | 215 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); |
187 | if (err) | 216 | if (err) |
188 | panic("Failed to initialize BCMA bus (err %d)", err); | 217 | panic("Failed to initialize BCMA bus (err %d)", err); |
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) | |||
235 | #endif | 264 | #endif |
236 | #ifdef CONFIG_BCM47XX_BCMA | 265 | #ifdef CONFIG_BCM47XX_BCMA |
237 | case BCM47XX_BUS_TYPE_BCMA: | 266 | case BCM47XX_BUS_TYPE_BCMA: |
267 | if (device_register(bcm47xx_bus.bcma.dev)) | ||
268 | pr_err("Failed to register SoC device\n"); | ||
238 | bcma_bus_register(&bcm47xx_bus.bcma.bus); | 269 | bcma_bus_register(&bcm47xx_bus.bcma.bus); |
239 | break; | 270 | break; |
240 | #endif | 271 | #endif |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored) | |||
98 | " sync \n" | 98 | " sync \n" |
99 | " synci ($0) \n"); | 99 | " synci ($0) \n"); |
100 | 100 | ||
101 | relocated_kexec_smp_wait(NULL); | 101 | kexec_reboot(); |
102 | } | 102 | } |
103 | #endif | 103 | #endif |
104 | 104 | ||
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y | |||
66 | # CONFIG_SERIAL_8250_PCI is not set | 66 | # CONFIG_SERIAL_8250_PCI is not set |
67 | CONFIG_SERIAL_8250_NR_UARTS=1 | 67 | CONFIG_SERIAL_8250_NR_UARTS=1 |
68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 | 68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 |
69 | CONFIG_SERIAL_OF_PLATFORM=y | ||
69 | CONFIG_SERIAL_AR933X=y | 70 | CONFIG_SERIAL_AR933X=y |
70 | CONFIG_SERIAL_AR933X_CONSOLE=y | 71 | CONFIG_SERIAL_AR933X_CONSOLE=y |
71 | # CONFIG_HW_RANDOM is not set | 72 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@ | |||
18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) | 18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) |
19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) | 19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) |
20 | 20 | ||
21 | #define MIPS_CPU_TIMER_IRQ 7 | ||
22 | |||
23 | #define MAX_IM 5 | 21 | #define MAX_IM 5 |
24 | 22 | ||
25 | #endif /* _FALCON_IRQ__ */ | 23 | #endif /* _FALCON_IRQ__ */ |
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@ | |||
19 | 19 | ||
20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) | 20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) |
21 | 21 | ||
22 | #define MIPS_CPU_TIMER_IRQ 7 | ||
23 | |||
24 | #define MAX_IM 5 | 22 | #define MAX_IM 5 |
25 | 23 | ||
26 | #endif | 24 | #endif |
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void) | |||
74 | get_order(VDMA_PGTBL_SIZE)); | 74 | get_order(VDMA_PGTBL_SIZE)); |
75 | BUG_ON(!pgtbl); | 75 | BUG_ON(!pgtbl); |
76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); | 76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); |
77 | pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); | 77 | pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Clear the R4030 translation table | 80 | * Clear the R4030 translation table |
81 | */ | 81 | */ |
82 | vdma_pgtbl_init(); | 82 | vdma_pgtbl_init(); |
83 | 83 | ||
84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); | 84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, |
85 | CPHYSADDR((unsigned long)pgtbl)); | ||
85 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); | 86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); |
86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); | 87 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); |
87 | 88 | ||
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { | |||
224 | .irq_set_type = ltq_eiu_settype, | 224 | .irq_set_type = ltq_eiu_settype, |
225 | }; | 225 | }; |
226 | 226 | ||
227 | static void ltq_hw_irqdispatch(int module) | 227 | static void ltq_hw_irq_handler(struct irq_desc *desc) |
228 | { | 228 | { |
229 | int module = irq_desc_get_irq(desc) - 2; | ||
229 | u32 irq; | 230 | u32 irq; |
231 | int hwirq; | ||
230 | 232 | ||
231 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); | 233 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); |
232 | if (irq == 0) | 234 | if (irq == 0) |
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) | |||
237 | * other bits might be bogus | 239 | * other bits might be bogus |
238 | */ | 240 | */ |
239 | irq = __fls(irq); | 241 | irq = __fls(irq); |
240 | do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); | 242 | hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); |
243 | generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); | ||
241 | 244 | ||
242 | /* if this is a EBU irq, we need to ack it or get a deadlock */ | 245 | /* if this is a EBU irq, we need to ack it or get a deadlock */ |
243 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) | 246 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) |
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) | |||
245 | LTQ_EBU_PCC_ISTAT); | 248 | LTQ_EBU_PCC_ISTAT); |
246 | } | 249 | } |
247 | 250 | ||
248 | #define DEFINE_HWx_IRQDISPATCH(x) \ | ||
249 | static void ltq_hw ## x ## _irqdispatch(void) \ | ||
250 | { \ | ||
251 | ltq_hw_irqdispatch(x); \ | ||
252 | } | ||
253 | DEFINE_HWx_IRQDISPATCH(0) | ||
254 | DEFINE_HWx_IRQDISPATCH(1) | ||
255 | DEFINE_HWx_IRQDISPATCH(2) | ||
256 | DEFINE_HWx_IRQDISPATCH(3) | ||
257 | DEFINE_HWx_IRQDISPATCH(4) | ||
258 | |||
259 | #if MIPS_CPU_TIMER_IRQ == 7 | ||
260 | static void ltq_hw5_irqdispatch(void) | ||
261 | { | ||
262 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
263 | } | ||
264 | #else | ||
265 | DEFINE_HWx_IRQDISPATCH(5) | ||
266 | #endif | ||
267 | |||
268 | static void ltq_hw_irq_handler(struct irq_desc *desc) | ||
269 | { | ||
270 | ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); | ||
271 | } | ||
272 | |||
273 | asmlinkage void plat_irq_dispatch(void) | ||
274 | { | ||
275 | unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; | ||
276 | int irq; | ||
277 | |||
278 | if (!pending) { | ||
279 | spurious_interrupt(); | ||
280 | return; | ||
281 | } | ||
282 | |||
283 | pending >>= CAUSEB_IP; | ||
284 | while (pending) { | ||
285 | irq = fls(pending) - 1; | ||
286 | do_IRQ(MIPS_CPU_IRQ_BASE + irq); | ||
287 | pending &= ~BIT(irq); | ||
288 | } | ||
289 | } | ||
290 | |||
291 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) | 251 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) |
292 | { | 252 | { |
293 | struct irq_chip *chip = &ltq_irq_type; | 253 | struct irq_chip *chip = &ltq_irq_type; |
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
343 | for (i = 0; i < MAX_IM; i++) | 303 | for (i = 0; i < MAX_IM; i++) |
344 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); | 304 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); |
345 | 305 | ||
346 | if (cpu_has_vint) { | ||
347 | pr_info("Setting up vectored interrupts\n"); | ||
348 | set_vi_handler(2, ltq_hw0_irqdispatch); | ||
349 | set_vi_handler(3, ltq_hw1_irqdispatch); | ||
350 | set_vi_handler(4, ltq_hw2_irqdispatch); | ||
351 | set_vi_handler(5, ltq_hw3_irqdispatch); | ||
352 | set_vi_handler(6, ltq_hw4_irqdispatch); | ||
353 | set_vi_handler(7, ltq_hw5_irqdispatch); | ||
354 | } | ||
355 | |||
356 | ltq_domain = irq_domain_add_linear(node, | 306 | ltq_domain = irq_domain_add_linear(node, |
357 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, | 307 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, |
358 | &irq_domain_ops, 0); | 308 | &irq_domain_ops, 0); |
359 | 309 | ||
360 | #ifndef CONFIG_MIPS_MT_SMP | ||
361 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||
362 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
363 | #else | ||
364 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | | ||
365 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
366 | #endif | ||
367 | |||
368 | /* tell oprofile which irq to use */ | 310 | /* tell oprofile which irq to use */ |
369 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); | 311 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
370 | 312 | ||
371 | /* | ||
372 | * if the timer irq is not one of the mips irqs we need to | ||
373 | * create a mapping | ||
374 | */ | ||
375 | if (MIPS_CPU_TIMER_IRQ != 7) | ||
376 | irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); | ||
377 | |||
378 | /* the external interrupts are optional and xway only */ | 313 | /* the external interrupts are optional and xway only */ |
379 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); | 314 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); |
380 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { | 315 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { |
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int); | |||
411 | 346 | ||
412 | unsigned int get_c0_compare_int(void) | 347 | unsigned int get_c0_compare_int(void) |
413 | { | 348 | { |
414 | return MIPS_CPU_TIMER_IRQ; | 349 | return CP0_LEGACY_COMPARE_IRQ; |
415 | } | 350 | } |
416 | 351 | ||
417 | static struct of_device_id __initdata of_irq_ids[] = { | 352 | static struct of_device_id __initdata of_irq_ids[] = { |
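Editor's note: the new plat_irq_dispatch() above serves the pending interrupt bits from the highest line downwards, clearing each bit after dispatching it. The following standalone sketch shows that loop shape in userspace; handle_line(), the sample bitmask, and the use of GCC's __builtin_clz() in place of the kernel's fls() are assumptions for illustration only.

#include <stdio.h>

static void handle_line(int line)
{
	printf("dispatching line %d\n", line);	/* stands in for do_IRQ() */
}

int main(void)
{
	unsigned int pending = 0x2c;		/* lines 2, 3 and 5 pending */

	while (pending) {
		int line = 31 - __builtin_clz(pending);	/* like fls(pending) - 1 */

		handle_line(line);
		pending &= ~(1u << line);	/* clear the bit just served */
	}
	return 0;
}

Run as-is it prints lines 5, 3, 2: highest priority first, exactly the order the dispatch loop above produces.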
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 2a5bb849b10e..288b58b00dc8 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c | |||
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) | |||
369 | int irq; | 369 | int irq; |
370 | struct irq_chip *msi; | 370 | struct irq_chip *msi; |
371 | 371 | ||
372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | 372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { |
373 | return 0; | ||
374 | } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | ||
373 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; | 375 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; |
374 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; | 376 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; |
375 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; | 377 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; |
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 0a935c136ec2..ac3482882cf9 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile | |||
@@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S | |||
3 | 3 | ||
4 | KBUILD_DEFCONFIG := defconfig | 4 | KBUILD_DEFCONFIG := defconfig |
5 | 5 | ||
6 | comma = , | ||
7 | |||
8 | |||
9 | ifdef CONFIG_FUNCTION_TRACER | 6 | ifdef CONFIG_FUNCTION_TRACER |
10 | arch-y += -malways-save-lp -mno-relax | 7 | arch-y += -malways-save-lp -mno-relax |
11 | endif | 8 | endif |
@@ -54,8 +51,6 @@ endif | |||
54 | boot := arch/nds32/boot | 51 | boot := arch/nds32/boot |
55 | core-y += $(boot)/dts/ | 52 | core-y += $(boot)/dts/ |
56 | 53 | ||
57 | .PHONY: FORCE | ||
58 | |||
59 | Image: vmlinux | 54 | Image: vmlinux |
60 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 55 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
61 | 56 | ||
@@ -68,9 +63,6 @@ prepare: vdso_prepare | |||
68 | vdso_prepare: prepare0 | 63 | vdso_prepare: prepare0 |
69 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h | 64 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h |
70 | 65 | ||
71 | CLEAN_FILES += include/asm-nds32/constants.h* | ||
72 | |||
73 | # We use MRPROPER_FILES and CLEAN_FILES now | ||
74 | archclean: | 66 | archclean: |
75 | $(Q)$(MAKE) $(clean)=$(boot) | 67 | $(Q)$(MAKE) $(clean)=$(boot) |
76 | 68 | ||
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index 70e06d34006c..bf10141c7426 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile | |||
@@ -20,7 +20,6 @@ | |||
20 | KBUILD_DEFCONFIG := or1ksim_defconfig | 20 | KBUILD_DEFCONFIG := or1ksim_defconfig |
21 | 21 | ||
22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | 22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S |
23 | LDFLAGS_vmlinux := | ||
24 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | 23 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) |
25 | 24 | ||
26 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ | 25 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ |
@@ -50,5 +49,3 @@ else | |||
50 | BUILTIN_DTB := n | 49 | BUILTIN_DTB := n |
51 | endif | 50 | endif |
52 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ | 51 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ |
53 | |||
54 | all: vmlinux | ||
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h | |||
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
50 | PERF_REG_POWERPC_MMCRA, | ||
50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
51 | }; | 52 | }; |
52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 57deb1e9ffea..20cc816b3508 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -852,11 +852,12 @@ start_here: | |||
852 | 852 | ||
853 | /* set up the PTE pointers for the Abatron bdiGDB. | 853 | /* set up the PTE pointers for the Abatron bdiGDB. |
854 | */ | 854 | */ |
855 | tovirt(r6,r6) | ||
856 | lis r5, abatron_pteptrs@h | 855 | lis r5, abatron_pteptrs@h |
857 | ori r5, r5, abatron_pteptrs@l | 856 | ori r5, r5, abatron_pteptrs@l |
858 | stw r5, 0xf0(0) /* Must match your Abatron config file */ | 857 | stw r5, 0xf0(0) /* Must match your Abatron config file */ |
859 | tophys(r5,r5) | 858 | tophys(r5,r5) |
859 | lis r6, swapper_pg_dir@h | ||
860 | ori r6, r6, swapper_pg_dir@l | ||
860 | stw r6, 0(r5) | 861 | stw r6, 0(r5) |
861 | 862 | ||
862 | /* Now turn on the MMU for real! */ | 863 | /* Now turn on the MMU for real! */ |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index bd5e6834ca69..6794466f6420 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, | 755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, |
756 | &uc_transact->uc_mcontext)) | 756 | &uc_transact->uc_mcontext)) |
757 | goto badframe; | 757 | goto badframe; |
758 | } | 758 | } else |
759 | #endif | 759 | #endif |
760 | /* Fall through, for non-TM restore */ | 760 | { |
761 | if (!MSR_TM_ACTIVE(msr)) { | ||
762 | /* | 761 | /* |
762 | * Fall through, for non-TM restore | ||
763 | * | ||
763 | * Unset MSR[TS] on the thread regs since MSR from user | 764 | * Unset MSR[TS] on the thread regs since MSR from user |
764 | * context does not have MSR active, and recheckpoint was | 765 | * context does not have MSR active, and recheckpoint was |
765 | * not called since restore_tm_sigcontexts() was not called | 766 | * not called since restore_tm_sigcontexts() was not called |
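Editor's note: the signal_64.c hunk above uses an "else that straddles an #ifdef": with the TM code compiled in, the brace block is the else-branch of the preceding if; without it, the block simply runs unconditionally. A minimal sketch of that shape, with an invented HAVE_FAST_PATH macro and trivial bodies:

#include <stdio.h>

#define HAVE_FAST_PATH 1	/* comment out to build the "generic only" variant */

int main(void)
{
	int fast_ok = 1;

#ifdef HAVE_FAST_PATH
	if (fast_ok) {
		puts("fast path");
	} else
#endif
	{
		puts("generic path");	/* else-branch, or unconditional when disabled */
	}
	return 0;
}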
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 29746dc28df5..517662a56bdc 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c | |||
@@ -967,13 +967,6 @@ out: | |||
967 | } | 967 | } |
968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
969 | 969 | ||
970 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
971 | unsigned long __init arch_syscall_addr(int nr) | ||
972 | { | ||
973 | return sys_call_table[nr*2]; | ||
974 | } | ||
975 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
976 | |||
977 | #ifdef PPC64_ELF_ABI_v1 | 970 | #ifdef PPC64_ELF_ABI_v1 |
978 | char *arch_ftrace_match_adjust(char *str, const char *search) | 971 | char *arch_ftrace_match_adjust(char *str, const char *search) |
979 | { | 972 | { |
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 5c36b3a8d47a..3349f3f8fe84 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c | |||
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { | |||
70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), | 70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), |
71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), | 71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), |
72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), | 72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), |
73 | PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | u64 perf_reg_value(struct pt_regs *regs, int idx) | 76 | u64 perf_reg_value(struct pt_regs *regs, int idx) |
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) | |||
83 | !is_sier_available())) | 84 | !is_sier_available())) |
84 | return 0; | 85 | return 0; |
85 | 86 | ||
87 | if (idx == PERF_REG_POWERPC_MMCRA && | ||
88 | (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || | ||
89 | IS_ENABLED(CONFIG_PPC32))) | ||
90 | return 0; | ||
91 | |||
86 | return regs_get_register(regs, pt_regs_offset[idx]); | 92 | return regs_get_register(regs, pt_regs_offset[idx]); |
87 | } | 93 | } |
88 | 94 | ||
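Editor's note: perf_reg_value() above looks each register index up in an offset table into pt_regs and returns 0 for indices that the current configuration cannot supply. A rough userspace sketch of that pattern follows; the struct, enum, and "have_mmcra" test are invented for illustration and do not mirror the real powerpc layout.

#include <stddef.h>
#include <stdio.h>

struct regs { unsigned long gpr0, link, dsisr; };

enum { REG_GPR0, REG_LINK, REG_MMCRA, REG_MAX };

static const size_t reg_offset[REG_MAX] = {
	[REG_GPR0]  = offsetof(struct regs, gpr0),
	[REG_LINK]  = offsetof(struct regs, link),
	[REG_MMCRA] = offsetof(struct regs, dsisr),	/* aliased slot, as in the hunk above */
};

static unsigned long reg_value(const struct regs *r, int idx, int have_mmcra)
{
	if (idx == REG_MMCRA && !have_mmcra)
		return 0;	/* register not available on this configuration */
	return *(const unsigned long *)((const char *)r + reg_offset[idx]);
}

int main(void)
{
	struct regs r = { .gpr0 = 1, .link = 2, .dsisr = 3 };

	printf("%lu %lu\n", reg_value(&r, REG_MMCRA, 1), reg_value(&r, REG_MMCRA, 0));
	return 0;
}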
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index a1aaa1569d7c..f0e488d97567 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c | |||
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
237 | continue; | 237 | continue; |
238 | 238 | ||
239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); | 239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); |
240 | seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); | 240 | seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); |
241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); | 241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); |
242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); | 242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); |
243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); | 243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); |
244 | 244 | ||
245 | seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); | 245 | seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys)); |
246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); | 246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); |
247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); | 247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); |
248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); | 248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); |
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
252 | blk->size, blk->owner); | 252 | blk->size, blk->owner); |
253 | } | 253 | } |
254 | 254 | ||
255 | seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); | 255 | seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); |
256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); | 256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); |
257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); | 257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); |
258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); | 258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index e66644e0fb40..9438fa0fc355 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void) | |||
538 | /* see if there is a keyboard in the device tree | 538 | /* see if there is a keyboard in the device tree |
539 | with a parent of type "adb" */ | 539 | with a parent of type "adb" */ |
540 | for_each_node_by_name(kbd, "keyboard") | 540 | for_each_node_by_name(kbd, "keyboard") |
541 | if (kbd->parent && kbd->parent->type | 541 | if (of_node_is_type(kbd->parent, "adb")) |
542 | && strcmp(kbd->parent->type, "adb") == 0) | ||
543 | break; | 542 | break; |
544 | of_node_put(kbd); | 543 | of_node_put(kbd); |
545 | if (kbd) | 544 | if (kbd) |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index d7f742ed48ba..3f58c7dbd581 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) | |||
564 | } | 564 | } |
565 | } else { | 565 | } else { |
566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ | 566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ |
567 | pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); | 567 | pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); |
568 | table_group = &pe->npucomp->table_group; | 568 | table_group = &pe->npucomp->table_group; |
569 | table_group->ops = &pnv_npu_peers_ops; | 569 | table_group->ops = &pnv_npu_peers_ops; |
570 | iommu_register_group(table_group, hose->global_number, | 570 | iommu_register_group(table_group, hose->global_number, |
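Editor's note: the one-character npu-dma.c fix above is the classic sizeof(ptr) vs sizeof(*ptr) allocation bug. A self-contained demo, with plain calloc standing in for kzalloc and an invented struct:

#include <stdio.h>
#include <stdlib.h>

struct npucomp_like { char table_group[256]; };

int main(void)
{
	struct npucomp_like *p = NULL;

	printf("sizeof(p)  = %zu\n", sizeof(p));	/* size of the pointer, e.g. 8 */
	printf("sizeof(*p) = %zu\n", sizeof(*p));	/* 256: what must actually be allocated */

	p = calloc(1, sizeof(*p));			/* the corrected form */
	free(p);
	return 0;
}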
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1d6406a051f1..7db3119f8a5b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void) | |||
2681 | list_for_each_entry(hose, &hose_list, list_node) { | 2681 | list_for_each_entry(hose, &hose_list, list_node) { |
2682 | phb = hose->private_data; | 2682 | phb = hose->private_data; |
2683 | 2683 | ||
2684 | if (phb->type == PNV_PHB_NPU_NVLINK) | 2684 | if (phb->type == PNV_PHB_NPU_NVLINK || |
2685 | phb->type == PNV_PHB_NPU_OCAPI) | ||
2685 | continue; | 2686 | continue; |
2686 | 2687 | ||
2687 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { | 2688 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 7725825d887d..37a77e57893e 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void) | |||
264 | if (!of_device_is_compatible(nvdn->parent, | 264 | if (!of_device_is_compatible(nvdn->parent, |
265 | "ibm,power9-npu")) | 265 | "ibm,power9-npu")) |
266 | continue; | 266 | continue; |
267 | #ifdef CONFIG_PPC_POWERNV | ||
267 | WARN_ON_ONCE(pnv_npu2_init(hose)); | 268 | WARN_ON_ONCE(pnv_npu2_init(hose)); |
269 | #endif | ||
268 | break; | 270 | break; |
269 | } | 271 | } |
270 | } | 272 | } |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 15af091611e2..4b4a7f32b68e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK | |||
617 | 617 | ||
618 | config X86_INTEL_LPSS | 618 | config X86_INTEL_LPSS |
619 | bool "Intel Low Power Subsystem Support" | 619 | bool "Intel Low Power Subsystem Support" |
620 | depends on X86 && ACPI | 620 | depends on X86 && ACPI && PCI |
621 | select COMMON_CLK | 621 | select COMMON_CLK |
622 | select PINCTRL | 622 | select PINCTRL |
623 | select IOSF_MBI | 623 | select IOSF_MBI |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index a77445d1b034..780f2b42c8ef 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t | |||
711 | { | 711 | { |
712 | if (unlikely(!access_ok(ptr,len))) | 712 | if (unlikely(!access_ok(ptr,len))) |
713 | return 0; | 713 | return 0; |
714 | __uaccess_begin(); | 714 | __uaccess_begin_nospec(); |
715 | return 1; | 715 | return 1; |
716 | } | 716 | } |
717 | #define user_access_begin(a,b) user_access_begin(a,b) | 717 | #define user_access_begin(a,b) user_access_begin(a,b) |
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2f6787fc7106..c54a493e139a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) | |||
898 | val = native_read_msr_safe(msr, err); | 898 | val = native_read_msr_safe(msr, err); |
899 | switch (msr) { | 899 | switch (msr) { |
900 | case MSR_IA32_APICBASE: | 900 | case MSR_IA32_APICBASE: |
901 | #ifdef CONFIG_X86_X2APIC | 901 | val &= ~X2APIC_ENABLE; |
902 | if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) | ||
903 | #endif | ||
904 | val &= ~X2APIC_ENABLE; | ||
905 | break; | 902 | break; |
906 | } | 903 | } |
907 | return val; | 904 | return val; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 72bf446c3fee..6e29794573b7 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -361,8 +361,6 @@ void xen_timer_resume(void) | |||
361 | { | 361 | { |
362 | int cpu; | 362 | int cpu; |
363 | 363 | ||
364 | pvclock_resume(); | ||
365 | |||
366 | if (xen_clockevent != &xen_vcpuop_clockevent) | 364 | if (xen_clockevent != &xen_vcpuop_clockevent) |
367 | return; | 365 | return; |
368 | 366 | ||
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { | |||
379 | }; | 377 | }; |
380 | 378 | ||
381 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; | 379 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; |
380 | static u64 xen_clock_value_saved; | ||
382 | 381 | ||
383 | void xen_save_time_memory_area(void) | 382 | void xen_save_time_memory_area(void) |
384 | { | 383 | { |
385 | struct vcpu_register_time_memory_area t; | 384 | struct vcpu_register_time_memory_area t; |
386 | int ret; | 385 | int ret; |
387 | 386 | ||
387 | xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; | ||
388 | |||
388 | if (!xen_clock) | 389 | if (!xen_clock) |
389 | return; | 390 | return; |
390 | 391 | ||
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) | |||
404 | int ret; | 405 | int ret; |
405 | 406 | ||
406 | if (!xen_clock) | 407 | if (!xen_clock) |
407 | return; | 408 | goto out; |
408 | 409 | ||
409 | t.addr.v = &xen_clock->pvti; | 410 | t.addr.v = &xen_clock->pvti; |
410 | 411 | ||
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) | |||
421 | if (ret != 0) | 422 | if (ret != 0) |
422 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", | 423 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", |
423 | ret); | 424 | ret); |
425 | |||
426 | out: | ||
427 | /* Need pvclock_resume() before using xen_clocksource_read(). */ | ||
428 | pvclock_resume(); | ||
429 | xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; | ||
424 | } | 430 | } |
425 | 431 | ||
426 | static void xen_setup_vsyscall_time_info(void) | 432 | static void xen_setup_vsyscall_time_info(void) |
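Editor's note: the xen/time.c change above keeps a derived clock continuous across suspend by saving its value beforehand and recomputing the offset afterwards, once the underlying source is usable again. A small sketch of that bookkeeping, with raw_clock(), the jump, and the variable names all invented:

#include <stdio.h>

static unsigned long long raw;		/* stands in for xen_clocksource_read() */
static unsigned long long offset;	/* stands in for xen_sched_clock_offset */

static unsigned long long raw_clock(void)     { return raw; }
static unsigned long long derived_clock(void) { return raw_clock() - offset; }

int main(void)
{
	unsigned long long saved;

	raw = 1000; offset = 1000;	/* derived clock starts at 0 */
	raw = 1500;			/* time passes: derived clock reads 500 */

	saved = derived_clock();	/* save before "suspend" */
	raw = 9000;			/* raw clock jumps across suspend */
	offset = raw_clock() - saved;	/* restore: derived clock stays at 500 */

	printf("derived after resume = %llu\n", derived_clock());
	return 0;
}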
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63e0f12be7c9..72adbbe975d5 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, | |||
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | /** | 1156 | /** |
1157 | * __bfq_deactivate_entity - deactivate an entity from its service tree. | 1157 | * __bfq_deactivate_entity - update sched_data and service trees for |
1158 | * @entity: the entity to deactivate. | 1158 | * entity, so as to represent entity as inactive |
1159 | * @entity: the entity being deactivated. | ||
1159 | * @ins_into_idle_tree: if false, the entity will not be put into the | 1160 | * @ins_into_idle_tree: if false, the entity will not be put into the |
1160 | * idle tree. | 1161 | * idle tree. |
1161 | * | 1162 | * |
1162 | * Deactivates an entity, independently of its previous state. Must | 1163 | * If necessary and allowed, puts entity into the idle tree. NOTE: |
1163 | * be invoked only if entity is on a service tree. Extracts the entity | 1164 | * entity may be on no tree if in service. |
1164 | * from that tree, and if necessary and allowed, puts it into the idle | ||
1165 | * tree. | ||
1166 | */ | 1165 | */ |
1167 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | 1166 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) |
1168 | { | 1167 | { |
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c index fb2c82c351e4..038cb627c868 100644 --- a/block/blk-mq-debugfs-zoned.c +++ b/block/blk-mq-debugfs-zoned.c | |||
@@ -1,8 +1,6 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | 2 | /* |
3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. | 3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. |
4 | * | ||
5 | * This file is released under the GPL. | ||
6 | */ | 4 | */ |
7 | 5 | ||
8 | #include <linux/blkdev.h> | 6 | #include <linux/blkdev.h> |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ba37b9e15e9..8f5b533764ca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
1906 | { | 1906 | { |
1907 | const int is_sync = op_is_sync(bio->bi_opf); | 1907 | const int is_sync = op_is_sync(bio->bi_opf); |
1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); | 1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); |
1909 | struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; | 1909 | struct blk_mq_alloc_data data = { .flags = 0}; |
1910 | struct request *rq; | 1910 | struct request *rq; |
1911 | struct blk_plug *plug; | 1911 | struct blk_plug *plug; |
1912 | struct request *same_queue_rq = NULL; | 1912 | struct request *same_queue_rq = NULL; |
@@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
1928 | 1928 | ||
1929 | rq_qos_throttle(q, bio); | 1929 | rq_qos_throttle(q, bio); |
1930 | 1930 | ||
1931 | data.cmd_flags = bio->bi_opf; | ||
1931 | rq = blk_mq_get_request(q, bio, &data); | 1932 | rq = blk_mq_get_request(q, bio, &data); |
1932 | if (unlikely(!rq)) { | 1933 | if (unlikely(!rq)) { |
1933 | rq_qos_cleanup(q, bio); | 1934 | rq_qos_cleanup(q, bio); |
diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 6651e713c45d..5564e73266a6 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c | |||
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
539 | ictx = skcipher_instance_ctx(inst); | 539 | ictx = skcipher_instance_ctx(inst); |
540 | 540 | ||
541 | /* Stream cipher, e.g. "xchacha12" */ | 541 | /* Stream cipher, e.g. "xchacha12" */ |
542 | crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, | ||
543 | skcipher_crypto_instance(inst)); | ||
542 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, | 544 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, |
543 | 0, crypto_requires_sync(algt->type, | 545 | 0, crypto_requires_sync(algt->type, |
544 | algt->mask)); | 546 | algt->mask)); |
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
547 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); | 549 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); |
548 | 550 | ||
549 | /* Block cipher, e.g. "aes" */ | 551 | /* Block cipher, e.g. "aes" */ |
552 | crypto_set_spawn(&ictx->blockcipher_spawn, | ||
553 | skcipher_crypto_instance(inst)); | ||
550 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, | 554 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, |
551 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); | 555 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); |
552 | if (err) | 556 | if (err) |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 37f54d1b2f66..4be293a4b5f0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, | |||
58 | return -EINVAL; | 58 | return -EINVAL; |
59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
60 | return -EINVAL; | 60 | return -EINVAL; |
61 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 61 | |
62 | /* | ||
63 | * RTA_OK() didn't align the rtattr's payload when validating that it | ||
64 | * fits in the buffer. Yet, the keys should start on the next 4-byte | ||
65 | * aligned boundary. To avoid confusion, require that the rtattr | ||
66 | * payload be exactly the param struct, which has a 4-byte aligned size. | ||
67 | */ | ||
68 | if (RTA_PAYLOAD(rta) != sizeof(*param)) | ||
62 | return -EINVAL; | 69 | return -EINVAL; |
70 | BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); | ||
63 | 71 | ||
64 | param = RTA_DATA(rta); | 72 | param = RTA_DATA(rta); |
65 | keys->enckeylen = be32_to_cpu(param->enckeylen); | 73 | keys->enckeylen = be32_to_cpu(param->enckeylen); |
66 | 74 | ||
67 | key += RTA_ALIGN(rta->rta_len); | 75 | key += rta->rta_len; |
68 | keylen -= RTA_ALIGN(rta->rta_len); | 76 | keylen -= rta->rta_len; |
69 | 77 | ||
70 | if (keylen < keys->enckeylen) | 78 | if (keylen < keys->enckeylen) |
71 | return -EINVAL; | 79 | return -EINVAL; |
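Editor's note: the authenc comment above hinges on a small piece of arithmetic: when the rtattr payload is exactly the 4-byte param struct, rta_len is already a multiple of 4, so the keys that follow start aligned without any RTA_ALIGN() rounding. The demo below re-implements RTA_ALIGN() locally and uses invented struct names and an arbitrary odd payload size purely to show the contrast.

#include <stdio.h>

#define RTA_ALIGNTO	4u
#define RTA_ALIGN(len)	(((len) + RTA_ALIGNTO - 1) & ~(RTA_ALIGNTO - 1))

struct rtattr_hdr { unsigned short rta_len, rta_type; };	/* 4 bytes */
struct key_param  { unsigned int enckeylen; };			/* 4 bytes */

int main(void)
{
	unsigned int exact = sizeof(struct rtattr_hdr) + sizeof(struct key_param);
	unsigned int odd   = sizeof(struct rtattr_hdr) + 6;	/* hypothetical odd payload */

	/* 8 vs 8: already aligned, keys may start right after rta_len bytes */
	printf("exact: rta_len=%u aligned=%u\n", exact, RTA_ALIGN(exact));
	/* 10 vs 12: the keys would need re-alignment, which is what the check forbids */
	printf("odd:   rta_len=%u aligned=%u\n", odd, RTA_ALIGN(odd));
	return 0;
}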
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 80a25cc04aec..4741fe89ba2c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | |||
279 | struct aead_request *req = areq->data; | 279 | struct aead_request *req = areq->data; |
280 | 280 | ||
281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); | 281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); |
282 | aead_request_complete(req, err); | 282 | authenc_esn_request_complete(req, err); |
283 | } | 283 | } |
284 | 284 | ||
285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) | 285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) |
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad..c0cf87ae7ef6 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c | |||
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) | |||
100 | 100 | ||
101 | for (i = 0; i <= 63; i++) { | 101 | for (i = 0; i <= 63; i++) { |
102 | 102 | ||
103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); | 103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); |
104 | 104 | ||
105 | ss2 = ss1 ^ rol32(a, 12); | 105 | ss2 = ss1 ^ rol32(a, 12); |
106 | 106 | ||
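Editor's note: the sm3_generic.c fix above masks the rotate count because shifting a 32-bit value by 32 or more is undefined in C, while rotating by (i & 31) is mathematically the same rotation. A minimal demo with a locally defined rol32() (not the kernel's) and an arbitrary test value:

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, unsigned int s)
{
	s &= 31;				/* keep the shift count in 0..31 */
	return (x << s) | (x >> ((32 - s) & 31));
}

int main(void)
{
	uint32_t v = 0x12345678;		/* arbitrary test value */

	/* rotating by 39 and by 39 & 31 == 7 yields the same result */
	printf("%08x %08x\n", rol32(v, 39), rol32(v, 39 & 31));
	return 0;
}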
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7c6afc111d76..bb857421c2e8 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -41,7 +41,8 @@ acpi-y += ec.o | |||
41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o | 42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o |
43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o | 43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o |
44 | acpi-y += acpi_lpss.o acpi_apd.o | 44 | acpi-$(CONFIG_PCI) += acpi_lpss.o |
45 | acpi-y += acpi_apd.o | ||
45 | acpi-y += acpi_platform.o | 46 | acpi-y += acpi_platform.o |
46 | acpi-y += acpi_pnp.o | 47 | acpi-y += acpi_pnp.o |
47 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o | 48 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 99d820a693a8..5c093ce01bcd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void) | |||
1054 | goto error0; | 1054 | goto error0; |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | /* | ||
1058 | * ACPI 2.0 requires the EC driver to be loaded and work before | ||
1059 | * the EC device is found in the namespace (i.e. before | ||
1060 | * acpi_load_tables() is called). | ||
1061 | * | ||
1062 | * This is accomplished by looking for the ECDT table, and getting | ||
1063 | * the EC parameters out of that. | ||
1064 | * | ||
1065 | * Ignore the result. Not having an ECDT is not fatal. | ||
1066 | */ | ||
1067 | status = acpi_ec_ecdt_probe(); | ||
1068 | |||
1069 | #ifdef CONFIG_X86 | 1057 | #ifdef CONFIG_X86 |
1070 | if (!acpi_ioapic) { | 1058 | if (!acpi_ioapic) { |
1071 | /* compatible (0) means level (3) */ | 1059 | /* compatible (0) means level (3) */ |
@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void) | |||
1142 | goto error1; | 1130 | goto error1; |
1143 | } | 1131 | } |
1144 | 1132 | ||
1133 | /* | ||
1134 | * ACPI 2.0 requires the EC driver to be loaded and work before the EC | ||
1135 | * device is found in the namespace. | ||
1136 | * | ||
1137 | * This is accomplished by looking for the ECDT table and getting the EC | ||
1138 | * parameters out of that. | ||
1139 | * | ||
1140 | * Do that before calling acpi_initialize_objects() which may trigger EC | ||
1141 | * address space accesses. | ||
1142 | */ | ||
1143 | acpi_ec_ecdt_probe(); | ||
1144 | |||
1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); | 1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); |
1146 | if (ACPI_FAILURE(status)) { | 1146 | if (ACPI_FAILURE(status)) { |
1147 | printk(KERN_ERR PREFIX | 1147 | printk(KERN_ERR PREFIX |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 7e6952edb5b0..6a9e1fb8913a 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void); | |||
81 | #else | 81 | #else |
82 | static inline void acpi_debugfs_init(void) { return; } | 82 | static inline void acpi_debugfs_init(void) { return; } |
83 | #endif | 83 | #endif |
84 | #ifdef CONFIG_PCI | ||
84 | void acpi_lpss_init(void); | 85 | void acpi_lpss_init(void); |
86 | #else | ||
87 | static inline void acpi_lpss_init(void) {} | ||
88 | #endif | ||
85 | 89 | ||
86 | void acpi_apd_init(void); | 90 | void acpi_apd_init(void); |
87 | 91 | ||
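Editor's note: the internal.h hunk above uses the usual header idiom for optional subsystems: a real prototype when the option is configured in, an empty static inline stub otherwise, so call sites need no #ifdefs of their own. A standalone sketch with an invented FEATURE_FOO macro and foo_init() name (leave FEATURE_FOO undefined to build it as-is; defining it would require linking a real foo_init() elsewhere):

#include <stdio.h>

/* #define FEATURE_FOO 1 */

#ifdef FEATURE_FOO
void foo_init(void);			/* real implementation provided elsewhere */
#else
static inline void foo_init(void) {}	/* compiles away when the feature is off */
#endif

int main(void)
{
	foo_init();			/* unconditional call site, no #ifdef needed */
	puts("done");
	return 0;
}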
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 011d3db19c80..5143e11e3b0f 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <acpi/nfit.h> | 26 | #include <acpi/nfit.h> |
27 | #include "intel.h" | 27 | #include "intel.h" |
28 | #include "nfit.h" | 28 | #include "nfit.h" |
29 | #include "intel.h" | ||
30 | 29 | ||
31 | /* | 30 | /* |
32 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is | 31 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is |
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) | |||
78 | } | 77 | } |
79 | EXPORT_SYMBOL(to_nfit_uuid); | 78 | EXPORT_SYMBOL(to_nfit_uuid); |
80 | 79 | ||
81 | static struct acpi_nfit_desc *to_acpi_nfit_desc( | ||
82 | struct nvdimm_bus_descriptor *nd_desc) | ||
83 | { | ||
84 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); | ||
85 | } | ||
86 | |||
87 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) | 80 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) |
88 | { | 81 | { |
89 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; | 82 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
@@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) | |||
419 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | 412 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, |
420 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) | 413 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) |
421 | { | 414 | { |
422 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 415 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
423 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 416 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
424 | union acpi_object in_obj, in_buf, *out_obj; | 417 | union acpi_object in_obj, in_buf, *out_obj; |
425 | const struct nd_cmd_desc *desc = NULL; | 418 | const struct nd_cmd_desc *desc = NULL; |
@@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
721 | struct acpi_nfit_memory_map *memdev; | 714 | struct acpi_nfit_memory_map *memdev; |
722 | struct acpi_nfit_desc *acpi_desc; | 715 | struct acpi_nfit_desc *acpi_desc; |
723 | struct nfit_mem *nfit_mem; | 716 | struct nfit_mem *nfit_mem; |
717 | u16 physical_id; | ||
724 | 718 | ||
725 | mutex_lock(&acpi_desc_lock); | 719 | mutex_lock(&acpi_desc_lock); |
726 | list_for_each_entry(acpi_desc, &acpi_descs, list) { | 720 | list_for_each_entry(acpi_desc, &acpi_descs, list) { |
@@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
728 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | 722 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
729 | memdev = __to_nfit_memdev(nfit_mem); | 723 | memdev = __to_nfit_memdev(nfit_mem); |
730 | if (memdev->device_handle == device_handle) { | 724 | if (memdev->device_handle == device_handle) { |
725 | *flags = memdev->flags; | ||
726 | physical_id = memdev->physical_id; | ||
731 | mutex_unlock(&acpi_desc->init_mutex); | 727 | mutex_unlock(&acpi_desc->init_mutex); |
732 | mutex_unlock(&acpi_desc_lock); | 728 | mutex_unlock(&acpi_desc_lock); |
733 | *flags = memdev->flags; | 729 | return physical_id; |
734 | return memdev->physical_id; | ||
735 | } | 730 | } |
736 | } | 731 | } |
737 | mutex_unlock(&acpi_desc->init_mutex); | 732 | mutex_unlock(&acpi_desc->init_mutex); |
@@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, | |||
2231 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); | 2226 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); |
2232 | if (!nd_set) | 2227 | if (!nd_set) |
2233 | return -ENOMEM; | 2228 | return -ENOMEM; |
2234 | ndr_desc->nd_set = nd_set; | ||
2235 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); | 2229 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); |
2236 | 2230 | ||
2237 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); | 2231 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); |
@@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init); | |||
3367 | 3361 | ||
3368 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | 3362 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) |
3369 | { | 3363 | { |
3370 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3364 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
3371 | struct device *dev = acpi_desc->dev; | 3365 | struct device *dev = acpi_desc->dev; |
3372 | 3366 | ||
3373 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ | 3367 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
@@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
3384 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, | 3378 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
3385 | struct nvdimm *nvdimm, unsigned int cmd) | 3379 | struct nvdimm *nvdimm, unsigned int cmd) |
3386 | { | 3380 | { |
3387 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3381 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
3388 | 3382 | ||
3389 | if (nvdimm) | 3383 | if (nvdimm) |
3390 | return 0; | 3384 | return 0; |
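Editor's note: the nfit_get_smbios_id() change above copies the fields it needs while the mutexes are still held, instead of dereferencing the list entry after unlocking. A rough pthread sketch of that rule (compile with -pthread; the struct, table and lookup are invented):

#include <pthread.h>
#include <stdio.h>

struct entry { int handle; int flags; int physical_id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry table[] = { { 1, 0x8, 42 } };

static int lookup(int handle, int *flags)
{
	int physical_id = -1;
	unsigned int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].handle == handle) {
			*flags = table[i].flags;		/* copy ...            */
			physical_id = table[i].physical_id;	/* ... under the lock  */
			break;
		}
	}
	pthread_mutex_unlock(&lock);	/* the entry may change or vanish after this */
	return physical_id;		/* only the copies are used from here on */
}

int main(void)
{
	int flags = 0;
	int id = lookup(1, &flags);

	printf("id=%d flags=%#x\n", id, flags);
	return 0;
}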
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 850b2927b4e7..f70de71f79d6 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c | |||
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm, | |||
146 | 146 | ||
147 | static void nvdimm_invalidate_cache(void); | 147 | static void nvdimm_invalidate_cache(void); |
148 | 148 | ||
149 | static int intel_security_unlock(struct nvdimm *nvdimm, | 149 | static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, |
150 | const struct nvdimm_key_data *key_data) | 150 | const struct nvdimm_key_data *key_data) |
151 | { | 151 | { |
152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm, | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static int intel_security_erase(struct nvdimm *nvdimm, | 230 | static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, |
231 | const struct nvdimm_key_data *key, | 231 | const struct nvdimm_key_data *key, |
232 | enum nvdimm_passphrase_type ptype) | 232 | enum nvdimm_passphrase_type ptype) |
233 | { | 233 | { |
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm, | |||
276 | return 0; | 276 | return 0; |
277 | } | 277 | } |
278 | 278 | ||
279 | static int intel_security_query_overwrite(struct nvdimm *nvdimm) | 279 | static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) |
280 | { | 280 | { |
281 | int rc; | 281 | int rc; |
282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm) | |||
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | 315 | ||
316 | static int intel_security_overwrite(struct nvdimm *nvdimm, | 316 | static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, |
317 | const struct nvdimm_key_data *nkey) | 317 | const struct nvdimm_key_data *nkey) |
318 | { | 318 | { |
319 | int rc; | 319 | int rc; |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 4ca7a6b4eaae..8218db17ebdb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers" | |||
1091 | 1091 | ||
1092 | config PATA_ACPI | 1092 | config PATA_ACPI |
1093 | tristate "ACPI firmware driver for PATA" | 1093 | tristate "ACPI firmware driver for PATA" |
1094 | depends on ATA_ACPI && ATA_BMDMA | 1094 | depends on ATA_ACPI && ATA_BMDMA && PCI |
1095 | help | 1095 | help |
1096 | This option enables an ACPI method driver which drives | 1096 | This option enables an ACPI method driver which drives |
1097 | motherboard PATA controller interfaces through the ACPI | 1097 | motherboard PATA controller interfaces through the ACPI |
diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 2e9d1cfe3aeb..211607986134 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c | |||
@@ -718,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) | |||
718 | instead of '/ 512', use '>> 9' to prevent a call | 718 | instead of '/ 512', use '>> 9' to prevent a call |
719 | to divdu3 on x86 platforms | 719 | to divdu3 on x86 platforms |
720 | */ | 720 | */ |
721 | rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; | 721 | rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; |
722 | 722 | ||
723 | if (rate_cps < 10) | 723 | if (rate_cps < 10) |
724 | rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ | 724 | rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ |
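Editor's note: the he.c fix above widens the shifted constant because, with a plain int 1, the shift is done in 32-bit arithmetic before the cast to unsigned long long; 1 << 31 already overflows a signed int (formally undefined, typically producing a negative value that then sign-extends), whereas 1UL shifts at full width on 64-bit builds. A small demo with invented values:

#include <stdio.h>

int main(void)
{
	unsigned int exp = 31, man = 100;

	/* buggy form: the 32-bit shift overflows before the widening cast */
	unsigned long long bad  = (unsigned long long) (1 << exp) * (man + 512) >> 9;
	/* fixed form: the shift itself is done in unsigned long */
	unsigned long long good = (unsigned long long) (1UL << exp) * (man + 512) >> 9;

	printf("bad=%llu good=%llu\n", bad, good);
	return 0;
}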
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 1bd1145ad8b5..330c1f7e9665 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) | |||
108 | * suppress pointless writes. | 108 | * suppress pointless writes. |
109 | */ | 109 | */ |
110 | for (i = 0; i < d->chip->num_regs; i++) { | 110 | for (i = 0; i < d->chip->num_regs; i++) { |
111 | if (!d->chip->mask_base) | ||
112 | continue; | ||
113 | |||
111 | reg = d->chip->mask_base + | 114 | reg = d->chip->mask_base + |
112 | (i * map->reg_stride * d->irq_reg_stride); | 115 | (i * map->reg_stride * d->irq_reg_stride); |
113 | if (d->chip->mask_invert) { | 116 | if (d->chip->mask_invert) { |
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) | |||
258 | const struct regmap_irq_type *t = &irq_data->type; | 261 | const struct regmap_irq_type *t = &irq_data->type; |
259 | 262 | ||
260 | if ((t->types_supported & type) != type) | 263 | if ((t->types_supported & type) != type) |
261 | return -ENOTSUPP; | 264 | return 0; |
262 | 265 | ||
263 | reg = t->type_reg_offset / map->reg_stride; | 266 | reg = t->type_reg_offset / map->reg_stride; |
264 | 267 | ||
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
588 | /* Mask all the interrupts by default */ | 591 | /* Mask all the interrupts by default */ |
589 | for (i = 0; i < chip->num_regs; i++) { | 592 | for (i = 0; i < chip->num_regs; i++) { |
590 | d->mask_buf[i] = d->mask_buf_def[i]; | 593 | d->mask_buf[i] = d->mask_buf_def[i]; |
594 | if (!chip->mask_base) | ||
595 | continue; | ||
596 | |||
591 | reg = chip->mask_base + | 597 | reg = chip->mask_base + |
592 | (i * map->reg_stride * d->irq_reg_stride); | 598 | (i * map->reg_stride * d->irq_reg_stride); |
593 | if (chip->mask_invert) | 599 | if (chip->mask_invert) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 08696f5f00bb..7c9a949e876b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) | |||
288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); | 288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
289 | set_capacity(nbd->disk, config->bytesize >> 9); | 289 | set_capacity(nbd->disk, config->bytesize >> 9); |
290 | if (bdev) { | 290 | if (bdev) { |
291 | if (bdev->bd_disk) | 291 | if (bdev->bd_disk) { |
292 | bd_set_size(bdev, config->bytesize); | 292 | bd_set_size(bdev, config->bytesize); |
293 | else | 293 | set_blocksize(bdev, config->blksize); |
294 | } else | ||
294 | bdev->bd_invalidated = 1; | 295 | bdev->bd_invalidated = 1; |
295 | bdput(bdev); | 296 | bdput(bdev); |
296 | } | 297 | } |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5a90075f719d..0be55fcc19ba 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU | |||
692 | depends on ARCH_BCM_IPROC | 692 | depends on ARCH_BCM_IPROC |
693 | depends on MAILBOX | 693 | depends on MAILBOX |
694 | default m | 694 | default m |
695 | select CRYPTO_AUTHENC | ||
695 | select CRYPTO_DES | 696 | select CRYPTO_DES |
696 | select CRYPTO_MD5 | 697 | select CRYPTO_MD5 |
697 | select CRYPTO_SHA1 | 698 | select CRYPTO_SHA1 |
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c9393ffb70ed..5567cbda2798 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
2845 | struct spu_hw *spu = &iproc_priv.spu; | 2845 | struct spu_hw *spu = &iproc_priv.spu; |
2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); | 2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); |
2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | 2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); |
2848 | struct rtattr *rta = (void *)key; | 2848 | struct crypto_authenc_keys keys; |
2849 | struct crypto_authenc_key_param *param; | 2849 | int ret; |
2850 | const u8 *origkey = key; | ||
2851 | const unsigned int origkeylen = keylen; | ||
2852 | |||
2853 | int ret = 0; | ||
2854 | 2850 | ||
2855 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, | 2851 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, |
2856 | keylen); | 2852 | keylen); |
2857 | flow_dump(" key: ", key, keylen); | 2853 | flow_dump(" key: ", key, keylen); |
2858 | 2854 | ||
2859 | if (!RTA_OK(rta, keylen)) | 2855 | ret = crypto_authenc_extractkeys(&keys, key, keylen); |
2860 | goto badkey; | 2856 | if (ret) |
2861 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
2862 | goto badkey; | ||
2863 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
2864 | goto badkey; | 2857 | goto badkey; |
2865 | 2858 | ||
2866 | param = RTA_DATA(rta); | 2859 | if (keys.enckeylen > MAX_KEY_SIZE || |
2867 | ctx->enckeylen = be32_to_cpu(param->enckeylen); | 2860 | keys.authkeylen > MAX_KEY_SIZE) |
2868 | |||
2869 | key += RTA_ALIGN(rta->rta_len); | ||
2870 | keylen -= RTA_ALIGN(rta->rta_len); | ||
2871 | |||
2872 | if (keylen < ctx->enckeylen) | ||
2873 | goto badkey; | ||
2874 | if (ctx->enckeylen > MAX_KEY_SIZE) | ||
2875 | goto badkey; | 2861 | goto badkey; |
2876 | 2862 | ||
2877 | ctx->authkeylen = keylen - ctx->enckeylen; | 2863 | ctx->enckeylen = keys.enckeylen; |
2878 | 2864 | ctx->authkeylen = keys.authkeylen; | |
2879 | if (ctx->authkeylen > MAX_KEY_SIZE) | ||
2880 | goto badkey; | ||
2881 | 2865 | ||
2882 | memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); | 2866 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
2883 | /* May end up padding auth key. So make sure it's zeroed. */ | 2867 | /* May end up padding auth key. So make sure it's zeroed. */ |
2884 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); | 2868 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); |
2885 | memcpy(ctx->authkey, key, ctx->authkeylen); | 2869 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
2886 | 2870 | ||
2887 | switch (ctx->alg->cipher_info.alg) { | 2871 | switch (ctx->alg->cipher_info.alg) { |
2888 | case CIPHER_ALG_DES: | 2872 | case CIPHER_ALG_DES: |
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
2890 | u32 tmp[DES_EXPKEY_WORDS]; | 2874 | u32 tmp[DES_EXPKEY_WORDS]; |
2891 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; | 2875 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; |
2892 | 2876 | ||
2893 | if (des_ekey(tmp, key) == 0) { | 2877 | if (des_ekey(tmp, keys.enckey) == 0) { |
2894 | if (crypto_aead_get_flags(cipher) & | 2878 | if (crypto_aead_get_flags(cipher) & |
2895 | CRYPTO_TFM_REQ_WEAK_KEY) { | 2879 | CRYPTO_TFM_REQ_WEAK_KEY) { |
2896 | crypto_aead_set_flags(cipher, flags); | 2880 | crypto_aead_set_flags(cipher, flags); |
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
2905 | break; | 2889 | break; |
2906 | case CIPHER_ALG_3DES: | 2890 | case CIPHER_ALG_3DES: |
2907 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { | 2891 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { |
2908 | const u32 *K = (const u32 *)key; | 2892 | const u32 *K = (const u32 *)keys.enckey; |
2909 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; | 2893 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; |
2910 | 2894 | ||
2911 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | 2895 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
2956 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 2940 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
2957 | ctx->fallback_cipher->base.crt_flags |= | 2941 | ctx->fallback_cipher->base.crt_flags |= |
2958 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; | 2942 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; |
2959 | ret = | 2943 | ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); |
2960 | crypto_aead_setkey(ctx->fallback_cipher, origkey, | ||
2961 | origkeylen); | ||
2962 | if (ret) { | 2944 | if (ret) { |
2963 | flow_log(" fallback setkey() returned:%d\n", ret); | 2945 | flow_log(" fallback setkey() returned:%d\n", ret); |
2964 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 2946 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 92e593e2069a..80ae69f906fb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void) | |||
3476 | * Skip algorithms requiring message digests | 3476 | * Skip algorithms requiring message digests |
3477 | * if MD or MD size is not supported by device. | 3477 | * if MD or MD size is not supported by device. |
3478 | */ | 3478 | */ |
3479 | if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && | 3479 | if (is_mdha(c2_alg_sel) && |
3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) | 3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) |
3481 | continue; | 3481 | continue; |
3482 | 3482 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 81712aa5d0f2..bb1a2cdf1951 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1072 | 1072 | ||
1073 | desc = edesc->hw_desc; | 1073 | desc = edesc->hw_desc; |
1074 | 1074 | ||
1075 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | 1075 | if (buflen) { |
1076 | if (dma_mapping_error(jrdev, state->buf_dma)) { | 1076 | state->buf_dma = dma_map_single(jrdev, buf, buflen, |
1077 | dev_err(jrdev, "unable to map src\n"); | 1077 | DMA_TO_DEVICE); |
1078 | goto unmap; | 1078 | if (dma_mapping_error(jrdev, state->buf_dma)) { |
1079 | } | 1079 | dev_err(jrdev, "unable to map src\n"); |
1080 | goto unmap; | ||
1081 | } | ||
1080 | 1082 | ||
1081 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | 1083 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
1084 | } | ||
1082 | 1085 | ||
1083 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1086 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1084 | digestsize); | 1087 | digestsize); |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index ec10230178c5..4b6854bf896a 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
@@ -1155,6 +1155,7 @@ | |||
1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) | 1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) |
1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) | 1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) |
1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) | 1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) |
1158 | #define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) | ||
1158 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) | 1159 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) |
1159 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) | 1160 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) |
1160 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) | 1161 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) |
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 67ea94079837..8c6b83e02a70 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
@@ -7,6 +7,9 @@ | |||
7 | 7 | ||
8 | #ifndef CAAM_ERROR_H | 8 | #ifndef CAAM_ERROR_H |
9 | #define CAAM_ERROR_H | 9 | #define CAAM_ERROR_H |
10 | |||
11 | #include "desc.h" | ||
12 | |||
10 | #define CAAM_ERROR_STR_MAX 302 | 13 | #define CAAM_ERROR_STR_MAX 302 |
11 | 14 | ||
12 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | 15 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); |
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | |||
17 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 20 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, |
18 | int rowsize, int groupsize, struct scatterlist *sg, | 21 | int rowsize, int groupsize, struct scatterlist *sg, |
19 | size_t tlen, bool ascii); | 22 | size_t tlen, bool ascii); |
23 | |||
24 | static inline bool is_mdha(u32 algtype) | ||
25 | { | ||
26 | return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == | ||
27 | OP_ALG_CHA_MDHA; | ||
28 | } | ||
20 | #endif /* CAAM_ERROR_H */ | 29 | #endif /* CAAM_ERROR_H */ |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index e34e4df8fd24..fe070d75c842 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq) | |||
567 | 567 | ||
568 | /* ORH error code */ | 568 | /* ORH error code */ |
569 | err = READ_ONCE(*sr->resp.orh) & 0xff; | 569 | err = READ_ONCE(*sr->resp.orh) & 0xff; |
570 | softreq_destroy(sr); | ||
571 | 570 | ||
572 | if (sr->callback) | 571 | if (sr->callback) |
573 | sr->callback(sr->cb_arg, err); | 572 | sr->callback(sr->cb_arg, err); |
573 | softreq_destroy(sr); | ||
574 | 574 | ||
575 | req_completed++; | 575 | req_completed++; |
576 | } | 576 | } |
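Editor's note: the nitrox change above runs the completion callback before destroying the request, since both the callback pointer and its argument live inside the request itself. A minimal sketch of that ordering, with an invented struct and callback:

#include <stdio.h>
#include <stdlib.h>

struct softreq {
	void (*callback)(void *arg, int err);
	void *cb_arg;
};

static void done(void *arg, int err)
{
	printf("%s: err=%d\n", (const char *)arg, err);
}

int main(void)
{
	struct softreq *sr = malloc(sizeof(*sr));

	sr->callback = done;
	sr->cb_arg = "request 1";

	/* correct order: consume the request, then free it */
	if (sr->callback)
		sr->callback(sr->cb_arg, 0);
	free(sr);	/* freeing first would turn the reads above into use-after-free */
	return 0;
}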
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index f2643cda45db..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c | |||
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
549 | unsigned int keylen) | 549 | unsigned int keylen) |
550 | { | 550 | { |
551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
552 | struct rtattr *rta = (struct rtattr *)key; | ||
553 | struct cc_crypto_req cc_req = {}; | 552 | struct cc_crypto_req cc_req = {}; |
554 | struct crypto_authenc_key_param *param; | ||
555 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; | 553 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; |
556 | int rc = -EINVAL; | ||
557 | unsigned int seq_len = 0; | 554 | unsigned int seq_len = 0; |
558 | struct device *dev = drvdata_to_dev(ctx->drvdata); | 555 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
556 | const u8 *enckey, *authkey; | ||
557 | int rc; | ||
559 | 558 | ||
560 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", | 559 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", |
561 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); | 560 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); |
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
563 | /* STAT_PHASE_0: Init and sanity checks */ | 562 | /* STAT_PHASE_0: Init and sanity checks */ |
564 | 563 | ||
565 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ | 564 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ |
566 | if (!RTA_OK(rta, keylen)) | 565 | struct crypto_authenc_keys keys; |
567 | goto badkey; | 566 | |
568 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 567 | rc = crypto_authenc_extractkeys(&keys, key, keylen); |
569 | goto badkey; | 568 | if (rc) |
570 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
571 | goto badkey; | ||
572 | param = RTA_DATA(rta); | ||
573 | ctx->enc_keylen = be32_to_cpu(param->enckeylen); | ||
574 | key += RTA_ALIGN(rta->rta_len); | ||
575 | keylen -= RTA_ALIGN(rta->rta_len); | ||
576 | if (keylen < ctx->enc_keylen) | ||
577 | goto badkey; | 569 | goto badkey; |
578 | ctx->auth_keylen = keylen - ctx->enc_keylen; | 570 | enckey = keys.enckey; |
571 | authkey = keys.authkey; | ||
572 | ctx->enc_keylen = keys.enckeylen; | ||
573 | ctx->auth_keylen = keys.authkeylen; | ||
579 | 574 | ||
580 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { | 575 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
581 | /* the nonce is stored in bytes at end of key */ | 576 | /* the nonce is stored in bytes at end of key */ |
577 | rc = -EINVAL; | ||
582 | if (ctx->enc_keylen < | 578 | if (ctx->enc_keylen < |
583 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) | 579 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) |
584 | goto badkey; | 580 | goto badkey; |
585 | /* Copy nonce from last 4 bytes in CTR key to | 581 | /* Copy nonce from last 4 bytes in CTR key to |
586 | * first 4 bytes in CTR IV | 582 | * first 4 bytes in CTR IV |
587 | */ | 583 | */ |
588 | memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + | 584 | memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - |
589 | ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, | 585 | CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); |
590 | CTR_RFC3686_NONCE_SIZE); | ||
591 | /* Set CTR key size */ | 586 | /* Set CTR key size */ |
592 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; | 587 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; |
593 | } | 588 | } |
594 | } else { /* non-authenc - has just one key */ | 589 | } else { /* non-authenc - has just one key */ |
590 | enckey = key; | ||
591 | authkey = NULL; | ||
595 | ctx->enc_keylen = keylen; | 592 | ctx->enc_keylen = keylen; |
596 | ctx->auth_keylen = 0; | 593 | ctx->auth_keylen = 0; |
597 | } | 594 | } |
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
603 | /* STAT_PHASE_1: Copy key to ctx */ | 600 | /* STAT_PHASE_1: Copy key to ctx */ |
604 | 601 | ||
605 | /* Get key material */ | 602 | /* Get key material */ |
606 | memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); | 603 | memcpy(ctx->enckey, enckey, ctx->enc_keylen); |
607 | if (ctx->enc_keylen == 24) | 604 | if (ctx->enc_keylen == 24) |
608 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); | 605 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); |
609 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { | 606 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { |
610 | memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); | 607 | memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, |
608 | ctx->auth_keylen); | ||
611 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ | 609 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ |
612 | rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); | 610 | rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); |
613 | if (rc) | 611 | if (rc) |
614 | goto badkey; | 612 | goto badkey; |
615 | } | 613 | } |
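For context on the ccree hunk above: instead of open-coding the rtattr parsing of an authenc() key blob, cc_aead_setkey() now leans on the crypto API helper crypto_authenc_extractkeys(), which splits the blob into its authentication and encryption halves and validates the embedded length field. A minimal sketch of how a setkey path typically consumes that helper follows; crypto_authenc_extractkeys() and struct crypto_authenc_keys are the real kernel interfaces, while example_setkey() and everything around it are illustrative only.

	#include <crypto/authenc.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Sketch: split an authenc() key blob into its two halves. */
	static int example_setkey(const u8 *key, unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		/* Parses the rtattr header and checks enckeylen for us. */
		if (crypto_authenc_extractkeys(&keys, key, keylen))
			return -EINVAL;

		/*
		 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen
		 * now point into the original blob; copy whatever the
		 * hardware needs from there.
		 */
		return 0;
	}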
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 45e20707cef8..f8e2c5c3f4eb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1361 | struct talitos_private *priv = dev_get_drvdata(dev); | 1361 | struct talitos_private *priv = dev_get_drvdata(dev); |
1362 | bool is_sec1 = has_ftr_sec1(priv); | 1362 | bool is_sec1 = has_ftr_sec1(priv); |
1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; | 1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; |
1364 | void *err; | ||
1365 | 1364 | ||
1366 | if (cryptlen + authsize > max_len) { | 1365 | if (cryptlen + authsize > max_len) { |
1367 | dev_err(dev, "length exceeds h/w max limit\n"); | 1366 | dev_err(dev, "length exceeds h/w max limit\n"); |
1368 | return ERR_PTR(-EINVAL); | 1367 | return ERR_PTR(-EINVAL); |
1369 | } | 1368 | } |
1370 | 1369 | ||
1371 | if (ivsize) | ||
1372 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
1373 | |||
1374 | if (!dst || dst == src) { | 1370 | if (!dst || dst == src) { |
1375 | src_len = assoclen + cryptlen + authsize; | 1371 | src_len = assoclen + cryptlen + authsize; |
1376 | src_nents = sg_nents_for_len(src, src_len); | 1372 | src_nents = sg_nents_for_len(src, src_len); |
1377 | if (src_nents < 0) { | 1373 | if (src_nents < 0) { |
1378 | dev_err(dev, "Invalid number of src SG.\n"); | 1374 | dev_err(dev, "Invalid number of src SG.\n"); |
1379 | err = ERR_PTR(-EINVAL); | 1375 | return ERR_PTR(-EINVAL); |
1380 | goto error_sg; | ||
1381 | } | 1376 | } |
1382 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1377 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1383 | dst_nents = dst ? src_nents : 0; | 1378 | dst_nents = dst ? src_nents : 0; |
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1387 | src_nents = sg_nents_for_len(src, src_len); | 1382 | src_nents = sg_nents_for_len(src, src_len); |
1388 | if (src_nents < 0) { | 1383 | if (src_nents < 0) { |
1389 | dev_err(dev, "Invalid number of src SG.\n"); | 1384 | dev_err(dev, "Invalid number of src SG.\n"); |
1390 | err = ERR_PTR(-EINVAL); | 1385 | return ERR_PTR(-EINVAL); |
1391 | goto error_sg; | ||
1392 | } | 1386 | } |
1393 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1387 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1394 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); | 1388 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); |
1395 | dst_nents = sg_nents_for_len(dst, dst_len); | 1389 | dst_nents = sg_nents_for_len(dst, dst_len); |
1396 | if (dst_nents < 0) { | 1390 | if (dst_nents < 0) { |
1397 | dev_err(dev, "Invalid number of dst SG.\n"); | 1391 | dev_err(dev, "Invalid number of dst SG.\n"); |
1398 | err = ERR_PTR(-EINVAL); | 1392 | return ERR_PTR(-EINVAL); |
1399 | goto error_sg; | ||
1400 | } | 1393 | } |
1401 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1394 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1402 | } | 1395 | } |
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1423 | /* if its a ahash, add space for a second desc next to the first one */ | 1416 | /* if its a ahash, add space for a second desc next to the first one */ |
1424 | if (is_sec1 && !dst) | 1417 | if (is_sec1 && !dst) |
1425 | alloc_len += sizeof(struct talitos_desc); | 1418 | alloc_len += sizeof(struct talitos_desc); |
1419 | alloc_len += ivsize; | ||
1426 | 1420 | ||
1427 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1421 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1428 | if (!edesc) { | 1422 | if (!edesc) |
1429 | err = ERR_PTR(-ENOMEM); | 1423 | return ERR_PTR(-ENOMEM); |
1430 | goto error_sg; | 1424 | if (ivsize) { |
1425 | iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); | ||
1426 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
1431 | } | 1427 | } |
1432 | memset(&edesc->desc, 0, sizeof(edesc->desc)); | 1428 | memset(&edesc->desc, 0, sizeof(edesc->desc)); |
1433 | 1429 | ||
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1445 | DMA_BIDIRECTIONAL); | 1441 | DMA_BIDIRECTIONAL); |
1446 | } | 1442 | } |
1447 | return edesc; | 1443 | return edesc; |
1448 | error_sg: | ||
1449 | if (iv_dma) | ||
1450 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
1451 | return err; | ||
1452 | } | 1444 | } |
1453 | 1445 | ||
1454 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | 1446 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
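The talitos change above reorders the IV handling so that nothing is DMA-mapped before the allocation that can fail: room for the IV is reserved at the tail of the edesc buffer, the IV is copied there, and only that copy is mapped, which lets every earlier error path return directly instead of jumping to an unwind label. A hedged sketch of the allocate-then-map ordering (kmalloc(), memcpy() and dma_map_single() are the real interfaces; buf, alloc_len and the surrounding error handling are illustrative):

	/* Reserve room for the IV inside the single allocation. */
	buf = kmalloc(alloc_len + ivsize, GFP_DMA | flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);	/* nothing mapped yet, nothing to undo */

	if (ivsize) {
		/* Keep a driver-owned copy of the IV next to the descriptor... */
		iv = memcpy(buf + alloc_len, iv, ivsize);
		/* ...and map that copy, so its lifetime matches the descriptor. */
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}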
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dafc645b2e4e..b083b219b1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
531 | struct drm_gem_object *obj; | 531 | struct drm_gem_object *obj; |
532 | struct amdgpu_framebuffer *amdgpu_fb; | 532 | struct amdgpu_framebuffer *amdgpu_fb; |
533 | int ret; | 533 | int ret; |
534 | int height; | ||
535 | struct amdgpu_device *adev = dev->dev_private; | ||
536 | int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0); | ||
537 | int pitch = mode_cmd->pitches[0] / cpp; | ||
538 | |||
539 | pitch = amdgpu_align_pitch(adev, pitch, cpp, false); | ||
540 | if (mode_cmd->pitches[0] != pitch) { | ||
541 | DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n", | ||
542 | pitch, mode_cmd->pitches[0]); | ||
543 | return ERR_PTR(-EINVAL); | ||
544 | } | ||
545 | 534 | ||
546 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); | 535 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); |
547 | if (obj == NULL) { | 536 | if (obj == NULL) { |
@@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
556 | return ERR_PTR(-EINVAL); | 545 | return ERR_PTR(-EINVAL); |
557 | } | 546 | } |
558 | 547 | ||
559 | height = ALIGN(mode_cmd->height, 8); | ||
560 | if (obj->size < pitch * height) { | ||
561 | DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n", | ||
562 | pitch * height, obj->size); | ||
563 | return ERR_PTR(-EINVAL); | ||
564 | } | ||
565 | |||
566 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); | 548 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); |
567 | if (amdgpu_fb == NULL) { | 549 | if (amdgpu_fb == NULL) { |
568 | drm_gem_object_put_unlocked(obj); | 550 | drm_gem_object_put_unlocked(obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index fbf0ee5201c3..c3613604a4f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig | |||
@@ -4,8 +4,8 @@ | |||
4 | 4 | ||
5 | config HSA_AMD | 5 | config HSA_AMD |
6 | bool "HSA kernel driver for AMD GPU devices" | 6 | bool "HSA kernel driver for AMD GPU devices" |
7 | depends on DRM_AMDGPU && X86_64 | 7 | depends on DRM_AMDGPU && (X86_64 || ARM64) |
8 | imply AMD_IOMMU_V2 | 8 | imply AMD_IOMMU_V2 if X86_64 |
9 | select MMU_NOTIFIER | 9 | select MMU_NOTIFIER |
10 | help | 10 | help |
11 | Enable this if you want to use HSA features on AMD GPU devices. | 11 | Enable this if you want to use HSA features on AMD GPU devices. |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b7bc7d7d048f..5d85ff341385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
863 | return 0; | 863 | return 0; |
864 | } | 864 | } |
865 | 865 | ||
866 | #if CONFIG_X86_64 | ||
866 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | 867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, |
867 | uint32_t *num_entries, | 868 | uint32_t *num_entries, |
868 | struct crat_subtype_iolink *sub_type_hdr) | 869 | struct crat_subtype_iolink *sub_type_hdr) |
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | |||
905 | 906 | ||
906 | return 0; | 907 | return 0; |
907 | } | 908 | } |
909 | #endif | ||
908 | 910 | ||
909 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU | 911 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU |
910 | * | 912 | * |
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
920 | struct crat_subtype_generic *sub_type_hdr; | 922 | struct crat_subtype_generic *sub_type_hdr; |
921 | int avail_size = *size; | 923 | int avail_size = *size; |
922 | int numa_node_id; | 924 | int numa_node_id; |
925 | #ifdef CONFIG_X86_64 | ||
923 | uint32_t entries = 0; | 926 | uint32_t entries = 0; |
927 | #endif | ||
924 | int ret = 0; | 928 | int ret = 0; |
925 | 929 | ||
926 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) | 930 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) |
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
982 | sub_type_hdr->length); | 986 | sub_type_hdr->length); |
983 | 987 | ||
984 | /* Fill in Subtype: IO Link */ | 988 | /* Fill in Subtype: IO Link */ |
989 | #ifdef CONFIG_X86_64 | ||
985 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, | 990 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, |
986 | &entries, | 991 | &entries, |
987 | (struct crat_subtype_iolink *)sub_type_hdr); | 992 | (struct crat_subtype_iolink *)sub_type_hdr); |
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
992 | 997 | ||
993 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + | 998 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + |
994 | sub_type_hdr->length * entries); | 999 | sub_type_hdr->length * entries); |
1000 | #else | ||
1001 | pr_info("IO link not available for non x86 platforms\n"); | ||
1002 | #endif | ||
995 | 1003 | ||
996 | crat_table->num_domains++; | 1004 | crat_table->num_domains++; |
997 | } | 1005 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5f5b2acedbac..09da91644f9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) | |||
1093 | * the GPU device is not already present in the topology device | 1093 | * the GPU device is not already present in the topology device |
1094 | * list then return NULL. This means a new topology device has to | 1094 | * list then return NULL. This means a new topology device has to |
1095 | * be created for this GPU. | 1095 | * be created for this GPU. |
1096 | * TODO: Rather than assiging @gpu to first topology device withtout | ||
1097 | * gpu attached, it will better to have more stringent check. | ||
1098 | */ | 1096 | */ |
1099 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | 1097 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) |
1100 | { | 1098 | { |
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | |||
1102 | struct kfd_topology_device *out_dev = NULL; | 1100 | struct kfd_topology_device *out_dev = NULL; |
1103 | 1101 | ||
1104 | down_write(&topology_lock); | 1102 | down_write(&topology_lock); |
1105 | list_for_each_entry(dev, &topology_device_list, list) | 1103 | list_for_each_entry(dev, &topology_device_list, list) { |
1104 | /* Discrete GPUs need their own topology device list | ||
1105 | * entries. Don't assign them to CPU/APU nodes. | ||
1106 | */ | ||
1107 | if (!gpu->device_info->needs_iommu_device && | ||
1108 | dev->node_props.cpu_cores_count) | ||
1109 | continue; | ||
1110 | |||
1106 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { | 1111 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { |
1107 | dev->gpu = gpu; | 1112 | dev->gpu = gpu; |
1108 | out_dev = dev; | 1113 | out_dev = dev; |
1109 | break; | 1114 | break; |
1110 | } | 1115 | } |
1116 | } | ||
1111 | up_write(&topology_lock); | 1117 | up_write(&topology_lock); |
1112 | return out_dev; | 1118 | return out_dev; |
1113 | } | 1119 | } |
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) | |||
1392 | 1398 | ||
1393 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | 1399 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) |
1394 | { | 1400 | { |
1395 | const struct cpuinfo_x86 *cpuinfo; | ||
1396 | int first_cpu_of_numa_node; | 1401 | int first_cpu_of_numa_node; |
1397 | 1402 | ||
1398 | if (!cpumask || cpumask == cpu_none_mask) | 1403 | if (!cpumask || cpumask == cpu_none_mask) |
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | |||
1400 | first_cpu_of_numa_node = cpumask_first(cpumask); | 1405 | first_cpu_of_numa_node = cpumask_first(cpumask); |
1401 | if (first_cpu_of_numa_node >= nr_cpu_ids) | 1406 | if (first_cpu_of_numa_node >= nr_cpu_ids) |
1402 | return -1; | 1407 | return -1; |
1403 | cpuinfo = &cpu_data(first_cpu_of_numa_node); | 1408 | #ifdef CONFIG_X86_64 |
1404 | 1409 | return cpu_data(first_cpu_of_numa_node).apicid; | |
1405 | return cpuinfo->apicid; | 1410 | #else |
1411 | return first_cpu_of_numa_node; | ||
1412 | #endif | ||
1406 | } | 1413 | } |
1407 | 1414 | ||
1408 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor | 1415 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 34f35e9a3c46..f4fa40c387d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |||
1772 | + caps.min_input_signal * 0x101; | 1772 | + caps.min_input_signal * 0x101; |
1773 | 1773 | ||
1774 | if (dc_link_set_backlight_level(dm->backlight_link, | 1774 | if (dc_link_set_backlight_level(dm->backlight_link, |
1775 | brightness, 0, 0)) | 1775 | brightness, 0)) |
1776 | return 0; | 1776 | return 0; |
1777 | else | 1777 | else |
1778 | return 1; | 1778 | return 1; |
@@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && | 5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && |
5935 | !new_crtc_state->color_mgmt_changed && | 5935 | !new_crtc_state->color_mgmt_changed && |
5936 | !new_crtc_state->vrr_enabled) | 5936 | old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) |
5937 | continue; | 5937 | continue; |
5938 | 5938 | ||
5939 | if (!new_crtc_state->enable) | 5939 | if (!new_crtc_state->enable) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 52deacf39841..b0265dbebd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link) | |||
2190 | 2190 | ||
2191 | bool dc_link_set_backlight_level(const struct dc_link *link, | 2191 | bool dc_link_set_backlight_level(const struct dc_link *link, |
2192 | uint32_t backlight_pwm_u16_16, | 2192 | uint32_t backlight_pwm_u16_16, |
2193 | uint32_t frame_ramp, | 2193 | uint32_t frame_ramp) |
2194 | const struct dc_stream_state *stream) | ||
2195 | { | 2194 | { |
2196 | struct dc *core_dc = link->ctx->dc; | 2195 | struct dc *core_dc = link->ctx->dc; |
2197 | struct abm *abm = core_dc->res_pool->abm; | 2196 | struct abm *abm = core_dc->res_pool->abm; |
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
2206 | (abm->funcs->set_backlight_level_pwm == NULL)) | 2205 | (abm->funcs->set_backlight_level_pwm == NULL)) |
2207 | return false; | 2206 | return false; |
2208 | 2207 | ||
2209 | if (stream) | ||
2210 | ((struct dc_stream_state *)stream)->bl_pwm_level = | ||
2211 | backlight_pwm_u16_16; | ||
2212 | |||
2213 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); | 2208 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); |
2214 | 2209 | ||
2215 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", | 2210 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
@@ -2637,11 +2632,6 @@ void core_link_enable_stream( | |||
2637 | 2632 | ||
2638 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | 2633 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) |
2639 | enable_stream_features(pipe_ctx); | 2634 | enable_stream_features(pipe_ctx); |
2640 | |||
2641 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | ||
2642 | pipe_ctx->stream->bl_pwm_level, | ||
2643 | 0, | ||
2644 | pipe_ctx->stream); | ||
2645 | } | 2635 | } |
2646 | 2636 | ||
2647 | } | 2637 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
146 | */ | 146 | */ |
147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, |
148 | uint32_t backlight_pwm_u16_16, | 148 | uint32_t backlight_pwm_u16_16, |
149 | uint32_t frame_ramp, | 149 | uint32_t frame_ramp); |
150 | const struct dc_stream_state *stream); | ||
151 | 150 | ||
152 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 151 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
153 | 152 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
@@ -91,7 +91,6 @@ struct dc_stream_state { | |||
91 | 91 | ||
92 | /* DMCU info */ | 92 | /* DMCU info */ |
93 | unsigned int abm_level; | 93 | unsigned int abm_level; |
94 | unsigned int bl_pwm_level; | ||
95 | 94 | ||
96 | /* from core_stream struct */ | 95 | /* from core_stream struct */ |
97 | struct dc_context *ctx; | 96 | struct dc_context *ctx; |
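Taken together, these display hunks drop the cached per-stream bl_pwm_level and the stream argument to dc_link_set_backlight_level(), so callers pass only the link, the 16.16 fixed-point PWM value, and the frame ramp. The call in amdgpu_dm_backlight_update_status() therefore reduces to the three-argument form shown above, roughly:

	/* Sketch of the trimmed call; brightness is the u16.16 PWM value. */
	if (dc_link_set_backlight_level(dm->backlight_link, brightness, 0))
		return 0;	/* backlight programmed */
	return 1;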
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4bf24758217f..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | |||
1000 | 1000 | ||
1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); | 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); |
1002 | 1002 | ||
1003 | if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | 1003 | if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) |
1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ |
1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); |
1006 | /* un-mute audio */ | 1006 | /* un-mute audio */ |
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( |
1018 | pipe_ctx->stream_res.stream_enc, true); | 1018 | pipe_ctx->stream_res.stream_enc, true); |
1019 | if (pipe_ctx->stream_res.audio) { | 1019 | if (pipe_ctx->stream_res.audio) { |
1020 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
1021 | |||
1020 | if (option != KEEP_ACQUIRED_RESOURCE || | 1022 | if (option != KEEP_ACQUIRED_RESOURCE || |
1021 | !dc->debug.az_endpoint_mute_only) { | 1023 | !dc->debug.az_endpoint_mute_only) { |
1022 | /*only disalbe az_endpoint if power down or free*/ | 1024 | /*only disalbe az_endpoint if power down or free*/ |
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
1036 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); | 1038 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); |
1037 | pipe_ctx->stream_res.audio = NULL; | 1039 | pipe_ctx->stream_res.audio = NULL; |
1038 | } | 1040 | } |
1041 | if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | ||
1042 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | ||
1043 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | ||
1039 | 1044 | ||
1040 | /* TODO: notify audio driver for if audio modes list changed | 1045 | /* TODO: notify audio driver for if audio modes list changed |
1041 | * add audio mode list change flag */ | 1046 | * add audio mode list change flag */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | |||
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position( | |||
463 | if (src_y_offset >= (int)param->viewport.height) | 463 | if (src_y_offset >= (int)param->viewport.height) |
464 | cur_en = 0; /* not visible beyond bottom edge*/ | 464 | cur_en = 0; /* not visible beyond bottom edge*/ |
465 | 465 | ||
466 | if (src_y_offset < 0) | 466 | if (src_y_offset + (int)height <= 0) |
467 | cur_en = 0; /* not visible beyond top edge*/ | 467 | cur_en = 0; /* not visible beyond top edge*/ |
468 | 468 | ||
469 | REG_UPDATE(CURSOR0_CONTROL, | 469 | REG_UPDATE(CURSOR0_CONTROL, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( | |||
1140 | if (src_y_offset >= (int)param->viewport.height) | 1140 | if (src_y_offset >= (int)param->viewport.height) |
1141 | cur_en = 0; /* not visible beyond bottom edge*/ | 1141 | cur_en = 0; /* not visible beyond bottom edge*/ |
1142 | 1142 | ||
1143 | if (src_y_offset < 0) //+ (int)hubp->curs_attr.height | 1143 | if (src_y_offset + (int)hubp->curs_attr.height <= 0) |
1144 | cur_en = 0; /* not visible beyond top edge*/ | 1144 | cur_en = 0; /* not visible beyond top edge*/ |
1145 | 1145 | ||
1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) | 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) |
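Both cursor hunks (dpp1 and hubp1) replace the bare src_y_offset < 0 test with one that folds in the cursor height, so a cursor that merely straddles the top edge of the viewport is no longer blanked; it is hidden only when it ends at or above the top. A standalone illustration of the condition (plain C, hypothetical values):

	#include <linux/types.h>

	/* Hidden only when the cursor's bottom edge is at or above y == 0. */
	static bool cursor_hidden_above_top(int src_y_offset, int height)
	{
		return src_y_offset + height <= 0;
	}

	/* src_y_offset = -16, height = 64  -> visible (returns false)
	 * src_y_offset = -64, height = 64  -> hidden  (returns true)
	 */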
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91e015e14355..58a12ddf12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface( | |||
2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) | 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) |
2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
2358 | 2358 | tg = pipe_ctx->stream_res.tg; | |
2359 | /* Skip inactive pipes and ones already updated */ | 2359 | /* Skip inactive pipes and ones already updated */ |
2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream |
2361 | || !pipe_ctx->plane_state) | 2361 | || !pipe_ctx->plane_state |
2362 | || !tg->funcs->is_tg_enabled(tg)) | ||
2362 | continue; | 2363 | continue; |
2363 | 2364 | ||
2364 | pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); | 2365 | tg->funcs->lock(tg); |
2365 | 2366 | ||
2366 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( | 2367 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( |
2367 | pipe_ctx->plane_res.hubp, | 2368 | pipe_ctx->plane_res.hubp, |
2368 | &pipe_ctx->dlg_regs, | 2369 | &pipe_ctx->dlg_regs, |
2369 | &pipe_ctx->ttu_regs); | 2370 | &pipe_ctx->ttu_regs); |
2370 | } | ||
2371 | |||
2372 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
2373 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
2374 | 2371 | ||
2375 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2372 | tg->funcs->unlock(tg); |
2376 | || !pipe_ctx->plane_state) | 2373 | } |
2377 | continue; | ||
2378 | |||
2379 | dcn10_pipe_control_lock(dc, pipe_ctx, false); | ||
2380 | } | ||
2381 | 2374 | ||
2382 | if (num_planes == 0) | 2375 | if (num_planes == 0) |
2383 | false_optc_underflow_wa(dc, stream, tg); | 2376 | false_optc_underflow_wa(dc, stream, tg); |
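Because the dcn10_apply_ctx_for_surface() hunk interleaves removals and additions, the result is easier to read assembled in one piece: the former lock pass and unlock pass become a single pass that skips pipes whose timing generator is disabled and takes and releases the TG lock around the interdependent HUBP programming. Reconstructed from the hunk (not a verbatim copy):

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		tg = pipe_ctx->stream_res.tg;

		/* Skip inactive pipes, the pipe being updated, and disabled TGs. */
		if (!pipe_ctx->stream || pipe_ctx->stream == stream
				|| !pipe_ctx->plane_state
				|| !tg->funcs->is_tg_enabled(tg))
			continue;

		tg->funcs->lock(tg);

		pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs);

		tg->funcs->unlock(tg);
	}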
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c | |||
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le | |||
57 | #define NUM_POWER_FN_SEGS 8 | 57 | #define NUM_POWER_FN_SEGS 8 |
58 | #define NUM_BL_CURVE_SEGS 16 | 58 | #define NUM_BL_CURVE_SEGS 16 |
59 | 59 | ||
60 | #pragma pack(push, 1) | ||
60 | /* NOTE: iRAM is 256B in size */ | 61 | /* NOTE: iRAM is 256B in size */ |
61 | struct iram_table_v_2 { | 62 | struct iram_table_v_2 { |
62 | /* flags */ | 63 | /* flags */ |
@@ -100,6 +101,7 @@ struct iram_table_v_2 { | |||
100 | uint8_t dummy8; /* 0xfe */ | 101 | uint8_t dummy8; /* 0xfe */ |
101 | uint8_t dummy9; /* 0xff */ | 102 | uint8_t dummy9; /* 0xff */ |
102 | }; | 103 | }; |
104 | #pragma pack(pop) | ||
103 | 105 | ||
104 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) | 106 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) |
105 | { | 107 | { |
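The power_helpers hunk exists because the ABM iRAM image is consumed byte-for-byte by firmware: struct iram_table_v_2 must stay exactly 256 bytes with every field at its documented offset, so the definition is wrapped in #pragma pack(push, 1) / #pragma pack(pop) to suppress compiler padding. A toy illustration of the pattern (the struct below is not the real iRAM layout):

	#pragma pack(push, 1)		/* no padding between members */
	struct example_fw_table {
		uint8_t  flags;		/* offset 0x00 */
		uint16_t threshold;	/* offset 0x01 rather than 0x02 */
		uint8_t  crc;		/* offset 0x03 */
	};
	#pragma pack(pop)		/* restore the default alignment */

	/* sizeof(struct example_fw_table) is 4 when packed, 6 otherwise. */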
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) | |||
753 | return 0; | 753 | return 0; |
754 | } | 754 | } |
755 | 755 | ||
756 | static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) | ||
757 | { | ||
758 | uint32_t result; | ||
759 | |||
760 | PP_ASSERT_WITH_CODE( | ||
761 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, | ||
762 | "[Run_ACG_BTC] Attempt to run ACG BTC failed!", | ||
763 | return -EINVAL); | ||
764 | |||
765 | result = smum_get_argument(hwmgr); | ||
766 | PP_ASSERT_WITH_CODE(result == 1, | ||
767 | "Failed to run ACG BTC!", return -EINVAL); | ||
768 | |||
769 | return 0; | ||
770 | } | ||
771 | |||
756 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) | 772 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) |
757 | { | 773 | { |
758 | struct vega12_hwmgr *data = | 774 | struct vega12_hwmgr *data = |
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
931 | "Failed to initialize SMC table!", | 947 | "Failed to initialize SMC table!", |
932 | result = tmp_result); | 948 | result = tmp_result); |
933 | 949 | ||
950 | tmp_result = vega12_run_acg_btc(hwmgr); | ||
951 | PP_ASSERT_WITH_CODE(!tmp_result, | ||
952 | "Failed to run ACG BTC!", | ||
953 | result = tmp_result); | ||
954 | |||
934 | result = vega12_enable_all_smu_features(hwmgr); | 955 | result = vega12_enable_all_smu_features(hwmgr); |
935 | PP_ASSERT_WITH_CODE(!result, | 956 | PP_ASSERT_WITH_CODE(!result, |
936 | "Failed to enable all smu features!", | 957 | "Failed to enable all smu features!", |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2802 | MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2802 | return 0; | 2803 | return 0; |
2803 | } | 2804 | } |
2804 | 2805 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h | |||
@@ -41,7 +41,7 @@ struct intel_gvt_mpt { | |||
41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
42 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
44 | void (*detach_vgpu)(unsigned long handle); | 44 | void (*detach_vgpu)(void *vgpu); |
45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); | 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); |
46 | unsigned long (*from_virt_to_mfn)(void *p); | 46 | unsigned long (*from_virt_to_mfn)(void *p); |
47 | int (*enable_page_track)(unsigned long handle, u64 gfn); | 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
996 | { | 996 | { |
997 | unsigned int index; | 997 | unsigned int index; |
998 | u64 virtaddr; | 998 | u64 virtaddr; |
999 | unsigned long req_size, pgoff = 0; | 999 | unsigned long req_size, pgoff, req_start; |
1000 | pgprot_t pg_prot; | 1000 | pgprot_t pg_prot; |
1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
1002 | 1002 | ||
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
1014 | pg_prot = vma->vm_page_prot; | 1014 | pg_prot = vma->vm_page_prot; |
1015 | virtaddr = vma->vm_start; | 1015 | virtaddr = vma->vm_start; |
1016 | req_size = vma->vm_end - vma->vm_start; | 1016 | req_size = vma->vm_end - vma->vm_start; |
1017 | pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; | 1017 | pgoff = vma->vm_pgoff & |
1018 | ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); | ||
1019 | req_start = pgoff << PAGE_SHIFT; | ||
1020 | |||
1021 | if (!intel_vgpu_in_aperture(vgpu, req_start)) | ||
1022 | return -EINVAL; | ||
1023 | if (req_start + req_size > | ||
1024 | vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) | ||
1025 | return -EINVAL; | ||
1026 | |||
1027 | pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; | ||
1018 | 1028 | ||
1019 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); | 1029 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); |
1020 | } | 1030 | } |
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) | |||
1662 | return 0; | 1672 | return 0; |
1663 | } | 1673 | } |
1664 | 1674 | ||
1665 | static void kvmgt_detach_vgpu(unsigned long handle) | 1675 | static void kvmgt_detach_vgpu(void *p_vgpu) |
1666 | { | 1676 | { |
1667 | /* nothing to do here */ | 1677 | int i; |
1678 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; | ||
1679 | |||
1680 | if (!vgpu->vdev.region) | ||
1681 | return; | ||
1682 | |||
1683 | for (i = 0; i < vgpu->vdev.num_regions; i++) | ||
1684 | if (vgpu->vdev.region[i].ops->release) | ||
1685 | vgpu->vdev.region[i].ops->release(vgpu, | ||
1686 | &vgpu->vdev.region[i]); | ||
1687 | vgpu->vdev.num_regions = 0; | ||
1688 | kfree(vgpu->vdev.region); | ||
1689 | vgpu->vdev.region = NULL; | ||
1668 | } | 1690 | } |
1669 | 1691 | ||
1670 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | 1692 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) |
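The intel_vgpu_mmap() hunk stops hard-coding the aperture base as the mapping offset: the requested page offset is taken from vma->vm_pgoff, rejected if it falls outside the vGPU's aperture slice, and only then rebased onto the host aperture before remap_pfn_range(). A hedged sketch of that validate-then-rebase shape (remap_pfn_range() and the VFIO offset/page macros are real; the aperture_* names stand in for the vgpu_aperture_*/gvt_aperture_* and intel_vgpu_in_aperture() helpers used in the hunk):

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;
	req_size = vma->vm_end - vma->vm_start;

	/* The requested window must sit entirely inside this vGPU's slice. */
	if (!aperture_contains(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size > aperture_offset + aperture_size)
		return -EINVAL;

	/* Rebase the validated offset onto the host aperture and map it. */
	pgoff += aperture_pa_base >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pgoff, req_size,
			       vma->vm_page_prot);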
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h | |||
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) | |||
101 | if (!intel_gvt_host.mpt->detach_vgpu) | 101 | if (!intel_gvt_host.mpt->detach_vgpu) |
102 | return; | 102 | return; |
103 | 103 | ||
104 | intel_gvt_host.mpt->detach_vgpu(vgpu->handle); | 104 | intel_gvt_host.mpt->detach_vgpu(vgpu); |
105 | } | 105 | } |
106 | 106 | ||
107 | #define MSI_CAP_CONTROL(offset) (offset + 2) | 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) |
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
@@ -46,7 +46,6 @@ struct meson_crtc { | |||
46 | struct drm_crtc base; | 46 | struct drm_crtc base; |
47 | struct drm_pending_vblank_event *event; | 47 | struct drm_pending_vblank_event *event; |
48 | struct meson_drm *priv; | 48 | struct meson_drm *priv; |
49 | bool enabled; | ||
50 | }; | 49 | }; |
51 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) | 50 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) |
52 | 51 | ||
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { | |||
82 | 81 | ||
83 | }; | 82 | }; |
84 | 83 | ||
85 | static void meson_crtc_enable(struct drm_crtc *crtc) | 84 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, |
85 | struct drm_crtc_state *old_state) | ||
86 | { | 86 | { |
87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
88 | struct drm_crtc_state *crtc_state = crtc->state; | 88 | struct drm_crtc_state *crtc_state = crtc->state; |
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) | |||
108 | 108 | ||
109 | drm_crtc_vblank_on(crtc); | 109 | drm_crtc_vblank_on(crtc); |
110 | 110 | ||
111 | meson_crtc->enabled = true; | ||
112 | } | ||
113 | |||
114 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
115 | struct drm_crtc_state *old_state) | ||
116 | { | ||
117 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
118 | struct meson_drm *priv = meson_crtc->priv; | ||
119 | |||
120 | DRM_DEBUG_DRIVER("\n"); | ||
121 | |||
122 | if (!meson_crtc->enabled) | ||
123 | meson_crtc_enable(crtc); | ||
124 | |||
125 | priv->viu.osd1_enabled = true; | 111 | priv->viu.osd1_enabled = true; |
126 | } | 112 | } |
127 | 113 | ||
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, | |||
153 | 139 | ||
154 | crtc->state->event = NULL; | 140 | crtc->state->event = NULL; |
155 | } | 141 | } |
156 | |||
157 | meson_crtc->enabled = false; | ||
158 | } | 142 | } |
159 | 143 | ||
160 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | 144 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, |
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | |||
163 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 147 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
164 | unsigned long flags; | 148 | unsigned long flags; |
165 | 149 | ||
166 | if (crtc->state->enable && !meson_crtc->enabled) | ||
167 | meson_crtc_enable(crtc); | ||
168 | |||
169 | if (crtc->state->event) { | 150 | if (crtc->state->event) { |
170 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | 151 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); |
171 | 152 | ||
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { | |||
75 | .fb_create = drm_gem_fb_create, | 75 | .fb_create = drm_gem_fb_create, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { | ||
79 | .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, | ||
80 | }; | ||
81 | |||
78 | static irqreturn_t meson_irq(int irq, void *arg) | 82 | static irqreturn_t meson_irq(int irq, void *arg) |
79 | { | 83 | { |
80 | struct drm_device *dev = arg; | 84 | struct drm_device *dev = arg; |
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | |||
266 | drm->mode_config.max_width = 3840; | 270 | drm->mode_config.max_width = 3840; |
267 | drm->mode_config.max_height = 2160; | 271 | drm->mode_config.max_height = 2160; |
268 | drm->mode_config.funcs = &meson_mode_config_funcs; | 272 | drm->mode_config.funcs = &meson_mode_config_funcs; |
273 | drm->mode_config.helper_private = &meson_mode_config_helpers; | ||
269 | 274 | ||
270 | /* Hardware Initialization */ | 275 | /* Hardware Initialization */ |
271 | 276 | ||
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, | |||
388 | remote_node = of_graph_get_remote_port_parent(ep); | 393 | remote_node = of_graph_get_remote_port_parent(ep); |
389 | if (!remote_node || | 394 | if (!remote_node || |
390 | remote_node == parent || /* Ignore parent endpoint */ | 395 | remote_node == parent || /* Ignore parent endpoint */ |
391 | !of_device_is_available(remote_node)) | 396 | !of_device_is_available(remote_node)) { |
397 | of_node_put(remote_node); | ||
392 | continue; | 398 | continue; |
399 | } | ||
393 | 400 | ||
394 | count += meson_probe_remote(pdev, match, remote, remote_node); | 401 | count += meson_probe_remote(pdev, match, remote, remote_node); |
395 | 402 | ||
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) | |||
408 | 415 | ||
409 | for_each_endpoint_of_node(np, ep) { | 416 | for_each_endpoint_of_node(np, ep) { |
410 | remote = of_graph_get_remote_port_parent(ep); | 417 | remote = of_graph_get_remote_port_parent(ep); |
411 | if (!remote || !of_device_is_available(remote)) | 418 | if (!remote || !of_device_is_available(remote)) { |
419 | of_node_put(remote); | ||
412 | continue; | 420 | continue; |
421 | } | ||
413 | 422 | ||
414 | count += meson_probe_remote(pdev, &match, np, remote); | 423 | count += meson_probe_remote(pdev, &match, np, remote); |
424 | of_node_put(remote); | ||
415 | } | 425 | } |
416 | 426 | ||
417 | if (count && !match) | 427 | if (count && !match) |
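Both meson hunks fix the same reference-count leak: of_graph_get_remote_port_parent() returns a device node with an elevated refcount, so every early `continue` must drop it with of_node_put() before moving on. The general shape of the fix (taken from the hunks; the loop body comment is illustrative):

	/* Drop the OF node reference on every path, including early skips. */
	for_each_endpoint_of_node(np, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote || !of_device_is_available(remote)) {
			of_node_put(remote);	/* of_node_put(NULL) is a no-op */
			continue;
		}

		/* ... use remote ... */

		of_node_put(remote);		/* balanced on the normal path too */
	}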
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -2435,6 +2435,38 @@ nv140_chipset = { | |||
2435 | }; | 2435 | }; |
2436 | 2436 | ||
2437 | static const struct nvkm_device_chip | 2437 | static const struct nvkm_device_chip |
2438 | nv162_chipset = { | ||
2439 | .name = "TU102", | ||
2440 | .bar = tu104_bar_new, | ||
2441 | .bios = nvkm_bios_new, | ||
2442 | .bus = gf100_bus_new, | ||
2443 | .devinit = tu104_devinit_new, | ||
2444 | .fault = tu104_fault_new, | ||
2445 | .fb = gv100_fb_new, | ||
2446 | .fuse = gm107_fuse_new, | ||
2447 | .gpio = gk104_gpio_new, | ||
2448 | .i2c = gm200_i2c_new, | ||
2449 | .ibus = gm200_ibus_new, | ||
2450 | .imem = nv50_instmem_new, | ||
2451 | .ltc = gp102_ltc_new, | ||
2452 | .mc = tu104_mc_new, | ||
2453 | .mmu = tu104_mmu_new, | ||
2454 | .pci = gp100_pci_new, | ||
2455 | .pmu = gp102_pmu_new, | ||
2456 | .therm = gp100_therm_new, | ||
2457 | .timer = gk20a_timer_new, | ||
2458 | .top = gk104_top_new, | ||
2459 | .ce[0] = tu104_ce_new, | ||
2460 | .ce[1] = tu104_ce_new, | ||
2461 | .ce[2] = tu104_ce_new, | ||
2462 | .ce[3] = tu104_ce_new, | ||
2463 | .ce[4] = tu104_ce_new, | ||
2464 | .disp = tu104_disp_new, | ||
2465 | .dma = gv100_dma_new, | ||
2466 | .fifo = tu104_fifo_new, | ||
2467 | }; | ||
2468 | |||
2469 | static const struct nvkm_device_chip | ||
2438 | nv164_chipset = { | 2470 | nv164_chipset = { |
2439 | .name = "TU104", | 2471 | .name = "TU104", |
2440 | .bar = tu104_bar_new, | 2472 | .bar = tu104_bar_new, |
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, | |||
2950 | case 0x138: device->chip = &nv138_chipset; break; | 2982 | case 0x138: device->chip = &nv138_chipset; break; |
2951 | case 0x13b: device->chip = &nv13b_chipset; break; | 2983 | case 0x13b: device->chip = &nv13b_chipset; break; |
2952 | case 0x140: device->chip = &nv140_chipset; break; | 2984 | case 0x140: device->chip = &nv140_chipset; break; |
2985 | case 0x162: device->chip = &nv162_chipset; break; | ||
2953 | case 0x164: device->chip = &nv164_chipset; break; | 2986 | case 0x164: device->chip = &nv164_chipset; break; |
2954 | case 0x166: device->chip = &nv166_chipset; break; | 2987 | case 0x166: device->chip = &nv166_chipset; break; |
2955 | default: | 2988 | default: |
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = { | |||
250 | #if defined(CONFIG_DEBUG_FS) | 250 | #if defined(CONFIG_DEBUG_FS) |
251 | .debugfs_init = qxl_debugfs_init, | 251 | .debugfs_init = qxl_debugfs_init, |
252 | #endif | 252 | #endif |
253 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
254 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
255 | .gem_prime_export = drm_gem_prime_export, | 253 | .gem_prime_export = drm_gem_prime_export, |
256 | .gem_prime_import = drm_gem_prime_import, | 254 | .gem_prime_import = drm_gem_prime_import, |
257 | .gem_prime_pin = qxl_gem_prime_pin, | 255 | .gem_prime_pin = qxl_gem_prime_pin, |
258 | .gem_prime_unpin = qxl_gem_prime_unpin, | 256 | .gem_prime_unpin = qxl_gem_prime_unpin, |
259 | .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, | ||
260 | .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, | ||
261 | .gem_prime_vmap = qxl_gem_prime_vmap, | 257 | .gem_prime_vmap = qxl_gem_prime_vmap, |
262 | .gem_prime_vunmap = qxl_gem_prime_vunmap, | 258 | .gem_prime_vunmap = qxl_gem_prime_vunmap, |
263 | .gem_prime_mmap = qxl_gem_prime_mmap, | 259 | .gem_prime_mmap = qxl_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c | |||
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) | |||
38 | WARN_ONCE(1, "not implemented"); | 38 | WARN_ONCE(1, "not implemented"); |
39 | } | 39 | } |
40 | 40 | ||
41 | struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
42 | { | ||
43 | WARN_ONCE(1, "not implemented"); | ||
44 | return ERR_PTR(-ENOSYS); | ||
45 | } | ||
46 | |||
47 | struct drm_gem_object *qxl_gem_prime_import_sg_table( | ||
48 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
49 | struct sg_table *table) | ||
50 | { | ||
51 | WARN_ONCE(1, "not implemented"); | ||
52 | return ERR_PTR(-ENOSYS); | ||
53 | } | ||
54 | |||
55 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) | 41 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) |
56 | { | 42 | { |
57 | WARN_ONCE(1, "not implemented"); | 43 | WARN_ONCE(1, "not implemented"); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..37f93022a106 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
@@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, | |||
113 | child_count++; | 113 | child_count++; |
114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, | 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, |
115 | &panel, &bridge); | 115 | &panel, &bridge); |
116 | if (!ret) | 116 | if (!ret) { |
117 | of_node_put(endpoint); | ||
117 | break; | 118 | break; |
119 | } | ||
118 | } | 120 | } |
119 | 121 | ||
120 | of_node_put(port); | 122 | of_node_put(port); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, | |||
786 | remote = of_graph_get_remote_port_parent(ep); | 786 | remote = of_graph_get_remote_port_parent(ep); |
787 | if (!remote) | 787 | if (!remote) |
788 | continue; | 788 | continue; |
789 | of_node_put(remote); | ||
789 | 790 | ||
790 | /* does this node match any registered engines? */ | 791 | /* does this node match any registered engines? */ |
791 | list_for_each_entry(frontend, &drv->frontend_list, list) { | 792 | list_for_each_entry(frontend, &drv->frontend_list, list) { |
792 | if (remote == frontend->node) { | 793 | if (remote == frontend->node) { |
793 | of_node_put(remote); | ||
794 | of_node_put(port); | 794 | of_node_put(port); |
795 | of_node_put(ep); | ||
795 | return frontend; | 796 | return frontend; |
796 | } | 797 | } |
797 | } | 798 | } |
798 | } | 799 | } |
799 | 800 | of_node_put(port); | |
800 | return ERR_PTR(-EINVAL); | 801 | return ERR_PTR(-EINVAL); |
801 | } | 802 | } |
802 | 803 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c | |||
@@ -127,14 +127,10 @@ static struct drm_driver driver = { | |||
127 | #if defined(CONFIG_DEBUG_FS) | 127 | #if defined(CONFIG_DEBUG_FS) |
128 | .debugfs_init = virtio_gpu_debugfs_init, | 128 | .debugfs_init = virtio_gpu_debugfs_init, |
129 | #endif | 129 | #endif |
130 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
131 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
132 | .gem_prime_export = drm_gem_prime_export, | 130 | .gem_prime_export = drm_gem_prime_export, |
133 | .gem_prime_import = drm_gem_prime_import, | 131 | .gem_prime_import = drm_gem_prime_import, |
134 | .gem_prime_pin = virtgpu_gem_prime_pin, | 132 | .gem_prime_pin = virtgpu_gem_prime_pin, |
135 | .gem_prime_unpin = virtgpu_gem_prime_unpin, | 133 | .gem_prime_unpin = virtgpu_gem_prime_unpin, |
136 | .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, | ||
137 | .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, | ||
138 | .gem_prime_vmap = virtgpu_gem_prime_vmap, | 134 | .gem_prime_vmap = virtgpu_gem_prime_vmap, |
139 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, | 135 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, |
140 | .gem_prime_mmap = virtgpu_gem_prime_mmap, | 136 | .gem_prime_mmap = virtgpu_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); | |||
372 | /* virtgpu_prime.c */ | 372 | /* virtgpu_prime.c */ |
373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); | 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); |
374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); | 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); |
375 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
376 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
377 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
378 | struct sg_table *sgt); | ||
379 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); | 375 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); |
380 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 376 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
381 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, | 377 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c | |||
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
39 | WARN_ONCE(1, "not implemented"); | 39 | WARN_ONCE(1, "not implemented"); |
40 | } | 40 | } |
41 | 41 | ||
42 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
43 | { | ||
44 | WARN_ONCE(1, "not implemented"); | ||
45 | return ERR_PTR(-ENODEV); | ||
46 | } | ||
47 | |||
48 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
49 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
50 | struct sg_table *table) | ||
51 | { | ||
52 | WARN_ONCE(1, "not implemented"); | ||
53 | return ERR_PTR(-ENODEV); | ||
54 | } | ||
55 | |||
56 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) | 42 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) |
57 | { | 43 | { |
58 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); | 44 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); |
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO | |||
21 | bool "Laptop Hybrid Graphics - GPU switching support" | 21 | bool "Laptop Hybrid Graphics - GPU switching support" |
22 | depends on X86 | 22 | depends on X86 |
23 | depends on ACPI | 23 | depends on ACPI |
24 | depends on PCI | ||
24 | select VGA_ARB | 25 | select VGA_ARB |
25 | help | 26 | help |
26 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer | 27 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer |
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 0e30fa00204c..f9b8e3e23a8e 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c | |||
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, | |||
393 | } | 393 | } |
394 | 394 | ||
395 | rv = lm80_read_value(client, LM80_REG_FANDIV); | 395 | rv = lm80_read_value(client, LM80_REG_FANDIV); |
396 | if (rv < 0) | 396 | if (rv < 0) { |
397 | mutex_unlock(&data->update_lock); | ||
397 | return rv; | 398 | return rv; |
399 | } | ||
398 | reg = (rv & ~(3 << (2 * (nr + 1)))) | 400 | reg = (rv & ~(3 << (2 * (nr + 1)))) |
399 | | (data->fan_div[nr] << (2 * (nr + 1))); | 401 | | (data->fan_div[nr] << (2 * (nr + 1))); |
400 | lm80_write_value(client, LM80_REG_FANDIV, reg); | 402 | lm80_write_value(client, LM80_REG_FANDIV, reg); |
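The lm80 hunk is a lock-leak fix: set_fan_div() takes data->update_lock earlier in the function, so the early return on a failed LM80_REG_FANDIV read has to drop the mutex first or every later caller of the sysfs attribute blocks forever. Sketched around the code shown above (the trailing success return is illustrative):

	mutex_lock(&data->update_lock);

	rv = lm80_read_value(client, LM80_REG_FANDIV);
	if (rv < 0) {
		mutex_unlock(&data->update_lock);	/* release before bailing */
		return rv;
	}

	/* ... update the divider bits and write the register back ... */

	mutex_unlock(&data->update_lock);
	return count;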
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c3040079b1cb..4adec4ab7d06 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -44,8 +44,8 @@ | |||
44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 | 44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 |
45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 | 45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 |
46 | * (0xd451) | 46 | * (0xd451) |
47 | * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 | 47 | * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 |
48 | * (0xd459) | 48 | * (0xd429) |
49 | * | 49 | * |
50 | * #temp lists the number of monitored temperature sources (first value) plus | 50 | * #temp lists the number of monitored temperature sources (first value) plus |
51 | * the number of directly connectable temperature sensors (second value). | 51 | * the number of directly connectable temperature sensors (second value). |
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); | |||
138 | #define SIO_NCT6795_ID 0xd350 | 138 | #define SIO_NCT6795_ID 0xd350 |
139 | #define SIO_NCT6796_ID 0xd420 | 139 | #define SIO_NCT6796_ID 0xd420 |
140 | #define SIO_NCT6797_ID 0xd450 | 140 | #define SIO_NCT6797_ID 0xd450 |
141 | #define SIO_NCT6798_ID 0xd458 | 141 | #define SIO_NCT6798_ID 0xd428 |
142 | #define SIO_ID_MASK 0xFFF8 | 142 | #define SIO_ID_MASK 0xFFF8 |
143 | 143 | ||
144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; | 144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; |
@@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) | |||
4508 | 4508 | ||
4509 | if (data->kind == nct6791 || data->kind == nct6792 || | 4509 | if (data->kind == nct6791 || data->kind == nct6792 || |
4510 | data->kind == nct6793 || data->kind == nct6795 || | 4510 | data->kind == nct6793 || data->kind == nct6795 || |
4511 | data->kind == nct6796) | 4511 | data->kind == nct6796 || data->kind == nct6797 || |
4512 | data->kind == nct6798) | ||
4512 | nct6791_enable_io_mapping(sioreg); | 4513 | nct6791_enable_io_mapping(sioreg); |
4513 | 4514 | ||
4514 | superio_exit(sioreg); | 4515 | superio_exit(sioreg); |
@@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | |||
4644 | 4645 | ||
4645 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || | 4646 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || |
4646 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || | 4647 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || |
4647 | sio_data->kind == nct6796) | 4648 | sio_data->kind == nct6796 || sio_data->kind == nct6797 || |
4649 | sio_data->kind == nct6798) | ||
4648 | nct6791_enable_io_mapping(sioaddr); | 4650 | nct6791_enable_io_mapping(sioaddr); |
4649 | 4651 | ||
4650 | superio_exit(sioaddr); | 4652 | superio_exit(sioaddr); |
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 423903f87955..391118c8aae8 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
@@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev, | |||
380 | val *= 1000000ULL; | 380 | val *= 1000000ULL; |
381 | break; | 381 | break; |
382 | case 2: | 382 | case 2: |
383 | val = get_unaligned_be32(&power->update_tag) * | 383 | val = (u64)get_unaligned_be32(&power->update_tag) * |
384 | occ->powr_sample_time_us; | 384 | occ->powr_sample_time_us; |
385 | break; | 385 | break; |
386 | case 3: | 386 | case 3: |
387 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 387 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
@@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev, | |||
425 | &power->update_tag); | 425 | &power->update_tag); |
426 | break; | 426 | break; |
427 | case 2: | 427 | case 2: |
428 | val = get_unaligned_be32(&power->update_tag) * | 428 | val = (u64)get_unaligned_be32(&power->update_tag) * |
429 | occ->powr_sample_time_us; | 429 | occ->powr_sample_time_us; |
430 | break; | 430 | break; |
431 | case 3: | 431 | case 3: |
432 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 432 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
@@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
463 | &power->system.update_tag); | 463 | &power->system.update_tag); |
464 | break; | 464 | break; |
465 | case 2: | 465 | case 2: |
466 | val = get_unaligned_be32(&power->system.update_tag) * | 466 | val = (u64)get_unaligned_be32(&power->system.update_tag) * |
467 | occ->powr_sample_time_us; | 467 | occ->powr_sample_time_us; |
468 | break; | 468 | break; |
469 | case 3: | 469 | case 3: |
470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; | 470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; |
@@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
477 | &power->proc.update_tag); | 477 | &power->proc.update_tag); |
478 | break; | 478 | break; |
479 | case 6: | 479 | case 6: |
480 | val = get_unaligned_be32(&power->proc.update_tag) * | 480 | val = (u64)get_unaligned_be32(&power->proc.update_tag) * |
481 | occ->powr_sample_time_us; | 481 | occ->powr_sample_time_us; |
482 | break; | 482 | break; |
483 | case 7: | 483 | case 7: |
484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; | 484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; |
@@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
491 | &power->vdd.update_tag); | 491 | &power->vdd.update_tag); |
492 | break; | 492 | break; |
493 | case 10: | 493 | case 10: |
494 | val = get_unaligned_be32(&power->vdd.update_tag) * | 494 | val = (u64)get_unaligned_be32(&power->vdd.update_tag) * |
495 | occ->powr_sample_time_us; | 495 | occ->powr_sample_time_us; |
496 | break; | 496 | break; |
497 | case 11: | 497 | case 11: |
498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; | 498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; |
@@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
505 | &power->vdn.update_tag); | 505 | &power->vdn.update_tag); |
506 | break; | 506 | break; |
507 | case 14: | 507 | case 14: |
508 | val = get_unaligned_be32(&power->vdn.update_tag) * | 508 | val = (u64)get_unaligned_be32(&power->vdn.update_tag) * |
509 | occ->powr_sample_time_us; | 509 | occ->powr_sample_time_us; |
510 | break; | 510 | break; |
511 | case 15: | 511 | case 15: |
512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; | 512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; |
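
The occ hunks above all make the same change: one operand is widened to u64 before the multiply. A small standalone sketch (the values are invented) of why the cast matters; without it the product is computed in 32 bits and wraps before the assignment:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t update_tag = 4000000;   /* hypothetical sample counter */
        uint32_t sample_time_us = 2000;  /* hypothetical sample period */

        /* both operands are 32-bit, so the product wraps before the store */
        uint64_t wrong = update_tag * sample_time_us;

        /* widening one operand first keeps the full 64-bit product */
        uint64_t right = (uint64_t)update_tag * sample_time_us;

        printf("truncated: %llu\n", (unsigned long long)wrong);
        printf("widened:   %llu\n", (unsigned long long)right);
        return 0;
    }
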
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 8844c9565d2a..7053be59ad2e 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c | |||
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = { | |||
88 | .data = (void *)2 | 88 | .data = (void *)2 |
89 | }, | 89 | }, |
90 | { | 90 | { |
91 | .compatible = "ti,tmp422", | 91 | .compatible = "ti,tmp442", |
92 | .data = (void *)3 | 92 | .data = (void *)3 |
93 | }, | 93 | }, |
94 | { }, | 94 | { }, |
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b532e2c9cf5c..f8c00b94817f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c | |||
@@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, | |||
901 | master->regs + | 901 | master->regs + |
902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
903 | 903 | ||
904 | if (!old_dyn_addr) | ||
905 | return 0; | ||
906 | |||
907 | master->addrs[data->index] = dev->info.dyn_addr; | 904 | master->addrs[data->index] = dev->info.dyn_addr; |
908 | 905 | ||
909 | return 0; | 906 | return 0; |
@@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) | |||
925 | return -ENOMEM; | 922 | return -ENOMEM; |
926 | 923 | ||
927 | data->index = pos; | 924 | data->index = pos; |
928 | master->addrs[pos] = dev->info.dyn_addr; | 925 | master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr; |
929 | master->free_pos &= ~BIT(pos); | 926 | master->free_pos &= ~BIT(pos); |
930 | i3c_dev_set_master_data(dev, data); | 927 | i3c_dev_set_master_data(dev, data); |
931 | 928 | ||
932 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), | 929 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), |
933 | master->regs + | 930 | master->regs + |
934 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 931 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
935 | 932 | ||
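
The attach hunk above uses the GNU "?:" shorthand to fall back to the static address when no dynamic address has been assigned yet. A portable sketch of the same selection logic, with made-up addresses:

    #include <stdio.h>
    #include <stdint.h>

    /* equivalent of "dyn_addr ?: static_addr" from the hunk above */
    static uint8_t pick_addr(uint8_t dyn_addr, uint8_t static_addr)
    {
        return dyn_addr ? dyn_addr : static_addr;
    }

    int main(void)
    {
        printf("with dyn_addr:    0x%02x\n", pick_addr(0x3a, 0x50));
        printf("without dyn_addr: 0x%02x\n", pick_addr(0x00, 0x50));
        return 0;
    }
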
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index bbd79b8b1a80..8889a4fdb454 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c | |||
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) | |||
1556 | return PTR_ERR(master->pclk); | 1556 | return PTR_ERR(master->pclk); |
1557 | 1557 | ||
1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); | 1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); |
1559 | if (IS_ERR(master->pclk)) | 1559 | if (IS_ERR(master->sysclk)) |
1560 | return PTR_ERR(master->pclk); | 1560 | return PTR_ERR(master->sysclk); |
1561 | 1561 | ||
1562 | irq = platform_get_irq(pdev, 0); | 1562 | irq = platform_get_irq(pdev, 0); |
1563 | if (irq < 0) | 1563 | if (irq < 0) |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 63a7cc00bae0..84f077b2b90a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, | |||
494 | id_priv->id.route.addr.dev_addr.transport = | 494 | id_priv->id.route.addr.dev_addr.transport = |
495 | rdma_node_get_transport(cma_dev->device->node_type); | 495 | rdma_node_get_transport(cma_dev->device->node_type); |
496 | list_add_tail(&id_priv->list, &cma_dev->id_list); | 496 | list_add_tail(&id_priv->list, &cma_dev->id_list); |
497 | rdma_restrack_kadd(&id_priv->res); | 497 | if (id_priv->res.kern_name) |
498 | rdma_restrack_kadd(&id_priv->res); | ||
499 | else | ||
500 | rdma_restrack_uadd(&id_priv->res); | ||
498 | } | 501 | } |
499 | 502 | ||
500 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, | 503 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e600fc23ae62..3c97a8b6bf1e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c | |||
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, | |||
584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, | 584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, |
585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) | 585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) |
586 | goto err; | 586 | goto err; |
587 | if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && | ||
588 | nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, | ||
589 | pd->unsafe_global_rkey)) | ||
590 | goto err; | ||
591 | 587 | ||
592 | if (fill_res_name_pid(msg, res)) | 588 | if (fill_res_name_pid(msg, res)) |
593 | goto err; | 589 | goto err; |
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index be6b8e1257d0..69f8db66925e 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h | |||
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj, | |||
106 | enum uverbs_obj_access access, | 106 | enum uverbs_obj_access access, |
107 | bool commit); | 107 | bool commit); |
108 | 108 | ||
109 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); | ||
110 | |||
109 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 111 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
110 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 112 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
111 | 113 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6b12cc5f97b2..3317300ab036 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, | |||
60 | { | 60 | { |
61 | int ret; | 61 | int ret; |
62 | 62 | ||
63 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
64 | return uverbs_copy_to_struct_or_zero( | ||
65 | attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); | ||
66 | |||
63 | if (copy_to_user(attrs->ucore.outbuf, resp, | 67 | if (copy_to_user(attrs->ucore.outbuf, resp, |
64 | min(attrs->ucore.outlen, resp_len))) | 68 | min(attrs->ucore.outlen, resp_len))) |
65 | return -EFAULT; | 69 | return -EFAULT; |
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) | |||
1181 | goto out_put; | 1185 | goto out_put; |
1182 | } | 1186 | } |
1183 | 1187 | ||
1188 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
1189 | ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); | ||
1190 | |||
1184 | ret = 0; | 1191 | ret = 0; |
1185 | 1192 | ||
1186 | out_put: | 1193 | out_put: |
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) | |||
2012 | return -ENOMEM; | 2019 | return -ENOMEM; |
2013 | 2020 | ||
2014 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); | 2021 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); |
2015 | if (!qp) | 2022 | if (!qp) { |
2023 | ret = -EINVAL; | ||
2016 | goto out; | 2024 | goto out; |
2025 | } | ||
2017 | 2026 | ||
2018 | is_ud = qp->qp_type == IB_QPT_UD; | 2027 | is_ud = qp->qp_type == IB_QPT_UD; |
2019 | sg_ind = 0; | 2028 | sg_ind = 0; |
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c81ff698052..0ca04d224015 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c | |||
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, | |||
144 | 0, uattr->len - len); | 144 | 0, uattr->len - len); |
145 | } | 145 | } |
146 | 146 | ||
147 | static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, | ||
148 | const struct uverbs_attr *attr) | ||
149 | { | ||
150 | struct bundle_priv *pbundle = | ||
151 | container_of(bundle, struct bundle_priv, bundle); | ||
152 | u16 flags; | ||
153 | |||
154 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | ||
155 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
156 | if (put_user(flags, | ||
157 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
158 | return -EFAULT; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
147 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, | 162 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, |
148 | const struct uverbs_api_attr *attr_uapi, | 163 | const struct uverbs_api_attr *attr_uapi, |
149 | struct uverbs_objs_arr_attr *attr, | 164 | struct uverbs_objs_arr_attr *attr, |
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, | |||
456 | } | 471 | } |
457 | 472 | ||
458 | /* | 473 | /* |
474 | * Until the drivers are revised to use the bundle directly we have to | ||
475 | * assume that the driver wrote to its UHW_OUT and flag userspace | ||
476 | * appropriately. | ||
477 | */ | ||
478 | if (!ret && pbundle->method_elm->has_udata) { | ||
479 | const struct uverbs_attr *attr = | ||
480 | uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); | ||
481 | |||
482 | if (!IS_ERR(attr)) | ||
483 | ret = uverbs_set_output(&pbundle->bundle, attr); | ||
484 | } | ||
485 | |||
486 | /* | ||
459 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can | 487 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can |
460 | * not invoke the method because the request is not supported. No | 488 | * not invoke the method because the request is not supported. No |
461 | * other cases should return this code. | 489 | * other cases should return this code. |
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, | |||
706 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | 734 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, |
707 | const void *from, size_t size) | 735 | const void *from, size_t size) |
708 | { | 736 | { |
709 | struct bundle_priv *pbundle = | ||
710 | container_of(bundle, struct bundle_priv, bundle); | ||
711 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 737 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
712 | u16 flags; | ||
713 | size_t min_size; | 738 | size_t min_size; |
714 | 739 | ||
715 | if (IS_ERR(attr)) | 740 | if (IS_ERR(attr)) |
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | |||
719 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) | 744 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) |
720 | return -EFAULT; | 745 | return -EFAULT; |
721 | 746 | ||
722 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | 747 | return uverbs_set_output(bundle, attr); |
723 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
724 | if (put_user(flags, | ||
725 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
726 | return -EFAULT; | ||
727 | |||
728 | return 0; | ||
729 | } | 748 | } |
730 | EXPORT_SYMBOL(uverbs_copy_to); | 749 | EXPORT_SYMBOL(uverbs_copy_to); |
731 | 750 | ||
751 | |||
752 | /* | ||
753 | * This is only used if the caller has directly used copy_to_user() to write the | ||
754 | * data. It signals to user space that the buffer is filled in. | ||
755 | */ | ||
756 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) | ||
757 | { | ||
758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | ||
759 | |||
760 | if (IS_ERR(attr)) | ||
761 | return PTR_ERR(attr); | ||
762 | |||
763 | return uverbs_set_output(bundle, attr); | ||
764 | } | ||
765 | |||
732 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, | 766 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, |
733 | size_t idx, s64 lower_bound, u64 upper_bound, | 767 | size_t idx, s64 lower_bound, u64 upper_bound, |
734 | s64 *def_val) | 768 | s64 *def_val) |
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, | |||
757 | { | 791 | { |
758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 792 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
759 | 793 | ||
760 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), | 794 | if (size < attr->ptr_attr.len) { |
761 | attr->ptr_attr.len)) | 795 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, |
762 | return -EFAULT; | 796 | attr->ptr_attr.len - size)) |
797 | return -EFAULT; | ||
798 | } | ||
763 | return uverbs_copy_to(bundle, idx, from, size); | 799 | return uverbs_copy_to(bundle, idx, from, size); |
764 | } | 800 | } |
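
The uverbs_copy_to_struct_or_zero() change above stops clearing the whole user buffer and instead zeroes only the bytes beyond what is copied. A userspace sketch of that copy-or-zero-the-tail pattern (memset/memcpy stand in for clear_user/copy_to_user; the names are made up):

    #include <stdio.h>
    #include <string.h>

    static void copy_or_zero_tail(void *dst, size_t dst_len,
                                  const void *src, size_t src_len)
    {
        /* zero only the part of the destination the source does not cover */
        if (src_len < dst_len)
            memset((char *)dst + src_len, 0, dst_len - src_len);
        memcpy(dst, src, src_len < dst_len ? src_len : dst_len);
    }

    int main(void)
    {
        char user_buf[8];
        const char resp[4] = { 1, 2, 3, 4 };

        memset(user_buf, 0xff, sizeof(user_buf));   /* pretend stale user data */
        copy_or_zero_tail(user_buf, sizeof(user_buf), resp, sizeof(resp));

        for (size_t i = 0; i < sizeof(user_buf); i++)
            printf("%02x ", (unsigned)(unsigned char)user_buf[i]);
        printf("\n");
        return 0;
    }
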
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb0007aa0c27..2890a77339e1 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
690 | 690 | ||
691 | buf += sizeof(hdr); | 691 | buf += sizeof(hdr); |
692 | 692 | ||
693 | memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); | ||
693 | bundle.ufile = file; | 694 | bundle.ufile = file; |
694 | if (!method_elm->is_ex) { | 695 | if (!method_elm->is_ex) { |
695 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); | 696 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82cb6b71ac7c..e3e9dd54caa2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
534 | { | 534 | { |
535 | struct mthca_ucontext *context; | 535 | struct mthca_ucontext *context; |
536 | 536 | ||
537 | qp = kmalloc(sizeof *qp, GFP_KERNEL); | 537 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
538 | if (!qp) | 538 | if (!qp) |
539 | return ERR_PTR(-ENOMEM); | 539 | return ERR_PTR(-ENOMEM); |
540 | 540 | ||
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
600 | if (udata) | 600 | if (udata) |
601 | return ERR_PTR(-EINVAL); | 601 | return ERR_PTR(-EINVAL); |
602 | 602 | ||
603 | qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); | 603 | qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); |
604 | if (!qp) | 604 | if (!qp) |
605 | return ERR_PTR(-ENOMEM); | 605 | return ERR_PTR(-ENOMEM); |
606 | 606 | ||
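
The mthca hunks swap kmalloc() for kzalloc() so the queue pair starts fully zeroed. A tiny userspace illustration, with calloc() standing in for kzalloc() and an invented struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct qp_stub {
        int state;
        int ref_count;
    };

    int main(void)
    {
        /* calloc() zeroes the allocation, so fields read before they are
         * explicitly set are 0 rather than leftover heap contents */
        struct qp_stub *qp = calloc(1, sizeof(*qp));
        if (!qp)
            return 1;

        printf("state=%d ref_count=%d\n", qp->state, qp->ref_count);
        free(qp);
        return 0;
    }
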
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997e..3c633ab58052 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) | |||
427 | 427 | ||
428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) | 428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) |
429 | { | 429 | { |
430 | return (enum pvrdma_wr_opcode)op; | 430 | switch (op) { |
431 | case IB_WR_RDMA_WRITE: | ||
432 | return PVRDMA_WR_RDMA_WRITE; | ||
433 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
434 | return PVRDMA_WR_RDMA_WRITE_WITH_IMM; | ||
435 | case IB_WR_SEND: | ||
436 | return PVRDMA_WR_SEND; | ||
437 | case IB_WR_SEND_WITH_IMM: | ||
438 | return PVRDMA_WR_SEND_WITH_IMM; | ||
439 | case IB_WR_RDMA_READ: | ||
440 | return PVRDMA_WR_RDMA_READ; | ||
441 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
442 | return PVRDMA_WR_ATOMIC_CMP_AND_SWP; | ||
443 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
444 | return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; | ||
445 | case IB_WR_LSO: | ||
446 | return PVRDMA_WR_LSO; | ||
447 | case IB_WR_SEND_WITH_INV: | ||
448 | return PVRDMA_WR_SEND_WITH_INV; | ||
449 | case IB_WR_RDMA_READ_WITH_INV: | ||
450 | return PVRDMA_WR_RDMA_READ_WITH_INV; | ||
451 | case IB_WR_LOCAL_INV: | ||
452 | return PVRDMA_WR_LOCAL_INV; | ||
453 | case IB_WR_REG_MR: | ||
454 | return PVRDMA_WR_FAST_REG_MR; | ||
455 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | ||
456 | return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; | ||
457 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: | ||
458 | return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; | ||
459 | case IB_WR_REG_SIG_MR: | ||
460 | return PVRDMA_WR_REG_SIG_MR; | ||
461 | default: | ||
462 | return PVRDMA_WR_ERROR; | ||
463 | } | ||
431 | } | 464 | } |
432 | 465 | ||
433 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( | 466 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( |
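
The pvrdma change above replaces a blind enum cast with an explicit switch that maps each IB opcode to its device opcode and returns PVRDMA_WR_ERROR for anything unknown. A self-contained sketch with two invented enums whose values disagree, showing why the cast is unsafe:

    #include <stdio.h>

    enum abi_op { ABI_WRITE = 0, ABI_SEND = 1, ABI_READ = 2 };
    enum hw_op  { HW_WRITE  = 0, HW_READ  = 1, HW_SEND  = 2, HW_ERROR = -1 };

    /* explicit translation: unknown values become an error code instead of
     * silently turning into whatever the numeric cast happens to produce */
    static enum hw_op abi_to_hw(enum abi_op op)
    {
        switch (op) {
        case ABI_WRITE: return HW_WRITE;
        case ABI_SEND:  return HW_SEND;
        case ABI_READ:  return HW_READ;
        default:        return HW_ERROR;
        }
    }

    int main(void)
    {
        printf("cast:   %d\n", (int)(enum hw_op)ABI_SEND); /* 1 == HW_READ, wrong */
        printf("mapped: %d\n", (int)abi_to_hw(ABI_SEND));  /* 2 == HW_SEND */
        return 0;
    }
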
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3acf74cbe266..1ec3646087ba 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, | |||
721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | 721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) |
722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; | 722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; |
723 | 723 | ||
724 | if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { | ||
725 | *bad_wr = wr; | ||
726 | ret = -EINVAL; | ||
727 | goto out; | ||
728 | } | ||
729 | |||
724 | switch (qp->ibqp.qp_type) { | 730 | switch (qp->ibqp.qp_type) { |
725 | case IB_QPT_GSI: | 731 | case IB_QPT_GSI: |
726 | case IB_QPT_UD: | 732 | case IB_QPT_UD: |
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4ac378e48902..40ca1e8fa09f 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c | |||
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) | |||
423 | int i, j; | 423 | int i, j; |
424 | 424 | ||
425 | for (j = 0; j < AVM_MAXVERSION; j++) | 425 | for (j = 0; j < AVM_MAXVERSION; j++) |
426 | cinfo->version[j] = "\0\0" + 1; | 426 | cinfo->version[j] = ""; |
427 | for (i = 0, j = 0; | 427 | for (i = 0, j = 0; |
428 | j < AVM_MAXVERSION && i < cinfo->versionlen; | 428 | j < AVM_MAXVERSION && i < cinfo->versionlen; |
429 | j++, i += cinfo->versionbuf[i] + 1) | 429 | j++, i += cinfo->versionbuf[i] + 1) |
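
The b1.c hunk replaces the puzzling "\0\0" + 1 with a plain empty string; both expressions point at a NUL byte, the new spelling just says so directly. A short demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *cryptic = "\0\0" + 1;   /* points at the second NUL */
        const char *plain = "";

        printf("len(cryptic)=%zu len(plain)=%zu\n",
               strlen(cryptic), strlen(plain));
        return 0;
    }
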
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index a2e74feee2b2..fd64df5a57a5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) | |||
318 | 318 | ||
319 | /* Let the programs run for couple of ms and check the engine status */ | 319 | /* Let the programs run for couple of ms and check the engine status */ |
320 | usleep_range(3000, 6000); | 320 | usleep_range(3000, 6000); |
321 | lp55xx_read(chip, LP5523_REG_STATUS, &status); | 321 | ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); |
322 | if (ret) | ||
323 | return ret; | ||
322 | status &= LP5523_ENG_STATUS_MASK; | 324 | status &= LP5523_ENG_STATUS_MASK; |
323 | 325 | ||
324 | if (status != LP5523_ENG_STATUS_MASK) { | 326 | if (status != LP5523_ENG_STATUS_MASK) { |
diff --git a/drivers/md/md.c b/drivers/md/md.c index fd4af4de03b4..05ffffb8b769 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -207,15 +207,10 @@ static bool create_on_open = true; | |||
207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
208 | struct mddev *mddev) | 208 | struct mddev *mddev) |
209 | { | 209 | { |
210 | struct bio *b; | ||
211 | |||
212 | if (!mddev || !bioset_initialized(&mddev->bio_set)) | 210 | if (!mddev || !bioset_initialized(&mddev->bio_set)) |
213 | return bio_alloc(gfp_mask, nr_iovecs); | 211 | return bio_alloc(gfp_mask, nr_iovecs); |
214 | 212 | ||
215 | b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); | 213 | return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); |
216 | if (!b) | ||
217 | return NULL; | ||
218 | return b; | ||
219 | } | 214 | } |
220 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); | 215 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); |
221 | 216 | ||
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index d01821a6906a..89d9c4c21037 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c | |||
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) | |||
807 | struct vb2_v4l2_buffer *vbuf; | 807 | struct vb2_v4l2_buffer *vbuf; |
808 | unsigned long flags; | 808 | unsigned long flags; |
809 | 809 | ||
810 | cancel_delayed_work_sync(&dev->work_run); | 810 | if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) |
811 | cancel_delayed_work_sync(&dev->work_run); | ||
812 | |||
811 | for (;;) { | 813 | for (;;) { |
812 | if (V4L2_TYPE_IS_OUTPUT(q->type)) | 814 | if (V4L2_TYPE_IS_OUTPUT(q->type)) |
813 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); | 815 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); |
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 1441a73ce64c..90aad465f9ed 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c | |||
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
287 | const struct v4l2_window *win; | 287 | const struct v4l2_window *win; |
288 | const struct v4l2_sdr_format *sdr; | 288 | const struct v4l2_sdr_format *sdr; |
289 | const struct v4l2_meta_format *meta; | 289 | const struct v4l2_meta_format *meta; |
290 | u32 planes; | ||
290 | unsigned i; | 291 | unsigned i; |
291 | 292 | ||
292 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); | 293 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); |
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
317 | prt_names(mp->field, v4l2_field_names), | 318 | prt_names(mp->field, v4l2_field_names), |
318 | mp->colorspace, mp->num_planes, mp->flags, | 319 | mp->colorspace, mp->num_planes, mp->flags, |
319 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); | 320 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); |
320 | for (i = 0; i < mp->num_planes; i++) | 321 | planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); |
322 | for (i = 0; i < planes; i++) | ||
321 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, | 323 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, |
322 | mp->plane_fmt[i].bytesperline, | 324 | mp->plane_fmt[i].bytesperline, |
323 | mp->plane_fmt[i].sizeimage); | 325 | mp->plane_fmt[i].sizeimage); |
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
1551 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) | 1553 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) |
1552 | break; | 1554 | break; |
1553 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
1556 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
1557 | break; | ||
1554 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1558 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1559 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
1560 | bytesperline); | ||
1556 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); | 1561 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); |
1557 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1562 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
1558 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) | 1563 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) |
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
1581 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) | 1586 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) |
1582 | break; | 1587 | break; |
1583 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1588 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
1589 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
1590 | break; | ||
1584 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1591 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
1585 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1592 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
1593 | bytesperline); | ||
1586 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); | 1594 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); |
1587 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1595 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
1588 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) | 1596 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) |
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
1648 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) | 1656 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) |
1649 | break; | 1657 | break; |
1650 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1658 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
1659 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
1660 | break; | ||
1651 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1661 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
1652 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1662 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
1663 | bytesperline); | ||
1653 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); | 1664 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); |
1654 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1665 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
1655 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) | 1666 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) |
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
1678 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) | 1689 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) |
1679 | break; | 1690 | break; |
1680 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1691 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
1692 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
1693 | break; | ||
1681 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1694 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
1682 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1695 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
1696 | bytesperline); | ||
1683 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); | 1697 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); |
1684 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1698 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
1685 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) | 1699 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) |
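
The v4l2-ioctl hunks reject a user-supplied num_planes larger than VIDEO_MAX_PLANES before it is used to walk the fixed plane_fmt[] array. A standalone sketch of the bounds check, using the uapi VIDEO_MAX_PLANES value and an invented plane struct:

    #include <stdio.h>
    #include <stdint.h>

    #define VIDEO_MAX_PLANES 8

    struct fake_plane { uint32_t bytesperline; };

    int main(void)
    {
        struct fake_plane planes[VIDEO_MAX_PLANES] = { { 0 } };

        /* hypothetical count copied in from userspace; nothing guarantees
         * it is in range, so clamp before indexing the fixed array */
        uint32_t num_planes = 100;
        uint32_t n = num_planes < VIDEO_MAX_PLANES ? num_planes
                                                   : VIDEO_MAX_PLANES;

        for (uint32_t i = 0; i < n; i++)
            planes[i].bytesperline = 0;

        printf("walked %u of %u requested planes\n",
               (unsigned)n, (unsigned)num_planes);
        return 0;
    }
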
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 6b212c8b78e7..2bfa3a903bf9 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c | |||
@@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, | |||
394 | struct _vop_vdev *vdev = to_vopvdev(dev); | 394 | struct _vop_vdev *vdev = to_vopvdev(dev); |
395 | struct vop_device *vpdev = vdev->vpdev; | 395 | struct vop_device *vpdev = vdev->vpdev; |
396 | struct mic_device_ctrl __iomem *dc = vdev->dc; | 396 | struct mic_device_ctrl __iomem *dc = vdev->dc; |
397 | int i, err, retry; | 397 | int i, err, retry, queue_idx = 0; |
398 | 398 | ||
399 | /* We must have this many virtqueues. */ | 399 | /* We must have this many virtqueues. */ |
400 | if (nvqs > ioread8(&vdev->desc->num_vq)) | 400 | if (nvqs > ioread8(&vdev->desc->num_vq)) |
401 | return -ENOENT; | 401 | return -ENOENT; |
402 | 402 | ||
403 | for (i = 0; i < nvqs; ++i) { | 403 | for (i = 0; i < nvqs; ++i) { |
404 | if (!names[i]) { | ||
405 | vqs[i] = NULL; | ||
406 | continue; | ||
407 | } | ||
408 | |||
404 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", | 409 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", |
405 | __func__, i, names[i]); | 410 | __func__, i, names[i]); |
406 | vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], | 411 | vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], |
407 | ctx ? ctx[i] : false); | 412 | ctx ? ctx[i] : false); |
408 | if (IS_ERR(vqs[i])) { | 413 | if (IS_ERR(vqs[i])) { |
409 | err = PTR_ERR(vqs[i]); | 414 | err = PTR_ERR(vqs[i]); |
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index eebac35304c6..6e8edc9375dd 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
@@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali) | |||
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | /* clk rate info is needed for setup_data_interface */ | 1324 | /* clk rate info is needed for setup_data_interface */ |
1325 | if (denali->clk_rate && denali->clk_x_rate) | 1325 | if (!denali->clk_rate || !denali->clk_x_rate) |
1326 | chip->options |= NAND_KEEP_TIMINGS; | 1326 | chip->options |= NAND_KEEP_TIMINGS; |
1327 | 1327 | ||
1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; | 1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; |
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 325b4414dccc..c9149a37f8f0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c | |||
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf, | |||
593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); | 593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); |
594 | } | 594 | } |
595 | 595 | ||
596 | /* fsmc_select_chip - assert or deassert nCE */ | ||
597 | static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) | ||
598 | { | ||
599 | u32 pc = readl(host->regs_va + FSMC_PC); | ||
600 | |||
601 | if (!assert) | ||
602 | writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
603 | else | ||
604 | writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
605 | |||
606 | /* | ||
607 | * nCE line changes must be applied before returning from this | ||
608 | * function. | ||
609 | */ | ||
610 | mb(); | ||
611 | } | ||
612 | |||
613 | /* | 596 | /* |
614 | * fsmc_exec_op - hook called by the core to execute NAND operations | 597 | * fsmc_exec_op - hook called by the core to execute NAND operations |
615 | * | 598 | * |
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
627 | 610 | ||
628 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); | 611 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); |
629 | 612 | ||
630 | fsmc_ce_ctrl(host, true); | ||
631 | |||
632 | for (op_id = 0; op_id < op->ninstrs; op_id++) { | 613 | for (op_id = 0; op_id < op->ninstrs; op_id++) { |
633 | instr = &op->instrs[op_id]; | 614 | instr = &op->instrs[op_id]; |
634 | 615 | ||
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
686 | } | 667 | } |
687 | } | 668 | } |
688 | 669 | ||
689 | fsmc_ce_ctrl(host, false); | ||
690 | |||
691 | return ret; | 670 | return ret; |
692 | } | 671 | } |
693 | 672 | ||
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c index f92ae5aa2a54..9526d5b23c80 100644 --- a/drivers/mtd/nand/raw/jz4740_nand.c +++ b/drivers/mtd/nand/raw/jz4740_nand.c | |||
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, | |||
260 | } | 260 | } |
261 | 261 | ||
262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, | 262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, |
263 | const char *name, struct resource **res, void *__iomem *base) | 263 | const char *name, struct resource **res, void __iomem **base) |
264 | { | 264 | { |
265 | int ret; | 265 | int ret; |
266 | 266 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6371958dd170..edb1c023a753 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -519,7 +519,7 @@ config NET_FAILOVER | |||
519 | and destroy a failover master netdev and manages a primary and | 519 | and destroy a failover master netdev and manages a primary and |
520 | standby slave netdevs that get registered via the generic failover | 520 | standby slave netdevs that get registered via the generic failover |
521 | infrastructure. This can be used by paravirtual drivers to enable | 521 | infrastructure. This can be used by paravirtual drivers to enable |
522 | an alternate low latency datapath. It alsoenables live migration of | 522 | an alternate low latency datapath. It also enables live migration of |
523 | a VM with direct attached VF by failing over to the paravirtual | 523 | a VM with direct attached VF by failing over to the paravirtual |
524 | datapath when the VF is unplugged. | 524 | datapath when the VF is unplugged. |
525 | 525 | ||
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index b4b839a1d095..ad41ec63cc9f 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi.c | |||
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) | |||
347 | struct device_node *mdio_np; | 347 | struct device_node *mdio_np; |
348 | int ret; | 348 | int ret; |
349 | 349 | ||
350 | mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, | 350 | mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); |
351 | "realtek,smi-mdio"); | ||
352 | if (!mdio_np) { | 351 | if (!mdio_np) { |
353 | dev_err(smi->dev, "no MDIO bus node\n"); | 352 | dev_err(smi->dev, "no MDIO bus node\n"); |
354 | return -ENODEV; | 353 | return -ENODEV; |
355 | } | 354 | } |
356 | 355 | ||
357 | smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); | 356 | smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); |
358 | if (!smi->slave_mii_bus) | 357 | if (!smi->slave_mii_bus) { |
359 | return -ENOMEM; | 358 | ret = -ENOMEM; |
359 | goto err_put_node; | ||
360 | } | ||
360 | smi->slave_mii_bus->priv = smi; | 361 | smi->slave_mii_bus->priv = smi; |
361 | smi->slave_mii_bus->name = "SMI slave MII"; | 362 | smi->slave_mii_bus->name = "SMI slave MII"; |
362 | smi->slave_mii_bus->read = realtek_smi_mdio_read; | 363 | smi->slave_mii_bus->read = realtek_smi_mdio_read; |
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) | |||
371 | if (ret) { | 372 | if (ret) { |
372 | dev_err(smi->dev, "unable to register MDIO bus %s\n", | 373 | dev_err(smi->dev, "unable to register MDIO bus %s\n", |
373 | smi->slave_mii_bus->id); | 374 | smi->slave_mii_bus->id); |
374 | of_node_put(mdio_np); | 375 | goto err_put_node; |
375 | } | 376 | } |
376 | 377 | ||
377 | return 0; | 378 | return 0; |
379 | |||
380 | err_put_node: | ||
381 | of_node_put(mdio_np); | ||
382 | |||
383 | return ret; | ||
378 | } | 384 | } |
379 | 385 | ||
380 | static int realtek_smi_probe(struct platform_device *pdev) | 386 | static int realtek_smi_probe(struct platform_device *pdev) |
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) | |||
457 | struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); | 463 | struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); |
458 | 464 | ||
459 | dsa_unregister_switch(smi->ds); | 465 | dsa_unregister_switch(smi->ds); |
466 | if (smi->slave_mii_bus) | ||
467 | of_node_put(smi->slave_mii_bus->dev.of_node); | ||
460 | gpiod_set_value(smi->reset, 1); | 468 | gpiod_set_value(smi->reset, 1); |
461 | 469 | ||
462 | return 0; | 470 | return 0; |
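
The realtek-smi hunks route every failure after of_get_compatible_child() through one err_put_node label so the node reference cannot leak. A rough userspace sketch of that goto-cleanup shape, with made-up helpers and a counter standing in for the of_node refcount:

    #include <stdio.h>

    static int refs;

    static int  get_node(void) { refs++; return 1; }
    static void put_node(void) { refs--; }

    static int setup_mdio(int alloc_fails)
    {
        int ret;

        if (!get_node())
            return -19;              /* -ENODEV: nothing to release yet */

        if (alloc_fails) {
            ret = -12;               /* -ENOMEM */
            goto err_put_node;
        }

        /* registration would go here; the real driver keeps the reference
         * until remove(), the sketch drops it so the program stays balanced */
        put_node();
        return 0;

    err_put_node:
        put_node();
        return ret;
    }

    int main(void)
    {
        printf("ok:   %d (refs=%d)\n", setup_mdio(0), refs);
        printf("fail: %d (refs=%d)\n", setup_mdio(1), refs);
        return 0;
    }
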
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d272dc6984ac..b40d4377cc71 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
@@ -431,8 +431,6 @@ | |||
431 | #define MAC_MDIOSCAR_PA_WIDTH 5 | 431 | #define MAC_MDIOSCAR_PA_WIDTH 5 |
432 | #define MAC_MDIOSCAR_RA_INDEX 0 | 432 | #define MAC_MDIOSCAR_RA_INDEX 0 |
433 | #define MAC_MDIOSCAR_RA_WIDTH 16 | 433 | #define MAC_MDIOSCAR_RA_WIDTH 16 |
434 | #define MAC_MDIOSCAR_REG_INDEX 0 | ||
435 | #define MAC_MDIOSCAR_REG_WIDTH 21 | ||
436 | #define MAC_MDIOSCCDR_BUSY_INDEX 22 | 434 | #define MAC_MDIOSCCDR_BUSY_INDEX 22 |
437 | #define MAC_MDIOSCCDR_BUSY_WIDTH 1 | 435 | #define MAC_MDIOSCCDR_BUSY_WIDTH 1 |
438 | #define MAC_MDIOSCCDR_CMD_INDEX 16 | 436 | #define MAC_MDIOSCCDR_CMD_INDEX 16 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 1e929a1e4ca7..4666084eda16 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, | |||
1284 | } | 1284 | } |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | static unsigned int xgbe_create_mdio_sca(int port, int reg) | ||
1288 | { | ||
1289 | unsigned int mdio_sca, da; | ||
1290 | |||
1291 | da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; | ||
1292 | |||
1293 | mdio_sca = 0; | ||
1294 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); | ||
1295 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); | ||
1296 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); | ||
1297 | |||
1298 | return mdio_sca; | ||
1299 | } | ||
1300 | |||
1287 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | 1301 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, |
1288 | int reg, u16 val) | 1302 | int reg, u16 val) |
1289 | { | 1303 | { |
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | |||
1291 | 1305 | ||
1292 | reinit_completion(&pdata->mdio_complete); | 1306 | reinit_completion(&pdata->mdio_complete); |
1293 | 1307 | ||
1294 | mdio_sca = 0; | 1308 | mdio_sca = xgbe_create_mdio_sca(addr, reg); |
1295 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
1296 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
1297 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | 1309 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
1298 | 1310 | ||
1299 | mdio_sccd = 0; | 1311 | mdio_sccd = 0; |
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | |||
1317 | 1329 | ||
1318 | reinit_completion(&pdata->mdio_complete); | 1330 | reinit_completion(&pdata->mdio_complete); |
1319 | 1331 | ||
1320 | mdio_sca = 0; | 1332 | mdio_sca = xgbe_create_mdio_sca(addr, reg); |
1321 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
1322 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
1323 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | 1333 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
1324 | 1334 | ||
1325 | mdio_sccd = 0; | 1335 | mdio_sccd = 0; |
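
The xgbe hunks factor the station-address setup into xgbe_create_mdio_sca(), which pulls the clause-45 device address out of the upper bits of reg. A simplified userspace sketch of that split (the 0x1f mask stands in for the field width that XGMAC_SET_BITS enforces in the driver):

    #include <stdio.h>

    #define MII_ADDR_C45 (1 << 30)   /* clause-45 flag used by the MDIO core */

    static void split_c45_reg(int reg, unsigned int *devad, unsigned int *regad)
    {
        /* for clause-45, bits 16 and up carry the MMD (device) address */
        *devad = (reg & MII_ADDR_C45) ? (reg >> 16) & 0x1f : 0;
        *regad = reg & 0xffff;
    }

    int main(void)
    {
        unsigned int devad, regad;

        split_c45_reg(MII_ADDR_C45 | (7 << 16) | 0x1234, &devad, &regad);
        printf("c45: devad=%u regad=0x%04x\n", devad, regad);

        split_c45_reg(0x05, &devad, &regad);
        printf("c22: devad=%u regad=0x%04x\n", devad, regad);
        return 0;
    }
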
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index f53090cde041..dfebc30c4841 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
2051 | bool nonlinear = skb_is_nonlinear(skb); | 2051 | bool nonlinear = skb_is_nonlinear(skb); |
2052 | struct rtnl_link_stats64 *percpu_stats; | 2052 | struct rtnl_link_stats64 *percpu_stats; |
2053 | struct dpaa_percpu_priv *percpu_priv; | 2053 | struct dpaa_percpu_priv *percpu_priv; |
2054 | struct netdev_queue *txq; | ||
2054 | struct dpaa_priv *priv; | 2055 | struct dpaa_priv *priv; |
2055 | struct qm_fd fd; | 2056 | struct qm_fd fd; |
2056 | int offset = 0; | 2057 | int offset = 0; |
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
2100 | if (unlikely(err < 0)) | 2101 | if (unlikely(err < 0)) |
2101 | goto skb_to_fd_failed; | 2102 | goto skb_to_fd_failed; |
2102 | 2103 | ||
2104 | txq = netdev_get_tx_queue(net_dev, queue_mapping); | ||
2105 | |||
2106 | /* LLTX requires to do our own update of trans_start */ | ||
2107 | txq->trans_start = jiffies; | ||
2108 | |||
2103 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { | 2109 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { |
2104 | fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); | 2110 | fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); |
2105 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 2111 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index fe1592ae8769..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
@@ -515,7 +515,7 @@ struct igb_adapter { | |||
515 | /* OS defined structs */ | 515 | /* OS defined structs */ |
516 | struct pci_dev *pdev; | 516 | struct pci_dev *pdev; |
517 | 517 | ||
518 | struct mutex stats64_lock; | 518 | spinlock_t stats64_lock; |
519 | struct rtnl_link_stats64 stats64; | 519 | struct rtnl_link_stats64 stats64; |
520 | 520 | ||
521 | /* structs defined in e1000_hw.h */ | 521 | /* structs defined in e1000_hw.h */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7426060b678f..c57671068245 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
2295 | int i, j; | 2295 | int i, j; |
2296 | char *p; | 2296 | char *p; |
2297 | 2297 | ||
2298 | mutex_lock(&adapter->stats64_lock); | 2298 | spin_lock(&adapter->stats64_lock); |
2299 | igb_update_stats(adapter); | 2299 | igb_update_stats(adapter); |
2300 | 2300 | ||
2301 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { | 2301 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { |
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
2338 | } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | 2338 | } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); |
2339 | i += IGB_RX_QUEUE_STATS_LEN; | 2339 | i += IGB_RX_QUEUE_STATS_LEN; |
2340 | } | 2340 | } |
2341 | mutex_unlock(&adapter->stats64_lock); | 2341 | spin_unlock(&adapter->stats64_lock); |
2342 | } | 2342 | } |
2343 | 2343 | ||
2344 | static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | 2344 | static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3615e2e52399..dfa357b1a9d6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter) | |||
2203 | del_timer_sync(&adapter->phy_info_timer); | 2203 | del_timer_sync(&adapter->phy_info_timer); |
2204 | 2204 | ||
2205 | /* record the stats before reset*/ | 2205 | /* record the stats before reset*/ |
2206 | mutex_lock(&adapter->stats64_lock); | 2206 | spin_lock(&adapter->stats64_lock); |
2207 | igb_update_stats(adapter); | 2207 | igb_update_stats(adapter); |
2208 | mutex_unlock(&adapter->stats64_lock); | 2208 | spin_unlock(&adapter->stats64_lock); |
2209 | 2209 | ||
2210 | adapter->link_speed = 0; | 2210 | adapter->link_speed = 0; |
2211 | adapter->link_duplex = 0; | 2211 | adapter->link_duplex = 0; |
@@ -3841,7 +3841,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
3841 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | 3841 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
3842 | 3842 | ||
3843 | spin_lock_init(&adapter->nfc_lock); | 3843 | spin_lock_init(&adapter->nfc_lock); |
3844 | mutex_init(&adapter->stats64_lock); | 3844 | spin_lock_init(&adapter->stats64_lock); |
3845 | #ifdef CONFIG_PCI_IOV | 3845 | #ifdef CONFIG_PCI_IOV |
3846 | switch (hw->mac.type) { | 3846 | switch (hw->mac.type) { |
3847 | case e1000_82576: | 3847 | case e1000_82576: |
@@ -5407,9 +5407,9 @@ no_wait: | |||
5407 | } | 5407 | } |
5408 | } | 5408 | } |
5409 | 5409 | ||
5410 | mutex_lock(&adapter->stats64_lock); | 5410 | spin_lock(&adapter->stats64_lock); |
5411 | igb_update_stats(adapter); | 5411 | igb_update_stats(adapter); |
5412 | mutex_unlock(&adapter->stats64_lock); | 5412 | spin_unlock(&adapter->stats64_lock); |
5413 | 5413 | ||
5414 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5414 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5415 | struct igb_ring *tx_ring = adapter->tx_ring[i]; | 5415 | struct igb_ring *tx_ring = adapter->tx_ring[i]; |
@@ -6236,10 +6236,10 @@ static void igb_get_stats64(struct net_device *netdev, | |||
6236 | { | 6236 | { |
6237 | struct igb_adapter *adapter = netdev_priv(netdev); | 6237 | struct igb_adapter *adapter = netdev_priv(netdev); |
6238 | 6238 | ||
6239 | mutex_lock(&adapter->stats64_lock); | 6239 | spin_lock(&adapter->stats64_lock); |
6240 | igb_update_stats(adapter); | 6240 | igb_update_stats(adapter); |
6241 | memcpy(stats, &adapter->stats64, sizeof(*stats)); | 6241 | memcpy(stats, &adapter->stats64, sizeof(*stats)); |
6242 | mutex_unlock(&adapter->stats64_lock); | 6242 | spin_unlock(&adapter->stats64_lock); |
6243 | } | 6243 | } |
6244 | 6244 | ||
6245 | /** | 6245 | /** |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c9df08133718..3bbccead2f63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -844,9 +844,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
844 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, | 844 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, |
845 | Autoneg); | 845 | Autoneg); |
846 | 846 | ||
847 | if (get_fec_supported_advertised(mdev, link_ksettings)) | 847 | err = get_fec_supported_advertised(mdev, link_ksettings); |
848 | if (err) { | ||
848 | netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", | 849 | netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", |
849 | __func__, err); | 850 | __func__, err); |
851 | err = 0; /* don't fail caps query because of FEC error */ | ||
852 | } | ||
850 | 853 | ||
851 | if (!an_disable_admin) | 854 | if (!an_disable_admin) |
852 | ethtool_link_ksettings_add_link_mode(link_ksettings, | 855 | ethtool_link_ksettings_add_link_mode(link_ksettings, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 96cc0c6a4014..04736212a21c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv { | |||
58 | struct list_head list; | 58 | struct list_head list; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); | 61 | static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, |
62 | struct net_device *netdev); | ||
62 | 63 | ||
63 | static void mlx5e_rep_get_drvinfo(struct net_device *dev, | 64 | static void mlx5e_rep_get_drvinfo(struct net_device *dev, |
64 | struct ethtool_drvinfo *drvinfo) | 65 | struct ethtool_drvinfo *drvinfo) |
@@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) | |||
179 | 180 | ||
180 | s->tx_packets += sq_stats->packets; | 181 | s->tx_packets += sq_stats->packets; |
181 | s->tx_bytes += sq_stats->bytes; | 182 | s->tx_bytes += sq_stats->bytes; |
183 | s->tx_queue_dropped += sq_stats->dropped; | ||
182 | } | 184 | } |
183 | } | 185 | } |
184 | } | 186 | } |
@@ -663,7 +665,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) | |||
663 | struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; | 665 | struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; |
664 | 666 | ||
665 | list_for_each_entry_safe(cb_priv, temp, head, list) { | 667 | list_for_each_entry_safe(cb_priv, temp, head, list) { |
666 | mlx5e_rep_indr_unregister_block(cb_priv->netdev); | 668 | mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); |
667 | kfree(cb_priv); | 669 | kfree(cb_priv); |
668 | } | 670 | } |
669 | } | 671 | } |
@@ -735,7 +737,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, | |||
735 | 737 | ||
736 | err = tcf_block_cb_register(f->block, | 738 | err = tcf_block_cb_register(f->block, |
737 | mlx5e_rep_indr_setup_block_cb, | 739 | mlx5e_rep_indr_setup_block_cb, |
738 | netdev, indr_priv, f->extack); | 740 | indr_priv, indr_priv, f->extack); |
739 | if (err) { | 741 | if (err) { |
740 | list_del(&indr_priv->list); | 742 | list_del(&indr_priv->list); |
741 | kfree(indr_priv); | 743 | kfree(indr_priv); |
@@ -743,14 +745,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, | |||
743 | 745 | ||
744 | return err; | 746 | return err; |
745 | case TC_BLOCK_UNBIND: | 747 | case TC_BLOCK_UNBIND: |
748 | indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); | ||
749 | if (!indr_priv) | ||
750 | return -ENOENT; | ||
751 | |||
746 | tcf_block_cb_unregister(f->block, | 752 | tcf_block_cb_unregister(f->block, |
747 | mlx5e_rep_indr_setup_block_cb, | 753 | mlx5e_rep_indr_setup_block_cb, |
748 | netdev); | 754 | indr_priv); |
749 | indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); | 755 | list_del(&indr_priv->list); |
750 | if (indr_priv) { | 756 | kfree(indr_priv); |
751 | list_del(&indr_priv->list); | ||
752 | kfree(indr_priv); | ||
753 | } | ||
754 | 757 | ||
755 | return 0; | 758 | return 0; |
756 | default: | 759 | default: |
@@ -779,7 +782,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, | |||
779 | 782 | ||
780 | err = __tc_indr_block_cb_register(netdev, rpriv, | 783 | err = __tc_indr_block_cb_register(netdev, rpriv, |
781 | mlx5e_rep_indr_setup_tc_cb, | 784 | mlx5e_rep_indr_setup_tc_cb, |
782 | netdev); | 785 | rpriv); |
783 | if (err) { | 786 | if (err) { |
784 | struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); | 787 | struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); |
785 | 788 | ||
@@ -789,10 +792,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, | |||
789 | return err; | 792 | return err; |
790 | } | 793 | } |
791 | 794 | ||
792 | static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) | 795 | static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, |
796 | struct net_device *netdev) | ||
793 | { | 797 | { |
794 | __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, | 798 | __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, |
795 | netdev); | 799 | rpriv); |
796 | } | 800 | } |
797 | 801 | ||
798 | static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, | 802 | static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, |
@@ -811,7 +815,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, | |||
811 | mlx5e_rep_indr_register_block(rpriv, netdev); | 815 | mlx5e_rep_indr_register_block(rpriv, netdev); |
812 | break; | 816 | break; |
813 | case NETDEV_UNREGISTER: | 817 | case NETDEV_UNREGISTER: |
814 | mlx5e_rep_indr_unregister_block(netdev); | 818 | mlx5e_rep_indr_unregister_block(rpriv, netdev); |
815 | break; | 819 | break; |
816 | } | 820 | } |
817 | return NOTIFY_OK; | 821 | return NOTIFY_OK; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d0bb5ff8c26..f86e4804e83e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | |||
732 | ((struct ipv6hdr *)ip_p)->nexthdr; | 732 | ((struct ipv6hdr *)ip_p)->nexthdr; |
733 | } | 733 | } |
734 | 734 | ||
735 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | ||
736 | |||
735 | static inline void mlx5e_handle_csum(struct net_device *netdev, | 737 | static inline void mlx5e_handle_csum(struct net_device *netdev, |
736 | struct mlx5_cqe64 *cqe, | 738 | struct mlx5_cqe64 *cqe, |
737 | struct mlx5e_rq *rq, | 739 | struct mlx5e_rq *rq, |
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
754 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) | 756 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) |
755 | goto csum_unnecessary; | 757 | goto csum_unnecessary; |
756 | 758 | ||
759 | /* CQE csum doesn't cover padding octets in short ethernet | ||
760 | * frames. And the pad field is appended prior to calculating | ||
761 | * and appending the FCS field. | ||
762 | * | ||
763 | * Detecting these padded frames requires to verify and parse | ||
764 | * IP headers, so we simply force all those small frames to be | ||
765 | * CHECKSUM_UNNECESSARY even if they are not padded. | ||
766 | */ | ||
767 | if (short_frame(skb->len)) | ||
768 | goto csum_unnecessary; | ||
769 | |||
757 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { | 770 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { |
758 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) | 771 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) |
759 | goto csum_unnecessary; | 772 | goto csum_unnecessary; |
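
The en_rx.c hunk adds a short_frame() check so frames that may carry sender padding skip checksum-complete. The same predicate in a standalone program, using the standard Ethernet minimum length and FCS size:

    #include <stdio.h>

    #define ETH_ZLEN    60   /* minimum Ethernet frame length without FCS */
    #define ETH_FCS_LEN  4   /* frame check sequence */

    #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

    int main(void)
    {
        int lens[] = { 42, 64, 65, 1514 };

        for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
            printf("len=%4d -> %s\n", lens[i],
                   short_frame(lens[i]) ? "skip csum complete"
                                        : "use csum complete");
        return 0;
    }
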
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 66b8098c6fd2..a2321fe8d6a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c | |||
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) | |||
604 | u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); | 604 | u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); |
605 | u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); | 605 | u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); |
606 | u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); | 606 | u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); |
607 | char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; | ||
608 | |||
609 | memcpy(ncqe, cqe, q->elem_size); | ||
610 | mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
607 | 611 | ||
608 | if (sendq) { | 612 | if (sendq) { |
609 | struct mlxsw_pci_queue *sdq; | 613 | struct mlxsw_pci_queue *sdq; |
610 | 614 | ||
611 | sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); | 615 | sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); |
612 | mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, | 616 | mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, |
613 | wqe_counter, cqe); | 617 | wqe_counter, ncqe); |
614 | q->u.cq.comp_sdq_count++; | 618 | q->u.cq.comp_sdq_count++; |
615 | } else { | 619 | } else { |
616 | struct mlxsw_pci_queue *rdq; | 620 | struct mlxsw_pci_queue *rdq; |
617 | 621 | ||
618 | rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); | 622 | rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); |
619 | mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, | 623 | mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, |
620 | wqe_counter, q->u.cq.v, cqe); | 624 | wqe_counter, q->u.cq.v, ncqe); |
621 | q->u.cq.comp_rdq_count++; | 625 | q->u.cq.comp_rdq_count++; |
622 | } | 626 | } |
623 | if (++items == credits) | 627 | if (++items == credits) |
624 | break; | 628 | break; |
625 | } | 629 | } |
626 | if (items) { | 630 | if (items) |
627 | mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
628 | mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); | 631 | mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); |
629 | } | ||
630 | } | 632 | } |
631 | 633 | ||
632 | static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) | 634 | static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) |
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, | |||
1365 | u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); | 1367 | u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); |
1366 | 1368 | ||
1367 | if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) | 1369 | if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) |
1368 | break; | 1370 | return 0; |
1369 | cond_resched(); | 1371 | cond_resched(); |
1370 | } while (time_before(jiffies, end)); | 1372 | } while (time_before(jiffies, end)); |
1371 | return 0; | 1373 | return -EBUSY; |
1372 | } | 1374 | } |
1373 | 1375 | ||
1374 | static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) | 1376 | static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) |
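Two independent fixes are visible in this file: the completion entry is copied out before the consumer doorbell returns its slot to the device, and the firmware-ready poll now reports -EBUSY on timeout instead of silently returning success. A hedged sketch of the first pattern, with generic names standing in for the mlxsw helpers:

#include <stddef.h>
#include <string.h>

#define CQE_SIZE_MAX	32	/* sized for the largest CQE version in use */

/* The hardware may overwrite the ring entry as soon as the consumer
 * doorbell is rung, so the handler must work on a private copy.
 * Caller guarantees elem_size <= CQE_SIZE_MAX. */
static void handle_completion(const void *hw_cqe, size_t elem_size,
			      void (*ring_doorbell)(void),
			      void (*process)(const char *cqe))
{
	char ncqe[CQE_SIZE_MAX];

	memcpy(ncqe, hw_cqe, elem_size);	/* snapshot while still valid */
	ring_doorbell();			/* hand the slot back to the HW */
	process(ncqe);				/* safe: operates on the copy */
}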
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index bb99f6d41fe0..ffee38e36ce8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | #define MLXSW_PCI_SW_RESET 0xF0010 | 28 | #define MLXSW_PCI_SW_RESET 0xF0010 |
29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) | 29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) |
30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 | 30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 |
31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 | 31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 |
32 | #define MLXSW_PCI_FW_READY 0xA1844 | 32 | #define MLXSW_PCI_FW_READY 0xA1844 |
33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF | 33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF |
@@ -53,6 +53,7 @@ | |||
53 | #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ | 53 | #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ |
54 | #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ | 54 | #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ |
55 | #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ | 55 | #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ |
56 | #define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE | ||
56 | #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ | 57 | #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ |
57 | #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) | 58 | #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) |
58 | #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) | 59 | #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 055cc6943b34..9d9aa28684af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | |||
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { | |||
997 | static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { | 997 | static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { |
998 | .type = MLXSW_SP_FID_TYPE_DUMMY, | 998 | .type = MLXSW_SP_FID_TYPE_DUMMY, |
999 | .fid_size = sizeof(struct mlxsw_sp_fid), | 999 | .fid_size = sizeof(struct mlxsw_sp_fid), |
1000 | .start_index = MLXSW_SP_RFID_BASE - 1, | 1000 | .start_index = VLAN_N_VID - 1, |
1001 | .end_index = MLXSW_SP_RFID_BASE - 1, | 1001 | .end_index = VLAN_N_VID - 1, |
1002 | .ops = &mlxsw_sp_fid_dummy_ops, | 1002 | .ops = &mlxsw_sp_fid_dummy_ops, |
1003 | }; | 1003 | }; |
1004 | 1004 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8ab796e0c0d7..0f4e68d31cc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -1233,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, | |||
1233 | static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) | 1233 | static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) |
1234 | { | 1234 | { |
1235 | return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : | 1235 | return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : |
1236 | MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; | 1236 | MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG; |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) | 1239 | static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) |
@@ -1290,7 +1290,7 @@ out: | |||
1290 | static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1290 | static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
1291 | const char *mac, u16 fid, bool adding, | 1291 | const char *mac, u16 fid, bool adding, |
1292 | enum mlxsw_reg_sfd_rec_action action, | 1292 | enum mlxsw_reg_sfd_rec_action action, |
1293 | bool dynamic) | 1293 | enum mlxsw_reg_sfd_rec_policy policy) |
1294 | { | 1294 | { |
1295 | char *sfd_pl; | 1295 | char *sfd_pl; |
1296 | u8 num_rec; | 1296 | u8 num_rec; |
@@ -1301,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1301 | return -ENOMEM; | 1301 | return -ENOMEM; |
1302 | 1302 | ||
1303 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1303 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
1304 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1304 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); |
1305 | mac, fid, action, local_port); | ||
1306 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | 1305 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); |
1307 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1306 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1308 | if (err) | 1307 | if (err) |
@@ -1321,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1321 | bool dynamic) | 1320 | bool dynamic) |
1322 | { | 1321 | { |
1323 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, | 1322 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, |
1324 | MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); | 1323 | MLXSW_REG_SFD_REC_ACTION_NOP, |
1324 | mlxsw_sp_sfd_rec_policy(dynamic)); | ||
1325 | } | 1325 | } |
1326 | 1326 | ||
1327 | int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, | 1327 | int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, |
@@ -1329,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, | |||
1329 | { | 1329 | { |
1330 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, | 1330 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, |
1331 | MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, | 1331 | MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, |
1332 | false); | 1332 | MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); |
1333 | } | 1333 | } |
1334 | 1334 | ||
1335 | static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | 1335 | static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 084a1b3fbc80..cf22a79af66b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w) | |||
337 | 337 | ||
338 | if (src) | 338 | if (src) |
339 | dev_put(src->dev); | 339 | dev_put(src->dev); |
340 | kfree_skb(skb); | 340 | consume_skb(skb); |
341 | } | 341 | } |
342 | } | 342 | } |
343 | 343 | ||
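The macvlan change swaps kfree_skb() for consume_skb() once the broadcast has been processed, so the release is no longer accounted (and traced) as a packet drop. A minimal sketch of that convention; the helper below is illustrative, not macvlan code:

#include <linux/skbuff.h>

static void finish_rx_skb(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		consume_skb(skb);	/* normal completion: not a drop */
	else
		kfree_skb(skb);		/* drop path: fires the kfree_skb tracepoint
					 * that drop monitors watch */
}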
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 1b350183bffb..a271239748f2 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c | |||
@@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = { | |||
197 | .phy_id = PHY_ID_BCM8706, | 197 | .phy_id = PHY_ID_BCM8706, |
198 | .phy_id_mask = 0xffffffff, | 198 | .phy_id_mask = 0xffffffff, |
199 | .name = "Broadcom BCM8706", | 199 | .name = "Broadcom BCM8706", |
200 | .features = PHY_10GBIT_FEC_FEATURES, | ||
200 | .config_init = bcm87xx_config_init, | 201 | .config_init = bcm87xx_config_init, |
201 | .config_aneg = bcm87xx_config_aneg, | 202 | .config_aneg = bcm87xx_config_aneg, |
202 | .read_status = bcm87xx_read_status, | 203 | .read_status = bcm87xx_read_status, |
@@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = { | |||
208 | .phy_id = PHY_ID_BCM8727, | 209 | .phy_id = PHY_ID_BCM8727, |
209 | .phy_id_mask = 0xffffffff, | 210 | .phy_id_mask = 0xffffffff, |
210 | .name = "Broadcom BCM8727", | 211 | .name = "Broadcom BCM8727", |
212 | .features = PHY_10GBIT_FEC_FEATURES, | ||
211 | .config_init = bcm87xx_config_init, | 213 | .config_init = bcm87xx_config_init, |
212 | .config_aneg = bcm87xx_config_aneg, | 214 | .config_aneg = bcm87xx_config_aneg, |
213 | .read_status = bcm87xx_read_status, | 215 | .read_status = bcm87xx_read_status, |
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index 8022cd317f62..1a4d04afb7f0 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c | |||
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = { | |||
88 | .phy_id = PHY_ID_CS4340, | 88 | .phy_id = PHY_ID_CS4340, |
89 | .phy_id_mask = 0xffffffff, | 89 | .phy_id_mask = 0xffffffff, |
90 | .name = "Cortina CS4340", | 90 | .name = "Cortina CS4340", |
91 | .features = PHY_10GBIT_FEATURES, | ||
91 | .config_init = gen10g_config_init, | 92 | .config_init = gen10g_config_init, |
92 | .config_aneg = gen10g_config_aneg, | 93 | .config_aneg = gen10g_config_aneg, |
93 | .read_status = cortina_read_status, | 94 | .read_status = cortina_read_status, |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a9c7c7f41b0c..2e12f982534f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -1046,6 +1046,39 @@ static int m88e1145_config_init(struct phy_device *phydev) | |||
1046 | return 0; | 1046 | return 0; |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | /* The VOD can be out of specification on link up. Poke an | ||
1050 | * undocumented register, in an undocumented page, with a magic value | ||
1051 | * to fix this. | ||
1052 | */ | ||
1053 | static int m88e6390_errata(struct phy_device *phydev) | ||
1054 | { | ||
1055 | int err; | ||
1056 | |||
1057 | err = phy_write(phydev, MII_BMCR, | ||
1058 | BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); | ||
1059 | if (err) | ||
1060 | return err; | ||
1061 | |||
1062 | usleep_range(300, 400); | ||
1063 | |||
1064 | err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); | ||
1065 | if (err) | ||
1066 | return err; | ||
1067 | |||
1068 | return genphy_soft_reset(phydev); | ||
1069 | } | ||
1070 | |||
1071 | static int m88e6390_config_aneg(struct phy_device *phydev) | ||
1072 | { | ||
1073 | int err; | ||
1074 | |||
1075 | err = m88e6390_errata(phydev); | ||
1076 | if (err) | ||
1077 | return err; | ||
1078 | |||
1079 | return m88e1510_config_aneg(phydev); | ||
1080 | } | ||
1081 | |||
1049 | /** | 1082 | /** |
1050 | * fiber_lpa_mod_linkmode_lpa_t | 1083 | * fiber_lpa_mod_linkmode_lpa_t |
1051 | * @advertising: the linkmode advertisement settings | 1084 | * @advertising: the linkmode advertisement settings |
@@ -1402,7 +1435,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, | |||
1402 | * before enabling it if !phy_interrupt_is_valid() | 1435 | * before enabling it if !phy_interrupt_is_valid() |
1403 | */ | 1436 | */ |
1404 | if (!phy_interrupt_is_valid(phydev)) | 1437 | if (!phy_interrupt_is_valid(phydev)) |
1405 | phy_read(phydev, MII_M1011_IEVENT); | 1438 | __phy_read(phydev, MII_M1011_IEVENT); |
1406 | 1439 | ||
1407 | /* Enable the WOL interrupt */ | 1440 | /* Enable the WOL interrupt */ |
1408 | err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, | 1441 | err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, |
@@ -2283,7 +2316,7 @@ static struct phy_driver marvell_drivers[] = { | |||
2283 | .features = PHY_GBIT_FEATURES, | 2316 | .features = PHY_GBIT_FEATURES, |
2284 | .probe = m88e6390_probe, | 2317 | .probe = m88e6390_probe, |
2285 | .config_init = &marvell_config_init, | 2318 | .config_init = &marvell_config_init, |
2286 | .config_aneg = &m88e1510_config_aneg, | 2319 | .config_aneg = &m88e6390_config_aneg, |
2287 | .read_status = &marvell_read_status, | 2320 | .read_status = &marvell_read_status, |
2288 | .ack_interrupt = &marvell_ack_interrupt, | 2321 | .ack_interrupt = &marvell_ack_interrupt, |
2289 | .config_intr = &marvell_config_intr, | 2322 | .config_intr = &marvell_config_intr, |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 741f27228088..2e53ba3fa2e7 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -388,6 +388,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | |||
388 | if (IS_ERR(gpiod)) { | 388 | if (IS_ERR(gpiod)) { |
389 | dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", | 389 | dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", |
390 | bus->id); | 390 | bus->id); |
391 | device_del(&bus->dev); | ||
391 | return PTR_ERR(gpiod); | 392 | return PTR_ERR(gpiod); |
392 | } else if (gpiod) { | 393 | } else if (gpiod) { |
393 | bus->reset_gpiod = gpiod; | 394 | bus->reset_gpiod = gpiod; |
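The mdio_bus fix adds the missing unwind: by the time the reset-GPIO lookup can fail, __mdiobus_register() has already called device_register(), so that error path must device_del() the bus device before returning. A generic sketch of the rule, where do_more_setup() is a hypothetical follow-up step supplied by the caller:

#include <linux/device.h>

static int register_and_setup(struct device *dev,
			      int (*do_more_setup)(struct device *dev))
{
	int err;

	err = device_register(dev);
	if (err)
		return err;	/* (refcount handling of this path omitted) */

	err = do_more_setup(dev);	/* any later step that can fail */
	if (err) {
		device_del(dev);	/* undo device_register() before bailing out */
		return err;
	}
	return 0;
}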
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index b03bcf2c388a..3ddaf9595697 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c | |||
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = { | |||
233 | .name = "Meson GXL Internal PHY", | 233 | .name = "Meson GXL Internal PHY", |
234 | .features = PHY_BASIC_FEATURES, | 234 | .features = PHY_BASIC_FEATURES, |
235 | .flags = PHY_IS_INTERNAL, | 235 | .flags = PHY_IS_INTERNAL, |
236 | .soft_reset = genphy_soft_reset, | ||
236 | .config_init = meson_gxl_config_init, | 237 | .config_init = meson_gxl_config_init, |
237 | .aneg_done = genphy_aneg_done, | 238 | .aneg_done = genphy_aneg_done, |
238 | .read_status = meson_gxl_read_status, | 239 | .read_status = meson_gxl_read_status, |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 670698b7ef0e..0d62b548ab39 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -1049,6 +1049,7 @@ static struct phy_driver ksphy_driver[] = { | |||
1049 | .driver_data = &ksz9021_type, | 1049 | .driver_data = &ksz9021_type, |
1050 | .probe = kszphy_probe, | 1050 | .probe = kszphy_probe, |
1051 | .config_init = ksz9031_config_init, | 1051 | .config_init = ksz9031_config_init, |
1052 | .soft_reset = genphy_soft_reset, | ||
1052 | .read_status = ksz9031_read_status, | 1053 | .read_status = ksz9031_read_status, |
1053 | .ack_interrupt = kszphy_ack_interrupt, | 1054 | .ack_interrupt = kszphy_ack_interrupt, |
1054 | .config_intr = kszphy_config_intr, | 1055 | .config_intr = kszphy_config_intr, |
@@ -1077,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = { | |||
1077 | .phy_id = PHY_ID_KSZ8873MLL, | 1078 | .phy_id = PHY_ID_KSZ8873MLL, |
1078 | .phy_id_mask = MICREL_PHY_ID_MASK, | 1079 | .phy_id_mask = MICREL_PHY_ID_MASK, |
1079 | .name = "Micrel KSZ8873MLL Switch", | 1080 | .name = "Micrel KSZ8873MLL Switch", |
1081 | .features = PHY_BASIC_FEATURES, | ||
1080 | .config_init = kszphy_config_init, | 1082 | .config_init = kszphy_config_init, |
1081 | .config_aneg = ksz8873mll_config_aneg, | 1083 | .config_aneg = ksz8873mll_config_aneg, |
1082 | .read_status = ksz8873mll_read_status, | 1084 | .read_status = ksz8873mll_read_status, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f7a92e7edff7..745a705a505a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -538,13 +538,6 @@ int phy_start_aneg(struct phy_device *phydev) | |||
538 | 538 | ||
539 | mutex_lock(&phydev->lock); | 539 | mutex_lock(&phydev->lock); |
540 | 540 | ||
541 | if (!__phy_is_started(phydev)) { | ||
542 | WARN(1, "called from state %s\n", | ||
543 | phy_state_to_str(phydev->state)); | ||
544 | err = -EBUSY; | ||
545 | goto out_unlock; | ||
546 | } | ||
547 | |||
548 | if (AUTONEG_DISABLE == phydev->autoneg) | 541 | if (AUTONEG_DISABLE == phydev->autoneg) |
549 | phy_sanitize_settings(phydev); | 542 | phy_sanitize_settings(phydev); |
550 | 543 | ||
@@ -555,11 +548,13 @@ int phy_start_aneg(struct phy_device *phydev) | |||
555 | if (err < 0) | 548 | if (err < 0) |
556 | goto out_unlock; | 549 | goto out_unlock; |
557 | 550 | ||
558 | if (phydev->autoneg == AUTONEG_ENABLE) { | 551 | if (__phy_is_started(phydev)) { |
559 | err = phy_check_link_status(phydev); | 552 | if (phydev->autoneg == AUTONEG_ENABLE) { |
560 | } else { | 553 | err = phy_check_link_status(phydev); |
561 | phydev->state = PHY_FORCING; | 554 | } else { |
562 | phydev->link_timeout = PHY_FORCE_TIMEOUT; | 555 | phydev->state = PHY_FORCING; |
556 | phydev->link_timeout = PHY_FORCE_TIMEOUT; | ||
557 | } | ||
563 | } | 558 | } |
564 | 559 | ||
565 | out_unlock: | 560 | out_unlock: |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7b3164174251..b61db0a5ba3a 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -59,6 +59,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features); | |||
59 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; | 59 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
60 | EXPORT_SYMBOL_GPL(phy_10gbit_features); | 60 | EXPORT_SYMBOL_GPL(phy_10gbit_features); |
61 | 61 | ||
62 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; | ||
63 | EXPORT_SYMBOL_GPL(phy_10gbit_fec_features); | ||
64 | |||
62 | static const int phy_basic_ports_array[] = { | 65 | static const int phy_basic_ports_array[] = { |
63 | ETHTOOL_LINK_MODE_Autoneg_BIT, | 66 | ETHTOOL_LINK_MODE_Autoneg_BIT, |
64 | ETHTOOL_LINK_MODE_TP_BIT, | 67 | ETHTOOL_LINK_MODE_TP_BIT, |
@@ -107,6 +110,11 @@ const int phy_10gbit_features_array[1] = { | |||
107 | }; | 110 | }; |
108 | EXPORT_SYMBOL_GPL(phy_10gbit_features_array); | 111 | EXPORT_SYMBOL_GPL(phy_10gbit_features_array); |
109 | 112 | ||
113 | const int phy_10gbit_fec_features_array[1] = { | ||
114 | ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, | ||
115 | }; | ||
116 | EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); | ||
117 | |||
110 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; | 118 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
111 | EXPORT_SYMBOL_GPL(phy_10gbit_full_features); | 119 | EXPORT_SYMBOL_GPL(phy_10gbit_full_features); |
112 | 120 | ||
@@ -189,6 +197,10 @@ static void features_init(void) | |||
189 | linkmode_set_bit_array(phy_10gbit_full_features_array, | 197 | linkmode_set_bit_array(phy_10gbit_full_features_array, |
190 | ARRAY_SIZE(phy_10gbit_full_features_array), | 198 | ARRAY_SIZE(phy_10gbit_full_features_array), |
191 | phy_10gbit_full_features); | 199 | phy_10gbit_full_features); |
200 | /* 10G FEC only */ | ||
201 | linkmode_set_bit_array(phy_10gbit_fec_features_array, | ||
202 | ARRAY_SIZE(phy_10gbit_fec_features_array), | ||
203 | phy_10gbit_fec_features); | ||
192 | } | 204 | } |
193 | 205 | ||
194 | void phy_device_free(struct phy_device *phydev) | 206 | void phy_device_free(struct phy_device *phydev) |
@@ -2290,6 +2302,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) | |||
2290 | { | 2302 | { |
2291 | int retval; | 2303 | int retval; |
2292 | 2304 | ||
2305 | if (WARN_ON(!new_driver->features)) { | ||
2306 | pr_err("%s: Driver features are missing\n", new_driver->name); | ||
2307 | return -EINVAL; | ||
2308 | } | ||
2309 | |||
2293 | new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; | 2310 | new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; |
2294 | new_driver->mdiodrv.driver.name = new_driver->name; | 2311 | new_driver->mdiodrv.driver.name = new_driver->name; |
2295 | new_driver->mdiodrv.driver.bus = &mdio_bus_type; | 2312 | new_driver->mdiodrv.driver.bus = &mdio_bus_type; |
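With the WARN_ON() added above, phy_driver_register() now rejects any driver that leaves .features unset, which is why the surrounding hunks add .features to the bcm87xx, cortina, micrel and teranetics entries. A skeletal, made-up driver entry showing the now-mandatory field:

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_driver[] = {
	{
		.phy_id		= 0x00112233,		/* made-up PHY ID */
		.phy_id_mask	= 0xffffffff,
		.name		= "Example 10G PHY",
		/* mandatory: registration now fails with -EINVAL without it */
		.features	= PHY_10GBIT_FEC_FEATURES,
	},
};

module_phy_driver(example_phy_driver);

MODULE_LICENSE("GPL");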
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 22f3bdd8206c..91247182bc52 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c | |||
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = { | |||
80 | .phy_id = PHY_ID_TN2020, | 80 | .phy_id = PHY_ID_TN2020, |
81 | .phy_id_mask = 0xffffffff, | 81 | .phy_id_mask = 0xffffffff, |
82 | .name = "Teranetics TN2020", | 82 | .name = "Teranetics TN2020", |
83 | .features = PHY_10GBIT_FEATURES, | ||
83 | .soft_reset = gen10g_no_soft_reset, | 84 | .soft_reset = gen10g_no_soft_reset, |
84 | .aneg_done = teranetics_aneg_done, | 85 | .aneg_done = teranetics_aneg_done, |
85 | .config_init = gen10g_config_init, | 86 | .config_init = gen10g_config_init, |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 62dc564b251d..f22639f0116a 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
445 | if (pskb_trim_rcsum(skb, len)) | 445 | if (pskb_trim_rcsum(skb, len)) |
446 | goto drop; | 446 | goto drop; |
447 | 447 | ||
448 | ph = pppoe_hdr(skb); | ||
448 | pn = pppoe_pernet(dev_net(dev)); | 449 | pn = pppoe_pernet(dev_net(dev)); |
449 | 450 | ||
450 | /* Note that get_item does a sock_hold(), so sk_pppox(po) | 451 | /* Note that get_item does a sock_hold(), so sk_pppox(po) |
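The one-line pppoe fix re-reads ph after pskb_trim_rcsum(): trimming can reallocate the skb head, which would leave the previously cached header pointer dangling. A small hedged sketch of that rule (the helper is illustrative):

#include <linux/errno.h>
#include <linux/if_pppox.h>
#include <linux/skbuff.h>

static int trim_and_reload(struct sk_buff *skb, int len,
			   struct pppoe_hdr **phdr)
{
	int err = pskb_trim_rcsum(skb, len);

	if (err)
		return err;

	*phdr = pppoe_hdr(skb);	/* reload: skb->data may have moved */
	return 0;
}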
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 57f1c94fca0b..820a2fe7d027 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c | |||
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = { | |||
1287 | 1287 | ||
1288 | #undef ASIX112_DESC | 1288 | #undef ASIX112_DESC |
1289 | 1289 | ||
1290 | static const struct driver_info trendnet_info = { | ||
1291 | .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", | ||
1292 | .bind = aqc111_bind, | ||
1293 | .unbind = aqc111_unbind, | ||
1294 | .status = aqc111_status, | ||
1295 | .link_reset = aqc111_link_reset, | ||
1296 | .reset = aqc111_reset, | ||
1297 | .stop = aqc111_stop, | ||
1298 | .flags = FLAG_ETHER | FLAG_FRAMING_AX | | ||
1299 | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, | ||
1300 | .rx_fixup = aqc111_rx_fixup, | ||
1301 | .tx_fixup = aqc111_tx_fixup, | ||
1302 | }; | ||
1303 | |||
1290 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) | 1304 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) |
1291 | { | 1305 | { |
1292 | struct usbnet *dev = usb_get_intfdata(intf); | 1306 | struct usbnet *dev = usb_get_intfdata(intf); |
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = { | |||
1440 | {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, | 1454 | {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, |
1441 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, | 1455 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, |
1442 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, | 1456 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, |
1457 | {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, | ||
1443 | { },/* END */ | 1458 | { },/* END */ |
1444 | }; | 1459 | }; |
1445 | MODULE_DEVICE_TABLE(usb, products); | 1460 | MODULE_DEVICE_TABLE(usb, products); |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3305f23793c7..5512a1038721 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -843,6 +843,14 @@ static const struct usb_device_id products[] = { | |||
843 | .driver_info = 0, | 843 | .driver_info = 0, |
844 | }, | 844 | }, |
845 | 845 | ||
846 | /* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */ | ||
847 | { | ||
848 | USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM, | ||
849 | USB_CDC_SUBCLASS_ETHERNET, | ||
850 | USB_CDC_PROTO_NONE), | ||
851 | .driver_info = 0, | ||
852 | }, | ||
853 | |||
846 | /* WHITELIST!!! | 854 | /* WHITELIST!!! |
847 | * | 855 | * |
848 | * CDC Ether uses two interfaces, not necessarily consecutive. | 856 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 236ba5d5fb4b..2a0edd4653e3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, | |||
1331 | return stats.packets; | 1331 | return stats.packets; |
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | static void free_old_xmit_skbs(struct send_queue *sq) | 1334 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
1335 | { | 1335 | { |
1336 | struct sk_buff *skb; | 1336 | struct sk_buff *skb; |
1337 | unsigned int len; | 1337 | unsigned int len; |
@@ -1344,7 +1344,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) | |||
1344 | bytes += skb->len; | 1344 | bytes += skb->len; |
1345 | packets++; | 1345 | packets++; |
1346 | 1346 | ||
1347 | dev_consume_skb_any(skb); | 1347 | napi_consume_skb(skb, in_napi); |
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /* Avoid overhead when no packets have been processed | 1350 | /* Avoid overhead when no packets have been processed |
@@ -1370,7 +1370,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) | |||
1370 | return; | 1370 | return; |
1371 | 1371 | ||
1372 | if (__netif_tx_trylock(txq)) { | 1372 | if (__netif_tx_trylock(txq)) { |
1373 | free_old_xmit_skbs(sq); | 1373 | free_old_xmit_skbs(sq, true); |
1374 | __netif_tx_unlock(txq); | 1374 | __netif_tx_unlock(txq); |
1375 | } | 1375 | } |
1376 | 1376 | ||
@@ -1446,7 +1446,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) | |||
1446 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); | 1446 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); |
1447 | 1447 | ||
1448 | __netif_tx_lock(txq, raw_smp_processor_id()); | 1448 | __netif_tx_lock(txq, raw_smp_processor_id()); |
1449 | free_old_xmit_skbs(sq); | 1449 | free_old_xmit_skbs(sq, true); |
1450 | __netif_tx_unlock(txq); | 1450 | __netif_tx_unlock(txq); |
1451 | 1451 | ||
1452 | virtqueue_napi_complete(napi, sq->vq, 0); | 1452 | virtqueue_napi_complete(napi, sq->vq, 0); |
@@ -1515,7 +1515,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1515 | bool use_napi = sq->napi.weight; | 1515 | bool use_napi = sq->napi.weight; |
1516 | 1516 | ||
1517 | /* Free up any pending old buffers before queueing new ones. */ | 1517 | /* Free up any pending old buffers before queueing new ones. */ |
1518 | free_old_xmit_skbs(sq); | 1518 | free_old_xmit_skbs(sq, false); |
1519 | 1519 | ||
1520 | if (use_napi && kick) | 1520 | if (use_napi && kick) |
1521 | virtqueue_enable_cb_delayed(sq->vq); | 1521 | virtqueue_enable_cb_delayed(sq->vq); |
@@ -1558,7 +1558,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1558 | if (!use_napi && | 1558 | if (!use_napi && |
1559 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { | 1559 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
1560 | /* More just got used, free them then recheck. */ | 1560 | /* More just got used, free them then recheck. */ |
1561 | free_old_xmit_skbs(sq); | 1561 | free_old_xmit_skbs(sq, false); |
1562 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { | 1562 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
1563 | netif_start_subqueue(dev, qnum); | 1563 | netif_start_subqueue(dev, qnum); |
1564 | virtqueue_disable_cb(sq->vq); | 1564 | virtqueue_disable_cb(sq->vq); |
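free_old_xmit_skbs() above gains an in_napi flag so that completions reaped from the NAPI poll and clean-tx paths can use napi_consume_skb()'s batched free, while start_xmit() passes false and the skb falls back to dev_consume_skb_any(). A tiny sketch of the convention (the wrapper name is made up):

#include <linux/skbuff.h>

static void complete_tx_skb(struct sk_buff *skb, bool in_napi)
{
	/* non-zero second argument only when running inside NAPI poll */
	napi_consume_skb(skb, in_napi);
}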
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 2b2cf4e554d3..e5ffd5733540 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
@@ -54,12 +54,12 @@ struct nvdimm { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | static inline enum nvdimm_security_state nvdimm_security_state( | 56 | static inline enum nvdimm_security_state nvdimm_security_state( |
57 | struct nvdimm *nvdimm, bool master) | 57 | struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) |
58 | { | 58 | { |
59 | if (!nvdimm->sec.ops) | 59 | if (!nvdimm->sec.ops) |
60 | return -ENXIO; | 60 | return -ENXIO; |
61 | 61 | ||
62 | return nvdimm->sec.ops->state(nvdimm, master); | 62 | return nvdimm->sec.ops->state(nvdimm, ptype); |
63 | } | 63 | } |
64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); | 64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); |
65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) | 65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index deb1a66bf117..9bc585415d9b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) | |||
2041 | return ret; | 2041 | return ret; |
2042 | } | 2042 | } |
2043 | 2043 | ||
2044 | /* irq_queues covers admin queue */ | ||
2044 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | 2045 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) |
2045 | { | 2046 | { |
2046 | unsigned int this_w_queues = write_queues; | 2047 | unsigned int this_w_queues = write_queues; |
2047 | 2048 | ||
2049 | WARN_ON(!irq_queues); | ||
2050 | |||
2048 | /* | 2051 | /* |
2049 | * Setup read/write queue split | 2052 | * Setup read/write queue split, assign admin queue one independent |
2053 | * irq vector if irq_queues is > 1. | ||
2050 | */ | 2054 | */ |
2051 | if (irq_queues == 1) { | 2055 | if (irq_queues <= 2) { |
2052 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; | 2056 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
2053 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2057 | dev->io_queues[HCTX_TYPE_READ] = 0; |
2054 | return; | 2058 | return; |
@@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | |||
2056 | 2060 | ||
2057 | /* | 2061 | /* |
2058 | * If 'write_queues' is set, ensure it leaves room for at least | 2062 | * If 'write_queues' is set, ensure it leaves room for at least |
2059 | * one read queue | 2063 | * one read queue and one admin queue |
2060 | */ | 2064 | */ |
2061 | if (this_w_queues >= irq_queues) | 2065 | if (this_w_queues >= irq_queues) |
2062 | this_w_queues = irq_queues - 1; | 2066 | this_w_queues = irq_queues - 2; |
2063 | 2067 | ||
2064 | /* | 2068 | /* |
2065 | * If 'write_queues' is set to zero, reads and writes will share | 2069 | * If 'write_queues' is set to zero, reads and writes will share |
2066 | * a queue set. | 2070 | * a queue set. |
2067 | */ | 2071 | */ |
2068 | if (!this_w_queues) { | 2072 | if (!this_w_queues) { |
2069 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; | 2073 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; |
2070 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2074 | dev->io_queues[HCTX_TYPE_READ] = 0; |
2071 | } else { | 2075 | } else { |
2072 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; | 2076 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; |
2073 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; | 2077 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; |
2074 | } | 2078 | } |
2075 | } | 2079 | } |
2076 | 2080 | ||
@@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
2095 | this_p_queues = nr_io_queues - 1; | 2099 | this_p_queues = nr_io_queues - 1; |
2096 | irq_queues = 1; | 2100 | irq_queues = 1; |
2097 | } else { | 2101 | } else { |
2098 | irq_queues = nr_io_queues - this_p_queues; | 2102 | irq_queues = nr_io_queues - this_p_queues + 1; |
2099 | } | 2103 | } |
2100 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; | 2104 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; |
2101 | 2105 | ||
@@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
2115 | * If we got a failure and we're down to asking for just | 2119 | * If we got a failure and we're down to asking for just |
2116 | * 1 + 1 queues, just ask for a single vector. We'll share | 2120 | * 1 + 1 queues, just ask for a single vector. We'll share |
2117 | * that between the single IO queue and the admin queue. | 2121 | * that between the single IO queue and the admin queue. |
2122 | * Otherwise, we assign one independent vector to the admin queue. | ||
2118 | */ | 2123 | */ |
2119 | if (result >= 0 && irq_queues > 1) | 2124 | if (irq_queues > 1) |
2120 | irq_queues = irq_sets[0] + irq_sets[1] + 1; | 2125 | irq_queues = irq_sets[0] + irq_sets[1] + 1; |
2121 | 2126 | ||
2122 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, | 2127 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, |
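After this change irq_queues includes the vector reserved for the admin queue, so the read/write split has to subtract it. A standalone restatement of the arithmetic in nvme_calc_io_queues() (plain C, just to make the bookkeeping visible; not driver code):

#include <stdio.h>

/* irq_queues counts one vector reserved for the admin queue. */
static void calc_io_queues(unsigned int irq_queues, unsigned int write_queues,
			   unsigned int *default_q, unsigned int *read_q)
{
	unsigned int w = write_queues;

	if (irq_queues <= 2) {		/* admin + at most one I/O vector */
		*default_q = 1;
		*read_q = 0;
		return;
	}
	if (w >= irq_queues)		/* keep room for 1 read + 1 admin */
		w = irq_queues - 2;
	if (!w) {			/* reads and writes share one set */
		*default_q = irq_queues - 1;
		*read_q = 0;
	} else {
		*default_q = w;
		*read_q = irq_queues - w - 1;
	}
}

int main(void)
{
	unsigned int d, r;

	calc_io_queues(8, 3, &d, &r);	/* 8 vectors, 3 asked for writes */
	printf("default=%u read=%u admin=1\n", d, r);	/* default=3 read=4 */
	return 0;
}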
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 44b37b202e39..ad0df786fe93 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c | |||
@@ -1089,7 +1089,7 @@ out: | |||
1089 | 1089 | ||
1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) | 1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) |
1091 | { | 1091 | { |
1092 | int result; | 1092 | int result = 0; |
1093 | 1093 | ||
1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) | 1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) |
1095 | return 0; | 1095 | return 0; |
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index a09c1c3cf831..49b16f76d78e 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c | |||
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np) | |||
207 | 207 | ||
208 | if (!of_node_check_flag(np, OF_OVERLAY)) { | 208 | if (!of_node_check_flag(np, OF_OVERLAY)) { |
209 | np->name = __of_get_property(np, "name", NULL); | 209 | np->name = __of_get_property(np, "name", NULL); |
210 | np->type = __of_get_property(np, "device_type", NULL); | ||
211 | if (!np->name) | 210 | if (!np->name) |
212 | np->name = "<NULL>"; | 211 | np->name = "<NULL>"; |
213 | if (!np->type) | ||
214 | np->type = "<NULL>"; | ||
215 | 212 | ||
216 | phandle = __of_get_property(np, "phandle", &sz); | 213 | phandle = __of_get_property(np, "phandle", &sz); |
217 | if (!phandle) | 214 | if (!phandle) |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7099c652c6a5..9cc1461aac7d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob, | |||
314 | populate_properties(blob, offset, mem, np, pathp, dryrun); | 314 | populate_properties(blob, offset, mem, np, pathp, dryrun); |
315 | if (!dryrun) { | 315 | if (!dryrun) { |
316 | np->name = of_get_property(np, "name", NULL); | 316 | np->name = of_get_property(np, "name", NULL); |
317 | np->type = of_get_property(np, "device_type", NULL); | ||
318 | |||
319 | if (!np->name) | 317 | if (!np->name) |
320 | np->name = "<NULL>"; | 318 | np->name = "<NULL>"; |
321 | if (!np->type) | ||
322 | np->type = "<NULL>"; | ||
323 | } | 319 | } |
324 | 320 | ||
325 | *pnp = np; | 321 | *pnp = np; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2b5ac43a5690..c423e94baf0f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, | |||
423 | 423 | ||
424 | tchild->parent = target->np; | 424 | tchild->parent = target->np; |
425 | tchild->name = __of_get_property(node, "name", NULL); | 425 | tchild->name = __of_get_property(node, "name", NULL); |
426 | tchild->type = __of_get_property(node, "device_type", NULL); | ||
427 | 426 | ||
428 | if (!tchild->name) | 427 | if (!tchild->name) |
429 | tchild->name = "<NULL>"; | 428 | tchild->name = "<NULL>"; |
430 | if (!tchild->type) | ||
431 | tchild->type = "<NULL>"; | ||
432 | 429 | ||
433 | /* ignore obsolete "linux,phandle" */ | 430 | /* ignore obsolete "linux,phandle" */ |
434 | phandle = __of_get_property(node, "phandle", &size); | 431 | phandle = __of_get_property(node, "phandle", &size); |
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index d3185063d369..7eda43c66c91 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c | |||
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
155 | dp->parent = parent; | 155 | dp->parent = parent; |
156 | 156 | ||
157 | dp->name = of_pdt_get_one_property(node, "name"); | 157 | dp->name = of_pdt_get_one_property(node, "name"); |
158 | dp->type = of_pdt_get_one_property(node, "device_type"); | ||
159 | dp->phandle = node; | 158 | dp->phandle = node; |
160 | 159 | ||
161 | dp->properties = of_pdt_build_prop_list(node); | 160 | dp->properties = of_pdt_build_prop_list(node); |
diff --git a/drivers/of/property.c b/drivers/of/property.c index 08430031bd28..8631efa1daa1 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, | |||
806 | 806 | ||
807 | if (!of_device_is_available(remote)) { | 807 | if (!of_device_is_available(remote)) { |
808 | pr_debug("not available for remote node\n"); | 808 | pr_debug("not available for remote node\n"); |
809 | of_node_put(remote); | ||
809 | return NULL; | 810 | return NULL; |
810 | } | 811 | } |
811 | 812 | ||
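The of_graph fix plugs a node refcount leak: the remote node is returned with an elevated refcount, and the "not available" early return never dropped it. A hedged sketch that roughly mirrors the balancing of_graph_get_remote_node() has to do on every exit path:

#include <linux/of.h>
#include <linux/of_graph.h>

static struct device_node *get_available_remote(const struct device_node *node,
						int port, int endpoint)
{
	struct device_node *ep, *remote;

	ep = of_graph_get_endpoint_by_regs(node, port, endpoint);
	if (!ep)
		return NULL;

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);			/* done with the endpoint */
	if (!remote)
		return NULL;

	if (!of_device_is_available(remote)) {
		of_node_put(remote);		/* balance the get on this path too */
		return NULL;
	}
	return remote;				/* caller must of_node_put() it */
}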
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 4310c7a4212e..2ab92409210a 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -21,13 +21,14 @@ menuconfig PCI | |||
21 | support for PCI-X and the foundations for PCI Express support. | 21 | support for PCI-X and the foundations for PCI Express support. |
22 | Say 'Y' here unless you know what you are doing. | 22 | Say 'Y' here unless you know what you are doing. |
23 | 23 | ||
24 | if PCI | ||
25 | |||
24 | config PCI_DOMAINS | 26 | config PCI_DOMAINS |
25 | bool | 27 | bool |
26 | depends on PCI | 28 | depends on PCI |
27 | 29 | ||
28 | config PCI_DOMAINS_GENERIC | 30 | config PCI_DOMAINS_GENERIC |
29 | bool | 31 | bool |
30 | depends on PCI | ||
31 | select PCI_DOMAINS | 32 | select PCI_DOMAINS |
32 | 33 | ||
33 | config PCI_SYSCALL | 34 | config PCI_SYSCALL |
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig" | |||
37 | 38 | ||
38 | config PCI_MSI | 39 | config PCI_MSI |
39 | bool "Message Signaled Interrupts (MSI and MSI-X)" | 40 | bool "Message Signaled Interrupts (MSI and MSI-X)" |
40 | depends on PCI | ||
41 | select GENERIC_MSI_IRQ | 41 | select GENERIC_MSI_IRQ |
42 | help | 42 | help |
43 | This allows device drivers to enable MSI (Message Signaled | 43 | This allows device drivers to enable MSI (Message Signaled |
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN | |||
59 | config PCI_QUIRKS | 59 | config PCI_QUIRKS |
60 | default y | 60 | default y |
61 | bool "Enable PCI quirk workarounds" if EXPERT | 61 | bool "Enable PCI quirk workarounds" if EXPERT |
62 | depends on PCI | ||
63 | help | 62 | help |
64 | This enables workarounds for various PCI chipset bugs/quirks. | 63 | This enables workarounds for various PCI chipset bugs/quirks. |
65 | Disable this only if your target machine is unaffected by PCI | 64 | Disable this only if your target machine is unaffected by PCI |
@@ -67,7 +66,7 @@ config PCI_QUIRKS | |||
67 | 66 | ||
68 | config PCI_DEBUG | 67 | config PCI_DEBUG |
69 | bool "PCI Debugging" | 68 | bool "PCI Debugging" |
70 | depends on PCI && DEBUG_KERNEL | 69 | depends on DEBUG_KERNEL |
71 | help | 70 | help |
72 | Say Y here if you want the PCI core to produce a bunch of debug | 71 | Say Y here if you want the PCI core to produce a bunch of debug |
73 | messages to the system log. Select this if you are having a | 72 | messages to the system log. Select this if you are having a |
@@ -77,7 +76,6 @@ config PCI_DEBUG | |||
77 | 76 | ||
78 | config PCI_REALLOC_ENABLE_AUTO | 77 | config PCI_REALLOC_ENABLE_AUTO |
79 | bool "Enable PCI resource re-allocation detection" | 78 | bool "Enable PCI resource re-allocation detection" |
80 | depends on PCI | ||
81 | depends on PCI_IOV | 79 | depends on PCI_IOV |
82 | help | 80 | help |
83 | Say Y here if you want the PCI core to detect if PCI resource | 81 | Say Y here if you want the PCI core to detect if PCI resource |
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO | |||
90 | 88 | ||
91 | config PCI_STUB | 89 | config PCI_STUB |
92 | tristate "PCI Stub driver" | 90 | tristate "PCI Stub driver" |
93 | depends on PCI | ||
94 | help | 91 | help |
95 | Say Y or M here if you want to be able to reserve a PCI device | 92 | Say Y or M here if you want to be able to reserve a PCI device |
96 | when it is going to be assigned to a guest operating system. | 93 | when it is going to be assigned to a guest operating system. |
@@ -99,7 +96,6 @@ config PCI_STUB | |||
99 | 96 | ||
100 | config PCI_PF_STUB | 97 | config PCI_PF_STUB |
101 | tristate "PCI PF Stub driver" | 98 | tristate "PCI PF Stub driver" |
102 | depends on PCI | ||
103 | depends on PCI_IOV | 99 | depends on PCI_IOV |
104 | help | 100 | help |
105 | Say Y or M here if you want to enable support for devices that | 101 | Say Y or M here if you want to enable support for devices that |
@@ -111,7 +107,7 @@ config PCI_PF_STUB | |||
111 | 107 | ||
112 | config XEN_PCIDEV_FRONTEND | 108 | config XEN_PCIDEV_FRONTEND |
113 | tristate "Xen PCI Frontend" | 109 | tristate "Xen PCI Frontend" |
114 | depends on PCI && X86 && XEN | 110 | depends on X86 && XEN |
115 | select PCI_XEN | 111 | select PCI_XEN |
116 | select XEN_XENBUS_FRONTEND | 112 | select XEN_XENBUS_FRONTEND |
117 | default y | 113 | default y |
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL | |||
133 | 129 | ||
134 | config PCI_IOV | 130 | config PCI_IOV |
135 | bool "PCI IOV support" | 131 | bool "PCI IOV support" |
136 | depends on PCI | ||
137 | select PCI_ATS | 132 | select PCI_ATS |
138 | help | 133 | help |
139 | I/O Virtualization is a PCI feature supported by some devices | 134 | I/O Virtualization is a PCI feature supported by some devices |
@@ -144,7 +139,6 @@ config PCI_IOV | |||
144 | 139 | ||
145 | config PCI_PRI | 140 | config PCI_PRI |
146 | bool "PCI PRI support" | 141 | bool "PCI PRI support" |
147 | depends on PCI | ||
148 | select PCI_ATS | 142 | select PCI_ATS |
149 | help | 143 | help |
150 | PRI is the PCI Page Request Interface. It allows PCI devices that are | 144 | PRI is the PCI Page Request Interface. It allows PCI devices that are |
@@ -154,7 +148,6 @@ config PCI_PRI | |||
154 | 148 | ||
155 | config PCI_PASID | 149 | config PCI_PASID |
156 | bool "PCI PASID support" | 150 | bool "PCI PASID support" |
157 | depends on PCI | ||
158 | select PCI_ATS | 151 | select PCI_ATS |
159 | help | 152 | help |
160 | Process Address Space Identifiers (PASIDs) can be used by PCI devices | 153 | Process Address Space Identifiers (PASIDs) can be used by PCI devices |
@@ -167,7 +160,7 @@ config PCI_PASID | |||
167 | 160 | ||
168 | config PCI_P2PDMA | 161 | config PCI_P2PDMA |
169 | bool "PCI peer-to-peer transfer support" | 162 | bool "PCI peer-to-peer transfer support" |
170 | depends on PCI && ZONE_DEVICE | 163 | depends on ZONE_DEVICE |
171 | select GENERIC_ALLOCATOR | 164 | select GENERIC_ALLOCATOR |
172 | help | 165 | help |
173 | Enables drivers to do PCI peer-to-peer transactions to and from | 166 | Enables drivers to do PCI peer-to-peer transactions to and from |
@@ -184,12 +177,11 @@ config PCI_P2PDMA | |||
184 | 177 | ||
185 | config PCI_LABEL | 178 | config PCI_LABEL |
186 | def_bool y if (DMI || ACPI) | 179 | def_bool y if (DMI || ACPI) |
187 | depends on PCI | ||
188 | select NLS | 180 | select NLS |
189 | 181 | ||
190 | config PCI_HYPERV | 182 | config PCI_HYPERV |
191 | tristate "Hyper-V PCI Frontend" | 183 | tristate "Hyper-V PCI Frontend" |
192 | depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 | 184 | depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 |
193 | help | 185 | help |
194 | The PCI device frontend driver allows the kernel to import arbitrary | 186 | The PCI device frontend driver allows the kernel to import arbitrary |
195 | PCI devices from a PCI backend to support PCI driver domains. | 187 | PCI devices from a PCI backend to support PCI driver domains. |
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig" | |||
198 | source "drivers/pci/controller/Kconfig" | 190 | source "drivers/pci/controller/Kconfig" |
199 | source "drivers/pci/endpoint/Kconfig" | 191 | source "drivers/pci/endpoint/Kconfig" |
200 | source "drivers/pci/switch/Kconfig" | 192 | source "drivers/pci/switch/Kconfig" |
193 | |||
194 | endif | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7a1c8a09efa5..4c0b47867258 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
1168 | const struct irq_affinity *affd) | 1168 | const struct irq_affinity *affd) |
1169 | { | 1169 | { |
1170 | static const struct irq_affinity msi_default_affd; | 1170 | static const struct irq_affinity msi_default_affd; |
1171 | int vecs = -ENOSPC; | 1171 | int msix_vecs = -ENOSPC; |
1172 | int msi_vecs = -ENOSPC; | ||
1172 | 1173 | ||
1173 | if (flags & PCI_IRQ_AFFINITY) { | 1174 | if (flags & PCI_IRQ_AFFINITY) { |
1174 | if (!affd) | 1175 | if (!affd) |
@@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
1179 | } | 1180 | } |
1180 | 1181 | ||
1181 | if (flags & PCI_IRQ_MSIX) { | 1182 | if (flags & PCI_IRQ_MSIX) { |
1182 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1183 | msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, |
1183 | affd); | 1184 | max_vecs, affd); |
1184 | if (vecs > 0) | 1185 | if (msix_vecs > 0) |
1185 | return vecs; | 1186 | return msix_vecs; |
1186 | } | 1187 | } |
1187 | 1188 | ||
1188 | if (flags & PCI_IRQ_MSI) { | 1189 | if (flags & PCI_IRQ_MSI) { |
1189 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); | 1190 | msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, |
1190 | if (vecs > 0) | 1191 | affd); |
1191 | return vecs; | 1192 | if (msi_vecs > 0) |
1193 | return msi_vecs; | ||
1192 | } | 1194 | } |
1193 | 1195 | ||
1194 | /* use legacy irq if allowed */ | 1196 | /* use legacy irq if allowed */ |
@@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
1199 | } | 1201 | } |
1200 | } | 1202 | } |
1201 | 1203 | ||
1202 | return vecs; | 1204 | if (msix_vecs == -ENOSPC) |
1205 | return -ENOSPC; | ||
1206 | return msi_vecs; | ||
1203 | } | 1207 | } |
1204 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); | 1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); |
1205 | 1209 | ||
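The msi.c hunks keep the MSI-X and MSI results in separate variables so that, when every allocation fails, an MSI-X -ENOSPC is what gets reported; callers that react to -ENOSPC by retrying with fewer vectors keep working. A generic sketch of that error-preference logic, with try_msix()/try_msi() as stand-ins rather than real kernel APIs:

#include <errno.h>

static int alloc_vectors(int (*try_msix)(int want), int (*try_msi)(int want),
			 int want)
{
	int msix_vecs = try_msix(want);
	int msi_vecs;

	if (msix_vecs > 0)
		return msix_vecs;

	msi_vecs = try_msi(want);
	if (msi_vecs > 0)
		return msi_vecs;

	/* both failed: prefer the -ENOSPC from the first attempt */
	return msix_vecs == -ENOSPC ? -ENOSPC : msi_vecs;
}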
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..c25acace7d91 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str) | |||
6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { | 6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { |
6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); | 6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); |
6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { | 6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
6198 | disable_acs_redir_param = str + 18; | 6198 | disable_acs_redir_param = |
6199 | kstrdup(str + 18, GFP_KERNEL); | ||
6199 | } else { | 6200 | } else { |
6200 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 6201 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
6201 | str); | 6202 | str); |
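pci_setup() is an early-param handler, and the string it receives lives in init memory that is discarded once boot finishes; stashing the raw pointer would leave disable_acs_redir_param dangling, hence the kstrdup(). A minimal sketch of the idiom (names are illustrative):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

static const char *saved_option;	/* consulted long after boot */

static int __init save_option(char *str)
{
	saved_option = kstrdup(str, GFP_KERNEL);	/* not: saved_option = str */
	return saved_option ? 0 : -ENOMEM;
}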
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e3b62c2ee8d1..5e2109c54c7c 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL | |||
1009 | 1009 | ||
1010 | config INTEL_IPS | 1010 | config INTEL_IPS |
1011 | tristate "Intel Intelligent Power Sharing" | 1011 | tristate "Intel Intelligent Power Sharing" |
1012 | depends on ACPI | 1012 | depends on ACPI && PCI |
1013 | ---help--- | 1013 | ---help--- |
1014 | Intel Calpella platforms support dynamic power sharing between the | 1014 | Intel Calpella platforms support dynamic power sharing between the |
1015 | CPU and GPU, maximizing performance in a given TDP. This driver, | 1015 | CPU and GPU, maximizing performance in a given TDP. This driver, |
@@ -1135,7 +1135,7 @@ config SAMSUNG_Q10 | |||
1135 | 1135 | ||
1136 | config APPLE_GMUX | 1136 | config APPLE_GMUX |
1137 | tristate "Apple Gmux Driver" | 1137 | tristate "Apple Gmux Driver" |
1138 | depends on ACPI | 1138 | depends on ACPI && PCI |
1139 | depends on PNP | 1139 | depends on PNP |
1140 | depends on BACKLIGHT_CLASS_DEVICE | 1140 | depends on BACKLIGHT_CLASS_DEVICE |
1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE | 1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE |
@@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT | |||
1174 | 1174 | ||
1175 | config INTEL_PMC_IPC | 1175 | config INTEL_PMC_IPC |
1176 | tristate "Intel PMC IPC Driver" | 1176 | tristate "Intel PMC IPC Driver" |
1177 | depends on ACPI | 1177 | depends on ACPI && PCI |
1178 | ---help--- | 1178 | ---help--- |
1179 | This driver provides support for PMC control on some Intel platforms. | 1179 | This driver provides support for PMC control on some Intel platforms. |
1180 | The PMC is an ARC processor which defines IPC commands for communication | 1180 | The PMC is an ARC processor which defines IPC commands for communication |
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 183fc42a510a..2d7cd344f3bf 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c | |||
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, | |||
153 | const bool * ctx, | 153 | const bool * ctx, |
154 | struct irq_affinity *desc) | 154 | struct irq_affinity *desc) |
155 | { | 155 | { |
156 | int i, ret; | 156 | int i, ret, queue_idx = 0; |
157 | 157 | ||
158 | for (i = 0; i < nvqs; ++i) { | 158 | for (i = 0; i < nvqs; ++i) { |
159 | vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], | 159 | if (!names[i]) { |
160 | vqs[i] = NULL; | ||
161 | continue; | ||
162 | } | ||
163 | |||
164 | vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
160 | ctx ? ctx[i] : false); | 165 | ctx ? ctx[i] : false); |
161 | if (IS_ERR(vqs[i])) { | 166 | if (IS_ERR(vqs[i])) { |
162 | ret = PTR_ERR(vqs[i]); | 167 | ret = PTR_ERR(vqs[i]); |
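This remoteproc hunk and the virtio-ccw one below teach find_vqs() the same convention: a NULL entry in names[] means the caller does not want that virtqueue, so the slot stays NULL and no hardware queue index is consumed for it. A transport-agnostic sketch, where setup_one_vq() stands in for rp_find_vq() or virtio_ccw_setup_vq():

#include <stddef.h>

static int find_vqs_sketch(unsigned int nvqs, void *vqs[],
			   const char * const names[],
			   void *(*setup_one_vq)(unsigned int hw_idx,
						 const char *name))
{
	unsigned int i, queue_idx = 0;

	for (i = 0; i < nvqs; i++) {
		if (!names[i]) {
			vqs[i] = NULL;			/* explicitly unused slot */
			continue;
		}
		/* only wanted queues consume a hardware index */
		vqs[i] = setup_one_vq(queue_idx++, names[i]);
		if (!vqs[i])
			return -1;			/* error unwinding elided */
	}
	return 0;
}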
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index fc9dbad476c0..ae1d56da671d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
635 | { | 635 | { |
636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); | 636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); |
637 | unsigned long *indicatorp = NULL; | 637 | unsigned long *indicatorp = NULL; |
638 | int ret, i; | 638 | int ret, i, queue_idx = 0; |
639 | struct ccw1 *ccw; | 639 | struct ccw1 *ccw; |
640 | 640 | ||
641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); | 641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
643 | return -ENOMEM; | 643 | return -ENOMEM; |
644 | 644 | ||
645 | for (i = 0; i < nvqs; ++i) { | 645 | for (i = 0; i < nvqs; ++i) { |
646 | vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], | 646 | if (!names[i]) { |
647 | ctx ? ctx[i] : false, ccw); | 647 | vqs[i] = NULL; |
648 | continue; | ||
649 | } | ||
650 | |||
651 | vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], | ||
652 | names[i], ctx ? ctx[i] : false, | ||
653 | ccw); | ||
648 | if (IS_ERR(vqs[i])) { | 654 | if (IS_ERR(vqs[i])) { |
649 | ret = PTR_ERR(vqs[i]); | 655 | ret = PTR_ERR(vqs[i]); |
650 | vqs[i] = NULL; | 656 | vqs[i] = NULL; |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 8a20411699d9..75e1273a44b3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, | |||
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | 1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, |
1147 | unsigned int tid, int pg_idx, bool reply) | 1147 | unsigned int tid, int pg_idx) |
1148 | { | 1148 | { |
1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
1150 | GFP_KERNEL); | 1150 | GFP_KERNEL); |
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
1160 | req = (struct cpl_set_tcb_field *)skb->head; | 1160 | req = (struct cpl_set_tcb_field *)skb->head; |
1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
1163 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1163 | req->reply = V_NO_REPLY(1); |
1164 | req->cpu_idx = 0; | 1164 | req->cpu_idx = 0; |
1165 | req->word = htons(31); | 1165 | req->word = htons(31); |
1166 | req->mask = cpu_to_be64(0xF0000000); | 1166 | req->mask = cpu_to_be64(0xF0000000); |
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
1177 | * @tid: connection id | 1177 | * @tid: connection id |
1178 | * @hcrc: header digest enabled | 1178 | * @hcrc: header digest enabled |
1179 | * @dcrc: data digest enabled | 1179 | * @dcrc: data digest enabled |
1180 | * @reply: request reply from h/w | ||
1181 | * set up the iscsi digest settings for a connection identified by tid | 1180 | * set up the iscsi digest settings for a connection identified by tid |
1182 | */ | 1181 | */ |
1183 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 1182 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
1184 | int hcrc, int dcrc, int reply) | 1183 | int hcrc, int dcrc) |
1185 | { | 1184 | { |
1186 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1185 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
1187 | GFP_KERNEL); | 1186 | GFP_KERNEL); |
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
1197 | req = (struct cpl_set_tcb_field *)skb->head; | 1196 | req = (struct cpl_set_tcb_field *)skb->head; |
1198 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1197 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
1199 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1198 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
1200 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1199 | req->reply = V_NO_REPLY(1); |
1201 | req->cpu_idx = 0; | 1200 | req->cpu_idx = 0; |
1202 | req->word = htons(31); | 1201 | req->word = htons(31); |
1203 | req->mask = cpu_to_be64(0x0F000000); | 1202 | req->mask = cpu_to_be64(0x0F000000); |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 49f8028ac524..d26f50af00ea 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
1548 | struct cxgbi_sock *csk; | 1548 | struct cxgbi_sock *csk; |
1549 | 1549 | ||
1550 | csk = lookup_tid(t, tid); | 1550 | csk = lookup_tid(t, tid); |
1551 | if (!csk) | 1551 | if (!csk) { |
1552 | pr_err("can't find conn. for tid %u.\n", tid); | 1552 | pr_err("can't find conn. for tid %u.\n", tid); |
1553 | return; | ||
1554 | } | ||
1553 | 1555 | ||
1554 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 1556 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
1555 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", | 1557 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", |
1556 | csk, csk->state, csk->flags, csk->tid, rpl->status); | 1558 | csk, csk->state, csk->flags, csk->tid, rpl->status); |
1557 | 1559 | ||
1558 | if (rpl->status != CPL_ERR_NONE) | 1560 | if (rpl->status != CPL_ERR_NONE) { |
1559 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", | 1561 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", |
1560 | csk, tid, rpl->status); | 1562 | csk, tid, rpl->status); |
1563 | csk->err = -EINVAL; | ||
1564 | } | ||
1565 | |||
1566 | complete(&csk->cmpl); | ||
1561 | 1567 | ||
1562 | __kfree_skb(skb); | 1568 | __kfree_skb(skb); |
1563 | } | 1569 | } |
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, | |||
1983 | } | 1989 | } |
1984 | 1990 | ||
1985 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | 1991 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, |
1986 | int pg_idx, bool reply) | 1992 | int pg_idx) |
1987 | { | 1993 | { |
1988 | struct sk_buff *skb; | 1994 | struct sk_buff *skb; |
1989 | struct cpl_set_tcb_field *req; | 1995 | struct cpl_set_tcb_field *req; |
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
1999 | req = (struct cpl_set_tcb_field *)skb->head; | 2005 | req = (struct cpl_set_tcb_field *)skb->head; |
2000 | INIT_TP_WR(req, csk->tid); | 2006 | INIT_TP_WR(req, csk->tid); |
2001 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | 2007 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); |
2002 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2008 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
2003 | req->word_cookie = htons(0); | 2009 | req->word_cookie = htons(0); |
2004 | req->mask = cpu_to_be64(0x3 << 8); | 2010 | req->mask = cpu_to_be64(0x3 << 8); |
2005 | req->val = cpu_to_be64(pg_idx << 8); | 2011 | req->val = cpu_to_be64(pg_idx << 8); |
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
2008 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2014 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
2009 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); | 2015 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); |
2010 | 2016 | ||
2017 | reinit_completion(&csk->cmpl); | ||
2011 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2018 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
2012 | return 0; | 2019 | wait_for_completion(&csk->cmpl); |
2020 | |||
2021 | return csk->err; | ||
2013 | } | 2022 | } |
2014 | 2023 | ||
2015 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 2024 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
2016 | int hcrc, int dcrc, int reply) | 2025 | int hcrc, int dcrc) |
2017 | { | 2026 | { |
2018 | struct sk_buff *skb; | 2027 | struct sk_buff *skb; |
2019 | struct cpl_set_tcb_field *req; | 2028 | struct cpl_set_tcb_field *req; |
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
2031 | req = (struct cpl_set_tcb_field *)skb->head; | 2040 | req = (struct cpl_set_tcb_field *)skb->head; |
2032 | INIT_TP_WR(req, tid); | 2041 | INIT_TP_WR(req, tid); |
2033 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 2042 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
2034 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2043 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
2035 | req->word_cookie = htons(0); | 2044 | req->word_cookie = htons(0); |
2036 | req->mask = cpu_to_be64(0x3 << 4); | 2045 | req->mask = cpu_to_be64(0x3 << 4); |
2037 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | | 2046 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
2041 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2050 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
2042 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); | 2051 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); |
2043 | 2052 | ||
2053 | reinit_completion(&csk->cmpl); | ||
2044 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2054 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
2045 | return 0; | 2055 | wait_for_completion(&csk->cmpl); |
2056 | |||
2057 | return csk->err; | ||
2046 | } | 2058 | } |
2047 | 2059 | ||
2048 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) | 2060 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 75f876409fb9..245742557c03 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) | |||
573 | skb_queue_head_init(&csk->receive_queue); | 573 | skb_queue_head_init(&csk->receive_queue); |
574 | skb_queue_head_init(&csk->write_queue); | 574 | skb_queue_head_init(&csk->write_queue); |
575 | timer_setup(&csk->retry_timer, NULL, 0); | 575 | timer_setup(&csk->retry_timer, NULL, 0); |
576 | init_completion(&csk->cmpl); | ||
576 | rwlock_init(&csk->callback_lock); | 577 | rwlock_init(&csk->callback_lock); |
577 | csk->cdev = cdev; | 578 | csk->cdev = cdev; |
578 | csk->flags = 0; | 579 | csk->flags = 0; |
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | |||
2251 | if (!err && conn->hdrdgst_en) | 2252 | if (!err && conn->hdrdgst_en) |
2252 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2253 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2253 | conn->hdrdgst_en, | 2254 | conn->hdrdgst_en, |
2254 | conn->datadgst_en, 0); | 2255 | conn->datadgst_en); |
2255 | break; | 2256 | break; |
2256 | case ISCSI_PARAM_DATADGST_EN: | 2257 | case ISCSI_PARAM_DATADGST_EN: |
2257 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2258 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2258 | if (!err && conn->datadgst_en) | 2259 | if (!err && conn->datadgst_en) |
2259 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2260 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2260 | conn->hdrdgst_en, | 2261 | conn->hdrdgst_en, |
2261 | conn->datadgst_en, 0); | 2262 | conn->datadgst_en); |
2262 | break; | 2263 | break; |
2263 | case ISCSI_PARAM_MAX_R2T: | 2264 | case ISCSI_PARAM_MAX_R2T: |
2264 | return iscsi_tcp_set_max_r2t(conn, buf); | 2265 | return iscsi_tcp_set_max_r2t(conn, buf); |
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, | |||
2384 | 2385 | ||
2385 | ppm = csk->cdev->cdev2ppm(csk->cdev); | 2386 | ppm = csk->cdev->cdev2ppm(csk->cdev); |
2386 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, | 2387 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, |
2387 | ppm->tformat.pgsz_idx_dflt, 0); | 2388 | ppm->tformat.pgsz_idx_dflt); |
2388 | if (err < 0) | 2389 | if (err < 0) |
2389 | return err; | 2390 | return err; |
2390 | 2391 | ||
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 5d5d8b50d842..1917ff57651d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
@@ -149,6 +149,7 @@ struct cxgbi_sock { | |||
149 | struct sk_buff_head receive_queue; | 149 | struct sk_buff_head receive_queue; |
150 | struct sk_buff_head write_queue; | 150 | struct sk_buff_head write_queue; |
151 | struct timer_list retry_timer; | 151 | struct timer_list retry_timer; |
152 | struct completion cmpl; | ||
152 | int err; | 153 | int err; |
153 | rwlock_t callback_lock; | 154 | rwlock_t callback_lock; |
154 | void *user_data; | 155 | void *user_data; |
@@ -490,9 +491,9 @@ struct cxgbi_device { | |||
490 | struct cxgbi_ppm *, | 491 | struct cxgbi_ppm *, |
491 | struct cxgbi_task_tag_info *); | 492 | struct cxgbi_task_tag_info *); |
492 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, | 493 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, |
493 | unsigned int, int, int, int); | 494 | unsigned int, int, int); |
494 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, | 495 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, |
495 | unsigned int, int, bool); | 496 | unsigned int, int); |
496 | 497 | ||
497 | void (*csk_release_offload_resources)(struct cxgbi_sock *); | 498 | void (*csk_release_offload_resources)(struct cxgbi_sock *); |
498 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); | 499 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); |
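[Editor's note: illustration added for clarity; not part of the merged diff.] The cxgbi hunks above turn ddp_setup_conn_pgidx()/ddp_setup_conn_digest() from fire-and-forget work requests into synchronous ones: the cxgb4i request now asks the hardware for a reply (NO_REPLY_V(0)), do_set_tcb_rpl() records the status in csk->err and calls complete(&csk->cmpl), and the submitter blocks on that completion before returning the result to the iSCSI layer. A minimal, hedged sketch of the pattern with made-up names (demo_*), not the driver's real symbols:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct demo_sock {
            struct completion cmpl; /* init_completion() once at create time */
            int err;                /* filled in by the reply handler */
    };

    /* reply path: record the hardware status and wake the waiting submitter */
    static void demo_handle_reply(struct demo_sock *s, int status)
    {
            s->err = status ? -EINVAL : 0;
            complete(&s->cmpl);
    }

    /* request path: send the work request, then wait for the reply handler */
    static int demo_setup_and_wait(struct demo_sock *s,
                                   int (*send_wr)(struct demo_sock *))
    {
            int ret;

            reinit_completion(&s->cmpl);
            ret = send_wr(s);       /* e.g. queue the SET_TCB_FIELD-style WR */
            if (ret)
                    return ret;
            wait_for_completion(&s->cmpl);
            return s->err;
    }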
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e2420a810e99..c92b3822c408 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | |||
@@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; | 2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; |
2508 | } | 2508 | } |
2509 | 2509 | ||
2510 | if (hisi_hba->prot_mask) { | ||
2511 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
2512 | prot_mask); | ||
2513 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
2514 | } | ||
2515 | |||
2510 | rc = scsi_add_host(shost, dev); | 2516 | rc = scsi_add_host(shost, dev); |
2511 | if (rc) | 2517 | if (rc) |
2512 | goto err_out_ha; | 2518 | goto err_out_ha; |
@@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2519 | if (rc) | 2525 | if (rc) |
2520 | goto err_out_register_ha; | 2526 | goto err_out_register_ha; |
2521 | 2527 | ||
2522 | if (hisi_hba->prot_mask) { | ||
2523 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
2524 | prot_mask); | ||
2525 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
2526 | } | ||
2527 | |||
2528 | scsi_scan_host(shost); | 2528 | scsi_scan_host(shost); |
2529 | 2529 | ||
2530 | return 0; | 2530 | return 0; |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68b90c4f79a3..1727d0c71b12 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
576 | shost->max_lun = ~0; | 576 | shost->max_lun = ~0; |
577 | shost->max_cmd_len = MAX_COMMAND_SIZE; | 577 | shost->max_cmd_len = MAX_COMMAND_SIZE; |
578 | 578 | ||
579 | /* turn on DIF support */ | ||
580 | scsi_host_set_prot(shost, | ||
581 | SHOST_DIF_TYPE1_PROTECTION | | ||
582 | SHOST_DIF_TYPE2_PROTECTION | | ||
583 | SHOST_DIF_TYPE3_PROTECTION); | ||
584 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); | ||
585 | |||
579 | err = scsi_add_host(shost, &pdev->dev); | 586 | err = scsi_add_host(shost, &pdev->dev); |
580 | if (err) | 587 | if (err) |
581 | goto err_shost; | 588 | goto err_shost; |
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
663 | goto err_host_alloc; | 670 | goto err_host_alloc; |
664 | } | 671 | } |
665 | pci_info->hosts[i] = h; | 672 | pci_info->hosts[i] = h; |
666 | |||
667 | /* turn on DIF support */ | ||
668 | scsi_host_set_prot(to_shost(h), | ||
669 | SHOST_DIF_TYPE1_PROTECTION | | ||
670 | SHOST_DIF_TYPE2_PROTECTION | | ||
671 | SHOST_DIF_TYPE3_PROTECTION); | ||
672 | scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); | ||
673 | } | 673 | } |
674 | 674 | ||
675 | err = isci_setup_interrupts(pdev); | 675 | err = isci_setup_interrupts(pdev); |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 12fd74761ae0..2242e9b3ca12 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -9407,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; | 9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
9408 | if (phba->link_flag & LS_LOOPBACK_MODE) | 9408 | if (phba->link_flag & LS_LOOPBACK_MODE) |
9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); | 9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); |
9410 | /* fall through */ | ||
9410 | case CMD_XMIT_SEQUENCE64_CR: | 9411 | case CMD_XMIT_SEQUENCE64_CR: |
9411 | /* word3 iocb=io_tag32 wqe=reserved */ | 9412 | /* word3 iocb=io_tag32 wqe=reserved */ |
9412 | wqe->xmit_sequence.rsvd3 = 0; | 9413 | wqe->xmit_sequence.rsvd3 = 0; |
@@ -13528,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
13528 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13529 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
13529 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13530 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
13530 | "2537 Receive Frame Truncated!!\n"); | 13531 | "2537 Receive Frame Truncated!!\n"); |
13532 | /* fall through */ | ||
13531 | case FC_STATUS_RQ_SUCCESS: | 13533 | case FC_STATUS_RQ_SUCCESS: |
13532 | spin_lock_irqsave(&phba->hbalock, iflags); | 13534 | spin_lock_irqsave(&phba->hbalock, iflags); |
13533 | lpfc_sli4_rq_release(hrq, drq); | 13535 | lpfc_sli4_rq_release(hrq, drq); |
@@ -13937,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
13937 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13939 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
13938 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13940 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
13939 | "6126 Receive Frame Truncated!!\n"); | 13941 | "6126 Receive Frame Truncated!!\n"); |
13940 | /* Drop thru */ | 13942 | /* fall through */ |
13941 | case FC_STATUS_RQ_SUCCESS: | 13943 | case FC_STATUS_RQ_SUCCESS: |
13942 | spin_lock_irqsave(&phba->hbalock, iflags); | 13944 | spin_lock_irqsave(&phba->hbalock, iflags); |
13943 | lpfc_sli4_rq_release(hrq, drq); | 13945 | lpfc_sli4_rq_release(hrq, drq); |
@@ -14849,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) | |||
14849 | eq->entry_count); | 14851 | eq->entry_count); |
14850 | if (eq->entry_count < 256) | 14852 | if (eq->entry_count < 256) |
14851 | return -EINVAL; | 14853 | return -EINVAL; |
14852 | /* otherwise default to smallest count (drop through) */ | 14854 | /* fall through - otherwise default to smallest count */ |
14853 | case 256: | 14855 | case 256: |
14854 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | 14856 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
14855 | LPFC_EQ_CNT_256); | 14857 | LPFC_EQ_CNT_256); |
@@ -14980,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
14980 | LPFC_CQ_CNT_WORD7); | 14982 | LPFC_CQ_CNT_WORD7); |
14981 | break; | 14983 | break; |
14982 | } | 14984 | } |
14983 | /* Fall Thru */ | 14985 | /* fall through */ |
14984 | default: | 14986 | default: |
14985 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 14987 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
14986 | "0361 Unsupported CQ count: " | 14988 | "0361 Unsupported CQ count: " |
@@ -14991,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
14991 | status = -EINVAL; | 14993 | status = -EINVAL; |
14992 | goto out; | 14994 | goto out; |
14993 | } | 14995 | } |
14994 | /* otherwise default to smallest count (drop through) */ | 14996 | /* fall through - otherwise default to smallest count */ |
14995 | case 256: | 14997 | case 256: |
14996 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | 14998 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
14997 | LPFC_CQ_CNT_256); | 14999 | LPFC_CQ_CNT_256); |
@@ -15151,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
15151 | LPFC_CQ_CNT_WORD7); | 15153 | LPFC_CQ_CNT_WORD7); |
15152 | break; | 15154 | break; |
15153 | } | 15155 | } |
15154 | /* Fall Thru */ | 15156 | /* fall through */ |
15155 | default: | 15157 | default: |
15156 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 15158 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
15157 | "3118 Bad CQ count. (%d)\n", | 15159 | "3118 Bad CQ count. (%d)\n", |
@@ -15160,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
15160 | status = -EINVAL; | 15162 | status = -EINVAL; |
15161 | goto out; | 15163 | goto out; |
15162 | } | 15164 | } |
15163 | /* otherwise default to smallest (drop thru) */ | 15165 | /* fall through - otherwise default to smallest */ |
15164 | case 256: | 15166 | case 256: |
15165 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, | 15167 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, |
15166 | &cq_set->u.request, LPFC_CQ_CNT_256); | 15168 | &cq_set->u.request, LPFC_CQ_CNT_256); |
@@ -15432,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
15432 | status = -EINVAL; | 15434 | status = -EINVAL; |
15433 | goto out; | 15435 | goto out; |
15434 | } | 15436 | } |
15435 | /* otherwise default to smallest count (drop through) */ | 15437 | /* fall through - otherwise default to smallest count */ |
15436 | case 16: | 15438 | case 16: |
15437 | bf_set(lpfc_mq_context_ring_size, | 15439 | bf_set(lpfc_mq_context_ring_size, |
15438 | &mq_create_ext->u.request.context, | 15440 | &mq_create_ext->u.request.context, |
@@ -15851,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15851 | status = -EINVAL; | 15853 | status = -EINVAL; |
15852 | goto out; | 15854 | goto out; |
15853 | } | 15855 | } |
15854 | /* otherwise default to smallest count (drop through) */ | 15856 | /* fall through - otherwise default to smallest count */ |
15855 | case 512: | 15857 | case 512: |
15856 | bf_set(lpfc_rq_context_rqe_count, | 15858 | bf_set(lpfc_rq_context_rqe_count, |
15857 | &rq_create->u.request.context, | 15859 | &rq_create->u.request.context, |
@@ -15988,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
15988 | status = -EINVAL; | 15990 | status = -EINVAL; |
15989 | goto out; | 15991 | goto out; |
15990 | } | 15992 | } |
15991 | /* otherwise default to smallest count (drop through) */ | 15993 | /* fall through - otherwise default to smallest count */ |
15992 | case 512: | 15994 | case 512: |
15993 | bf_set(lpfc_rq_context_rqe_count, | 15995 | bf_set(lpfc_rq_context_rqe_count, |
15994 | &rq_create->u.request.context, | 15996 | &rq_create->u.request.context, |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7eaa400f6328..fcbff83c0097 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) | |||
6236 | instance->consistent_mask_64bit = true; | 6236 | instance->consistent_mask_64bit = true; |
6237 | 6237 | ||
6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", | 6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", |
6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), | 6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), |
6240 | (instance->consistent_mask_64bit ? "63" : "32")); | 6240 | (instance->consistent_mask_64bit ? "63" : "32")); |
6241 | 6241 | ||
6242 | return 0; | 6242 | return 0; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a9a25f0eaf6f..647f48a28f85 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance) | |||
175 | /* | 175 | /* |
176 | * Check if it is our interrupt | 176 | * Check if it is our interrupt |
177 | */ | 177 | */ |
178 | status = readl(&regs->outbound_intr_status); | 178 | status = megasas_readl(instance, |
179 | &regs->outbound_intr_status); |
179 | 180 | ||
180 | if (status & 1) { | 181 | if (status & 1) { |
181 | writel(status, &regs->outbound_intr_status); | 182 | writel(status, &regs->outbound_intr_status); |
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 4c5a3d23e010..084f2fcced0a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) | |||
657 | if (dev->dev_type == SAS_SATA_DEV) { | 657 | if (dev->dev_type == SAS_SATA_DEV) { |
658 | pm8001_device->attached_phy = | 658 | pm8001_device->attached_phy = |
659 | dev->rphy->identify.phy_identifier; | 659 | dev->rphy->identify.phy_identifier; |
660 | flag = 1; /* directly sata*/ | 660 | flag = 1; /* directly sata */ |
661 | } | 661 | } |
662 | } /*register this device to HBA*/ | 662 | } /*register this device to HBA*/ |
663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); | 663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4da660c1c431..6d6d6013e35b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
953 | 953 | ||
954 | qedi_ep = ep->dd_data; | 954 | qedi_ep = ep->dd_data; |
955 | if (qedi_ep->state == EP_STATE_IDLE || | 955 | if (qedi_ep->state == EP_STATE_IDLE || |
956 | qedi_ep->state == EP_STATE_OFLDCONN_NONE || | ||
956 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) | 957 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) |
957 | return -1; | 958 | return -1; |
958 | 959 | ||
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) | |||
1035 | 1036 | ||
1036 | switch (qedi_ep->state) { | 1037 | switch (qedi_ep->state) { |
1037 | case EP_STATE_OFLDCONN_START: | 1038 | case EP_STATE_OFLDCONN_START: |
1039 | case EP_STATE_OFLDCONN_NONE: | ||
1038 | goto ep_release_conn; | 1040 | goto ep_release_conn; |
1039 | case EP_STATE_OFLDCONN_FAILED: | 1041 | case EP_STATE_OFLDCONN_FAILED: |
1040 | break; | 1042 | break; |
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) | |||
1225 | 1227 | ||
1226 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { | 1228 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { |
1227 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); | 1229 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); |
1230 | qedi_ep->state = EP_STATE_OFLDCONN_NONE; | ||
1228 | ret = -EIO; | 1231 | ret = -EIO; |
1229 | goto set_path_exit; | 1232 | goto set_path_exit; |
1230 | } | 1233 | } |
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212f..892d70d54553 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h | |||
@@ -59,6 +59,7 @@ enum { | |||
59 | EP_STATE_OFLDCONN_FAILED = 0x2000, | 59 | EP_STATE_OFLDCONN_FAILED = 0x2000, |
60 | EP_STATE_CONNECT_FAILED = 0x4000, | 60 | EP_STATE_CONNECT_FAILED = 0x4000, |
61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, | 61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, |
62 | EP_STATE_OFLDCONN_NONE = 0x10000, | ||
62 | }; | 63 | }; |
63 | 64 | ||
64 | struct qedi_conn; | 65 | struct qedi_conn; |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index a414f51302b7..6856dfdfa473 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
4248 | ha->devnum = devnum; /* specifies microcode load address */ | 4248 | ha->devnum = devnum; /* specifies microcode load address */ |
4249 | 4249 | ||
4250 | #ifdef QLA_64BIT_PTR | 4250 | #ifdef QLA_64BIT_PTR |
4251 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { | 4251 | if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { |
4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { | 4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { |
4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " | 4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " |
4254 | "suitable DMA mask - aborting\n", ha->host_no); | 4254 | "suitable DMA mask - aborting\n", ha->host_no); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 26b93c563f92..d1fc4958222a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host { | |||
4394 | uint16_t n2n_id; | 4394 | uint16_t n2n_id; |
4395 | struct list_head gpnid_list; | 4395 | struct list_head gpnid_list; |
4396 | struct fab_scan scan; | 4396 | struct fab_scan scan; |
4397 | |||
4398 | unsigned int irq_offset; | ||
4397 | } scsi_qla_host_t; | 4399 | } scsi_qla_host_t; |
4398 | 4400 | ||
4399 | struct qla27xx_image_status { | 4401 | struct qla27xx_image_status { |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 30d3090842f8..8507c43b918c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); | 3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); |
3447 | } | 3447 | } |
3448 | } | 3448 | } |
3449 | vha->irq_offset = desc.pre_vectors; | ||
3449 | ha->msix_entries = kcalloc(ha->msix_count, | 3450 | ha->msix_entries = kcalloc(ha->msix_count, |
3450 | sizeof(struct qla_msix_entry), | 3451 | sizeof(struct qla_msix_entry), |
3451 | GFP_KERNEL); | 3452 | GFP_KERNEL); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ea69dafc9774..c6ef83d0d99b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) | |||
6939 | if (USER_CTRL_IRQ(vha->hw)) | 6939 | if (USER_CTRL_IRQ(vha->hw)) |
6940 | rc = blk_mq_map_queues(qmap); | 6940 | rc = blk_mq_map_queues(qmap); |
6941 | else | 6941 | else |
6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); | 6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); |
6943 | return rc; | 6943 | return rc; |
6944 | } | 6944 | } |
6945 | 6945 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index cfdfcda28072..a77bfb224248 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, | |||
7232 | 7232 | ||
7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, | 7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, |
7234 | fw_ddb_entry); | 7234 | fw_ddb_entry); |
7235 | if (rc) | ||
7236 | goto free_sess; | ||
7235 | 7237 | ||
7236 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", | 7238 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", |
7237 | __func__, fnode_sess->dev.kobj.name); | 7239 | __func__, fnode_sess->dev.kobj.name); |
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index a2b4179bfdf7..7639df91b110 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c | |||
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, | |||
80 | 80 | ||
81 | if (err == 0) { | 81 | if (err == 0) { |
82 | pm_runtime_disable(dev); | 82 | pm_runtime_disable(dev); |
83 | pm_runtime_set_active(dev); | 83 | err = pm_runtime_set_active(dev); |
84 | pm_runtime_enable(dev); | 84 | pm_runtime_enable(dev); |
85 | |||
86 | /* | ||
87 | * Forcibly set runtime PM status of request queue to "active" | ||
88 | * to make sure we can again get requests from the queue | ||
89 | * (see also blk_pm_peek_request()). | ||
90 | * | ||
91 | * The resume hook will correct runtime PM status of the disk. | ||
92 | */ | ||
93 | if (!err && scsi_is_sdev_device(dev)) { | ||
94 | struct scsi_device *sdev = to_scsi_device(dev); | ||
95 | |||
96 | if (sdev->request_queue->dev) | ||
97 | blk_set_runtime_active(sdev->request_queue); | ||
98 | } | ||
85 | } | 99 | } |
86 | 100 | ||
87 | return err; | 101 | return err; |
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, | |||
140 | else | 154 | else |
141 | fn = NULL; | 155 | fn = NULL; |
142 | 156 | ||
143 | /* | ||
144 | * Forcibly set runtime PM status of request queue to "active" to | ||
145 | * make sure we can again get requests from the queue (see also | ||
146 | * blk_pm_peek_request()). | ||
147 | * | ||
148 | * The resume hook will correct runtime PM status of the disk. | ||
149 | */ | ||
150 | if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) | ||
151 | blk_set_runtime_active(to_scsi_device(dev)->request_queue); | ||
152 | |||
153 | if (fn) { | 157 | if (fn) { |
154 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); | 158 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); |
155 | 159 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a44f52e0e8..b2da8a00ec33 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, | |||
206 | sp = buffer_data[0] & 0x80 ? 1 : 0; | 206 | sp = buffer_data[0] & 0x80 ? 1 : 0; |
207 | buffer_data[0] &= ~0x80; | 207 | buffer_data[0] &= ~0x80; |
208 | 208 | ||
209 | /* | ||
210 | * Ensure WP, DPOFUA, and RESERVED fields are cleared in | ||
211 | * received mode parameter buffer before doing MODE SELECT. | ||
212 | */ | ||
213 | data.device_specific = 0; | ||
214 | |||
209 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, | 215 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, |
210 | SD_MAX_RETRIES, &data, &sshdr)) { | 216 | SD_MAX_RETRIES, &data, &sshdr)) { |
211 | if (scsi_sense_valid(&sshdr)) | 217 | if (scsi_sense_valid(&sshdr)) |
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7bde6c809442..f564af8949e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c | |||
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) | |||
323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, | 323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, |
324 | struct pqi_scsi_dev *device) | 324 | struct pqi_scsi_dev *device) |
325 | { | 325 | { |
326 | return device->in_remove & !ctrl_info->in_shutdown; | 326 | return device->in_remove && !ctrl_info->in_shutdown; |
327 | } | 327 | } |
328 | 328 | ||
329 | static inline void pqi_schedule_rescan_worker_with_delay( | 329 | static inline void pqi_schedule_rescan_worker_with_delay( |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dd65fea07687..6d176815e6ce 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
@@ -195,7 +195,7 @@ enum ufs_desc_def_size { | |||
195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, | 195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, |
196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, | 196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, |
197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, | 197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, |
198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, | 198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, |
199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, | 199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, |
200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, | 200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, |
201 | }; | 201 | }; |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9ba7671b84f8..71334aaf1447 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -8001,6 +8001,8 @@ out: | |||
8001 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, | 8001 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, |
8002 | ktime_to_us(ktime_sub(ktime_get(), start)), | 8002 | ktime_to_us(ktime_sub(ktime_get(), start)), |
8003 | hba->curr_dev_pwr_mode, hba->uic_link_state); | 8003 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
8004 | if (!ret) | ||
8005 | hba->is_sys_suspended = false; | ||
8004 | return ret; | 8006 | return ret; |
8005 | } | 8007 | } |
8006 | EXPORT_SYMBOL(ufshcd_system_resume); | 8008 | EXPORT_SYMBOL(ufshcd_system_resume); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 984941e036c8..bd15a564fe24 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void) | |||
714 | sizeof(struct iscsi_queue_req), | 714 | sizeof(struct iscsi_queue_req), |
715 | __alignof__(struct iscsi_queue_req), 0, NULL); | 715 | __alignof__(struct iscsi_queue_req), 0, NULL); |
716 | if (!lio_qr_cache) { | 716 | if (!lio_qr_cache) { |
717 | pr_err("nable to kmem_cache_create() for" | 717 | pr_err("Unable to kmem_cache_create() for" |
718 | " lio_qr_cache\n"); | 718 | " lio_qr_cache\n"); |
719 | goto bitmap_out; | 719 | goto bitmap_out; |
720 | } | 720 | } |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1e6d24943565..c34c88ef3319 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -148,7 +148,7 @@ struct tcmu_dev { | |||
148 | size_t ring_size; | 148 | size_t ring_size; |
149 | 149 | ||
150 | struct mutex cmdr_lock; | 150 | struct mutex cmdr_lock; |
151 | struct list_head cmdr_queue; | 151 | struct list_head qfull_queue; |
152 | 152 | ||
153 | uint32_t dbi_max; | 153 | uint32_t dbi_max; |
154 | uint32_t dbi_thresh; | 154 | uint32_t dbi_thresh; |
@@ -159,6 +159,7 @@ struct tcmu_dev { | |||
159 | 159 | ||
160 | struct timer_list cmd_timer; | 160 | struct timer_list cmd_timer; |
161 | unsigned int cmd_time_out; | 161 | unsigned int cmd_time_out; |
162 | struct list_head inflight_queue; | ||
162 | 163 | ||
163 | struct timer_list qfull_timer; | 164 | struct timer_list qfull_timer; |
164 | int qfull_time_out; | 165 | int qfull_time_out; |
@@ -179,7 +180,7 @@ struct tcmu_dev { | |||
179 | struct tcmu_cmd { | 180 | struct tcmu_cmd { |
180 | struct se_cmd *se_cmd; | 181 | struct se_cmd *se_cmd; |
181 | struct tcmu_dev *tcmu_dev; | 182 | struct tcmu_dev *tcmu_dev; |
182 | struct list_head cmdr_queue_entry; | 183 | struct list_head queue_entry; |
183 | 184 | ||
184 | uint16_t cmd_id; | 185 | uint16_t cmd_id; |
185 | 186 | ||
@@ -192,6 +193,7 @@ struct tcmu_cmd { | |||
192 | unsigned long deadline; | 193 | unsigned long deadline; |
193 | 194 | ||
194 | #define TCMU_CMD_BIT_EXPIRED 0 | 195 | #define TCMU_CMD_BIT_EXPIRED 0 |
196 | #define TCMU_CMD_BIT_INFLIGHT 1 | ||
195 | unsigned long flags; | 197 | unsigned long flags; |
196 | }; | 198 | }; |
197 | /* | 199 | /* |
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) | |||
586 | if (!tcmu_cmd) | 588 | if (!tcmu_cmd) |
587 | return NULL; | 589 | return NULL; |
588 | 590 | ||
589 | INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); | 591 | INIT_LIST_HEAD(&tcmu_cmd->queue_entry); |
590 | tcmu_cmd->se_cmd = se_cmd; | 592 | tcmu_cmd->se_cmd = se_cmd; |
591 | tcmu_cmd->tcmu_dev = udev; | 593 | tcmu_cmd->tcmu_dev = udev; |
592 | 594 | ||
@@ -915,11 +917,13 @@ setup_timer: | |||
915 | return 0; | 917 | return 0; |
916 | 918 | ||
917 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); | 919 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); |
918 | mod_timer(timer, tcmu_cmd->deadline); | 920 | if (!timer_pending(timer)) |
921 | mod_timer(timer, tcmu_cmd->deadline); | ||
922 | |||
919 | return 0; | 923 | return 0; |
920 | } | 924 | } |
921 | 925 | ||
922 | static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | 926 | static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) |
923 | { | 927 | { |
924 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | 928 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; |
925 | unsigned int tmo; | 929 | unsigned int tmo; |
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | |||
942 | if (ret) | 946 | if (ret) |
943 | return ret; | 947 | return ret; |
944 | 948 | ||
945 | list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); | 949 | list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); |
946 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", | 950 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", |
947 | tcmu_cmd->cmd_id, udev->name); | 951 | tcmu_cmd->cmd_id, udev->name); |
948 | return 0; | 952 | return 0; |
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
999 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); | 1003 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); |
1000 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); | 1004 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); |
1001 | 1005 | ||
1002 | if (!list_empty(&udev->cmdr_queue)) | 1006 | if (!list_empty(&udev->qfull_queue)) |
1003 | goto queue; | 1007 | goto queue; |
1004 | 1008 | ||
1005 | mb = udev->mb_addr; | 1009 | mb = udev->mb_addr; |
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
1096 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); | 1100 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); |
1097 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 1101 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
1098 | 1102 | ||
1103 | list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); | ||
1104 | set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); | ||
1105 | |||
1099 | /* TODO: only if FLUSH and FUA? */ | 1106 | /* TODO: only if FLUSH and FUA? */ |
1100 | uio_event_notify(&udev->uio_info); | 1107 | uio_event_notify(&udev->uio_info); |
1101 | 1108 | ||
1102 | return 0; | 1109 | return 0; |
1103 | 1110 | ||
1104 | queue: | 1111 | queue: |
1105 | if (add_to_cmdr_queue(tcmu_cmd)) { | 1112 | if (add_to_qfull_queue(tcmu_cmd)) { |
1106 | *scsi_err = TCM_OUT_OF_RESOURCES; | 1113 | *scsi_err = TCM_OUT_OF_RESOURCES; |
1107 | return -1; | 1114 | return -1; |
1108 | } | 1115 | } |
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
1145 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | 1152 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) |
1146 | goto out; | 1153 | goto out; |
1147 | 1154 | ||
1155 | list_del_init(&cmd->queue_entry); | ||
1156 | |||
1148 | tcmu_cmd_reset_dbi_cur(cmd); | 1157 | tcmu_cmd_reset_dbi_cur(cmd); |
1149 | 1158 | ||
1150 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { | 1159 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { |
@@ -1194,9 +1203,29 @@ out: | |||
1194 | tcmu_free_cmd(cmd); | 1203 | tcmu_free_cmd(cmd); |
1195 | } | 1204 | } |
1196 | 1205 | ||
1206 | static void tcmu_set_next_deadline(struct list_head *queue, | ||
1207 | struct timer_list *timer) | ||
1208 | { | ||
1209 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | ||
1210 | unsigned long deadline = 0; | ||
1211 | |||
1212 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { | ||
1213 | if (!time_after(jiffies, tcmu_cmd->deadline)) { | ||
1214 | deadline = tcmu_cmd->deadline; | ||
1215 | break; | ||
1216 | } | ||
1217 | } | ||
1218 | |||
1219 | if (deadline) | ||
1220 | mod_timer(timer, deadline); | ||
1221 | else | ||
1222 | del_timer(timer); | ||
1223 | } | ||
1224 | |||
1197 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | 1225 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) |
1198 | { | 1226 | { |
1199 | struct tcmu_mailbox *mb; | 1227 | struct tcmu_mailbox *mb; |
1228 | struct tcmu_cmd *cmd; | ||
1200 | int handled = 0; | 1229 | int handled = 0; |
1201 | 1230 | ||
1202 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { | 1231 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { |
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
1210 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { | 1239 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
1211 | 1240 | ||
1212 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 1241 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
1213 | struct tcmu_cmd *cmd; | ||
1214 | 1242 | ||
1215 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 1243 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
1216 | 1244 | ||
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
1243 | /* no more pending commands */ | 1271 | /* no more pending commands */ |
1244 | del_timer(&udev->cmd_timer); | 1272 | del_timer(&udev->cmd_timer); |
1245 | 1273 | ||
1246 | if (list_empty(&udev->cmdr_queue)) { | 1274 | if (list_empty(&udev->qfull_queue)) { |
1247 | /* | 1275 | /* |
1248 | * no more pending or waiting commands so try to | 1276 | * no more pending or waiting commands so try to |
1249 | * reclaim blocks if needed. | 1277 | * reclaim blocks if needed. |
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
1252 | tcmu_global_max_blocks) | 1280 | tcmu_global_max_blocks) |
1253 | schedule_delayed_work(&tcmu_unmap_work, 0); | 1281 | schedule_delayed_work(&tcmu_unmap_work, 0); |
1254 | } | 1282 | } |
1283 | } else if (udev->cmd_time_out) { | ||
1284 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
1255 | } | 1285 | } |
1256 | 1286 | ||
1257 | return handled; | 1287 | return handled; |
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
1271 | if (!time_after(jiffies, cmd->deadline)) | 1301 | if (!time_after(jiffies, cmd->deadline)) |
1272 | return 0; | 1302 | return 0; |
1273 | 1303 | ||
1274 | is_running = list_empty(&cmd->cmdr_queue_entry); | 1304 | is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); |
1275 | se_cmd = cmd->se_cmd; | 1305 | se_cmd = cmd->se_cmd; |
1276 | 1306 | ||
1277 | if (is_running) { | 1307 | if (is_running) { |
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
1288 | */ | 1318 | */ |
1289 | scsi_status = SAM_STAT_CHECK_CONDITION; | 1319 | scsi_status = SAM_STAT_CHECK_CONDITION; |
1290 | } else { | 1320 | } else { |
1291 | list_del_init(&cmd->cmdr_queue_entry); | ||
1292 | |||
1293 | idr_remove(&udev->commands, id); | 1321 | idr_remove(&udev->commands, id); |
1294 | tcmu_free_cmd(cmd); | 1322 | tcmu_free_cmd(cmd); |
1295 | scsi_status = SAM_STAT_TASK_SET_FULL; | 1323 | scsi_status = SAM_STAT_TASK_SET_FULL; |
1296 | } | 1324 | } |
1325 | list_del_init(&cmd->queue_entry); | ||
1297 | 1326 | ||
1298 | pr_debug("Timing out cmd %u on dev %s that is %s.\n", | 1327 | pr_debug("Timing out cmd %u on dev %s that is %s.\n", |
1299 | id, udev->name, is_running ? "inflight" : "queued"); | 1328 | id, udev->name, is_running ? "inflight" : "queued"); |
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
1372 | 1401 | ||
1373 | INIT_LIST_HEAD(&udev->node); | 1402 | INIT_LIST_HEAD(&udev->node); |
1374 | INIT_LIST_HEAD(&udev->timedout_entry); | 1403 | INIT_LIST_HEAD(&udev->timedout_entry); |
1375 | INIT_LIST_HEAD(&udev->cmdr_queue); | 1404 | INIT_LIST_HEAD(&udev->qfull_queue); |
1405 | INIT_LIST_HEAD(&udev->inflight_queue); | ||
1376 | idr_init(&udev->commands); | 1406 | idr_init(&udev->commands); |
1377 | 1407 | ||
1378 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); | 1408 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); |
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
1383 | return &udev->se_dev; | 1413 | return &udev->se_dev; |
1384 | } | 1414 | } |
1385 | 1415 | ||
1386 | static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | 1416 | static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) |
1387 | { | 1417 | { |
1388 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | 1418 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; |
1389 | LIST_HEAD(cmds); | 1419 | LIST_HEAD(cmds); |
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
1391 | sense_reason_t scsi_ret; | 1421 | sense_reason_t scsi_ret; |
1392 | int ret; | 1422 | int ret; |
1393 | 1423 | ||
1394 | if (list_empty(&udev->cmdr_queue)) | 1424 | if (list_empty(&udev->qfull_queue)) |
1395 | return true; | 1425 | return true; |
1396 | 1426 | ||
1397 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); | 1427 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); |
1398 | 1428 | ||
1399 | list_splice_init(&udev->cmdr_queue, &cmds); | 1429 | list_splice_init(&udev->qfull_queue, &cmds); |
1400 | 1430 | ||
1401 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { | 1431 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { |
1402 | list_del_init(&tcmu_cmd->cmdr_queue_entry); | 1432 | list_del_init(&tcmu_cmd->queue_entry); |
1403 | 1433 | ||
1404 | pr_debug("removing cmd %u on dev %s from queue\n", | 1434 | pr_debug("removing cmd %u on dev %s from queue\n", |
1405 | tcmu_cmd->cmd_id, udev->name); | 1435 | tcmu_cmd->cmd_id, udev->name); |
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
1437 | * cmd was requeued, so just put all cmds back in | 1467 | * cmd was requeued, so just put all cmds back in |
1438 | * the queue | 1468 | * the queue |
1439 | */ | 1469 | */ |
1440 | list_splice_tail(&cmds, &udev->cmdr_queue); | 1470 | list_splice_tail(&cmds, &udev->qfull_queue); |
1441 | drained = false; | 1471 | drained = false; |
1442 | goto done; | 1472 | break; |
1443 | } | 1473 | } |
1444 | } | 1474 | } |
1445 | if (list_empty(&udev->cmdr_queue)) | 1475 | |
1446 | del_timer(&udev->qfull_timer); | 1476 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); |
1447 | done: | ||
1448 | return drained; | 1477 | return drained; |
1449 | } | 1478 | } |
1450 | 1479 | ||
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) | |||
1454 | 1483 | ||
1455 | mutex_lock(&udev->cmdr_lock); | 1484 | mutex_lock(&udev->cmdr_lock); |
1456 | tcmu_handle_completions(udev); | 1485 | tcmu_handle_completions(udev); |
1457 | run_cmdr_queue(udev, false); | 1486 | run_qfull_queue(udev, false); |
1458 | mutex_unlock(&udev->cmdr_lock); | 1487 | mutex_unlock(&udev->cmdr_lock); |
1459 | 1488 | ||
1460 | return 0; | 1489 | return 0; |
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) | |||
1982 | /* complete IO that has executed successfully */ | 2011 | /* complete IO that has executed successfully */ |
1983 | tcmu_handle_completions(udev); | 2012 | tcmu_handle_completions(udev); |
1984 | /* fail IO waiting to be queued */ | 2013 | /* fail IO waiting to be queued */ |
1985 | run_cmdr_queue(udev, true); | 2014 | run_qfull_queue(udev, true); |
1986 | 2015 | ||
1987 | unlock: | 2016 | unlock: |
1988 | mutex_unlock(&udev->cmdr_lock); | 2017 | mutex_unlock(&udev->cmdr_lock); |
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
1997 | mutex_lock(&udev->cmdr_lock); | 2026 | mutex_lock(&udev->cmdr_lock); |
1998 | 2027 | ||
1999 | idr_for_each_entry(&udev->commands, cmd, i) { | 2028 | idr_for_each_entry(&udev->commands, cmd, i) { |
2000 | if (!list_empty(&cmd->cmdr_queue_entry)) | 2029 | if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) |
2001 | continue; | 2030 | continue; |
2002 | 2031 | ||
2003 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", | 2032 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", |
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
2006 | 2035 | ||
2007 | idr_remove(&udev->commands, i); | 2036 | idr_remove(&udev->commands, i); |
2008 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { | 2037 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { |
2038 | list_del_init(&cmd->queue_entry); | ||
2009 | if (err_level == 1) { | 2039 | if (err_level == 1) { |
2010 | /* | 2040 | /* |
2011 | * Userspace was not able to start the | 2041 | * Userspace was not able to start the |
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void) | |||
2666 | 2696 | ||
2667 | mutex_lock(&udev->cmdr_lock); | 2697 | mutex_lock(&udev->cmdr_lock); |
2668 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); | 2698 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); |
2699 | |||
2700 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
2701 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); | ||
2702 | |||
2669 | mutex_unlock(&udev->cmdr_lock); | 2703 | mutex_unlock(&udev->cmdr_lock); |
2670 | 2704 | ||
2671 | spin_lock_bh(&timed_out_udevs_lock); | 2705 | spin_lock_bh(&timed_out_udevs_lock); |
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 0582bd12a239..0ca908d12750 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config INT340X_THERMAL | 5 | config INT340X_THERMAL |
6 | tristate "ACPI INT340X thermal drivers" | 6 | tristate "ACPI INT340X thermal drivers" |
7 | depends on X86 && ACPI | 7 | depends on X86 && ACPI && PCI |
8 | select THERMAL_GOV_USER_SPACE | 8 | select THERMAL_GOV_USER_SPACE |
9 | select ACPI_THERMAL_REL | 9 | select ACPI_THERMAL_REL |
10 | select ACPI_FAN | 10 | select ACPI_FAN |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 36f3d0f49e60..bca86bf7189f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net) | |||
1236 | if (nvq->done_idx > VHOST_NET_BATCH) | 1236 | if (nvq->done_idx > VHOST_NET_BATCH) |
1237 | vhost_net_signal_used(nvq); | 1237 | vhost_net_signal_used(nvq); |
1238 | if (unlikely(vq_log)) | 1238 | if (unlikely(vq_log)) |
1239 | vhost_log_write(vq, vq_log, log, vhost_len); | 1239 | vhost_log_write(vq, vq_log, log, vhost_len, |
1240 | vq->iov, in); | ||
1240 | total_len += vhost_len; | 1241 | total_len += vhost_len; |
1241 | if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { | 1242 | if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { |
1242 | vhost_poll_queue(&vq->poll); | 1243 | vhost_poll_queue(&vq->poll); |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8e10ab436d1f..344684f3e2e4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, | |||
1127 | struct vhost_virtqueue *vq, | 1127 | struct vhost_virtqueue *vq, |
1128 | struct vhost_scsi_ctx *vc) | 1128 | struct vhost_scsi_ctx *vc) |
1129 | { | 1129 | { |
1130 | struct virtio_scsi_ctrl_tmf_resp __user *resp; | ||
1131 | struct virtio_scsi_ctrl_tmf_resp rsp; | 1130 | struct virtio_scsi_ctrl_tmf_resp rsp; |
1131 | struct iov_iter iov_iter; | ||
1132 | int ret; | 1132 | int ret; |
1133 | 1133 | ||
1134 | pr_debug("%s\n", __func__); | 1134 | pr_debug("%s\n", __func__); |
1135 | memset(&rsp, 0, sizeof(rsp)); | 1135 | memset(&rsp, 0, sizeof(rsp)); |
1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; | 1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; |
1137 | resp = vq->iov[vc->out].iov_base; | 1137 | |
1138 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1138 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
1139 | if (!ret) | 1139 | |
1140 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
1141 | if (likely(ret == sizeof(rsp))) | ||
1140 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1142 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
1141 | else | 1143 | else |
1142 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); | 1144 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); |
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs, | |||
1147 | struct vhost_virtqueue *vq, | 1149 | struct vhost_virtqueue *vq, |
1148 | struct vhost_scsi_ctx *vc) | 1150 | struct vhost_scsi_ctx *vc) |
1149 | { | 1151 | { |
1150 | struct virtio_scsi_ctrl_an_resp __user *resp; | ||
1151 | struct virtio_scsi_ctrl_an_resp rsp; | 1152 | struct virtio_scsi_ctrl_an_resp rsp; |
1153 | struct iov_iter iov_iter; | ||
1152 | int ret; | 1154 | int ret; |
1153 | 1155 | ||
1154 | pr_debug("%s\n", __func__); | 1156 | pr_debug("%s\n", __func__); |
1155 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ | 1157 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ |
1156 | rsp.response = VIRTIO_SCSI_S_OK; | 1158 | rsp.response = VIRTIO_SCSI_S_OK; |
1157 | resp = vq->iov[vc->out].iov_base; | 1159 | |
1158 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1160 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
1159 | if (!ret) | 1161 | |
1162 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
1163 | if (likely(ret == sizeof(rsp))) | ||
1160 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1164 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
1161 | else | 1165 | else |
1162 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); | 1166 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); |
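[Editor's note: illustration added for clarity; not part of the merged diff.] The vhost/scsi hunks above stop writing the TMF/AN responses with a raw __copy_to_user() into vq->iov[vc->out].iov_base and instead build an iov_iter over the response descriptors, so a reply that spans several (or short) iovecs is still copied correctly and a partial copy is treated as a fault. A hedged sketch of that pattern with illustrative names (demo_*), not the driver's real symbols:

    #include <linux/uio.h>
    #include <linux/errno.h>

    /*
     * Copy a fixed-size response into guest-supplied buffers. 'iov' points
     * at the first writable descriptor and 'nr' is how many there are
     * (vc->in in the driver).
     */
    static int demo_send_resp(const void *rsp, size_t rsp_len,
                              const struct iovec *iov, unsigned long nr)
    {
            struct iov_iter iter;

            iov_iter_init(&iter, READ, iov, nr, rsp_len);
            if (copy_to_iter(rsp, rsp_len, &iter) != rsp_len)
                    return -EFAULT; /* faulted on a guest page */
            return 0;
    }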
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9f7942cbcbb2..15a216cdd507 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
1034 | int type, ret; | 1034 | int type, ret; |
1035 | 1035 | ||
1036 | ret = copy_from_iter(&type, sizeof(type), from); | 1036 | ret = copy_from_iter(&type, sizeof(type), from); |
1037 | if (ret != sizeof(type)) | 1037 | if (ret != sizeof(type)) { |
1038 | ret = -EINVAL; | ||
1038 | goto done; | 1039 | goto done; |
1040 | } | ||
1039 | 1041 | ||
1040 | switch (type) { | 1042 | switch (type) { |
1041 | case VHOST_IOTLB_MSG: | 1043 | case VHOST_IOTLB_MSG: |
@@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
1054 | 1056 | ||
1055 | iov_iter_advance(from, offset); | 1057 | iov_iter_advance(from, offset); |
1056 | ret = copy_from_iter(&msg, sizeof(msg), from); | 1058 | ret = copy_from_iter(&msg, sizeof(msg), from); |
1057 | if (ret != sizeof(msg)) | 1059 | if (ret != sizeof(msg)) { |
1060 | ret = -EINVAL; | ||
1058 | goto done; | 1061 | goto done; |
1062 | } | ||
1059 | if (vhost_process_iotlb_msg(dev, &msg)) { | 1063 | if (vhost_process_iotlb_msg(dev, &msg)) { |
1060 | ret = -EFAULT; | 1064 | ret = -EFAULT; |
1061 | goto done; | 1065 | goto done; |
@@ -1733,13 +1737,87 @@ static int log_write(void __user *log_base, | |||
1733 | return r; | 1737 | return r; |
1734 | } | 1738 | } |
1735 | 1739 | ||
1740 | static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) | ||
1741 | { | ||
1742 | struct vhost_umem *umem = vq->umem; | ||
1743 | struct vhost_umem_node *u; | ||
1744 | u64 start, end, l, min; | ||
1745 | int r; | ||
1746 | bool hit = false; | ||
1747 | |||
1748 | while (len) { | ||
1749 | min = len; | ||
1750 | /* More than one GPAs can be mapped into a single HVA. So | ||
1751 | * iterate all possible umems here to be safe. | ||
1752 | */ | ||
1753 | list_for_each_entry(u, &umem->umem_list, link) { | ||
1754 | if (u->userspace_addr > hva - 1 + len || | ||
1755 | u->userspace_addr - 1 + u->size < hva) | ||
1756 | continue; | ||
1757 | start = max(u->userspace_addr, hva); | ||
1758 | end = min(u->userspace_addr - 1 + u->size, | ||
1759 | hva - 1 + len); | ||
1760 | l = end - start + 1; | ||
1761 | r = log_write(vq->log_base, | ||
1762 | u->start + start - u->userspace_addr, | ||
1763 | l); | ||
1764 | if (r < 0) | ||
1765 | return r; | ||
1766 | hit = true; | ||
1767 | min = min(l, min); | ||
1768 | } | ||
1769 | |||
1770 | if (!hit) | ||
1771 | return -EFAULT; | ||
1772 | |||
1773 | len -= min; | ||
1774 | hva += min; | ||
1775 | } | ||
1776 | |||
1777 | return 0; | ||
1778 | } | ||
1779 | |||
1780 | static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | ||
1781 | { | ||
1782 | struct iovec iov[64]; | ||
1783 | int i, ret; | ||
1784 | |||
1785 | if (!vq->iotlb) | ||
1786 | return log_write(vq->log_base, vq->log_addr + used_offset, len); | ||
1787 | |||
1788 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | ||
1789 | len, iov, 64, VHOST_ACCESS_WO); | ||
1790 | if (ret) | ||
1791 | return ret; | ||
1792 | |||
1793 | for (i = 0; i < ret; i++) { | ||
1794 | ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
1795 | iov[i].iov_len); | ||
1796 | if (ret) | ||
1797 | return ret; | ||
1798 | } | ||
1799 | |||
1800 | return 0; | ||
1801 | } | ||
1802 | |||
1736 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | 1803 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, |
1737 | unsigned int log_num, u64 len) | 1804 | unsigned int log_num, u64 len, struct iovec *iov, int count) |
1738 | { | 1805 | { |
1739 | int i, r; | 1806 | int i, r; |
1740 | 1807 | ||
1741 | /* Make sure data written is seen before log. */ | 1808 | /* Make sure data written is seen before log. */ |
1742 | smp_wmb(); | 1809 | smp_wmb(); |
1810 | |||
1811 | if (vq->iotlb) { | ||
1812 | for (i = 0; i < count; i++) { | ||
1813 | r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
1814 | iov[i].iov_len); | ||
1815 | if (r < 0) | ||
1816 | return r; | ||
1817 | } | ||
1818 | return 0; | ||
1819 | } | ||
1820 | |||
1743 | for (i = 0; i < log_num; ++i) { | 1821 | for (i = 0; i < log_num; ++i) { |
1744 | u64 l = min(log[i].len, len); | 1822 | u64 l = min(log[i].len, len); |
1745 | r = log_write(vq->log_base, log[i].addr, l); | 1823 | r = log_write(vq->log_base, log[i].addr, l); |
@@ -1769,9 +1847,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) | |||
1769 | smp_wmb(); | 1847 | smp_wmb(); |
1770 | /* Log used flag write. */ | 1848 | /* Log used flag write. */ |
1771 | used = &vq->used->flags; | 1849 | used = &vq->used->flags; |
1772 | log_write(vq->log_base, vq->log_addr + | 1850 | log_used(vq, (used - (void __user *)vq->used), |
1773 | (used - (void __user *)vq->used), | 1851 | sizeof vq->used->flags); |
1774 | sizeof vq->used->flags); | ||
1775 | if (vq->log_ctx) | 1852 | if (vq->log_ctx) |
1776 | eventfd_signal(vq->log_ctx, 1); | 1853 | eventfd_signal(vq->log_ctx, 1); |
1777 | } | 1854 | } |
@@ -1789,9 +1866,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) | |||
1789 | smp_wmb(); | 1866 | smp_wmb(); |
1790 | /* Log avail event write */ | 1867 | /* Log avail event write */ |
1791 | used = vhost_avail_event(vq); | 1868 | used = vhost_avail_event(vq); |
1792 | log_write(vq->log_base, vq->log_addr + | 1869 | log_used(vq, (used - (void __user *)vq->used), |
1793 | (used - (void __user *)vq->used), | 1870 | sizeof *vhost_avail_event(vq)); |
1794 | sizeof *vhost_avail_event(vq)); | ||
1795 | if (vq->log_ctx) | 1871 | if (vq->log_ctx) |
1796 | eventfd_signal(vq->log_ctx, 1); | 1872 | eventfd_signal(vq->log_ctx, 1); |
1797 | } | 1873 | } |
@@ -2191,10 +2267,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, | |||
2191 | /* Make sure data is seen before log. */ | 2267 | /* Make sure data is seen before log. */ |
2192 | smp_wmb(); | 2268 | smp_wmb(); |
2193 | /* Log used ring entry write. */ | 2269 | /* Log used ring entry write. */ |
2194 | log_write(vq->log_base, | 2270 | log_used(vq, ((void __user *)used - (void __user *)vq->used), |
2195 | vq->log_addr + | 2271 | count * sizeof *used); |
2196 | ((void __user *)used - (void __user *)vq->used), | ||
2197 | count * sizeof *used); | ||
2198 | } | 2272 | } |
2199 | old = vq->last_used_idx; | 2273 | old = vq->last_used_idx; |
2200 | new = (vq->last_used_idx += count); | 2274 | new = (vq->last_used_idx += count); |
@@ -2236,9 +2310,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, | |||
2236 | /* Make sure used idx is seen before log. */ | 2310 | /* Make sure used idx is seen before log. */ |
2237 | smp_wmb(); | 2311 | smp_wmb(); |
2238 | /* Log used index update. */ | 2312 | /* Log used index update. */ |
2239 | log_write(vq->log_base, | 2313 | log_used(vq, offsetof(struct vring_used, idx), |
2240 | vq->log_addr + offsetof(struct vring_used, idx), | 2314 | sizeof vq->used->idx); |
2241 | sizeof vq->used->idx); | ||
2242 | if (vq->log_ctx) | 2315 | if (vq->log_ctx) |
2243 | eventfd_signal(vq->log_ctx, 1); | 2316 | eventfd_signal(vq->log_ctx, 1); |
2244 | } | 2317 | } |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 466ef7542291..1b675dad5e05 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); | |||
205 | bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); | 205 | bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); |
206 | 206 | ||
207 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | 207 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, |
208 | unsigned int log_num, u64 len); | 208 | unsigned int log_num, u64 len, |
209 | struct iovec *iov, int count); | ||
209 | int vq_iotlb_prefetch(struct vhost_virtqueue *vq); | 210 | int vq_iotlb_prefetch(struct vhost_virtqueue *vq); |
210 | 211 | ||
211 | struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); | 212 | struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); |
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8976190b6c1f..bfa1360ec750 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c | |||
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt) | |||
510 | continue; | 510 | continue; |
511 | } | 511 | } |
512 | #endif | 512 | #endif |
513 | |||
514 | if (!strncmp(options, "logo-pos:", 9)) { | ||
515 | options += 9; | ||
516 | if (!strcmp(options, "center")) | ||
517 | fb_center_logo = true; | ||
518 | continue; | ||
519 | } | ||
513 | } | 520 | } |
514 | return 1; | 521 | return 1; |
515 | } | 522 | } |
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 558ed2ed3124..cb43a2258c51 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c | |||
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb); | |||
53 | int num_registered_fb __read_mostly; | 53 | int num_registered_fb __read_mostly; |
54 | EXPORT_SYMBOL(num_registered_fb); | 54 | EXPORT_SYMBOL(num_registered_fb); |
55 | 55 | ||
56 | bool fb_center_logo __read_mostly; | ||
57 | EXPORT_SYMBOL(fb_center_logo); | ||
58 | |||
56 | static struct fb_info *get_fb_info(unsigned int idx) | 59 | static struct fb_info *get_fb_info(unsigned int idx) |
57 | { | 60 | { |
58 | struct fb_info *fb_info; | 61 | struct fb_info *fb_info; |
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
506 | fb_set_logo(info, logo, logo_new, fb_logo.depth); | 509 | fb_set_logo(info, logo, logo_new, fb_logo.depth); |
507 | } | 510 | } |
508 | 511 | ||
509 | #ifdef CONFIG_FB_LOGO_CENTER | 512 | if (fb_center_logo) { |
510 | { | ||
511 | int xres = info->var.xres; | 513 | int xres = info->var.xres; |
512 | int yres = info->var.yres; | 514 | int yres = info->var.yres; |
513 | 515 | ||
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
520 | --n; | 522 | --n; |
521 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; | 523 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; |
522 | image.dy = y ?: (yres - logo->height) / 2; | 524 | image.dy = y ?: (yres - logo->height) / 2; |
525 | } else { | ||
526 | image.dx = 0; | ||
527 | image.dy = y; | ||
523 | } | 528 | } |
524 | #else | 529 | |
525 | image.dx = 0; | ||
526 | image.dy = y; | ||
527 | #endif | ||
528 | image.width = logo->width; | 530 | image.width = logo->width; |
529 | image.height = logo->height; | 531 | image.height = logo->height; |
530 | 532 | ||
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate) | |||
684 | } | 686 | } |
685 | 687 | ||
686 | height = fb_logo.logo->height; | 688 | height = fb_logo.logo->height; |
687 | #ifdef CONFIG_FB_LOGO_CENTER | 689 | if (fb_center_logo) |
688 | height += (yres - fb_logo.logo->height) / 2; | 690 | height += (yres - fb_logo.logo->height) / 2; |
689 | #endif | ||
690 | 691 | ||
691 | return fb_prepare_extra_logos(info, height, yres); | 692 | return fb_prepare_extra_logos(info, height, yres); |
692 | } | 693 | } |
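Note: the fbcon and fbmem hunks above replace the build-time CONFIG_FB_LOGO_CENTER option (its Kconfig entry is removed further down) with a runtime flag, fb_center_logo, so centering the boot logo becomes a per-boot decision. Based on the parser added to fb_console_setup(), the flag is set from an fbcon option token of the form "logo-pos:center" (e.g. fbcon=logo-pos:center on the kernel command line, per the usual fbcon parameter convention). Annotated parser hunk:

/* fb_console_setup(): one comma-separated fbcon option token */
if (!strncmp(options, "logo-pos:", 9)) {
        options += 9;
        if (!strcmp(options, "center"))
                fb_center_logo = true; /* consumed at runtime by fb_show_logo_line()
                                        * and fb_prepare_logo() instead of #ifdef */
        continue;
}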
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 31f769d67195..057d3cdef92e 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c | |||
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, | |||
318 | } | 318 | } |
319 | 319 | ||
320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, | 320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, |
321 | const char *name, unsigned long address) | 321 | unsigned long address) |
322 | { | 322 | { |
323 | struct offb_par *par = (struct offb_par *) info->par; | 323 | struct offb_par *par = (struct offb_par *) info->par; |
324 | 324 | ||
325 | if (dp && !strncmp(name, "ATY,Rage128", 11)) { | 325 | if (of_node_name_prefix(dp, "ATY,Rage128")) { |
326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
327 | if (par->cmap_adr) | 327 | if (par->cmap_adr) |
328 | par->cmap_type = cmap_r128; | 328 | par->cmap_type = cmap_r128; |
329 | } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) | 329 | } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || |
330 | || !strncmp(name, "ATY,RageM3p12A", 14))) { | 330 | of_node_name_prefix(dp, "ATY,RageM3p12A")) { |
331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
332 | if (par->cmap_adr) | 332 | if (par->cmap_adr) |
333 | par->cmap_type = cmap_M3A; | 333 | par->cmap_type = cmap_M3A; |
334 | } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { | 334 | } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { |
335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
336 | if (par->cmap_adr) | 336 | if (par->cmap_adr) |
337 | par->cmap_type = cmap_M3B; | 337 | par->cmap_type = cmap_M3B; |
338 | } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { | 338 | } else if (of_node_name_prefix(dp, "ATY,Rage6")) { |
339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); | 339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); |
340 | if (par->cmap_adr) | 340 | if (par->cmap_adr) |
341 | par->cmap_type = cmap_radeon; | 341 | par->cmap_type = cmap_radeon; |
342 | } else if (!strncmp(name, "ATY,", 4)) { | 342 | } else if (of_node_name_prefix(dp, "ATY,")) { |
343 | unsigned long base = address & 0xff000000UL; | 343 | unsigned long base = address & 0xff000000UL; |
344 | par->cmap_adr = | 344 | par->cmap_adr = |
345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; | 345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; |
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp | |||
350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); | 350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); |
351 | if (par->cmap_adr) | 351 | if (par->cmap_adr) |
352 | par->cmap_type = cmap_gxt2000; | 352 | par->cmap_type = cmap_gxt2000; |
353 | } else if (dp && !strncmp(name, "vga,Display-", 12)) { | 353 | } else if (of_node_name_prefix(dp, "vga,Display-")) { |
354 | /* Look for AVIVO initialized by SLOF */ | 354 | /* Look for AVIVO initialized by SLOF */ |
355 | struct device_node *pciparent = of_get_parent(dp); | 355 | struct device_node *pciparent = of_get_parent(dp); |
356 | const u32 *vid, *did; | 356 | const u32 *vid, *did; |
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, | |||
438 | 438 | ||
439 | par->cmap_type = cmap_unknown; | 439 | par->cmap_type = cmap_unknown; |
440 | if (depth == 8) | 440 | if (depth == 8) |
441 | offb_init_palette_hacks(info, dp, name, address); | 441 | offb_init_palette_hacks(info, dp, address); |
442 | else | 442 | else |
443 | fix->visual = FB_VISUAL_TRUECOLOR; | 443 | fix->visual = FB_VISUAL_TRUECOLOR; |
444 | 444 | ||
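Note: the offb hunks replace strncmp() checks on a separately passed node name with of_node_name_prefix(), which takes the device_node itself and tolerates a NULL node, so both the "dp &&" guards and the name parameter of offb_init_palette_hacks() can go. Shape of one converted check (names from the hunk):

/* of_node_name_prefix(dp, prefix) is NULL-safe, so no "dp &&" guard */
if (of_node_name_prefix(dp, "ATY,Rage128")) {
        par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
        if (par->cmap_adr)
                par->cmap_type = cmap_r128;
}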
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 53f93616c671..8e23160ec59f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | |||
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) | |||
609 | 609 | ||
610 | int r = 0; | 610 | int r = 0; |
611 | 611 | ||
612 | memset(&p, 0, sizeof(p)); | ||
613 | |||
612 | switch (cmd) { | 614 | switch (cmd) { |
613 | case OMAPFB_SYNC_GFX: | 615 | case OMAPFB_SYNC_GFX: |
614 | DBG("ioctl SYNC_GFX\n"); | 616 | DBG("ioctl SYNC_GFX\n"); |
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig index 1e972c4e88b1..d1f6196c8b9a 100644 --- a/drivers/video/logo/Kconfig +++ b/drivers/video/logo/Kconfig | |||
@@ -10,15 +10,6 @@ menuconfig LOGO | |||
10 | 10 | ||
11 | if LOGO | 11 | if LOGO |
12 | 12 | ||
13 | config FB_LOGO_CENTER | ||
14 | bool "Center the logo" | ||
15 | depends on FB=y | ||
16 | help | ||
17 | When this option is selected, the bootup logo is centered both | ||
18 | horizontally and vertically. If more than one logo is displayed | ||
19 | due to multiple CPUs, the collected line of logos is centered | ||
20 | as a whole. | ||
21 | |||
22 | config FB_LOGO_EXTRA | 13 | config FB_LOGO_EXTRA |
23 | bool | 14 | bool |
24 | depends on FB=y | 15 | depends on FB=y |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 728ecd1eea30..fb12fe205f86 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -61,6 +61,10 @@ enum virtio_balloon_vq { | |||
61 | VIRTIO_BALLOON_VQ_MAX | 61 | VIRTIO_BALLOON_VQ_MAX |
62 | }; | 62 | }; |
63 | 63 | ||
64 | enum virtio_balloon_config_read { | ||
65 | VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, | ||
66 | }; | ||
67 | |||
64 | struct virtio_balloon { | 68 | struct virtio_balloon { |
65 | struct virtio_device *vdev; | 69 | struct virtio_device *vdev; |
66 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; | 70 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; |
@@ -77,14 +81,20 @@ struct virtio_balloon { | |||
77 | /* Prevent updating balloon when it is being canceled. */ | 81 | /* Prevent updating balloon when it is being canceled. */ |
78 | spinlock_t stop_update_lock; | 82 | spinlock_t stop_update_lock; |
79 | bool stop_update; | 83 | bool stop_update; |
84 | /* Bitmap to indicate if reading the related config fields are needed */ | ||
85 | unsigned long config_read_bitmap; | ||
80 | 86 | ||
81 | /* The list of allocated free pages, waiting to be given back to mm */ | 87 | /* The list of allocated free pages, waiting to be given back to mm */ |
82 | struct list_head free_page_list; | 88 | struct list_head free_page_list; |
83 | spinlock_t free_page_list_lock; | 89 | spinlock_t free_page_list_lock; |
84 | /* The number of free page blocks on the above list */ | 90 | /* The number of free page blocks on the above list */ |
85 | unsigned long num_free_page_blocks; | 91 | unsigned long num_free_page_blocks; |
86 | /* The cmd id received from host */ | 92 | /* |
87 | u32 cmd_id_received; | 93 | * The cmd id received from host. |
94 | * Read it via virtio_balloon_cmd_id_received to get the latest value | ||
95 | * sent from host. | ||
96 | */ | ||
97 | u32 cmd_id_received_cache; | ||
88 | /* The cmd id that is actively in use */ | 98 | /* The cmd id that is actively in use */ |
89 | __virtio32 cmd_id_active; | 99 | __virtio32 cmd_id_active; |
90 | /* Buffer to store the stop sign */ | 100 | /* Buffer to store the stop sign */ |
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, | |||
390 | return num_returned; | 400 | return num_returned; |
391 | } | 401 | } |
392 | 402 | ||
403 | static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) | ||
404 | { | ||
405 | if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) | ||
406 | return; | ||
407 | |||
408 | /* No need to queue the work if the bit was already set. */ | ||
409 | if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
410 | &vb->config_read_bitmap)) | ||
411 | return; | ||
412 | |||
413 | queue_work(vb->balloon_wq, &vb->report_free_page_work); | ||
414 | } | ||
415 | |||
393 | static void virtballoon_changed(struct virtio_device *vdev) | 416 | static void virtballoon_changed(struct virtio_device *vdev) |
394 | { | 417 | { |
395 | struct virtio_balloon *vb = vdev->priv; | 418 | struct virtio_balloon *vb = vdev->priv; |
396 | unsigned long flags; | 419 | unsigned long flags; |
397 | s64 diff = towards_target(vb); | ||
398 | |||
399 | if (diff) { | ||
400 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
401 | if (!vb->stop_update) | ||
402 | queue_work(system_freezable_wq, | ||
403 | &vb->update_balloon_size_work); | ||
404 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
405 | } | ||
406 | 420 | ||
407 | if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { | 421 | spin_lock_irqsave(&vb->stop_update_lock, flags); |
408 | virtio_cread(vdev, struct virtio_balloon_config, | 422 | if (!vb->stop_update) { |
409 | free_page_report_cmd_id, &vb->cmd_id_received); | 423 | queue_work(system_freezable_wq, |
410 | if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | 424 | &vb->update_balloon_size_work); |
411 | /* Pass ULONG_MAX to give back all the free pages */ | 425 | virtio_balloon_queue_free_page_work(vb); |
412 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
413 | } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
414 | vb->cmd_id_received != | ||
415 | virtio32_to_cpu(vdev, vb->cmd_id_active)) { | ||
416 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
417 | if (!vb->stop_update) { | ||
418 | queue_work(vb->balloon_wq, | ||
419 | &vb->report_free_page_work); | ||
420 | } | ||
421 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
422 | } | ||
423 | } | 426 | } |
427 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
424 | } | 428 | } |
425 | 429 | ||
426 | static void update_balloon_size(struct virtio_balloon *vb) | 430 | static void update_balloon_size(struct virtio_balloon *vb) |
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) | |||
527 | return 0; | 531 | return 0; |
528 | } | 532 | } |
529 | 533 | ||
534 | static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) | ||
535 | { | ||
536 | if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
537 | &vb->config_read_bitmap)) | ||
538 | virtio_cread(vb->vdev, struct virtio_balloon_config, | ||
539 | free_page_report_cmd_id, | ||
540 | &vb->cmd_id_received_cache); | ||
541 | |||
542 | return vb->cmd_id_received_cache; | ||
543 | } | ||
544 | |||
530 | static int send_cmd_id_start(struct virtio_balloon *vb) | 545 | static int send_cmd_id_start(struct virtio_balloon *vb) |
531 | { | 546 | { |
532 | struct scatterlist sg; | 547 | struct scatterlist sg; |
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) | |||
537 | while (virtqueue_get_buf(vq, &unused)) | 552 | while (virtqueue_get_buf(vq, &unused)) |
538 | ; | 553 | ; |
539 | 554 | ||
540 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); | 555 | vb->cmd_id_active = virtio32_to_cpu(vb->vdev, |
556 | virtio_balloon_cmd_id_received(vb)); | ||
541 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); | 557 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); |
542 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); | 558 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); |
543 | if (!err) | 559 | if (!err) |
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
620 | * stop the reporting. | 636 | * stop the reporting. |
621 | */ | 637 | */ |
622 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); | 638 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); |
623 | if (cmd_id_active != vb->cmd_id_received) | 639 | if (unlikely(cmd_id_active != |
640 | virtio_balloon_cmd_id_received(vb))) | ||
624 | break; | 641 | break; |
625 | 642 | ||
626 | /* | 643 | /* |
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
637 | return 0; | 654 | return 0; |
638 | } | 655 | } |
639 | 656 | ||
640 | static void report_free_page_func(struct work_struct *work) | 657 | static void virtio_balloon_report_free_page(struct virtio_balloon *vb) |
641 | { | 658 | { |
642 | int err; | 659 | int err; |
643 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
644 | report_free_page_work); | ||
645 | struct device *dev = &vb->vdev->dev; | 660 | struct device *dev = &vb->vdev->dev; |
646 | 661 | ||
647 | /* Start by sending the received cmd id to host with an outbuf. */ | 662 | /* Start by sending the received cmd id to host with an outbuf. */ |
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) | |||
659 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); | 674 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); |
660 | } | 675 | } |
661 | 676 | ||
677 | static void report_free_page_func(struct work_struct *work) | ||
678 | { | ||
679 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
680 | report_free_page_work); | ||
681 | u32 cmd_id_received; | ||
682 | |||
683 | cmd_id_received = virtio_balloon_cmd_id_received(vb); | ||
684 | if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | ||
685 | /* Pass ULONG_MAX to give back all the free pages */ | ||
686 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
687 | } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
688 | cmd_id_received != | ||
689 | virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { | ||
690 | virtio_balloon_report_free_page(vb); | ||
691 | } | ||
692 | } | ||
693 | |||
662 | #ifdef CONFIG_BALLOON_COMPACTION | 694 | #ifdef CONFIG_BALLOON_COMPACTION |
663 | /* | 695 | /* |
664 | * virtballoon_migratepage - perform the balloon page migration on behalf of | 696 | * virtballoon_migratepage - perform the balloon page migration on behalf of |
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
885 | goto out_del_vqs; | 917 | goto out_del_vqs; |
886 | } | 918 | } |
887 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); | 919 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); |
888 | vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; | 920 | vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; |
889 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, | 921 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, |
890 | VIRTIO_BALLOON_CMD_ID_STOP); | 922 | VIRTIO_BALLOON_CMD_ID_STOP); |
891 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, | 923 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, |
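Note: virtballoon_changed() is the config-change callback, so the rework above moves the actual virtio_cread() of free_page_report_cmd_id out of that path. The callback only sets a bit in config_read_bitmap and queues report_free_page_work; virtio_balloon_cmd_id_received() then reads the config field lazily, and only when the bit says it changed, caching the result in cmd_id_received_cache. The two halves of the pattern, with comments added:

/* Config-change side: mark the field stale and kick the worker. */
static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
        if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                return;

        /* If the bit was already set, a work item is already pending and
         * will observe the latest command id anyway. */
        if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                             &vb->config_read_bitmap))
                return;

        queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

/* Consumer side: refresh the cached value only when the bit is set. */
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
        if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                               &vb->config_read_bitmap))
                virtio_cread(vb->vdev, struct virtio_balloon_config,
                             free_page_report_cmd_id,
                             &vb->cmd_id_received_cache);

        return vb->cmd_id_received_cache;
}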
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 4cd9ea5c75be..d9dd0f789279 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
468 | { | 468 | { |
469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); | 470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); |
471 | int i, err; | 471 | int i, err, queue_idx = 0; |
472 | 472 | ||
473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, | 473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, |
474 | dev_name(&vdev->dev), vm_dev); | 474 | dev_name(&vdev->dev), vm_dev); |
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
476 | return err; | 476 | return err; |
477 | 477 | ||
478 | for (i = 0; i < nvqs; ++i) { | 478 | for (i = 0; i < nvqs; ++i) { |
479 | vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], | 479 | if (!names[i]) { |
480 | vqs[i] = NULL; | ||
481 | continue; | ||
482 | } | ||
483 | |||
484 | vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
480 | ctx ? ctx[i] : false); | 485 | ctx ? ctx[i] : false); |
481 | if (IS_ERR(vqs[i])) { | 486 | if (IS_ERR(vqs[i])) { |
482 | vm_del_vqs(vdev); | 487 | vm_del_vqs(vdev); |
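Note: callers of find_vqs() may pass a NULL name to mark a virtqueue slot they do not want; the hunk above leaves vqs[i] = NULL for such slots and counts real queues with queue_idx, so the transport programs queue numbers densely instead of reusing the array index. The same queue_idx treatment is applied to the PCI transport below. Annotated loop (error handling unchanged from the existing code):

int i, queue_idx = 0;

for (i = 0; i < nvqs; ++i) {
        if (!names[i]) {
                vqs[i] = NULL;          /* caller asked to skip this slot */
                continue;
        }

        /* Hardware queue index stays dense even when slots are skipped. */
        vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                             ctx ? ctx[i] : false);
        /* ... existing IS_ERR() error handling follows ... */
}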
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 465a6f5142cc..d0584c040c60 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
285 | { | 285 | { |
286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
287 | u16 msix_vec; | 287 | u16 msix_vec; |
288 | int i, err, nvectors, allocated_vectors; | 288 | int i, err, nvectors, allocated_vectors, queue_idx = 0; |
289 | 289 | ||
290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
291 | if (!vp_dev->vqs) | 291 | if (!vp_dev->vqs) |
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
321 | msix_vec = allocated_vectors++; | 321 | msix_vec = allocated_vectors++; |
322 | else | 322 | else |
323 | msix_vec = VP_MSIX_VQ_VECTOR; | 323 | msix_vec = VP_MSIX_VQ_VECTOR; |
324 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 324 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
325 | ctx ? ctx[i] : false, | 325 | ctx ? ctx[i] : false, |
326 | msix_vec); | 326 | msix_vec); |
327 | if (IS_ERR(vqs[i])) { | 327 | if (IS_ERR(vqs[i])) { |
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
356 | const char * const names[], const bool *ctx) | 356 | const char * const names[], const bool *ctx) |
357 | { | 357 | { |
358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
359 | int i, err; | 359 | int i, err, queue_idx = 0; |
360 | 360 | ||
361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
362 | if (!vp_dev->vqs) | 362 | if (!vp_dev->vqs) |
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
374 | vqs[i] = NULL; | 374 | vqs[i] = NULL; |
375 | continue; | 375 | continue; |
376 | } | 376 | } |
377 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 377 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
378 | ctx ? ctx[i] : false, | 378 | ctx ? ctx[i] : false, |
379 | VIRTIO_MSI_NO_VECTOR); | 379 | VIRTIO_MSI_NO_VECTOR); |
380 | if (IS_ERR(vqs[i])) { | 380 | if (IS_ERR(vqs[i])) { |
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 5c4a764717c4..81208cd3f4ec 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/watchdog.h> | 17 | #include <linux/watchdog.h> |
18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/mod_devicetable.h> | ||
20 | 21 | ||
21 | #include <asm/mach-ralink/ralink_regs.h> | 22 | #include <asm/mach-ralink/ralink_regs.h> |
22 | 23 | ||
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 98967f0a7d10..db7c57d82cfd 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/watchdog.h> | 18 | #include <linux/watchdog.h> |
19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/mod_devicetable.h> | ||
21 | 22 | ||
22 | #include <asm/mach-ralink/ralink_regs.h> | 23 | #include <asm/mach-ralink/ralink_regs.h> |
23 | 24 | ||
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c index 0d3a0fbbd7a5..52941207a12a 100644 --- a/drivers/watchdog/tqmx86_wdt.c +++ b/drivers/watchdog/tqmx86_wdt.c | |||
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev) | |||
79 | return -ENOMEM; | 79 | return -ENOMEM; |
80 | 80 | ||
81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
82 | if (IS_ERR(res)) | 82 | if (!res) |
83 | return PTR_ERR(res); | 83 | return -ENODEV; |
84 | 84 | ||
85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, | 85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, |
86 | resource_size(res)); | 86 | resource_size(res)); |
87 | if (IS_ERR(priv->io_base)) | 87 | if (!priv->io_base) |
88 | return PTR_ERR(priv->io_base); | 88 | return -ENOMEM; |
89 | 89 | ||
90 | watchdog_set_drvdata(&priv->wdd, priv); | 90 | watchdog_set_drvdata(&priv->wdd, priv); |
91 | 91 | ||
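Note: platform_get_resource() and devm_ioport_map() report failure by returning NULL, not an ERR_PTR, so the old IS_ERR()/PTR_ERR() checks could never catch a failure and the probe would go on to dereference a NULL pointer. The corrected probe path, annotated:

res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)                       /* missing resource: NULL, never ERR_PTR */
        return -ENODEV;

priv->io_base = devm_ioport_map(&pdev->dev, res->start,
                                resource_size(res));
if (!priv->io_base)             /* mapping failure is also reported as NULL */
        return -ENOMEM;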
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 93194f3e7540..117e76b2f939 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void) | |||
1650 | xen_have_vector_callback = 0; | 1650 | xen_have_vector_callback = 0; |
1651 | return; | 1651 | return; |
1652 | } | 1652 | } |
1653 | pr_info("Xen HVM callback vector for event delivery is enabled\n"); | 1653 | pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); |
1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, | 1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
1655 | xen_hvm_callback_vector); | 1655 | xen_hvm_callback_vector); |
1656 | } | 1656 | } |
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 2e5d845b5091..7aa64d1b119c 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque) | |||
160 | 160 | ||
161 | /* write the data, then modify the indexes */ | 161 | /* write the data, then modify the indexes */ |
162 | virt_wmb(); | 162 | virt_wmb(); |
163 | if (ret < 0) | 163 | if (ret < 0) { |
164 | atomic_set(&map->read, 0); | ||
164 | intf->in_error = ret; | 165 | intf->in_error = ret; |
165 | else | 166 | } else |
166 | intf->in_prod = prod + ret; | 167 | intf->in_prod = prod + ret; |
167 | /* update the indexes, then notify the other end */ | 168 | /* update the indexes, then notify the other end */ |
168 | virt_wmb(); | 169 | virt_wmb(); |
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev, | |||
282 | static void pvcalls_sk_state_change(struct sock *sock) | 283 | static void pvcalls_sk_state_change(struct sock *sock) |
283 | { | 284 | { |
284 | struct sock_mapping *map = sock->sk_user_data; | 285 | struct sock_mapping *map = sock->sk_user_data; |
285 | struct pvcalls_data_intf *intf; | ||
286 | 286 | ||
287 | if (map == NULL) | 287 | if (map == NULL) |
288 | return; | 288 | return; |
289 | 289 | ||
290 | intf = map->ring; | 290 | atomic_inc(&map->read); |
291 | intf->in_error = -ENOTCONN; | ||
292 | notify_remote_via_irq(map->irq); | 291 | notify_remote_via_irq(map->irq); |
293 | } | 292 | } |
294 | 293 | ||
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 77224d8f3e6f..8a249c95c193 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
@@ -31,6 +31,12 @@ | |||
31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) | 31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) |
32 | #define PVCALLS_FRONT_MAX_SPIN 5000 | 32 | #define PVCALLS_FRONT_MAX_SPIN 5000 |
33 | 33 | ||
34 | static struct proto pvcalls_proto = { | ||
35 | .name = "PVCalls", | ||
36 | .owner = THIS_MODULE, | ||
37 | .obj_size = sizeof(struct sock), | ||
38 | }; | ||
39 | |||
34 | struct pvcalls_bedata { | 40 | struct pvcalls_bedata { |
35 | struct xen_pvcalls_front_ring ring; | 41 | struct xen_pvcalls_front_ring ring; |
36 | grant_ref_t ref; | 42 | grant_ref_t ref; |
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock) | |||
335 | return ret; | 341 | return ret; |
336 | } | 342 | } |
337 | 343 | ||
344 | static void free_active_ring(struct sock_mapping *map) | ||
345 | { | ||
346 | if (!map->active.ring) | ||
347 | return; | ||
348 | |||
349 | free_pages((unsigned long)map->active.data.in, | ||
350 | map->active.ring->ring_order); | ||
351 | free_page((unsigned long)map->active.ring); | ||
352 | } | ||
353 | |||
354 | static int alloc_active_ring(struct sock_mapping *map) | ||
355 | { | ||
356 | void *bytes; | ||
357 | |||
358 | map->active.ring = (struct pvcalls_data_intf *) | ||
359 | get_zeroed_page(GFP_KERNEL); | ||
360 | if (!map->active.ring) | ||
361 | goto out; | ||
362 | |||
363 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
364 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
365 | PVCALLS_RING_ORDER); | ||
366 | if (!bytes) | ||
367 | goto out; | ||
368 | |||
369 | map->active.data.in = bytes; | ||
370 | map->active.data.out = bytes + | ||
371 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
372 | |||
373 | return 0; | ||
374 | |||
375 | out: | ||
376 | free_active_ring(map); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | |||
338 | static int create_active(struct sock_mapping *map, int *evtchn) | 380 | static int create_active(struct sock_mapping *map, int *evtchn) |
339 | { | 381 | { |
340 | void *bytes; | 382 | void *bytes; |
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
343 | *evtchn = -1; | 385 | *evtchn = -1; |
344 | init_waitqueue_head(&map->active.inflight_conn_req); | 386 | init_waitqueue_head(&map->active.inflight_conn_req); |
345 | 387 | ||
346 | map->active.ring = (struct pvcalls_data_intf *) | 388 | bytes = map->active.data.in; |
347 | __get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
348 | if (map->active.ring == NULL) | ||
349 | goto out_error; | ||
350 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
351 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
352 | PVCALLS_RING_ORDER); | ||
353 | if (bytes == NULL) | ||
354 | goto out_error; | ||
355 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) | 389 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) |
356 | map->active.ring->ref[i] = gnttab_grant_foreign_access( | 390 | map->active.ring->ref[i] = gnttab_grant_foreign_access( |
357 | pvcalls_front_dev->otherend_id, | 391 | pvcalls_front_dev->otherend_id, |
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
361 | pvcalls_front_dev->otherend_id, | 395 | pvcalls_front_dev->otherend_id, |
362 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); | 396 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); |
363 | 397 | ||
364 | map->active.data.in = bytes; | ||
365 | map->active.data.out = bytes + | ||
366 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
367 | |||
368 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); | 398 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); |
369 | if (ret) | 399 | if (ret) |
370 | goto out_error; | 400 | goto out_error; |
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
385 | out_error: | 415 | out_error: |
386 | if (*evtchn >= 0) | 416 | if (*evtchn >= 0) |
387 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); | 417 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); |
388 | free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); | ||
389 | free_page((unsigned long)map->active.ring); | ||
390 | return ret; | 418 | return ret; |
391 | } | 419 | } |
392 | 420 | ||
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, | |||
406 | return PTR_ERR(map); | 434 | return PTR_ERR(map); |
407 | 435 | ||
408 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 436 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
437 | ret = alloc_active_ring(map); | ||
438 | if (ret < 0) { | ||
439 | pvcalls_exit_sock(sock); | ||
440 | return ret; | ||
441 | } | ||
409 | 442 | ||
410 | spin_lock(&bedata->socket_lock); | 443 | spin_lock(&bedata->socket_lock); |
411 | ret = get_request(bedata, &req_id); | 444 | ret = get_request(bedata, &req_id); |
412 | if (ret < 0) { | 445 | if (ret < 0) { |
413 | spin_unlock(&bedata->socket_lock); | 446 | spin_unlock(&bedata->socket_lock); |
447 | free_active_ring(map); | ||
414 | pvcalls_exit_sock(sock); | 448 | pvcalls_exit_sock(sock); |
415 | return ret; | 449 | return ret; |
416 | } | 450 | } |
417 | ret = create_active(map, &evtchn); | 451 | ret = create_active(map, &evtchn); |
418 | if (ret < 0) { | 452 | if (ret < 0) { |
419 | spin_unlock(&bedata->socket_lock); | 453 | spin_unlock(&bedata->socket_lock); |
454 | free_active_ring(map); | ||
420 | pvcalls_exit_sock(sock); | 455 | pvcalls_exit_sock(sock); |
421 | return ret; | 456 | return ret; |
422 | } | 457 | } |
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf, | |||
469 | virt_mb(); | 504 | virt_mb(); |
470 | 505 | ||
471 | size = pvcalls_queued(prod, cons, array_size); | 506 | size = pvcalls_queued(prod, cons, array_size); |
472 | if (size >= array_size) | 507 | if (size > array_size) |
473 | return -EINVAL; | 508 | return -EINVAL; |
509 | if (size == array_size) | ||
510 | return 0; | ||
474 | if (len > array_size - size) | 511 | if (len > array_size - size) |
475 | len = array_size - size; | 512 | len = array_size - size; |
476 | 513 | ||
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf, | |||
560 | error = intf->in_error; | 597 | error = intf->in_error; |
561 | /* get pointers before reading from the ring */ | 598 | /* get pointers before reading from the ring */ |
562 | virt_rmb(); | 599 | virt_rmb(); |
563 | if (error < 0) | ||
564 | return error; | ||
565 | 600 | ||
566 | size = pvcalls_queued(prod, cons, array_size); | 601 | size = pvcalls_queued(prod, cons, array_size); |
567 | masked_prod = pvcalls_mask(prod, array_size); | 602 | masked_prod = pvcalls_mask(prod, array_size); |
568 | masked_cons = pvcalls_mask(cons, array_size); | 603 | masked_cons = pvcalls_mask(cons, array_size); |
569 | 604 | ||
570 | if (size == 0) | 605 | if (size == 0) |
571 | return 0; | 606 | return error ?: size; |
572 | 607 | ||
573 | if (len > size) | 608 | if (len > size) |
574 | len = size; | 609 | len = size; |
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
780 | } | 815 | } |
781 | } | 816 | } |
782 | 817 | ||
783 | spin_lock(&bedata->socket_lock); | 818 | map2 = kzalloc(sizeof(*map2), GFP_KERNEL); |
784 | ret = get_request(bedata, &req_id); | 819 | if (map2 == NULL) { |
785 | if (ret < 0) { | ||
786 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 820 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
787 | (void *)&map->passive.flags); | 821 | (void *)&map->passive.flags); |
788 | spin_unlock(&bedata->socket_lock); | 822 | pvcalls_exit_sock(sock); |
823 | return -ENOMEM; | ||
824 | } | ||
825 | ret = alloc_active_ring(map2); | ||
826 | if (ret < 0) { | ||
827 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | ||
828 | (void *)&map->passive.flags); | ||
829 | kfree(map2); | ||
789 | pvcalls_exit_sock(sock); | 830 | pvcalls_exit_sock(sock); |
790 | return ret; | 831 | return ret; |
791 | } | 832 | } |
792 | map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); | 833 | spin_lock(&bedata->socket_lock); |
793 | if (map2 == NULL) { | 834 | ret = get_request(bedata, &req_id); |
835 | if (ret < 0) { | ||
794 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 836 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
795 | (void *)&map->passive.flags); | 837 | (void *)&map->passive.flags); |
796 | spin_unlock(&bedata->socket_lock); | 838 | spin_unlock(&bedata->socket_lock); |
839 | free_active_ring(map2); | ||
840 | kfree(map2); | ||
797 | pvcalls_exit_sock(sock); | 841 | pvcalls_exit_sock(sock); |
798 | return -ENOMEM; | 842 | return ret; |
799 | } | 843 | } |
844 | |||
800 | ret = create_active(map2, &evtchn); | 845 | ret = create_active(map2, &evtchn); |
801 | if (ret < 0) { | 846 | if (ret < 0) { |
847 | free_active_ring(map2); | ||
802 | kfree(map2); | 848 | kfree(map2); |
803 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 849 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
804 | (void *)&map->passive.flags); | 850 | (void *)&map->passive.flags); |
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
839 | 885 | ||
840 | received: | 886 | received: |
841 | map2->sock = newsock; | 887 | map2->sock = newsock; |
842 | newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); | 888 | newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); |
843 | if (!newsock->sk) { | 889 | if (!newsock->sk) { |
844 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 890 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
845 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; | 891 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; |
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock) | |||
1032 | spin_lock(&bedata->socket_lock); | 1078 | spin_lock(&bedata->socket_lock); |
1033 | list_del(&map->list); | 1079 | list_del(&map->list); |
1034 | spin_unlock(&bedata->socket_lock); | 1080 | spin_unlock(&bedata->socket_lock); |
1035 | if (READ_ONCE(map->passive.inflight_req_id) != | 1081 | if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && |
1036 | PVCALLS_INVALID_ID) { | 1082 | READ_ONCE(map->passive.inflight_req_id) != 0) { |
1037 | pvcalls_front_free_map(bedata, | 1083 | pvcalls_front_free_map(bedata, |
1038 | map->passive.accept_map); | 1084 | map->passive.accept_map); |
1039 | } | 1085 | } |
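Note: create_active() used to allocate the ring page and data pages itself. The hunks above move that work into alloc_active_ring()/free_active_ring(), call the allocator before bedata->socket_lock is taken (so plain GFP_KERNEL suffices and map2 no longer needs GFP_ATOMIC), and free the ring on every error path; the accept path also stops kzalloc()ing a bare struct sock and uses sk_alloc() with a minimal pvcalls_proto. Connect-path ordering after the change, condensed:

/* pvcalls_front_connect(): allocate outside the lock ... */
ret = alloc_active_ring(map);           /* GFP_KERNEL, may sleep */
if (ret < 0) {
        pvcalls_exit_sock(sock);
        return ret;
}

spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
        spin_unlock(&bedata->socket_lock);
        free_active_ring(map);          /* undo the allocation on error */
        pvcalls_exit_sock(sock);
        return ret;
}
/* ... create_active(map, &evtchn) now only grants pages and binds the
 * event channel; it no longer allocates or frees ring memory. */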
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0568fd986821..e432bd27a2e7 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
@@ -208,7 +208,7 @@ again: | |||
208 | /* The new front of the queue now owns the state variables. */ | 208 | /* The new front of the queue now owns the state variables. */ |
209 | next = list_entry(vnode->pending_locks.next, | 209 | next = list_entry(vnode->pending_locks.next, |
210 | struct file_lock, fl_u.afs.link); | 210 | struct file_lock, fl_u.afs.link); |
211 | vnode->lock_key = afs_file_key(next->fl_file); | 211 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
214 | goto again; | 214 | goto again; |
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl) | |||
413 | /* The new front of the queue now owns the state variables. */ | 413 | /* The new front of the queue now owns the state variables. */ |
414 | next = list_entry(vnode->pending_locks.next, | 414 | next = list_entry(vnode->pending_locks.next, |
415 | struct file_lock, fl_u.afs.link); | 415 | struct file_lock, fl_u.afs.link); |
416 | vnode->lock_key = afs_file_key(next->fl_file); | 416 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
419 | afs_lock_may_be_available(vnode); | 419 | afs_lock_may_be_available(vnode); |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 6b17d3620414..1a4ce07fb406 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | 414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { |
415 | valid = true; | 415 | valid = true; |
416 | } else { | 416 | } else { |
417 | vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; | ||
418 | vnode->cb_v_break = vnode->volume->cb_v_break; | 417 | vnode->cb_v_break = vnode->volume->cb_v_break; |
419 | valid = false; | 418 | valid = false; |
420 | } | 419 | } |
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode) | |||
546 | #endif | 545 | #endif |
547 | 546 | ||
548 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); | 547 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); |
548 | key_put(vnode->lock_key); | ||
549 | vnode->lock_key = NULL; | ||
549 | _leave(""); | 550 | _leave(""); |
550 | } | 551 | } |
551 | 552 | ||
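Note: vnode->lock_key used to borrow the file's key pointer without holding a reference. The flock hunks take a reference with key_get() whenever lock ownership is handed to the next pending waiter, and afs_evict_inode() now drops it with key_put(), so the key cannot be freed while the vnode still points at it. In miniature:

/* hand the lock state to the next waiter: pin the key we record */
vnode->lock_key = key_get(afs_file_key(next->fl_file));

/* ... and drop the reference when the inode is finally evicted */
key_put(vnode->lock_key);
vnode->lock_key = NULL;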
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 07bc10f076aa..d443e2bfa094 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h | |||
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus { | |||
161 | struct yfs_xdr_u64 max_quota; | 161 | struct yfs_xdr_u64 max_quota; |
162 | struct yfs_xdr_u64 file_quota; | 162 | struct yfs_xdr_u64 file_quota; |
163 | } __packed; | 163 | } __packed; |
164 | |||
165 | enum yfs_lock_type { | ||
166 | yfs_LockNone = -1, | ||
167 | yfs_LockRead = 0, | ||
168 | yfs_LockWrite = 1, | ||
169 | yfs_LockExtend = 2, | ||
170 | yfs_LockRelease = 3, | ||
171 | yfs_LockMandatoryRead = 0x100, | ||
172 | yfs_LockMandatoryWrite = 0x101, | ||
173 | yfs_LockMandatoryExtend = 0x102, | ||
174 | }; | ||
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a7b44863d502..2c588f9bbbda 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls; | |||
23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); | 23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); |
24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); | 24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); |
25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); | 25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); |
26 | static void afs_delete_async_call(struct work_struct *); | ||
26 | static void afs_process_async_call(struct work_struct *); | 27 | static void afs_process_async_call(struct work_struct *); |
27 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); | 28 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); |
28 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); | 29 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); |
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call) | |||
203 | } | 204 | } |
204 | } | 205 | } |
205 | 206 | ||
207 | static struct afs_call *afs_get_call(struct afs_call *call, | ||
208 | enum afs_call_trace why) | ||
209 | { | ||
210 | int u = atomic_inc_return(&call->usage); | ||
211 | |||
212 | trace_afs_call(call, why, u, | ||
213 | atomic_read(&call->net->nr_outstanding_calls), | ||
214 | __builtin_return_address(0)); | ||
215 | return call; | ||
216 | } | ||
217 | |||
206 | /* | 218 | /* |
207 | * Queue the call for actual work. | 219 | * Queue the call for actual work. |
208 | */ | 220 | */ |
209 | static void afs_queue_call_work(struct afs_call *call) | 221 | static void afs_queue_call_work(struct afs_call *call) |
210 | { | 222 | { |
211 | if (call->type->work) { | 223 | if (call->type->work) { |
212 | int u = atomic_inc_return(&call->usage); | ||
213 | |||
214 | trace_afs_call(call, afs_call_trace_work, u, | ||
215 | atomic_read(&call->net->nr_outstanding_calls), | ||
216 | __builtin_return_address(0)); | ||
217 | |||
218 | INIT_WORK(&call->work, call->type->work); | 224 | INIT_WORK(&call->work, call->type->work); |
219 | 225 | ||
226 | afs_get_call(call, afs_call_trace_work); | ||
220 | if (!queue_work(afs_wq, &call->work)) | 227 | if (!queue_work(afs_wq, &call->work)) |
221 | afs_put_call(call); | 228 | afs_put_call(call); |
222 | } | 229 | } |
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
398 | } | 405 | } |
399 | } | 406 | } |
400 | 407 | ||
408 | /* If the call is going to be asynchronous, we need an extra ref for | ||
409 | * the call to hold itself so the caller need not hang on to its ref. | ||
410 | */ | ||
411 | if (call->async) | ||
412 | afs_get_call(call, afs_call_trace_get); | ||
413 | |||
401 | /* create a call */ | 414 | /* create a call */ |
402 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, | 415 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, |
403 | (unsigned long)call, | 416 | (unsigned long)call, |
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
438 | goto error_do_abort; | 451 | goto error_do_abort; |
439 | } | 452 | } |
440 | 453 | ||
441 | /* at this point, an async call may no longer exist as it may have | 454 | /* Note that at this point, we may have received the reply or an abort |
442 | * already completed */ | 455 | * - and an asynchronous call may already have completed. |
443 | if (call->async) | 456 | */ |
457 | if (call->async) { | ||
458 | afs_put_call(call); | ||
444 | return -EINPROGRESS; | 459 | return -EINPROGRESS; |
460 | } | ||
445 | 461 | ||
446 | return afs_wait_for_call_to_complete(call, ac); | 462 | return afs_wait_for_call_to_complete(call, ac); |
447 | 463 | ||
448 | error_do_abort: | 464 | error_do_abort: |
449 | call->state = AFS_CALL_COMPLETE; | ||
450 | if (ret != -ECONNABORTED) { | 465 | if (ret != -ECONNABORTED) { |
451 | rxrpc_kernel_abort_call(call->net->socket, rxcall, | 466 | rxrpc_kernel_abort_call(call->net->socket, rxcall, |
452 | RX_USER_ABORT, ret, "KSD"); | 467 | RX_USER_ABORT, ret, "KSD"); |
@@ -463,8 +478,24 @@ error_do_abort: | |||
463 | error_kill_call: | 478 | error_kill_call: |
464 | if (call->type->done) | 479 | if (call->type->done) |
465 | call->type->done(call); | 480 | call->type->done(call); |
466 | afs_put_call(call); | 481 | |
482 | /* We need to dispose of the extra ref we grabbed for an async call. | ||
483 | * The call, however, might be queued on afs_async_calls and we need to | ||
484 | * make sure we don't get any more notifications that might requeue it. | ||
485 | */ | ||
486 | if (call->rxcall) { | ||
487 | rxrpc_kernel_end_call(call->net->socket, call->rxcall); | ||
488 | call->rxcall = NULL; | ||
489 | } | ||
490 | if (call->async) { | ||
491 | if (cancel_work_sync(&call->async_work)) | ||
492 | afs_put_call(call); | ||
493 | afs_put_call(call); | ||
494 | } | ||
495 | |||
467 | ac->error = ret; | 496 | ac->error = ret; |
497 | call->state = AFS_CALL_COMPLETE; | ||
498 | afs_put_call(call); | ||
468 | _leave(" = %d", ret); | 499 | _leave(" = %d", ret); |
469 | return ret; | 500 | return ret; |
470 | } | 501 | } |
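Note: for an asynchronous call the rxrpc notification path may complete the call and drop its reference before afs_make_call() returns, so the hunks above add afs_get_call() and take an extra reference up front for async calls. On success the caller's reference is dropped and -EINPROGRESS returned; on the error path the rxrpc call is ended and any queued async work cancelled before the references go away, so nothing can requeue the call afterwards. Reference flow for the async case, condensed:

/* afs_make_call(): the async call keeps a reference on itself ... */
if (call->async)
        afs_get_call(call, afs_call_trace_get);

/* ... rxrpc call created and request sent here ... */

if (call->async) {
        afs_put_call(call);     /* drop only the caller's reference */
        return -EINPROGRESS;
}
return afs_wait_for_call_to_complete(call, ac);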
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 95d0761cdb34..155dc14caef9 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c | |||
@@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, | |||
42 | if (vldb->fs_mask[i] & type_mask) | 42 | if (vldb->fs_mask[i] & type_mask) |
43 | nr_servers++; | 43 | nr_servers++; |
44 | 44 | ||
45 | slist = kzalloc(sizeof(struct afs_server_list) + | 45 | slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); |
46 | sizeof(struct afs_server_entry) * nr_servers, | ||
47 | GFP_KERNEL); | ||
48 | if (!slist) | 46 | if (!slist) |
49 | goto error; | 47 | goto error; |
50 | 48 | ||
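Note: the open-coded "sizeof(struct) + n * sizeof(element)" above is replaced with struct_size() from <linux/overflow.h>, which computes the size of the structure plus nr_servers entries of its trailing servers[] array and saturates on arithmetic overflow, so the allocation fails instead of being undersized:

/* sizeof(*slist) + nr_servers * sizeof(slist->servers[0]), overflow-checked */
slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);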
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 12658c1363ae..5aa57929e8c2 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c | |||
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, | |||
803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); | 803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); |
804 | bp = xdr_encode_string(bp, name, namesz); | 804 | bp = xdr_encode_string(bp, name, namesz); |
805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); | 805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); |
806 | bp = xdr_encode_u32(bp, 0); /* ViceLockType */ | 806 | bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ |
807 | yfs_check_req(call, bp); | 807 | yfs_check_req(call, bp); |
808 | 808 | ||
809 | afs_use_fs_server(call, fc->cbi); | 809 | afs_use_fs_server(call, fc->cbi); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index c546cdce77e6..58a4c1217fa8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) | |||
104 | } | 104 | } |
105 | EXPORT_SYMBOL(invalidate_bdev); | 105 | EXPORT_SYMBOL(invalidate_bdev); |
106 | 106 | ||
107 | static void set_init_blocksize(struct block_device *bdev) | ||
108 | { | ||
109 | unsigned bsize = bdev_logical_block_size(bdev); | ||
110 | loff_t size = i_size_read(bdev->bd_inode); | ||
111 | |||
112 | while (bsize < PAGE_SIZE) { | ||
113 | if (size & bsize) | ||
114 | break; | ||
115 | bsize <<= 1; | ||
116 | } | ||
117 | bdev->bd_block_size = bsize; | ||
118 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
119 | } | ||
120 | |||
107 | int set_blocksize(struct block_device *bdev, int size) | 121 | int set_blocksize(struct block_device *bdev, int size) |
108 | { | 122 | { |
109 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ | 123 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ |
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change); | |||
1431 | 1445 | ||
1432 | void bd_set_size(struct block_device *bdev, loff_t size) | 1446 | void bd_set_size(struct block_device *bdev, loff_t size) |
1433 | { | 1447 | { |
1434 | unsigned bsize = bdev_logical_block_size(bdev); | ||
1435 | |||
1436 | inode_lock(bdev->bd_inode); | 1448 | inode_lock(bdev->bd_inode); |
1437 | i_size_write(bdev->bd_inode, size); | 1449 | i_size_write(bdev->bd_inode, size); |
1438 | inode_unlock(bdev->bd_inode); | 1450 | inode_unlock(bdev->bd_inode); |
1439 | while (bsize < PAGE_SIZE) { | ||
1440 | if (size & bsize) | ||
1441 | break; | ||
1442 | bsize <<= 1; | ||
1443 | } | ||
1444 | bdev->bd_block_size = bsize; | ||
1445 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
1446 | } | 1451 | } |
1447 | EXPORT_SYMBOL(bd_set_size); | 1452 | EXPORT_SYMBOL(bd_set_size); |
1448 | 1453 | ||
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1519 | } | 1524 | } |
1520 | } | 1525 | } |
1521 | 1526 | ||
1522 | if (!ret) | 1527 | if (!ret) { |
1523 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); | 1528 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
1529 | set_init_blocksize(bdev); | ||
1530 | } | ||
1524 | 1531 | ||
1525 | /* | 1532 | /* |
1526 | * If the device is invalidated, rescan partition | 1533 | * If the device is invalidated, rescan partition |
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1555 | goto out_clear; | 1562 | goto out_clear; |
1556 | } | 1563 | } |
1557 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); | 1564 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); |
1565 | set_init_blocksize(bdev); | ||
1558 | } | 1566 | } |
1559 | 1567 | ||
1560 | if (bdev->bd_bdi == &noop_backing_dev_info) | 1568 | if (bdev->bd_bdi == &noop_backing_dev_info) |
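Note: bd_set_size() used to recompute bd_block_size and i_blkbits on every call. The hunks above split that computation out into set_init_blocksize() and call it from __blkdev_get() only, i.e. only when the device is being set up, so later size updates no longer touch the block size. The helper picks the largest power-of-two block size, up to PAGE_SIZE, that still evenly divides the device size:

static void set_init_blocksize(struct block_device *bdev)
{
        unsigned bsize = bdev_logical_block_size(bdev);
        loff_t size = i_size_read(bdev->bd_inode);

        /* Double the soft block size while it still divides the device
         * size, but never beyond one page. */
        while (bsize < PAGE_SIZE) {
                if (size & bsize)
                        break;
                bsize <<= 1;
        }
        bdev->bd_block_size = bsize;
        bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}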
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0a68cf7032f5..7a2a2621f0d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -35,6 +35,7 @@ | |||
35 | struct btrfs_trans_handle; | 35 | struct btrfs_trans_handle; |
36 | struct btrfs_transaction; | 36 | struct btrfs_transaction; |
37 | struct btrfs_pending_snapshot; | 37 | struct btrfs_pending_snapshot; |
38 | struct btrfs_delayed_ref_root; | ||
38 | extern struct kmem_cache *btrfs_trans_handle_cachep; | 39 | extern struct kmem_cache *btrfs_trans_handle_cachep; |
39 | extern struct kmem_cache *btrfs_bit_radix_cachep; | 40 | extern struct kmem_cache *btrfs_bit_radix_cachep; |
40 | extern struct kmem_cache *btrfs_path_cachep; | 41 | extern struct kmem_cache *btrfs_path_cachep; |
@@ -786,6 +787,9 @@ enum { | |||
786 | * main phase. The fs_info::balance_ctl is initialized. | 787 | * main phase. The fs_info::balance_ctl is initialized. |
787 | */ | 788 | */ |
788 | BTRFS_FS_BALANCE_RUNNING, | 789 | BTRFS_FS_BALANCE_RUNNING, |
790 | |||
791 | /* Indicate that the cleaner thread is awake and doing something. */ | ||
792 | BTRFS_FS_CLEANER_RUNNING, | ||
789 | }; | 793 | }; |
790 | 794 | ||
791 | struct btrfs_fs_info { | 795 | struct btrfs_fs_info { |
@@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
2661 | unsigned long count); | 2665 | unsigned long count); |
2662 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, | 2666 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, |
2663 | unsigned long count, u64 transid, int wait); | 2667 | unsigned long count, u64 transid, int wait); |
2668 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, | ||
2669 | struct btrfs_delayed_ref_root *delayed_refs, | ||
2670 | struct btrfs_delayed_ref_head *head); | ||
2664 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); | 2671 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); |
2665 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | 2672 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, |
2666 | struct btrfs_fs_info *fs_info, u64 bytenr, | 2673 | struct btrfs_fs_info *fs_info, u64 bytenr, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8da2f380d3c0..6a2a2a951705 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg) | |||
1682 | while (1) { | 1682 | while (1) { |
1683 | again = 0; | 1683 | again = 0; |
1684 | 1684 | ||
1685 | set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
1686 | |||
1685 | /* Make the cleaner go to sleep early. */ | 1687 | /* Make the cleaner go to sleep early. */ |
1686 | if (btrfs_need_cleaner_sleep(fs_info)) | 1688 | if (btrfs_need_cleaner_sleep(fs_info)) |
1687 | goto sleep; | 1689 | goto sleep; |
@@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg) | |||
1728 | */ | 1730 | */ |
1729 | btrfs_delete_unused_bgs(fs_info); | 1731 | btrfs_delete_unused_bgs(fs_info); |
1730 | sleep: | 1732 | sleep: |
1733 | clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
1731 | if (kthread_should_park()) | 1734 | if (kthread_should_park()) |
1732 | kthread_parkme(); | 1735 | kthread_parkme(); |
1733 | if (kthread_should_stop()) | 1736 | if (kthread_should_stop()) |
@@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |||
4201 | spin_lock(&fs_info->ordered_root_lock); | 4204 | spin_lock(&fs_info->ordered_root_lock); |
4202 | } | 4205 | } |
4203 | spin_unlock(&fs_info->ordered_root_lock); | 4206 | spin_unlock(&fs_info->ordered_root_lock); |
4207 | |||
4208 | /* | ||
4209 | * We need this here because if the filesystem has been flipped read-only ||
4210 | * we won't get a sync() from the umount, so we need to make sure any ||
4211 | * ordered extents whose dirty pages haven't started writeback yet actually ||
4212 | * get run and error out properly. ||
4213 | */ | ||
4214 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); | ||
4204 | } | 4215 | } |
4205 | 4216 | ||
4206 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | 4217 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
@@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
4265 | if (pin_bytes) | 4276 | if (pin_bytes) |
4266 | btrfs_pin_extent(fs_info, head->bytenr, | 4277 | btrfs_pin_extent(fs_info, head->bytenr, |
4267 | head->num_bytes, 1); | 4278 | head->num_bytes, 1); |
4279 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); | ||
4268 | btrfs_put_delayed_ref_head(head); | 4280 | btrfs_put_delayed_ref_head(head); |
4269 | cond_resched(); | 4281 | cond_resched(); |
4270 | spin_lock(&delayed_refs->lock); | 4282 | spin_lock(&delayed_refs->lock); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b15afeae16df..d81035b7ea7d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, | |||
2456 | return ret ? ret : 1; | 2456 | return ret ? ret : 1; |
2457 | } | 2457 | } |
2458 | 2458 | ||
2459 | static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, | 2459 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, |
2460 | struct btrfs_delayed_ref_head *head) | 2460 | struct btrfs_delayed_ref_root *delayed_refs, |
2461 | struct btrfs_delayed_ref_head *head) | ||
2461 | { | 2462 | { |
2462 | struct btrfs_fs_info *fs_info = trans->fs_info; | ||
2463 | struct btrfs_delayed_ref_root *delayed_refs = | ||
2464 | &trans->transaction->delayed_refs; | ||
2465 | int nr_items = 1; /* Dropping this ref head update. */ | 2463 | int nr_items = 1; /* Dropping this ref head update. */ |
2466 | 2464 | ||
2467 | if (head->total_ref_mod < 0) { | 2465 | if (head->total_ref_mod < 0) { |
@@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, | |||
2544 | } | 2542 | } |
2545 | } | 2543 | } |
2546 | 2544 | ||
2547 | cleanup_ref_head_accounting(trans, head); | 2545 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
2548 | 2546 | ||
2549 | trace_run_delayed_ref_head(fs_info, head, 0); | 2547 | trace_run_delayed_ref_head(fs_info, head, 0); |
2550 | btrfs_delayed_ref_unlock(head); | 2548 | btrfs_delayed_ref_unlock(head); |
@@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info, | |||
4954 | ret = 0; | 4952 | ret = 0; |
4955 | break; | 4953 | break; |
4956 | case COMMIT_TRANS: | 4954 | case COMMIT_TRANS: |
4955 | /* | ||
4956 | * If we have pending delayed iputs then we could free up a | ||
4957 | * bunch of pinned space, so make sure we run the iputs before | ||
4958 | * we do our pinned bytes check below. | ||
4959 | */ | ||
4960 | mutex_lock(&fs_info->cleaner_delayed_iput_mutex); | ||
4961 | btrfs_run_delayed_iputs(fs_info); | ||
4962 | mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); | ||
4963 | |||
4957 | ret = may_commit_transaction(fs_info, space_info); | 4964 | ret = may_commit_transaction(fs_info, space_info); |
4958 | break; | 4965 | break; |
4959 | default: | 4966 | default: |
@@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
7188 | if (head->must_insert_reserved) | 7195 | if (head->must_insert_reserved) |
7189 | ret = 1; | 7196 | ret = 1; |
7190 | 7197 | ||
7191 | cleanup_ref_head_accounting(trans, head); | 7198 | btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); |
7192 | mutex_unlock(&head->mutex); | 7199 | mutex_unlock(&head->mutex); |
7193 | btrfs_put_delayed_ref_head(head); | 7200 | btrfs_put_delayed_ref_head(head); |
7194 | return ret; | 7201 | return ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43eb4535319d..5c349667c761 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3129,9 +3129,6 @@ out: | |||
3129 | /* once for the tree */ | 3129 | /* once for the tree */ |
3130 | btrfs_put_ordered_extent(ordered_extent); | 3130 | btrfs_put_ordered_extent(ordered_extent); |
3131 | 3131 | ||
3132 | /* Try to release some metadata so we don't get an OOM but don't wait */ | ||
3133 | btrfs_btree_balance_dirty_nodelay(fs_info); | ||
3134 | |||
3135 | return ret; | 3132 | return ret; |
3136 | } | 3133 | } |
3137 | 3134 | ||
@@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode) | |||
3254 | ASSERT(list_empty(&binode->delayed_iput)); | 3251 | ASSERT(list_empty(&binode->delayed_iput)); |
3255 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); | 3252 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); |
3256 | spin_unlock(&fs_info->delayed_iput_lock); | 3253 | spin_unlock(&fs_info->delayed_iput_lock); |
3254 | if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) | ||
3255 | wake_up_process(fs_info->cleaner_kthread); | ||
3257 | } | 3256 | } |
3258 | 3257 | ||
3259 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) | 3258 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) |
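Editor's note: taken together, the disk-io.c hunk above and this inode.c hunk implement a "don't kick a worker that is already running" pattern: the cleaner kthread sets BTRFS_FS_CLEANER_RUNNING for the duration of each pass and clears it before sleeping, and btrfs_add_delayed_iput() only wakes the kthread when the bit is clear. A rough sketch of the shape of that pattern (simplified names and C11 atomics; the real code relies on kernel bitops, locking, and wakeup semantics omitted here):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Simplified stand-ins for fs_info->flags and wake_up_process(). */
    static atomic_bool cleaner_running;

    static void wake_cleaner(void) { /* would wake the kthread */ }

    /* Producer side: queue work, but only wake the worker if it is idle. */
    static void add_delayed_work(void)
    {
            /* ... add the item to the delayed list under a lock ... */
            if (!atomic_load(&cleaner_running))
                    wake_cleaner();
    }

    /* Worker side: advertise that a pass is in progress. */
    static void cleaner_pass(void)
    {
            atomic_store(&cleaner_running, true);
            /* ... run delayed iputs, delete unused block groups, ... */
            atomic_store(&cleaner_running, false);
            /* go back to sleep until woken */
    }

    int main(void)
    {
            cleaner_pass();
            add_delayed_work();
            return 0;
    }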
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 46d691ba04bc..45b2322e092d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c | |||
@@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, | |||
133 | struct file *file_out, loff_t pos_out, | 133 | struct file *file_out, loff_t pos_out, |
134 | size_t count, unsigned int flags) | 134 | size_t count, unsigned int flags) |
135 | { | 135 | { |
136 | ssize_t ret; | ||
137 | |||
138 | if (file_inode(file_in) == file_inode(file_out)) | 136 | if (file_inode(file_in) == file_inode(file_out)) |
139 | return -EINVAL; | 137 | return -EINVAL; |
140 | retry: | 138 | return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); |
141 | ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); | ||
142 | if (ret == -EAGAIN) | ||
143 | goto retry; | ||
144 | return ret; | ||
145 | } | 139 | } |
146 | 140 | ||
147 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) | 141 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) |
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 96f7d32cd184..898c8321b343 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c | |||
@@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, | |||
128 | struct pstore_record *record) | 128 | struct pstore_record *record) |
129 | { | 129 | { |
130 | struct persistent_ram_zone *prz; | 130 | struct persistent_ram_zone *prz; |
131 | bool update = (record->type == PSTORE_TYPE_DMESG); | ||
132 | 131 | ||
133 | /* Give up if we never existed or have hit the end. */ | 132 | /* Give up if we never existed or have hit the end. */ |
134 | if (!przs) | 133 | if (!przs) |
@@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, | |||
139 | return NULL; | 138 | return NULL; |
140 | 139 | ||
141 | /* Update old/shadowed buffer. */ | 140 | /* Update old/shadowed buffer. */ |
142 | if (update) | 141 | if (prz->type == PSTORE_TYPE_DMESG) |
143 | persistent_ram_save_old(prz); | 142 | persistent_ram_save_old(prz); |
144 | 143 | ||
145 | if (!persistent_ram_old_size(prz)) | 144 | if (!persistent_ram_old_size(prz)) |
@@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev) | |||
711 | { | 710 | { |
712 | struct device *dev = &pdev->dev; | 711 | struct device *dev = &pdev->dev; |
713 | struct ramoops_platform_data *pdata = dev->platform_data; | 712 | struct ramoops_platform_data *pdata = dev->platform_data; |
713 | struct ramoops_platform_data pdata_local; | ||
714 | struct ramoops_context *cxt = &oops_cxt; | 714 | struct ramoops_context *cxt = &oops_cxt; |
715 | size_t dump_mem_sz; | 715 | size_t dump_mem_sz; |
716 | phys_addr_t paddr; | 716 | phys_addr_t paddr; |
717 | int err = -EINVAL; | 717 | int err = -EINVAL; |
718 | 718 | ||
719 | if (dev_of_node(dev) && !pdata) { | 719 | if (dev_of_node(dev) && !pdata) { |
720 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 720 | pdata = &pdata_local; |
721 | if (!pdata) { | 721 | memset(pdata, 0, sizeof(*pdata)); |
722 | pr_err("cannot allocate platform data buffer\n"); | ||
723 | err = -ENOMEM; | ||
724 | goto fail_out; | ||
725 | } | ||
726 | 722 | ||
727 | err = ramoops_parse_dt(pdev, pdata); | 723 | err = ramoops_parse_dt(pdev, pdata); |
728 | if (err < 0) | 724 | if (err < 0) |
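Editor's note: the ramoops_probe() change swaps a devm_kzalloc()'d scratch copy of the platform data for a zeroed on-stack struct, since the parsed data is only consumed during probe itself and a heap allocation (plus its error path) buys nothing. A generic, hypothetical illustration of that pattern, not the pstore code:

    #include <string.h>

    struct cfg { int a, b; };

    static int parse_cfg(struct cfg *c) { c->a = 1; c->b = 2; return 0; }

    static int probe(struct cfg *pdata)
    {
            struct cfg local;

            if (!pdata) {
                    /* No need for a heap allocation that outlives probe(). */
                    pdata = &local;
                    memset(pdata, 0, sizeof(*pdata));
                    if (parse_cfg(pdata) < 0)
                            return -1;
            }
            /* ... consume *pdata and copy whatever must persist elsewhere ... */
            return 0;
    }

    int main(void) { return probe(NULL); }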
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h index ad6f55dabd6d..0f2e0fe45ca4 100644 --- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h | |||
@@ -1,12 +1,11 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ | ||
1 | /* | 2 | /* |
2 | * | ||
3 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
5 | * | 5 | * |
6 | * Copyright (c) 2017 Amlogic, inc. | 6 | * Copyright (c) 2017 Amlogic, inc. |
7 | * Author: Yixun Lan <yixun.lan@amlogic.com> | 7 | * Author: Yixun Lan <yixun.lan@amlogic.com> |
8 | * | 8 | * |
9 | * SPDX-License-Identifier: (GPL-2.0+ OR BSD) | ||
10 | */ | 9 | */ |
11 | 10 | ||
12 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H | 11 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H |
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | struct bcma_soc { | 7 | struct bcma_soc { |
8 | struct bcma_bus bus; | 8 | struct bcma_bus bus; |
9 | struct device *dev; | ||
9 | }; | 10 | }; |
10 | 11 | ||
11 | int __init bcma_host_soc_register(struct bcma_soc *soc); | 12 | int __init bcma_host_soc_register(struct bcma_soc *soc); |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -3,9 +3,8 @@ | |||
3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." |
4 | #endif | 4 | #endif |
5 | 5 | ||
6 | /* Some compiler specific definitions are overwritten here | 6 | /* Compiler specific definitions for Clang compiler */ |
7 | * for Clang compiler | 7 | |
8 | */ | ||
9 | #define uninitialized_var(x) x = *(&(x)) | 8 | #define uninitialized_var(x) x = *(&(x)) |
10 | 9 | ||
11 | /* same as gcc, this was present in clang-2.6 so we can assume it works | 10 | /* same as gcc, this was present in clang-2.6 so we can assume it works |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dd8268f5f5f0..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -58,10 +58,6 @@ | |||
58 | (typeof(ptr)) (__ptr + (off)); \ | 58 | (typeof(ptr)) (__ptr + (off)); \ |
59 | }) | 59 | }) |
60 | 60 | ||
61 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ | ||
62 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
63 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
64 | |||
65 | /* | 61 | /* |
66 | * A trick to suppress uninitialized variable warning without generating any | 62 | * A trick to suppress uninitialized variable warning without generating any |
67 | * code | 63 | * code |
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
@@ -5,9 +5,7 @@ | |||
5 | 5 | ||
6 | #ifdef __ECC | 6 | #ifdef __ECC |
7 | 7 | ||
8 | /* Some compiler specific definitions are overwritten here | 8 | /* Compiler specific definitions for Intel ECC compiler */ |
9 | * for Intel ECC compiler | ||
10 | */ | ||
11 | 9 | ||
12 | #include <asm/intrinsics.h> | 10 | #include <asm/intrinsics.h> |
13 | 11 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifndef OPTIMIZER_HIDE_VAR | 163 | #ifndef OPTIMIZER_HIDE_VAR |
164 | #define OPTIMIZER_HIDE_VAR(var) barrier() | 164 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
165 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
166 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
165 | #endif | 167 | #endif |
166 | 168 | ||
167 | /* Not-quite-unique ID. */ | 169 | /* Not-quite-unique ID. */ |
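Editor's note: OPTIMIZER_HIDE_VAR() now lives in compiler.h with the asm-based definition for every compiler, replacing the weaker barrier() fallback visible on the old side of the hunk. The empty asm names the variable both as an output ("=r") and as the matching input ("0"), so the compiler must assume the value may have changed and cannot constant-fold through it. A small stand-alone demonstration (user space, GCC/Clang extended asm; illustrative only):

    #include <stdio.h>

    #define OPTIMIZER_HIDE_VAR(var) __asm__("" : "=r" (var) : "0" (var))

    int main(void)
    {
            int x = 2;

            /* Without this, the compiler folds x * x to the constant 4 at -O2.
             * With it, x is funneled through a register the optimizer cannot
             * see into, so a real multiply is emitted.
             */
            OPTIMIZER_HIDE_VAR(x);
            printf("%d\n", x * x);
            return 0;
    }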
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7cdd31a69719..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); | |||
653 | 653 | ||
654 | extern struct fb_info *registered_fb[FB_MAX]; | 654 | extern struct fb_info *registered_fb[FB_MAX]; |
655 | extern int num_registered_fb; | 655 | extern int num_registered_fb; |
656 | extern bool fb_center_logo; | ||
656 | extern struct class *fb_class; | 657 | extern struct class *fb_class; |
657 | 658 | ||
658 | #define for_each_registered_fb(i) \ | 659 | #define for_each_registered_fb(i) \ |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..7315977b64da 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( | |||
160 | } | 160 | } |
161 | 161 | ||
162 | enum nvdimm_security_state { | 162 | enum nvdimm_security_state { |
163 | NVDIMM_SECURITY_ERROR = -1, | ||
163 | NVDIMM_SECURITY_DISABLED, | 164 | NVDIMM_SECURITY_DISABLED, |
164 | NVDIMM_SECURITY_UNLOCKED, | 165 | NVDIMM_SECURITY_UNLOCKED, |
165 | NVDIMM_SECURITY_LOCKED, | 166 | NVDIMM_SECURITY_LOCKED, |
diff --git a/include/linux/of.h b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -50,7 +50,6 @@ struct of_irq_controller; | |||
50 | 50 | ||
51 | struct device_node { | 51 | struct device_node { |
52 | const char *name; | 52 | const char *name; |
53 | const char *type; | ||
54 | phandle phandle; | 53 | phandle phandle; |
55 | const char *full_name; | 54 | const char *full_name; |
56 | struct fwnode_handle fwnode; | 55 | struct fwnode_handle fwnode; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 0990f913d649..1f3873a2ff29 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; | |||
48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; | 48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; |
49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; | 49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; |
50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; | 50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; | ||
51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; | 52 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
52 | 53 | ||
53 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) | 54 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) |
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini | |||
56 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) | 57 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) |
57 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) | 58 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) |
58 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) | 59 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) |
60 | #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) | ||
59 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) | 61 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) |
60 | 62 | ||
61 | extern const int phy_10_100_features_array[4]; | 63 | extern const int phy_10_100_features_array[4]; |
@@ -461,8 +463,8 @@ struct phy_device { | |||
461 | * only works for PHYs with IDs which match this field | 463 | * only works for PHYs with IDs which match this field |
462 | * name: The friendly name of this PHY type | 464 | * name: The friendly name of this PHY type |
463 | * phy_id_mask: Defines the important bits of the phy_id | 465 | * phy_id_mask: Defines the important bits of the phy_id |
464 | * features: A list of features (speed, duplex, etc) supported | 466 | * features: A mandatory list of features (speed, duplex, etc) |
465 | * by this PHY | 467 | * supported by this PHY |
466 | * flags: A bitfield defining certain other features this PHY | 468 | * flags: A bitfield defining certain other features this PHY |
467 | * supports (like interrupts) | 469 | * supports (like interrupts) |
468 | * | 470 | * |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..95d25b010a25 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); | |||
3218 | * | 3218 | * |
3219 | * This is exactly the same as pskb_trim except that it ensures the | 3219 | * This is exactly the same as pskb_trim except that it ensures the |
3220 | * checksum of received packets are still valid after the operation. | 3220 | * checksum of received packets are still valid after the operation. |
3221 | * It can change skb pointers. | ||
3221 | */ | 3222 | */ |
3222 | 3223 | ||
3223 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | 3224 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) |
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..987b6491b946 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -12,6 +12,11 @@ struct irq_affinity; | |||
12 | 12 | ||
13 | /** | 13 | /** |
14 | * virtio_config_ops - operations for configuring a virtio device | 14 | * virtio_config_ops - operations for configuring a virtio device |
15 | * Note: Do not assume that a transport implements all of the operations | ||
16 | * getting/setting a value as a simple read/write! Generally speaking, | ||
17 | * any of @get/@set, @get_status/@set_status, or @get_features/ | ||
18 | * @finalize_features are NOT safe to be called from an atomic | ||
19 | * context. | ||
15 | * @get: read the value of a configuration field | 20 | * @get: read the value of a configuration field |
16 | * vdev: the virtio_device | 21 | * vdev: the virtio_device |
17 | * offset: the offset of the configuration field | 22 | * offset: the offset of the configuration field |
@@ -22,7 +27,7 @@ struct irq_affinity; | |||
22 | * offset: the offset of the configuration field | 27 | * offset: the offset of the configuration field |
23 | * buf: the buffer to read the field value from. | 28 | * buf: the buffer to read the field value from. |
24 | * len: the length of the buffer | 29 | * len: the length of the buffer |
25 | * @generation: config generation counter | 30 | * @generation: config generation counter (optional) |
26 | * vdev: the virtio_device | 31 | * vdev: the virtio_device |
27 | * Returns the config generation counter | 32 | * Returns the config generation counter |
28 | * @get_status: read the status byte | 33 | * @get_status: read the status byte |
@@ -48,17 +53,17 @@ struct irq_affinity; | |||
48 | * @del_vqs: free virtqueues found by find_vqs(). | 53 | * @del_vqs: free virtqueues found by find_vqs(). |
49 | * @get_features: get the array of feature bits for this device. | 54 | * @get_features: get the array of feature bits for this device. |
50 | * vdev: the virtio_device | 55 | * vdev: the virtio_device |
51 | * Returns the first 32 feature bits (all we currently need). | 56 | * Returns the first 64 feature bits (all we currently need). |
52 | * @finalize_features: confirm what device features we'll be using. | 57 | * @finalize_features: confirm what device features we'll be using. |
53 | * vdev: the virtio_device | 58 | * vdev: the virtio_device |
54 | * This gives the final feature bits for the device: it can change | 59 | * This gives the final feature bits for the device: it can change |
55 | * the dev->feature bits if it wants. | 60 | * the dev->feature bits if it wants. |
56 | * Returns 0 on success or error status | 61 | * Returns 0 on success or error status |
57 | * @bus_name: return the bus name associated with the device | 62 | * @bus_name: return the bus name associated with the device (optional) |
58 | * vdev: the virtio_device | 63 | * vdev: the virtio_device |
59 | * This returns a pointer to the bus name a la pci_name from which | 64 | * This returns a pointer to the bus name a la pci_name from which |
60 | * the caller can then copy. | 65 | * the caller can then copy. |
61 | * @set_vq_affinity: set the affinity for a virtqueue. | 66 | * @set_vq_affinity: set the affinity for a virtqueue (optional). |
62 | * @get_vq_affinity: get the affinity for a virtqueue (optional). | 67 | * @get_vq_affinity: get the affinity for a virtqueue (optional). |
63 | */ | 68 | */ |
64 | typedef void vq_callback_t(struct virtqueue *); | 69 | typedef void vq_callback_t(struct virtqueue *); |
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1adefe42c0a6..2bfb87eb98ce 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h | |||
@@ -21,18 +21,6 @@ struct socket; | |||
21 | struct rxrpc_call; | 21 | struct rxrpc_call; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Call completion condition (state == RXRPC_CALL_COMPLETE). | ||
25 | */ | ||
26 | enum rxrpc_call_completion { | ||
27 | RXRPC_CALL_SUCCEEDED, /* - Normal termination */ | ||
28 | RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ | ||
29 | RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ | ||
30 | RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ | ||
31 | RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ | ||
32 | NR__RXRPC_CALL_COMPLETIONS | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * Debug ID counter for tracing. | 24 | * Debug ID counter for tracing. |
37 | */ | 25 | */ |
38 | extern atomic_t rxrpc_debug_id; | 26 | extern atomic_t rxrpc_debug_id; |
@@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, | |||
73 | rxrpc_user_attach_call_t, unsigned long, gfp_t, | 61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, |
74 | unsigned int); | 62 | unsigned int); |
75 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); | 63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); |
76 | int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, | ||
77 | struct sockaddr_rxrpc *, struct key *); | ||
78 | int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, | ||
79 | enum rxrpc_call_completion *, u32 *); | ||
80 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); | 64 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); |
81 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); | 65 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); |
82 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); | 66 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c5969762a8f4..9c8214d2116d 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, | |||
241 | struct netlink_ext_ack *extack); | 241 | struct netlink_ext_ack *extack); |
242 | int fib_table_dump(struct fib_table *table, struct sk_buff *skb, | 242 | int fib_table_dump(struct fib_table *table, struct sk_buff *skb, |
243 | struct netlink_callback *cb, struct fib_dump_filter *filter); | 243 | struct netlink_callback *cb, struct fib_dump_filter *filter); |
244 | int fib_table_flush(struct net *net, struct fib_table *table); | 244 | int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); |
245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); | 245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); |
246 | void fib_table_flush_external(struct fib_table *table); | 246 | void fib_table_flush_external(struct fib_table *table); |
247 | void fib_free_table(struct fib_table *tb); | 247 | void fib_free_table(struct fib_table *tb); |
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 7d5cda7ce32a..3e370cb36263 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h | |||
@@ -84,7 +84,6 @@ struct flow_offload { | |||
84 | struct nf_flow_route { | 84 | struct nf_flow_route { |
85 | struct { | 85 | struct { |
86 | struct dst_entry *dst; | 86 | struct dst_entry *dst; |
87 | int ifindex; | ||
88 | } tuple[FLOW_OFFLOAD_DIR_MAX]; | 87 | } tuple[FLOW_OFFLOAD_DIR_MAX]; |
89 | }; | 88 | }; |
90 | 89 | ||
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 33d291888ba9..e3f005eae1f7 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h | |||
@@ -25,6 +25,7 @@ | |||
25 | enum afs_call_trace { | 25 | enum afs_call_trace { |
26 | afs_call_trace_alloc, | 26 | afs_call_trace_alloc, |
27 | afs_call_trace_free, | 27 | afs_call_trace_free, |
28 | afs_call_trace_get, | ||
28 | afs_call_trace_put, | 29 | afs_call_trace_put, |
29 | afs_call_trace_wake, | 30 | afs_call_trace_wake, |
30 | afs_call_trace_work, | 31 | afs_call_trace_work, |
@@ -159,6 +160,7 @@ enum afs_file_error { | |||
159 | #define afs_call_traces \ | 160 | #define afs_call_traces \ |
160 | EM(afs_call_trace_alloc, "ALLOC") \ | 161 | EM(afs_call_trace_alloc, "ALLOC") \ |
161 | EM(afs_call_trace_free, "FREE ") \ | 162 | EM(afs_call_trace_free, "FREE ") \ |
163 | EM(afs_call_trace_get, "GET ") \ | ||
162 | EM(afs_call_trace_put, "PUT ") \ | 164 | EM(afs_call_trace_put, "PUT ") \ |
163 | EM(afs_call_trace_wake, "WAKE ") \ | 165 | EM(afs_call_trace_wake, "WAKE ") \ |
164 | E_(afs_call_trace_work, "WORK ") | 166 | E_(afs_call_trace_work, "WORK ") |
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h | |||
@@ -268,7 +268,7 @@ struct sockaddr_in { | |||
268 | #define IN_MULTICAST(a) IN_CLASSD(a) | 268 | #define IN_MULTICAST(a) IN_CLASSD(a) |
269 | #define IN_MULTICAST_NET 0xe0000000 | 269 | #define IN_MULTICAST_NET 0xe0000000 |
270 | 270 | ||
271 | #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) | 271 | #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) |
272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) | 272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) |
273 | 273 | ||
274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) | 274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) |
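Editor's note: the old IN_BADCLASS() body had one more opening parenthesis than closing ones, so any code that actually expanded it would fail to compile; the fix balances the parentheses and casts both sides to long int so the comparison happens in a single type regardless of the width of long. An illustrative user-space check of the repaired macro, with hypothetical test addresses:

    #include <stdio.h>

    #define IN_BADCLASS(a) (((long int) (a)) == (long int)0xffffffff)

    int main(void)
    {
            unsigned int limited_broadcast = 0xffffffff; /* 255.255.255.255 */
            unsigned int class_e_host = 0xf0000001;      /* 240.0.0.1 */

            /* Expected output: 1 0 on both 32-bit and 64-bit longs. */
            printf("%d %d\n", IN_BADCLASS(limited_broadcast),
                   IN_BADCLASS(class_e_host));
            return 0;
    }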
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index d13fd490b66d..6e73f0274e41 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h | |||
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { | |||
78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, | 78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
79 | PVRDMA_WR_BIND_MW, | 79 | PVRDMA_WR_BIND_MW, |
80 | PVRDMA_WR_REG_SIG_MR, | 80 | PVRDMA_WR_REG_SIG_MR, |
81 | PVRDMA_WR_ERROR, | ||
81 | }; | 82 | }; |
82 | 83 | ||
83 | enum pvrdma_wc_status { | 84 | enum pvrdma_wc_status { |
diff --git a/init/Kconfig b/init/Kconfig index d47cb77a220e..513fa544a134 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION | |||
1124 | bool "Dead code and data elimination (EXPERIMENTAL)" | 1124 | bool "Dead code and data elimination (EXPERIMENTAL)" |
1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION | 1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION |
1126 | depends on EXPERT | 1126 | depends on EXPERT |
1127 | depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) | ||
1127 | depends on $(cc-option,-ffunction-sections -fdata-sections) | 1128 | depends on $(cc-option,-ffunction-sections -fdata-sections) |
1128 | depends on $(ld-option,--gc-sections) | 1129 | depends on $(ld-option,--gc-sections) |
1129 | help | 1130 | help |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a2f53642592b..befe570be5ba 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) | |||
467 | return kind_ops[BTF_INFO_KIND(t->info)]; | 467 | return kind_ops[BTF_INFO_KIND(t->info)]; |
468 | } | 468 | } |
469 | 469 | ||
470 | bool btf_name_offset_valid(const struct btf *btf, u32 offset) | 470 | static bool btf_name_offset_valid(const struct btf *btf, u32 offset) |
471 | { | 471 | { |
472 | return BTF_STR_OFFSET_VALID(offset) && | 472 | return BTF_STR_OFFSET_VALID(offset) && |
473 | offset < btf->hdr.str_len; | 473 | offset < btf->hdr.str_len; |
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 9425c2fb872f..ab612fe9862f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |||
718 | case BPF_FUNC_trace_printk: | 718 | case BPF_FUNC_trace_printk: |
719 | if (capable(CAP_SYS_ADMIN)) | 719 | if (capable(CAP_SYS_ADMIN)) |
720 | return bpf_get_trace_printk_proto(); | 720 | return bpf_get_trace_printk_proto(); |
721 | /* fall through */ | ||
721 | default: | 722 | default: |
722 | return NULL; | 723 | return NULL; |
723 | } | 724 | } |
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 99d243e1ad6e..52378d3e34b3 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c | |||
@@ -12,6 +12,7 @@ | |||
12 | struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | 12 | struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) |
13 | { | 13 | { |
14 | struct bpf_map *inner_map, *inner_map_meta; | 14 | struct bpf_map *inner_map, *inner_map_meta; |
15 | u32 inner_map_meta_size; | ||
15 | struct fd f; | 16 | struct fd f; |
16 | 17 | ||
17 | f = fdget(inner_map_ufd); | 18 | f = fdget(inner_map_ufd); |
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | |||
36 | return ERR_PTR(-EINVAL); | 37 | return ERR_PTR(-EINVAL); |
37 | } | 38 | } |
38 | 39 | ||
39 | inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); | 40 | inner_map_meta_size = sizeof(*inner_map_meta); |
41 | /* In some cases verifier needs to access beyond just base map. */ | ||
42 | if (inner_map->ops == &array_map_ops) | ||
43 | inner_map_meta_size = sizeof(struct bpf_array); | ||
44 | |||
45 | inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); | ||
40 | if (!inner_map_meta) { | 46 | if (!inner_map_meta) { |
41 | fdput(f); | 47 | fdput(f); |
42 | return ERR_PTR(-ENOMEM); | 48 | return ERR_PTR(-ENOMEM); |
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | |||
46 | inner_map_meta->key_size = inner_map->key_size; | 52 | inner_map_meta->key_size = inner_map->key_size; |
47 | inner_map_meta->value_size = inner_map->value_size; | 53 | inner_map_meta->value_size = inner_map->value_size; |
48 | inner_map_meta->map_flags = inner_map->map_flags; | 54 | inner_map_meta->map_flags = inner_map->map_flags; |
49 | inner_map_meta->ops = inner_map->ops; | ||
50 | inner_map_meta->max_entries = inner_map->max_entries; | 55 | inner_map_meta->max_entries = inner_map->max_entries; |
51 | 56 | ||
57 | /* Misc members not needed in bpf_map_meta_equal() check. */ | ||
58 | inner_map_meta->ops = inner_map->ops; | ||
59 | if (inner_map->ops == &array_map_ops) { | ||
60 | inner_map_meta->unpriv_array = inner_map->unpriv_array; | ||
61 | container_of(inner_map_meta, struct bpf_array, map)->index_mask = | ||
62 | container_of(inner_map, struct bpf_array, map)->index_mask; | ||
63 | } | ||
64 | |||
52 | fdput(f); | 65 | fdput(f); |
53 | return inner_map_meta; | 66 | return inner_map_meta; |
54 | } | 67 | } |
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d9e2483669d0..d43b14535827 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr, | |||
180 | 180 | ||
181 | if (nhdr->n_type == BPF_BUILD_ID && | 181 | if (nhdr->n_type == BPF_BUILD_ID && |
182 | nhdr->n_namesz == sizeof("GNU") && | 182 | nhdr->n_namesz == sizeof("GNU") && |
183 | nhdr->n_descsz == BPF_BUILD_ID_SIZE) { | 183 | nhdr->n_descsz > 0 && |
184 | nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { | ||
184 | memcpy(build_id, | 185 | memcpy(build_id, |
185 | note_start + note_offs + | 186 | note_start + note_offs + |
186 | ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), | 187 | ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), |
187 | BPF_BUILD_ID_SIZE); | 188 | nhdr->n_descsz); |
189 | memset(build_id + nhdr->n_descsz, 0, | ||
190 | BPF_BUILD_ID_SIZE - nhdr->n_descsz); | ||
188 | return 0; | 191 | return 0; |
189 | } | 192 | } |
190 | new_offs = note_offs + sizeof(Elf32_Nhdr) + | 193 | new_offs = note_offs + sizeof(Elf32_Nhdr) + |
@@ -311,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
311 | for (i = 0; i < trace_nr; i++) { | 314 | for (i = 0; i < trace_nr; i++) { |
312 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; | 315 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; |
313 | id_offs[i].ip = ips[i]; | 316 | id_offs[i].ip = ips[i]; |
317 | memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); | ||
314 | } | 318 | } |
315 | return; | 319 | return; |
316 | } | 320 | } |
@@ -321,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
321 | /* per entry fall back to ips */ | 325 | /* per entry fall back to ips */ |
322 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; | 326 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; |
323 | id_offs[i].ip = ips[i]; | 327 | id_offs[i].ip = ips[i]; |
328 | memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); | ||
324 | continue; | 329 | continue; |
325 | } | 330 | } |
326 | id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] | 331 | id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] |
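Editor's note: the stackmap change relaxes the exact-size check on the ELF build-ID note, since descriptors can legitimately be shorter than the 20-byte SHA-1 (for example 16-byte MD5/UUID build IDs), and zero-fills the tail so user space always sees a fully initialized fixed-width buffer; the two memset() additions do the same on the fallback paths. A hypothetical helper capturing that copy-and-pad rule:

    #include <stdio.h>
    #include <string.h>

    #define BPF_BUILD_ID_SIZE 20

    /* Copy a descsz-byte build ID into a fixed 20-byte slot, zero-padding
     * the remainder; reject empty or oversized descriptors as the kernel does.
     */
    static int copy_build_id(unsigned char *dst, const unsigned char *desc,
                             size_t descsz)
    {
            if (descsz == 0 || descsz > BPF_BUILD_ID_SIZE)
                    return -1;
            memcpy(dst, desc, descsz);
            memset(dst + descsz, 0, BPF_BUILD_ID_SIZE - descsz);
            return 0;
    }

    int main(void)
    {
            unsigned char id[BPF_BUILD_ID_SIZE];
            const unsigned char md5_id[16] = { 0xde, 0xad, 0xbe, 0xef };

            if (copy_build_id(id, md5_id, sizeof(md5_id)) == 0)
                    printf("last byte is %u (padded)\n", id[BPF_BUILD_ID_SIZE - 1]);
            return 0;
    }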
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d6361776dc5c..1fb6fd68b9c7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c | |||
@@ -378,6 +378,8 @@ void __init swiotlb_exit(void) | |||
378 | memblock_free_late(io_tlb_start, | 378 | memblock_free_late(io_tlb_start, |
379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
380 | } | 380 | } |
381 | io_tlb_start = 0; | ||
382 | io_tlb_end = 0; | ||
381 | io_tlb_nslabs = 0; | 383 | io_tlb_nslabs = 0; |
382 | max_segment = 0; | 384 | max_segment = 0; |
383 | } | 385 | } |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d7f538847b84..e815781ed751 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) | |||
976 | struct seccomp_filter *filter = file->private_data; | 976 | struct seccomp_filter *filter = file->private_data; |
977 | struct seccomp_knotif *knotif; | 977 | struct seccomp_knotif *knotif; |
978 | 978 | ||
979 | if (!filter) | ||
980 | return 0; | ||
981 | |||
979 | mutex_lock(&filter->notify_lock); | 982 | mutex_lock(&filter->notify_lock); |
980 | 983 | ||
981 | /* | 984 | /* |
@@ -1300,6 +1303,7 @@ out: | |||
1300 | out_put_fd: | 1303 | out_put_fd: |
1301 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { | 1304 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { |
1302 | if (ret < 0) { | 1305 | if (ret < 0) { |
1306 | listener_f->private_data = NULL; | ||
1303 | fput(listener_f); | 1307 | fput(listener_f); |
1304 | put_unused_fd(listener); | 1308 | put_unused_fd(listener); |
1305 | } else { | 1309 | } else { |
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c | |||
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) | |||
52 | if (x <= ULONG_MAX) | 52 | if (x <= ULONG_MAX) |
53 | return int_sqrt((unsigned long) x); | 53 | return int_sqrt((unsigned long) x); |
54 | 54 | ||
55 | m = 1ULL << (fls64(x) & ~1ULL); | 55 | m = 1ULL << ((fls64(x) - 1) & ~1ULL); |
56 | while (m != 0) { | 56 | while (m != 0) { |
57 | b = y + m; | 57 | b = y + m; |
58 | y >>= 1; | 58 | y >>= 1; |
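Editor's note: fls64() returns the 1-based index of the most significant set bit, so the previous expression could ask for a shift of 64 (undefined behaviour) whenever bit 63 was set, and otherwise could start one power of four above the argument; subtracting one first yields the highest set bit's position, rounded down to an even number as the digit-by-digit square root requires. A small user-space check of the two starting shifts, assuming the usual fls64(x) == 64 - clzll(x) identity:

    #include <stdint.h>
    #include <stdio.h>

    static int fls64(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            uint64_t x = 1ULL << 63;                    /* any input with bit 63 set */
            unsigned old_shift = fls64(x) & ~1U;        /* 64: shifting 1ULL by 64 is undefined */
            unsigned new_shift = (fls64(x) - 1) & ~1U;  /* 62: largest even bit index <= 63 */

            printf("old %u, new %u\n", old_shift, new_shift);
            return 0;
    }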
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S index 7f1c521dcc2f..9ea6100dca87 100644 --- a/net/bpfilter/bpfilter_umh_blob.S +++ b/net/bpfilter/bpfilter_umh_blob.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | .section .bpfilter_umh, "a" | 2 | .section .rodata, "a" |
3 | .global bpfilter_umh_start | 3 | .global bpfilter_umh_start |
4 | bpfilter_umh_start: | 4 | bpfilter_umh_start: |
5 | .incbin "net/bpfilter/bpfilter_umh" | 5 | .incbin "net/bpfilter/bpfilter_umh" |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 6664cb8590f8..00573cc46c98 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -1129,6 +1129,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, | |||
1129 | err = -ENOMEM; | 1129 | err = -ENOMEM; |
1130 | goto err_unlock; | 1130 | goto err_unlock; |
1131 | } | 1131 | } |
1132 | if (swdev_notify) | ||
1133 | fdb->added_by_user = 1; | ||
1132 | fdb->added_by_external_learn = 1; | 1134 | fdb->added_by_external_learn = 1; |
1133 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); | 1135 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); |
1134 | } else { | 1136 | } else { |
@@ -1148,6 +1150,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, | |||
1148 | modified = true; | 1150 | modified = true; |
1149 | } | 1151 | } |
1150 | 1152 | ||
1153 | if (swdev_notify) | ||
1154 | fdb->added_by_user = 1; | ||
1155 | |||
1151 | if (modified) | 1156 | if (modified) |
1152 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); | 1157 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); |
1153 | } | 1158 | } |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 2cb8da465b98..48ddc60b4fbd 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p, | |||
36 | 36 | ||
37 | int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) | 37 | int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) |
38 | { | 38 | { |
39 | skb_push(skb, ETH_HLEN); | ||
39 | if (!is_skb_forwardable(skb->dev, skb)) | 40 | if (!is_skb_forwardable(skb->dev, skb)) |
40 | goto drop; | 41 | goto drop; |
41 | 42 | ||
42 | skb_push(skb, ETH_HLEN); | ||
43 | br_drop_fake_rtable(skb); | 43 | br_drop_fake_rtable(skb); |
44 | 44 | ||
45 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 45 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to, | |||
98 | net = dev_net(indev); | 98 | net = dev_net(indev); |
99 | } else { | 99 | } else { |
100 | if (unlikely(netpoll_tx_running(to->br->dev))) { | 100 | if (unlikely(netpoll_tx_running(to->br->dev))) { |
101 | if (!is_skb_forwardable(skb->dev, skb)) { | 101 | skb_push(skb, ETH_HLEN); |
102 | if (!is_skb_forwardable(skb->dev, skb)) | ||
102 | kfree_skb(skb); | 103 | kfree_skb(skb); |
103 | } else { | 104 | else |
104 | skb_push(skb, ETH_HLEN); | ||
105 | br_netpoll_send_skb(to, skb); | 105 | br_netpoll_send_skb(to, skb); |
106 | } | ||
107 | return; | 106 | return; |
108 | } | 107 | } |
109 | br_hook = NF_BR_LOCAL_OUT; | 108 | br_hook = NF_BR_LOCAL_OUT; |
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index 94039f588f1d..564710f88f93 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c | |||
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) | |||
131 | IPSTATS_MIB_INDISCARDS); | 131 | IPSTATS_MIB_INDISCARDS); |
132 | goto drop; | 132 | goto drop; |
133 | } | 133 | } |
134 | hdr = ipv6_hdr(skb); | ||
134 | } | 135 | } |
135 | if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) | 136 | if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) |
136 | goto drop; | 137 | goto drop; |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 491828713e0b..5e55cef0cec3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user, | |||
1137 | tmp.name[sizeof(tmp.name) - 1] = 0; | 1137 | tmp.name[sizeof(tmp.name) - 1] = 0; |
1138 | 1138 | ||
1139 | countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; | 1139 | countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; |
1140 | newinfo = vmalloc(sizeof(*newinfo) + countersize); | 1140 | newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, |
1141 | PAGE_KERNEL); | ||
1141 | if (!newinfo) | 1142 | if (!newinfo) |
1142 | return -ENOMEM; | 1143 | return -ENOMEM; |
1143 | 1144 | ||
1144 | if (countersize) | 1145 | if (countersize) |
1145 | memset(newinfo->counters, 0, countersize); | 1146 | memset(newinfo->counters, 0, countersize); |
1146 | 1147 | ||
1147 | newinfo->entries = vmalloc(tmp.entries_size); | 1148 | newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, |
1149 | PAGE_KERNEL); | ||
1148 | if (!newinfo->entries) { | 1150 | if (!newinfo->entries) { |
1149 | ret = -ENOMEM; | 1151 | ret = -ENOMEM; |
1150 | goto free_newinfo; | 1152 | goto free_newinfo; |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 08cbed7d940e..419e8edf23ba 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c | |||
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) | |||
229 | pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) | 229 | pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) |
230 | return false; | 230 | return false; |
231 | 231 | ||
232 | ip6h = ipv6_hdr(skb); | ||
232 | thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); | 233 | thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); |
233 | if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) | 234 | if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) |
234 | return false; | 235 | return false; |
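Editor's note: this hunk, the br_netfilter_ipv6.c hunk above, and the sentence added to the pskb_trim_rcsum() kerneldoc earlier in this series make the same point: helpers that may reallocate the skb head invalidate previously computed header pointers, so the pointer must be re-derived after the call. A toy user-space analogy of that rule, using realloc() as a stand-in for a trim that can move the buffer (not kernel code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for an skb whose head can be reallocated by a trim. */
    struct buf {
            unsigned char *data;
            size_t len;
    };

    static int trim(struct buf *b, size_t len)
    {
            /* Like pskb_trim_rcsum(), this may move the underlying storage. */
            unsigned char *n = realloc(b->data, len);
            if (!n)
                    return -1;
            b->data = n;
            b->len = len;
            return 0;
    }

    int main(void)
    {
            struct buf b = { malloc(64), 64 };
            unsigned char *hdr;

            if (!b.data)
                    return 1;
            memset(b.data, 0, 64);
            hdr = b.data;                   /* pointer into the buffer */
            if (trim(&b, 32) == 0)
                    hdr = b.data;           /* must re-derive: old hdr may be stale */
            printf("%d\n", hdr[0]);
            free(b.data);
            return 0;
    }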
diff --git a/net/core/filter.c b/net/core/filter.c index 2b3b436ef545..7559d6835ecb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | |||
2020 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, | 2020 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
2021 | u32 flags) | 2021 | u32 flags) |
2022 | { | 2022 | { |
2023 | /* skb->mac_len is not set on normal egress */ | 2023 | unsigned int mlen = skb_network_offset(skb); |
2024 | unsigned int mlen = skb->network_header - skb->mac_header; | ||
2025 | 2024 | ||
2026 | __skb_pull(skb, mlen); | 2025 | if (mlen) { |
2026 | __skb_pull(skb, mlen); | ||
2027 | 2027 | ||
2028 | /* At ingress, the mac header has already been pulled once. | 2028 | /* At ingress, the mac header has already been pulled once. |
2029 | * At egress, skb_pospull_rcsum has to be done in case that | 2029 | * At egress, skb_pospull_rcsum has to be done in case that |
2030 | * the skb is originated from ingress (i.e. a forwarded skb) | 2030 | * the skb is originated from ingress (i.e. a forwarded skb) |
2031 | * to ensure that rcsum starts at net header. | 2031 | * to ensure that rcsum starts at net header. |
2032 | */ | 2032 | */ |
2033 | if (!skb_at_tc_ingress(skb)) | 2033 | if (!skb_at_tc_ingress(skb)) |
2034 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | 2034 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
2035 | } | ||
2035 | skb_pop_mac_header(skb); | 2036 | skb_pop_mac_header(skb); |
2036 | skb_reset_mac_len(skb); | 2037 | skb_reset_mac_len(skb); |
2037 | return flags & BPF_F_INGRESS ? | 2038 | return flags & BPF_F_INGRESS ? |
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4119 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | 4120 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
4120 | break; | 4121 | break; |
4121 | case SO_MAX_PACING_RATE: /* 32bit version */ | 4122 | case SO_MAX_PACING_RATE: /* 32bit version */ |
4123 | if (val != ~0U) | ||
4124 | cmpxchg(&sk->sk_pacing_status, | ||
4125 | SK_PACING_NONE, | ||
4126 | SK_PACING_NEEDED); | ||
4122 | sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; | 4127 | sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; |
4123 | sk->sk_pacing_rate = min(sk->sk_pacing_rate, | 4128 | sk->sk_pacing_rate = min(sk->sk_pacing_rate, |
4124 | sk->sk_max_pacing_rate); | 4129 | sk->sk_max_pacing_rate); |
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4132 | sk->sk_rcvlowat = val ? : 1; | 4137 | sk->sk_rcvlowat = val ? : 1; |
4133 | break; | 4138 | break; |
4134 | case SO_MARK: | 4139 | case SO_MARK: |
4135 | sk->sk_mark = val; | 4140 | if (sk->sk_mark != val) { |
4141 | sk->sk_mark = val; | ||
4142 | sk_dst_reset(sk); | ||
4143 | } | ||
4136 | break; | 4144 | break; |
4137 | default: | 4145 | default: |
4138 | ret = -EINVAL; | 4146 | ret = -EINVAL; |
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) | |||
5309 | case BPF_FUNC_trace_printk: | 5317 | case BPF_FUNC_trace_printk: |
5310 | if (capable(CAP_SYS_ADMIN)) | 5318 | if (capable(CAP_SYS_ADMIN)) |
5311 | return bpf_get_trace_printk_proto(); | 5319 | return bpf_get_trace_printk_proto(); |
5312 | /* else: fall through */ | 5320 | /* else, fall through */ |
5313 | default: | 5321 | default: |
5314 | return NULL; | 5322 | return NULL; |
5315 | } | 5323 | } |
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 3e85437f7106..a648568c5e8f 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c | |||
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, | |||
63 | lwt->name ? : "<unknown>"); | 63 | lwt->name ? : "<unknown>"); |
64 | ret = BPF_OK; | 64 | ret = BPF_OK; |
65 | } else { | 65 | } else { |
66 | skb_reset_mac_header(skb); | ||
66 | ret = skb_do_redirect(skb); | 67 | ret = skb_do_redirect(skb); |
67 | if (ret == 0) | 68 | if (ret == 0) |
68 | ret = BPF_REDIRECT; | 69 | ret = BPF_REDIRECT; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 3e27a779f288..4230400b9a30 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -450,7 +450,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) | |||
450 | buckets = (struct neighbour __rcu **) | 450 | buckets = (struct neighbour __rcu **) |
451 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, | 451 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, |
452 | get_order(size)); | 452 | get_order(size)); |
453 | kmemleak_alloc(buckets, size, 0, GFP_ATOMIC); | 453 | kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); |
454 | } | 454 | } |
455 | if (!buckets) { | 455 | if (!buckets) { |
456 | kfree(ret); | 456 | kfree(ret); |
@@ -1007,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh) | |||
1007 | if (neigh->ops->solicit) | 1007 | if (neigh->ops->solicit) |
1008 | neigh->ops->solicit(neigh, skb); | 1008 | neigh->ops->solicit(neigh, skb); |
1009 | atomic_inc(&neigh->probes); | 1009 | atomic_inc(&neigh->probes); |
1010 | kfree_skb(skb); | 1010 | consume_skb(skb); |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | /* Called when a timer expires for a neighbour entry. */ | 1013 | /* Called when a timer expires for a neighbour entry. */ |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6df95be96311..fe4f6a624238 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net) | |||
203 | struct fib_table *tb; | 203 | struct fib_table *tb; |
204 | 204 | ||
205 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) | 205 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) |
206 | flushed += fib_table_flush(net, tb); | 206 | flushed += fib_table_flush(net, tb, false); |
207 | } | 207 | } |
208 | 208 | ||
209 | if (flushed) | 209 | if (flushed) |
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net) | |||
1463 | 1463 | ||
1464 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { | 1464 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { |
1465 | hlist_del(&tb->tb_hlist); | 1465 | hlist_del(&tb->tb_hlist); |
1466 | fib_table_flush(net, tb); | 1466 | fib_table_flush(net, tb, true); |
1467 | fib_free_table(tb); | 1467 | fib_free_table(tb); |
1468 | } | 1468 | } |
1469 | } | 1469 | } |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 237c9f72b265..a573e37e0615 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb) | |||
1856 | } | 1856 | } |
1857 | 1857 | ||
1858 | /* Caller must hold RTNL. */ | 1858 | /* Caller must hold RTNL. */ |
1859 | int fib_table_flush(struct net *net, struct fib_table *tb) | 1859 | int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) |
1860 | { | 1860 | { |
1861 | struct trie *t = (struct trie *)tb->tb_data; | 1861 | struct trie *t = (struct trie *)tb->tb_data; |
1862 | struct key_vector *pn = t->kv; | 1862 | struct key_vector *pn = t->kv; |
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb) | |||
1904 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { | 1904 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { |
1905 | struct fib_info *fi = fa->fa_info; | 1905 | struct fib_info *fi = fa->fa_info; |
1906 | 1906 | ||
1907 | if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || | 1907 | if (!fi || tb->tb_id != fa->tb_id || |
1908 | tb->tb_id != fa->tb_id) { | 1908 | (!(fi->fib_flags & RTNH_F_DEAD) && |
1909 | !fib_props[fa->fa_type].error)) { | ||
1910 | slen = fa->fa_slen; | ||
1911 | continue; | ||
1912 | } | ||
1913 | |||
1914 | /* Do not flush error routes if network namespace is | ||
1915 | * not being dismantled | ||
1916 | */ | ||
1917 | if (!flush_all && fib_props[fa->fa_type].error) { | ||
1909 | slen = fa->fa_slen; | 1918 | slen = fa->fa_slen; |
1910 | continue; | 1919 | continue; |
1911 | } | 1920 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 632863541082..437070d1ffb1 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
1020 | { | 1020 | { |
1021 | int transport_offset = skb_transport_offset(skb); | 1021 | int transport_offset = skb_transport_offset(skb); |
1022 | struct guehdr *guehdr; | 1022 | struct guehdr *guehdr; |
1023 | size_t optlen; | 1023 | size_t len, optlen; |
1024 | int ret; | 1024 | int ret; |
1025 | 1025 | ||
1026 | if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) | 1026 | len = sizeof(struct udphdr) + sizeof(struct guehdr); |
1027 | if (!pskb_may_pull(skb, len)) | ||
1027 | return -EINVAL; | 1028 | return -EINVAL; |
1028 | 1029 | ||
1029 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | 1030 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; |
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
1058 | 1059 | ||
1059 | optlen = guehdr->hlen << 2; | 1060 | optlen = guehdr->hlen << 2; |
1060 | 1061 | ||
1062 | if (!pskb_may_pull(skb, len + optlen)) | ||
1063 | return -EINVAL; | ||
1064 | |||
1065 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | ||
1061 | if (validate_gue_flags(guehdr, optlen)) | 1066 | if (validate_gue_flags(guehdr, optlen)) |
1062 | return -EINVAL; | 1067 | return -EINVAL; |
1063 | 1068 | ||
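Editor's note: the gue_err() fix replaces a bare skb->len comparison with pskb_may_pull() so the fixed UDP+GUE header is actually linear before it is dereferenced, then pulls again once the variable-length option size is known, re-reading guehdr after the second pull because pulling can relocate the header. The underlying rule is the usual one for variable-length headers: validate that each part fits before reading it. A toy user-space parser illustrating that rule (hypothetical header layout, not the GUE format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy variable-length header: hlen counts trailing options in 4-byte units. */
    struct toyhdr {
            uint8_t hlen;
            uint8_t flags;
    };

    static int parse(const uint8_t *pkt, size_t pkt_len)
    {
            struct toyhdr h;
            size_t optlen;

            if (pkt_len < sizeof(h))            /* fixed part must be present */
                    return -1;
            memcpy(&h, pkt, sizeof(h));
            optlen = (size_t)h.hlen << 2;
            if (pkt_len < sizeof(h) + optlen)   /* ...and so must the options */
                    return -1;
            return 0;
    }

    int main(void)
    {
            uint8_t pkt[4] = { 2, 0 };          /* claims 8 bytes of options */
            printf("%d\n", parse(pkt, sizeof(pkt)));   /* -1: options truncated */
            return 0;
    }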
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d1d09f3e5f9e..b1a74d80d868 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -569,8 +569,7 @@ err_free_skb: | |||
569 | dev->stats.tx_dropped++; | 569 | dev->stats.tx_dropped++; |
570 | } | 570 | } |
571 | 571 | ||
572 | static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | 572 | static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) |
573 | __be16 proto) | ||
574 | { | 573 | { |
575 | struct ip_tunnel *tunnel = netdev_priv(dev); | 574 | struct ip_tunnel *tunnel = netdev_priv(dev); |
576 | struct ip_tunnel_info *tun_info; | 575 | struct ip_tunnel_info *tun_info; |
@@ -578,10 +577,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | |||
578 | struct erspan_metadata *md; | 577 | struct erspan_metadata *md; |
579 | struct rtable *rt = NULL; | 578 | struct rtable *rt = NULL; |
580 | bool truncate = false; | 579 | bool truncate = false; |
580 | __be16 df, proto; | ||
581 | struct flowi4 fl; | 581 | struct flowi4 fl; |
582 | int tunnel_hlen; | 582 | int tunnel_hlen; |
583 | int version; | 583 | int version; |
584 | __be16 df; | ||
585 | int nhoff; | 584 | int nhoff; |
586 | int thoff; | 585 | int thoff; |
587 | 586 | ||
@@ -626,18 +625,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | |||
626 | if (version == 1) { | 625 | if (version == 1) { |
627 | erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), | 626 | erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), |
628 | ntohl(md->u.index), truncate, true); | 627 | ntohl(md->u.index), truncate, true); |
628 | proto = htons(ETH_P_ERSPAN); | ||
629 | } else if (version == 2) { | 629 | } else if (version == 2) { |
630 | erspan_build_header_v2(skb, | 630 | erspan_build_header_v2(skb, |
631 | ntohl(tunnel_id_to_key32(key->tun_id)), | 631 | ntohl(tunnel_id_to_key32(key->tun_id)), |
632 | md->u.md2.dir, | 632 | md->u.md2.dir, |
633 | get_hwid(&md->u.md2), | 633 | get_hwid(&md->u.md2), |
634 | truncate, true); | 634 | truncate, true); |
635 | proto = htons(ETH_P_ERSPAN2); | ||
635 | } else { | 636 | } else { |
636 | goto err_free_rt; | 637 | goto err_free_rt; |
637 | } | 638 | } |
638 | 639 | ||
639 | gre_build_header(skb, 8, TUNNEL_SEQ, | 640 | gre_build_header(skb, 8, TUNNEL_SEQ, |
640 | htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); | 641 | proto, 0, htonl(tunnel->o_seqno++)); |
641 | 642 | ||
642 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; | 643 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
643 | 644 | ||
@@ -721,12 +722,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
721 | { | 722 | { |
722 | struct ip_tunnel *tunnel = netdev_priv(dev); | 723 | struct ip_tunnel *tunnel = netdev_priv(dev); |
723 | bool truncate = false; | 724 | bool truncate = false; |
725 | __be16 proto; | ||
724 | 726 | ||
725 | if (!pskb_inet_may_pull(skb)) | 727 | if (!pskb_inet_may_pull(skb)) |
726 | goto free_skb; | 728 | goto free_skb; |
727 | 729 | ||
728 | if (tunnel->collect_md) { | 730 | if (tunnel->collect_md) { |
729 | erspan_fb_xmit(skb, dev, skb->protocol); | 731 | erspan_fb_xmit(skb, dev); |
730 | return NETDEV_TX_OK; | 732 | return NETDEV_TX_OK; |
731 | } | 733 | } |
732 | 734 | ||
@@ -742,19 +744,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
742 | } | 744 | } |
743 | 745 | ||
744 | /* Push ERSPAN header */ | 746 | /* Push ERSPAN header */ |
745 | if (tunnel->erspan_ver == 1) | 747 | if (tunnel->erspan_ver == 1) { |
746 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), | 748 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), |
747 | tunnel->index, | 749 | tunnel->index, |
748 | truncate, true); | 750 | truncate, true); |
749 | else if (tunnel->erspan_ver == 2) | 751 | proto = htons(ETH_P_ERSPAN); |
752 | } else if (tunnel->erspan_ver == 2) { | ||
750 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), | 753 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), |
751 | tunnel->dir, tunnel->hwid, | 754 | tunnel->dir, tunnel->hwid, |
752 | truncate, true); | 755 | truncate, true); |
753 | else | 756 | proto = htons(ETH_P_ERSPAN2); |
757 | } else { | ||
754 | goto free_skb; | 758 | goto free_skb; |
759 | } | ||
755 | 760 | ||
756 | tunnel->parms.o_flags &= ~TUNNEL_KEY; | 761 | tunnel->parms.o_flags &= ~TUNNEL_KEY; |
757 | __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); | 762 | __gre_xmit(skb, dev, &tunnel->parms.iph, proto); |
758 | return NETDEV_TX_OK; | 763 | return NETDEV_TX_OK; |
759 | 764 | ||
760 | free_skb: | 765 | free_skb: |
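Both ERSPAN transmit paths in ip_gre.c previously hard-coded ETH_P_ERSPAN when building the GRE header; the patch instead picks the ethertype from the ERSPAN version so v2 frames carry ETH_P_ERSPAN2 (the ip6_gre.c hunk below does the same with a ternary). A small sketch of that version-to-ethertype mapping; the constants are local copies of the values the kernel defines in if_ether.h:

```c
#include <stdint.h>
#include <stdio.h>

/* Local copies for illustration; the kernel defines these in if_ether.h. */
#define ETH_P_ERSPAN   0x88BE   /* ERSPAN version 1 (type II)  */
#define ETH_P_ERSPAN2  0x22EB   /* ERSPAN version 2 (type III) */

/* Return the ethertype to put in the GRE header for a given ERSPAN
 * version, or 0 for versions the driver does not handle. */
static uint16_t erspan_proto(int erspan_ver)
{
	switch (erspan_ver) {
	case 1:  return ETH_P_ERSPAN;
	case 2:  return ETH_P_ERSPAN2;
	default: return 0;
	}
}

int main(void)
{
	for (int v = 0; v <= 3; v++)
		printf("erspan_ver=%d -> proto=0x%04x\n", v,
		       (unsigned int)erspan_proto(v));
	return 0;
}
```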
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 26921f6b3b92..51d8efba6de2 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) | |||
488 | goto drop; | 488 | goto drop; |
489 | } | 489 | } |
490 | 490 | ||
491 | iph = ip_hdr(skb); | ||
491 | skb->transport_header = skb->network_header + iph->ihl*4; | 492 | skb->transport_header = skb->network_header + iph->ihl*4; |
492 | 493 | ||
493 | /* Remove any debris in the socket control block */ | 494 | /* Remove any debris in the socket control block */ |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 541bdb9f81d7..5f099c9d04e5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) | |||
1186 | flags = msg->msg_flags; | 1186 | flags = msg->msg_flags; |
1187 | 1187 | ||
1188 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { | 1188 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { |
1189 | if (sk->sk_state != TCP_ESTABLISHED) { | 1189 | if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { |
1190 | err = -EINVAL; | 1190 | err = -EINVAL; |
1191 | goto out_err; | 1191 | goto out_err; |
1192 | } | 1192 | } |
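The tcp.c hunk relaxes the MSG_ZEROCOPY gate from a single-state equality test to a bitmask test, so both TCP_ESTABLISHED and TCP_CLOSE_WAIT are accepted: (1 << state) turns the state into a bit and the mask lists the acceptable bits. A standalone sketch of the idiom; the state numbering here is invented for illustration and does not match the kernel's TCP_* values:

```c
#include <stdio.h>

/* Hypothetical state numbering; the bitmask idiom is what matters. */
enum state { ST_ESTABLISHED = 1, ST_SYN_SENT, ST_CLOSE_WAIT, ST_CLOSED };

#define STF(s)          (1u << (s))
#define ALLOWED_STATES  (STF(ST_ESTABLISHED) | STF(ST_CLOSE_WAIT))

/* Nonzero if the state's bit falls inside the allowed mask. */
static int state_allowed(enum state s)
{
	return !(STF(s) & ~ALLOWED_STATES);
}

int main(void)
{
	printf("established: %d\n", state_allowed(ST_ESTABLISHED)); /* 1 */
	printf("syn-sent:    %d\n", state_allowed(ST_SYN_SENT));    /* 0 */
	printf("close-wait:  %d\n", state_allowed(ST_CLOSE_WAIT));  /* 1 */
	return 0;
}
```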
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3fb0ed5e4789..5c3cd5d84a6f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, | |||
847 | const int hlen = skb_network_header_len(skb) + | 847 | const int hlen = skb_network_header_len(skb) + |
848 | sizeof(struct udphdr); | 848 | sizeof(struct udphdr); |
849 | 849 | ||
850 | if (hlen + cork->gso_size > cork->fragsize) | 850 | if (hlen + cork->gso_size > cork->fragsize) { |
851 | kfree_skb(skb); | ||
851 | return -EINVAL; | 852 | return -EINVAL; |
852 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | 853 | } |
854 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
855 | kfree_skb(skb); | ||
853 | return -EINVAL; | 856 | return -EINVAL; |
854 | if (sk->sk_no_check_tx) | 857 | } |
858 | if (sk->sk_no_check_tx) { | ||
859 | kfree_skb(skb); | ||
855 | return -EINVAL; | 860 | return -EINVAL; |
861 | } | ||
856 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | 862 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || |
857 | dst_xfrm(skb_dst(skb))) | 863 | dst_xfrm(skb_dst(skb))) { |
864 | kfree_skb(skb); | ||
858 | return -EIO; | 865 | return -EIO; |
866 | } | ||
859 | 867 | ||
860 | skb_shinfo(skb)->gso_size = cork->gso_size; | 868 | skb_shinfo(skb)->gso_size = cork->gso_size; |
861 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | 869 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; |
@@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) | |||
1918 | } | 1926 | } |
1919 | EXPORT_SYMBOL(udp_lib_rehash); | 1927 | EXPORT_SYMBOL(udp_lib_rehash); |
1920 | 1928 | ||
1921 | static void udp_v4_rehash(struct sock *sk) | 1929 | void udp_v4_rehash(struct sock *sk) |
1922 | { | 1930 | { |
1923 | u16 new_hash = ipv4_portaddr_hash(sock_net(sk), | 1931 | u16 new_hash = ipv4_portaddr_hash(sock_net(sk), |
1924 | inet_sk(sk)->inet_rcv_saddr, | 1932 | inet_sk(sk)->inet_rcv_saddr, |
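The udp_send_skb() hunk (mirrored in udp_v6_send_skb() below) makes every early -EINVAL/-EIO return call kfree_skb() first, since the caller has already handed the skb over and it would otherwise be leaked on those paths. A hedged userspace sketch of the same "release on every error exit" discipline; unlike the patch, which frees inline in each branch, this sketch funnels the exits through one cleanup label as an alternative structuring:

```c
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct packet {
	size_t len;
	size_t gso_size;
	char   payload[64];
};

/* Consumes pkt: on both success and failure the packet is released,
 * mirroring how udp_send_skb() must not leak the skb on -EINVAL/-EIO. */
static int send_packet(struct packet *pkt, size_t mtu)
{
	int err = 0;

	if (pkt->gso_size > mtu) {
		err = -EINVAL;
		goto out_free;
	}
	if (pkt->len > pkt->gso_size * 64) {
		err = -EINVAL;
		goto out_free;
	}

	printf("sent %zu bytes\n", pkt->len);

out_free:
	free(pkt);
	return err;
}

int main(void)
{
	struct packet *p = calloc(1, sizeof(*p));
	if (!p)
		return 1;
	p->len = 1200;
	p->gso_size = 1400;
	return send_packet(p, 1500) ? 1 : 0;
}
```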
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 322672655419..6b2fa77eeb1c 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int); | |||
10 | int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); | 10 | int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); |
11 | 11 | ||
12 | int udp_v4_get_port(struct sock *sk, unsigned short snum); | 12 | int udp_v4_get_port(struct sock *sk, unsigned short snum); |
13 | void udp_v4_rehash(struct sock *sk); | ||
13 | 14 | ||
14 | int udp_setsockopt(struct sock *sk, int level, int optname, | 15 | int udp_setsockopt(struct sock *sk, int level, int optname, |
15 | char __user *optval, unsigned int optlen); | 16 | char __user *optval, unsigned int optlen); |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 39c7f17d916f..3c94b8f0ff27 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -53,6 +53,7 @@ struct proto udplite_prot = { | |||
53 | .sendpage = udp_sendpage, | 53 | .sendpage = udp_sendpage, |
54 | .hash = udp_lib_hash, | 54 | .hash = udp_lib_hash, |
55 | .unhash = udp_lib_unhash, | 55 | .unhash = udp_lib_unhash, |
56 | .rehash = udp_v4_rehash, | ||
56 | .get_port = udp_v4_get_port, | 57 | .get_port = udp_v4_get_port, |
57 | .memory_allocated = &udp_memory_allocated, | 58 | .memory_allocated = &udp_memory_allocated, |
58 | .sysctl_mem = sysctl_udp_mem, | 59 | .sysctl_mem = sysctl_udp_mem, |
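The udp.c/udp_impl.h/udplite.c trio un-statics udp_v4_rehash(), declares it in the shared header, and wires it into udplite_prot.rehash, so connected UDP-Lite sockets get rehashed the same way UDP sockets do (the IPv6 side repeats this below). A toy sketch of sharing one callback through an ops table; struct proto_ops_sketch and its fields are hypothetical, not the kernel's struct proto:

```c
#include <stdio.h>

struct sock;    /* opaque for the sketch */

/* Illustrative ops table in the spirit of struct proto. */
struct proto_ops_sketch {
	const char *name;
	void (*rehash)(struct sock *sk);
};

static void udp_v4_rehash_sketch(struct sock *sk)
{
	(void)sk;
	printf("recompute the port/address hash bucket\n");
}

/* UDP and UDP-Lite share the same rehash implementation, as the patch
 * does by exporting udp_v4_rehash and pointing udplite's .rehash at it. */
static const struct proto_ops_sketch udp_p     = { "UDP",      udp_v4_rehash_sketch };
static const struct proto_ops_sketch udplite_p = { "UDP-Lite", udp_v4_rehash_sketch };

int main(void)
{
	const struct proto_ops_sketch *protos[] = { &udp_p, &udplite_p };

	for (int i = 0; i < 2; i++) {
		printf("%-9s -> ", protos[i]->name);
		protos[i]->rehash(NULL);
	}
	return 0;
}
```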
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index 7da7bf3b7fe3..b858bd5280bf 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
90 | { | 90 | { |
91 | int transport_offset = skb_transport_offset(skb); | 91 | int transport_offset = skb_transport_offset(skb); |
92 | struct guehdr *guehdr; | 92 | struct guehdr *guehdr; |
93 | size_t optlen; | 93 | size_t len, optlen; |
94 | int ret; | 94 | int ret; |
95 | 95 | ||
96 | if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) | 96 | len = sizeof(struct udphdr) + sizeof(struct guehdr); |
97 | if (!pskb_may_pull(skb, len)) | ||
97 | return -EINVAL; | 98 | return -EINVAL; |
98 | 99 | ||
99 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | 100 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; |
@@ -128,6 +129,10 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
128 | 129 | ||
129 | optlen = guehdr->hlen << 2; | 130 | optlen = guehdr->hlen << 2; |
130 | 131 | ||
132 | if (!pskb_may_pull(skb, len + optlen)) | ||
133 | return -EINVAL; | ||
134 | |||
135 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | ||
131 | if (validate_gue_flags(guehdr, optlen)) | 136 | if (validate_gue_flags(guehdr, optlen)) |
132 | return -EINVAL; | 137 | return -EINVAL; |
133 | 138 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4a1a86e9c0e9..c465d8a102f2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -922,6 +922,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
922 | __u8 dsfield = false; | 922 | __u8 dsfield = false; |
923 | struct flowi6 fl6; | 923 | struct flowi6 fl6; |
924 | int err = -EINVAL; | 924 | int err = -EINVAL; |
925 | __be16 proto; | ||
925 | __u32 mtu; | 926 | __u32 mtu; |
926 | int nhoff; | 927 | int nhoff; |
927 | int thoff; | 928 | int thoff; |
@@ -1035,8 +1036,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | /* Push GRE header. */ | 1038 | /* Push GRE header. */ |
1038 | gre_build_header(skb, 8, TUNNEL_SEQ, | 1039 | proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN) |
1039 | htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); | 1040 | : htons(ETH_P_ERSPAN2); |
1041 | gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); | ||
1040 | 1042 | ||
1041 | /* TooBig packet may have updated dst->dev's mtu */ | 1043 | /* TooBig packet may have updated dst->dev's mtu */ |
1042 | if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) | 1044 | if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) |
@@ -1169,6 +1171,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, | |||
1169 | t->parms.i_flags = p->i_flags; | 1171 | t->parms.i_flags = p->i_flags; |
1170 | t->parms.o_flags = p->o_flags; | 1172 | t->parms.o_flags = p->o_flags; |
1171 | t->parms.fwmark = p->fwmark; | 1173 | t->parms.fwmark = p->fwmark; |
1174 | t->parms.erspan_ver = p->erspan_ver; | ||
1175 | t->parms.index = p->index; | ||
1176 | t->parms.dir = p->dir; | ||
1177 | t->parms.hwid = p->hwid; | ||
1172 | dst_cache_reset(&t->dst_cache); | 1178 | dst_cache_reset(&t->dst_cache); |
1173 | } | 1179 | } |
1174 | 1180 | ||
@@ -2025,9 +2031,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | |||
2025 | struct nlattr *data[], | 2031 | struct nlattr *data[], |
2026 | struct netlink_ext_ack *extack) | 2032 | struct netlink_ext_ack *extack) |
2027 | { | 2033 | { |
2028 | struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); | 2034 | struct ip6_tnl *t = netdev_priv(dev); |
2035 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | ||
2029 | struct __ip6_tnl_parm p; | 2036 | struct __ip6_tnl_parm p; |
2030 | struct ip6_tnl *t; | ||
2031 | 2037 | ||
2032 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); | 2038 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); |
2033 | if (IS_ERR(t)) | 2039 | if (IS_ERR(t)) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8e11f9a557b1..dc066fdf7e46 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -4251,17 +4251,6 @@ struct rt6_nh { | |||
4251 | struct list_head next; | 4251 | struct list_head next; |
4252 | }; | 4252 | }; |
4253 | 4253 | ||
4254 | static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) | ||
4255 | { | ||
4256 | struct rt6_nh *nh; | ||
4257 | |||
4258 | list_for_each_entry(nh, rt6_nh_list, next) { | ||
4259 | pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", | ||
4260 | &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, | ||
4261 | nh->r_cfg.fc_ifindex); | ||
4262 | } | ||
4263 | } | ||
4264 | |||
4265 | static int ip6_route_info_append(struct net *net, | 4254 | static int ip6_route_info_append(struct net *net, |
4266 | struct list_head *rt6_nh_list, | 4255 | struct list_head *rt6_nh_list, |
4267 | struct fib6_info *rt, | 4256 | struct fib6_info *rt, |
@@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
4407 | nh->fib6_info = NULL; | 4396 | nh->fib6_info = NULL; |
4408 | if (err) { | 4397 | if (err) { |
4409 | if (replace && nhn) | 4398 | if (replace && nhn) |
4410 | ip6_print_replace_route_err(&rt6_nh_list); | 4399 | NL_SET_ERR_MSG_MOD(extack, |
4400 | "multipath route replace failed (check consistency of installed routes)"); | ||
4411 | err_nh = nh; | 4401 | err_nh = nh; |
4412 | goto add_errout; | 4402 | goto add_errout; |
4413 | } | 4403 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7c3505006f8e..2596ffdeebea 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
102 | return udp_lib_get_port(sk, snum, hash2_nulladdr); | 102 | return udp_lib_get_port(sk, snum, hash2_nulladdr); |
103 | } | 103 | } |
104 | 104 | ||
105 | static void udp_v6_rehash(struct sock *sk) | 105 | void udp_v6_rehash(struct sock *sk) |
106 | { | 106 | { |
107 | u16 new_hash = ipv6_portaddr_hash(sock_net(sk), | 107 | u16 new_hash = ipv6_portaddr_hash(sock_net(sk), |
108 | &sk->sk_v6_rcv_saddr, | 108 | &sk->sk_v6_rcv_saddr, |
@@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, | |||
1132 | const int hlen = skb_network_header_len(skb) + | 1132 | const int hlen = skb_network_header_len(skb) + |
1133 | sizeof(struct udphdr); | 1133 | sizeof(struct udphdr); |
1134 | 1134 | ||
1135 | if (hlen + cork->gso_size > cork->fragsize) | 1135 | if (hlen + cork->gso_size > cork->fragsize) { |
1136 | kfree_skb(skb); | ||
1136 | return -EINVAL; | 1137 | return -EINVAL; |
1137 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | 1138 | } |
1139 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
1140 | kfree_skb(skb); | ||
1138 | return -EINVAL; | 1141 | return -EINVAL; |
1139 | if (udp_sk(sk)->no_check6_tx) | 1142 | } |
1143 | if (udp_sk(sk)->no_check6_tx) { | ||
1144 | kfree_skb(skb); | ||
1140 | return -EINVAL; | 1145 | return -EINVAL; |
1146 | } | ||
1141 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | 1147 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || |
1142 | dst_xfrm(skb_dst(skb))) | 1148 | dst_xfrm(skb_dst(skb))) { |
1149 | kfree_skb(skb); | ||
1143 | return -EIO; | 1150 | return -EIO; |
1151 | } | ||
1144 | 1152 | ||
1145 | skb_shinfo(skb)->gso_size = cork->gso_size; | 1153 | skb_shinfo(skb)->gso_size = cork->gso_size; |
1146 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | 1154 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 5730e6503cb4..20e324b6f358 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, | |||
13 | __be32, struct udp_table *); | 13 | __be32, struct udp_table *); |
14 | 14 | ||
15 | int udp_v6_get_port(struct sock *sk, unsigned short snum); | 15 | int udp_v6_get_port(struct sock *sk, unsigned short snum); |
16 | void udp_v6_rehash(struct sock *sk); | ||
16 | 17 | ||
17 | int udpv6_getsockopt(struct sock *sk, int level, int optname, | 18 | int udpv6_getsockopt(struct sock *sk, int level, int optname, |
18 | char __user *optval, int __user *optlen); | 19 | char __user *optval, int __user *optlen); |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index a125aebc29e5..f35907836444 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = { | |||
49 | .recvmsg = udpv6_recvmsg, | 49 | .recvmsg = udpv6_recvmsg, |
50 | .hash = udp_lib_hash, | 50 | .hash = udp_lib_hash, |
51 | .unhash = udp_lib_unhash, | 51 | .unhash = udp_lib_unhash, |
52 | .rehash = udp_v6_rehash, | ||
52 | .get_port = udp_v6_get_port, | 53 | .get_port = udp_v6_get_port, |
53 | .memory_allocated = &udp_memory_allocated, | 54 | .memory_allocated = &udp_memory_allocated, |
54 | .sysctl_mem = sysctl_udp_mem, | 55 | .sysctl_mem = sysctl_udp_mem, |
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index fa0844e2a68d..c0c72ae9df42 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c | |||
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, | |||
28 | { | 28 | { |
29 | struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; | 29 | struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; |
30 | struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; | 30 | struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; |
31 | struct dst_entry *other_dst = route->tuple[!dir].dst; | ||
31 | struct dst_entry *dst = route->tuple[dir].dst; | 32 | struct dst_entry *dst = route->tuple[dir].dst; |
32 | 33 | ||
33 | ft->dir = dir; | 34 | ft->dir = dir; |
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, | |||
50 | ft->src_port = ctt->src.u.tcp.port; | 51 | ft->src_port = ctt->src.u.tcp.port; |
51 | ft->dst_port = ctt->dst.u.tcp.port; | 52 | ft->dst_port = ctt->dst.u.tcp.port; |
52 | 53 | ||
53 | ft->iifidx = route->tuple[dir].ifindex; | 54 | ft->iifidx = other_dst->dev->ifindex; |
54 | ft->oifidx = route->tuple[!dir].ifindex; | 55 | ft->oifidx = dst->dev->ifindex; |
55 | ft->dst_cache = dst; | 56 | ft->dst_cache = dst; |
56 | } | 57 | } |
57 | 58 | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2b0a93300dd7..fb07f6cfc719 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, | |||
2304 | struct net *net = sock_net(skb->sk); | 2304 | struct net *net = sock_net(skb->sk); |
2305 | unsigned int s_idx = cb->args[0]; | 2305 | unsigned int s_idx = cb->args[0]; |
2306 | const struct nft_rule *rule; | 2306 | const struct nft_rule *rule; |
2307 | int rc = 1; | ||
2308 | 2307 | ||
2309 | list_for_each_entry_rcu(rule, &chain->rules, list) { | 2308 | list_for_each_entry_rcu(rule, &chain->rules, list) { |
2310 | if (!nft_is_active(net, rule)) | 2309 | if (!nft_is_active(net, rule)) |
@@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, | |||
2321 | NLM_F_MULTI | NLM_F_APPEND, | 2320 | NLM_F_MULTI | NLM_F_APPEND, |
2322 | table->family, | 2321 | table->family, |
2323 | table, chain, rule) < 0) | 2322 | table, chain, rule) < 0) |
2324 | goto out_unfinished; | 2323 | return 1; |
2325 | 2324 | ||
2326 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); | 2325 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); |
2327 | cont: | 2326 | cont: |
2328 | (*idx)++; | 2327 | (*idx)++; |
2329 | } | 2328 | } |
2330 | rc = 0; | 2329 | return 0; |
2331 | out_unfinished: | ||
2332 | cb->args[0] = *idx; | ||
2333 | return rc; | ||
2334 | } | 2330 | } |
2335 | 2331 | ||
2336 | static int nf_tables_dump_rules(struct sk_buff *skb, | 2332 | static int nf_tables_dump_rules(struct sk_buff *skb, |
@@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, | |||
2354 | if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) | 2350 | if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) |
2355 | continue; | 2351 | continue; |
2356 | 2352 | ||
2357 | if (ctx && ctx->chain) { | 2353 | if (ctx && ctx->table && ctx->chain) { |
2358 | struct rhlist_head *list, *tmp; | 2354 | struct rhlist_head *list, *tmp; |
2359 | 2355 | ||
2360 | list = rhltable_lookup(&table->chains_ht, ctx->chain, | 2356 | list = rhltable_lookup(&table->chains_ht, ctx->chain, |
@@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb, | |||
2382 | } | 2378 | } |
2383 | done: | 2379 | done: |
2384 | rcu_read_unlock(); | 2380 | rcu_read_unlock(); |
2381 | |||
2382 | cb->args[0] = idx; | ||
2385 | return skb->len; | 2383 | return skb->len; |
2386 | } | 2384 | } |
2387 | 2385 | ||
@@ -4508,6 +4506,8 @@ err6: | |||
4508 | err5: | 4506 | err5: |
4509 | kfree(trans); | 4507 | kfree(trans); |
4510 | err4: | 4508 | err4: |
4509 | if (obj) | ||
4510 | obj->use--; | ||
4511 | kfree(elem.priv); | 4511 | kfree(elem.priv); |
4512 | err3: | 4512 | err3: |
4513 | if (nla[NFTA_SET_ELEM_DATA] != NULL) | 4513 | if (nla[NFTA_SET_ELEM_DATA] != NULL) |
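Three fixes land in nf_tables_api.c: the rule dump helper returns 0/1 directly while the caller now stores the resume index into cb->args[0] unconditionally, the per-chain dump filter additionally requires ctx->table before the rhltable lookup, and the set-element error path drops the object reference it took (obj->use--) so a failed insertion does not pin the object. A minimal sketch of that last point, rolling back a counted reference on the unwind path; the types here are invented for illustration:

```c
#include <stdio.h>

struct obj {
	int use;    /* reference count, in the spirit of nft_object::use */
};

static int insert_elem(struct obj *o, int fail_later)
{
	if (o)
		o->use++;           /* take a reference for the new element */

	if (fail_later) {
		if (o)
			o->use--;   /* error unwind must drop it again */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct obj o = { .use = 0 };

	insert_elem(&o, 0);
	insert_elem(&o, 1);
	printf("use count = %d (one successful element)\n", o.use);
	return 0;
}
```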
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 974525eb92df..6e6b9adf7d38 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <net/netfilter/nf_conntrack_core.h> | 12 | #include <net/netfilter/nf_conntrack_core.h> |
13 | #include <linux/netfilter/nf_conntrack_common.h> | 13 | #include <linux/netfilter/nf_conntrack_common.h> |
14 | #include <net/netfilter/nf_flow_table.h> | 14 | #include <net/netfilter/nf_flow_table.h> |
15 | #include <net/netfilter/nf_conntrack_helper.h> | ||
15 | 16 | ||
16 | struct nft_flow_offload { | 17 | struct nft_flow_offload { |
17 | struct nft_flowtable *flowtable; | 18 | struct nft_flowtable *flowtable; |
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, | |||
29 | memset(&fl, 0, sizeof(fl)); | 30 | memset(&fl, 0, sizeof(fl)); |
30 | switch (nft_pf(pkt)) { | 31 | switch (nft_pf(pkt)) { |
31 | case NFPROTO_IPV4: | 32 | case NFPROTO_IPV4: |
32 | fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; | 33 | fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; |
34 | fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex; | ||
33 | break; | 35 | break; |
34 | case NFPROTO_IPV6: | 36 | case NFPROTO_IPV6: |
35 | fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; | 37 | fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; |
38 | fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex; | ||
36 | break; | 39 | break; |
37 | } | 40 | } |
38 | 41 | ||
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, | |||
41 | return -ENOENT; | 44 | return -ENOENT; |
42 | 45 | ||
43 | route->tuple[dir].dst = this_dst; | 46 | route->tuple[dir].dst = this_dst; |
44 | route->tuple[dir].ifindex = nft_in(pkt)->ifindex; | ||
45 | route->tuple[!dir].dst = other_dst; | 47 | route->tuple[!dir].dst = other_dst; |
46 | route->tuple[!dir].ifindex = nft_out(pkt)->ifindex; | ||
47 | 48 | ||
48 | return 0; | 49 | return 0; |
49 | } | 50 | } |
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, | |||
66 | { | 67 | { |
67 | struct nft_flow_offload *priv = nft_expr_priv(expr); | 68 | struct nft_flow_offload *priv = nft_expr_priv(expr); |
68 | struct nf_flowtable *flowtable = &priv->flowtable->data; | 69 | struct nf_flowtable *flowtable = &priv->flowtable->data; |
70 | const struct nf_conn_help *help; | ||
69 | enum ip_conntrack_info ctinfo; | 71 | enum ip_conntrack_info ctinfo; |
70 | struct nf_flow_route route; | 72 | struct nf_flow_route route; |
71 | struct flow_offload *flow; | 73 | struct flow_offload *flow; |
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, | |||
88 | goto out; | 90 | goto out; |
89 | } | 91 | } |
90 | 92 | ||
91 | if (test_bit(IPS_HELPER_BIT, &ct->status)) | 93 | help = nfct_help(ct); |
94 | if (help) | ||
92 | goto out; | 95 | goto out; |
93 | 96 | ||
94 | if (ctinfo == IP_CT_NEW || | 97 | if (ctinfo == IP_CT_NEW || |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 435a4bdf8f89..691da853bef5 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, | |||
500 | return -EINVAL; | 500 | return -EINVAL; |
501 | } | 501 | } |
502 | 502 | ||
503 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | 503 | if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { |
504 | attrs |= 1 << type; | 504 | attrs |= 1 << type; |
505 | a[type] = nla; | 505 | a[type] = nla; |
506 | } | 506 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d0945253f43b..3b1a78906bc0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2887 | goto out_free; | 2887 | goto out_free; |
2888 | } else if (reserve) { | 2888 | } else if (reserve) { |
2889 | skb_reserve(skb, -reserve); | 2889 | skb_reserve(skb, -reserve); |
2890 | if (len < reserve) | 2890 | if (len < reserve + sizeof(struct ipv6hdr) && |
2891 | dev->min_header_len != dev->hard_header_len) | ||
2891 | skb_reset_network_header(skb); | 2892 | skb_reset_network_header(skb); |
2892 | } | 2893 | } |
2893 | 2894 | ||
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a2522f9d71e2..96f2952bbdfd 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call) | |||
419 | EXPORT_SYMBOL(rxrpc_kernel_get_epoch); | 419 | EXPORT_SYMBOL(rxrpc_kernel_get_epoch); |
420 | 420 | ||
421 | /** | 421 | /** |
422 | * rxrpc_kernel_check_call - Check a call's state | ||
423 | * @sock: The socket the call is on | ||
424 | * @call: The call to check | ||
425 | * @_compl: Where to store the completion state | ||
426 | * @_abort_code: Where to store any abort code | ||
427 | * | ||
428 | * Allow a kernel service to query the state of a call and find out the manner | ||
429 | * of its termination if it has completed. Returns -EINPROGRESS if the call is | ||
430 | * still going, 0 if the call finished successfully, -ECONNABORTED if the call | ||
431 | * was aborted and an appropriate error if the call failed in some other way. | ||
432 | */ | ||
433 | int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, | ||
434 | enum rxrpc_call_completion *_compl, u32 *_abort_code) | ||
435 | { | ||
436 | if (call->state != RXRPC_CALL_COMPLETE) | ||
437 | return -EINPROGRESS; | ||
438 | smp_rmb(); | ||
439 | *_compl = call->completion; | ||
440 | *_abort_code = call->abort_code; | ||
441 | return call->error; | ||
442 | } | ||
443 | EXPORT_SYMBOL(rxrpc_kernel_check_call); | ||
444 | |||
445 | /** | ||
446 | * rxrpc_kernel_retry_call - Allow a kernel service to retry a call | ||
447 | * @sock: The socket the call is on | ||
448 | * @call: The call to retry | ||
449 | * @srx: The address of the peer to contact | ||
450 | * @key: The security context to use (defaults to socket setting) | ||
451 | * | ||
452 | * Allow a kernel service to try resending a client call that failed due to a | ||
453 | * network error to a new address. The Tx queue is maintained intact, thereby | ||
454 | * relieving the need to re-encrypt any request data that has already been | ||
455 | * buffered. | ||
456 | */ | ||
457 | int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call, | ||
458 | struct sockaddr_rxrpc *srx, struct key *key) | ||
459 | { | ||
460 | struct rxrpc_conn_parameters cp; | ||
461 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
462 | int ret; | ||
463 | |||
464 | _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); | ||
465 | |||
466 | if (!key) | ||
467 | key = rx->key; | ||
468 | if (key && !key->payload.data[0]) | ||
469 | key = NULL; /* a no-security key */ | ||
470 | |||
471 | memset(&cp, 0, sizeof(cp)); | ||
472 | cp.local = rx->local; | ||
473 | cp.key = key; | ||
474 | cp.security_level = 0; | ||
475 | cp.exclusive = false; | ||
476 | cp.service_id = srx->srx_service; | ||
477 | |||
478 | mutex_lock(&call->user_mutex); | ||
479 | |||
480 | ret = rxrpc_prepare_call_for_retry(rx, call); | ||
481 | if (ret == 0) | ||
482 | ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL); | ||
483 | |||
484 | mutex_unlock(&call->user_mutex); | ||
485 | rxrpc_put_peer(cp.peer); | ||
486 | _leave(" = %d", ret); | ||
487 | return ret; | ||
488 | } | ||
489 | EXPORT_SYMBOL(rxrpc_kernel_retry_call); | ||
490 | |||
491 | /** | ||
492 | * rxrpc_kernel_new_call_notification - Get notifications of new calls | 422 | * rxrpc_kernel_new_call_notification - Get notifications of new calls |
493 | * @sock: The socket to intercept received messages on | 423 | * @sock: The socket to intercept received messages on |
494 | * @notify_new_call: Function to be called when new calls appear | 424 | * @notify_new_call: Function to be called when new calls appear |
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index bc628acf4f4f..4b1a534d290a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -476,7 +476,6 @@ enum rxrpc_call_flag { | |||
476 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ | 476 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ |
477 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ | 477 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ |
478 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ | 478 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ |
479 | RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */ | ||
480 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ | 479 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ |
481 | RXRPC_CALL_PINGING, /* Ping in process */ | 480 | RXRPC_CALL_PINGING, /* Ping in process */ |
482 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ | 481 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ |
@@ -518,6 +517,18 @@ enum rxrpc_call_state { | |||
518 | }; | 517 | }; |
519 | 518 | ||
520 | /* | 519 | /* |
520 | * Call completion condition (state == RXRPC_CALL_COMPLETE). | ||
521 | */ | ||
522 | enum rxrpc_call_completion { | ||
523 | RXRPC_CALL_SUCCEEDED, /* - Normal termination */ | ||
524 | RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ | ||
525 | RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ | ||
526 | RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ | ||
527 | RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ | ||
528 | NR__RXRPC_CALL_COMPLETIONS | ||
529 | }; | ||
530 | |||
531 | /* | ||
521 | * Call Tx congestion management modes. | 532 | * Call Tx congestion management modes. |
522 | */ | 533 | */ |
523 | enum rxrpc_congest_mode { | 534 | enum rxrpc_congest_mode { |
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, | |||
761 | struct sockaddr_rxrpc *, | 772 | struct sockaddr_rxrpc *, |
762 | struct rxrpc_call_params *, gfp_t, | 773 | struct rxrpc_call_params *, gfp_t, |
763 | unsigned int); | 774 | unsigned int); |
764 | int rxrpc_retry_client_call(struct rxrpc_sock *, | ||
765 | struct rxrpc_call *, | ||
766 | struct rxrpc_conn_parameters *, | ||
767 | struct sockaddr_rxrpc *, | ||
768 | gfp_t); | ||
769 | void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, | 775 | void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, |
770 | struct sk_buff *); | 776 | struct sk_buff *); |
771 | void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); | 777 | void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); |
772 | int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *); | ||
773 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *); | 778 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *); |
774 | bool __rxrpc_queue_call(struct rxrpc_call *); | 779 | bool __rxrpc_queue_call(struct rxrpc_call *); |
775 | bool rxrpc_queue_call(struct rxrpc_call *); | 780 | bool rxrpc_queue_call(struct rxrpc_call *); |
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8f1a8f85b1f9..8aa2937b069f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
@@ -325,48 +325,6 @@ error: | |||
325 | } | 325 | } |
326 | 326 | ||
327 | /* | 327 | /* |
328 | * Retry a call to a new address. It is expected that the Tx queue of the call | ||
329 | * will contain data previously packaged for an old call. | ||
330 | */ | ||
331 | int rxrpc_retry_client_call(struct rxrpc_sock *rx, | ||
332 | struct rxrpc_call *call, | ||
333 | struct rxrpc_conn_parameters *cp, | ||
334 | struct sockaddr_rxrpc *srx, | ||
335 | gfp_t gfp) | ||
336 | { | ||
337 | const void *here = __builtin_return_address(0); | ||
338 | int ret; | ||
339 | |||
340 | /* Set up or get a connection record and set the protocol parameters, | ||
341 | * including channel number and call ID. | ||
342 | */ | ||
343 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); | ||
344 | if (ret < 0) | ||
345 | goto error; | ||
346 | |||
347 | trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), | ||
348 | here, NULL); | ||
349 | |||
350 | rxrpc_start_call_timer(call); | ||
351 | |||
352 | _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); | ||
353 | |||
354 | if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) | ||
355 | rxrpc_queue_call(call); | ||
356 | |||
357 | _leave(" = 0"); | ||
358 | return 0; | ||
359 | |||
360 | error: | ||
361 | rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, | ||
362 | RX_CALL_DEAD, ret); | ||
363 | trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), | ||
364 | here, ERR_PTR(ret)); | ||
365 | _leave(" = %d", ret); | ||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * Set up an incoming call. call->conn points to the connection. | 328 | * Set up an incoming call. call->conn points to the connection. |
371 | * This is called in BH context and isn't allowed to fail. | 329 | * This is called in BH context and isn't allowed to fail. |
372 | */ | 330 | */ |
@@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) | |||
534 | } | 492 | } |
535 | 493 | ||
536 | /* | 494 | /* |
537 | * Prepare a kernel service call for retry. | ||
538 | */ | ||
539 | int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call) | ||
540 | { | ||
541 | const void *here = __builtin_return_address(0); | ||
542 | int i; | ||
543 | u8 last = 0; | ||
544 | |||
545 | _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); | ||
546 | |||
547 | trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), | ||
548 | here, (const void *)call->flags); | ||
549 | |||
550 | ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); | ||
551 | ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED); | ||
552 | ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED); | ||
553 | ASSERT(list_empty(&call->recvmsg_link)); | ||
554 | |||
555 | del_timer_sync(&call->timer); | ||
556 | |||
557 | _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn); | ||
558 | |||
559 | if (call->conn) | ||
560 | rxrpc_disconnect_call(call); | ||
561 | |||
562 | if (rxrpc_is_service_call(call) || | ||
563 | !call->tx_phase || | ||
564 | call->tx_hard_ack != 0 || | ||
565 | call->rx_hard_ack != 0 || | ||
566 | call->rx_top != 0) | ||
567 | return -EINVAL; | ||
568 | |||
569 | call->state = RXRPC_CALL_UNINITIALISED; | ||
570 | call->completion = RXRPC_CALL_SUCCEEDED; | ||
571 | call->call_id = 0; | ||
572 | call->cid = 0; | ||
573 | call->cong_cwnd = 0; | ||
574 | call->cong_extra = 0; | ||
575 | call->cong_ssthresh = 0; | ||
576 | call->cong_mode = 0; | ||
577 | call->cong_dup_acks = 0; | ||
578 | call->cong_cumul_acks = 0; | ||
579 | call->acks_lowest_nak = 0; | ||
580 | |||
581 | for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { | ||
582 | last |= call->rxtx_annotations[i]; | ||
583 | call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST; | ||
584 | call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS; | ||
585 | } | ||
586 | |||
587 | _leave(" = 0"); | ||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * release all the calls associated with a socket | 495 | * release all the calls associated with a socket |
593 | */ | 496 | */ |
594 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) | 497 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) |
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 521189f4b666..b2adfa825363 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c | |||
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, | |||
562 | clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); | 562 | clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); |
563 | 563 | ||
564 | write_lock_bh(&call->state_lock); | 564 | write_lock_bh(&call->state_lock); |
565 | if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) | 565 | call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; |
566 | call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; | ||
567 | else | ||
568 | call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; | ||
569 | write_unlock_bh(&call->state_lock); | 566 | write_unlock_bh(&call->state_lock); |
570 | 567 | ||
571 | rxrpc_see_call(call); | 568 | rxrpc_see_call(call); |
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index be01f9c5d963..46c9312085b1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
169 | 169 | ||
170 | ASSERTCMP(seq, ==, call->tx_top + 1); | 170 | ASSERTCMP(seq, ==, call->tx_top + 1); |
171 | 171 | ||
172 | if (last) { | 172 | if (last) |
173 | annotation |= RXRPC_TX_ANNO_LAST; | 173 | annotation |= RXRPC_TX_ANNO_LAST; |
174 | set_bit(RXRPC_CALL_TX_LASTQ, &call->flags); | ||
175 | } | ||
176 | 174 | ||
177 | /* We have to set the timestamp before queueing as the retransmit | 175 | /* We have to set the timestamp before queueing as the retransmit |
178 | * algorithm can see the packet as soon as we queue it. | 176 | * algorithm can see the packet as soon as we queue it. |
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
386 | call->tx_total_len -= copy; | 384 | call->tx_total_len -= copy; |
387 | } | 385 | } |
388 | 386 | ||
387 | /* check for the far side aborting the call or a network error | ||
388 | * occurring */ | ||
389 | if (call->state == RXRPC_CALL_COMPLETE) | ||
390 | goto call_terminated; | ||
391 | |||
389 | /* add the packet to the send queue if it's now full */ | 392 | /* add the packet to the send queue if it's now full */ |
390 | if (sp->remain <= 0 || | 393 | if (sp->remain <= 0 || |
391 | (msg_data_left(msg) == 0 && !more)) { | 394 | (msg_data_left(msg) == 0 && !more)) { |
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
425 | notify_end_tx); | 428 | notify_end_tx); |
426 | skb = NULL; | 429 | skb = NULL; |
427 | } | 430 | } |
428 | |||
429 | /* Check for the far side aborting the call or a network error | ||
430 | * occurring. If this happens, save any packet that was under | ||
431 | * construction so that in the case of a network error, the | ||
432 | * call can be retried or redirected. | ||
433 | */ | ||
434 | if (call->state == RXRPC_CALL_COMPLETE) { | ||
435 | ret = call->error; | ||
436 | goto out; | ||
437 | } | ||
438 | } while (msg_data_left(msg) > 0); | 431 | } while (msg_data_left(msg) > 0); |
439 | 432 | ||
440 | success: | 433 | success: |
@@ -444,6 +437,11 @@ out: | |||
444 | _leave(" = %d", ret); | 437 | _leave(" = %d", ret); |
445 | return ret; | 438 | return ret; |
446 | 439 | ||
440 | call_terminated: | ||
441 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | ||
442 | _leave(" = %d", call->error); | ||
443 | return call->error; | ||
444 | |||
447 | maybe_error: | 445 | maybe_error: |
448 | if (copied) | 446 | if (copied) |
449 | goto success; | 447 | goto success; |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index c3b90fadaff6..8b43fe0130f7 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { | |||
197 | [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, | 197 | [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, |
198 | }; | 198 | }; |
199 | 199 | ||
200 | static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) | ||
201 | { | ||
202 | if (!p) | ||
203 | return; | ||
204 | if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
205 | dst_release(&p->tcft_enc_metadata->dst); | ||
206 | kfree_rcu(p, rcu); | ||
207 | } | ||
208 | |||
200 | static int tunnel_key_init(struct net *net, struct nlattr *nla, | 209 | static int tunnel_key_init(struct net *net, struct nlattr *nla, |
201 | struct nlattr *est, struct tc_action **a, | 210 | struct nlattr *est, struct tc_action **a, |
202 | int ovr, int bind, bool rtnl_held, | 211 | int ovr, int bind, bool rtnl_held, |
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
360 | rcu_swap_protected(t->params, params_new, | 369 | rcu_swap_protected(t->params, params_new, |
361 | lockdep_is_held(&t->tcf_lock)); | 370 | lockdep_is_held(&t->tcf_lock)); |
362 | spin_unlock_bh(&t->tcf_lock); | 371 | spin_unlock_bh(&t->tcf_lock); |
363 | if (params_new) | 372 | tunnel_key_release_params(params_new); |
364 | kfree_rcu(params_new, rcu); | ||
365 | 373 | ||
366 | if (ret == ACT_P_CREATED) | 374 | if (ret == ACT_P_CREATED) |
367 | tcf_idr_insert(tn, *a); | 375 | tcf_idr_insert(tn, *a); |
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a) | |||
385 | struct tcf_tunnel_key_params *params; | 393 | struct tcf_tunnel_key_params *params; |
386 | 394 | ||
387 | params = rcu_dereference_protected(t->params, 1); | 395 | params = rcu_dereference_protected(t->params, 1); |
388 | if (params) { | 396 | tunnel_key_release_params(params); |
389 | if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
390 | dst_release(¶ms->tcft_enc_metadata->dst); | ||
391 | |||
392 | kfree_rcu(params, rcu); | ||
393 | } | ||
394 | } | 397 | } |
395 | 398 | ||
396 | static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, | 399 | static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, |
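The act_tunnel_key.c hunk factors parameter teardown into tunnel_key_release_params(), which tolerates NULL and drops the dst reference before kfree_rcu(); previously the init path only did kfree_rcu() on the swapped-out params, skipping the dst release. A hedged sketch of the "single NULL-safe release helper" pattern with hypothetical stand-in fields:

```c
#include <stdlib.h>
#include <stdio.h>

struct params {
	int   holds_ref;    /* stands in for tcft_action == ..._ACT_SET */
	char *metadata;     /* stands in for the dst/metadata reference */
};

/* One NULL-safe release helper so every caller frees things the same way. */
static void params_release(struct params *p)
{
	if (!p)
		return;
	if (p->holds_ref) {
		printf("dropping metadata reference\n");
		free(p->metadata);
	}
	free(p);
}

int main(void)
{
	struct params *p = calloc(1, sizeof(*p));
	if (!p)
		return 1;
	p->holds_ref = 1;
	p->metadata  = malloc(16);

	params_release(p);      /* normal teardown               */
	params_release(NULL);   /* old params may simply be absent */
	return 0;
}
```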
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8ce2a0507970..e2b5cb2eb34e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister); | |||
1277 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 1277 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
1278 | struct tcf_result *res, bool compat_mode) | 1278 | struct tcf_result *res, bool compat_mode) |
1279 | { | 1279 | { |
1280 | __be16 protocol = tc_skb_protocol(skb); | ||
1281 | #ifdef CONFIG_NET_CLS_ACT | 1280 | #ifdef CONFIG_NET_CLS_ACT |
1282 | const int max_reclassify_loop = 4; | 1281 | const int max_reclassify_loop = 4; |
1283 | const struct tcf_proto *orig_tp = tp; | 1282 | const struct tcf_proto *orig_tp = tp; |
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
1287 | reclassify: | 1286 | reclassify: |
1288 | #endif | 1287 | #endif |
1289 | for (; tp; tp = rcu_dereference_bh(tp->next)) { | 1288 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
1289 | __be16 protocol = tc_skb_protocol(skb); | ||
1290 | int err; | 1290 | int err; |
1291 | 1291 | ||
1292 | if (tp->protocol != protocol && | 1292 | if (tp->protocol != protocol && |
@@ -1319,7 +1319,6 @@ reset: | |||
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | tp = first_tp; | 1321 | tp = first_tp; |
1322 | protocol = tc_skb_protocol(skb); | ||
1323 | goto reclassify; | 1322 | goto reclassify; |
1324 | #endif | 1323 | #endif |
1325 | } | 1324 | } |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index dad04e710493..f6aa57fbbbaf 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1290 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 1290 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
1291 | struct cls_fl_filter *fold = *arg; | 1291 | struct cls_fl_filter *fold = *arg; |
1292 | struct cls_fl_filter *fnew; | 1292 | struct cls_fl_filter *fnew; |
1293 | struct fl_flow_mask *mask; | ||
1293 | struct nlattr **tb; | 1294 | struct nlattr **tb; |
1294 | struct fl_flow_mask mask = {}; | ||
1295 | int err; | 1295 | int err; |
1296 | 1296 | ||
1297 | if (!tca[TCA_OPTIONS]) | 1297 | if (!tca[TCA_OPTIONS]) |
1298 | return -EINVAL; | 1298 | return -EINVAL; |
1299 | 1299 | ||
1300 | tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | 1300 | mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); |
1301 | if (!tb) | 1301 | if (!mask) |
1302 | return -ENOBUFS; | 1302 | return -ENOBUFS; |
1303 | 1303 | ||
1304 | tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | ||
1305 | if (!tb) { | ||
1306 | err = -ENOBUFS; | ||
1307 | goto errout_mask_alloc; | ||
1308 | } | ||
1309 | |||
1304 | err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], | 1310 | err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], |
1305 | fl_policy, NULL); | 1311 | fl_policy, NULL); |
1306 | if (err < 0) | 1312 | if (err < 0) |
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1343 | } | 1349 | } |
1344 | } | 1350 | } |
1345 | 1351 | ||
1346 | err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, | 1352 | err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, |
1347 | tp->chain->tmplt_priv, extack); | 1353 | tp->chain->tmplt_priv, extack); |
1348 | if (err) | 1354 | if (err) |
1349 | goto errout_idr; | 1355 | goto errout_idr; |
1350 | 1356 | ||
1351 | err = fl_check_assign_mask(head, fnew, fold, &mask); | 1357 | err = fl_check_assign_mask(head, fnew, fold, mask); |
1352 | if (err) | 1358 | if (err) |
1353 | goto errout_idr; | 1359 | goto errout_idr; |
1354 | 1360 | ||
@@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1392 | } | 1398 | } |
1393 | 1399 | ||
1394 | kfree(tb); | 1400 | kfree(tb); |
1401 | kfree(mask); | ||
1395 | return 0; | 1402 | return 0; |
1396 | 1403 | ||
1397 | errout_mask: | 1404 | errout_mask: |
@@ -1405,6 +1412,8 @@ errout: | |||
1405 | kfree(fnew); | 1412 | kfree(fnew); |
1406 | errout_tb: | 1413 | errout_tb: |
1407 | kfree(tb); | 1414 | kfree(tb); |
1415 | errout_mask_alloc: | ||
1416 | kfree(mask); | ||
1408 | return err; | 1417 | return err; |
1409 | } | 1418 | } |
1410 | 1419 | ||
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index b910cd5c56f7..73940293700d 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
1667 | if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { | 1667 | if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { |
1668 | struct sk_buff *segs, *nskb; | 1668 | struct sk_buff *segs, *nskb; |
1669 | netdev_features_t features = netif_skb_features(skb); | 1669 | netdev_features_t features = netif_skb_features(skb); |
1670 | unsigned int slen = 0; | 1670 | unsigned int slen = 0, numsegs = 0; |
1671 | 1671 | ||
1672 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | 1672 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); |
1673 | if (IS_ERR_OR_NULL(segs)) | 1673 | if (IS_ERR_OR_NULL(segs)) |
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
1683 | flow_queue_add(flow, segs); | 1683 | flow_queue_add(flow, segs); |
1684 | 1684 | ||
1685 | sch->q.qlen++; | 1685 | sch->q.qlen++; |
1686 | numsegs++; | ||
1686 | slen += segs->len; | 1687 | slen += segs->len; |
1687 | q->buffer_used += segs->truesize; | 1688 | q->buffer_used += segs->truesize; |
1688 | b->packets++; | 1689 | b->packets++; |
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
1696 | sch->qstats.backlog += slen; | 1697 | sch->qstats.backlog += slen; |
1697 | q->avg_window_bytes += slen; | 1698 | q->avg_window_bytes += slen; |
1698 | 1699 | ||
1699 | qdisc_tree_reduce_backlog(sch, 1, len); | 1700 | qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); |
1700 | consume_skb(skb); | 1701 | consume_skb(skb); |
1701 | } else { | 1702 | } else { |
1702 | /* not splitting */ | 1703 | /* not splitting */ |
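When sch_cake splits a GSO super-packet it now counts the segments it produced and their total length, so qdisc_tree_reduce_backlog() is told the net change (1 - numsegs packets, len - slen bytes) instead of pretending the whole original packet left the queue. A tiny sketch of that delta arithmetic with made-up numbers:

```c
#include <stdio.h>

/* Net change reported up the qdisc tree after replacing one GSO packet
 * of `len` bytes with `numsegs` segments totalling `slen` bytes. */
static void report_backlog_delta(unsigned int len, unsigned int slen,
				 unsigned int numsegs)
{
	int dpkts  = 1 - (int)numsegs;   /* usually negative: the queue grew */
	int dbytes = (int)len - (int)slen;

	printf("reduce backlog by %d packets, %d bytes\n", dpkts, dbytes);
}

int main(void)
{
	/* One 3000-byte GSO skb split into three segments of ~1014 bytes. */
	report_backlog_delta(3000, 3042, 3);
	return 0;
}
```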
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index e689e11b6d0f..c6a502933fe7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c | |||
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
88 | struct Qdisc *child, | 88 | struct Qdisc *child, |
89 | struct sk_buff **to_free) | 89 | struct sk_buff **to_free) |
90 | { | 90 | { |
91 | unsigned int len = qdisc_pkt_len(skb); | ||
91 | int err; | 92 | int err; |
92 | 93 | ||
93 | err = child->ops->enqueue(skb, child, to_free); | 94 | err = child->ops->enqueue(skb, child, to_free); |
94 | if (err != NET_XMIT_SUCCESS) | 95 | if (err != NET_XMIT_SUCCESS) |
95 | return err; | 96 | return err; |
96 | 97 | ||
97 | qdisc_qstats_backlog_inc(sch, skb); | 98 | sch->qstats.backlog += len; |
98 | sch->q.qlen++; | 99 | sch->q.qlen++; |
99 | 100 | ||
100 | return NET_XMIT_SUCCESS; | 101 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index cdebaed0f8cf..09b800991065 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
350 | static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 350 | static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
351 | struct sk_buff **to_free) | 351 | struct sk_buff **to_free) |
352 | { | 352 | { |
353 | unsigned int len = qdisc_pkt_len(skb); | ||
353 | struct drr_sched *q = qdisc_priv(sch); | 354 | struct drr_sched *q = qdisc_priv(sch); |
354 | struct drr_class *cl; | 355 | struct drr_class *cl; |
355 | int err = 0; | 356 | int err = 0; |
357 | bool first; | ||
356 | 358 | ||
357 | cl = drr_classify(skb, sch, &err); | 359 | cl = drr_classify(skb, sch, &err); |
358 | if (cl == NULL) { | 360 | if (cl == NULL) { |
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
362 | return err; | 364 | return err; |
363 | } | 365 | } |
364 | 366 | ||
367 | first = !cl->qdisc->q.qlen; | ||
365 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 368 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
366 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 369 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
367 | if (net_xmit_drop_count(err)) { | 370 | if (net_xmit_drop_count(err)) { |
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
371 | return err; | 374 | return err; |
372 | } | 375 | } |
373 | 376 | ||
374 | if (cl->qdisc->q.qlen == 1) { | 377 | if (first) { |
375 | list_add_tail(&cl->alist, &q->active); | 378 | list_add_tail(&cl->alist, &q->active); |
376 | cl->deficit = cl->quantum; | 379 | cl->deficit = cl->quantum; |
377 | } | 380 | } |
378 | 381 | ||
379 | qdisc_qstats_backlog_inc(sch, skb); | 382 | sch->qstats.backlog += len; |
380 | sch->q.qlen++; | 383 | sch->q.qlen++; |
381 | return err; | 384 | return err; |
382 | } | 385 | } |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index f6f480784bc6..42471464ded3 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, | |||
199 | static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 199 | static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
200 | struct sk_buff **to_free) | 200 | struct sk_buff **to_free) |
201 | { | 201 | { |
202 | unsigned int len = qdisc_pkt_len(skb); | ||
202 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 203 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
203 | int err; | 204 | int err; |
204 | 205 | ||
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
271 | return err; | 272 | return err; |
272 | } | 273 | } |
273 | 274 | ||
274 | qdisc_qstats_backlog_inc(sch, skb); | 275 | sch->qstats.backlog += len; |
275 | sch->q.qlen++; | 276 | sch->q.qlen++; |
276 | 277 | ||
277 | return NET_XMIT_SUCCESS; | 278 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b18ec1f6de60..24cc220a3218 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
1539 | static int | 1539 | static int |
1540 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | 1540 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) |
1541 | { | 1541 | { |
1542 | unsigned int len = qdisc_pkt_len(skb); | ||
1542 | struct hfsc_class *cl; | 1543 | struct hfsc_class *cl; |
1543 | int uninitialized_var(err); | 1544 | int uninitialized_var(err); |
1545 | bool first; | ||
1544 | 1546 | ||
1545 | cl = hfsc_classify(skb, sch, &err); | 1547 | cl = hfsc_classify(skb, sch, &err); |
1546 | if (cl == NULL) { | 1548 | if (cl == NULL) { |
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
1550 | return err; | 1552 | return err; |
1551 | } | 1553 | } |
1552 | 1554 | ||
1555 | first = !cl->qdisc->q.qlen; | ||
1553 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 1556 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
1554 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 1557 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
1555 | if (net_xmit_drop_count(err)) { | 1558 | if (net_xmit_drop_count(err)) { |
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
1559 | return err; | 1562 | return err; |
1560 | } | 1563 | } |
1561 | 1564 | ||
1562 | if (cl->qdisc->q.qlen == 1) { | 1565 | if (first) { |
1563 | unsigned int len = qdisc_pkt_len(skb); | ||
1564 | |||
1565 | if (cl->cl_flags & HFSC_RSC) | 1566 | if (cl->cl_flags & HFSC_RSC) |
1566 | init_ed(cl, len); | 1567 | init_ed(cl, len); |
1567 | if (cl->cl_flags & HFSC_FSC) | 1568 | if (cl->cl_flags & HFSC_FSC) |
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
1576 | 1577 | ||
1577 | } | 1578 | } |
1578 | 1579 | ||
1579 | qdisc_qstats_backlog_inc(sch, skb); | 1580 | sch->qstats.backlog += len; |
1580 | sch->q.qlen++; | 1581 | sch->q.qlen++; |
1581 | 1582 | ||
1582 | return NET_XMIT_SUCCESS; | 1583 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 58b449490757..30f9da7e1076 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
581 | struct sk_buff **to_free) | 581 | struct sk_buff **to_free) |
582 | { | 582 | { |
583 | int uninitialized_var(ret); | 583 | int uninitialized_var(ret); |
584 | unsigned int len = qdisc_pkt_len(skb); | ||
584 | struct htb_sched *q = qdisc_priv(sch); | 585 | struct htb_sched *q = qdisc_priv(sch); |
585 | struct htb_class *cl = htb_classify(skb, sch, &ret); | 586 | struct htb_class *cl = htb_classify(skb, sch, &ret); |
586 | 587 | ||
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
610 | htb_activate(q, cl); | 611 | htb_activate(q, cl); |
611 | } | 612 | } |
612 | 613 | ||
613 | qdisc_qstats_backlog_inc(sch, skb); | 614 | sch->qstats.backlog += len; |
614 | sch->q.qlen++; | 615 | sch->q.qlen++; |
615 | return NET_XMIT_SUCCESS; | 616 | return NET_XMIT_SUCCESS; |
616 | } | 617 | } |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index cdf68706e40f..847141cd900f 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
72 | static int | 72 | static int |
73 | prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | 73 | prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) |
74 | { | 74 | { |
75 | unsigned int len = qdisc_pkt_len(skb); | ||
75 | struct Qdisc *qdisc; | 76 | struct Qdisc *qdisc; |
76 | int ret; | 77 | int ret; |
77 | 78 | ||
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
88 | 89 | ||
89 | ret = qdisc_enqueue(skb, qdisc, to_free); | 90 | ret = qdisc_enqueue(skb, qdisc, to_free); |
90 | if (ret == NET_XMIT_SUCCESS) { | 91 | if (ret == NET_XMIT_SUCCESS) { |
91 | qdisc_qstats_backlog_inc(sch, skb); | 92 | sch->qstats.backlog += len; |
92 | sch->q.qlen++; | 93 | sch->q.qlen++; |
93 | return NET_XMIT_SUCCESS; | 94 | return NET_XMIT_SUCCESS; |
94 | } | 95 | } |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index dc37c4ead439..29f5c4a24688 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) | |||
1210 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 1210 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
1211 | struct sk_buff **to_free) | 1211 | struct sk_buff **to_free) |
1212 | { | 1212 | { |
1213 | unsigned int len = qdisc_pkt_len(skb), gso_segs; | ||
1213 | struct qfq_sched *q = qdisc_priv(sch); | 1214 | struct qfq_sched *q = qdisc_priv(sch); |
1214 | struct qfq_class *cl; | 1215 | struct qfq_class *cl; |
1215 | struct qfq_aggregate *agg; | 1216 | struct qfq_aggregate *agg; |
1216 | int err = 0; | 1217 | int err = 0; |
1218 | bool first; | ||
1217 | 1219 | ||
1218 | cl = qfq_classify(skb, sch, &err); | 1220 | cl = qfq_classify(skb, sch, &err); |
1219 | if (cl == NULL) { | 1221 | if (cl == NULL) { |
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
1224 | } | 1226 | } |
1225 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); | 1227 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); |
1226 | 1228 | ||
1227 | if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { | 1229 | if (unlikely(cl->agg->lmax < len)) { |
1228 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", | 1230 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", |
1229 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); | 1231 | cl->agg->lmax, len, cl->common.classid); |
1230 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, | 1232 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); |
1231 | qdisc_pkt_len(skb)); | ||
1232 | if (err) { | 1233 | if (err) { |
1233 | cl->qstats.drops++; | 1234 | cl->qstats.drops++; |
1234 | return qdisc_drop(skb, sch, to_free); | 1235 | return qdisc_drop(skb, sch, to_free); |
1235 | } | 1236 | } |
1236 | } | 1237 | } |
1237 | 1238 | ||
1239 | gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; | ||
1240 | first = !cl->qdisc->q.qlen; | ||
1238 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 1241 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
1239 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 1242 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
1240 | pr_debug("qfq_enqueue: enqueue failed %d\n", err); | 1243 | pr_debug("qfq_enqueue: enqueue failed %d\n", err); |
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
1245 | return err; | 1248 | return err; |
1246 | } | 1249 | } |
1247 | 1250 | ||
1248 | bstats_update(&cl->bstats, skb); | 1251 | cl->bstats.bytes += len; |
1249 | qdisc_qstats_backlog_inc(sch, skb); | 1252 | cl->bstats.packets += gso_segs; |
1253 | sch->qstats.backlog += len; | ||
1250 | ++sch->q.qlen; | 1254 | ++sch->q.qlen; |
1251 | 1255 | ||
1252 | agg = cl->agg; | 1256 | agg = cl->agg; |
1253 | /* if the queue was not empty, then done here */ | 1257 | /* if the queue was not empty, then done here */ |
1254 | if (cl->qdisc->q.qlen != 1) { | 1258 | if (!first) { |
1255 | if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && | 1259 | if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && |
1256 | list_first_entry(&agg->active, struct qfq_class, alist) | 1260 | list_first_entry(&agg->active, struct qfq_class, alist) |
1257 | == cl && cl->deficit < qdisc_pkt_len(skb)) | 1261 | == cl && cl->deficit < len) |
1258 | list_move_tail(&cl->alist, &agg->active); | 1262 | list_move_tail(&cl->alist, &agg->active); |
1259 | 1263 | ||
1260 | return err; | 1264 | return err; |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 942dcca09cf2..7f272a9070c5 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
185 | struct sk_buff **to_free) | 185 | struct sk_buff **to_free) |
186 | { | 186 | { |
187 | struct tbf_sched_data *q = qdisc_priv(sch); | 187 | struct tbf_sched_data *q = qdisc_priv(sch); |
188 | unsigned int len = qdisc_pkt_len(skb); | ||
188 | int ret; | 189 | int ret; |
189 | 190 | ||
190 | if (qdisc_pkt_len(skb) > q->max_size) { | 191 | if (qdisc_pkt_len(skb) > q->max_size) { |
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
200 | return ret; | 201 | return ret; |
201 | } | 202 | } |
202 | 203 | ||
203 | qdisc_qstats_backlog_inc(sch, skb); | 204 | sch->qstats.backlog += len; |
204 | sch->q.qlen++; | 205 | sch->q.qlen++; |
205 | return NET_XMIT_SUCCESS; | 206 | return NET_XMIT_SUCCESS; |
206 | } | 207 | } |
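All of the qdisc hunks above (dsmark, hfsc, htb, prio, qfq, tbf) apply the same fix: anything derived from the skb — its length, its GSO segment count, whether the child queue was empty — is captured before qdisc_enqueue() hands the skb to the child, because the child may free or segment it, after which qdisc_pkt_len(skb) or qdisc_qstats_backlog_inc(sch, skb) would dereference a stale pointer or account the wrong size. A minimal sketch of the pattern, assuming a hypothetical foo_enqueue() with made-up foo_classify()/foo_activate() helpers standing in for the per-qdisc code:

    #include <net/pkt_sched.h>
    #include <net/sch_generic.h>

    static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
    {
            unsigned int len = qdisc_pkt_len(skb);        /* cache before the skb can vanish */
            struct Qdisc *child = foo_classify(skb, sch); /* hypothetical classifier */
            bool first = !child->q.qlen;                  /* remembered, cf. hfsc/qfq above */
            int err;

            err = qdisc_enqueue(skb, child, to_free);
            if (unlikely(err != NET_XMIT_SUCCESS)) {
                    if (net_xmit_drop_count(err))
                            qdisc_qstats_drop(sch);
                    return err;             /* skb already consumed or moved to *to_free */
            }

            if (first)
                    foo_activate(sch, child);   /* hypothetical, cf. htb_activate() */

            sch->qstats.backlog += len;     /* cached len, not qdisc_qstats_backlog_inc(sch, skb) */
            sch->q.qlen++;
            return NET_XMIT_SUCCESS;
    }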
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index b9ed271b7ef7..ed8e006dae85 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, | |||
97 | 97 | ||
98 | switch (ev) { | 98 | switch (ev) { |
99 | case NETDEV_UP: | 99 | case NETDEV_UP: |
100 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 100 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
101 | if (addr) { | 101 | if (addr) { |
102 | addr->a.v6.sin6_family = AF_INET6; | 102 | addr->a.v6.sin6_family = AF_INET6; |
103 | addr->a.v6.sin6_port = 0; | ||
104 | addr->a.v6.sin6_flowinfo = 0; | ||
105 | addr->a.v6.sin6_addr = ifa->addr; | 103 | addr->a.v6.sin6_addr = ifa->addr; |
106 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; | 104 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; |
107 | addr->valid = 1; | 105 | addr->valid = 1; |
@@ -434,7 +432,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, | |||
434 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); | 432 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
435 | if (addr) { | 433 | if (addr) { |
436 | addr->a.v6.sin6_family = AF_INET6; | 434 | addr->a.v6.sin6_family = AF_INET6; |
437 | addr->a.v6.sin6_port = 0; | ||
438 | addr->a.v6.sin6_addr = ifp->addr; | 435 | addr->a.v6.sin6_addr = ifp->addr; |
439 | addr->a.v6.sin6_scope_id = dev->ifindex; | 436 | addr->a.v6.sin6_scope_id = dev->ifindex; |
440 | addr->valid = 1; | 437 | addr->valid = 1; |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d5878ae55840..4e0eeb113ef5 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, | |||
101 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); | 101 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
102 | if (addr) { | 102 | if (addr) { |
103 | addr->a.v4.sin_family = AF_INET; | 103 | addr->a.v4.sin_family = AF_INET; |
104 | addr->a.v4.sin_port = 0; | ||
105 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 104 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
106 | addr->valid = 1; | 105 | addr->valid = 1; |
107 | INIT_LIST_HEAD(&addr->list); | 106 | INIT_LIST_HEAD(&addr->list); |
@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
776 | 775 | ||
777 | switch (ev) { | 776 | switch (ev) { |
778 | case NETDEV_UP: | 777 | case NETDEV_UP: |
779 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 778 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
780 | if (addr) { | 779 | if (addr) { |
781 | addr->a.v4.sin_family = AF_INET; | 780 | addr->a.v4.sin_family = AF_INET; |
782 | addr->a.v4.sin_port = 0; | ||
783 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 781 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
784 | addr->valid = 1; | 782 | addr->valid = 1; |
785 | spin_lock_bh(&net->sctp.local_addr_lock); | 783 | spin_lock_bh(&net->sctp.local_addr_lock); |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 1ff9768f5456..f3023bbc0b7f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -41,6 +41,9 @@ static unsigned long number_cred_unused; | |||
41 | 41 | ||
42 | static struct cred machine_cred = { | 42 | static struct cred machine_cred = { |
43 | .usage = ATOMIC_INIT(1), | 43 | .usage = ATOMIC_INIT(1), |
44 | #ifdef CONFIG_DEBUG_CREDENTIALS | ||
45 | .magic = CRED_MAGIC, | ||
46 | #endif | ||
44 | }; | 47 | }; |
45 | 48 | ||
46 | /* | 49 | /* |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc86713b32b6..1531b0219344 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
1549 | cred_len = p++; | 1549 | cred_len = p++; |
1550 | 1550 | ||
1551 | spin_lock(&ctx->gc_seq_lock); | 1551 | spin_lock(&ctx->gc_seq_lock); |
1552 | req->rq_seqno = ctx->gc_seq++; | 1552 | req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; |
1553 | spin_unlock(&ctx->gc_seq_lock); | 1553 | spin_unlock(&ctx->gc_seq_lock); |
1554 | if (req->rq_seqno == MAXSEQ) | ||
1555 | goto out_expired; | ||
1554 | 1556 | ||
1555 | *p++ = htonl((u32) RPC_GSS_VERSION); | 1557 | *p++ = htonl((u32) RPC_GSS_VERSION); |
1556 | *p++ = htonl((u32) ctx->gc_proc); | 1558 | *p++ = htonl((u32) ctx->gc_proc); |
@@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
1572 | mic.data = (u8 *)(p + 1); | 1574 | mic.data = (u8 *)(p + 1); |
1573 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 1575 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
1574 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 1576 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
1575 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | 1577 | goto out_expired; |
1576 | } else if (maj_stat != 0) { | 1578 | } else if (maj_stat != 0) { |
1577 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); | 1579 | pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); |
1580 | task->tk_status = -EIO; | ||
1578 | goto out_put_ctx; | 1581 | goto out_put_ctx; |
1579 | } | 1582 | } |
1580 | p = xdr_encode_opaque(p, NULL, mic.len); | 1583 | p = xdr_encode_opaque(p, NULL, mic.len); |
1581 | gss_put_ctx(ctx); | 1584 | gss_put_ctx(ctx); |
1582 | return p; | 1585 | return p; |
1586 | out_expired: | ||
1587 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | ||
1588 | task->tk_status = -EKEYEXPIRED; | ||
1583 | out_put_ctx: | 1589 | out_put_ctx: |
1584 | gss_put_ctx(ctx); | 1590 | gss_put_ctx(ctx); |
1585 | return NULL; | 1591 | return NULL; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 71d9599b5816..d7ec6132c046 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task) | |||
1739 | xdr_buf_init(&req->rq_rcv_buf, | 1739 | xdr_buf_init(&req->rq_rcv_buf, |
1740 | req->rq_rbuffer, | 1740 | req->rq_rbuffer, |
1741 | req->rq_rcvsize); | 1741 | req->rq_rcvsize); |
1742 | req->rq_bytes_sent = 0; | ||
1743 | 1742 | ||
1744 | p = rpc_encode_header(task); | 1743 | p = rpc_encode_header(task); |
1745 | if (p == NULL) { | 1744 | if (p == NULL) |
1746 | printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); | ||
1747 | rpc_exit(task, -EIO); | ||
1748 | return; | 1745 | return; |
1749 | } | ||
1750 | 1746 | ||
1751 | encode = task->tk_msg.rpc_proc->p_encode; | 1747 | encode = task->tk_msg.rpc_proc->p_encode; |
1752 | if (encode == NULL) | 1748 | if (encode == NULL) |
@@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task) | |||
1771 | /* Did the encode result in an error condition? */ | 1767 | /* Did the encode result in an error condition? */ |
1772 | if (task->tk_status != 0) { | 1768 | if (task->tk_status != 0) { |
1773 | /* Was the error nonfatal? */ | 1769 | /* Was the error nonfatal? */ |
1774 | if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) | 1770 | switch (task->tk_status) { |
1771 | case -EAGAIN: | ||
1772 | case -ENOMEM: | ||
1775 | rpc_delay(task, HZ >> 4); | 1773 | rpc_delay(task, HZ >> 4); |
1776 | else | 1774 | break; |
1775 | case -EKEYEXPIRED: | ||
1776 | task->tk_action = call_refresh; | ||
1777 | break; | ||
1778 | default: | ||
1777 | rpc_exit(task, task->tk_status); | 1779 | rpc_exit(task, task->tk_status); |
1780 | } | ||
1778 | return; | 1781 | return; |
1779 | } | 1782 | } |
1780 | 1783 | ||
@@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task) | |||
2336 | *p++ = htonl(clnt->cl_vers); /* program version */ | 2339 | *p++ = htonl(clnt->cl_vers); /* program version */ |
2337 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ | 2340 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ |
2338 | p = rpcauth_marshcred(task, p); | 2341 | p = rpcauth_marshcred(task, p); |
2339 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | 2342 | if (p) |
2343 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | ||
2340 | return p; | 2344 | return p; |
2341 | } | 2345 | } |
2342 | 2346 | ||
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 73547d17d3c6..f1ec2110efeb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
1151 | struct rpc_xprt *xprt = req->rq_xprt; | 1151 | struct rpc_xprt *xprt = req->rq_xprt; |
1152 | 1152 | ||
1153 | if (xprt_request_need_enqueue_transmit(task, req)) { | 1153 | if (xprt_request_need_enqueue_transmit(task, req)) { |
1154 | req->rq_bytes_sent = 0; | ||
1154 | spin_lock(&xprt->queue_lock); | 1155 | spin_lock(&xprt->queue_lock); |
1155 | /* | 1156 | /* |
1156 | * Requests that carry congestion control credits are added | 1157 | * Requests that carry congestion control credits are added |
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
1177 | INIT_LIST_HEAD(&req->rq_xmit2); | 1178 | INIT_LIST_HEAD(&req->rq_xmit2); |
1178 | goto out; | 1179 | goto out; |
1179 | } | 1180 | } |
1180 | } else { | 1181 | } else if (!req->rq_seqno) { |
1181 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { | 1182 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { |
1182 | if (pos->rq_task->tk_owner != task->tk_owner) | 1183 | if (pos->rq_task->tk_owner != task->tk_owner) |
1183 | continue; | 1184 | continue; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7749a2bf6887..4994e75945b8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) | |||
845 | for (i = 0; i <= buf->rb_sc_last; i++) { | 845 | for (i = 0; i <= buf->rb_sc_last; i++) { |
846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); | 846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
847 | if (!sc) | 847 | if (!sc) |
848 | goto out_destroy; | 848 | return -ENOMEM; |
849 | 849 | ||
850 | sc->sc_xprt = r_xprt; | 850 | sc->sc_xprt = r_xprt; |
851 | buf->rb_sc_ctxs[i] = sc; | 851 | buf->rb_sc_ctxs[i] = sc; |
852 | } | 852 | } |
853 | 853 | ||
854 | return 0; | 854 | return 0; |
855 | |||
856 | out_destroy: | ||
857 | rpcrdma_sendctxs_destroy(buf); | ||
858 | return -ENOMEM; | ||
859 | } | 855 | } |
860 | 856 | ||
861 | /* The sendctx queue is not guaranteed to have a size that is a | 857 | /* The sendctx queue is not guaranteed to have a size that is a |
@@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) | |||
1113 | WQ_MEM_RECLAIM | WQ_HIGHPRI, | 1109 | WQ_MEM_RECLAIM | WQ_HIGHPRI, |
1114 | 0, | 1110 | 0, |
1115 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); | 1111 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); |
1116 | if (!buf->rb_completion_wq) | 1112 | if (!buf->rb_completion_wq) { |
1113 | rc = -ENOMEM; | ||
1117 | goto out; | 1114 | goto out; |
1115 | } | ||
1118 | 1116 | ||
1119 | return 0; | 1117 | return 0; |
1120 | out: | 1118 | out: |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 13559e6a460b..7754aa3e434f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <net/udp.h> | 48 | #include <net/udp.h> |
49 | #include <net/tcp.h> | 49 | #include <net/tcp.h> |
50 | #include <linux/bvec.h> | 50 | #include <linux/bvec.h> |
51 | #include <linux/highmem.h> | ||
51 | #include <linux/uio.h> | 52 | #include <linux/uio.h> |
52 | 53 | ||
53 | #include <trace/events/sunrpc.h> | 54 | #include <trace/events/sunrpc.h> |
@@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, | |||
376 | return sock_recvmsg(sock, msg, flags); | 377 | return sock_recvmsg(sock, msg, flags); |
377 | } | 378 | } |
378 | 379 | ||
380 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | ||
381 | static void | ||
382 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
383 | { | ||
384 | struct bvec_iter bi = { | ||
385 | .bi_size = count, | ||
386 | }; | ||
387 | struct bio_vec bv; | ||
388 | |||
389 | bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); | ||
390 | for_each_bvec(bv, bvec, bi, bi) | ||
391 | flush_dcache_page(bv.bv_page); | ||
392 | } | ||
393 | #else | ||
394 | static inline void | ||
395 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
396 | { | ||
397 | } | ||
398 | #endif | ||
399 | |||
379 | static ssize_t | 400 | static ssize_t |
380 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | 401 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, |
381 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) | 402 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) |
@@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
409 | seek + buf->page_base); | 430 | seek + buf->page_base); |
410 | if (ret <= 0) | 431 | if (ret <= 0) |
411 | goto sock_err; | 432 | goto sock_err; |
433 | xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); | ||
412 | offset += ret - buf->page_base; | 434 | offset += ret - buf->page_base; |
413 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 435 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
414 | goto out; | 436 | goto out; |
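The xprtsock.c change adds a cache-coherency step: after sock_recvmsg() lands data in the pages behind an xdr_buf's bio_vec array, xs_flush_bvec() walks exactly the bytes that were received and calls flush_dcache_page() on each page touched, which matters on architectures that define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE (elsewhere it compiles to a no-op). The iteration shape, pulled out of the #ifdef plumbing as a standalone sketch (the function name here is illustrative):

    #include <linux/bvec.h>
    #include <linux/highmem.h>

    static void flush_received_pages(const struct bio_vec *bvec,
                                     size_t received, size_t seek)
    {
            struct bvec_iter bi = {
                    .bi_size = received,    /* cover only the bytes just read */
            };
            struct bio_vec bv;

            /* skip the whole pages an earlier partial read already covered */
            bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
            for_each_bvec(bv, bvec, bi, bi)
                    flush_dcache_page(bv.bv_page);
    }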
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 77e4b2418f30..4ad3586da8f0 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb) | |||
87 | return limit; | 87 | return limit; |
88 | } | 88 | } |
89 | 89 | ||
90 | static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv) | ||
91 | { | ||
92 | return TLV_GET_LEN(tlv) - TLV_SPACE(0); | ||
93 | } | ||
94 | |||
90 | static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) | 95 | static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) |
91 | { | 96 | { |
92 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); | 97 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); |
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str) | |||
166 | return buf; | 171 | return buf; |
167 | } | 172 | } |
168 | 173 | ||
174 | static inline bool string_is_valid(char *s, int len) | ||
175 | { | ||
176 | return memchr(s, '\0', len) ? true : false; | ||
177 | } | ||
178 | |||
169 | static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | 179 | static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, |
170 | struct tipc_nl_compat_msg *msg, | 180 | struct tipc_nl_compat_msg *msg, |
171 | struct sk_buff *arg) | 181 | struct sk_buff *arg) |
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
379 | struct nlattr *prop; | 389 | struct nlattr *prop; |
380 | struct nlattr *bearer; | 390 | struct nlattr *bearer; |
381 | struct tipc_bearer_config *b; | 391 | struct tipc_bearer_config *b; |
392 | int len; | ||
382 | 393 | ||
383 | b = (struct tipc_bearer_config *)TLV_DATA(msg->req); | 394 | b = (struct tipc_bearer_config *)TLV_DATA(msg->req); |
384 | 395 | ||
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
386 | if (!bearer) | 397 | if (!bearer) |
387 | return -EMSGSIZE; | 398 | return -EMSGSIZE; |
388 | 399 | ||
400 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | ||
401 | if (!string_is_valid(b->name, len)) | ||
402 | return -EINVAL; | ||
403 | |||
389 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) | 404 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) |
390 | return -EMSGSIZE; | 405 | return -EMSGSIZE; |
391 | 406 | ||
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, | |||
411 | { | 426 | { |
412 | char *name; | 427 | char *name; |
413 | struct nlattr *bearer; | 428 | struct nlattr *bearer; |
429 | int len; | ||
414 | 430 | ||
415 | name = (char *)TLV_DATA(msg->req); | 431 | name = (char *)TLV_DATA(msg->req); |
416 | 432 | ||
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, | |||
418 | if (!bearer) | 434 | if (!bearer) |
419 | return -EMSGSIZE; | 435 | return -EMSGSIZE; |
420 | 436 | ||
437 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | ||
438 | if (!string_is_valid(name, len)) | ||
439 | return -EINVAL; | ||
440 | |||
421 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) | 441 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) |
422 | return -EMSGSIZE; | 442 | return -EMSGSIZE; |
423 | 443 | ||
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
478 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; | 498 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; |
479 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; | 499 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; |
480 | int err; | 500 | int err; |
501 | int len; | ||
481 | 502 | ||
482 | if (!attrs[TIPC_NLA_LINK]) | 503 | if (!attrs[TIPC_NLA_LINK]) |
483 | return -EINVAL; | 504 | return -EINVAL; |
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
504 | return err; | 525 | return err; |
505 | 526 | ||
506 | name = (char *)TLV_DATA(msg->req); | 527 | name = (char *)TLV_DATA(msg->req); |
528 | |||
529 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
530 | if (!string_is_valid(name, len)) | ||
531 | return -EINVAL; | ||
532 | |||
507 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) | 533 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) |
508 | return 0; | 534 | return 0; |
509 | 535 | ||
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, | |||
644 | struct nlattr *prop; | 670 | struct nlattr *prop; |
645 | struct nlattr *media; | 671 | struct nlattr *media; |
646 | struct tipc_link_config *lc; | 672 | struct tipc_link_config *lc; |
673 | int len; | ||
647 | 674 | ||
648 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 675 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
649 | 676 | ||
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, | |||
651 | if (!media) | 678 | if (!media) |
652 | return -EMSGSIZE; | 679 | return -EMSGSIZE; |
653 | 680 | ||
681 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); | ||
682 | if (!string_is_valid(lc->name, len)) | ||
683 | return -EINVAL; | ||
684 | |||
654 | if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) | 685 | if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) |
655 | return -EMSGSIZE; | 686 | return -EMSGSIZE; |
656 | 687 | ||
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, | |||
671 | struct nlattr *prop; | 702 | struct nlattr *prop; |
672 | struct nlattr *bearer; | 703 | struct nlattr *bearer; |
673 | struct tipc_link_config *lc; | 704 | struct tipc_link_config *lc; |
705 | int len; | ||
674 | 706 | ||
675 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 707 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
676 | 708 | ||
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, | |||
678 | if (!bearer) | 710 | if (!bearer) |
679 | return -EMSGSIZE; | 711 | return -EMSGSIZE; |
680 | 712 | ||
713 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); | ||
714 | if (!string_is_valid(lc->name, len)) | ||
715 | return -EINVAL; | ||
716 | |||
681 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) | 717 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) |
682 | return -EMSGSIZE; | 718 | return -EMSGSIZE; |
683 | 719 | ||
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
726 | struct tipc_link_config *lc; | 762 | struct tipc_link_config *lc; |
727 | struct tipc_bearer *bearer; | 763 | struct tipc_bearer *bearer; |
728 | struct tipc_media *media; | 764 | struct tipc_media *media; |
765 | int len; | ||
729 | 766 | ||
730 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 767 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
731 | 768 | ||
769 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
770 | if (!string_is_valid(lc->name, len)) | ||
771 | return -EINVAL; | ||
772 | |||
732 | media = tipc_media_find(lc->name); | 773 | media = tipc_media_find(lc->name); |
733 | if (media) { | 774 | if (media) { |
734 | cmd->doit = &__tipc_nl_media_set; | 775 | cmd->doit = &__tipc_nl_media_set; |
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, | |||
750 | { | 791 | { |
751 | char *name; | 792 | char *name; |
752 | struct nlattr *link; | 793 | struct nlattr *link; |
794 | int len; | ||
753 | 795 | ||
754 | name = (char *)TLV_DATA(msg->req); | 796 | name = (char *)TLV_DATA(msg->req); |
755 | 797 | ||
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, | |||
757 | if (!link) | 799 | if (!link) |
758 | return -EMSGSIZE; | 800 | return -EMSGSIZE; |
759 | 801 | ||
802 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
803 | if (!string_is_valid(name, len)) | ||
804 | return -EINVAL; | ||
805 | |||
760 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) | 806 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) |
761 | return -EMSGSIZE; | 807 | return -EMSGSIZE; |
762 | 808 | ||
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) | |||
778 | }; | 824 | }; |
779 | 825 | ||
780 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | 826 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); |
827 | if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query)) | ||
828 | return -EINVAL; | ||
781 | 829 | ||
782 | depth = ntohl(ntq->depth); | 830 | depth = ntohl(ntq->depth); |
783 | 831 | ||
@@ -1208,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) | |||
1208 | } | 1256 | } |
1209 | 1257 | ||
1210 | len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); | 1258 | len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); |
1211 | if (len && !TLV_OK(msg.req, len)) { | 1259 | if (!len || !TLV_OK(msg.req, len)) { |
1212 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); | 1260 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); |
1213 | err = -EOPNOTSUPP; | 1261 | err = -EOPNOTSUPP; |
1214 | goto send; | 1262 | goto send; |
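Every tipc compat handler touched above performs the same two-step validation before a request-supplied name reaches nla_put_string(), strcmp(), or tipc_media_find(): bound the inspection window by both the bytes the TLV actually carries (TLV_GET_DATA_LEN()) and the maximum length the command accepts, then require a NUL terminator inside that window. A hypothetical helper that folds the two steps each handler now open-codes into one check (the len > 0 guard is extra caution, not in the original; pass the appropriate limit per command, e.g. TIPC_MAX_BEARER_NAME for bearer names):

    static bool tlv_name_ok(struct tipc_nl_compat_msg *msg,
                            const char *name, int name_max)
    {
            /* never read past the TLV payload or past the legal name size */
            int len = min_t(int, TLV_GET_DATA_LEN(msg->req), name_max);

            return len > 0 && memchr(name, '\0', len);
    }

    /* usage, mirroring tipc_nl_compat_bearer_disable() above:
     *      if (!tlv_name_ok(msg, name, TIPC_MAX_BEARER_NAME))
     *              return -EINVAL;
     */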
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index efb16f69bd2c..a457c0fbbef1 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) | |||
398 | ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); | 398 | ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); |
399 | if (ret == -EWOULDBLOCK) | 399 | if (ret == -EWOULDBLOCK) |
400 | return -EWOULDBLOCK; | 400 | return -EWOULDBLOCK; |
401 | if (ret > 0) { | 401 | if (ret == sizeof(s)) { |
402 | read_lock_bh(&sk->sk_callback_lock); | 402 | read_lock_bh(&sk->sk_callback_lock); |
403 | ret = tipc_conn_rcv_sub(srv, con, &s); | 403 | ret = tipc_conn_rcv_sub(srv, con, &s); |
404 | read_unlock_bh(&sk->sk_callback_lock); | 404 | read_unlock_bh(&sk->sk_callback_lock); |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index a264cf2accd0..d4de871e7d4d 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) | |||
41 | * not know if the device has more tx queues than rx, or the opposite. | 41 | * not know if the device has more tx queues than rx, or the opposite. |
42 | * This might also change during run time. | 42 | * This might also change during run time. |
43 | */ | 43 | */ |
44 | static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, | 44 | static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, |
45 | u16 queue_id) | 45 | u16 queue_id) |
46 | { | 46 | { |
47 | if (queue_id >= max_t(unsigned int, | ||
48 | dev->real_num_rx_queues, | ||
49 | dev->real_num_tx_queues)) | ||
50 | return -EINVAL; | ||
51 | |||
47 | if (queue_id < dev->real_num_rx_queues) | 52 | if (queue_id < dev->real_num_rx_queues) |
48 | dev->_rx[queue_id].umem = umem; | 53 | dev->_rx[queue_id].umem = umem; |
49 | if (queue_id < dev->real_num_tx_queues) | 54 | if (queue_id < dev->real_num_tx_queues) |
50 | dev->_tx[queue_id].umem = umem; | 55 | dev->_tx[queue_id].umem = umem; |
56 | |||
57 | return 0; | ||
51 | } | 58 | } |
52 | 59 | ||
53 | struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, | 60 | struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, |
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
88 | goto out_rtnl_unlock; | 95 | goto out_rtnl_unlock; |
89 | } | 96 | } |
90 | 97 | ||
91 | xdp_reg_umem_at_qid(dev, umem, queue_id); | 98 | err = xdp_reg_umem_at_qid(dev, umem, queue_id); |
99 | if (err) | ||
100 | goto out_rtnl_unlock; | ||
101 | |||
92 | umem->dev = dev; | 102 | umem->dev = dev; |
93 | umem->queue_id = queue_id; | 103 | umem->queue_id = queue_id; |
94 | if (force_copy) | 104 | if (force_copy) |
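The xdp_umem change turns a silent no-op into an error: registering a umem at a queue id beyond both real_num_rx_queues and real_num_tx_queues used to leave no queue pointing at the umem while the caller carried on as if binding had succeeded; now xdp_reg_umem_at_qid() rejects it with -EINVAL and xdp_umem_assign_dev() bails out before publishing anything. The ordering is the important part, shown in a trimmed sketch of the caller side:

    err = xdp_reg_umem_at_qid(dev, umem, queue_id); /* validate and register first */
    if (err)
            goto out_rtnl_unlock;   /* nothing published yet, nothing to unwind */

    umem->dev = dev;                /* only now is the binding made visible */
    umem->queue_id = queue_id;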
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 66ae15f27c70..db1a91dfa702 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c | |||
279 | -Wno-gnu-variable-sized-type-not-at-end \ | 279 | -Wno-gnu-variable-sized-type-not-at-end \ |
280 | -Wno-address-of-packed-member -Wno-tautological-compare \ | 280 | -Wno-address-of-packed-member -Wno-tautological-compare \ |
281 | -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ | 281 | -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ |
282 | -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ | ||
282 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ | 283 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ |
283 | ifeq ($(DWARF2BTF),y) | 284 | ifeq ($(DWARF2BTF),y) |
284 | $(BTF_PAHOLE) -J $@ | 285 | $(BTF_PAHOLE) -J $@ |
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h new file mode 100644 index 000000000000..5cd7c1d1a5d5 --- /dev/null +++ b/samples/bpf/asm_goto_workaround.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* Copyright (c) 2019 Facebook */ | ||
3 | #ifndef __ASM_GOTO_WORKAROUND_H | ||
4 | #define __ASM_GOTO_WORKAROUND_H | ||
5 | |||
6 | /* this will bring in asm_volatile_goto macro definition | ||
7 | * if enabled by compiler and config options. | ||
8 | */ | ||
9 | #include <linux/types.h> | ||
10 | |||
11 | #ifdef asm_volatile_goto | ||
12 | #undef asm_volatile_goto | ||
13 | #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") | ||
14 | #endif | ||
15 | |||
16 | #endif | ||
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 525bff667a52..30816037036e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
@@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d) | |||
24 | basetarget = $(basename $(notdir $@)) | 24 | basetarget = $(basename $(notdir $@)) |
25 | 25 | ||
26 | ### | 26 | ### |
27 | # filename of first prerequisite with directory and extension stripped | ||
28 | baseprereq = $(basename $(notdir $<)) | ||
29 | |||
30 | ### | ||
31 | # Escape single quote for use in echo statements | 27 | # Escape single quote for use in echo statements |
32 | escsq = $(subst $(squote),'\$(squote)',$1) | 28 | escsq = $(subst $(squote),'\$(squote)',$1) |
33 | 29 | ||
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c index de70b8470971..89c47f57d1ce 100644 --- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c +++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c | |||
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
13 | for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { | 13 | for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { |
14 | const char *sym; | 14 | const char *sym; |
15 | rtx body; | 15 | rtx body; |
16 | rtx masked_sp; | 16 | rtx mask, masked_sp; |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard | 19 | * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard |
@@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
33 | * produces the address of the copy of the stack canary value | 33 | * produces the address of the copy of the stack canary value |
34 | * stored in struct thread_info | 34 | * stored in struct thread_info |
35 | */ | 35 | */ |
36 | mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode))); | ||
36 | masked_sp = gen_reg_rtx(Pmode); | 37 | masked_sp = gen_reg_rtx(Pmode); |
37 | 38 | ||
38 | emit_insn_before(gen_rtx_SET(masked_sp, | 39 | emit_insn_before(gen_rtx_SET(masked_sp, |
39 | gen_rtx_AND(Pmode, | 40 | gen_rtx_AND(Pmode, |
40 | stack_pointer_rtx, | 41 | stack_pointer_rtx, |
41 | GEN_INT(sp_mask))), | 42 | mask)), |
42 | insn); | 43 | insn); |
43 | 44 | ||
44 | SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, | 45 | SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, |
@@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
52 | #define NO_GATE | 53 | #define NO_GATE |
53 | #include "gcc-generate-rtl-pass.h" | 54 | #include "gcc-generate-rtl-pass.h" |
54 | 55 | ||
56 | #if BUILDING_GCC_VERSION >= 9000 | ||
57 | static bool no(void) | ||
58 | { | ||
59 | return false; | ||
60 | } | ||
61 | |||
62 | static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) | ||
63 | { | ||
64 | targetm.have_stack_protect_combined_set = no; | ||
65 | targetm.have_stack_protect_combined_test = no; | ||
66 | } | ||
67 | #endif | ||
68 | |||
55 | __visible int plugin_init(struct plugin_name_args *plugin_info, | 69 | __visible int plugin_init(struct plugin_name_args *plugin_info, |
56 | struct plugin_gcc_version *version) | 70 | struct plugin_gcc_version *version) |
57 | { | 71 | { |
@@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, | |||
99 | register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, | 113 | register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, |
100 | NULL, &arm_pertask_ssp_rtl_pass_info); | 114 | NULL, &arm_pertask_ssp_rtl_pass_info); |
101 | 115 | ||
116 | #if BUILDING_GCC_VERSION >= 9000 | ||
117 | register_callback(plugin_info->base_name, PLUGIN_START_UNIT, | ||
118 | arm_pertask_ssp_start_unit, NULL); | ||
119 | #endif | ||
120 | |||
102 | return 0; | 121 | return 0; |
103 | } | 122 | } |
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index c05ab001b54c..181973509a05 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
@@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $< | |||
206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE | 206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE |
207 | $(call filechk,conf_cfg) | 207 | $(call filechk,conf_cfg) |
208 | 208 | ||
209 | clean-files += conf-cfg | 209 | clean-files += *conf-cfg |
diff --git a/security/security.c b/security/security.c index f1b8d2587639..55bc49027ba9 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) | |||
1027 | 1027 | ||
1028 | void security_cred_free(struct cred *cred) | 1028 | void security_cred_free(struct cred *cred) |
1029 | { | 1029 | { |
1030 | /* | ||
1031 | * There is a failure case in prepare_creds() that | ||
1032 | * may result in a call here with ->security being NULL. | ||
1033 | */ | ||
1034 | if (unlikely(cred->security == NULL)) | ||
1035 | return; | ||
1036 | |||
1030 | call_void_hook(cred_free, cred); | 1037 | call_void_hook(cred_free, cred); |
1031 | } | 1038 | } |
1032 | 1039 | ||
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index a50d625e7946..c1c31e33657a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p) | |||
732 | kfree(key); | 732 | kfree(key); |
733 | if (datum) { | 733 | if (datum) { |
734 | levdatum = datum; | 734 | levdatum = datum; |
735 | ebitmap_destroy(&levdatum->level->cat); | 735 | if (levdatum->level) |
736 | ebitmap_destroy(&levdatum->level->cat); | ||
736 | kfree(levdatum->level); | 737 | kfree(levdatum->level); |
737 | } | 738 | } |
738 | kfree(datum); | 739 | kfree(datum); |
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index ffda91a4a1aa..02514fe558b4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
368 | break; | 368 | break; |
369 | case YAMA_SCOPE_RELATIONAL: | 369 | case YAMA_SCOPE_RELATIONAL: |
370 | rcu_read_lock(); | 370 | rcu_read_lock(); |
371 | if (!task_is_descendant(current, child) && | 371 | if (!pid_alive(child)) |
372 | rc = -EPERM; | ||
373 | if (!rc && !task_is_descendant(current, child) && | ||
372 | !ptracer_exception_found(current, child) && | 374 | !ptracer_exception_found(current, child) && |
373 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) | 375 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) |
374 | rc = -EPERM; | 376 | rc = -EPERM; |
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h | |||
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
50 | PERF_REG_POWERPC_MMCRA, | ||
50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
51 | }; | 52 | }; |
52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 492f0f24e2d3..4ad1f0894d53 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile | |||
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c | |||
93 | SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) | 93 | SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) |
94 | 94 | ||
95 | ifeq ($(feature-libbfd),1) | 95 | ifeq ($(feature-libbfd),1) |
96 | LIBS += -lbfd -ldl -lopcodes | ||
97 | else ifeq ($(feature-libbfd-liberty),1) | ||
98 | LIBS += -lbfd -ldl -lopcodes -liberty | ||
99 | else ifeq ($(feature-libbfd-liberty-z),1) | ||
100 | LIBS += -lbfd -ldl -lopcodes -liberty -lz | ||
101 | endif | ||
102 | |||
103 | ifneq ($(filter -lbfd,$(LIBS)),) | ||
96 | CFLAGS += -DHAVE_LIBBFD_SUPPORT | 104 | CFLAGS += -DHAVE_LIBBFD_SUPPORT |
97 | SRCS += $(BFD_SRCS) | 105 | SRCS += $(BFD_SRCS) |
98 | LIBS += -lbfd -lopcodes | ||
99 | endif | 106 | endif |
100 | 107 | ||
101 | OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o | 108 | OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o |
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c index bff7ee026680..6046dcab51cc 100644 --- a/tools/bpf/bpftool/json_writer.c +++ b/tools/bpf/bpftool/json_writer.c | |||
@@ -1,15 +1,10 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) | 1 | // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) |
2 | /* | 2 | /* |
3 | * Simple streaming JSON writer | 3 | * Simple streaming JSON writer |
4 | * | 4 | * |
5 | * This takes care of the annoying bits of JSON syntax like the commas | 5 | * This takes care of the annoying bits of JSON syntax like the commas |
6 | * after elements | 6 | * after elements |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * Authors: Stephen Hemminger <stephen@networkplumber.org> | 8 | * Authors: Stephen Hemminger <stephen@networkplumber.org> |
14 | */ | 9 | */ |
15 | 10 | ||
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h index c1ab51aed99c..cb9a1993681c 100644 --- a/tools/bpf/bpftool/json_writer.h +++ b/tools/bpf/bpftool/json_writer.h | |||
@@ -5,11 +5,6 @@ | |||
5 | * This takes care of the annoying bits of JSON syntax like the commas | 5 | * This takes care of the annoying bits of JSON syntax like the commas |
6 | * after elements | 6 | * after elements |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * Authors: Stephen Hemminger <stephen@networkplumber.org> | 8 | * Authors: Stephen Hemminger <stephen@networkplumber.org> |
14 | */ | 9 | */ |
15 | 10 | ||
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h new file mode 100644 index 000000000000..0d18b1d1fbbc --- /dev/null +++ b/tools/include/uapi/linux/pkt_sched.h | |||
@@ -0,0 +1,1163 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
2 | #ifndef __LINUX_PKT_SCHED_H | ||
3 | #define __LINUX_PKT_SCHED_H | ||
4 | |||
5 | #include <linux/types.h> | ||
6 | |||
7 | /* Logical priority bands not depending on specific packet scheduler. | ||
8 | Every scheduler will map them to real traffic classes, if it has | ||
9 | no more precise mechanism to classify packets. | ||
10 | |||
11 | These numbers have no special meaning, though their coincidence | ||
12 | with obsolete IPv6 values is not occasional :-). New IPv6 drafts | ||
13 | preferred full anarchy inspired by diffserv group. | ||
14 | |||
15 | Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy | ||
16 | class, actually, as rule it will be handled with more care than | ||
17 | filler or even bulk. | ||
18 | */ | ||
19 | |||
20 | #define TC_PRIO_BESTEFFORT 0 | ||
21 | #define TC_PRIO_FILLER 1 | ||
22 | #define TC_PRIO_BULK 2 | ||
23 | #define TC_PRIO_INTERACTIVE_BULK 4 | ||
24 | #define TC_PRIO_INTERACTIVE 6 | ||
25 | #define TC_PRIO_CONTROL 7 | ||
26 | |||
27 | #define TC_PRIO_MAX 15 | ||
28 | |||
29 | /* Generic queue statistics, available for all the elements. | ||
30 | Particular schedulers may have also their private records. | ||
31 | */ | ||
32 | |||
33 | struct tc_stats { | ||
34 | __u64 bytes; /* Number of enqueued bytes */ | ||
35 | __u32 packets; /* Number of enqueued packets */ | ||
36 | __u32 drops; /* Packets dropped because of lack of resources */ | ||
37 | __u32 overlimits; /* Number of throttle events when this | ||
38 | * flow goes out of allocated bandwidth */ | ||
39 | __u32 bps; /* Current flow byte rate */ | ||
40 | __u32 pps; /* Current flow packet rate */ | ||
41 | __u32 qlen; | ||
42 | __u32 backlog; | ||
43 | }; | ||
44 | |||
45 | struct tc_estimator { | ||
46 | signed char interval; | ||
47 | unsigned char ewma_log; | ||
48 | }; | ||
49 | |||
50 | /* "Handles" | ||
51 | --------- | ||
52 | |||
53 | All the traffic control objects have 32bit identifiers, or "handles". | ||
54 | |||
55 | They can be considered as opaque numbers from user API viewpoint, | ||
56 | but actually they always consist of two fields: major and | ||
57 | minor numbers, which are interpreted by kernel specially, | ||
58 | that may be used by applications, though not recommended. | ||
59 | |||
60 | F.e. qdisc handles always have minor number equal to zero, | ||
61 | classes (or flows) have major equal to parent qdisc major, and | ||
62 | minor uniquely identifying class inside qdisc. | ||
63 | |||
64 | Macros to manipulate handles: | ||
65 | */ | ||
66 | |||
67 | #define TC_H_MAJ_MASK (0xFFFF0000U) | ||
68 | #define TC_H_MIN_MASK (0x0000FFFFU) | ||
69 | #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) | ||
70 | #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) | ||
71 | #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) | ||
72 | |||
73 | #define TC_H_UNSPEC (0U) | ||
74 | #define TC_H_ROOT (0xFFFFFFFFU) | ||
75 | #define TC_H_INGRESS (0xFFFFFFF1U) | ||
76 | #define TC_H_CLSACT TC_H_INGRESS | ||
77 | |||
78 | #define TC_H_MIN_PRIORITY 0xFFE0U | ||
79 | #define TC_H_MIN_INGRESS 0xFFF2U | ||
80 | #define TC_H_MIN_EGRESS 0xFFF3U | ||
81 | |||
82 | /* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */ | ||
83 | enum tc_link_layer { | ||
84 | TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ | ||
85 | TC_LINKLAYER_ETHERNET, | ||
86 | TC_LINKLAYER_ATM, | ||
87 | }; | ||
88 | #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ | ||
89 | |||
90 | struct tc_ratespec { | ||
91 | unsigned char cell_log; | ||
92 | __u8 linklayer; /* lower 4 bits */ | ||
93 | unsigned short overhead; | ||
94 | short cell_align; | ||
95 | unsigned short mpu; | ||
96 | __u32 rate; | ||
97 | }; | ||
98 | |||
99 | #define TC_RTAB_SIZE 1024 | ||
100 | |||
101 | struct tc_sizespec { | ||
102 | unsigned char cell_log; | ||
103 | unsigned char size_log; | ||
104 | short cell_align; | ||
105 | int overhead; | ||
106 | unsigned int linklayer; | ||
107 | unsigned int mpu; | ||
108 | unsigned int mtu; | ||
109 | unsigned int tsize; | ||
110 | }; | ||
111 | |||
112 | enum { | ||
113 | TCA_STAB_UNSPEC, | ||
114 | TCA_STAB_BASE, | ||
115 | TCA_STAB_DATA, | ||
116 | __TCA_STAB_MAX | ||
117 | }; | ||
118 | |||
119 | #define TCA_STAB_MAX (__TCA_STAB_MAX - 1) | ||
120 | |||
121 | /* FIFO section */ | ||
122 | |||
123 | struct tc_fifo_qopt { | ||
124 | __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ | ||
125 | }; | ||
126 | |||
127 | /* SKBPRIO section */ | ||
128 | |||
129 | /* | ||
130 | * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). | ||
131 | * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able | ||
132 | * to map one to one the DS field of IPV4 and IPV6 headers. | ||
133 | * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. | ||
134 | */ | ||
135 | |||
136 | #define SKBPRIO_MAX_PRIORITY 64 | ||
137 | |||
138 | struct tc_skbprio_qopt { | ||
139 | __u32 limit; /* Queue length in packets. */ | ||
140 | }; | ||
141 | |||
142 | /* PRIO section */ | ||
143 | |||
144 | #define TCQ_PRIO_BANDS 16 | ||
145 | #define TCQ_MIN_PRIO_BANDS 2 | ||
146 | |||
147 | struct tc_prio_qopt { | ||
148 | int bands; /* Number of bands */ | ||
149 | __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ | ||
150 | }; | ||
151 | |||
152 | /* MULTIQ section */ | ||
153 | |||
154 | struct tc_multiq_qopt { | ||
155 | __u16 bands; /* Number of bands */ | ||
156 | __u16 max_bands; /* Maximum number of queues */ | ||
157 | }; | ||
158 | |||
159 | /* PLUG section */ | ||
160 | |||
161 | #define TCQ_PLUG_BUFFER 0 | ||
162 | #define TCQ_PLUG_RELEASE_ONE 1 | ||
163 | #define TCQ_PLUG_RELEASE_INDEFINITE 2 | ||
164 | #define TCQ_PLUG_LIMIT 3 | ||
165 | |||
166 | struct tc_plug_qopt { | ||
167 | /* TCQ_PLUG_BUFFER: Insert a plug into the queue and | ||
168 | * buffer any incoming packets | ||
169 | * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head | ||
170 | * to beginning of the next plug. | ||
171 | * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. | ||
172 | * Stop buffering packets until the next TCQ_PLUG_BUFFER | ||
173 | * command is received (just act as a pass-thru queue). | ||
174 | * TCQ_PLUG_LIMIT: Increase/decrease queue size | ||
175 | */ | ||
176 | int action; | ||
177 | __u32 limit; | ||
178 | }; | ||
179 | |||
180 | /* TBF section */ | ||
181 | |||
182 | struct tc_tbf_qopt { | ||
183 | struct tc_ratespec rate; | ||
184 | struct tc_ratespec peakrate; | ||
185 | __u32 limit; | ||
186 | __u32 buffer; | ||
187 | __u32 mtu; | ||
188 | }; | ||
189 | |||
190 | enum { | ||
191 | TCA_TBF_UNSPEC, | ||
192 | TCA_TBF_PARMS, | ||
193 | TCA_TBF_RTAB, | ||
194 | TCA_TBF_PTAB, | ||
195 | TCA_TBF_RATE64, | ||
196 | TCA_TBF_PRATE64, | ||
197 | TCA_TBF_BURST, | ||
198 | TCA_TBF_PBURST, | ||
199 | TCA_TBF_PAD, | ||
200 | __TCA_TBF_MAX, | ||
201 | }; | ||
202 | |||
203 | #define TCA_TBF_MAX (__TCA_TBF_MAX - 1) | ||
204 | |||
205 | |||
206 | /* TEQL section */ | ||
207 | |||
208 | /* TEQL does not require any parameters */ | ||
209 | |||
210 | /* SFQ section */ | ||
211 | |||
212 | struct tc_sfq_qopt { | ||
213 | unsigned quantum; /* Bytes per round allocated to flow */ | ||
214 | int perturb_period; /* Period of hash perturbation */ | ||
215 | __u32 limit; /* Maximal packets in queue */ | ||
216 | unsigned divisor; /* Hash divisor */ | ||
217 | unsigned flows; /* Maximal number of flows */ | ||
218 | }; | ||
219 | |||
220 | struct tc_sfqred_stats { | ||
221 | __u32 prob_drop; /* Early drops, below max threshold */ | ||
222 | __u32 forced_drop; /* Early drops, after max threshold */ | ||
223 | __u32 prob_mark; /* Marked packets, below max threshold */ | ||
224 | __u32 forced_mark; /* Marked packets, after max threshold */ | ||
225 | __u32 prob_mark_head; /* Marked packets, below max threshold */ | ||
226 | __u32 forced_mark_head;/* Marked packets, after max threshold */ | ||
227 | }; | ||
228 | |||
229 | struct tc_sfq_qopt_v1 { | ||
230 | struct tc_sfq_qopt v0; | ||
231 | unsigned int depth; /* max number of packets per flow */ | ||
232 | unsigned int headdrop; | ||
233 | /* SFQRED parameters */ | ||
234 | __u32 limit; /* HARD maximal flow queue length (bytes) */ | ||
235 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
236 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
237 | unsigned char Wlog; /* log(W) */ | ||
238 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
239 | unsigned char Scell_log; /* cell size for idle damping */ | ||
240 | unsigned char flags; | ||
241 | __u32 max_P; /* probability, high resolution */ | ||
242 | /* SFQRED stats */ | ||
243 | struct tc_sfqred_stats stats; | ||
244 | }; | ||
245 | |||
246 | |||
247 | struct tc_sfq_xstats { | ||
248 | __s32 allot; | ||
249 | }; | ||
250 | |||
251 | /* RED section */ | ||
252 | |||
253 | enum { | ||
254 | TCA_RED_UNSPEC, | ||
255 | TCA_RED_PARMS, | ||
256 | TCA_RED_STAB, | ||
257 | TCA_RED_MAX_P, | ||
258 | __TCA_RED_MAX, | ||
259 | }; | ||
260 | |||
261 | #define TCA_RED_MAX (__TCA_RED_MAX - 1) | ||
262 | |||
263 | struct tc_red_qopt { | ||
264 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
265 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
266 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
267 | unsigned char Wlog; /* log(W) */ | ||
268 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
269 | unsigned char Scell_log; /* cell size for idle damping */ | ||
270 | unsigned char flags; | ||
271 | #define TC_RED_ECN 1 | ||
272 | #define TC_RED_HARDDROP 2 | ||
273 | #define TC_RED_ADAPTATIVE 4 | ||
274 | }; | ||
275 | |||
276 | struct tc_red_xstats { | ||
277 | __u32 early; /* Early drops */ | ||
278 | __u32 pdrop; /* Drops due to queue limits */ | ||
279 | __u32 other; /* Drops due to drop() calls */ | ||
280 | __u32 marked; /* Marked packets */ | ||
281 | }; | ||
282 | |||
283 | /* GRED section */ | ||
284 | |||
285 | #define MAX_DPs 16 | ||
286 | |||
287 | enum { | ||
288 | TCA_GRED_UNSPEC, | ||
289 | TCA_GRED_PARMS, | ||
290 | TCA_GRED_STAB, | ||
291 | TCA_GRED_DPS, | ||
292 | TCA_GRED_MAX_P, | ||
293 | TCA_GRED_LIMIT, | ||
294 | TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ | ||
295 | __TCA_GRED_MAX, | ||
296 | }; | ||
297 | |||
298 | #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) | ||
299 | |||
300 | enum { | ||
301 | TCA_GRED_VQ_ENTRY_UNSPEC, | ||
302 | TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ | ||
303 | __TCA_GRED_VQ_ENTRY_MAX, | ||
304 | }; | ||
305 | #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) | ||
306 | |||
307 | enum { | ||
308 | TCA_GRED_VQ_UNSPEC, | ||
309 | TCA_GRED_VQ_PAD, | ||
310 | TCA_GRED_VQ_DP, /* u32 */ | ||
311 | TCA_GRED_VQ_STAT_BYTES, /* u64 */ | ||
312 | TCA_GRED_VQ_STAT_PACKETS, /* u32 */ | ||
313 | TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ | ||
314 | TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ | ||
315 | TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ | ||
316 | TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ | ||
317 | TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ | ||
318 | TCA_GRED_VQ_STAT_PDROP, /* u32 */ | ||
319 | TCA_GRED_VQ_STAT_OTHER, /* u32 */ | ||
320 | TCA_GRED_VQ_FLAGS, /* u32 */ | ||
321 | __TCA_GRED_VQ_MAX | ||
322 | }; | ||
323 | |||
324 | #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) | ||
325 | |||
326 | struct tc_gred_qopt { | ||
327 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
328 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
329 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
330 | __u32 DP; /* up to 2^32 DPs */ | ||
331 | __u32 backlog; | ||
332 | __u32 qave; | ||
333 | __u32 forced; | ||
334 | __u32 early; | ||
335 | __u32 other; | ||
336 | __u32 pdrop; | ||
337 | __u8 Wlog; /* log(W) */ | ||
338 | __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
339 | __u8 Scell_log; /* cell size for idle damping */ | ||
340 | __u8 prio; /* prio of this VQ */ | ||
341 | __u32 packets; | ||
342 | __u32 bytesin; | ||
343 | }; | ||
344 | |||
345 | /* gred setup */ | ||
346 | struct tc_gred_sopt { | ||
347 | __u32 DPs; | ||
348 | __u32 def_DP; | ||
349 | __u8 grio; | ||
350 | __u8 flags; | ||
351 | __u16 pad1; | ||
352 | }; | ||
353 | |||
354 | /* CHOKe section */ | ||
355 | |||
356 | enum { | ||
357 | TCA_CHOKE_UNSPEC, | ||
358 | TCA_CHOKE_PARMS, | ||
359 | TCA_CHOKE_STAB, | ||
360 | TCA_CHOKE_MAX_P, | ||
361 | __TCA_CHOKE_MAX, | ||
362 | }; | ||
363 | |||
364 | #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) | ||
365 | |||
366 | struct tc_choke_qopt { | ||
367 | __u32 limit; /* Hard queue length (packets) */ | ||
368 | __u32 qth_min; /* Min average threshold (packets) */ | ||
369 | __u32 qth_max; /* Max average threshold (packets) */ | ||
370 | unsigned char Wlog; /* log(W) */ | ||
371 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
372 | unsigned char Scell_log; /* cell size for idle damping */ | ||
373 | unsigned char flags; /* see RED flags */ | ||
374 | }; | ||
375 | |||
376 | struct tc_choke_xstats { | ||
377 | __u32 early; /* Early drops */ | ||
378 | __u32 pdrop; /* Drops due to queue limits */ | ||
379 | __u32 other; /* Drops due to drop() calls */ | ||
380 | __u32 marked; /* Marked packets */ | ||
381 | __u32 matched; /* Drops due to flow match */ | ||
382 | }; | ||
383 | |||
384 | /* HTB section */ | ||
385 | #define TC_HTB_NUMPRIO 8 | ||
386 | #define TC_HTB_MAXDEPTH 8 | ||
387 | #define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ | ||
388 | |||
389 | struct tc_htb_opt { | ||
390 | struct tc_ratespec rate; | ||
391 | struct tc_ratespec ceil; | ||
392 | __u32 buffer; | ||
393 | __u32 cbuffer; | ||
394 | __u32 quantum; | ||
395 | __u32 level; /* out only */ | ||
396 | __u32 prio; | ||
397 | }; | ||
398 | struct tc_htb_glob { | ||
399 | __u32 version; /* to match HTB/TC */ | ||
400 | __u32 rate2quantum; /* bps->quantum divisor */ | ||
401 | __u32 defcls; /* default class number */ | ||
402 | __u32 debug; /* debug flags */ | ||
403 | |||
404 | /* stats */ | ||
405 | __u32 direct_pkts; /* count of non shaped packets */ | ||
406 | }; | ||
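The rate2quantum comment above is terse: when a class is created without an explicit quantum, HTB derives one by dividing the class's byte rate by this divisor and clamping the result. A rough illustration of that relationship, assuming the usual default divisor of 10 and the commonly cited 1000..200000 byte clamp (the function name and framing are mine, not the kernel's):

#include <stdint.h>

/* Illustrative only: approximate quantum used for a class whose rate
 * is rate_Bps (bytes per second) when no quantum is given explicitly. */
static uint32_t htb_default_quantum(uint64_t rate_Bps, uint32_t rate2quantum)
{
        uint64_t q = rate_Bps / (rate2quantum ? rate2quantum : 10);

        if (q < 1000)
                q = 1000;
        if (q > 200000)
                q = 200000;
        return (uint32_t)q;
}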
407 | enum { | ||
408 | TCA_HTB_UNSPEC, | ||
409 | TCA_HTB_PARMS, | ||
410 | TCA_HTB_INIT, | ||
411 | TCA_HTB_CTAB, | ||
412 | TCA_HTB_RTAB, | ||
413 | TCA_HTB_DIRECT_QLEN, | ||
414 | TCA_HTB_RATE64, | ||
415 | TCA_HTB_CEIL64, | ||
416 | TCA_HTB_PAD, | ||
417 | __TCA_HTB_MAX, | ||
418 | }; | ||
419 | |||
420 | #define TCA_HTB_MAX (__TCA_HTB_MAX - 1) | ||
421 | |||
422 | struct tc_htb_xstats { | ||
423 | __u32 lends; | ||
424 | __u32 borrows; | ||
425 | __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */ | ||
426 | __s32 tokens; | ||
427 | __s32 ctokens; | ||
428 | }; | ||
429 | |||
430 | /* HFSC section */ | ||
431 | |||
432 | struct tc_hfsc_qopt { | ||
433 | __u16 defcls; /* default class */ | ||
434 | }; | ||
435 | |||
436 | struct tc_service_curve { | ||
437 | __u32 m1; /* slope of the first segment in bps */ | ||
438 | __u32 d; /* x-projection of the first segment in us */ | ||
439 | __u32 m2; /* slope of the second segment in bps */ | ||
440 | }; | ||
441 | |||
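A tc_service_curve describes a two-piece linear curve: slope m1 for the first d microseconds, then slope m2 afterwards. A minimal sketch of evaluating such a curve (an illustrative helper, not the scheduler's internal math):

#include <stdint.h>

/* Accumulated service in bits after t_us microseconds, given m1/m2 in
 * bits per second and d in microseconds, as documented above. */
static uint64_t hfsc_sc_eval(uint32_t m1, uint32_t d, uint32_t m2, uint64_t t_us)
{
        if (t_us < d)
                return (uint64_t)m1 * t_us / 1000000;

        return (uint64_t)m1 * d / 1000000 +
               (uint64_t)m2 * (t_us - d) / 1000000;
}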
442 | struct tc_hfsc_stats { | ||
443 | __u64 work; /* total work done */ | ||
444 | __u64 rtwork; /* work done by real-time criteria */ | ||
445 | __u32 period; /* current period */ | ||
446 | __u32 level; /* class level in hierarchy */ | ||
447 | }; | ||
448 | |||
449 | enum { | ||
450 | TCA_HFSC_UNSPEC, | ||
451 | TCA_HFSC_RSC, | ||
452 | TCA_HFSC_FSC, | ||
453 | TCA_HFSC_USC, | ||
454 | __TCA_HFSC_MAX, | ||
455 | }; | ||
456 | |||
457 | #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) | ||
458 | |||
459 | |||
460 | /* CBQ section */ | ||
461 | |||
462 | #define TC_CBQ_MAXPRIO 8 | ||
463 | #define TC_CBQ_MAXLEVEL 8 | ||
464 | #define TC_CBQ_DEF_EWMA 5 | ||
465 | |||
466 | struct tc_cbq_lssopt { | ||
467 | unsigned char change; | ||
468 | unsigned char flags; | ||
469 | #define TCF_CBQ_LSS_BOUNDED 1 | ||
470 | #define TCF_CBQ_LSS_ISOLATED 2 | ||
471 | unsigned char ewma_log; | ||
472 | unsigned char level; | ||
473 | #define TCF_CBQ_LSS_FLAGS 1 | ||
474 | #define TCF_CBQ_LSS_EWMA 2 | ||
475 | #define TCF_CBQ_LSS_MAXIDLE 4 | ||
476 | #define TCF_CBQ_LSS_MINIDLE 8 | ||
477 | #define TCF_CBQ_LSS_OFFTIME 0x10 | ||
478 | #define TCF_CBQ_LSS_AVPKT 0x20 | ||
479 | __u32 maxidle; | ||
480 | __u32 minidle; | ||
481 | __u32 offtime; | ||
482 | __u32 avpkt; | ||
483 | }; | ||
484 | |||
485 | struct tc_cbq_wrropt { | ||
486 | unsigned char flags; | ||
487 | unsigned char priority; | ||
488 | unsigned char cpriority; | ||
489 | unsigned char __reserved; | ||
490 | __u32 allot; | ||
491 | __u32 weight; | ||
492 | }; | ||
493 | |||
494 | struct tc_cbq_ovl { | ||
495 | unsigned char strategy; | ||
496 | #define TC_CBQ_OVL_CLASSIC 0 | ||
497 | #define TC_CBQ_OVL_DELAY 1 | ||
498 | #define TC_CBQ_OVL_LOWPRIO 2 | ||
499 | #define TC_CBQ_OVL_DROP 3 | ||
500 | #define TC_CBQ_OVL_RCLASSIC 4 | ||
501 | unsigned char priority2; | ||
502 | __u16 pad; | ||
503 | __u32 penalty; | ||
504 | }; | ||
505 | |||
506 | struct tc_cbq_police { | ||
507 | unsigned char police; | ||
508 | unsigned char __res1; | ||
509 | unsigned short __res2; | ||
510 | }; | ||
511 | |||
512 | struct tc_cbq_fopt { | ||
513 | __u32 split; | ||
514 | __u32 defmap; | ||
515 | __u32 defchange; | ||
516 | }; | ||
517 | |||
518 | struct tc_cbq_xstats { | ||
519 | __u32 borrows; | ||
520 | __u32 overactions; | ||
521 | __s32 avgidle; | ||
522 | __s32 undertime; | ||
523 | }; | ||
524 | |||
525 | enum { | ||
526 | TCA_CBQ_UNSPEC, | ||
527 | TCA_CBQ_LSSOPT, | ||
528 | TCA_CBQ_WRROPT, | ||
529 | TCA_CBQ_FOPT, | ||
530 | TCA_CBQ_OVL_STRATEGY, | ||
531 | TCA_CBQ_RATE, | ||
532 | TCA_CBQ_RTAB, | ||
533 | TCA_CBQ_POLICE, | ||
534 | __TCA_CBQ_MAX, | ||
535 | }; | ||
536 | |||
537 | #define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) | ||
538 | |||
539 | /* dsmark section */ | ||
540 | |||
541 | enum { | ||
542 | TCA_DSMARK_UNSPEC, | ||
543 | TCA_DSMARK_INDICES, | ||
544 | TCA_DSMARK_DEFAULT_INDEX, | ||
545 | TCA_DSMARK_SET_TC_INDEX, | ||
546 | TCA_DSMARK_MASK, | ||
547 | TCA_DSMARK_VALUE, | ||
548 | __TCA_DSMARK_MAX, | ||
549 | }; | ||
550 | |||
551 | #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) | ||
552 | |||
553 | /* ATM section */ | ||
554 | |||
555 | enum { | ||
556 | TCA_ATM_UNSPEC, | ||
557 | TCA_ATM_FD, /* file/socket descriptor */ | ||
558 | TCA_ATM_PTR, /* pointer to descriptor - later */ | ||
559 | TCA_ATM_HDR, /* LL header */ | ||
560 | TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ | ||
561 | TCA_ATM_ADDR, /* PVC address (for output only) */ | ||
562 | TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ | ||
563 | __TCA_ATM_MAX, | ||
564 | }; | ||
565 | |||
566 | #define TCA_ATM_MAX (__TCA_ATM_MAX - 1) | ||
567 | |||
568 | /* Network emulator */ | ||
569 | |||
570 | enum { | ||
571 | TCA_NETEM_UNSPEC, | ||
572 | TCA_NETEM_CORR, | ||
573 | TCA_NETEM_DELAY_DIST, | ||
574 | TCA_NETEM_REORDER, | ||
575 | TCA_NETEM_CORRUPT, | ||
576 | TCA_NETEM_LOSS, | ||
577 | TCA_NETEM_RATE, | ||
578 | TCA_NETEM_ECN, | ||
579 | TCA_NETEM_RATE64, | ||
580 | TCA_NETEM_PAD, | ||
581 | TCA_NETEM_LATENCY64, | ||
582 | TCA_NETEM_JITTER64, | ||
583 | TCA_NETEM_SLOT, | ||
584 | TCA_NETEM_SLOT_DIST, | ||
585 | __TCA_NETEM_MAX, | ||
586 | }; | ||
587 | |||
588 | #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) | ||
589 | |||
590 | struct tc_netem_qopt { | ||
591 | __u32 latency; /* added delay (us) */ | ||
592 | __u32 limit; /* fifo limit (packets) */ | ||
593 | __u32 loss; /* random packet loss (0=none ~0=100%) */ | ||
594 | __u32 gap; /* re-ordering gap (0 for none) */ | ||
595 | __u32 duplicate; /* random packet dup (0=none ~0=100%) */ | ||
596 | __u32 jitter; /* random jitter in latency (us) */ | ||
597 | }; | ||
598 | |||
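The loss and duplicate fields scale a probability onto the full __u32 range, with ~0 meaning 100%. A small conversion sketch, assuming the caller works in percent (the helper name is illustrative):

#include <stdint.h>

/* Map a percentage in [0, 100] to the saturating __u32 probability
 * encoding used by tc_netem_qopt.loss and .duplicate. */
static uint32_t netem_percent_to_prob(double percent)
{
        if (percent >= 100.0)
                return UINT32_MAX;
        if (percent <= 0.0)
                return 0;
        return (uint32_t)(percent / 100.0 * UINT32_MAX);
}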
599 | struct tc_netem_corr { | ||
600 | __u32 delay_corr; /* delay correlation */ | ||
601 | __u32 loss_corr; /* packet loss correlation */ | ||
602 | __u32 dup_corr; /* duplicate correlation */ | ||
603 | }; | ||
604 | |||
605 | struct tc_netem_reorder { | ||
606 | __u32 probability; | ||
607 | __u32 correlation; | ||
608 | }; | ||
609 | |||
610 | struct tc_netem_corrupt { | ||
611 | __u32 probability; | ||
612 | __u32 correlation; | ||
613 | }; | ||
614 | |||
615 | struct tc_netem_rate { | ||
616 | __u32 rate; /* byte/s */ | ||
617 | __s32 packet_overhead; | ||
618 | __u32 cell_size; | ||
619 | __s32 cell_overhead; | ||
620 | }; | ||
621 | |||
622 | struct tc_netem_slot { | ||
623 | __s64 min_delay; /* nsec */ | ||
624 | __s64 max_delay; | ||
625 | __s32 max_packets; | ||
626 | __s32 max_bytes; | ||
627 | __s64 dist_delay; /* nsec */ | ||
628 | __s64 dist_jitter; /* nsec */ | ||
629 | }; | ||
630 | |||
631 | enum { | ||
632 | NETEM_LOSS_UNSPEC, | ||
633 | NETEM_LOSS_GI, /* General Intuitive - 4 state model */ | ||
634 | NETEM_LOSS_GE, /* Gilbert Elliot models */ | ||
635 | __NETEM_LOSS_MAX | ||
636 | }; | ||
637 | #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) | ||
638 | |||
639 | /* State transition probabilities for 4 state model */ | ||
640 | struct tc_netem_gimodel { | ||
641 | __u32 p13; | ||
642 | __u32 p31; | ||
643 | __u32 p32; | ||
644 | __u32 p14; | ||
645 | __u32 p23; | ||
646 | }; | ||
647 | |||
648 | /* Gilbert-Elliot models */ | ||
649 | struct tc_netem_gemodel { | ||
650 | __u32 p; | ||
651 | __u32 r; | ||
652 | __u32 h; | ||
653 | __u32 k1; | ||
654 | }; | ||
655 | |||
656 | #define NETEM_DIST_SCALE 8192 | ||
657 | #define NETEM_DIST_MAX 16384 | ||
658 | |||
659 | /* DRR */ | ||
660 | |||
661 | enum { | ||
662 | TCA_DRR_UNSPEC, | ||
663 | TCA_DRR_QUANTUM, | ||
664 | __TCA_DRR_MAX | ||
665 | }; | ||
666 | |||
667 | #define TCA_DRR_MAX (__TCA_DRR_MAX - 1) | ||
668 | |||
669 | struct tc_drr_stats { | ||
670 | __u32 deficit; | ||
671 | }; | ||
672 | |||
673 | /* MQPRIO */ | ||
674 | #define TC_QOPT_BITMASK 15 | ||
675 | #define TC_QOPT_MAX_QUEUE 16 | ||
676 | |||
677 | enum { | ||
678 | TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ | ||
679 | TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ | ||
680 | __TC_MQPRIO_HW_OFFLOAD_MAX | ||
681 | }; | ||
682 | |||
683 | #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) | ||
684 | |||
685 | enum { | ||
686 | TC_MQPRIO_MODE_DCB, | ||
687 | TC_MQPRIO_MODE_CHANNEL, | ||
688 | __TC_MQPRIO_MODE_MAX | ||
689 | }; | ||
690 | |||
691 | #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) | ||
692 | |||
693 | enum { | ||
694 | TC_MQPRIO_SHAPER_DCB, | ||
695 | TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ | ||
696 | __TC_MQPRIO_SHAPER_MAX | ||
697 | }; | ||
698 | |||
699 | #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) | ||
700 | |||
701 | struct tc_mqprio_qopt { | ||
702 | __u8 num_tc; | ||
703 | __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; | ||
704 | __u8 hw; | ||
705 | __u16 count[TC_QOPT_MAX_QUEUE]; | ||
706 | __u16 offset[TC_QOPT_MAX_QUEUE]; | ||
707 | }; | ||
708 | |||
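A sketch of how user space might populate tc_mqprio_qopt for a two-traffic-class layout, mapping priorities 0-3 to TC 0 (queues 0-1) and 4-7 to TC 1 (queues 2-3); the helper and the particular mapping are assumptions for illustration:

#include <linux/pkt_sched.h>
#include <string.h>

/* Illustrative only: two traffic classes, hardware offload disabled. */
static void fill_mqprio_2tc(struct tc_mqprio_qopt *qopt)
{
        int i;

        memset(qopt, 0, sizeof(*qopt));
        qopt->num_tc = 2;
        for (i = 0; i <= TC_QOPT_BITMASK; i++)
                qopt->prio_tc_map[i] = i < 4 ? 0 : 1;
        qopt->hw = TC_MQPRIO_HW_OFFLOAD_NONE;
        qopt->count[0] = 2;
        qopt->offset[0] = 0;
        qopt->count[1] = 2;
        qopt->offset[1] = 2;
}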
709 | #define TC_MQPRIO_F_MODE 0x1 | ||
710 | #define TC_MQPRIO_F_SHAPER 0x2 | ||
711 | #define TC_MQPRIO_F_MIN_RATE 0x4 | ||
712 | #define TC_MQPRIO_F_MAX_RATE 0x8 | ||
713 | |||
714 | enum { | ||
715 | TCA_MQPRIO_UNSPEC, | ||
716 | TCA_MQPRIO_MODE, | ||
717 | TCA_MQPRIO_SHAPER, | ||
718 | TCA_MQPRIO_MIN_RATE64, | ||
719 | TCA_MQPRIO_MAX_RATE64, | ||
720 | __TCA_MQPRIO_MAX, | ||
721 | }; | ||
722 | |||
723 | #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) | ||
724 | |||
725 | /* SFB */ | ||
726 | |||
727 | enum { | ||
728 | TCA_SFB_UNSPEC, | ||
729 | TCA_SFB_PARMS, | ||
730 | __TCA_SFB_MAX, | ||
731 | }; | ||
732 | |||
733 | #define TCA_SFB_MAX (__TCA_SFB_MAX - 1) | ||
734 | |||
735 | /* | ||
736 | * Note: increment, decrement are Q0.16 fixed-point values. | ||
737 | */ | ||
738 | struct tc_sfb_qopt { | ||
739 | __u32 rehash_interval; /* delay between hash move, in ms */ | ||
740 | __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ | ||
741 | __u32 max; /* max len of qlen_min */ | ||
742 | __u32 bin_size; /* maximum queue length per bin */ | ||
743 | __u32 increment; /* probability increment, (d1 in Blue) */ | ||
744 | __u32 decrement; /* probability decrement, (d2 in Blue) */ | ||
745 | __u32 limit; /* max SFB queue length */ | ||
746 | __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ | ||
747 | __u32 penalty_burst; | ||
748 | }; | ||
749 | |||
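Since increment and decrement are Q0.16 fixed point, 1.0 corresponds to 65536 and the largest representable step is 0xFFFF. A tiny conversion sketch (the helper name is mine):

#include <stdint.h>
#include <stdio.h>

/* Convert a probability step in [0.0, 1.0] to the Q0.16 value expected
 * by tc_sfb_qopt.increment / .decrement, saturating at 0xFFFF. */
static uint32_t prob_to_q0_16(double p)
{
        uint32_t v = (uint32_t)(p * 65536.0 + 0.5);

        return v > 0xFFFF ? 0xFFFF : v;
}

int main(void)
{
        printf("0.00050 -> 0x%04x\n", prob_to_q0_16(0.00050));
        printf("0.00005 -> 0x%04x\n", prob_to_q0_16(0.00005));
        return 0;
}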
750 | struct tc_sfb_xstats { | ||
751 | __u32 earlydrop; | ||
752 | __u32 penaltydrop; | ||
753 | __u32 bucketdrop; | ||
754 | __u32 queuedrop; | ||
755 | __u32 childdrop; /* drops in child qdisc */ | ||
756 | __u32 marked; | ||
757 | __u32 maxqlen; | ||
758 | __u32 maxprob; | ||
759 | __u32 avgprob; | ||
760 | }; | ||
761 | |||
762 | #define SFB_MAX_PROB 0xFFFF | ||
763 | |||
764 | /* QFQ */ | ||
765 | enum { | ||
766 | TCA_QFQ_UNSPEC, | ||
767 | TCA_QFQ_WEIGHT, | ||
768 | TCA_QFQ_LMAX, | ||
769 | __TCA_QFQ_MAX | ||
770 | }; | ||
771 | |||
772 | #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) | ||
773 | |||
774 | struct tc_qfq_stats { | ||
775 | __u32 weight; | ||
776 | __u32 lmax; | ||
777 | }; | ||
778 | |||
779 | /* CODEL */ | ||
780 | |||
781 | enum { | ||
782 | TCA_CODEL_UNSPEC, | ||
783 | TCA_CODEL_TARGET, | ||
784 | TCA_CODEL_LIMIT, | ||
785 | TCA_CODEL_INTERVAL, | ||
786 | TCA_CODEL_ECN, | ||
787 | TCA_CODEL_CE_THRESHOLD, | ||
788 | __TCA_CODEL_MAX | ||
789 | }; | ||
790 | |||
791 | #define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) | ||
792 | |||
793 | struct tc_codel_xstats { | ||
794 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
795 | __u32 count; /* how many drops we've done since the last time we | ||
796 | * entered dropping state | ||
797 | */ | ||
798 | __u32 lastcount; /* count at entry to dropping state */ | ||
799 | __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ | ||
800 | __s32 drop_next; /* time to drop next packet */ | ||
801 | __u32 drop_overlimit; /* number of time max qdisc packet limit was hit */ | ||
802 | __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ | ||
803 | __u32 dropping; /* are we in dropping state ? */ | ||
804 | __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ | ||
805 | }; | ||
806 | |||
807 | /* FQ_CODEL */ | ||
808 | |||
809 | enum { | ||
810 | TCA_FQ_CODEL_UNSPEC, | ||
811 | TCA_FQ_CODEL_TARGET, | ||
812 | TCA_FQ_CODEL_LIMIT, | ||
813 | TCA_FQ_CODEL_INTERVAL, | ||
814 | TCA_FQ_CODEL_ECN, | ||
815 | TCA_FQ_CODEL_FLOWS, | ||
816 | TCA_FQ_CODEL_QUANTUM, | ||
817 | TCA_FQ_CODEL_CE_THRESHOLD, | ||
818 | TCA_FQ_CODEL_DROP_BATCH_SIZE, | ||
819 | TCA_FQ_CODEL_MEMORY_LIMIT, | ||
820 | __TCA_FQ_CODEL_MAX | ||
821 | }; | ||
822 | |||
823 | #define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) | ||
824 | |||
825 | enum { | ||
826 | TCA_FQ_CODEL_XSTATS_QDISC, | ||
827 | TCA_FQ_CODEL_XSTATS_CLASS, | ||
828 | }; | ||
829 | |||
830 | struct tc_fq_codel_qd_stats { | ||
831 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
832 | __u32 drop_overlimit; /* number of time max qdisc | ||
833 | * packet limit was hit | ||
834 | */ | ||
835 | __u32 ecn_mark; /* number of packets we ECN marked | ||
836 | * instead of being dropped | ||
837 | */ | ||
838 | __u32 new_flow_count; /* number of time packets | ||
839 | * created a 'new flow' | ||
840 | */ | ||
841 | __u32 new_flows_len; /* count of flows in new list */ | ||
842 | __u32 old_flows_len; /* count of flows in old list */ | ||
843 | __u32 ce_mark; /* packets above ce_threshold */ | ||
844 | __u32 memory_usage; /* in bytes */ | ||
845 | __u32 drop_overmemory; | ||
846 | }; | ||
847 | |||
848 | struct tc_fq_codel_cl_stats { | ||
849 | __s32 deficit; | ||
850 | __u32 ldelay; /* in-queue delay seen by most recently | ||
851 | * dequeued packet | ||
852 | */ | ||
853 | __u32 count; | ||
854 | __u32 lastcount; | ||
855 | __u32 dropping; | ||
856 | __s32 drop_next; | ||
857 | }; | ||
858 | |||
859 | struct tc_fq_codel_xstats { | ||
860 | __u32 type; | ||
861 | union { | ||
862 | struct tc_fq_codel_qd_stats qdisc_stats; | ||
863 | struct tc_fq_codel_cl_stats class_stats; | ||
864 | }; | ||
865 | }; | ||
866 | |||
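When reading these statistics back (typically from the qdisc's TCA_XSTATS blob), the type field selects which union member is valid. A minimal, illustrative reader:

#include <linux/pkt_sched.h>
#include <stdio.h>

/* Illustrative only: dispatch on 'type' before touching the union. */
static void print_fq_codel_xstats(const struct tc_fq_codel_xstats *st)
{
        if (st->type == TCA_FQ_CODEL_XSTATS_QDISC)
                printf("new flows %u, old flows %u, memory %u bytes\n",
                       st->qdisc_stats.new_flows_len,
                       st->qdisc_stats.old_flows_len,
                       st->qdisc_stats.memory_usage);
        else if (st->type == TCA_FQ_CODEL_XSTATS_CLASS)
                printf("deficit %d, ldelay %u\n",
                       st->class_stats.deficit,
                       st->class_stats.ldelay);
}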
867 | /* FQ */ | ||
868 | |||
869 | enum { | ||
870 | TCA_FQ_UNSPEC, | ||
871 | |||
872 | TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ | ||
873 | |||
874 | TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ | ||
875 | |||
876 | TCA_FQ_QUANTUM, /* RR quantum */ | ||
877 | |||
878 | TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ | ||
879 | |||
880 | TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ | ||
881 | |||
882 | TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ | ||
883 | |||
884 | TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ | ||
885 | |||
886 | TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ | ||
887 | |||
888 | TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ | ||
889 | |||
890 | TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ | ||
891 | |||
892 | TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ | ||
893 | |||
894 | TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ | ||
895 | |||
896 | __TCA_FQ_MAX | ||
897 | }; | ||
898 | |||
899 | #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) | ||
900 | |||
901 | struct tc_fq_qd_stats { | ||
902 | __u64 gc_flows; | ||
903 | __u64 highprio_packets; | ||
904 | __u64 tcp_retrans; | ||
905 | __u64 throttled; | ||
906 | __u64 flows_plimit; | ||
907 | __u64 pkts_too_long; | ||
908 | __u64 allocation_errors; | ||
909 | __s64 time_next_delayed_flow; | ||
910 | __u32 flows; | ||
911 | __u32 inactive_flows; | ||
912 | __u32 throttled_flows; | ||
913 | __u32 unthrottle_latency_ns; | ||
914 | __u64 ce_mark; /* packets above ce_threshold */ | ||
915 | }; | ||
916 | |||
917 | /* Heavy-Hitter Filter */ | ||
918 | |||
919 | enum { | ||
920 | TCA_HHF_UNSPEC, | ||
921 | TCA_HHF_BACKLOG_LIMIT, | ||
922 | TCA_HHF_QUANTUM, | ||
923 | TCA_HHF_HH_FLOWS_LIMIT, | ||
924 | TCA_HHF_RESET_TIMEOUT, | ||
925 | TCA_HHF_ADMIT_BYTES, | ||
926 | TCA_HHF_EVICT_TIMEOUT, | ||
927 | TCA_HHF_NON_HH_WEIGHT, | ||
928 | __TCA_HHF_MAX | ||
929 | }; | ||
930 | |||
931 | #define TCA_HHF_MAX (__TCA_HHF_MAX - 1) | ||
932 | |||
933 | struct tc_hhf_xstats { | ||
934 | __u32 drop_overlimit; /* number of times max qdisc packet limit | ||
935 | * was hit | ||
936 | */ | ||
937 | __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ | ||
938 | __u32 hh_tot_count; /* number of captured heavy-hitters so far */ | ||
939 | __u32 hh_cur_count; /* number of current heavy-hitters */ | ||
940 | }; | ||
941 | |||
942 | /* PIE */ | ||
943 | enum { | ||
944 | TCA_PIE_UNSPEC, | ||
945 | TCA_PIE_TARGET, | ||
946 | TCA_PIE_LIMIT, | ||
947 | TCA_PIE_TUPDATE, | ||
948 | TCA_PIE_ALPHA, | ||
949 | TCA_PIE_BETA, | ||
950 | TCA_PIE_ECN, | ||
951 | TCA_PIE_BYTEMODE, | ||
952 | __TCA_PIE_MAX | ||
953 | }; | ||
954 | #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) | ||
955 | |||
956 | struct tc_pie_xstats { | ||
957 | __u32 prob; /* current probability */ | ||
958 | __u32 delay; /* current delay in ms */ | ||
959 | __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ | ||
960 | __u32 packets_in; /* total number of packets enqueued */ | ||
961 | __u32 dropped; /* packets dropped due to pie_action */ | ||
962 | __u32 overlimit; /* dropped due to lack of space in queue */ | ||
963 | __u32 maxq; /* maximum queue size */ | ||
964 | __u32 ecn_mark; /* packets marked with ecn*/ | ||
965 | }; | ||
966 | |||
967 | /* CBS */ | ||
968 | struct tc_cbs_qopt { | ||
969 | __u8 offload; | ||
970 | __u8 _pad[3]; | ||
971 | __s32 hicredit; | ||
972 | __s32 locredit; | ||
973 | __s32 idleslope; | ||
974 | __s32 sendslope; | ||
975 | }; | ||
976 | |||
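The four tc_cbs_qopt parameters come from the 802.1Q credit-based shaper. As a hedged sketch of the usual arithmetic (the struct, helper name, units and framing here are assumptions for illustration: slopes in kbit/s, frame sizes and credits in bytes):

#include <stdint.h>

struct cbs_params {
        int32_t idleslope;
        int32_t sendslope;
        int32_t hicredit;
        int32_t locredit;
};

/* Illustrative only: derive shaper parameters from a reserved rate,
 * the port rate, and worst-case interfering/own frame sizes. */
static struct cbs_params cbs_compute(int64_t idleslope_kbps,
                                     int64_t port_rate_kbps,
                                     int64_t max_interference_bytes,
                                     int64_t max_frame_bytes)
{
        struct cbs_params p;
        int64_t sendslope = idleslope_kbps - port_rate_kbps;

        p.idleslope = (int32_t)idleslope_kbps;
        p.sendslope = (int32_t)sendslope;
        p.hicredit = (int32_t)(max_interference_bytes * idleslope_kbps /
                               port_rate_kbps);
        p.locredit = (int32_t)(max_frame_bytes * sendslope / port_rate_kbps);
        return p;
}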
977 | enum { | ||
978 | TCA_CBS_UNSPEC, | ||
979 | TCA_CBS_PARMS, | ||
980 | __TCA_CBS_MAX, | ||
981 | }; | ||
982 | |||
983 | #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) | ||
984 | |||
985 | |||
986 | /* ETF */ | ||
987 | struct tc_etf_qopt { | ||
988 | __s32 delta; | ||
989 | __s32 clockid; | ||
990 | __u32 flags; | ||
991 | #define TC_ETF_DEADLINE_MODE_ON BIT(0) | ||
992 | #define TC_ETF_OFFLOAD_ON BIT(1) | ||
993 | }; | ||
994 | |||
995 | enum { | ||
996 | TCA_ETF_UNSPEC, | ||
997 | TCA_ETF_PARMS, | ||
998 | __TCA_ETF_MAX, | ||
999 | }; | ||
1000 | |||
1001 | #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) | ||
1002 | |||
1003 | |||
1004 | /* CAKE */ | ||
1005 | enum { | ||
1006 | TCA_CAKE_UNSPEC, | ||
1007 | TCA_CAKE_PAD, | ||
1008 | TCA_CAKE_BASE_RATE64, | ||
1009 | TCA_CAKE_DIFFSERV_MODE, | ||
1010 | TCA_CAKE_ATM, | ||
1011 | TCA_CAKE_FLOW_MODE, | ||
1012 | TCA_CAKE_OVERHEAD, | ||
1013 | TCA_CAKE_RTT, | ||
1014 | TCA_CAKE_TARGET, | ||
1015 | TCA_CAKE_AUTORATE, | ||
1016 | TCA_CAKE_MEMORY, | ||
1017 | TCA_CAKE_NAT, | ||
1018 | TCA_CAKE_RAW, | ||
1019 | TCA_CAKE_WASH, | ||
1020 | TCA_CAKE_MPU, | ||
1021 | TCA_CAKE_INGRESS, | ||
1022 | TCA_CAKE_ACK_FILTER, | ||
1023 | TCA_CAKE_SPLIT_GSO, | ||
1024 | __TCA_CAKE_MAX | ||
1025 | }; | ||
1026 | #define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) | ||
1027 | |||
1028 | enum { | ||
1029 | __TCA_CAKE_STATS_INVALID, | ||
1030 | TCA_CAKE_STATS_PAD, | ||
1031 | TCA_CAKE_STATS_CAPACITY_ESTIMATE64, | ||
1032 | TCA_CAKE_STATS_MEMORY_LIMIT, | ||
1033 | TCA_CAKE_STATS_MEMORY_USED, | ||
1034 | TCA_CAKE_STATS_AVG_NETOFF, | ||
1035 | TCA_CAKE_STATS_MIN_NETLEN, | ||
1036 | TCA_CAKE_STATS_MAX_NETLEN, | ||
1037 | TCA_CAKE_STATS_MIN_ADJLEN, | ||
1038 | TCA_CAKE_STATS_MAX_ADJLEN, | ||
1039 | TCA_CAKE_STATS_TIN_STATS, | ||
1040 | TCA_CAKE_STATS_DEFICIT, | ||
1041 | TCA_CAKE_STATS_COBALT_COUNT, | ||
1042 | TCA_CAKE_STATS_DROPPING, | ||
1043 | TCA_CAKE_STATS_DROP_NEXT_US, | ||
1044 | TCA_CAKE_STATS_P_DROP, | ||
1045 | TCA_CAKE_STATS_BLUE_TIMER_US, | ||
1046 | __TCA_CAKE_STATS_MAX | ||
1047 | }; | ||
1048 | #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) | ||
1049 | |||
1050 | enum { | ||
1051 | __TCA_CAKE_TIN_STATS_INVALID, | ||
1052 | TCA_CAKE_TIN_STATS_PAD, | ||
1053 | TCA_CAKE_TIN_STATS_SENT_PACKETS, | ||
1054 | TCA_CAKE_TIN_STATS_SENT_BYTES64, | ||
1055 | TCA_CAKE_TIN_STATS_DROPPED_PACKETS, | ||
1056 | TCA_CAKE_TIN_STATS_DROPPED_BYTES64, | ||
1057 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, | ||
1058 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, | ||
1059 | TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, | ||
1060 | TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, | ||
1061 | TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, | ||
1062 | TCA_CAKE_TIN_STATS_BACKLOG_BYTES, | ||
1063 | TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, | ||
1064 | TCA_CAKE_TIN_STATS_TARGET_US, | ||
1065 | TCA_CAKE_TIN_STATS_INTERVAL_US, | ||
1066 | TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, | ||
1067 | TCA_CAKE_TIN_STATS_WAY_MISSES, | ||
1068 | TCA_CAKE_TIN_STATS_WAY_COLLISIONS, | ||
1069 | TCA_CAKE_TIN_STATS_PEAK_DELAY_US, | ||
1070 | TCA_CAKE_TIN_STATS_AVG_DELAY_US, | ||
1071 | TCA_CAKE_TIN_STATS_BASE_DELAY_US, | ||
1072 | TCA_CAKE_TIN_STATS_SPARSE_FLOWS, | ||
1073 | TCA_CAKE_TIN_STATS_BULK_FLOWS, | ||
1074 | TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, | ||
1075 | TCA_CAKE_TIN_STATS_MAX_SKBLEN, | ||
1076 | TCA_CAKE_TIN_STATS_FLOW_QUANTUM, | ||
1077 | __TCA_CAKE_TIN_STATS_MAX | ||
1078 | }; | ||
1079 | #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) | ||
1080 | #define TC_CAKE_MAX_TINS (8) | ||
1081 | |||
1082 | enum { | ||
1083 | CAKE_FLOW_NONE = 0, | ||
1084 | CAKE_FLOW_SRC_IP, | ||
1085 | CAKE_FLOW_DST_IP, | ||
1086 | CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ | ||
1087 | CAKE_FLOW_FLOWS, | ||
1088 | CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ | ||
1089 | CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ | ||
1090 | CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ | ||
1091 | CAKE_FLOW_MAX, | ||
1092 | }; | ||
1093 | |||
1094 | enum { | ||
1095 | CAKE_DIFFSERV_DIFFSERV3 = 0, | ||
1096 | CAKE_DIFFSERV_DIFFSERV4, | ||
1097 | CAKE_DIFFSERV_DIFFSERV8, | ||
1098 | CAKE_DIFFSERV_BESTEFFORT, | ||
1099 | CAKE_DIFFSERV_PRECEDENCE, | ||
1100 | CAKE_DIFFSERV_MAX | ||
1101 | }; | ||
1102 | |||
1103 | enum { | ||
1104 | CAKE_ACK_NONE = 0, | ||
1105 | CAKE_ACK_FILTER, | ||
1106 | CAKE_ACK_AGGRESSIVE, | ||
1107 | CAKE_ACK_MAX | ||
1108 | }; | ||
1109 | |||
1110 | enum { | ||
1111 | CAKE_ATM_NONE = 0, | ||
1112 | CAKE_ATM_ATM, | ||
1113 | CAKE_ATM_PTM, | ||
1114 | CAKE_ATM_MAX | ||
1115 | }; | ||
1116 | |||
1117 | |||
1118 | /* TAPRIO */ | ||
1119 | enum { | ||
1120 | TC_TAPRIO_CMD_SET_GATES = 0x00, | ||
1121 | TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, | ||
1122 | TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, | ||
1123 | }; | ||
1124 | |||
1125 | enum { | ||
1126 | TCA_TAPRIO_SCHED_ENTRY_UNSPEC, | ||
1127 | TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ | ||
1128 | TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ | ||
1129 | TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ | ||
1130 | TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ | ||
1131 | __TCA_TAPRIO_SCHED_ENTRY_MAX, | ||
1132 | }; | ||
1133 | #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) | ||
1134 | |||
1135 | /* The format for schedule entry list is: | ||
1136 | * [TCA_TAPRIO_SCHED_ENTRY_LIST] | ||
1137 | * [TCA_TAPRIO_SCHED_ENTRY] | ||
1138 | * [TCA_TAPRIO_SCHED_ENTRY_CMD] | ||
1139 | * [TCA_TAPRIO_SCHED_ENTRY_GATES] | ||
1140 | * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] | ||
1141 | */ | ||
1142 | enum { | ||
1143 | TCA_TAPRIO_SCHED_UNSPEC, | ||
1144 | TCA_TAPRIO_SCHED_ENTRY, | ||
1145 | __TCA_TAPRIO_SCHED_MAX, | ||
1146 | }; | ||
1147 | |||
1148 | #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) | ||
1149 | |||
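Putting the schedule-entry nesting described above into practice, a minimal sketch using libmnl might append one "set gates" entry like this (the helper, the gate mask and the interval value are assumptions; tc conventionally expresses the interval in nanoseconds, so 300000 is meant as 300us):

#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

/* Illustrative only: nest one schedule entry into a message whose
 * qdisc options payload is being built in nlh. */
static void put_taprio_entry(struct nlmsghdr *nlh, __u32 index)
{
        struct nlattr *list, *entry;

        list = mnl_attr_nest_start(nlh, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
        entry = mnl_attr_nest_start(nlh, TCA_TAPRIO_SCHED_ENTRY);
        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INDEX, index);
        mnl_attr_put_u8(nlh, TCA_TAPRIO_SCHED_ENTRY_CMD, TC_TAPRIO_CMD_SET_GATES);
        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, 0x3);
        mnl_attr_put_u32(nlh, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, 300000);
        mnl_attr_nest_end(nlh, entry);
        mnl_attr_nest_end(nlh, list);
}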
1150 | enum { | ||
1151 | TCA_TAPRIO_ATTR_UNSPEC, | ||
1152 | TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ | ||
1153 | TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ | ||
1154 | TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ | ||
1155 | TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ | ||
1156 | TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ | ||
1157 | TCA_TAPRIO_PAD, | ||
1158 | __TCA_TAPRIO_ATTR_MAX, | ||
1159 | }; | ||
1160 | |||
1161 | #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) | ||
1162 | |||
1163 | #endif | ||
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3caaa3428774..88cbd110ae58 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c | |||
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, | |||
65 | return syscall(__NR_bpf, cmd, attr, size); | 65 | return syscall(__NR_bpf, cmd, attr, size); |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) | ||
69 | { | ||
70 | int fd; | ||
71 | |||
72 | do { | ||
73 | fd = sys_bpf(BPF_PROG_LOAD, attr, size); | ||
74 | } while (fd < 0 && errno == EAGAIN); | ||
75 | |||
76 | return fd; | ||
77 | } | ||
78 | |||
68 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) | 79 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) |
69 | { | 80 | { |
70 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; | 81 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; |
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
232 | memcpy(attr.prog_name, load_attr->name, | 243 | memcpy(attr.prog_name, load_attr->name, |
233 | min(name_len, BPF_OBJ_NAME_LEN - 1)); | 244 | min(name_len, BPF_OBJ_NAME_LEN - 1)); |
234 | 245 | ||
235 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 246 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
236 | if (fd >= 0) | 247 | if (fd >= 0) |
237 | return fd; | 248 | return fd; |
238 | 249 | ||
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
269 | break; | 280 | break; |
270 | } | 281 | } |
271 | 282 | ||
272 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 283 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
273 | 284 | ||
274 | if (fd >= 0) | 285 | if (fd >= 0) |
275 | goto done; | 286 | goto done; |
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
283 | attr.log_size = log_buf_sz; | 294 | attr.log_size = log_buf_sz; |
284 | attr.log_level = 1; | 295 | attr.log_level = 1; |
285 | log_buf[0] = 0; | 296 | log_buf[0] = 0; |
286 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 297 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
287 | done: | 298 | done: |
288 | free(finfo); | 299 | free(finfo); |
289 | free(linfo); | 300 | free(linfo); |
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, | |||
328 | attr.kern_version = kern_version; | 339 | attr.kern_version = kern_version; |
329 | attr.prog_flags = prog_flags; | 340 | attr.prog_flags = prog_flags; |
330 | 341 | ||
331 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 342 | return sys_bpf_prog_load(&attr, sizeof(attr)); |
332 | } | 343 | } |
333 | 344 | ||
334 | int bpf_map_update_elem(int fd, const void *key, const void *value, | 345 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h index 1076393e6f43..e18a3556f5e3 100644 --- a/tools/perf/arch/powerpc/include/perf_regs.h +++ b/tools/perf/arch/powerpc/include/perf_regs.h | |||
@@ -63,7 +63,8 @@ static const char *reg_names[] = { | |||
63 | [PERF_REG_POWERPC_TRAP] = "trap", | 63 | [PERF_REG_POWERPC_TRAP] = "trap", |
64 | [PERF_REG_POWERPC_DAR] = "dar", | 64 | [PERF_REG_POWERPC_DAR] = "dar", |
65 | [PERF_REG_POWERPC_DSISR] = "dsisr", | 65 | [PERF_REG_POWERPC_DSISR] = "dsisr", |
66 | [PERF_REG_POWERPC_SIER] = "sier" | 66 | [PERF_REG_POWERPC_SIER] = "sier", |
67 | [PERF_REG_POWERPC_MMCRA] = "mmcra" | ||
67 | }; | 68 | }; |
68 | 69 | ||
69 | static inline const char *perf_reg_name(int id) | 70 | static inline const char *perf_reg_name(int id) |
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c index 07fcd977d93e..34d5134681d9 100644 --- a/tools/perf/arch/powerpc/util/perf_regs.c +++ b/tools/perf/arch/powerpc/util/perf_regs.c | |||
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = { | |||
53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), | 53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), |
54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), | 54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), |
55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), | 55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), |
56 | SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), | ||
56 | SMPL_REG_END | 57 | SMPL_REG_END |
57 | }; | 58 | }; |
58 | 59 | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 70229de510f5..41ab7a3668b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
@@ -56,6 +56,7 @@ TEST_PROGS := test_kmod.sh \ | |||
56 | test_xdp_vlan.sh | 56 | test_xdp_vlan.sh |
57 | 57 | ||
58 | TEST_PROGS_EXTENDED := with_addr.sh \ | 58 | TEST_PROGS_EXTENDED := with_addr.sh \ |
59 | with_tunnels.sh \ | ||
59 | tcp_client.py \ | 60 | tcp_client.py \ |
60 | tcp_server.py | 61 | tcp_server.py |
61 | 62 | ||
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 126fc624290d..25f0083a9b2e 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c | |||
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void) | |||
1188 | int i, j; | 1188 | int i, j; |
1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
1190 | int build_id_matches = 0; | 1190 | int build_id_matches = 0; |
1191 | int retry = 1; | ||
1191 | 1192 | ||
1193 | retry: | ||
1192 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); | 1194 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); |
1193 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1195 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
1194 | goto out; | 1196 | goto out; |
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void) | |||
1301 | previous_key = key; | 1303 | previous_key = key; |
1302 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1304 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
1303 | 1305 | ||
1306 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
1307 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
1308 | * try it one more time. | ||
1309 | */ | ||
1310 | if (build_id_matches < 1 && retry--) { | ||
1311 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
1312 | close(pmu_fd); | ||
1313 | bpf_object__close(obj); | ||
1314 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
1315 | __func__); | ||
1316 | goto retry; | ||
1317 | } | ||
1318 | |||
1304 | if (CHECK(build_id_matches < 1, "build id match", | 1319 | if (CHECK(build_id_matches < 1, "build id match", |
1305 | "Didn't find expected build ID from the map\n")) | 1320 | "Didn't find expected build ID from the map\n")) |
1306 | goto disable_pmu; | 1321 | goto disable_pmu; |
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void) | |||
1341 | int i, j; | 1356 | int i, j; |
1342 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1357 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
1343 | int build_id_matches = 0; | 1358 | int build_id_matches = 0; |
1359 | int retry = 1; | ||
1344 | 1360 | ||
1361 | retry: | ||
1345 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); | 1362 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); |
1346 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1363 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
1347 | return; | 1364 | return; |
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void) | |||
1436 | previous_key = key; | 1453 | previous_key = key; |
1437 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1454 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
1438 | 1455 | ||
1456 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
1457 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
1458 | * try it one more time. | ||
1459 | */ | ||
1460 | if (build_id_matches < 1 && retry--) { | ||
1461 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
1462 | close(pmu_fd); | ||
1463 | bpf_object__close(obj); | ||
1464 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
1465 | __func__); | ||
1466 | goto retry; | ||
1467 | } | ||
1468 | |||
1439 | if (CHECK(build_id_matches < 1, "build id match", | 1469 | if (CHECK(build_id_matches < 1, "build id match", |
1440 | "Didn't find expected build ID from the map\n")) | 1470 | "Didn't find expected build ID from the map\n")) |
1441 | goto disable_pmu; | 1471 | goto disable_pmu; |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index 94fdbf215c14..c4cf6e6d800e 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | |||
@@ -25,6 +25,7 @@ ALL_TESTS=" | |||
25 | lag_unlink_slaves_test | 25 | lag_unlink_slaves_test |
26 | lag_dev_deletion_test | 26 | lag_dev_deletion_test |
27 | vlan_interface_uppers_test | 27 | vlan_interface_uppers_test |
28 | bridge_extern_learn_test | ||
28 | devlink_reload_test | 29 | devlink_reload_test |
29 | " | 30 | " |
30 | NUM_NETIFS=2 | 31 | NUM_NETIFS=2 |
@@ -541,6 +542,25 @@ vlan_interface_uppers_test() | |||
541 | ip link del dev br0 | 542 | ip link del dev br0 |
542 | } | 543 | } |
543 | 544 | ||
545 | bridge_extern_learn_test() | ||
546 | { | ||
547 | # Test that externally learned entries added from user space are | ||
548 | # marked as offloaded | ||
549 | RET=0 | ||
550 | |||
551 | ip link add name br0 type bridge | ||
552 | ip link set dev $swp1 master br0 | ||
553 | |||
554 | bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn | ||
555 | |||
556 | bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload | ||
557 | check_err $? "fdb entry not marked as offloaded when should" | ||
558 | |||
559 | log_test "externally learned fdb entry" | ||
560 | |||
561 | ip link del dev br0 | ||
562 | } | ||
563 | |||
544 | devlink_reload_test() | 564 | devlink_reload_test() |
545 | { | 565 | { |
546 | # Test that after executing all the above configuration tests, a | 566 | # Test that after executing all the above configuration tests, a |
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh index 04c6431b2bd8..b90dff8d3a94 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh | |||
@@ -1,7 +1,7 @@ | |||
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion" | 4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn" |
5 | NUM_NETIFS=4 | 5 | NUM_NETIFS=4 |
6 | CHECK_TC="yes" | 6 | CHECK_TC="yes" |
7 | source lib.sh | 7 | source lib.sh |
@@ -109,6 +109,38 @@ vlan_deletion() | |||
109 | ping_ipv6 | 109 | ping_ipv6 |
110 | } | 110 | } |
111 | 111 | ||
112 | extern_learn() | ||
113 | { | ||
114 | local mac=de:ad:be:ef:13:37 | ||
115 | local ageing_time | ||
116 | |||
117 | # Test that externally learned FDB entries can roam, but not age out | ||
118 | RET=0 | ||
119 | |||
120 | bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1 | ||
121 | |||
122 | bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 | ||
123 | check_err $? "Did not find FDB entry when should" | ||
124 | |||
125 | # Wait for 10 seconds after the ageing time to make sure the FDB entry | ||
126 | # was not aged out | ||
127 | ageing_time=$(bridge_ageing_time_get br0) | ||
128 | sleep $((ageing_time + 10)) | ||
129 | |||
130 | bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 | ||
131 | check_err $? "FDB entry was aged out when should not" | ||
132 | |||
133 | $MZ $h2 -c 1 -p 64 -a $mac -t ip -q | ||
134 | |||
135 | bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37 | ||
136 | check_err $? "FDB entry did not roam when should" | ||
137 | |||
138 | log_test "Externally learned FDB entry - ageing & roaming" | ||
139 | |||
140 | bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null | ||
141 | bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null | ||
142 | } | ||
143 | |||
112 | trap cleanup EXIT | 144 | trap cleanup EXIT |
113 | 145 | ||
114 | setup_prepare | 146 | setup_prepare |
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c index 2e563d17cf0c..d1bbafb16f47 100644 --- a/tools/testing/selftests/networking/timestamping/txtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c | |||
@@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) | |||
240 | cm->cmsg_type == IP_RECVERR) || | 240 | cm->cmsg_type == IP_RECVERR) || |
241 | (cm->cmsg_level == SOL_IPV6 && | 241 | (cm->cmsg_level == SOL_IPV6 && |
242 | cm->cmsg_type == IPV6_RECVERR) || | 242 | cm->cmsg_type == IPV6_RECVERR) || |
243 | (cm->cmsg_level = SOL_PACKET && | 243 | (cm->cmsg_level == SOL_PACKET && |
244 | cm->cmsg_type == PACKET_TX_TIMESTAMP)) { | 244 | cm->cmsg_type == PACKET_TX_TIMESTAMP)) { |
245 | serr = (void *) CMSG_DATA(cm); | 245 | serr = (void *) CMSG_DATA(cm); |
246 | if (serr->ee_errno != ENOMSG || | 246 | if (serr->ee_errno != ENOMSG || |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json index 637ea0219617..0da3545cabdb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json | |||
@@ -17,7 +17,7 @@ | |||
17 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", | 17 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", |
18 | "expExitCode": "0", | 18 | "expExitCode": "0", |
19 | "verifyCmd": "$TC actions get action ife index 2", | 19 | "verifyCmd": "$TC actions get action ife index 2", |
20 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", | 20 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2", |
21 | "matchCount": "1", | 21 | "matchCount": "1", |
22 | "teardown": [ | 22 | "teardown": [ |
23 | "$TC actions flush action ife" | 23 | "$TC actions flush action ife" |
@@ -41,7 +41,7 @@ | |||
41 | "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", | 41 | "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", |
42 | "expExitCode": "0", | 42 | "expExitCode": "0", |
43 | "verifyCmd": "$TC actions get action ife index 2", | 43 | "verifyCmd": "$TC actions get action ife index 2", |
44 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", | 44 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2", |
45 | "matchCount": "1", | 45 | "matchCount": "1", |
46 | "teardown": [ | 46 | "teardown": [ |
47 | "$TC actions flush action ife" | 47 | "$TC actions flush action ife" |
@@ -65,7 +65,7 @@ | |||
65 | "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", | 65 | "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", |
66 | "expExitCode": "0", | 66 | "expExitCode": "0", |
67 | "verifyCmd": "$TC actions get action ife index 2", | 67 | "verifyCmd": "$TC actions get action ife index 2", |
68 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", | 68 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2", |
69 | "matchCount": "1", | 69 | "matchCount": "1", |
70 | "teardown": [ | 70 | "teardown": [ |
71 | "$TC actions flush action ife" | 71 | "$TC actions flush action ife" |
@@ -89,7 +89,7 @@ | |||
89 | "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", | 89 | "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", |
90 | "expExitCode": "0", | 90 | "expExitCode": "0", |
91 | "verifyCmd": "$TC actions get action ife index 2", | 91 | "verifyCmd": "$TC actions get action ife index 2", |
92 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", | 92 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2", |
93 | "matchCount": "1", | 93 | "matchCount": "1", |
94 | "teardown": [ | 94 | "teardown": [ |
95 | "$TC actions flush action ife" | 95 | "$TC actions flush action ife" |
@@ -113,7 +113,7 @@ | |||
113 | "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", | 113 | "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", |
114 | "expExitCode": "0", | 114 | "expExitCode": "0", |
115 | "verifyCmd": "$TC actions get action ife index 2", | 115 | "verifyCmd": "$TC actions get action ife index 2", |
116 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", | 116 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2", |
117 | "matchCount": "1", | 117 | "matchCount": "1", |
118 | "teardown": [ | 118 | "teardown": [ |
119 | "$TC actions flush action ife" | 119 | "$TC actions flush action ife" |
@@ -137,7 +137,7 @@ | |||
137 | "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", | 137 | "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", |
138 | "expExitCode": "0", | 138 | "expExitCode": "0", |
139 | "verifyCmd": "$TC actions get action ife index 2", | 139 | "verifyCmd": "$TC actions get action ife index 2", |
140 | "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", | 140 | "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2", |
141 | "matchCount": "1", | 141 | "matchCount": "1", |
142 | "teardown": [ | 142 | "teardown": [ |
143 | "$TC actions flush action ife" | 143 | "$TC actions flush action ife" |
@@ -161,7 +161,7 @@ | |||
161 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", | 161 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", |
162 | "expExitCode": "0", | 162 | "expExitCode": "0", |
163 | "verifyCmd": "$TC actions get action ife index 90", | 163 | "verifyCmd": "$TC actions get action ife index 90", |
164 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90", | 164 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90", |
165 | "matchCount": "1", | 165 | "matchCount": "1", |
166 | "teardown": [ | 166 | "teardown": [ |
167 | "$TC actions flush action ife" | 167 | "$TC actions flush action ife" |
@@ -185,7 +185,7 @@ | |||
185 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", | 185 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", |
186 | "expExitCode": "255", | 186 | "expExitCode": "255", |
187 | "verifyCmd": "$TC actions get action ife index 90", | 187 | "verifyCmd": "$TC actions get action ife index 90", |
188 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", | 188 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90", |
189 | "matchCount": "0", | 189 | "matchCount": "0", |
190 | "teardown": [] | 190 | "teardown": [] |
191 | }, | 191 | }, |
@@ -207,7 +207,7 @@ | |||
207 | "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", | 207 | "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", |
208 | "expExitCode": "0", | 208 | "expExitCode": "0", |
209 | "verifyCmd": "$TC actions get action ife index 9", | 209 | "verifyCmd": "$TC actions get action ife index 9", |
210 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", | 210 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9", |
211 | "matchCount": "1", | 211 | "matchCount": "1", |
212 | "teardown": [ | 212 | "teardown": [ |
213 | "$TC actions flush action ife" | 213 | "$TC actions flush action ife" |
@@ -231,7 +231,7 @@ | |||
231 | "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", | 231 | "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", |
232 | "expExitCode": "0", | 232 | "expExitCode": "0", |
233 | "verifyCmd": "$TC actions get action ife index 9", | 233 | "verifyCmd": "$TC actions get action ife index 9", |
234 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", | 234 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9", |
235 | "matchCount": "1", | 235 | "matchCount": "1", |
236 | "teardown": [ | 236 | "teardown": [ |
237 | "$TC actions flush action ife" | 237 | "$TC actions flush action ife" |
@@ -255,7 +255,7 @@ | |||
255 | "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", | 255 | "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", |
256 | "expExitCode": "0", | 256 | "expExitCode": "0", |
257 | "verifyCmd": "$TC actions get action ife index 9", | 257 | "verifyCmd": "$TC actions get action ife index 9", |
258 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", | 258 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9", |
259 | "matchCount": "1", | 259 | "matchCount": "1", |
260 | "teardown": [ | 260 | "teardown": [ |
261 | "$TC actions flush action ife" | 261 | "$TC actions flush action ife" |
@@ -279,7 +279,7 @@ | |||
279 | "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", | 279 | "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", |
280 | "expExitCode": "0", | 280 | "expExitCode": "0", |
281 | "verifyCmd": "$TC actions get action ife index 9", | 281 | "verifyCmd": "$TC actions get action ife index 9", |
282 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", | 282 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9", |
283 | "matchCount": "1", | 283 | "matchCount": "1", |
284 | "teardown": [ | 284 | "teardown": [ |
285 | "$TC actions flush action ife" | 285 | "$TC actions flush action ife" |
@@ -303,7 +303,7 @@ | |||
303 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", | 303 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", |
304 | "expExitCode": "0", | 304 | "expExitCode": "0", |
305 | "verifyCmd": "$TC actions get action ife index 9", | 305 | "verifyCmd": "$TC actions get action ife index 9", |
306 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", | 306 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9", |
307 | "matchCount": "1", | 307 | "matchCount": "1", |
308 | "teardown": [ | 308 | "teardown": [ |
309 | "$TC actions flush action ife" | 309 | "$TC actions flush action ife" |
@@ -327,7 +327,7 @@ | |||
327 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", | 327 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", |
328 | "expExitCode": "0", | 328 | "expExitCode": "0", |
329 | "verifyCmd": "$TC actions get action ife index 9", | 329 | "verifyCmd": "$TC actions get action ife index 9", |
330 | "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", | 330 | "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9", |
331 | "matchCount": "1", | 331 | "matchCount": "1", |
332 | "teardown": [ | 332 | "teardown": [ |
333 | "$TC actions flush action ife" | 333 | "$TC actions flush action ife" |
@@ -351,7 +351,7 @@ | |||
351 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", | 351 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", |
352 | "expExitCode": "0", | 352 | "expExitCode": "0", |
353 | "verifyCmd": "$TC actions get action ife index 99", | 353 | "verifyCmd": "$TC actions get action ife index 99", |
354 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", | 354 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99", |
355 | "matchCount": "1", | 355 | "matchCount": "1", |
356 | "teardown": [ | 356 | "teardown": [ |
357 | "$TC actions flush action ife" | 357 | "$TC actions flush action ife" |
@@ -375,7 +375,7 @@ | |||
375 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", | 375 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", |
376 | "expExitCode": "255", | 376 | "expExitCode": "255", |
377 | "verifyCmd": "$TC actions get action ife index 99", | 377 | "verifyCmd": "$TC actions get action ife index 99", |
378 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", | 378 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99", |
379 | "matchCount": "0", | 379 | "matchCount": "0", |
380 | "teardown": [] | 380 | "teardown": [] |
381 | }, | 381 | }, |
@@ -397,7 +397,7 @@ | |||
397 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", | 397 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", |
398 | "expExitCode": "0", | 398 | "expExitCode": "0", |
399 | "verifyCmd": "$TC actions get action ife index 1", | 399 | "verifyCmd": "$TC actions get action ife index 1", |
400 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", | 400 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1", |
401 | "matchCount": "1", | 401 | "matchCount": "1", |
402 | "teardown": [ | 402 | "teardown": [ |
403 | "$TC actions flush action ife" | 403 | "$TC actions flush action ife" |
@@ -421,7 +421,7 @@ | |||
421 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", | 421 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", |
422 | "expExitCode": "0", | 422 | "expExitCode": "0", |
423 | "verifyCmd": "$TC actions get action ife index 1", | 423 | "verifyCmd": "$TC actions get action ife index 1", |
424 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", | 424 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1", |
425 | "matchCount": "1", | 425 | "matchCount": "1", |
426 | "teardown": [ | 426 | "teardown": [ |
427 | "$TC actions flush action ife" | 427 | "$TC actions flush action ife" |
@@ -445,7 +445,7 @@ | |||
445 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", | 445 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", |
446 | "expExitCode": "0", | 446 | "expExitCode": "0", |
447 | "verifyCmd": "$TC actions get action ife index 1", | 447 | "verifyCmd": "$TC actions get action ife index 1", |
448 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", | 448 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", |
449 | "matchCount": "1", | 449 | "matchCount": "1", |
450 | "teardown": [ | 450 | "teardown": [ |
451 | "$TC actions flush action ife" | 451 | "$TC actions flush action ife" |
@@ -469,7 +469,7 @@ | |||
469 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", | 469 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", |
470 | "expExitCode": "0", | 470 | "expExitCode": "0", |
471 | "verifyCmd": "$TC actions get action ife index 1", | 471 | "verifyCmd": "$TC actions get action ife index 1", |
472 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", | 472 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", |
473 | "matchCount": "1", | 473 | "matchCount": "1", |
474 | "teardown": [ | 474 | "teardown": [ |
475 | "$TC actions flush action ife" | 475 | "$TC actions flush action ife" |
@@ -493,7 +493,7 @@ | |||
493 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", | 493 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", |
494 | "expExitCode": "0", | 494 | "expExitCode": "0", |
495 | "verifyCmd": "$TC actions get action ife index 77", | 495 | "verifyCmd": "$TC actions get action ife index 77", |
496 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", | 496 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77", |
497 | "matchCount": "1", | 497 | "matchCount": "1", |
498 | "teardown": [ | 498 | "teardown": [ |
499 | "$TC actions flush action ife" | 499 | "$TC actions flush action ife" |
@@ -517,7 +517,7 @@ | |||
517 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", | 517 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", |
518 | "expExitCode": "0", | 518 | "expExitCode": "0", |
519 | "verifyCmd": "$TC actions get action ife index 77", | 519 | "verifyCmd": "$TC actions get action ife index 77", |
520 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", | 520 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77", |
521 | "matchCount": "1", | 521 | "matchCount": "1", |
522 | "teardown": [ | 522 | "teardown": [ |
523 | "$TC actions flush action ife" | 523 | "$TC actions flush action ife" |
@@ -541,7 +541,7 @@ | |||
541 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", | 541 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", |
542 | "expExitCode": "0", | 542 | "expExitCode": "0", |
543 | "verifyCmd": "$TC actions get action ife index 77", | 543 | "verifyCmd": "$TC actions get action ife index 77", |
544 | "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", | 544 | "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77", |
545 | "matchCount": "1", | 545 | "matchCount": "1", |
546 | "teardown": [ | 546 | "teardown": [ |
547 | "$TC actions flush action ife" | 547 | "$TC actions flush action ife" |
@@ -565,7 +565,7 @@ | |||
565 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", | 565 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", |
566 | "expExitCode": "0", | 566 | "expExitCode": "0", |
567 | "verifyCmd": "$TC actions get action ife index 1", | 567 | "verifyCmd": "$TC actions get action ife index 1", |
568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", | 568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1", |
569 | "matchCount": "1", | 569 | "matchCount": "1", |
570 | "teardown": [ | 570 | "teardown": [ |
571 | "$TC actions flush action ife" | 571 | "$TC actions flush action ife" |
@@ -589,7 +589,7 @@ | |||
589 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", | 589 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", |
590 | "expExitCode": "255", | 590 | "expExitCode": "255", |
591 | "verifyCmd": "$TC actions get action ife index 1", | 591 | "verifyCmd": "$TC actions get action ife index 1", |
592 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", | 592 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1", |
593 | "matchCount": "0", | 593 | "matchCount": "0", |
594 | "teardown": [] | 594 | "teardown": [] |
595 | }, | 595 | }, |
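The hunks above and below all make the same relaxation: the literal "type 0xED3E" (or 0xFEFE, 0xABBA, 0xABCD, ...) token in each ife encode matchPattern becomes "0[xX]...", so the test passes whether the tc binary prints the hex prefix in lower or upper case. A minimal sketch of the effect, assuming illustrative sample strings rather than captured tc output:

import re

# Hypothetical check mirroring the relaxed matchPattern: the IFE type may be
# printed as "0xED3E" or "0XED3E" depending on the tc binary, so the pattern
# accepts either prefix spelling. Sample strings are illustrative only.
pattern = re.compile(
    r"action order [0-9]*: ife encode action continue"
    r".*type 0[xX]ED3E.*use tcindex 1.*index 1"
)

for sample in (
    "action order 1: ife encode action continue type 0xED3E use tcindex 1 index 1",
    "action order 1: ife encode action continue type 0XED3E use tcindex 1 index 1",
):
    assert pattern.search(sample), sample
print("both hex-prefix spellings match")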
@@ -611,7 +611,7 @@ | |||
611 | "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", | 611 | "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", |
612 | "expExitCode": "0", | 612 | "expExitCode": "0", |
613 | "verifyCmd": "$TC actions get action ife index 1", | 613 | "verifyCmd": "$TC actions get action ife index 1", |
614 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", | 614 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1", |
615 | "matchCount": "1", | 615 | "matchCount": "1", |
616 | "teardown": [ | 616 | "teardown": [ |
617 | "$TC actions flush action ife" | 617 | "$TC actions flush action ife" |
@@ -635,7 +635,7 @@ | |||
635 | "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", | 635 | "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", |
636 | "expExitCode": "0", | 636 | "expExitCode": "0", |
637 | "verifyCmd": "$TC actions get action ife index 1", | 637 | "verifyCmd": "$TC actions get action ife index 1", |
638 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", | 638 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", |
639 | "matchCount": "1", | 639 | "matchCount": "1", |
640 | "teardown": [ | 640 | "teardown": [ |
641 | "$TC actions flush action ife" | 641 | "$TC actions flush action ife" |
@@ -659,7 +659,7 @@ | |||
659 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", | 659 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", |
660 | "expExitCode": "0", | 660 | "expExitCode": "0", |
661 | "verifyCmd": "$TC actions get action ife index 11", | 661 | "verifyCmd": "$TC actions get action ife index 11", |
662 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", | 662 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", |
663 | "matchCount": "1", | 663 | "matchCount": "1", |
664 | "teardown": [ | 664 | "teardown": [ |
665 | "$TC actions flush action ife" | 665 | "$TC actions flush action ife" |
@@ -683,7 +683,7 @@ | |||
683 | "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", | 683 | "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", |
684 | "expExitCode": "0", | 684 | "expExitCode": "0", |
685 | "verifyCmd": "$TC actions get action ife index 1", | 685 | "verifyCmd": "$TC actions get action ife index 1", |
686 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", | 686 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1", |
687 | "matchCount": "1", | 687 | "matchCount": "1", |
688 | "teardown": [ | 688 | "teardown": [ |
689 | "$TC actions flush action ife" | 689 | "$TC actions flush action ife" |
@@ -707,7 +707,7 @@ | |||
707 | "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", | 707 | "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", |
708 | "expExitCode": "0", | 708 | "expExitCode": "0", |
709 | "verifyCmd": "$TC actions get action ife index 21", | 709 | "verifyCmd": "$TC actions get action ife index 21", |
710 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21", | 710 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21", |
711 | "matchCount": "1", | 711 | "matchCount": "1", |
712 | "teardown": [ | 712 | "teardown": [ |
713 | "$TC actions flush action ife" | 713 | "$TC actions flush action ife" |
@@ -731,7 +731,7 @@ | |||
731 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", | 731 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", |
732 | "expExitCode": "0", | 732 | "expExitCode": "0", |
733 | "verifyCmd": "$TC actions get action ife index 21", | 733 | "verifyCmd": "$TC actions get action ife index 21", |
734 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", | 734 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21", |
735 | "matchCount": "1", | 735 | "matchCount": "1", |
736 | "teardown": [ | 736 | "teardown": [ |
737 | "$TC actions flush action ife" | 737 | "$TC actions flush action ife" |
@@ -739,7 +739,7 @@ | |||
739 | }, | 739 | }, |
740 | { | 740 | { |
741 | "id": "fac3", | 741 | "id": "fac3", |
742 | "name": "Create valid ife encode action with index at 32-bit maximnum", | 742 | "name": "Create valid ife encode action with index at 32-bit maximum", |
743 | "category": [ | 743 | "category": [ |
744 | "actions", | 744 | "actions", |
745 | "ife" | 745 | "ife" |
@@ -755,7 +755,7 @@ | |||
755 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", | 755 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", |
756 | "expExitCode": "0", | 756 | "expExitCode": "0", |
757 | "verifyCmd": "$TC actions get action ife index 4294967295", | 757 | "verifyCmd": "$TC actions get action ife index 4294967295", |
758 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", | 758 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295", |
759 | "matchCount": "1", | 759 | "matchCount": "1", |
760 | "teardown": [ | 760 | "teardown": [ |
761 | "$TC actions flush action ife" | 761 | "$TC actions flush action ife" |
@@ -779,7 +779,7 @@ | |||
779 | "cmdUnderTest": "$TC actions add action ife decode pass index 1", | 779 | "cmdUnderTest": "$TC actions add action ife decode pass index 1", |
780 | "expExitCode": "0", | 780 | "expExitCode": "0", |
781 | "verifyCmd": "$TC actions get action ife index 1", | 781 | "verifyCmd": "$TC actions get action ife index 1", |
782 | "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 782 | "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
783 | "matchCount": "1", | 783 | "matchCount": "1", |
784 | "teardown": [ | 784 | "teardown": [ |
785 | "$TC actions flush action ife" | 785 | "$TC actions flush action ife" |
@@ -803,7 +803,7 @@ | |||
803 | "cmdUnderTest": "$TC actions add action ife decode pipe index 1", | 803 | "cmdUnderTest": "$TC actions add action ife decode pipe index 1", |
804 | "expExitCode": "0", | 804 | "expExitCode": "0", |
805 | "verifyCmd": "$TC actions get action ife index 1", | 805 | "verifyCmd": "$TC actions get action ife index 1", |
806 | "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 806 | "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
807 | "matchCount": "1", | 807 | "matchCount": "1", |
808 | "teardown": [ | 808 | "teardown": [ |
809 | "$TC actions flush action ife" | 809 | "$TC actions flush action ife" |
@@ -827,7 +827,7 @@ | |||
827 | "cmdUnderTest": "$TC actions add action ife decode continue index 1", | 827 | "cmdUnderTest": "$TC actions add action ife decode continue index 1", |
828 | "expExitCode": "0", | 828 | "expExitCode": "0", |
829 | "verifyCmd": "$TC actions get action ife index 1", | 829 | "verifyCmd": "$TC actions get action ife index 1", |
830 | "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 830 | "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
831 | "matchCount": "1", | 831 | "matchCount": "1", |
832 | "teardown": [ | 832 | "teardown": [ |
833 | "$TC actions flush action ife" | 833 | "$TC actions flush action ife" |
@@ -851,7 +851,7 @@ | |||
851 | "cmdUnderTest": "$TC actions add action ife decode drop index 1", | 851 | "cmdUnderTest": "$TC actions add action ife decode drop index 1", |
852 | "expExitCode": "0", | 852 | "expExitCode": "0", |
853 | "verifyCmd": "$TC actions get action ife index 1", | 853 | "verifyCmd": "$TC actions get action ife index 1", |
854 | "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 854 | "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
855 | "matchCount": "1", | 855 | "matchCount": "1", |
856 | "teardown": [ | 856 | "teardown": [ |
857 | "$TC actions flush action ife" | 857 | "$TC actions flush action ife" |
@@ -875,7 +875,7 @@ | |||
875 | "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", | 875 | "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", |
876 | "expExitCode": "0", | 876 | "expExitCode": "0", |
877 | "verifyCmd": "$TC actions get action ife index 1", | 877 | "verifyCmd": "$TC actions get action ife index 1", |
878 | "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 878 | "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
879 | "matchCount": "1", | 879 | "matchCount": "1", |
880 | "teardown": [ | 880 | "teardown": [ |
881 | "$TC actions flush action ife" | 881 | "$TC actions flush action ife" |
@@ -899,7 +899,7 @@ | |||
899 | "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", | 899 | "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", |
900 | "expExitCode": "0", | 900 | "expExitCode": "0", |
901 | "verifyCmd": "$TC actions get action ife index 1", | 901 | "verifyCmd": "$TC actions get action ife index 1", |
902 | "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 902 | "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
903 | "matchCount": "1", | 903 | "matchCount": "1", |
904 | "teardown": [ | 904 | "teardown": [ |
905 | "$TC actions flush action ife" | 905 | "$TC actions flush action ife" |
@@ -923,7 +923,7 @@ | |||
923 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", | 923 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", |
924 | "expExitCode": "255", | 924 | "expExitCode": "255", |
925 | "verifyCmd": "$TC actions get action ife index 4294967295999", | 925 | "verifyCmd": "$TC actions get action ife index 4294967295999", |
926 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", | 926 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999", |
927 | "matchCount": "0", | 927 | "matchCount": "0", |
928 | "teardown": [] | 928 | "teardown": [] |
929 | }, | 929 | }, |
@@ -945,7 +945,7 @@ | |||
945 | "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", | 945 | "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", |
946 | "expExitCode": "255", | 946 | "expExitCode": "255", |
947 | "verifyCmd": "$TC actions get action ife index 4", | 947 | "verifyCmd": "$TC actions get action ife index 4", |
948 | "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", | 948 | "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4", |
949 | "matchCount": "0", | 949 | "matchCount": "0", |
950 | "teardown": [] | 950 | "teardown": [] |
951 | }, | 951 | }, |
@@ -967,7 +967,7 @@ | |||
967 | "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", | 967 | "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", |
968 | "expExitCode": "0", | 968 | "expExitCode": "0", |
969 | "verifyCmd": "$TC actions get action ife index 4", | 969 | "verifyCmd": "$TC actions get action ife index 4", |
970 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", | 970 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", |
971 | "matchCount": "1", | 971 | "matchCount": "1", |
972 | "teardown": [ | 972 | "teardown": [ |
973 | "$TC actions flush action ife" | 973 | "$TC actions flush action ife" |
@@ -991,7 +991,7 @@ | |||
991 | "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", | 991 | "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", |
992 | "expExitCode": "255", | 992 | "expExitCode": "255", |
993 | "verifyCmd": "$TC actions get action ife index 4", | 993 | "verifyCmd": "$TC actions get action ife index 4", |
994 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", | 994 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4", |
995 | "matchCount": "0", | 995 | "matchCount": "0", |
996 | "teardown": [] | 996 | "teardown": [] |
997 | }, | 997 | }, |
@@ -1013,7 +1013,7 @@ | |||
1013 | "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", | 1013 | "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", |
1014 | "expExitCode": "255", | 1014 | "expExitCode": "255", |
1015 | "verifyCmd": "$TC actions get action ife index 4", | 1015 | "verifyCmd": "$TC actions get action ife index 4", |
1016 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", | 1016 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4", |
1017 | "matchCount": "0", | 1017 | "matchCount": "0", |
1018 | "teardown": [] | 1018 | "teardown": [] |
1019 | }, | 1019 | }, |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index 10b2d894e436..e7e15a7336b6 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json | |||
@@ -82,35 +82,6 @@ | |||
82 | ] | 82 | ] |
83 | }, | 83 | }, |
84 | { | 84 | { |
85 | "id": "ba4e", | ||
86 | "name": "Add tunnel_key set action with missing mandatory id parameter", | ||
87 | "category": [ | ||
88 | "actions", | ||
89 | "tunnel_key" | ||
90 | ], | ||
91 | "setup": [ | ||
92 | [ | ||
93 | "$TC actions flush action tunnel_key", | ||
94 | 0, | ||
95 | 1, | ||
96 | 255 | ||
97 | ] | ||
98 | ], | ||
99 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2", | ||
100 | "expExitCode": "255", | ||
101 | "verifyCmd": "$TC actions list action tunnel_key", | ||
102 | "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", | ||
103 | "matchCount": "0", | ||
104 | "teardown": [ | ||
105 | [ | ||
106 | "$TC actions flush action tunnel_key", | ||
107 | 0, | ||
108 | 1, | ||
109 | 255 | ||
110 | ] | ||
111 | ] | ||
112 | }, | ||
113 | { | ||
114 | "id": "a5e0", | 85 | "id": "a5e0", |
115 | "name": "Add tunnel_key set action with invalid src_ip parameter", | 86 | "name": "Add tunnel_key set action with invalid src_ip parameter", |
116 | "category": [ | 87 | "category": [ |
@@ -634,7 +605,7 @@ | |||
634 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", | 605 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", |
635 | "expExitCode": "0", | 606 | "expExitCode": "0", |
636 | "verifyCmd": "$TC actions get action tunnel_key index 4", | 607 | "verifyCmd": "$TC actions get action tunnel_key index 4", |
637 | "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", | 608 | "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", |
638 | "matchCount": "1", | 609 | "matchCount": "1", |
639 | "teardown": [ | 610 | "teardown": [ |
640 | "$TC actions flush action tunnel_key" | 611 | "$TC actions flush action tunnel_key" |