723 files changed, 7751 insertions, 3854 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
   - 'apei_estatus_for_each_section'
   - 'ata_for_each_dev'
   - 'ata_for_each_link'
+  - '__ata_qc_for_each'
+  - 'ata_qc_for_each'
+  - 'ata_qc_for_each_raw'
+  - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
   - 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
   - 'blk_queue_for_each_rl'
   - 'bond_for_each_slave'
   - 'bond_for_each_slave_rcu'
+  - 'bpf_for_each_spilled_reg'
   - 'btree_for_each_safe128'
   - 'btree_for_each_safe32'
   - 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
+  - 'drm_atomic_for_each_plane_damage'
+  - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
   - 'for_each_bio'
   - 'for_each_board_func_rsrc'
   - 'for_each_bvec'
+  - 'for_each_card_components'
+  - 'for_each_card_links'
+  - 'for_each_card_links_safe'
+  - 'for_each_card_prelinks'
+  - 'for_each_card_rtds'
+  - 'for_each_card_rtds_safe'
+  - 'for_each_cgroup_storage_type'
   - 'for_each_child_of_node'
   - 'for_each_clear_bit'
   - 'for_each_clear_bit_from'
   - 'for_each_cmsghdr'
   - 'for_each_compatible_node'
+  - 'for_each_component_dais'
+  - 'for_each_component_dais_safe'
+  - 'for_each_comp_order'
   - 'for_each_console'
   - 'for_each_cpu'
   - 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
   - 'for_each_dma_cap_mask'
+  - 'for_each_dpcm_be'
+  - 'for_each_dpcm_be_rollback'
+  - 'for_each_dpcm_be_safe'
+  - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
   - 'for_each_iommu'
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
+  - 'for_each_link_codecs'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
   - 'for_each_mem_range_rev'
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
+  - 'for_each_msi_entry_safe'
   - 'for_each_net'
   - 'for_each_netdev'
   - 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
   - 'for_each_node_with_property'
   - 'for_each_of_allnodes'
   - 'for_each_of_allnodes_from'
+  - 'for_each_of_cpu_node'
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
   - 'for_each_oldnew_plane_in_state'
+  - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
   - 'for_each_old_plane_in_state'
   - 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
   - 'for_each_process'
   - 'for_each_process_thread'
   - 'for_each_property_of_node'
+  - 'for_each_registered_fb'
   - 'for_each_reserved_mem_region'
-  - 'for_each_resv_unavail_range'
+  - 'for_each_rtd_codec_dai'
+  - 'for_each_rtd_codec_dai_rollback'
   - 'for_each_rtdcom'
   - 'for_each_rtdcom_safe'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
   - 'for_each_sg'
   - 'for_each_sg_page'
+  - 'for_each_sibling_event'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
   - 'hlist_nulls_for_each_entry_from'
   - 'hlist_nulls_for_each_entry_rcu'
   - 'hlist_nulls_for_each_entry_safe'
+  - 'i3c_bus_for_each_i2cdev'
+  - 'i3c_bus_for_each_i3cdev'
   - 'ide_host_for_each_port'
   - 'ide_port_for_each_dev'
   - 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
+  - 'list_for_each_codec'
+  - 'list_for_each_codec_safe'
   - 'list_for_each_entry'
   - 'list_for_each_entry_continue'
   - 'list_for_each_entry_continue_rcu'
   - 'list_for_each_entry_continue_reverse'
   - 'list_for_each_entry_from'
+  - 'list_for_each_entry_from_rcu'
   - 'list_for_each_entry_from_reverse'
   - 'list_for_each_entry_lockless'
   - 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
   - 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
   - 'sk_nulls_for_each'
   - 'sk_nulls_for_each_from'
   - 'sk_nulls_for_each_rcu'
+  - 'snd_array_for_each'
   - 'snd_pcm_group_for_each_entry'
   - 'snd_soc_dapm_widget_for_each_path'
   - 'snd_soc_dapm_widget_for_each_path_safe'
   - 'snd_soc_dapm_widget_for_each_sink_path'
   - 'snd_soc_dapm_widget_for_each_source_path'
   - 'tb_property_for_each'
+  - 'tcf_exts_for_each_action'
   - 'udp_portaddr_for_each_entry'
   - 'udp_portaddr_for_each_entry_rcu'
   - 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_dst_buf_safe'
   - 'v4l2_m2m_for_each_src_buf'
   - 'v4l2_m2m_for_each_src_buf_safe'
+  - 'virtio_device_for_each_vq'
+  - 'xa_for_each'
+  - 'xas_for_each'
+  - 'xas_for_each_conflict'
+  - 'xas_for_each_marked'
   - 'zorro_for_each_dev'
 
 #IncludeBlocks: Preserve	# Unknown to clang-format-5.0
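For readers unfamiliar with the option: ForEachMacros tells clang-format which identifiers are iteration macros, so a call written in loop style keeps loop-style spacing and braces instead of being reflowed as a function call. A toy, userspace-only illustration follows; the macro below is made up for this example and is not a kernel API.

	#include <stdio.h>

	/* A toy iterator macro.  If its name were listed under ForEachMacros,
	 * clang-format would treat the call site below like a for statement
	 * (control-statement spacing, loop-style braces) rather than a call.
	 */
	#define my_for_each(i, n) for ((i) = 0; (i) < (n); (i)++)

	int main(void)
	{
		int i;

		my_for_each(i, 3) {
			printf("iteration %d\n", i);
		}
		return 0;
	}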
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d81745..7cc9e368c1e9 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
 ------------------------------
 A: YES. BPF instructions, arguments to BPF programs, set of helper
 functions and their arguments, recognized return codes are all part
-of ABI. However when tracing programs are using bpf_probe_read() helper
-to walk kernel internal datastructures and compile with kernel
-internal headers these accesses can and will break with newer
-kernels. The union bpf_attr -> kern_version is checked at load time
-to prevent accidentally loading kprobe-based bpf programs written
-for a different kernel. Networking programs don't do kern_version check.
+of ABI. However there is one specific exception to tracing programs
+which are using helpers like bpf_probe_read() to walk kernel internal
+data structures and compile with kernel internal headers. Both of these
+kernel internals are subject to change and can break with newer kernels
+such that the program needs to be adapted accordingly.
 
 Q: How much stack space a BPF program uses?
 -------------------------------------------
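To make the exception above concrete, here is a rough sketch of the kind of tracing program the text describes: it is compiled against kernel-internal headers and uses bpf_probe_read() to follow a pointer inside struct task_struct, so a layout change in a newer kernel changes what it reads. The section name and helper wrappers follow the usual samples/libbpf conventions and are illustrative assumptions, not taken from this patch.

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/sched.h>
	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"	/* SEC(), bpf_probe_read(), bpf_get_current_task() */

	/* kprobe program that walks a kernel-internal structure.  The offset of
	 * 'mm' within struct task_struct comes from the headers this object was
	 * built against -- exactly the part that is not a stable ABI.
	 */
	SEC("kprobe/schedule")
	int trace_sched(void *ctx)
	{
		struct task_struct *task = (struct task_struct *)bpf_get_current_task();
		struct mm_struct *mm = NULL;

		bpf_probe_read(&mm, sizeof(mm), &task->mm);
		return 0;
	}

	char _license[] SEC("license") = "GPL";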
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 6a6d67acaf69..5d54b27c6eba 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
 
 Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
 will not need to allocate memory. The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index. Users of the normal
-API will see this entry as containing ``NULL``. If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry. If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index. Users of the
+normal API will see this entry as containing ``NULL``. If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry. If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
 
 If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
 will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally:
  * :c:func:`xa_store_bh`
  * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
  * :c:func:`xa_erase`
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
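For readers who have not used the reserve API, a minimal sketch of the pairing described in the hunk above might look like this; the array name and index are made up for illustration and error handling is trimmed.

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_array);	/* hypothetical array, illustration only */

	/* Reserve index 5 up front so the later store cannot fail on allocation. */
	static int example_reserve_then_store(void *item)
	{
		int err = xa_reserve(&my_array, 5, GFP_KERNEL);

		if (err)
			return err;

		if (!item) {
			/* Nothing to store after all: drop the unused reservation. */
			xa_release(&my_array, 5);
			return 0;
		}

		/* Readers saw NULL at index 5 until this store makes 'item' visible. */
		xa_store(&my_array, 5, item, GFP_KERNEL);
		return 0;
	}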
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
 ===========================================
 
 [1] ARM Linux Kernel documentation - CPUs bindings
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
 ===========================================
 
 [1] ARM Linux Kernel documentation - CPUs bindings
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
 
 [2] ARM Linux Kernel documentation - PSCI bindings
     Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
 Required properties:
 
 - compatible:	standard compatible string for a Primecell peripheral,
-		see Documentation/devicetree/bindings/arm/primecell.txt
+		see Documentation/devicetree/bindings/arm/primecell.yaml
		for more details
		should be: "arm,sp810", "arm,primecell"
 
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
 
 ===============================================================================
 [1] ARM Linux kernel documentation
-    Documentation/devicetree/bindings/arm/cpus.txt
+    Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
 Each clock is assigned an identifier and client nodes use this identifier
 to specify the clock which they consume.
 
-All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
+All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
 * ARM PrimeCell Color LCD Controller PL110/PL111
 
-See also Documentation/devicetree/bindings/arm/primecell.txt
+See also Documentation/devicetree/bindings/arm/primecell.yaml
 
 Required properties:
 
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index ac8df3b871f9..f8759145ce1a 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -27,7 +27,6 @@ Example:
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
-		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
 		    "core",
 		    "iface",
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
 
     "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
     SoCs (either from AP or CP), see
-    Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
-    and
     Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
     for specific details about the offset property.
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
 PPI affinity can be expressed as a single "ppi-partitions" node,
 containing a set of sub-nodes, each with the following property:
 - affinity: Should be a list of phandles to CPU nodes (as described in
-  Documentation/devicetree/bindings/arm/cpus.txt).
+  Documentation/devicetree/bindings/arm/cpus.yaml).
 
 GICv3 has one or more Interrupt Translation Services (ITS) that are
 used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
 = EXAMPLE
 The following example represents the GLINK RPM node on a MSM8996 device, with
 the function for the "rpm_request" channel defined, which is used for
-regualtors and root clocks.
+regulators and root clocks.
 
 apcs_glb: mailbox@9820000 {
 	compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
 - qcom,local-pid:
	Usage: required
	Value type: <u32>
-	Definition: specifies the identfier of the local endpoint of this edge
+	Definition: specifies the identifier of the local endpoint of this edge
 
 - qcom,remote-pid:
	Usage: required
	Value type: <u32>
-	Definition: specifies the identfier of the remote endpoint of this edge
+	Definition: specifies the identifier of the remote endpoint of this edge
 
 = SUBNODES
 Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
	be preserved until there actually is some text is output to the console.
	This option causes fbcon to bind immediately to the fbdev device.
 
+7. fbcon=logo-pos:<location>
+
+	The only possible 'location' is 'center' (without quotes), and when
+	given, the bootup logo is moved from the default top-left corner
+	location to the center of the framebuffer. If more than one logo is
+	displayed due to multiple CPUs, the collected line of logos is moved
+	as a whole.
+
 C. Attaching, Detaching and Unloading
 
 Before going on to how to attach, detach and unload the framebuffer console, an
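In practice the new option is simply appended to the kernel command line, for example fbcon=logo-pos:center; per the added text, 'center' is the only location the option currently understands.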
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..59e86de662cd 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,19 +11,19 @@ Contents:
    batman-adv
    can
    can_ucan_protocol
-   dpaa2/index
-   e100
-   e1000
-   e1000e
-   fm10k
-   igb
-   igbvf
-   ixgb
-   ixgbe
-   ixgbevf
-   i40e
-   iavf
-   ice
+   device_drivers/freescale/dpaa2/index
+   device_drivers/intel/e100
+   device_drivers/intel/e1000
+   device_drivers/intel/e1000e
+   device_drivers/intel/fm10k
+   device_drivers/intel/igb
+   device_drivers/intel/igbvf
+   device_drivers/intel/ixgb
+   device_drivers/intel/ixgbe
+   device_drivers/intel/ixgbevf
+   device_drivers/intel/i40e
+   device_drivers/intel/iavf
+   device_drivers/intel/ice
    kapi
    z8530book
    msg_zerocopy
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
     size should be set when the call is begun.  tx_total_len may not be less
     than zero.
 
-(*) Check to see the completion state of a call so that the caller can assess
-    whether it needs to be retried.
-
-	enum rxrpc_call_completion {
-		RXRPC_CALL_SUCCEEDED,
-		RXRPC_CALL_REMOTELY_ABORTED,
-		RXRPC_CALL_LOCALLY_ABORTED,
-		RXRPC_CALL_LOCAL_ERROR,
-		RXRPC_CALL_NETWORK_ERROR,
-	};
-
-	int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
-				    enum rxrpc_call_completion *_compl,
-				    u32 *_abort_code);
-
-    On return, -EINPROGRESS will be returned if the call is still ongoing; if
-    it is finished, *_compl will be set to indicate the manner of completion,
-    *_abort_code will be set to any abort code that occurred.  0 will be
-    returned on a successful completion, -ECONNABORTED will be returned if the
-    client failed due to a remote abort and anything else will return an
-    appropriate error code.
-
-    The caller should look at this information to decide if it's worth
-    retrying the call.
-
-(*) Retry a client call.
-
-	int rxrpc_kernel_retry_call(struct socket *sock,
-				    struct rxrpc_call *call,
-				    struct sockaddr_rxrpc *srx,
-				    struct key *key);
-
-    This attempts to partially reinitialise a call and submit it again while
-    reusing the original call's Tx queue to avoid the need to repackage and
-    re-encrypt the data to be sent.  call indicates the call to retry, srx the
-    new address to send it to and key the encryption key to use for signing or
-    encrypting the packets.
-
-    For this to work, the first Tx data packet must still be in the transmit
-    queue, and currently this is only permitted for local and network errors
-    and the call must not have been aborted.  Any partially constructed Tx
-    packet is left as is and can continue being filled afterwards.
-
-    It returns 0 if the call was requeued and an error otherwise.
-
 (*) Get call RTT.
 
	u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index b0dfdaaca512..fe8f741193be 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -336,7 +336,26 @@ time client replies ACK, this socket will get another chance to move
 to the accept queue.
 
 
-TCP Fast Open
+* TcpEstabResets
+Defined in `RFC1213 tcpEstabResets`_.
+
+.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpAttemptFails
+Defined in `RFC1213 tcpAttemptFails`_.
+
+.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
+
+* TcpOutRsts
+Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
+the 'segments sent containing the RST flag', but in linux kernel, this
+couner indicates the segments kerenl tried to send. The sending
+process might be failed due to some errors (e.g. memory alloc failed).
+
+.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
+
+
+TCP Fast Path
 ============
 When kernel receives a TCP packet, it has two paths to handler the
 packet, one is fast path, another is slow path. The comment in kernel
@@ -383,8 +402,6 @@ increase 1.
 
 TCP abort
 ========
-
-
 * TcpExtTCPAbortOnData
 It means TCP layer has data in flight, but need to close the
 connection. So TCP layer sends a RST to the other side, indicate the
@@ -545,7 +562,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
 stack of kernel will increase TcpExtTCPSACKReorder for both of the
 above scenarios.
 
-
 DSACK
 =====
 The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@@ -566,13 +582,63 @@ The TCP stack receives an out of order duplicate packet, so it sends a
 DSACK to the sender.
 
 * TcpExtTCPDSACKRecv
-The TCP stack receives a DSACK, which indicate an acknowledged
+The TCP stack receives a DSACK, which indicates an acknowledged
 duplicate packet is received.
 
 * TcpExtTCPDSACKOfoRecv
 The TCP stack receives a DSACK, which indicate an out of order
 duplicate packet is received.
 
+invalid SACK and DSACK
+====================
+When a SACK (or DSACK) block is invalid, a corresponding counter would
+be updated. The validation method is base on the start/end sequence
+number of the SACK block. For more details, please refer the comment
+of the function tcp_is_sackblock_valid in the kernel source code. A
+SACK option could have up to 4 blocks, they are checked
+individually. E.g., if 3 blocks of a SACk is invalid, the
+corresponding counter would be updated 3 times. The comment of the
+`Add counters for discarded SACK blocks`_ patch has additional
+explaination:
+
+.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
+
+* TcpExtTCPSACKDiscard
+This counter indicates how many SACK blocks are invalid. If the invalid
+SACK block is caused by ACK recording, the TCP stack will only ignore
+it and won't update this counter.
+
+* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+When a DSACK block is invalid, one of these two counters would be
+updated. Which counter will be updated depends on the undo_marker flag
+of the TCP socket. If the undo_marker is not set, the TCP stack isn't
+likely to re-transmit any packets, and we still receive an invalid
+DSACK block, the reason might be that the packet is duplicated in the
+middle of the network. In such scenario, TcpExtTCPDSACKIgnoredNoUndo
+will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
+will be updated. As implied in its name, it might be an old packet.
+
+SACK shift
+=========
+The linux networking stack stores data in sk_buff struct (skb for
+short). If a SACK block acrosses multiple skb, the TCP stack will try
+to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
+10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
+15 in skb2 would be moved to skb1. This operation is 'shift'. If a
+SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
+seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
+discard, this operation is 'merge'.
+
+* TcpExtTCPSackShifted
+A skb is shifted
+
+* TcpExtTCPSackMerged
+A skb is merged
+
+* TcpExtTCPSackShiftFallback
+A skb should be shifted or merged, but the TCP stack doesn't do it for
+some reasons.
+
 TCP out of order
 ===============
 * TcpExtTCPOFOQueue
@@ -662,6 +728,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
 .. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
 .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
 
+TCP receive window
+=================
+* TcpExtTCPWantZeroWindowAdv
+Depending on current memory usage, the TCP stack tries to set receive
+window to zero. But the receive window might still be a no-zero
+value. For example, if the previous window size is 10, and the TCP
+stack receives 3 bytes, the current window size would be 7 even if the
+window size calculated by the memory usage is zero.
+
+* TcpExtTCPToZeroWindowAdv
+The TCP receive window is set to zero from a no-zero value.
+
+* TcpExtTCPFromZeroWindowAdv
+The TCP receive window is set to no-zero value from zero.
+
+
+Delayed ACK
+==========
+The TCP Delayed ACK is a technique which is used for reducing the
+packet count in the network. For more details, please refer the
+`Delayed ACK wiki`_
+
+.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
+
+* TcpExtDelayedACKs
+A delayed ACK timer expires. The TCP stack will send a pure ACK packet
+and exit the delayed ACK mode.
+
+* TcpExtDelayedACKLocked
+A delayed ACK timer expires, but the TCP stack can't send an ACK
+immediately due to the socket is locked by a userspace program. The
+TCP stack will send a pure ACK later (after the userspace program
+unlock the socket). When the TCP stack sends the pure ACK later, the
+TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
+mode.
+
+* TcpExtDelayedACKLost
+It will be updated when the TCP stack receives a packet which has been
+ACKed. A Delayed ACK loss might cause this issue, but it would also be
+triggered by other reasons, such as a packet is duplicated in the
+network.
+
+Tail Loss Probe (TLP)
+===================
+TLP is an algorithm which is used to detect TCP packet loss. For more
+details, please refer the `TLP paper`_.
+
+.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
+
+* TcpExtTCPLossProbes
+A TLP probe packet is sent.
+
+* TcpExtTCPLossProbeRecovery
+A packet loss is detected and recovered by TLP.
 
 examples
 =======
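All of the TcpExt counters documented above are exported through /proc/net/netstat (the nstat tool pretty-prints the same data). A small userspace sketch for eyeballing the raw lines, purely illustrative:

	#include <stdio.h>
	#include <string.h>

	/* Print the TcpExt header/value lines from /proc/net/netstat.  Counter
	 * names (TCPLossProbes, TCPSackShifted, ...) appear on the header line
	 * and their values on the following line, in the same column order.
	 */
	int main(void)
	{
		char line[4096];
		FILE *f = fopen("/proc/net/netstat", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "TcpExt:", 7))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}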
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..9d1432e0aaa8 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
 
 Hardware time stamping must also be initialized for each device driver
 that is expected to do hardware time stamping. The parameter is defined in
-/include/linux/net_tstamp.h as:
+include/uapi/linux/net_tstamp.h as:
 
 struct hwtstamp_config {
	int flags;	/* no flags defined right now, must be zero */
@@ -487,7 +487,7 @@ enum {
	HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
 
	/* for the complete list of values, please check
-	 * the include file /include/linux/net_tstamp.h
+	 * the include file include/uapi/linux/net_tstamp.h
	 */
 };
 
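As a reminder of how the corrected header path is consumed, a minimal userspace sketch that hands a hwtstamp_config to a driver via SIOCSHWTSTAMP might look like the following; the interface name and filter choice are arbitrary, and drivers may rewrite the config they are given.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Ask the driver of "eth0" to hardware-timestamp all RX packets and
	 * outgoing packets.  Needs privileges and hardware support.
	 */
	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		return 0;
	}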
diff --git a/MAINTAINERS b/MAINTAINERS
index 3b9e50a50c14..d20cf119ec1d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3052,8 +3052,8 @@ F: include/linux/bcm963xx_nvram.h
 F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3072,9 @@ S: Supported
 F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:	Ariel Elior <ariel.elior@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3249,9 @@ S: Supported
 F:	drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/brocade/bna/
@@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon*
 F:	drivers/i2c/busses/i2c-thunderx*
 
 CAVIUM LIQUIDIO NETWORK DRIVER
-M:	Derek Chickles <derek.chickles@caviumnetworks.com>
-M:	Satanand Burla <satananda.burla@caviumnetworks.com>
-M:	Felix Manlunas <felix.manlunas@caviumnetworks.com>
-M:	Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+M:	Derek Chickles <dchickles@marvell.com>
+M:	Satanand Burla <sburla@marvell.com>
+M:	Felix Manlunas <fmanlunas@marvell.com>
 L:	netdev@vger.kernel.org
 W:	http://www.cavium.com
 S:	Supported
@@ -3979,6 +3978,7 @@ F: drivers/cpufreq/arm_big_little.c
 CPU POWER MONITORING SUBSYSTEM
 M:	Thomas Renninger <trenn@suse.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	tools/power/cpupower/
@@ -8260,6 +8260,7 @@ F: include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 Q:	https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -10688,9 +10689,9 @@ S: Maintained
 F:	drivers/net/netdevsim/*
 
 NETXEN (1/10) GbE SUPPORT
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Rahul Verma <rahul.verma@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	Rahul Verma <rahulv@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/netxen/
@@ -12474,8 +12475,8 @@ S: Supported
 F:	drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:	Ariel Elior <Ariel.Elior@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qed/
@@ -12483,8 +12484,8 @@ F: include/linux/qed/
 F:	drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
-M:	Michal Kalderon <Michal.Kalderon@cavium.com>
-M:	Ariel Elior <Ariel.Elior@cavium.com>
+M:	Michal Kalderon <mkalderon@marvell.com>
+M:	Ariel Elior <aelior@marvell.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qedr/
@@ -12504,7 +12505,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
 F:	drivers/scsi/qla2xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M:	Dept-GELinuxNICDev@cavium.com
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12518,16 +12519,16 @@ F: Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Shahed Shaikh <shshaikh@marvell.com>
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlge/
@@ -15841,6 +15842,7 @@ F: drivers/usb/common/usb-otg-fsm.c
 USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	Documentation/usb/usbip_protocol.txt
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
 endif
 endif
 
+PHONY += prepare0
 
 ifeq ($(KBUILD_EXTMOD),)
 core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
 # archprepare is used in arch Makefiles and when processed asm symlink,
 # version.h and scripts_basic is processed / created.
 
-# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare1 prepare2 prepare3
 
 # prepare3 is used to check if we are building in a separate output directory,
 # and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
 mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
 mrproper-dirs      := $(addprefix _mrproper_,scripts)
 
-PHONY += $(mrproper-dirs) mrproper archmrproper
+PHONY += $(mrproper-dirs) mrproper
 $(mrproper-dirs):
	$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
 
-mrproper: clean archmrproper $(mrproper-dirs)
+mrproper: clean $(mrproper-dirs)
	$(call cmd,rmdirs)
	$(call cmd,rmfiles)
 
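For context on the PHONY hunks above: in the top-level Makefile, PHONY accumulates target names that do not correspond to real files and is later declared as .PHONY, so listing prepare0 there keeps make from treating a stray file named "prepare0" as an already up-to-date target; dropping archmrproper appears to remove a hook with no remaining users.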
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index feed50ce89fa..caa270261521 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 49bfbd879caa..f1b86cef0905 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:21, min:1, num:2, ver:8;
+#else
+	unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
 #include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
@@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
 };
 
 struct cpuinfo_arc_bpu {
-	unsigned int ver, full, num_cache, num_pred;
+	unsigned int ver, full, num_cache, num_pred, ret_stk;
 };
 
 struct cpuinfo_arc_ccm {
@@ -302,7 +310,7 @@ struct cpuinfo_arc {
	struct {
		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
			     fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
-			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
+			     ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
	} extn;
	struct bcr_mpy extn_mpy;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index ee9246184033..202b74c339f0 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h | |||
| @@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x) | |||
| 340 | /* | 340 | /* |
| 341 | * __ffs: Similar to ffs, but zero based (0-31) | 341 | * __ffs: Similar to ffs, but zero based (0-31) |
| 342 | */ | 342 | */ |
| 343 | static inline __attribute__ ((const)) int __ffs(unsigned long word) | 343 | static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) |
| 344 | { | 344 | { |
| 345 | if (!word) | 345 | if (!word) |
| 346 | return word; | 346 | return word; |
| @@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x) | |||
| 400 | /* | 400 | /* |
| 401 | * __ffs: Similar to ffs, but zero based (0-31) | 401 | * __ffs: Similar to ffs, but zero based (0-31) |
| 402 | */ | 402 | */ |
| 403 | static inline __attribute__ ((const)) int __ffs(unsigned long x) | 403 | static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) |
| 404 | { | 404 | { |
| 405 | int n; | 405 | unsigned long n; |
| 406 | 406 | ||
| 407 | asm volatile( | 407 | asm volatile( |
| 408 | " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ | 408 | " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ |
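Note on the return-type change above: the asm-generic __ffs() helper is declared to return unsigned long, and generic bitmap users rely on that, so the ARC versions are aligned with it here. A minimal C sketch of the expected semantics (an illustration of the generic behaviour, not the ARC asm path in the hunk):

    /* zero-based index of the least significant set bit; only defined for word != 0 */
    static inline unsigned long generic___ffs(unsigned long word)
    {
    	unsigned long num = 0;

    	while (!(word & 1UL)) {
    		word >>= 1;
    		num++;
    	}
    	return num;
    }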
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 9185541035cc..6958545390f0 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h | |||
| @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { | |||
| 103 | 103 | ||
| 104 | /* counts condition */ | 104 | /* counts condition */ |
| 105 | [PERF_COUNT_HW_INSTRUCTIONS] = "iall", | 105 | [PERF_COUNT_HW_INSTRUCTIONS] = "iall", |
| 106 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ | 106 | /* All jump instructions that are taken */ |
| 107 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", | ||
| 107 | [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ | 108 | [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ |
| 108 | #ifdef CONFIG_ISA_ARCV2 | 109 | #ifdef CONFIG_ISA_ARCV2 |
| 109 | [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", | 110 | [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", |
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 8aec462d90fb..861a8aea51f9 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c | |||
| @@ -1,15 +1,10 @@ | |||
| 1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | * Linux performance counter support for ARC700 series | 2 | // |
| 3 | * | 3 | // Linux performance counter support for ARC CPUs. |
| 4 | * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com) | 4 | // This code is inspired by the perf support of various other architectures. |
| 5 | * | 5 | // |
| 6 | * This code is inspired by the perf support of various other architectures. | 6 | // Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com) |
| 7 | * | 7 | |
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
| 14 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
| 15 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| @@ -19,12 +14,31 @@ | |||
| 19 | #include <asm/arcregs.h> | 14 | #include <asm/arcregs.h> |
| 20 | #include <asm/stacktrace.h> | 15 | #include <asm/stacktrace.h> |
| 21 | 16 | ||
| 17 | /* HW holds 8 symbols + one for null terminator */ | ||
| 18 | #define ARCPMU_EVENT_NAME_LEN 9 | ||
| 19 | |||
| 20 | enum arc_pmu_attr_groups { | ||
| 21 | ARCPMU_ATTR_GR_EVENTS, | ||
| 22 | ARCPMU_ATTR_GR_FORMATS, | ||
| 23 | ARCPMU_NR_ATTR_GR | ||
| 24 | }; | ||
| 25 | |||
| 26 | struct arc_pmu_raw_event_entry { | ||
| 27 | char name[ARCPMU_EVENT_NAME_LEN]; | ||
| 28 | }; | ||
| 29 | |||
| 22 | struct arc_pmu { | 30 | struct arc_pmu { |
| 23 | struct pmu pmu; | 31 | struct pmu pmu; |
| 24 | unsigned int irq; | 32 | unsigned int irq; |
| 25 | int n_counters; | 33 | int n_counters; |
| 34 | int n_events; | ||
| 26 | u64 max_period; | 35 | u64 max_period; |
| 27 | int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; | 36 | int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; |
| 37 | |||
| 38 | struct arc_pmu_raw_event_entry *raw_entry; | ||
| 39 | struct attribute **attrs; | ||
| 40 | struct perf_pmu_events_attr *attr; | ||
| 41 | const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1]; | ||
| 28 | }; | 42 | }; |
| 29 | 43 | ||
| 30 | struct arc_pmu_cpu { | 44 | struct arc_pmu_cpu { |
| @@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data) | |||
| 49 | { | 63 | { |
| 50 | struct arc_callchain_trace *ctrl = data; | 64 | struct arc_callchain_trace *ctrl = data; |
| 51 | struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; | 65 | struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; |
| 66 | |||
| 52 | perf_callchain_store(entry, addr); | 67 | perf_callchain_store(entry, addr); |
| 53 | 68 | ||
| 54 | if (ctrl->depth++ < 3) | 69 | if (ctrl->depth++ < 3) |
| @@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data) | |||
| 57 | return -1; | 72 | return -1; |
| 58 | } | 73 | } |
| 59 | 74 | ||
| 60 | void | 75 | void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, |
| 61 | perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) | 76 | struct pt_regs *regs) |
| 62 | { | 77 | { |
| 63 | struct arc_callchain_trace ctrl = { | 78 | struct arc_callchain_trace ctrl = { |
| 64 | .depth = 0, | 79 | .depth = 0, |
| @@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re | |||
| 68 | arc_unwind_core(NULL, regs, callchain_trace, &ctrl); | 83 | arc_unwind_core(NULL, regs, callchain_trace, &ctrl); |
| 69 | } | 84 | } |
| 70 | 85 | ||
| 71 | void | 86 | void perf_callchain_user(struct perf_callchain_entry_ctx *entry, |
| 72 | perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) | 87 | struct pt_regs *regs) |
| 73 | { | 88 | { |
| 74 | /* | 89 | /* |
| 75 | * User stack can't be unwound trivially with kernel dwarf unwinder | 90 | * User stack can't be unwound trivially with kernel dwarf unwinder |
| @@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu; | |||
| 82 | static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); | 97 | static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); |
| 83 | 98 | ||
| 84 | /* read counter #idx; note that counter# != event# on ARC! */ | 99 | /* read counter #idx; note that counter# != event# on ARC! */ |
| 85 | static uint64_t arc_pmu_read_counter(int idx) | 100 | static u64 arc_pmu_read_counter(int idx) |
| 86 | { | 101 | { |
| 87 | uint32_t tmp; | 102 | u32 tmp; |
| 88 | uint64_t result; | 103 | u64 result; |
| 89 | 104 | ||
| 90 | /* | 105 | /* |
| 91 | * ARC supports making 'snapshots' of the counters, so we don't | 106 | * ARC supports making 'snapshots' of the counters, so we don't |
| @@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx) | |||
| 94 | write_aux_reg(ARC_REG_PCT_INDEX, idx); | 109 | write_aux_reg(ARC_REG_PCT_INDEX, idx); |
| 95 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); | 110 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); |
| 96 | write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); | 111 | write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); |
| 97 | result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; | 112 | result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; |
| 98 | result |= read_aux_reg(ARC_REG_PCT_SNAPL); | 113 | result |= read_aux_reg(ARC_REG_PCT_SNAPL); |
| 99 | 114 | ||
| 100 | return result; | 115 | return result; |
| @@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx) | |||
| 103 | static void arc_perf_event_update(struct perf_event *event, | 118 | static void arc_perf_event_update(struct perf_event *event, |
| 104 | struct hw_perf_event *hwc, int idx) | 119 | struct hw_perf_event *hwc, int idx) |
| 105 | { | 120 | { |
| 106 | uint64_t prev_raw_count = local64_read(&hwc->prev_count); | 121 | u64 prev_raw_count = local64_read(&hwc->prev_count); |
| 107 | uint64_t new_raw_count = arc_pmu_read_counter(idx); | 122 | u64 new_raw_count = arc_pmu_read_counter(idx); |
| 108 | int64_t delta = new_raw_count - prev_raw_count; | 123 | s64 delta = new_raw_count - prev_raw_count; |
| 109 | 124 | ||
| 110 | /* | 125 | /* |
| 111 | * We aren't afraid of hwc->prev_count changing beneath our feet | 126 | * We aren't afraid of hwc->prev_count changing beneath our feet |
| @@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event) | |||
| 155 | int ret; | 170 | int ret; |
| 156 | 171 | ||
| 157 | if (!is_sampling_event(event)) { | 172 | if (!is_sampling_event(event)) { |
| 158 | hwc->sample_period = arc_pmu->max_period; | 173 | hwc->sample_period = arc_pmu->max_period; |
| 159 | hwc->last_period = hwc->sample_period; | 174 | hwc->last_period = hwc->sample_period; |
| 160 | local64_set(&hwc->period_left, hwc->sample_period); | 175 | local64_set(&hwc->period_left, hwc->sample_period); |
| 161 | } | 176 | } |
| @@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event) | |||
| 192 | pr_debug("init cache event with h/w %08x \'%s\'\n", | 207 | pr_debug("init cache event with h/w %08x \'%s\'\n", |
| 193 | (int)hwc->config, arc_pmu_ev_hw_map[ret]); | 208 | (int)hwc->config, arc_pmu_ev_hw_map[ret]); |
| 194 | return 0; | 209 | return 0; |
| 210 | |||
| 211 | case PERF_TYPE_RAW: | ||
| 212 | if (event->attr.config >= arc_pmu->n_events) | ||
| 213 | return -ENOENT; | ||
| 214 | |||
| 215 | hwc->config |= event->attr.config; | ||
| 216 | pr_debug("init raw event with idx %lld \'%s\'\n", | ||
| 217 | event->attr.config, | ||
| 218 | arc_pmu->raw_entry[event->attr.config].name); | ||
| 219 | |||
| 220 | return 0; | ||
| 221 | |||
| 195 | default: | 222 | default: |
| 196 | return -ENOENT; | 223 | return -ENOENT; |
| 197 | } | 224 | } |
| @@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event) | |||
| 200 | /* starts all counters */ | 227 | /* starts all counters */ |
| 201 | static void arc_pmu_enable(struct pmu *pmu) | 228 | static void arc_pmu_enable(struct pmu *pmu) |
| 202 | { | 229 | { |
| 203 | uint32_t tmp; | 230 | u32 tmp; |
| 204 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); | 231 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); |
| 205 | write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); | 232 | write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); |
| 206 | } | 233 | } |
| @@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu) | |||
| 208 | /* stops all counters */ | 235 | /* stops all counters */ |
| 209 | static void arc_pmu_disable(struct pmu *pmu) | 236 | static void arc_pmu_disable(struct pmu *pmu) |
| 210 | { | 237 | { |
| 211 | uint32_t tmp; | 238 | u32 tmp; |
| 212 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); | 239 | tmp = read_aux_reg(ARC_REG_PCT_CONTROL); |
| 213 | write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); | 240 | write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); |
| 214 | } | 241 | } |
| @@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event) | |||
| 228 | local64_set(&hwc->period_left, left); | 255 | local64_set(&hwc->period_left, left); |
| 229 | hwc->last_period = period; | 256 | hwc->last_period = period; |
| 230 | overflow = 1; | 257 | overflow = 1; |
| 231 | } else if (unlikely(left <= 0)) { | 258 | } else if (unlikely(left <= 0)) { |
| 232 | /* left underflowed by less than period. */ | 259 | /* left underflowed by less than period. */ |
| 233 | left += period; | 260 | left += period; |
| 234 | local64_set(&hwc->period_left, left); | 261 | local64_set(&hwc->period_left, left); |
| @@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event) | |||
| 246 | write_aux_reg(ARC_REG_PCT_INDEX, idx); | 273 | write_aux_reg(ARC_REG_PCT_INDEX, idx); |
| 247 | 274 | ||
| 248 | /* Write value */ | 275 | /* Write value */ |
| 249 | write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); | 276 | write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value)); |
| 250 | write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); | 277 | write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value)); |
| 251 | 278 | ||
| 252 | perf_event_update_userpage(event); | 279 | perf_event_update_userpage(event); |
| 253 | 280 | ||
| @@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags) | |||
| 277 | /* Enable interrupt for this counter */ | 304 | /* Enable interrupt for this counter */ |
| 278 | if (is_sampling_event(event)) | 305 | if (is_sampling_event(event)) |
| 279 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | 306 | write_aux_reg(ARC_REG_PCT_INT_CTRL, |
| 280 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); | 307 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx)); |
| 281 | 308 | ||
| 282 | /* enable ARC pmu here */ | 309 | /* enable ARC pmu here */ |
| 283 | write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ | 310 | write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ |
| @@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags) | |||
| 295 | * Reset interrupt flag by writing of 1. This is required | 322 | * Reset interrupt flag by writing of 1. This is required |
| 296 | * to make sure pending interrupt was not left. | 323 | * to make sure pending interrupt was not left. |
| 297 | */ | 324 | */ |
| 298 | write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); | 325 | write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); |
| 299 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | 326 | write_aux_reg(ARC_REG_PCT_INT_CTRL, |
| 300 | read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); | 327 | read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx)); |
| 301 | } | 328 | } |
| 302 | 329 | ||
| 303 | if (!(event->hw.state & PERF_HES_STOPPED)) { | 330 | if (!(event->hw.state & PERF_HES_STOPPED)) { |
| @@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags) | |||
| 349 | 376 | ||
| 350 | if (is_sampling_event(event)) { | 377 | if (is_sampling_event(event)) { |
| 351 | /* Mimic full counter overflow as other arches do */ | 378 | /* Mimic full counter overflow as other arches do */ |
| 352 | write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); | 379 | write_aux_reg(ARC_REG_PCT_INT_CNTL, |
| 380 | lower_32_bits(arc_pmu->max_period)); | ||
| 353 | write_aux_reg(ARC_REG_PCT_INT_CNTH, | 381 | write_aux_reg(ARC_REG_PCT_INT_CNTH, |
| 354 | (arc_pmu->max_period >> 32)); | 382 | upper_32_bits(arc_pmu->max_period)); |
| 355 | } | 383 | } |
| 356 | 384 | ||
| 357 | write_aux_reg(ARC_REG_PCT_CONFIG, 0); | 385 | write_aux_reg(ARC_REG_PCT_CONFIG, 0); |
| @@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) | |||
| 392 | idx = __ffs(active_ints); | 420 | idx = __ffs(active_ints); |
| 393 | 421 | ||
| 394 | /* Reset interrupt flag by writing of 1 */ | 422 | /* Reset interrupt flag by writing of 1 */ |
| 395 | write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); | 423 | write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); |
| 396 | 424 | ||
| 397 | /* | 425 | /* |
| 398 | * On reset of "interrupt active" bit corresponding | 426 | * On reset of "interrupt active" bit corresponding |
| @@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) | |||
| 400 | * Now we need to re-enable interrupt for the counter. | 428 | * Now we need to re-enable interrupt for the counter. |
| 401 | */ | 429 | */ |
| 402 | write_aux_reg(ARC_REG_PCT_INT_CTRL, | 430 | write_aux_reg(ARC_REG_PCT_INT_CTRL, |
| 403 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); | 431 | read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx)); |
| 404 | 432 | ||
| 405 | event = pmu_cpu->act_counter[idx]; | 433 | event = pmu_cpu->act_counter[idx]; |
| 406 | hwc = &event->hw; | 434 | hwc = &event->hw; |
| @@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) | |||
| 414 | arc_pmu_stop(event, 0); | 442 | arc_pmu_stop(event, 0); |
| 415 | } | 443 | } |
| 416 | 444 | ||
| 417 | active_ints &= ~(1U << idx); | 445 | active_ints &= ~BIT(idx); |
| 418 | } while (active_ints); | 446 | } while (active_ints); |
| 419 | 447 | ||
| 420 | done: | 448 | done: |
| @@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data) | |||
| 441 | write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); | 469 | write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); |
| 442 | } | 470 | } |
| 443 | 471 | ||
| 472 | /* Event field occupies the bottom 15 bits of our config field */ | ||
| 473 | PMU_FORMAT_ATTR(event, "config:0-14"); | ||
| 474 | static struct attribute *arc_pmu_format_attrs[] = { | ||
| 475 | &format_attr_event.attr, | ||
| 476 | NULL, | ||
| 477 | }; | ||
| 478 | |||
| 479 | static struct attribute_group arc_pmu_format_attr_gr = { | ||
| 480 | .name = "format", | ||
| 481 | .attrs = arc_pmu_format_attrs, | ||
| 482 | }; | ||
| 483 | |||
| 484 | static ssize_t arc_pmu_events_sysfs_show(struct device *dev, | ||
| 485 | struct device_attribute *attr, | ||
| 486 | char *page) | ||
| 487 | { | ||
| 488 | struct perf_pmu_events_attr *pmu_attr; | ||
| 489 | |||
| 490 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | ||
| 491 | return sprintf(page, "event=0x%04llx\n", pmu_attr->id); | ||
| 492 | } | ||
| 493 | |||
| 494 | /* | ||
| 495 | * We don't add attrs here as we don't have a pre-defined list of perf events. | ||
| 496 | * We will generate and add attrs dynamically in probe() after we read HW | ||
| 497 | * configuration. | ||
| 498 | */ | ||
| 499 | static struct attribute_group arc_pmu_events_attr_gr = { | ||
| 500 | .name = "events", | ||
| 501 | }; | ||
| 502 | |||
| 503 | static void arc_pmu_add_raw_event_attr(int j, char *str) | ||
| 504 | { | ||
| 505 | memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1); | ||
| 506 | arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name; | ||
| 507 | arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444); | ||
| 508 | arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show; | ||
| 509 | arc_pmu->attr[j].id = j; | ||
| 510 | arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr); | ||
| 511 | } | ||
| 512 | |||
| 513 | static int arc_pmu_raw_alloc(struct device *dev) | ||
| 514 | { | ||
| 515 | arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1, | ||
| 516 | sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO); | ||
| 517 | if (!arc_pmu->attr) | ||
| 518 | return -ENOMEM; | ||
| 519 | |||
| 520 | arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1, | ||
| 521 | sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO); | ||
| 522 | if (!arc_pmu->attrs) | ||
| 523 | return -ENOMEM; | ||
| 524 | |||
| 525 | arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events, | ||
| 526 | sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO); | ||
| 527 | if (!arc_pmu->raw_entry) | ||
| 528 | return -ENOMEM; | ||
| 529 | |||
| 530 | return 0; | ||
| 531 | } | ||
| 532 | |||
| 533 | static inline bool event_in_hw_event_map(int i, char *name) | ||
| 534 | { | ||
| 535 | if (!arc_pmu_ev_hw_map[i]) | ||
| 536 | return false; | ||
| 537 | |||
| 538 | if (!strlen(arc_pmu_ev_hw_map[i])) | ||
| 539 | return false; | ||
| 540 | |||
| 541 | if (strcmp(arc_pmu_ev_hw_map[i], name)) | ||
| 542 | return false; | ||
| 543 | |||
| 544 | return true; | ||
| 545 | } | ||
| 546 | |||
| 547 | static void arc_pmu_map_hw_event(int j, char *str) | ||
| 548 | { | ||
| 549 | int i; | ||
| 550 | |||
| 551 | /* See if HW condition has been mapped to a perf event_id */ | ||
| 552 | for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { | ||
| 553 | if (event_in_hw_event_map(i, str)) { | ||
| 554 | pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", | ||
| 555 | i, str, j); | ||
| 556 | arc_pmu->ev_hw_idx[i] = j; | ||
| 557 | } | ||
| 558 | } | ||
| 559 | } | ||
| 560 | |||
| 444 | static int arc_pmu_device_probe(struct platform_device *pdev) | 561 | static int arc_pmu_device_probe(struct platform_device *pdev) |
| 445 | { | 562 | { |
| 446 | struct arc_reg_pct_build pct_bcr; | 563 | struct arc_reg_pct_build pct_bcr; |
| 447 | struct arc_reg_cc_build cc_bcr; | 564 | struct arc_reg_cc_build cc_bcr; |
| 448 | int i, j, has_interrupts; | 565 | int i, has_interrupts; |
| 449 | int counter_size; /* in bits */ | 566 | int counter_size; /* in bits */ |
| 450 | 567 | ||
| 451 | union cc_name { | 568 | union cc_name { |
| 452 | struct { | 569 | struct { |
| 453 | uint32_t word0, word1; | 570 | u32 word0, word1; |
| 454 | char sentinel; | 571 | char sentinel; |
| 455 | } indiv; | 572 | } indiv; |
| 456 | char str[9]; | 573 | char str[ARCPMU_EVENT_NAME_LEN]; |
| 457 | } cc_name; | 574 | } cc_name; |
| 458 | 575 | ||
| 459 | 576 | ||
| @@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 463 | return -ENODEV; | 580 | return -ENODEV; |
| 464 | } | 581 | } |
| 465 | BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32); | 582 | BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32); |
| 466 | BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS); | 583 | if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS)) |
| 584 | return -EINVAL; | ||
| 467 | 585 | ||
| 468 | READ_BCR(ARC_REG_CC_BUILD, cc_bcr); | 586 | READ_BCR(ARC_REG_CC_BUILD, cc_bcr); |
| 469 | BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */ | 587 | if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?")) |
| 588 | return -EINVAL; | ||
| 470 | 589 | ||
| 471 | arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); | 590 | arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); |
| 472 | if (!arc_pmu) | 591 | if (!arc_pmu) |
| 473 | return -ENOMEM; | 592 | return -ENOMEM; |
| 474 | 593 | ||
| 594 | arc_pmu->n_events = cc_bcr.c; | ||
| 595 | |||
| 596 | if (arc_pmu_raw_alloc(&pdev->dev)) | ||
| 597 | return -ENOMEM; | ||
| 598 | |||
| 475 | has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; | 599 | has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; |
| 476 | 600 | ||
| 477 | arc_pmu->n_counters = pct_bcr.c; | 601 | arc_pmu->n_counters = pct_bcr.c; |
| @@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 481 | 605 | ||
| 482 | pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", | 606 | pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", |
| 483 | arc_pmu->n_counters, counter_size, cc_bcr.c, | 607 | arc_pmu->n_counters, counter_size, cc_bcr.c, |
| 484 | has_interrupts ? ", [overflow IRQ support]":""); | 608 | has_interrupts ? ", [overflow IRQ support]" : ""); |
| 485 | 609 | ||
| 486 | cc_name.str[8] = 0; | 610 | cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0; |
| 487 | for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) | 611 | for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) |
| 488 | arc_pmu->ev_hw_idx[i] = -1; | 612 | arc_pmu->ev_hw_idx[i] = -1; |
| 489 | 613 | ||
| 490 | /* loop thru all available h/w condition indexes */ | 614 | /* loop thru all available h/w condition indexes */ |
| 491 | for (j = 0; j < cc_bcr.c; j++) { | 615 | for (i = 0; i < cc_bcr.c; i++) { |
| 492 | write_aux_reg(ARC_REG_CC_INDEX, j); | 616 | write_aux_reg(ARC_REG_CC_INDEX, i); |
| 493 | cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); | 617 | cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); |
| 494 | cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); | 618 | cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); |
| 495 | 619 | ||
| 496 | /* See if it has been mapped to a perf event_id */ | 620 | arc_pmu_map_hw_event(i, cc_name.str); |
| 497 | for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { | 621 | arc_pmu_add_raw_event_attr(i, cc_name.str); |
| 498 | if (arc_pmu_ev_hw_map[i] && | ||
| 499 | !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) && | ||
| 500 | strlen(arc_pmu_ev_hw_map[i])) { | ||
| 501 | pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n", | ||
| 502 | i, cc_name.str, j); | ||
| 503 | arc_pmu->ev_hw_idx[i] = j; | ||
| 504 | } | ||
| 505 | } | ||
| 506 | } | 622 | } |
| 507 | 623 | ||
| 624 | arc_pmu_events_attr_gr.attrs = arc_pmu->attrs; | ||
| 625 | arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr; | ||
| 626 | arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr; | ||
| 627 | |||
| 508 | arc_pmu->pmu = (struct pmu) { | 628 | arc_pmu->pmu = (struct pmu) { |
| 509 | .pmu_enable = arc_pmu_enable, | 629 | .pmu_enable = arc_pmu_enable, |
| 510 | .pmu_disable = arc_pmu_disable, | 630 | .pmu_disable = arc_pmu_disable, |
| @@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 514 | .start = arc_pmu_start, | 634 | .start = arc_pmu_start, |
| 515 | .stop = arc_pmu_stop, | 635 | .stop = arc_pmu_stop, |
| 516 | .read = arc_pmu_read, | 636 | .read = arc_pmu_read, |
| 637 | .attr_groups = arc_pmu->attr_groups, | ||
| 517 | }; | 638 | }; |
| 518 | 639 | ||
| 519 | if (has_interrupts) { | 640 | if (has_interrupts) { |
| @@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 535 | } else | 656 | } else |
| 536 | arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | 657 | arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
| 537 | 658 | ||
| 538 | return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); | 659 | /* |
| 660 | * The perf parser doesn't really like the '-' symbol in event names, so | ||
| 661 | * let's use '_' in the arc pct name as it becomes the kernel PMU event prefix. | ||
| 662 | */ | ||
| 663 | return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW); | ||
| 539 | } | 664 | } |
| 540 | 665 | ||
| 541 | #ifdef CONFIG_OF | ||
| 542 | static const struct of_device_id arc_pmu_match[] = { | 666 | static const struct of_device_id arc_pmu_match[] = { |
| 543 | { .compatible = "snps,arc700-pct" }, | 667 | { .compatible = "snps,arc700-pct" }, |
| 544 | { .compatible = "snps,archs-pct" }, | 668 | { .compatible = "snps,archs-pct" }, |
| 545 | {}, | 669 | {}, |
| 546 | }; | 670 | }; |
| 547 | MODULE_DEVICE_TABLE(of, arc_pmu_match); | 671 | MODULE_DEVICE_TABLE(of, arc_pmu_match); |
| 548 | #endif | ||
| 549 | 672 | ||
| 550 | static struct platform_driver arc_pmu_driver = { | 673 | static struct platform_driver arc_pmu_driver = { |
| 551 | .driver = { | 674 | .driver = { |
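Two small kernel helpers carry the mechanical conversions in this file; sketches of their generic definitions (assumed to match include/linux/bits.h and include/linux/kernel.h) are:

    #define BIT(nr)            (1UL << (nr))

    #define lower_32_bits(n)   ((u32)((n) & 0xffffffff))
    /* the double shift avoids a shift-count warning when n is a 32-bit type */
    #define upper_32_bits(n)   ((u32)(((n) >> 16) >> 16))

Once the PMU is registered as "arc_pct", the dynamically generated event attributes should appear under /sys/bus/event_source/devices/arc_pct/events/ and be selectable from the perf tool as arc_pct/<event-name>/.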
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 2e018b8c2e19..feb90093e6b1 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
| @@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void) | |||
| 123 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; | 123 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; |
| 124 | const struct id_to_str *tbl; | 124 | const struct id_to_str *tbl; |
| 125 | struct bcr_isa_arcv2 isa; | 125 | struct bcr_isa_arcv2 isa; |
| 126 | struct bcr_actionpoint ap; | ||
| 126 | 127 | ||
| 127 | FIX_PTR(cpu); | 128 | FIX_PTR(cpu); |
| 128 | 129 | ||
| @@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void) | |||
| 195 | cpu->bpu.full = bpu.ft; | 196 | cpu->bpu.full = bpu.ft; |
| 196 | cpu->bpu.num_cache = 256 << bpu.bce; | 197 | cpu->bpu.num_cache = 256 << bpu.bce; |
| 197 | cpu->bpu.num_pred = 2048 << bpu.pte; | 198 | cpu->bpu.num_pred = 2048 << bpu.pte; |
| 199 | cpu->bpu.ret_stk = 4 << bpu.rse; | ||
| 198 | 200 | ||
| 199 | if (cpu->core.family >= 0x54) { | 201 | if (cpu->core.family >= 0x54) { |
| 200 | unsigned int exec_ctrl; | 202 | unsigned int exec_ctrl; |
| @@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void) | |||
| 207 | } | 209 | } |
| 208 | } | 210 | } |
| 209 | 211 | ||
| 210 | READ_BCR(ARC_REG_AP_BCR, bcr); | 212 | READ_BCR(ARC_REG_AP_BCR, ap); |
| 211 | cpu->extn.ap = bcr.ver ? 1 : 0; | 213 | if (ap.ver) { |
| 214 | cpu->extn.ap_num = 2 << ap.num; | ||
| 215 | cpu->extn.ap_full = !!ap.min; | ||
| 216 | } | ||
| 212 | 217 | ||
| 213 | READ_BCR(ARC_REG_SMART_BCR, bcr); | 218 | READ_BCR(ARC_REG_SMART_BCR, bcr); |
| 214 | cpu->extn.smart = bcr.ver ? 1 : 0; | 219 | cpu->extn.smart = bcr.ver ? 1 : 0; |
| @@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void) | |||
| 216 | READ_BCR(ARC_REG_RTT_BCR, bcr); | 221 | READ_BCR(ARC_REG_RTT_BCR, bcr); |
| 217 | cpu->extn.rtt = bcr.ver ? 1 : 0; | 222 | cpu->extn.rtt = bcr.ver ? 1 : 0; |
| 218 | 223 | ||
| 219 | cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; | ||
| 220 | |||
| 221 | READ_BCR(ARC_REG_ISA_CFG_BCR, isa); | 224 | READ_BCR(ARC_REG_ISA_CFG_BCR, isa); |
| 222 | 225 | ||
| 223 | /* some hacks for lack of feature BCR info in old ARC700 cores */ | 226 | /* some hacks for lack of feature BCR info in old ARC700 cores */ |
| @@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) | |||
| 299 | 302 | ||
| 300 | if (cpu->bpu.ver) | 303 | if (cpu->bpu.ver) |
| 301 | n += scnprintf(buf + n, len - n, | 304 | n += scnprintf(buf + n, len - n, |
| 302 | "BPU\t\t: %s%s match, cache:%d, Predict Table:%d", | 305 | "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d", |
| 303 | IS_AVAIL1(cpu->bpu.full, "full"), | 306 | IS_AVAIL1(cpu->bpu.full, "full"), |
| 304 | IS_AVAIL1(!cpu->bpu.full, "partial"), | 307 | IS_AVAIL1(!cpu->bpu.full, "partial"), |
| 305 | cpu->bpu.num_cache, cpu->bpu.num_pred); | 308 | cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk); |
| 306 | 309 | ||
| 307 | if (is_isa_arcv2()) { | 310 | if (is_isa_arcv2()) { |
| 308 | struct bcr_lpb lpb; | 311 | struct bcr_lpb lpb; |
| @@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) | |||
| 336 | IS_AVAIL1(cpu->extn.fpu_sp, "SP "), | 339 | IS_AVAIL1(cpu->extn.fpu_sp, "SP "), |
| 337 | IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); | 340 | IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); |
| 338 | 341 | ||
| 339 | if (cpu->extn.debug) | 342 | if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) { |
| 340 | n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", | 343 | n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s", |
| 341 | IS_AVAIL1(cpu->extn.ap, "ActionPoint "), | ||
| 342 | IS_AVAIL1(cpu->extn.smart, "smaRT "), | 344 | IS_AVAIL1(cpu->extn.smart, "smaRT "), |
| 343 | IS_AVAIL1(cpu->extn.rtt, "RTT ")); | 345 | IS_AVAIL1(cpu->extn.rtt, "RTT ")); |
| 346 | if (cpu->extn.ap_num) { | ||
| 347 | n += scnprintf(buf + n, len - n, "ActionPoint %d/%s", | ||
| 348 | cpu->extn.ap_num, | ||
| 349 | cpu->extn.ap_full ? "full":"min"); | ||
| 350 | } | ||
| 351 | n += scnprintf(buf + n, len - n, "\n"); | ||
| 352 | } | ||
| 344 | 353 | ||
| 345 | if (cpu->dccm.sz || cpu->iccm.sz) | 354 | if (cpu->dccm.sz || cpu->iccm.sz) |
| 346 | n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", | 355 | n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", |
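A quick worked example of the new actionpoint decode (values are illustrative only; the scaling mirrors cpu->extn.ap_num = 2 << ap.num above):

    struct bcr_actionpoint ap = { .ver = 5, .num = 1 };  /* as filled in by READ_BCR() */
    unsigned int ap_num = 2 << ap.num;                   /* num: 0 -> 2, 1 -> 4, 2 -> 8 actionpoints */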
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index e8d9fb452346..215f515442e0 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <asm/arcregs.h> | 18 | #include <asm/arcregs.h> |
| 19 | #include <asm/irqflags.h> | 19 | #include <asm/irqflags.h> |
| 20 | 20 | ||
| 21 | #define ARC_PATH_MAX 256 | ||
| 22 | |||
| 21 | /* | 23 | /* |
| 22 | * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) | 24 | * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) |
| 23 | * -Prints 3 regs per line and a CR. | 25 | * -Prints 3 regs per line and a CR. |
| @@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs) | |||
| 58 | print_reg_file(&(cregs->r13), 13); | 60 | print_reg_file(&(cregs->r13), 13); |
| 59 | } | 61 | } |
| 60 | 62 | ||
| 61 | static void print_task_path_n_nm(struct task_struct *tsk, char *buf) | 63 | static void print_task_path_n_nm(struct task_struct *tsk) |
| 62 | { | 64 | { |
| 63 | char *path_nm = NULL; | 65 | char *path_nm = NULL; |
| 64 | struct mm_struct *mm; | 66 | struct mm_struct *mm; |
| 65 | struct file *exe_file; | 67 | struct file *exe_file; |
| 68 | char buf[ARC_PATH_MAX]; | ||
| 66 | 69 | ||
| 67 | mm = get_task_mm(tsk); | 70 | mm = get_task_mm(tsk); |
| 68 | if (!mm) | 71 | if (!mm) |
| @@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) | |||
| 72 | mmput(mm); | 75 | mmput(mm); |
| 73 | 76 | ||
| 74 | if (exe_file) { | 77 | if (exe_file) { |
| 75 | path_nm = file_path(exe_file, buf, 255); | 78 | path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1); |
| 76 | fput(exe_file); | 79 | fput(exe_file); |
| 77 | } | 80 | } |
| 78 | 81 | ||
| @@ -80,10 +83,9 @@ done: | |||
| 80 | pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); | 83 | pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); |
| 81 | } | 84 | } |
| 82 | 85 | ||
| 83 | static void show_faulting_vma(unsigned long address, char *buf) | 86 | static void show_faulting_vma(unsigned long address) |
| 84 | { | 87 | { |
| 85 | struct vm_area_struct *vma; | 88 | struct vm_area_struct *vma; |
| 86 | char *nm = buf; | ||
| 87 | struct mm_struct *active_mm = current->active_mm; | 89 | struct mm_struct *active_mm = current->active_mm; |
| 88 | 90 | ||
| 89 | /* can't use print_vma_addr() yet as it doesn't check for | 91 | /* can't use print_vma_addr() yet as it doesn't check for |
| @@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf) | |||
| 96 | * if the container VMA is not found | 98 | * if the container VMA is not found |
| 97 | */ | 99 | */ |
| 98 | if (vma && (vma->vm_start <= address)) { | 100 | if (vma && (vma->vm_start <= address)) { |
| 101 | char buf[ARC_PATH_MAX]; | ||
| 102 | char *nm = "?"; | ||
| 103 | |||
| 99 | if (vma->vm_file) { | 104 | if (vma->vm_file) { |
| 100 | nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); | 105 | nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1); |
| 101 | if (IS_ERR(nm)) | 106 | if (IS_ERR(nm)) |
| 102 | nm = "?"; | 107 | nm = "?"; |
| 103 | } | 108 | } |
| @@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs) | |||
| 173 | { | 178 | { |
| 174 | struct task_struct *tsk = current; | 179 | struct task_struct *tsk = current; |
| 175 | struct callee_regs *cregs; | 180 | struct callee_regs *cregs; |
| 176 | char *buf; | ||
| 177 | 181 | ||
| 178 | buf = (char *)__get_free_page(GFP_KERNEL); | 182 | /* |
| 179 | if (!buf) | 183 | * generic code calls us with preemption disabled, but some calls |
| 180 | return; | 184 | * here could sleep, so re-enable to avoid lockdep splat |
| 185 | */ | ||
| 186 | preempt_enable(); | ||
| 181 | 187 | ||
| 182 | print_task_path_n_nm(tsk, buf); | 188 | print_task_path_n_nm(tsk); |
| 183 | show_regs_print_info(KERN_INFO); | 189 | show_regs_print_info(KERN_INFO); |
| 184 | 190 | ||
| 185 | show_ecr_verbose(regs); | 191 | show_ecr_verbose(regs); |
| @@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs) | |||
| 189 | (void *)regs->blink, (void *)regs->ret); | 195 | (void *)regs->blink, (void *)regs->ret); |
| 190 | 196 | ||
| 191 | if (user_mode(regs)) | 197 | if (user_mode(regs)) |
| 192 | show_faulting_vma(regs->ret, buf); /* faulting code, not data */ | 198 | show_faulting_vma(regs->ret); /* faulting code, not data */ |
| 193 | 199 | ||
| 194 | pr_info("[STAT32]: 0x%08lx", regs->status32); | 200 | pr_info("[STAT32]: 0x%08lx", regs->status32); |
| 195 | 201 | ||
| @@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs) | |||
| 222 | if (cregs) | 228 | if (cregs) |
| 223 | show_callee_regs(cregs); | 229 | show_callee_regs(cregs); |
| 224 | 230 | ||
| 225 | free_page((unsigned long)buf); | 231 | preempt_disable(); |
| 226 | } | 232 | } |
| 227 | 233 | ||
| 228 | void show_kernel_fault_diag(const char *str, struct pt_regs *regs, | 234 | void show_kernel_fault_diag(const char *str, struct pt_regs *regs, |
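The troubleshoot.c hunk replaces the __get_free_page() scratch buffer with a 256-byte on-stack buffer; the resulting pattern around file_path() is roughly the following (exe_file stands for whichever struct file is being named):

    char buf[ARC_PATH_MAX];                         /* 256 bytes on the stack */
    char *nm = file_path(exe_file, buf, ARC_PATH_MAX - 1);

    if (IS_ERR(nm))                                 /* file_path() returns ERR_PTR on failure */
    	nm = "?";
    pr_info("Path: %s\n", nm);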
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 62ad4bcb841a..f230bb7092fd 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S | |||
| @@ -7,11 +7,39 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
| 10 | #include <asm/cache.h> | ||
| 10 | 11 | ||
| 11 | #undef PREALLOC_NOT_AVAIL | 12 | /* |
| 13 | * The memset implementation below is optimized to use prefetchw and prealloc | ||
| 14 | * instructions on CPUs with a 64B L1 data cache line (L1_CACHE_SHIFT == 6). | ||
| 15 | * If you want to implement an optimized memset for the other possible L1 data | ||
| 16 | * cache line lengths (32B and 128B), you should rewrite the code, carefully | ||
| 17 | * checking that no prefetchw/prealloc instruction is issued for L1 cache lines | ||
| 18 | * which do not belong to the memset area. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #if L1_CACHE_SHIFT == 6 | ||
| 22 | |||
| 23 | .macro PREALLOC_INSTR reg, off | ||
| 24 | prealloc [\reg, \off] | ||
| 25 | .endm | ||
| 26 | |||
| 27 | .macro PREFETCHW_INSTR reg, off | ||
| 28 | prefetchw [\reg, \off] | ||
| 29 | .endm | ||
| 30 | |||
| 31 | #else | ||
| 32 | |||
| 33 | .macro PREALLOC_INSTR | ||
| 34 | .endm | ||
| 35 | |||
| 36 | .macro PREFETCHW_INSTR | ||
| 37 | .endm | ||
| 38 | |||
| 39 | #endif | ||
| 12 | 40 | ||
| 13 | ENTRY_CFI(memset) | 41 | ENTRY_CFI(memset) |
| 14 | prefetchw [r0] ; Prefetch the write location | 42 | PREFETCHW_INSTR r0, 0 ; Prefetch the first write location |
| 15 | mov.f 0, r2 | 43 | mov.f 0, r2 |
| 16 | ;;; if size is zero | 44 | ;;; if size is zero |
| 17 | jz.d [blink] | 45 | jz.d [blink] |
| @@ -48,11 +76,8 @@ ENTRY_CFI(memset) | |||
| 48 | 76 | ||
| 49 | lpnz @.Lset64bytes | 77 | lpnz @.Lset64bytes |
| 50 | ;; LOOP START | 78 | ;; LOOP START |
| 51 | #ifdef PREALLOC_NOT_AVAIL | 79 | PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching |
| 52 | prefetchw [r3, 64] ;Prefetch the next write location | 80 | |
| 53 | #else | ||
| 54 | prealloc [r3, 64] | ||
| 55 | #endif | ||
| 56 | #ifdef CONFIG_ARC_HAS_LL64 | 81 | #ifdef CONFIG_ARC_HAS_LL64 |
| 57 | std.ab r4, [r3, 8] | 82 | std.ab r4, [r3, 8] |
| 58 | std.ab r4, [r3, 8] | 83 | std.ab r4, [r3, 8] |
| @@ -85,7 +110,6 @@ ENTRY_CFI(memset) | |||
| 85 | lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes | 110 | lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes |
| 86 | lpnz .Lset32bytes | 111 | lpnz .Lset32bytes |
| 87 | ;; LOOP START | 112 | ;; LOOP START |
| 88 | prefetchw [r3, 32] ;Prefetch the next write location | ||
| 89 | #ifdef CONFIG_ARC_HAS_LL64 | 113 | #ifdef CONFIG_ARC_HAS_LL64 |
| 90 | std.ab r4, [r3, 8] | 114 | std.ab r4, [r3, 8] |
| 91 | std.ab r4, [r3, 8] | 115 | std.ab r4, [r3, 8] |
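Background for the memset change: as the new comment above warns, prealloc allocates a dirty L1 line without fetching its old contents, so it must only ever touch lines that lie entirely inside the area being overwritten; that is why the helper macros expand to nothing unless L1_CACHE_SHIFT == 6. An illustrative C model of that safety condition (an assumption-laden sketch, not kernel code):

    /* prealloc of the line containing p is safe for memset(dst, c, len)
     * only if the whole cache line lies inside [dst, dst + len) */
    static bool prealloc_safe(uintptr_t dst, size_t len, uintptr_t p, size_t line)
    {
    	uintptr_t start = p & ~(line - 1);

    	return start >= dst && start + line <= dst + len;
    }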
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index a1d723197084..8df1638259f3 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
| @@ -141,12 +141,17 @@ good_area: | |||
| 141 | */ | 141 | */ |
| 142 | fault = handle_mm_fault(vma, address, flags); | 142 | fault = handle_mm_fault(vma, address, flags); |
| 143 | 143 | ||
| 144 | /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */ | ||
| 145 | if (fatal_signal_pending(current)) { | 144 | if (fatal_signal_pending(current)) { |
| 146 | if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY)) | 145 | |
| 147 | up_read(&mm->mmap_sem); | 146 | /* |
| 148 | if (user_mode(regs)) | 147 | * if fault retry, mmap_sem already relinquished by core mm |
| 148 | * so OK to return to user mode (with signal handled first) | ||
| 149 | */ | ||
| 150 | if (fault & VM_FAULT_RETRY) { | ||
| 151 | if (!user_mode(regs)) | ||
| 152 | goto no_context; | ||
| 149 | return; | 153 | return; |
| 154 | } | ||
| 150 | } | 155 | } |
| 151 | 156 | ||
| 152 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 157 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 43bf4c3a1290..e1ab2d7f1d64 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c | |||
| @@ -119,7 +119,8 @@ void __init setup_arch_memory(void) | |||
| 119 | */ | 119 | */ |
| 120 | 120 | ||
| 121 | memblock_add_node(low_mem_start, low_mem_sz, 0); | 121 | memblock_add_node(low_mem_start, low_mem_sz, 0); |
| 122 | memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); | 122 | memblock_reserve(CONFIG_LINUX_LINK_BASE, |
| 123 | __pa(_end) - CONFIG_LINUX_LINK_BASE); | ||
| 123 | 124 | ||
| 124 | #ifdef CONFIG_BLK_DEV_INITRD | 125 | #ifdef CONFIG_BLK_DEV_INITRD |
| 125 | if (phys_initrd_size) { | 126 | if (phys_initrd_size) { |
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index b3ef061d8b74..2c403e7c782d 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h | |||
| @@ -1 +1,95 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H | ||
| 3 | #define _ASM_ARM_XEN_PAGE_COHERENT_H | ||
| 4 | |||
| 5 | #include <linux/dma-mapping.h> | ||
| 6 | #include <asm/page.h> | ||
| 1 | #include <xen/arm/page-coherent.h> | 7 | #include <xen/arm/page-coherent.h> |
| 8 | |||
| 9 | static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev) | ||
| 10 | { | ||
| 11 | if (dev && dev->archdata.dev_dma_ops) | ||
| 12 | return dev->archdata.dev_dma_ops; | ||
| 13 | return get_arch_dma_ops(NULL); | ||
| 14 | } | ||
| 15 | |||
| 16 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | ||
| 17 | dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) | ||
| 18 | { | ||
| 19 | return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); | ||
| 20 | } | ||
| 21 | |||
| 22 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | ||
| 23 | void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) | ||
| 24 | { | ||
| 25 | xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); | ||
| 26 | } | ||
| 27 | |||
| 28 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | ||
| 29 | dma_addr_t dev_addr, unsigned long offset, size_t size, | ||
| 30 | enum dma_data_direction dir, unsigned long attrs) | ||
| 31 | { | ||
| 32 | unsigned long page_pfn = page_to_xen_pfn(page); | ||
| 33 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); | ||
| 34 | unsigned long compound_pages = | ||
| 35 | (1<<compound_order(page)) * XEN_PFN_PER_PAGE; | ||
| 36 | bool local = (page_pfn <= dev_pfn) && | ||
| 37 | (dev_pfn - page_pfn < compound_pages); | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Dom0 is mapped 1:1, and while a Linux page can span across | ||
| 41 | * multiple Xen pages, it cannot contain a mix of local and | ||
| 42 | * foreign Xen pages. So if the first xen_pfn == mfn the page | ||
| 43 | * is local, otherwise it's a foreign page grant-mapped in | ||
| 44 | * dom0. If the page is local we can safely call the native | ||
| 45 | * dma_ops function, otherwise we call the xen specific | ||
| 46 | * function. | ||
| 47 | */ | ||
| 48 | if (local) | ||
| 49 | xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
| 50 | else | ||
| 51 | __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
| 55 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
| 56 | { | ||
| 57 | unsigned long pfn = PFN_DOWN(handle); | ||
| 58 | /* | ||
| 59 | * Dom0 is mapped 1:1, and while a Linux page can span across | ||
| 60 | * multiple Xen pages, it cannot contain a mix of local and | ||
| 61 | * foreign Xen pages. Since Dom0 is mapped 1:1, calling | ||
| 62 | * pfn_valid on a foreign mfn will always return false. If the | ||
| 63 | * page is local we can safely call the native dma_ops | ||
| 64 | * function, otherwise we call the xen specific function. | ||
| 65 | */ | ||
| 66 | if (pfn_valid(pfn)) { | ||
| 67 | if (xen_get_dma_ops(hwdev)->unmap_page) | ||
| 68 | xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
| 69 | } else | ||
| 70 | __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
| 74 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 75 | { | ||
| 76 | unsigned long pfn = PFN_DOWN(handle); | ||
| 77 | if (pfn_valid(pfn)) { | ||
| 78 | if (xen_get_dma_ops(hwdev)->sync_single_for_cpu) | ||
| 79 | xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 80 | } else | ||
| 81 | __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 82 | } | ||
| 83 | |||
| 84 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
| 85 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 86 | { | ||
| 87 | unsigned long pfn = PFN_DOWN(handle); | ||
| 88 | if (pfn_valid(pfn)) { | ||
| 89 | if (xen_get_dma_ops(hwdev)->sync_single_for_device) | ||
| 90 | xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
| 91 | } else | ||
| 92 | __xen_dma_sync_single_for_device(hwdev, handle, size, dir); | ||
| 93 | } | ||
| 94 | |||
| 95 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | ||
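For the local/foreign test in xen_dma_map_page() above, a worked example with illustrative numbers (assuming 4 KiB kernel pages and a 4 KiB Xen granule, so XEN_PFN_PER_PAGE == 1):

    unsigned long page_pfn = 0x1000;              /* page_to_xen_pfn(page)           */
    unsigned long compound_pages = (1 << 1) * 1;  /* order-1 page: two Xen pfns      */
    unsigned long dev_pfn = 0x1001;               /* XEN_PFN_DOWN(dev_addr)          */
    bool local = page_pfn <= dev_pfn &&
                 dev_pfn - page_pfn < compound_pages;  /* true: native dma_ops path  */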
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index eb43e09c1980..926434f413fa 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h | |||
| @@ -60,8 +60,6 @@ | |||
| 60 | 60 | ||
| 61 | #ifdef CONFIG_KASAN_SW_TAGS | 61 | #ifdef CONFIG_KASAN_SW_TAGS |
| 62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) | 62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) |
| 63 | #else | ||
| 64 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 65 | #endif | 63 | #endif |
| 66 | 64 | ||
| 67 | #ifndef __ASSEMBLY__ | 65 | #ifndef __ASSEMBLY__ |
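Dropping the #else branch above is safe because the generic slab header supplies the same default whenever an architecture leaves ARCH_SLAB_MINALIGN undefined; a sketch of that fallback (assumed to follow <linux/slab.h>):

    #ifndef ARCH_SLAB_MINALIGN
    #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
    #endif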
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 3dd3d664c5c5..4658c937e173 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h | |||
| @@ -20,9 +20,6 @@ struct dev_archdata { | |||
| 20 | #ifdef CONFIG_IOMMU_API | 20 | #ifdef CONFIG_IOMMU_API |
| 21 | void *iommu; /* private IOMMU data */ | 21 | void *iommu; /* private IOMMU data */ |
| 22 | #endif | 22 | #endif |
| 23 | #ifdef CONFIG_XEN | ||
| 24 | const struct dma_map_ops *dev_dma_ops; | ||
| 25 | #endif | ||
| 26 | }; | 23 | }; |
| 27 | 24 | ||
| 28 | struct pdev_archdata { | 25 | struct pdev_archdata { |
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index ac352accb3d9..3e8063f4f9d3 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h | |||
| @@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void) | |||
| 60 | * later determine that kpti is required, then | 60 | * later determine that kpti is required, then |
| 61 | * kpti_install_ng_mappings() will make them non-global. | 61 | * kpti_install_ng_mappings() will make them non-global. |
| 62 | */ | 62 | */ |
| 63 | if (arm64_kernel_unmapped_at_el0()) | ||
| 64 | return true; | ||
| 65 | |||
| 63 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) | 66 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) |
| 64 | return arm64_kernel_unmapped_at_el0(); | 67 | return false; |
| 65 | 68 | ||
| 66 | /* | 69 | /* |
| 67 | * KASLR is enabled so we're going to be enabling kpti on non-broken | 70 | * KASLR is enabled so we're going to be enabling kpti on non-broken |
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index b3ef061d8b74..d88e56b90b93 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h | |||
| @@ -1 +1,77 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H | ||
| 3 | #define _ASM_ARM64_XEN_PAGE_COHERENT_H | ||
| 4 | |||
| 5 | #include <linux/dma-mapping.h> | ||
| 6 | #include <asm/page.h> | ||
| 1 | #include <xen/arm/page-coherent.h> | 7 | #include <xen/arm/page-coherent.h> |
| 8 | |||
| 9 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | ||
| 10 | dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) | ||
| 11 | { | ||
| 12 | return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); | ||
| 13 | } | ||
| 14 | |||
| 15 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | ||
| 16 | void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) | ||
| 17 | { | ||
| 18 | dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); | ||
| 19 | } | ||
| 20 | |||
| 21 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
| 22 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 23 | { | ||
| 24 | unsigned long pfn = PFN_DOWN(handle); | ||
| 25 | |||
| 26 | if (pfn_valid(pfn)) | ||
| 27 | dma_direct_sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 28 | else | ||
| 29 | __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
| 33 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 34 | { | ||
| 35 | unsigned long pfn = PFN_DOWN(handle); | ||
| 36 | if (pfn_valid(pfn)) | ||
| 37 | dma_direct_sync_single_for_device(hwdev, handle, size, dir); | ||
| 38 | else | ||
| 39 | __xen_dma_sync_single_for_device(hwdev, handle, size, dir); | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | ||
| 43 | dma_addr_t dev_addr, unsigned long offset, size_t size, | ||
| 44 | enum dma_data_direction dir, unsigned long attrs) | ||
| 45 | { | ||
| 46 | unsigned long page_pfn = page_to_xen_pfn(page); | ||
| 47 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); | ||
| 48 | unsigned long compound_pages = | ||
| 49 | (1<<compound_order(page)) * XEN_PFN_PER_PAGE; | ||
| 50 | bool local = (page_pfn <= dev_pfn) && | ||
| 51 | (dev_pfn - page_pfn < compound_pages); | ||
| 52 | |||
| 53 | if (local) | ||
| 54 | dma_direct_map_page(hwdev, page, offset, size, dir, attrs); | ||
| 55 | else | ||
| 56 | __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
| 60 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
| 61 | { | ||
| 62 | unsigned long pfn = PFN_DOWN(handle); | ||
| 63 | /* | ||
| 64 | * Dom0 is mapped 1:1, and while a Linux page can span across | ||
| 65 | * multiple Xen pages, it cannot contain a mix of local and | ||
| 66 | * foreign Xen pages. Since Dom0 is mapped 1:1, calling | ||
| 67 | * pfn_valid on a foreign mfn will always return false. If the | ||
| 68 | * page is local we can safely call the native dma_ops | ||
| 69 | * function, otherwise we call the xen specific function. | ||
| 70 | */ | ||
| 71 | if (pfn_valid(pfn)) | ||
| 72 | dma_direct_unmap_page(hwdev, handle, size, dir, attrs); | ||
| 73 | else | ||
| 74 | __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); | ||
| 75 | } | ||
| 76 | |||
| 77 | #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ | ||
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index f0e6ab8abe9c..ba6b41790fcd 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
| 15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 16 | 16 | ||
| 17 | #include <asm/cacheflush.h> | ||
| 17 | #include <asm/fixmap.h> | 18 | #include <asm/fixmap.h> |
| 18 | #include <asm/kernel-pgtable.h> | 19 | #include <asm/kernel-pgtable.h> |
| 19 | #include <asm/memory.h> | 20 | #include <asm/memory.h> |
| @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) | |||
| 43 | return ret; | 44 | return ret; |
| 44 | } | 45 | } |
| 45 | 46 | ||
| 46 | static __init const u8 *get_cmdline(void *fdt) | 47 | static __init const u8 *kaslr_get_cmdline(void *fdt) |
| 47 | { | 48 | { |
| 48 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; | 49 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; |
| 49 | 50 | ||
| @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
| 109 | * Check if 'nokaslr' appears on the command line, and | 110 | * Check if 'nokaslr' appears on the command line, and |
| 110 | * return 0 if that is the case. | 111 | * return 0 if that is the case. |
| 111 | */ | 112 | */ |
| 112 | cmdline = get_cmdline(fdt); | 113 | cmdline = kaslr_get_cmdline(fdt); |
| 113 | str = strstr(cmdline, "nokaslr"); | 114 | str = strstr(cmdline, "nokaslr"); |
| 114 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) | 115 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) |
| 115 | return 0; | 116 | return 0; |
| @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
| 169 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; | 170 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; |
| 170 | module_alloc_base &= PAGE_MASK; | 171 | module_alloc_base &= PAGE_MASK; |
| 171 | 172 | ||
| 173 | __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); | ||
| 174 | __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); | ||
| 175 | |||
| 172 | return offset; | 176 | return offset; |
| 173 | } | 177 | } |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index fb0908456a1f..78c0a72f822c 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
| @@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
| 466 | __iommu_setup_dma_ops(dev, dma_base, size, iommu); | 466 | __iommu_setup_dma_ops(dev, dma_base, size, iommu); |
| 467 | 467 | ||
| 468 | #ifdef CONFIG_XEN | 468 | #ifdef CONFIG_XEN |
| 469 | if (xen_initial_domain()) { | 469 | if (xen_initial_domain()) |
| 470 | dev->archdata.dev_dma_ops = dev->dma_ops; | ||
| 471 | dev->dma_ops = xen_dma_ops; | 470 | dev->dma_ops = xen_dma_ops; |
| 472 | } | ||
| 473 | #endif | 471 | #endif |
| 474 | } | 472 | } |
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile index 4003ddc616e1..f801f3708a89 100644 --- a/arch/h8300/Makefile +++ b/arch/h8300/Makefile | |||
| @@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/ | |||
| 37 | 37 | ||
| 38 | boot := arch/h8300/boot | 38 | boot := arch/h8300/boot |
| 39 | 39 | ||
| 40 | archmrproper: | ||
| 41 | |||
| 42 | archclean: | 40 | archclean: |
| 43 | $(Q)$(MAKE) $(clean)=$(boot) | 41 | $(Q)$(MAKE) $(clean)=$(boot) |
| 44 | 42 | ||
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 320d86f192ee..171290f9f1de 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile | |||
| @@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig | |||
| 16 | NM := $(CROSS_COMPILE)nm -B | 16 | NM := $(CROSS_COMPILE)nm -B |
| 17 | READELF := $(CROSS_COMPILE)readelf | 17 | READELF := $(CROSS_COMPILE)readelf |
| 18 | 18 | ||
| 19 | export AWK | ||
| 20 | |||
| 21 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ | 19 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ |
| 22 | 20 | ||
| 23 | OBJCOPYFLAGS := --strip-all | 21 | OBJCOPYFLAGS := --strip-all |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 787290781b8c..0d14f51d0002 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
| @@ -3155,6 +3155,7 @@ config MIPS32_O32 | |||
| 3155 | config MIPS32_N32 | 3155 | config MIPS32_N32 |
| 3156 | bool "Kernel support for n32 binaries" | 3156 | bool "Kernel support for n32 binaries" |
| 3157 | depends on 64BIT | 3157 | depends on 64BIT |
| 3158 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | ||
| 3158 | select COMPAT | 3159 | select COMPAT |
| 3159 | select MIPS32_COMPAT | 3160 | select MIPS32_COMPAT |
| 3160 | select SYSVIPC_COMPAT if SYSVIPC | 3161 | select SYSVIPC_COMPAT if SYSVIPC |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608e..fe3773539eff 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c | |||
| @@ -173,6 +173,31 @@ void __init plat_mem_setup(void) | |||
| 173 | pm_power_off = bcm47xx_machine_halt; | 173 | pm_power_off = bcm47xx_machine_halt; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | #ifdef CONFIG_BCM47XX_BCMA | ||
| 177 | static struct device * __init bcm47xx_setup_device(void) | ||
| 178 | { | ||
| 179 | struct device *dev; | ||
| 180 | int err; | ||
| 181 | |||
| 182 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
| 183 | if (!dev) | ||
| 184 | return NULL; | ||
| 185 | |||
| 186 | err = dev_set_name(dev, "bcm47xx_soc"); | ||
| 187 | if (err) { | ||
| 188 | pr_err("Failed to set SoC device name: %d\n", err); | ||
| 189 | kfree(dev); | ||
| 190 | return NULL; | ||
| 191 | } | ||
| 192 | |||
| 193 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
| 194 | if (err) | ||
| 195 | pr_err("Failed to set SoC DMA mask: %d\n", err); | ||
| 196 | |||
| 197 | return dev; | ||
| 198 | } | ||
| 199 | #endif | ||
| 200 | |||
| 176 | /* | 201 | /* |
| 177 | * This finishes bus initialization doing things that were not possible without | 202 | * This finishes bus initialization doing things that were not possible without |
| 178 | * kmalloc. Make sure to call it late enough (after mm_init). | 203 | * kmalloc. Make sure to call it late enough (after mm_init). |
| @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) | |||
| 183 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { | 208 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { |
| 184 | int err; | 209 | int err; |
| 185 | 210 | ||
| 211 | bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); | ||
| 212 | if (!bcm47xx_bus.bcma.dev) | ||
| 213 | panic("Failed to setup SoC device\n"); | ||
| 214 | |||
| 186 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); | 215 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); |
| 187 | if (err) | 216 | if (err) |
| 188 | panic("Failed to initialize BCMA bus (err %d)", err); | 217 | panic("Failed to initialize BCMA bus (err %d)", err); |
| @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) | |||
| 235 | #endif | 264 | #endif |
| 236 | #ifdef CONFIG_BCM47XX_BCMA | 265 | #ifdef CONFIG_BCM47XX_BCMA |
| 237 | case BCM47XX_BUS_TYPE_BCMA: | 266 | case BCM47XX_BUS_TYPE_BCMA: |
| 267 | if (device_register(bcm47xx_bus.bcma.dev)) | ||
| 268 | pr_err("Failed to register SoC device\n"); | ||
| 238 | bcma_bus_register(&bcm47xx_bus.bcma.bus); | 269 | bcma_bus_register(&bcm47xx_bus.bcma.bus); |
| 239 | break; | 270 | break; |
| 240 | #endif | 271 | #endif |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 2c79ab52977a..8bf43c5a7bc7 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
| @@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored) | |||
| 98 | " sync \n" | 98 | " sync \n" |
| 99 | " synci ($0) \n"); | 99 | " synci ($0) \n"); |
| 100 | 100 | ||
| 101 | relocated_kexec_smp_wait(NULL); | 101 | kexec_reboot(); |
| 102 | } | 102 | } |
| 103 | #endif | 103 | #endif |
| 104 | 104 | ||
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig index 4e4ec779f182..6f981af67826 100644 --- a/arch/mips/configs/ath79_defconfig +++ b/arch/mips/configs/ath79_defconfig | |||
| @@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y | |||
| 66 | # CONFIG_SERIAL_8250_PCI is not set | 66 | # CONFIG_SERIAL_8250_PCI is not set |
| 67 | CONFIG_SERIAL_8250_NR_UARTS=1 | 67 | CONFIG_SERIAL_8250_NR_UARTS=1 |
| 68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 | 68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 |
| 69 | CONFIG_SERIAL_OF_PLATFORM=y | ||
| 69 | CONFIG_SERIAL_AR933X=y | 70 | CONFIG_SERIAL_AR933X=y |
| 70 | CONFIG_SERIAL_AR933X_CONSOLE=y | 71 | CONFIG_SERIAL_AR933X_CONSOLE=y |
| 71 | # CONFIG_HW_RANDOM is not set | 72 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h index c6b63a409641..6dd8ad2409dc 100644 --- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h +++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h | |||
| @@ -18,8 +18,6 @@ | |||
| 18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) | 18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) |
| 19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) | 19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) |
| 20 | 20 | ||
| 21 | #define MIPS_CPU_TIMER_IRQ 7 | ||
| 22 | |||
| 23 | #define MAX_IM 5 | 21 | #define MAX_IM 5 |
| 24 | 22 | ||
| 25 | #endif /* _FALCON_IRQ__ */ | 23 | #endif /* _FALCON_IRQ__ */ |
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h index 141076325307..0b424214a5e9 100644 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h | |||
| @@ -19,8 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) | 20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) |
| 21 | 21 | ||
| 22 | #define MIPS_CPU_TIMER_IRQ 7 | ||
| 23 | |||
| 24 | #define MAX_IM 5 | 22 | #define MAX_IM 5 |
| 25 | 23 | ||
| 26 | #endif | 24 | #endif |
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 6256d35dbf4d..bedb5047aff3 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c | |||
| @@ -74,14 +74,15 @@ static int __init vdma_init(void) | |||
| 74 | get_order(VDMA_PGTBL_SIZE)); | 74 | get_order(VDMA_PGTBL_SIZE)); |
| 75 | BUG_ON(!pgtbl); | 75 | BUG_ON(!pgtbl); |
| 76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); | 76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); |
| 77 | pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); | 77 | pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); |
| 78 | 78 | ||
| 79 | /* | 79 | /* |
| 80 | * Clear the R4030 translation table | 80 | * Clear the R4030 translation table |
| 81 | */ | 81 | */ |
| 82 | vdma_pgtbl_init(); | 82 | vdma_pgtbl_init(); |
| 83 | 83 | ||
| 84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); | 84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, |
| 85 | CPHYSADDR((unsigned long)pgtbl)); | ||
| 85 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); | 86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); |
| 86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); | 87 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); |
| 87 | 88 | ||
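The hunk above switches the Jazz VDMA page table pointer to its CKSEG1 (uncached) alias and feeds an explicit unsigned long into CPHYSADDR before programming the R4030 translation table base. The following is a minimal, hedged userspace sketch of the 32-bit MIPS segment arithmetic those macros perform, assuming the conventional KSEG layout (physical address in the low 512 MB, uncached window at 0xa0000000); the authoritative definitions live in asm/addrspace.h, not here.

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed 32-bit MIPS KSEG arithmetic; mirrors asm/addrspace.h, not a copy of it. */
	#define DEMO_CPHYSADDR(a)  ((uint32_t)(a) & 0x1fffffffu)      /* strip segment bits */
	#define DEMO_CKSEG1ADDR(a) (DEMO_CPHYSADDR(a) | 0xa0000000u)  /* uncached, unmapped */

	int main(void)
	{
		uint32_t pgtbl = 0x80123000u;	/* hypothetical KSEG0 address from the allocator */

		printf("KSEG0 virt : 0x%08x\n", (unsigned int)pgtbl);
		printf("KSEG1 alias: 0x%08x\n", (unsigned int)DEMO_CKSEG1ADDR(pgtbl)); /* what vdma_init() now uses */
		printf("physical   : 0x%08x\n", (unsigned int)DEMO_CPHYSADDR(pgtbl));  /* what goes into TRSTBL_BASE */
		return 0;
	}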
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f0bc3312ed11..6549499eb202 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
| @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { | |||
| 224 | .irq_set_type = ltq_eiu_settype, | 224 | .irq_set_type = ltq_eiu_settype, |
| 225 | }; | 225 | }; |
| 226 | 226 | ||
| 227 | static void ltq_hw_irqdispatch(int module) | 227 | static void ltq_hw_irq_handler(struct irq_desc *desc) |
| 228 | { | 228 | { |
| 229 | int module = irq_desc_get_irq(desc) - 2; | ||
| 229 | u32 irq; | 230 | u32 irq; |
| 231 | int hwirq; | ||
| 230 | 232 | ||
| 231 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); | 233 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); |
| 232 | if (irq == 0) | 234 | if (irq == 0) |
| @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) | |||
| 237 | * other bits might be bogus | 239 | * other bits might be bogus |
| 238 | */ | 240 | */ |
| 239 | irq = __fls(irq); | 241 | irq = __fls(irq); |
| 240 | do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); | 242 | hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); |
| 243 | generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); | ||
| 241 | 244 | ||
| 242 | /* if this is a EBU irq, we need to ack it or get a deadlock */ | 245 | /* if this is a EBU irq, we need to ack it or get a deadlock */ |
| 243 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) | 246 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) |
| @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) | |||
| 245 | LTQ_EBU_PCC_ISTAT); | 248 | LTQ_EBU_PCC_ISTAT); |
| 246 | } | 249 | } |
| 247 | 250 | ||
| 248 | #define DEFINE_HWx_IRQDISPATCH(x) \ | ||
| 249 | static void ltq_hw ## x ## _irqdispatch(void) \ | ||
| 250 | { \ | ||
| 251 | ltq_hw_irqdispatch(x); \ | ||
| 252 | } | ||
| 253 | DEFINE_HWx_IRQDISPATCH(0) | ||
| 254 | DEFINE_HWx_IRQDISPATCH(1) | ||
| 255 | DEFINE_HWx_IRQDISPATCH(2) | ||
| 256 | DEFINE_HWx_IRQDISPATCH(3) | ||
| 257 | DEFINE_HWx_IRQDISPATCH(4) | ||
| 258 | |||
| 259 | #if MIPS_CPU_TIMER_IRQ == 7 | ||
| 260 | static void ltq_hw5_irqdispatch(void) | ||
| 261 | { | ||
| 262 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
| 263 | } | ||
| 264 | #else | ||
| 265 | DEFINE_HWx_IRQDISPATCH(5) | ||
| 266 | #endif | ||
| 267 | |||
| 268 | static void ltq_hw_irq_handler(struct irq_desc *desc) | ||
| 269 | { | ||
| 270 | ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); | ||
| 271 | } | ||
| 272 | |||
| 273 | asmlinkage void plat_irq_dispatch(void) | ||
| 274 | { | ||
| 275 | unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; | ||
| 276 | int irq; | ||
| 277 | |||
| 278 | if (!pending) { | ||
| 279 | spurious_interrupt(); | ||
| 280 | return; | ||
| 281 | } | ||
| 282 | |||
| 283 | pending >>= CAUSEB_IP; | ||
| 284 | while (pending) { | ||
| 285 | irq = fls(pending) - 1; | ||
| 286 | do_IRQ(MIPS_CPU_IRQ_BASE + irq); | ||
| 287 | pending &= ~BIT(irq); | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) | 251 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) |
| 292 | { | 252 | { |
| 293 | struct irq_chip *chip = &ltq_irq_type; | 253 | struct irq_chip *chip = &ltq_irq_type; |
| @@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
| 343 | for (i = 0; i < MAX_IM; i++) | 303 | for (i = 0; i < MAX_IM; i++) |
| 344 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); | 304 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); |
| 345 | 305 | ||
| 346 | if (cpu_has_vint) { | ||
| 347 | pr_info("Setting up vectored interrupts\n"); | ||
| 348 | set_vi_handler(2, ltq_hw0_irqdispatch); | ||
| 349 | set_vi_handler(3, ltq_hw1_irqdispatch); | ||
| 350 | set_vi_handler(4, ltq_hw2_irqdispatch); | ||
| 351 | set_vi_handler(5, ltq_hw3_irqdispatch); | ||
| 352 | set_vi_handler(6, ltq_hw4_irqdispatch); | ||
| 353 | set_vi_handler(7, ltq_hw5_irqdispatch); | ||
| 354 | } | ||
| 355 | |||
| 356 | ltq_domain = irq_domain_add_linear(node, | 306 | ltq_domain = irq_domain_add_linear(node, |
| 357 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, | 307 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, |
| 358 | &irq_domain_ops, 0); | 308 | &irq_domain_ops, 0); |
| 359 | 309 | ||
| 360 | #ifndef CONFIG_MIPS_MT_SMP | ||
| 361 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||
| 362 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
| 363 | #else | ||
| 364 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | | ||
| 365 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
| 366 | #endif | ||
| 367 | |||
| 368 | /* tell oprofile which irq to use */ | 310 | /* tell oprofile which irq to use */ |
| 369 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); | 311 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
| 370 | 312 | ||
| 371 | /* | ||
| 372 | * if the timer irq is not one of the mips irqs we need to | ||
| 373 | * create a mapping | ||
| 374 | */ | ||
| 375 | if (MIPS_CPU_TIMER_IRQ != 7) | ||
| 376 | irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); | ||
| 377 | |||
| 378 | /* the external interrupts are optional and xway only */ | 313 | /* the external interrupts are optional and xway only */ |
| 379 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); | 314 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); |
| 380 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { | 315 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { |
| @@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int); | |||
| 411 | 346 | ||
| 412 | unsigned int get_c0_compare_int(void) | 347 | unsigned int get_c0_compare_int(void) |
| 413 | { | 348 | { |
| 414 | return MIPS_CPU_TIMER_IRQ; | 349 | return CP0_LEGACY_COMPARE_IRQ; |
| 415 | } | 350 | } |
| 416 | 351 | ||
| 417 | static struct of_device_id __initdata of_irq_ids[] = { | 352 | static struct of_device_id __initdata of_irq_ids[] = { |
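After this rework the chained handler derives the ICU module from the cascade IRQ number and converts the highest pending IOSR bit into a linear hwirq (bit + MIPS_CPU_IRQ_CASCADE + INT_NUM_IM_OFFSET * module) before handing it to the irq domain. Below is a small standalone sketch of that decode step only; the cascade base and per-module offset are assumed example values, the real constants come from the lantiq headers.

	#include <stdio.h>
	#include <stdint.h>

	/* Example values only; the kernel uses MIPS_CPU_IRQ_CASCADE and INT_NUM_IM_OFFSET. */
	#define DEMO_IRQ_CASCADE	8
	#define DEMO_IM_OFFSET		32

	/* Highest pending bit wins, matching the __fls() use in ltq_hw_irq_handler(). */
	static int demo_decode_hwirq(int module, uint32_t iosr)
	{
		int bit;

		if (!iosr)
			return -1;
		bit = 31 - __builtin_clz(iosr);		/* __fls() equivalent */
		return bit + DEMO_IRQ_CASCADE + DEMO_IM_OFFSET * module;
	}

	int main(void)
	{
		/* Module 2 with bits 3 and 17 pending: bit 17 is serviced first. */
		printf("hwirq = %d\n", demo_decode_hwirq(2, (1u << 3) | (1u << 17)));
		return 0;
	}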
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index 2a5bb849b10e..288b58b00dc8 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c | |||
| @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) | |||
| 369 | int irq; | 369 | int irq; |
| 370 | struct irq_chip *msi; | 370 | struct irq_chip *msi; |
| 371 | 371 | ||
| 372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | 372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { |
| 373 | return 0; | ||
| 374 | } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | ||
| 373 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; | 375 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; |
| 374 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; | 376 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; |
| 375 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; | 377 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; |
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 0a935c136ec2..ac3482882cf9 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile | |||
| @@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S | |||
| 3 | 3 | ||
| 4 | KBUILD_DEFCONFIG := defconfig | 4 | KBUILD_DEFCONFIG := defconfig |
| 5 | 5 | ||
| 6 | comma = , | ||
| 7 | |||
| 8 | |||
| 9 | ifdef CONFIG_FUNCTION_TRACER | 6 | ifdef CONFIG_FUNCTION_TRACER |
| 10 | arch-y += -malways-save-lp -mno-relax | 7 | arch-y += -malways-save-lp -mno-relax |
| 11 | endif | 8 | endif |
| @@ -54,8 +51,6 @@ endif | |||
| 54 | boot := arch/nds32/boot | 51 | boot := arch/nds32/boot |
| 55 | core-y += $(boot)/dts/ | 52 | core-y += $(boot)/dts/ |
| 56 | 53 | ||
| 57 | .PHONY: FORCE | ||
| 58 | |||
| 59 | Image: vmlinux | 54 | Image: vmlinux |
| 60 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 55 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
| 61 | 56 | ||
| @@ -68,9 +63,6 @@ prepare: vdso_prepare | |||
| 68 | vdso_prepare: prepare0 | 63 | vdso_prepare: prepare0 |
| 69 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h | 64 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h |
| 70 | 65 | ||
| 71 | CLEAN_FILES += include/asm-nds32/constants.h* | ||
| 72 | |||
| 73 | # We use MRPROPER_FILES and CLEAN_FILES now | ||
| 74 | archclean: | 66 | archclean: |
| 75 | $(Q)$(MAKE) $(clean)=$(boot) | 67 | $(Q)$(MAKE) $(clean)=$(boot) |
| 76 | 68 | ||
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index 70e06d34006c..bf10141c7426 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | KBUILD_DEFCONFIG := or1ksim_defconfig | 20 | KBUILD_DEFCONFIG := or1ksim_defconfig |
| 21 | 21 | ||
| 22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | 22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S |
| 23 | LDFLAGS_vmlinux := | ||
| 24 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | 23 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) |
| 25 | 24 | ||
| 26 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ | 25 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ |
| @@ -50,5 +49,3 @@ else | |||
| 50 | BUILTIN_DTB := n | 49 | BUILTIN_DTB := n |
| 51 | endif | 50 | endif |
| 52 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ | 51 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ |
| 53 | |||
| 54 | all: vmlinux | ||
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h | |||
| @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
| 47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
| 48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
| 49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
| 50 | PERF_REG_POWERPC_MMCRA, | ||
| 50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
| 51 | }; | 52 | }; |
| 52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 57deb1e9ffea..20cc816b3508 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
| @@ -852,11 +852,12 @@ start_here: | |||
| 852 | 852 | ||
| 853 | /* set up the PTE pointers for the Abatron bdiGDB. | 853 | /* set up the PTE pointers for the Abatron bdiGDB. |
| 854 | */ | 854 | */ |
| 855 | tovirt(r6,r6) | ||
| 856 | lis r5, abatron_pteptrs@h | 855 | lis r5, abatron_pteptrs@h |
| 857 | ori r5, r5, abatron_pteptrs@l | 856 | ori r5, r5, abatron_pteptrs@l |
| 858 | stw r5, 0xf0(0) /* Must match your Abatron config file */ | 857 | stw r5, 0xf0(0) /* Must match your Abatron config file */ |
| 859 | tophys(r5,r5) | 858 | tophys(r5,r5) |
| 859 | lis r6, swapper_pg_dir@h | ||
| 860 | ori r6, r6, swapper_pg_dir@l | ||
| 860 | stw r6, 0(r5) | 861 | stw r6, 0(r5) |
| 861 | 862 | ||
| 862 | /* Now turn on the MMU for real! */ | 863 | /* Now turn on the MMU for real! */ |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index bd5e6834ca69..6794466f6420 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
| @@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
| 755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, | 755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, |
| 756 | &uc_transact->uc_mcontext)) | 756 | &uc_transact->uc_mcontext)) |
| 757 | goto badframe; | 757 | goto badframe; |
| 758 | } | 758 | } else |
| 759 | #endif | 759 | #endif |
| 760 | /* Fall through, for non-TM restore */ | 760 | { |
| 761 | if (!MSR_TM_ACTIVE(msr)) { | ||
| 762 | /* | 761 | /* |
| 762 | * Fall through, for non-TM restore | ||
| 763 | * | ||
| 763 | * Unset MSR[TS] on the thread regs since MSR from user | 764 | * Unset MSR[TS] on the thread regs since MSR from user |
| 764 | * context does not have MSR active, and recheckpoint was | 765 | * context does not have MSR active, and recheckpoint was |
| 765 | * not called since restore_tm_sigcontexts() was not called | 766 | * not called since restore_tm_sigcontexts() was not called |
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 29746dc28df5..517662a56bdc 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c | |||
| @@ -967,13 +967,6 @@ out: | |||
| 967 | } | 967 | } |
| 968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 969 | 969 | ||
| 970 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
| 971 | unsigned long __init arch_syscall_addr(int nr) | ||
| 972 | { | ||
| 973 | return sys_call_table[nr*2]; | ||
| 974 | } | ||
| 975 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
| 976 | |||
| 977 | #ifdef PPC64_ELF_ABI_v1 | 970 | #ifdef PPC64_ELF_ABI_v1 |
| 978 | char *arch_ftrace_match_adjust(char *str, const char *search) | 971 | char *arch_ftrace_match_adjust(char *str, const char *search) |
| 979 | { | 972 | { |
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 5c36b3a8d47a..3349f3f8fe84 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c | |||
| @@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { | |||
| 70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), | 70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), |
| 71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), | 71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), |
| 72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), | 72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), |
| 73 | PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 75 | u64 perf_reg_value(struct pt_regs *regs, int idx) | 76 | u64 perf_reg_value(struct pt_regs *regs, int idx) |
| @@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) | |||
| 83 | !is_sier_available())) | 84 | !is_sier_available())) |
| 84 | return 0; | 85 | return 0; |
| 85 | 86 | ||
| 87 | if (idx == PERF_REG_POWERPC_MMCRA && | ||
| 88 | (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || | ||
| 89 | IS_ENABLED(CONFIG_PPC32))) | ||
| 90 | return 0; | ||
| 91 | |||
| 86 | return regs_get_register(regs, pt_regs_offset[idx]); | 92 | return regs_get_register(regs, pt_regs_offset[idx]); |
| 87 | } | 93 | } |
| 88 | 94 | ||
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index a1aaa1569d7c..f0e488d97567 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c | |||
| @@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
| 237 | continue; | 237 | continue; |
| 238 | 238 | ||
| 239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); | 239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); |
| 240 | seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); | 240 | seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); |
| 241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); | 241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); |
| 242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); | 242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); |
| 243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); | 243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); |
| 244 | 244 | ||
| 245 | seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); | 245 | seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys)); |
| 246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); | 246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); |
| 247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); | 247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); |
| 248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); | 248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); |
| @@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
| 252 | blk->size, blk->owner); | 252 | blk->size, blk->owner); |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); | 255 | seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); |
| 256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); | 256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); |
| 257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); | 257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); |
| 258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); | 258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index e66644e0fb40..9438fa0fc355 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
| @@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void) | |||
| 538 | /* see if there is a keyboard in the device tree | 538 | /* see if there is a keyboard in the device tree |
| 539 | with a parent of type "adb" */ | 539 | with a parent of type "adb" */ |
| 540 | for_each_node_by_name(kbd, "keyboard") | 540 | for_each_node_by_name(kbd, "keyboard") |
| 541 | if (kbd->parent && kbd->parent->type | 541 | if (of_node_is_type(kbd->parent, "adb")) |
| 542 | && strcmp(kbd->parent->type, "adb") == 0) | ||
| 543 | break; | 542 | break; |
| 544 | of_node_put(kbd); | 543 | of_node_put(kbd); |
| 545 | if (kbd) | 544 | if (kbd) |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index d7f742ed48ba..3f58c7dbd581 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
| @@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) | |||
| 564 | } | 564 | } |
| 565 | } else { | 565 | } else { |
| 566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ | 566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ |
| 567 | pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); | 567 | pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); |
| 568 | table_group = &pe->npucomp->table_group; | 568 | table_group = &pe->npucomp->table_group; |
| 569 | table_group->ops = &pnv_npu_peers_ops; | 569 | table_group->ops = &pnv_npu_peers_ops; |
| 570 | iommu_register_group(table_group, hose->global_number, | 570 | iommu_register_group(table_group, hose->global_number, |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1d6406a051f1..7db3119f8a5b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
| @@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void) | |||
| 2681 | list_for_each_entry(hose, &hose_list, list_node) { | 2681 | list_for_each_entry(hose, &hose_list, list_node) { |
| 2682 | phb = hose->private_data; | 2682 | phb = hose->private_data; |
| 2683 | 2683 | ||
| 2684 | if (phb->type == PNV_PHB_NPU_NVLINK) | 2684 | if (phb->type == PNV_PHB_NPU_NVLINK || |
| 2685 | phb->type == PNV_PHB_NPU_OCAPI) | ||
| 2685 | continue; | 2686 | continue; |
| 2686 | 2687 | ||
| 2687 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { | 2688 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 7725825d887d..37a77e57893e 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
| @@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void) | |||
| 264 | if (!of_device_is_compatible(nvdn->parent, | 264 | if (!of_device_is_compatible(nvdn->parent, |
| 265 | "ibm,power9-npu")) | 265 | "ibm,power9-npu")) |
| 266 | continue; | 266 | continue; |
| 267 | #ifdef CONFIG_PPC_POWERNV | ||
| 267 | WARN_ON_ONCE(pnv_npu2_init(hose)); | 268 | WARN_ON_ONCE(pnv_npu2_init(hose)); |
| 269 | #endif | ||
| 268 | break; | 270 | break; |
| 269 | } | 271 | } |
| 270 | } | 272 | } |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index ccbb53e22024..8d04e6f3f796 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
| @@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
| 25 | atomic_set(&mm->context.flush_count, 0); | 25 | atomic_set(&mm->context.flush_count, 0); |
| 26 | mm->context.gmap_asce = 0; | 26 | mm->context.gmap_asce = 0; |
| 27 | mm->context.flush_mm = 0; | 27 | mm->context.flush_mm = 0; |
| 28 | mm->context.compat_mm = 0; | 28 | mm->context.compat_mm = test_thread_flag(TIF_31BIT); |
| 29 | #ifdef CONFIG_PGSTE | 29 | #ifdef CONFIG_PGSTE |
| 30 | mm->context.alloc_pgste = page_table_allocate_pgste || | 30 | mm->context.alloc_pgste = page_table_allocate_pgste || |
| 31 | test_thread_flag(TIF_PGSTE) || | 31 | test_thread_flag(TIF_PGSTE) || |
| @@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 90 | { | 90 | { |
| 91 | int cpu = smp_processor_id(); | 91 | int cpu = smp_processor_id(); |
| 92 | 92 | ||
| 93 | if (prev == next) | ||
| 94 | return; | ||
| 95 | S390_lowcore.user_asce = next->context.asce; | 93 | S390_lowcore.user_asce = next->context.asce; |
| 96 | cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); | 94 | cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); |
| 97 | /* Clear previous user-ASCE from CR1 and CR7 */ | 95 | /* Clear previous user-ASCE from CR1 and CR7 */ |
| @@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 103 | __ctl_load(S390_lowcore.vdso_asce, 7, 7); | 101 | __ctl_load(S390_lowcore.vdso_asce, 7, 7); |
| 104 | clear_cpu_flag(CIF_ASCE_SECONDARY); | 102 | clear_cpu_flag(CIF_ASCE_SECONDARY); |
| 105 | } | 103 | } |
| 106 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | 104 | if (prev != next) |
| 105 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | ||
| 107 | } | 106 | } |
| 108 | 107 | ||
| 109 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | 108 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index af5c2b3f7065..a8c7789b246b 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
| @@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void) | |||
| 63 | if (stsi(vmms, 3, 2, 2) || !vmms->count) | 63 | if (stsi(vmms, 3, 2, 2) || !vmms->count) |
| 64 | return; | 64 | return; |
| 65 | 65 | ||
| 66 | /* Running under KVM? If not we assume z/VM */ | 66 | /* Detect known hypervisors */ |
| 67 | if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) | 67 | if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) |
| 68 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; | 68 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; |
| 69 | else | 69 | else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) |
| 70 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; | 70 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; |
| 71 | } | 71 | } |
| 72 | 72 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 72dd23ef771b..7ed90a759135 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p) | |||
| 1006 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 1006 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
| 1007 | else if (MACHINE_IS_LPAR) | 1007 | else if (MACHINE_IS_LPAR) |
| 1008 | pr_info("Linux is running natively in 64-bit mode\n"); | 1008 | pr_info("Linux is running natively in 64-bit mode\n"); |
| 1009 | else | ||
| 1010 | pr_info("Linux is running as a guest in 64-bit mode\n"); | ||
| 1009 | 1011 | ||
| 1010 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 1012 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
| 1011 | /* boot_command_line has been already set up in early.c */ | 1013 | /* boot_command_line has been already set up in early.c */ |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f82b3d3c36e2..b198ece2aad6 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) | |||
| 381 | */ | 381 | */ |
| 382 | void smp_call_ipl_cpu(void (*func)(void *), void *data) | 382 | void smp_call_ipl_cpu(void (*func)(void *), void *data) |
| 383 | { | 383 | { |
| 384 | struct lowcore *lc = pcpu_devices->lowcore; | ||
| 385 | |||
| 386 | if (pcpu_devices[0].address == stap()) | ||
| 387 | lc = &S390_lowcore; | ||
| 388 | |||
| 384 | pcpu_delegate(&pcpu_devices[0], func, data, | 389 | pcpu_delegate(&pcpu_devices[0], func, data, |
| 385 | pcpu_devices->lowcore->nodat_stack); | 390 | lc->nodat_stack); |
| 386 | } | 391 | } |
| 387 | 392 | ||
| 388 | int smp_find_processor_id(u16 address) | 393 | int smp_find_processor_id(u16 address) |
| @@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev, | |||
| 1166 | { | 1171 | { |
| 1167 | int rc; | 1172 | int rc; |
| 1168 | 1173 | ||
| 1174 | rc = lock_device_hotplug_sysfs(); | ||
| 1175 | if (rc) | ||
| 1176 | return rc; | ||
| 1169 | rc = smp_rescan_cpus(); | 1177 | rc = smp_rescan_cpus(); |
| 1178 | unlock_device_hotplug(); | ||
| 1170 | return rc ? rc : count; | 1179 | return rc ? rc : count; |
| 1171 | } | 1180 | } |
| 1172 | static DEVICE_ATTR_WO(rescan); | 1181 | static DEVICE_ATTR_WO(rescan); |
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index ebe748a9f472..4ff354887db4 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
| @@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
| 224 | 224 | ||
| 225 | vdso_pages = vdso64_pages; | 225 | vdso_pages = vdso64_pages; |
| 226 | #ifdef CONFIG_COMPAT | 226 | #ifdef CONFIG_COMPAT |
| 227 | if (is_compat_task()) { | 227 | mm->context.compat_mm = is_compat_task(); |
| 228 | if (mm->context.compat_mm) | ||
| 228 | vdso_pages = vdso32_pages; | 229 | vdso_pages = vdso32_pages; |
| 229 | mm->context.compat_mm = 1; | ||
| 230 | } | ||
| 231 | #endif | 230 | #endif |
| 232 | /* | 231 | /* |
| 233 | * vDSO has a problem and was disabled, just don't "enable" it for | 232 | * vDSO has a problem and was disabled, just don't "enable" it for |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 15af091611e2..26387c7bf305 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -198,7 +198,7 @@ config X86 | |||
| 198 | select IRQ_FORCED_THREADING | 198 | select IRQ_FORCED_THREADING |
| 199 | select NEED_SG_DMA_LENGTH | 199 | select NEED_SG_DMA_LENGTH |
| 200 | select PCI_DOMAINS if PCI | 200 | select PCI_DOMAINS if PCI |
| 201 | select PCI_LOCKLESS_CONFIG | 201 | select PCI_LOCKLESS_CONFIG if PCI |
| 202 | select PERF_EVENTS | 202 | select PERF_EVENTS |
| 203 | select RTC_LIB | 203 | select RTC_LIB |
| 204 | select RTC_MC146818_LIB | 204 | select RTC_MC146818_LIB |
| @@ -617,7 +617,7 @@ config X86_INTEL_QUARK | |||
| 617 | 617 | ||
| 618 | config X86_INTEL_LPSS | 618 | config X86_INTEL_LPSS |
| 619 | bool "Intel Low Power Subsystem Support" | 619 | bool "Intel Low Power Subsystem Support" |
| 620 | depends on X86 && ACPI | 620 | depends on X86 && ACPI && PCI |
| 621 | select COMMON_CLK | 621 | select COMMON_CLK |
| 622 | select PINCTRL | 622 | select PINCTRL |
| 623 | select IOSF_MBI | 623 | select IOSF_MBI |
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 8eaf8952c408..39913770a44d 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S | |||
| @@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat) | |||
| 361 | 361 | ||
| 362 | /* Need to switch before accessing the thread stack. */ | 362 | /* Need to switch before accessing the thread stack. */ |
| 363 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | 363 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi |
| 364 | movq %rsp, %rdi | 364 | /* In the Xen PV case we already run on the thread stack. */ |
| 365 | ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV | ||
| 365 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | 366 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
| 366 | 367 | ||
| 367 | pushq 6*8(%rdi) /* regs->ss */ | 368 | pushq 6*8(%rdi) /* regs->ss */ |
| @@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat) | |||
| 370 | pushq 3*8(%rdi) /* regs->cs */ | 371 | pushq 3*8(%rdi) /* regs->cs */ |
| 371 | pushq 2*8(%rdi) /* regs->ip */ | 372 | pushq 2*8(%rdi) /* regs->ip */ |
| 372 | pushq 1*8(%rdi) /* regs->orig_ax */ | 373 | pushq 1*8(%rdi) /* regs->orig_ax */ |
| 373 | |||
| 374 | pushq (%rdi) /* pt_regs->di */ | 374 | pushq (%rdi) /* pt_regs->di */ |
| 375 | .Lint80_keep_stack: | ||
| 376 | |||
| 375 | pushq %rsi /* pt_regs->si */ | 377 | pushq %rsi /* pt_regs->si */ |
| 376 | xorl %esi, %esi /* nospec si */ | 378 | xorl %esi, %esi /* nospec si */ |
| 377 | pushq %rdx /* pt_regs->dx */ | 379 | pushq %rdx /* pt_regs->dx */ |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 0ca50611e8ce..19d18fae6ec6 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
| @@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) | |||
| 178 | 178 | ||
| 179 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); | 179 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); |
| 180 | 180 | ||
| 181 | /* | ||
| 182 | * Init a new mm. Used on mm copies, like at fork() | ||
| 183 | * and on mm's that are brand-new, like at execve(). | ||
| 184 | */ | ||
| 181 | static inline int init_new_context(struct task_struct *tsk, | 185 | static inline int init_new_context(struct task_struct *tsk, |
| 182 | struct mm_struct *mm) | 186 | struct mm_struct *mm) |
| 183 | { | 187 | { |
| @@ -228,8 +232,22 @@ do { \ | |||
| 228 | } while (0) | 232 | } while (0) |
| 229 | #endif | 233 | #endif |
| 230 | 234 | ||
| 235 | static inline void arch_dup_pkeys(struct mm_struct *oldmm, | ||
| 236 | struct mm_struct *mm) | ||
| 237 | { | ||
| 238 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | ||
| 239 | if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) | ||
| 240 | return; | ||
| 241 | |||
| 242 | /* Duplicate the oldmm pkey state in mm: */ | ||
| 243 | mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; | ||
| 244 | mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; | ||
| 245 | #endif | ||
| 246 | } | ||
| 247 | |||
| 231 | static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) | 248 | static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) |
| 232 | { | 249 | { |
| 250 | arch_dup_pkeys(oldmm, mm); | ||
| 233 | paravirt_arch_dup_mmap(oldmm, mm); | 251 | paravirt_arch_dup_mmap(oldmm, mm); |
| 234 | return ldt_dup_context(oldmm, mm); | 252 | return ldt_dup_context(oldmm, mm); |
| 235 | } | 253 | } |
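The new arch_dup_pkeys() copies the protection-key allocation map and the execute-only key from the parent mm on fork, but only when OSPKE is enabled. A toy sketch of that duplication pattern, with a stand-in struct and feature flag; it only illustrates what state is copied, not the real mm_context layout.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	/* Toy stand-in for the mm_context pkey bookkeeping copied in arch_dup_pkeys(). */
	struct demo_mm {
		uint16_t pkey_allocation_map;	/* one bit per protection key */
		int	 execute_only_pkey;	/* -1 if none has been set up */
	};

	static bool demo_ospke_enabled = true;	/* assumed; the kernel checks X86_FEATURE_OSPKE */

	static void demo_dup_pkeys(const struct demo_mm *oldmm, struct demo_mm *mm)
	{
		if (!demo_ospke_enabled)
			return;
		mm->pkey_allocation_map = oldmm->pkey_allocation_map;
		mm->execute_only_pkey   = oldmm->execute_only_pkey;
	}

	int main(void)
	{
		struct demo_mm parent = { .pkey_allocation_map = 0x000b, .execute_only_pkey = 3 };
		struct demo_mm child  = { 0, -1 };

		demo_dup_pkeys(&parent, &child);	/* what fork() now does via arch_dup_mmap() */
		printf("child map=0x%04x exec_only=%d\n",
		       child.pkey_allocation_map, child.execute_only_pkey);
		return 0;
	}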
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index a77445d1b034..780f2b42c8ef 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
| @@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t | |||
| 711 | { | 711 | { |
| 712 | if (unlikely(!access_ok(ptr,len))) | 712 | if (unlikely(!access_ok(ptr,len))) |
| 713 | return 0; | 713 | return 0; |
| 714 | __uaccess_begin(); | 714 | __uaccess_begin_nospec(); |
| 715 | return 1; | 715 | return 1; |
| 716 | } | 716 | } |
| 717 | #define user_access_begin(a,b) user_access_begin(a,b) | 717 | #define user_access_begin(a,b) user_access_begin(a,b) |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index c8b07d8ea5a2..17ffc869cab8 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
| @@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image) | |||
| 470 | 470 | ||
| 471 | kbuf.memsz = kbuf.bufsz; | 471 | kbuf.memsz = kbuf.bufsz; |
| 472 | kbuf.buf_align = ELF_CORE_HEADER_ALIGN; | 472 | kbuf.buf_align = ELF_CORE_HEADER_ALIGN; |
| 473 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; | ||
| 473 | ret = kexec_add_buffer(&kbuf); | 474 | ret = kexec_add_buffer(&kbuf); |
| 474 | if (ret) { | 475 | if (ret) { |
| 475 | vfree((void *)image->arch.elf_headers); | 476 | vfree((void *)image->arch.elf_headers); |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index b0acb22e5a46..dfd3aca82c61 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -21,10 +21,6 @@ | |||
| 21 | 21 | ||
| 22 | #define HPET_MASK CLOCKSOURCE_MASK(32) | 22 | #define HPET_MASK CLOCKSOURCE_MASK(32) |
| 23 | 23 | ||
| 24 | /* FSEC = 10^-15 | ||
| 25 | NSEC = 10^-9 */ | ||
| 26 | #define FSEC_PER_NSEC 1000000L | ||
| 27 | |||
| 28 | #define HPET_DEV_USED_BIT 2 | 24 | #define HPET_DEV_USED_BIT 2 |
| 29 | #define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) | 25 | #define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) |
| 30 | #define HPET_DEV_VALID 0x8 | 26 | #define HPET_DEV_VALID 0x8 |
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 278cd07228dd..0d5efa34f359 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
| @@ -434,6 +434,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
| 434 | kbuf.memsz = PAGE_ALIGN(header->init_size); | 434 | kbuf.memsz = PAGE_ALIGN(header->init_size); |
| 435 | kbuf.buf_align = header->kernel_alignment; | 435 | kbuf.buf_align = header->kernel_alignment; |
| 436 | kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; | 436 | kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; |
| 437 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; | ||
| 437 | ret = kexec_add_buffer(&kbuf); | 438 | ret = kexec_add_buffer(&kbuf); |
| 438 | if (ret) | 439 | if (ret) |
| 439 | goto out_free_params; | 440 | goto out_free_params; |
| @@ -448,6 +449,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
| 448 | kbuf.bufsz = kbuf.memsz = initrd_len; | 449 | kbuf.bufsz = kbuf.memsz = initrd_len; |
| 449 | kbuf.buf_align = PAGE_SIZE; | 450 | kbuf.buf_align = PAGE_SIZE; |
| 450 | kbuf.buf_min = MIN_INITRD_LOAD_ADDR; | 451 | kbuf.buf_min = MIN_INITRD_LOAD_ADDR; |
| 452 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; | ||
| 451 | ret = kexec_add_buffer(&kbuf); | 453 | ret = kexec_add_buffer(&kbuf); |
| 452 | if (ret) | 454 | if (ret) |
| 453 | goto out_free_params; | 455 | goto out_free_params; |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index ba4bfb7f6a36..5c93a65ee1e5 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
| @@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | |||
| 457 | #else | 457 | #else |
| 458 | u64 ipi_bitmap = 0; | 458 | u64 ipi_bitmap = 0; |
| 459 | #endif | 459 | #endif |
| 460 | long ret; | ||
| 460 | 461 | ||
| 461 | if (cpumask_empty(mask)) | 462 | if (cpumask_empty(mask)) |
| 462 | return; | 463 | return; |
| @@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | |||
| 482 | } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { | 483 | } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { |
| 483 | max = apic_id < max ? max : apic_id; | 484 | max = apic_id < max ? max : apic_id; |
| 484 | } else { | 485 | } else { |
| 485 | kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | 486 | ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, |
| 486 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); | 487 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); |
| 488 | WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); | ||
| 487 | min = max = apic_id; | 489 | min = max = apic_id; |
| 488 | ipi_bitmap = 0; | 490 | ipi_bitmap = 0; |
| 489 | } | 491 | } |
| @@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | |||
| 491 | } | 493 | } |
| 492 | 494 | ||
| 493 | if (ipi_bitmap) { | 495 | if (ipi_bitmap) { |
| 494 | kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | 496 | ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, |
| 495 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); | 497 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); |
| 498 | WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); | ||
| 496 | } | 499 | } |
| 497 | 500 | ||
| 498 | local_irq_restore(flags); | 501 | local_irq_restore(flags); |
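The PV-IPI path packs destination APIC IDs into a bitmap anchored at the lowest ID in the current window and issues KVM_HC_SEND_IPI whenever an ID falls outside that window; the change above additionally checks the hypercall return value and warns once on failure. The sketch below is a simplified standalone rendering of the windowing and flush-on-error pattern: it flushes instead of re-anchoring the window the way the kernel code does, and the stub "hypercall" and cluster size are assumptions.

	#include <stdio.h>
	#include <stdint.h>

	#define DEMO_CLUSTER_SIZE 64	/* stands in for KVM_IPI_CLUSTER_SIZE */

	/* Stub for the hypercall; the kernel checks its return and WARNs on < 0. */
	static long demo_send_ipi(uint64_t bitmap, unsigned int min)
	{
		printf("send_ipi: base=%u bitmap=0x%016llx\n", min,
		       (unsigned long long)bitmap);
		return 0;
	}

	static void demo_send_ipi_mask(const unsigned int *ids, int n)
	{
		uint64_t bitmap = 0;
		unsigned int min = 0;
		int i;

		for (i = 0; i < n; i++) {
			unsigned int id = ids[i];

			if (!bitmap) {
				min = id;
			} else if (id < min || id >= min + DEMO_CLUSTER_SIZE) {
				if (demo_send_ipi(bitmap, min) < 0)
					fprintf(stderr, "failed to send PV IPI\n");
				bitmap = 0;
				min = id;
			}
			bitmap |= 1ull << (id - min);
		}
		if (bitmap && demo_send_ipi(bitmap, min) < 0)
			fprintf(stderr, "failed to send PV IPI\n");
	}

	int main(void)
	{
		unsigned int ids[] = { 1, 3, 60, 70, 130 };

		demo_send_ipi_mask(ids, 5);
		return 0;
	}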
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index e9f777bfed40..3fae23834069 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
| @@ -297,15 +297,16 @@ static int __init tsc_setup(char *str) | |||
| 297 | 297 | ||
| 298 | __setup("tsc=", tsc_setup); | 298 | __setup("tsc=", tsc_setup); |
| 299 | 299 | ||
| 300 | #define MAX_RETRIES 5 | 300 | #define MAX_RETRIES 5 |
| 301 | #define SMI_TRESHOLD 50000 | 301 | #define TSC_DEFAULT_THRESHOLD 0x20000 |
| 302 | 302 | ||
| 303 | /* | 303 | /* |
| 304 | * Read TSC and the reference counters. Take care of SMI disturbance | 304 | * Read TSC and the reference counters. Take care of any disturbances |
| 305 | */ | 305 | */ |
| 306 | static u64 tsc_read_refs(u64 *p, int hpet) | 306 | static u64 tsc_read_refs(u64 *p, int hpet) |
| 307 | { | 307 | { |
| 308 | u64 t1, t2; | 308 | u64 t1, t2; |
| 309 | u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD; | ||
| 309 | int i; | 310 | int i; |
| 310 | 311 | ||
| 311 | for (i = 0; i < MAX_RETRIES; i++) { | 312 | for (i = 0; i < MAX_RETRIES; i++) { |
| @@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet) | |||
| 315 | else | 316 | else |
| 316 | *p = acpi_pm_read_early(); | 317 | *p = acpi_pm_read_early(); |
| 317 | t2 = get_cycles(); | 318 | t2 = get_cycles(); |
| 318 | if ((t2 - t1) < SMI_TRESHOLD) | 319 | if ((t2 - t1) < thresh) |
| 319 | return t2; | 320 | return t2; |
| 320 | } | 321 | } |
| 321 | return ULLONG_MAX; | 322 | return ULLONG_MAX; |
| @@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void) | |||
| 703 | * zero. In each wait loop iteration we read the TSC and check | 704 | * zero. In each wait loop iteration we read the TSC and check |
| 704 | * the delta to the previous read. We keep track of the min | 705 | * the delta to the previous read. We keep track of the min |
| 705 | * and max values of that delta. The delta is mostly defined | 706 | * and max values of that delta. The delta is mostly defined |
| 706 | * by the IO time of the PIT access, so we can detect when a | 707 | * by the IO time of the PIT access, so we can detect when |
| 707 | * SMI/SMM disturbance happened between the two reads. If the | 708 | * any disturbance happened between the two reads. If the |
| 708 | * maximum time is significantly larger than the minimum time, | 709 | * maximum time is significantly larger than the minimum time, |
| 709 | * then we discard the result and have another try. | 710 | * then we discard the result and have another try. |
| 710 | * | 711 | * |
| 711 | * 2) Reference counter. If available we use the HPET or the | 712 | * 2) Reference counter. If available we use the HPET or the |
| 712 | * PMTIMER as a reference to check the sanity of that value. | 713 | * PMTIMER as a reference to check the sanity of that value. |
| 713 | * We use separate TSC readouts and check inside of the | 714 | * We use separate TSC readouts and check inside of the |
| 714 | * reference read for a SMI/SMM disturbance. We discard | 715 | * reference read for any possible disturbance. We discard |
| 715 | * disturbed values here as well. We do that around the PIT | 716 | * disturbed values here as well. We do that around the PIT |
| 716 | * calibration delay loop as we have to wait for a certain | 717 | * calibration delay loop as we have to wait for a certain |
| 717 | * amount of time anyway. | 718 | * amount of time anyway. |
| @@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void) | |||
| 744 | if (ref1 == ref2) | 745 | if (ref1 == ref2) |
| 745 | continue; | 746 | continue; |
| 746 | 747 | ||
| 747 | /* Check, whether the sampling was disturbed by an SMI */ | 748 | /* Check, whether the sampling was disturbed */ |
| 748 | if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) | 749 | if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) |
| 749 | continue; | 750 | continue; |
| 750 | 751 | ||
| @@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); | |||
| 1268 | */ | 1269 | */ |
| 1269 | static void tsc_refine_calibration_work(struct work_struct *work) | 1270 | static void tsc_refine_calibration_work(struct work_struct *work) |
| 1270 | { | 1271 | { |
| 1271 | static u64 tsc_start = -1, ref_start; | 1272 | static u64 tsc_start = ULLONG_MAX, ref_start; |
| 1272 | static int hpet; | 1273 | static int hpet; |
| 1273 | u64 tsc_stop, ref_stop, delta; | 1274 | u64 tsc_stop, ref_stop, delta; |
| 1274 | unsigned long freq; | 1275 | unsigned long freq; |
| @@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work) | |||
| 1283 | * delayed the first time we expire. So set the workqueue | 1284 | * delayed the first time we expire. So set the workqueue |
| 1284 | * again once we know timers are working. | 1285 | * again once we know timers are working. |
| 1285 | */ | 1286 | */ |
| 1286 | if (tsc_start == -1) { | 1287 | if (tsc_start == ULLONG_MAX) { |
| 1288 | restart: | ||
| 1287 | /* | 1289 | /* |
| 1288 | * Only set hpet once, to avoid mixing hardware | 1290 | * Only set hpet once, to avoid mixing hardware |
| 1289 | * if the hpet becomes enabled later. | 1291 | * if the hpet becomes enabled later. |
| 1290 | */ | 1292 | */ |
| 1291 | hpet = is_hpet_enabled(); | 1293 | hpet = is_hpet_enabled(); |
| 1292 | schedule_delayed_work(&tsc_irqwork, HZ); | ||
| 1293 | tsc_start = tsc_read_refs(&ref_start, hpet); | 1294 | tsc_start = tsc_read_refs(&ref_start, hpet); |
| 1295 | schedule_delayed_work(&tsc_irqwork, HZ); | ||
| 1294 | return; | 1296 | return; |
| 1295 | } | 1297 | } |
| 1296 | 1298 | ||
| @@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work) | |||
| 1300 | if (ref_start == ref_stop) | 1302 | if (ref_start == ref_stop) |
| 1301 | goto out; | 1303 | goto out; |
| 1302 | 1304 | ||
| 1303 | /* Check, whether the sampling was disturbed by an SMI */ | 1305 | /* Check, whether the sampling was disturbed */ |
| 1304 | if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) | 1306 | if (tsc_stop == ULLONG_MAX) |
| 1305 | goto out; | 1307 | goto restart; |
| 1306 | 1308 | ||
| 1307 | delta = tsc_stop - tsc_start; | 1309 | delta = tsc_stop - tsc_start; |
| 1308 | delta *= 1000000LL; | 1310 | delta *= 1000000LL; |
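The calibration change above replaces the fixed SMI_TRESHOLD with a limit derived from the already-known TSC frequency (tsc_khz >> 5, roughly the cycles elapsed in 31 µs), falling back to 0x20000 cycles while the frequency is still unknown. A tiny sketch of that arithmetic with example frequencies; the values are illustrative, not measurements.

	#include <stdio.h>
	#include <stdint.h>

	#define DEMO_DEFAULT_THRESHOLD 0x20000	/* TSC_DEFAULT_THRESHOLD from the patch */

	/* Cycles allowed between the two reads before the sample counts as disturbed. */
	static uint64_t demo_tsc_read_threshold(unsigned long tsc_khz)
	{
		return tsc_khz ? tsc_khz >> 5 : DEMO_DEFAULT_THRESHOLD;
	}

	int main(void)
	{
		unsigned long khz[] = { 0, 1000000, 2400000, 3500000 };	/* unknown, 1.0, 2.4, 3.5 GHz */
		int i;

		for (i = 0; i < 4; i++)
			printf("tsc_khz=%8lu -> threshold=%llu cycles\n", khz[i],
			       (unsigned long long)demo_tsc_read_threshold(khz[i]));
		return 0;
	}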
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 69b3a7c30013..31ecf7a76d5a 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile | |||
| @@ -2,10 +2,6 @@ | |||
| 2 | 2 | ||
| 3 | ccflags-y += -Iarch/x86/kvm | 3 | ccflags-y += -Iarch/x86/kvm |
| 4 | 4 | ||
| 5 | CFLAGS_x86.o := -I. | ||
| 6 | CFLAGS_svm.o := -I. | ||
| 7 | CFLAGS_vmx.o := -I. | ||
| 8 | |||
| 9 | KVM := ../../../virt/kvm | 5 | KVM := ../../../virt/kvm |
| 10 | 6 | ||
| 11 | kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ | 7 | kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ |
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index c90a5352d158..89d20ed1d2e8 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c | |||
| @@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
| 1636 | ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); | 1636 | ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); |
| 1637 | if (ret != HV_STATUS_INVALID_PORT_ID) | 1637 | if (ret != HV_STATUS_INVALID_PORT_ID) |
| 1638 | break; | 1638 | break; |
| 1639 | /* maybe userspace knows this conn_id: fall through */ | 1639 | /* fall through - maybe userspace knows this conn_id. */ |
| 1640 | case HVCALL_POST_MESSAGE: | 1640 | case HVCALL_POST_MESSAGE: |
| 1641 | /* don't bother userspace if it has no way to handle it */ | 1641 | /* don't bother userspace if it has no way to handle it */ |
| 1642 | if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { | 1642 | if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { |
| @@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, | |||
| 1832 | ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; | 1832 | ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; |
| 1833 | ent->eax |= HV_X64_MSR_RESET_AVAILABLE; | 1833 | ent->eax |= HV_X64_MSR_RESET_AVAILABLE; |
| 1834 | ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; | 1834 | ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; |
| 1835 | ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE; | ||
| 1836 | ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; | 1835 | ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; |
| 1837 | ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; | 1836 | ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; |
| 1838 | 1837 | ||
| @@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, | |||
| 1848 | case HYPERV_CPUID_ENLIGHTMENT_INFO: | 1847 | case HYPERV_CPUID_ENLIGHTMENT_INFO: |
| 1849 | ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; | 1848 | ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; |
| 1850 | ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; | 1849 | ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; |
| 1851 | ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED; | ||
| 1852 | ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; | 1850 | ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; |
| 1853 | ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; | 1851 | ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; |
| 1854 | ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; | 1852 | ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; |
| 1855 | ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; | 1853 | if (evmcs_ver) |
| 1854 | ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; | ||
| 1856 | 1855 | ||
| 1857 | /* | 1856 | /* |
| 1858 | * Default number of spinlock retry attempts, matches | 1857 | * Default number of spinlock retry attempts, matches |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 9f089e2e09d0..4b6c2da7265c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
| 1035 | switch (delivery_mode) { | 1035 | switch (delivery_mode) { |
| 1036 | case APIC_DM_LOWEST: | 1036 | case APIC_DM_LOWEST: |
| 1037 | vcpu->arch.apic_arb_prio++; | 1037 | vcpu->arch.apic_arb_prio++; |
| 1038 | /* fall through */ | ||
| 1038 | case APIC_DM_FIXED: | 1039 | case APIC_DM_FIXED: |
| 1039 | if (unlikely(trig_mode && !level)) | 1040 | if (unlikely(trig_mode && !level)) |
| 1040 | break; | 1041 | break; |
| @@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) | |||
| 1874 | 1875 | ||
| 1875 | case APIC_LVT0: | 1876 | case APIC_LVT0: |
| 1876 | apic_manage_nmi_watchdog(apic, val); | 1877 | apic_manage_nmi_watchdog(apic, val); |
| 1878 | /* fall through */ | ||
| 1877 | case APIC_LVTTHMR: | 1879 | case APIC_LVTTHMR: |
| 1878 | case APIC_LVTPC: | 1880 | case APIC_LVTPC: |
| 1879 | case APIC_LVT1: | 1881 | case APIC_LVT1: |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ce770b446238..da9c42349b1f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, | |||
| 4371 | rsvd_bits(maxphyaddr, 51); | 4371 | rsvd_bits(maxphyaddr, 51); |
| 4372 | rsvd_check->rsvd_bits_mask[1][4] = | 4372 | rsvd_check->rsvd_bits_mask[1][4] = |
| 4373 | rsvd_check->rsvd_bits_mask[0][4]; | 4373 | rsvd_check->rsvd_bits_mask[0][4]; |
| 4374 | /* fall through */ | ||
| 4374 | case PT64_ROOT_4LEVEL: | 4375 | case PT64_ROOT_4LEVEL: |
| 4375 | rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | | 4376 | rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
| 4376 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | | 4377 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a157ca5b6869..f13a3a24d360 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
| 3414 | kvm_mmu_reset_context(&svm->vcpu); | 3414 | kvm_mmu_reset_context(&svm->vcpu); |
| 3415 | kvm_mmu_load(&svm->vcpu); | 3415 | kvm_mmu_load(&svm->vcpu); |
| 3416 | 3416 | ||
| 3417 | /* | ||
| 3418 | * Drop what we picked up for L2 via svm_complete_interrupts() so it | ||
| 3419 | * doesn't end up in L1. | ||
| 3420 | */ | ||
| 3421 | svm->vcpu.arch.nmi_injected = false; | ||
| 3422 | kvm_clear_exception_queue(&svm->vcpu); | ||
| 3423 | kvm_clear_interrupt_queue(&svm->vcpu); | ||
| 3424 | |||
| 3417 | return 0; | 3425 | return 0; |
| 3418 | } | 3426 | } |
| 3419 | 3427 | ||
| @@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
| 4395 | case MSR_IA32_APICBASE: | 4403 | case MSR_IA32_APICBASE: |
| 4396 | if (kvm_vcpu_apicv_active(vcpu)) | 4404 | if (kvm_vcpu_apicv_active(vcpu)) |
| 4397 | avic_update_vapic_bar(to_svm(vcpu), data); | 4405 | avic_update_vapic_bar(to_svm(vcpu), data); |
| 4398 | /* Follow through */ | 4406 | /* Fall through */ |
| 4399 | default: | 4407 | default: |
| 4400 | return kvm_set_msr_common(vcpu, msr); | 4408 | return kvm_set_msr_common(vcpu, msr); |
| 4401 | } | 4409 | } |
| @@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) | |||
| 4504 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); | 4512 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); |
| 4505 | break; | 4513 | break; |
| 4506 | case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { | 4514 | case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { |
| 4507 | int i; | ||
| 4508 | struct kvm_vcpu *vcpu; | ||
| 4509 | struct kvm *kvm = svm->vcpu.kvm; | ||
| 4510 | struct kvm_lapic *apic = svm->vcpu.arch.apic; | 4515 | struct kvm_lapic *apic = svm->vcpu.arch.apic; |
| 4511 | 4516 | ||
| 4512 | /* | 4517 | /* |
| 4513 | * At this point, we expect that the AVIC HW has already | 4518 | * Update ICR high and low, then emulate sending IPI, |
| 4514 | * set the appropriate IRR bits on the valid target | 4519 | * which is handled when writing APIC_ICR. |
| 4515 | * vcpus. So, we just need to kick the appropriate vcpu. | ||
| 4516 | */ | 4520 | */ |
| 4517 | kvm_for_each_vcpu(i, vcpu, kvm) { | 4521 | kvm_lapic_reg_write(apic, APIC_ICR2, icrh); |
| 4518 | bool m = kvm_apic_match_dest(vcpu, apic, | 4522 | kvm_lapic_reg_write(apic, APIC_ICR, icrl); |
| 4519 | icrl & KVM_APIC_SHORT_MASK, | ||
| 4520 | GET_APIC_DEST_FIELD(icrh), | ||
| 4521 | icrl & KVM_APIC_DEST_MASK); | ||
| 4522 | |||
| 4523 | if (m && !avic_vcpu_is_running(vcpu)) | ||
| 4524 | kvm_vcpu_wake_up(vcpu); | ||
| 4525 | } | ||
| 4526 | break; | 4523 | break; |
| 4527 | } | 4524 | } |
| 4528 | case AVIC_IPI_FAILURE_INVALID_TARGET: | 4525 | case AVIC_IPI_FAILURE_INVALID_TARGET: |
| 4526 | WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", | ||
| 4527 | index, svm->vcpu.vcpu_id, icrh, icrl); | ||
| 4529 | break; | 4528 | break; |
| 4530 | case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: | 4529 | case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: |
| 4531 | WARN_ONCE(1, "Invalid backing page\n"); | 4530 | WARN_ONCE(1, "Invalid backing page\n"); |
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 705f40ae2532..6432d08c7de7 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
| @@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex, | |||
| 1465 | #endif /* _TRACE_KVM_H */ | 1465 | #endif /* _TRACE_KVM_H */ |
| 1466 | 1466 | ||
| 1467 | #undef TRACE_INCLUDE_PATH | 1467 | #undef TRACE_INCLUDE_PATH |
| 1468 | #define TRACE_INCLUDE_PATH arch/x86/kvm | 1468 | #define TRACE_INCLUDE_PATH ../../arch/x86/kvm |
| 1469 | #undef TRACE_INCLUDE_FILE | 1469 | #undef TRACE_INCLUDE_FILE |
| 1470 | #define TRACE_INCLUDE_FILE trace | 1470 | #define TRACE_INCLUDE_FILE trace |
| 1471 | 1471 | ||
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c index 95bc2247478d..5466c6d85cf3 100644 --- a/arch/x86/kvm/vmx/evmcs.c +++ b/arch/x86/kvm/vmx/evmcs.c | |||
| @@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu, | |||
| 332 | uint16_t *vmcs_version) | 332 | uint16_t *vmcs_version) |
| 333 | { | 333 | { |
| 334 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 334 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 335 | bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled; | ||
| 336 | |||
| 337 | vmx->nested.enlightened_vmcs_enabled = true; | ||
| 335 | 338 | ||
| 336 | if (vmcs_version) | 339 | if (vmcs_version) |
| 337 | *vmcs_version = nested_get_evmcs_version(vcpu); | 340 | *vmcs_version = nested_get_evmcs_version(vcpu); |
| 338 | 341 | ||
| 339 | /* We don't support disabling the feature for simplicity. */ | 342 | /* We don't support disabling the feature for simplicity. */ |
| 340 | if (vmx->nested.enlightened_vmcs_enabled) | 343 | if (evmcs_already_enabled) |
| 341 | return 0; | 344 | return 0; |
| 342 | 345 | ||
| 343 | vmx->nested.enlightened_vmcs_enabled = true; | ||
| 344 | |||
| 345 | vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; | 346 | vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; |
| 346 | vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; | 347 | vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; |
| 347 | vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; | 348 | vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; |
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 2616bd2c7f2c..8ff20523661b 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
| @@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = { | |||
| 55 | static int max_shadow_read_write_fields = | 55 | static int max_shadow_read_write_fields = |
| 56 | ARRAY_SIZE(shadow_read_write_fields); | 56 | ARRAY_SIZE(shadow_read_write_fields); |
| 57 | 57 | ||
| 58 | void init_vmcs_shadow_fields(void) | 58 | static void init_vmcs_shadow_fields(void) |
| 59 | { | 59 | { |
| 60 | int i, j; | 60 | int i, j; |
| 61 | 61 | ||
| @@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) | |||
| 4140 | if (r < 0) | 4140 | if (r < 0) |
| 4141 | goto out_vmcs02; | 4141 | goto out_vmcs02; |
| 4142 | 4142 | ||
| 4143 | vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); | 4143 | vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); |
| 4144 | if (!vmx->nested.cached_vmcs12) | 4144 | if (!vmx->nested.cached_vmcs12) |
| 4145 | goto out_cached_vmcs12; | 4145 | goto out_cached_vmcs12; |
| 4146 | 4146 | ||
| 4147 | vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); | 4147 | vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); |
| 4148 | if (!vmx->nested.cached_shadow_vmcs12) | 4148 | if (!vmx->nested.cached_shadow_vmcs12) |
| 4149 | goto out_cached_shadow_vmcs12; | 4149 | goto out_cached_shadow_vmcs12; |
| 4150 | 4150 | ||
| @@ -5263,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, | |||
| 5263 | copy_shadow_to_vmcs12(vmx); | 5263 | copy_shadow_to_vmcs12(vmx); |
| 5264 | } | 5264 | } |
| 5265 | 5265 | ||
| 5266 | if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) | 5266 | /* |
| 5267 | * Copy over the full allocated size of vmcs12 rather than just the size | ||
| 5268 | * of the struct. | ||
| 5269 | */ | ||
| 5270 | if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) | ||
| 5267 | return -EFAULT; | 5271 | return -EFAULT; |
| 5268 | 5272 | ||
| 5269 | if (nested_cpu_has_shadow_vmcs(vmcs12) && | 5273 | if (nested_cpu_has_shadow_vmcs(vmcs12) && |
| 5270 | vmcs12->vmcs_link_pointer != -1ull) { | 5274 | vmcs12->vmcs_link_pointer != -1ull) { |
| 5271 | if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, | 5275 | if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, |
| 5272 | get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) | 5276 | get_shadow_vmcs12(vcpu), VMCS12_SIZE)) |
| 5273 | return -EFAULT; | 5277 | return -EFAULT; |
| 5274 | } | 5278 | } |
| 5275 | 5279 | ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f6915f10e584..4341175339f3 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
| @@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm) | |||
| 423 | to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; | 423 | to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, | 426 | static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, |
| 427 | void *data) | 427 | void *data) |
| 428 | { | 428 | { |
| 429 | struct kvm_tlb_range *range = data; | 429 | struct kvm_tlb_range *range = data; |
| @@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 1773 | if (!msr_info->host_initiated && | 1773 | if (!msr_info->host_initiated && |
| 1774 | !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) | 1774 | !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) |
| 1775 | return 1; | 1775 | return 1; |
| 1776 | /* Otherwise falls through */ | 1776 | /* Else, falls through */ |
| 1777 | default: | 1777 | default: |
| 1778 | msr = find_msr_entry(vmx, msr_info->index); | 1778 | msr = find_msr_entry(vmx, msr_info->index); |
| 1779 | if (msr) { | 1779 | if (msr) { |
| @@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2014 | /* Check reserved bit, higher 32 bits should be zero */ | 2014 | /* Check reserved bit, higher 32 bits should be zero */ |
| 2015 | if ((data >> 32) != 0) | 2015 | if ((data >> 32) != 0) |
| 2016 | return 1; | 2016 | return 1; |
| 2017 | /* Otherwise falls through */ | 2017 | /* Else, falls through */ |
| 2018 | default: | 2018 | default: |
| 2019 | msr = find_msr_entry(vmx, msr_index); | 2019 | msr = find_msr_entry(vmx, msr_index); |
| 2020 | if (msr) { | 2020 | if (msr) { |
| @@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, | |||
| 2344 | case 37: /* AAT100 */ | 2344 | case 37: /* AAT100 */ |
| 2345 | case 44: /* BC86,AAY89,BD102 */ | 2345 | case 44: /* BC86,AAY89,BD102 */ |
| 2346 | case 46: /* BA97 */ | 2346 | case 46: /* BA97 */ |
| 2347 | _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; | 2347 | _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; |
| 2348 | _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; | 2348 | _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; |
| 2349 | pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " | 2349 | pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " |
| 2350 | "does not work properly. Using workaround\n"); | 2350 | "does not work properly. Using workaround\n"); |
| @@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) | |||
| 6362 | vmx->loaded_vmcs->hv_timer_armed = false; | 6362 | vmx->loaded_vmcs->hv_timer_armed = false; |
| 6363 | } | 6363 | } |
| 6364 | 6364 | ||
| 6365 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | 6365 | static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) |
| 6366 | { | 6366 | { |
| 6367 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 6367 | unsigned long evmcs_rsp; |
| 6368 | unsigned long cr3, cr4, evmcs_rsp; | ||
| 6369 | |||
| 6370 | /* Record the guest's net vcpu time for enforced NMI injections. */ | ||
| 6371 | if (unlikely(!enable_vnmi && | ||
| 6372 | vmx->loaded_vmcs->soft_vnmi_blocked)) | ||
| 6373 | vmx->loaded_vmcs->entry_time = ktime_get(); | ||
| 6374 | |||
| 6375 | /* Don't enter VMX if guest state is invalid, let the exit handler | ||
| 6376 | start emulation until we arrive back to a valid state */ | ||
| 6377 | if (vmx->emulation_required) | ||
| 6378 | return; | ||
| 6379 | |||
| 6380 | if (vmx->ple_window_dirty) { | ||
| 6381 | vmx->ple_window_dirty = false; | ||
| 6382 | vmcs_write32(PLE_WINDOW, vmx->ple_window); | ||
| 6383 | } | ||
| 6384 | |||
| 6385 | if (vmx->nested.need_vmcs12_sync) | ||
| 6386 | nested_sync_from_vmcs12(vcpu); | ||
| 6387 | |||
| 6388 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
| 6389 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | ||
| 6390 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
| 6391 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | ||
| 6392 | |||
| 6393 | cr3 = __get_current_cr3_fast(); | ||
| 6394 | if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { | ||
| 6395 | vmcs_writel(HOST_CR3, cr3); | ||
| 6396 | vmx->loaded_vmcs->host_state.cr3 = cr3; | ||
| 6397 | } | ||
| 6398 | |||
| 6399 | cr4 = cr4_read_shadow(); | ||
| 6400 | if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { | ||
| 6401 | vmcs_writel(HOST_CR4, cr4); | ||
| 6402 | vmx->loaded_vmcs->host_state.cr4 = cr4; | ||
| 6403 | } | ||
| 6404 | |||
| 6405 | /* When single-stepping over STI and MOV SS, we must clear the | ||
| 6406 | * corresponding interruptibility bits in the guest state. Otherwise | ||
| 6407 | * vmentry fails as it then expects bit 14 (BS) in pending debug | ||
| 6408 | * exceptions being set, but that's not correct for the guest debugging | ||
| 6409 | * case. */ | ||
| 6410 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
| 6411 | vmx_set_interrupt_shadow(vcpu, 0); | ||
| 6412 | |||
| 6413 | if (static_cpu_has(X86_FEATURE_PKU) && | ||
| 6414 | kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && | ||
| 6415 | vcpu->arch.pkru != vmx->host_pkru) | ||
| 6416 | __write_pkru(vcpu->arch.pkru); | ||
| 6417 | |||
| 6418 | pt_guest_enter(vmx); | ||
| 6419 | |||
| 6420 | atomic_switch_perf_msrs(vmx); | ||
| 6421 | |||
| 6422 | vmx_update_hv_timer(vcpu); | ||
| 6423 | |||
| 6424 | /* | ||
| 6425 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if | ||
| 6426 | * it's non-zero. Since vmentry is serialising on affected CPUs, there | ||
| 6427 | * is no need to worry about the conditional branch over the wrmsr | ||
| 6428 | * being speculatively taken. | ||
| 6429 | */ | ||
| 6430 | x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); | ||
| 6431 | 6368 | ||
| 6432 | vmx->__launched = vmx->loaded_vmcs->launched; | 6369 | vmx->__launched = vmx->loaded_vmcs->launched; |
| 6433 | 6370 | ||
| @@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6567 | , "eax", "ebx", "edi" | 6504 | , "eax", "ebx", "edi" |
| 6568 | #endif | 6505 | #endif |
| 6569 | ); | 6506 | ); |
| 6507 | } | ||
| 6508 | STACK_FRAME_NON_STANDARD(__vmx_vcpu_run); | ||
| 6509 | |||
| 6510 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | ||
| 6511 | { | ||
| 6512 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
| 6513 | unsigned long cr3, cr4; | ||
| 6514 | |||
| 6515 | /* Record the guest's net vcpu time for enforced NMI injections. */ | ||
| 6516 | if (unlikely(!enable_vnmi && | ||
| 6517 | vmx->loaded_vmcs->soft_vnmi_blocked)) | ||
| 6518 | vmx->loaded_vmcs->entry_time = ktime_get(); | ||
| 6519 | |||
| 6520 | /* Don't enter VMX if guest state is invalid, let the exit handler | ||
| 6521 | start emulation until we arrive back to a valid state */ | ||
| 6522 | if (vmx->emulation_required) | ||
| 6523 | return; | ||
| 6524 | |||
| 6525 | if (vmx->ple_window_dirty) { | ||
| 6526 | vmx->ple_window_dirty = false; | ||
| 6527 | vmcs_write32(PLE_WINDOW, vmx->ple_window); | ||
| 6528 | } | ||
| 6529 | |||
| 6530 | if (vmx->nested.need_vmcs12_sync) | ||
| 6531 | nested_sync_from_vmcs12(vcpu); | ||
| 6532 | |||
| 6533 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
| 6534 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | ||
| 6535 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) | ||
| 6536 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | ||
| 6537 | |||
| 6538 | cr3 = __get_current_cr3_fast(); | ||
| 6539 | if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { | ||
| 6540 | vmcs_writel(HOST_CR3, cr3); | ||
| 6541 | vmx->loaded_vmcs->host_state.cr3 = cr3; | ||
| 6542 | } | ||
| 6543 | |||
| 6544 | cr4 = cr4_read_shadow(); | ||
| 6545 | if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { | ||
| 6546 | vmcs_writel(HOST_CR4, cr4); | ||
| 6547 | vmx->loaded_vmcs->host_state.cr4 = cr4; | ||
| 6548 | } | ||
| 6549 | |||
| 6550 | /* When single-stepping over STI and MOV SS, we must clear the | ||
| 6551 | * corresponding interruptibility bits in the guest state. Otherwise | ||
| 6552 | * vmentry fails as it then expects bit 14 (BS) in pending debug | ||
| 6553 | * exceptions being set, but that's not correct for the guest debugging | ||
| 6554 | * case. */ | ||
| 6555 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | ||
| 6556 | vmx_set_interrupt_shadow(vcpu, 0); | ||
| 6557 | |||
| 6558 | if (static_cpu_has(X86_FEATURE_PKU) && | ||
| 6559 | kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && | ||
| 6560 | vcpu->arch.pkru != vmx->host_pkru) | ||
| 6561 | __write_pkru(vcpu->arch.pkru); | ||
| 6562 | |||
| 6563 | pt_guest_enter(vmx); | ||
| 6564 | |||
| 6565 | atomic_switch_perf_msrs(vmx); | ||
| 6566 | |||
| 6567 | vmx_update_hv_timer(vcpu); | ||
| 6568 | |||
| 6569 | /* | ||
| 6570 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if | ||
| 6571 | * it's non-zero. Since vmentry is serialising on affected CPUs, there | ||
| 6572 | * is no need to worry about the conditional branch over the wrmsr | ||
| 6573 | * being speculatively taken. | ||
| 6574 | */ | ||
| 6575 | x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); | ||
| 6576 | |||
| 6577 | __vmx_vcpu_run(vcpu, vmx); | ||
| 6570 | 6578 | ||
| 6571 | /* | 6579 | /* |
| 6572 | * We do not use IBRS in the kernel. If this vCPU has used the | 6580 | * We do not use IBRS in the kernel. If this vCPU has used the |
| @@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6648 | vmx_recover_nmi_blocking(vmx); | 6656 | vmx_recover_nmi_blocking(vmx); |
| 6649 | vmx_complete_interrupts(vmx); | 6657 | vmx_complete_interrupts(vmx); |
| 6650 | } | 6658 | } |
| 6651 | STACK_FRAME_NON_STANDARD(vmx_vcpu_run); | ||
| 6652 | 6659 | ||
| 6653 | static struct kvm *vmx_vm_alloc(void) | 6660 | static struct kvm *vmx_vm_alloc(void) |
| 6654 | { | 6661 | { |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02c8e095a239..3d27206f6c01 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | |||
| 3834 | case KVM_CAP_HYPERV_SYNIC2: | 3834 | case KVM_CAP_HYPERV_SYNIC2: |
| 3835 | if (cap->args[0]) | 3835 | if (cap->args[0]) |
| 3836 | return -EINVAL; | 3836 | return -EINVAL; |
| 3837 | /* fall through */ | ||
| 3838 | |||
| 3837 | case KVM_CAP_HYPERV_SYNIC: | 3839 | case KVM_CAP_HYPERV_SYNIC: |
| 3838 | if (!irqchip_in_kernel(vcpu->kvm)) | 3840 | if (!irqchip_in_kernel(vcpu->kvm)) |
| 3839 | return -EINVAL; | 3841 | return -EINVAL; |
| @@ -6480,8 +6482,7 @@ restart: | |||
| 6480 | toggle_interruptibility(vcpu, ctxt->interruptibility); | 6482 | toggle_interruptibility(vcpu, ctxt->interruptibility); |
| 6481 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; | 6483 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; |
| 6482 | kvm_rip_write(vcpu, ctxt->eip); | 6484 | kvm_rip_write(vcpu, ctxt->eip); |
| 6483 | if (r == EMULATE_DONE && | 6485 | if (r == EMULATE_DONE && ctxt->tf) |
| 6484 | (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) | ||
| 6485 | kvm_vcpu_do_singlestep(vcpu, &r); | 6486 | kvm_vcpu_do_singlestep(vcpu, &r); |
| 6486 | if (!ctxt->have_exception || | 6487 | if (!ctxt->have_exception || |
| 6487 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) | 6488 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) |
| @@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
| 7093 | case KVM_HC_CLOCK_PAIRING: | 7094 | case KVM_HC_CLOCK_PAIRING: |
| 7094 | ret = kvm_pv_clock_pairing(vcpu, a0, a1); | 7095 | ret = kvm_pv_clock_pairing(vcpu, a0, a1); |
| 7095 | break; | 7096 | break; |
| 7097 | #endif | ||
| 7096 | case KVM_HC_SEND_IPI: | 7098 | case KVM_HC_SEND_IPI: |
| 7097 | ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); | 7099 | ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); |
| 7098 | break; | 7100 | break; |
| 7099 | #endif | ||
| 7100 | default: | 7101 | default: |
| 7101 | ret = -KVM_ENOSYS; | 7102 | ret = -KVM_ENOSYS; |
| 7102 | break; | 7103 | break; |
| @@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) | |||
| 7937 | vcpu->arch.pv.pv_unhalted = false; | 7938 | vcpu->arch.pv.pv_unhalted = false; |
| 7938 | vcpu->arch.mp_state = | 7939 | vcpu->arch.mp_state = |
| 7939 | KVM_MP_STATE_RUNNABLE; | 7940 | KVM_MP_STATE_RUNNABLE; |
| 7941 | /* fall through */ | ||
| 7940 | case KVM_MP_STATE_RUNNABLE: | 7942 | case KVM_MP_STATE_RUNNABLE: |
| 7941 | vcpu->arch.apf.halted = false; | 7943 | vcpu->arch.apf.halted = false; |
| 7942 | break; | 7944 | break; |
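Several hunks above (svm.c, x86.c) add or normalize "fall through" comments on switch cases. With GCC 7+, -Wextra enables -Wimplicit-fallthrough, which warns on an unannotated fall-through unless the statement is preceded by a comment matching its recognized patterns (or by the fallthrough attribute). A minimal, self-contained sketch of that behaviour, not kernel code, with illustrative names:

```c
#include <stdio.h>

/*
 * Build with: gcc -Wextra -o fallthrough fallthrough.c
 * -Wextra enables -Wimplicit-fallthrough; the comment inside the switch
 * marks the fall-through as intentional, so no warning is emitted for it.
 */
static int classify(int cap)
{
	int score = 0;

	switch (cap) {
	case 2:
		score += 10;
		/* fall through */
	case 1:
		score += 1;
		break;
	default:
		score = -1;
		break;
	}
	return score;
}

int main(void)
{
	/* Prints "1 11 -1": case 2 deliberately falls into case 1. */
	printf("%d %d %d\n", classify(1), classify(2), classify(3));
	return 0;
}
```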
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index 79778ab200e4..a53665116458 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c | |||
| @@ -36,8 +36,8 @@ static inline u16 i8254(void) | |||
| 36 | u16 status, timer; | 36 | u16 status, timer; |
| 37 | 37 | ||
| 38 | do { | 38 | do { |
| 39 | outb(I8254_PORT_CONTROL, | 39 | outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, |
| 40 | I8254_CMD_READBACK | I8254_SELECT_COUNTER0); | 40 | I8254_PORT_CONTROL); |
| 41 | status = inb(I8254_PORT_COUNTER0); | 41 | status = inb(I8254_PORT_COUNTER0); |
| 42 | timer = inb(I8254_PORT_COUNTER0); | 42 | timer = inb(I8254_PORT_COUNTER0); |
| 43 | timer |= inb(I8254_PORT_COUNTER0) << 8; | 43 | timer |= inb(I8254_PORT_COUNTER0) << 8; |
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index a19ef1a416ff..4aa9b1480866 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c | |||
| @@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) | |||
| 158 | pmd = pmd_offset(pud, ppd->vaddr); | 158 | pmd = pmd_offset(pud, ppd->vaddr); |
| 159 | if (pmd_none(*pmd)) { | 159 | if (pmd_none(*pmd)) { |
| 160 | pte = ppd->pgtable_area; | 160 | pte = ppd->pgtable_area; |
| 161 | memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); | 161 | memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE); |
| 162 | ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; | 162 | ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE; |
| 163 | set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); | 163 | set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); |
| 164 | } | 164 | } |
| 165 | 165 | ||
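In the mem_encrypt_identity.c hunk above, pte is a pointer to a page-table entry, so sizeof(pte) measures the pointer while sizeof(*pte) measures the entry it points to; the two only coincide when the entry type happens to be pointer-sized, and sizeof(*pte) is what the memset and the pgtable_area bump actually mean. A minimal userspace sketch of the difference; the pte_t typedef and PTRS_PER_PTE value below are stand-ins, not the kernel's definitions:

```c
#include <stdio.h>

/* Stand-in for the kernel's pte_t; the real type is architecture-specific. */
typedef unsigned long pte_t;
#define PTRS_PER_PTE 512

int main(void)
{
	pte_t table[PTRS_PER_PTE];
	pte_t *pte = table;

	/* sizeof(pte) is the pointer size; sizeof(*pte) is one entry's size. */
	printf("sizeof(pte)  * PTRS_PER_PTE = %zu bytes\n",
	       sizeof(pte) * PTRS_PER_PTE);
	printf("sizeof(*pte) * PTRS_PER_PTE = %zu bytes\n",
	       sizeof(*pte) * PTRS_PER_PTE);
	printf("actual table size           = %zu bytes\n", sizeof(table));
	return 0;
}
```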
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2f6787fc7106..c54a493e139a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
| @@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) | |||
| 898 | val = native_read_msr_safe(msr, err); | 898 | val = native_read_msr_safe(msr, err); |
| 899 | switch (msr) { | 899 | switch (msr) { |
| 900 | case MSR_IA32_APICBASE: | 900 | case MSR_IA32_APICBASE: |
| 901 | #ifdef CONFIG_X86_X2APIC | 901 | val &= ~X2APIC_ENABLE; |
| 902 | if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) | ||
| 903 | #endif | ||
| 904 | val &= ~X2APIC_ENABLE; | ||
| 905 | break; | 902 | break; |
| 906 | } | 903 | } |
| 907 | return val; | 904 | return val; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 72bf446c3fee..6e29794573b7 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
| @@ -361,8 +361,6 @@ void xen_timer_resume(void) | |||
| 361 | { | 361 | { |
| 362 | int cpu; | 362 | int cpu; |
| 363 | 363 | ||
| 364 | pvclock_resume(); | ||
| 365 | |||
| 366 | if (xen_clockevent != &xen_vcpuop_clockevent) | 364 | if (xen_clockevent != &xen_vcpuop_clockevent) |
| 367 | return; | 365 | return; |
| 368 | 366 | ||
| @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { | |||
| 379 | }; | 377 | }; |
| 380 | 378 | ||
| 381 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; | 379 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; |
| 380 | static u64 xen_clock_value_saved; | ||
| 382 | 381 | ||
| 383 | void xen_save_time_memory_area(void) | 382 | void xen_save_time_memory_area(void) |
| 384 | { | 383 | { |
| 385 | struct vcpu_register_time_memory_area t; | 384 | struct vcpu_register_time_memory_area t; |
| 386 | int ret; | 385 | int ret; |
| 387 | 386 | ||
| 387 | xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; | ||
| 388 | |||
| 388 | if (!xen_clock) | 389 | if (!xen_clock) |
| 389 | return; | 390 | return; |
| 390 | 391 | ||
| @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) | |||
| 404 | int ret; | 405 | int ret; |
| 405 | 406 | ||
| 406 | if (!xen_clock) | 407 | if (!xen_clock) |
| 407 | return; | 408 | goto out; |
| 408 | 409 | ||
| 409 | t.addr.v = &xen_clock->pvti; | 410 | t.addr.v = &xen_clock->pvti; |
| 410 | 411 | ||
| @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) | |||
| 421 | if (ret != 0) | 422 | if (ret != 0) |
| 422 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", | 423 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", |
| 423 | ret); | 424 | ret); |
| 425 | |||
| 426 | out: | ||
| 427 | /* Need pvclock_resume() before using xen_clocksource_read(). */ | ||
| 428 | pvclock_resume(); | ||
| 429 | xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; | ||
| 424 | } | 430 | } |
| 425 | 431 | ||
| 426 | static void xen_setup_vsyscall_time_info(void) | 432 | static void xen_setup_vsyscall_time_info(void) |
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63e0f12be7c9..72adbbe975d5 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
| @@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, | |||
| 1154 | } | 1154 | } |
| 1155 | 1155 | ||
| 1156 | /** | 1156 | /** |
| 1157 | * __bfq_deactivate_entity - deactivate an entity from its service tree. | 1157 | * __bfq_deactivate_entity - update sched_data and service trees for |
| 1158 | * @entity: the entity to deactivate. | 1158 | * entity, so as to represent entity as inactive |
| 1159 | * @entity: the entity being deactivated. | ||
| 1159 | * @ins_into_idle_tree: if false, the entity will not be put into the | 1160 | * @ins_into_idle_tree: if false, the entity will not be put into the |
| 1160 | * idle tree. | 1161 | * idle tree. |
| 1161 | * | 1162 | * |
| 1162 | * Deactivates an entity, independently of its previous state. Must | 1163 | * If necessary and allowed, puts entity into the idle tree. NOTE: |
| 1163 | * be invoked only if entity is on a service tree. Extracts the entity | 1164 | * entity may be on no tree if in service. |
| 1164 | * from that tree, and if necessary and allowed, puts it into the idle | ||
| 1165 | * tree. | ||
| 1166 | */ | 1165 | */ |
| 1167 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | 1166 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) |
| 1168 | { | 1167 | { |
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c index fb2c82c351e4..038cb627c868 100644 --- a/block/blk-mq-debugfs-zoned.c +++ b/block/blk-mq-debugfs-zoned.c | |||
| @@ -1,8 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. | 3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. |
| 4 | * | ||
| 5 | * This file is released under the GPL. | ||
| 6 | */ | 4 | */ |
| 7 | 5 | ||
| 8 | #include <linux/blkdev.h> | 6 | #include <linux/blkdev.h> |
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 90d68760af08..f8120832ca7b 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
| @@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = { | |||
| 308 | CMD_FLAG_NAME(PREFLUSH), | 308 | CMD_FLAG_NAME(PREFLUSH), |
| 309 | CMD_FLAG_NAME(RAHEAD), | 309 | CMD_FLAG_NAME(RAHEAD), |
| 310 | CMD_FLAG_NAME(BACKGROUND), | 310 | CMD_FLAG_NAME(BACKGROUND), |
| 311 | CMD_FLAG_NAME(NOUNMAP), | ||
| 312 | CMD_FLAG_NAME(NOWAIT), | 311 | CMD_FLAG_NAME(NOWAIT), |
| 312 | CMD_FLAG_NAME(NOUNMAP), | ||
| 313 | CMD_FLAG_NAME(HIPRI), | ||
| 313 | }; | 314 | }; |
| 314 | #undef CMD_FLAG_NAME | 315 | #undef CMD_FLAG_NAME |
| 315 | 316 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ba37b9e15e9..8f5b533764ca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1906 | { | 1906 | { |
| 1907 | const int is_sync = op_is_sync(bio->bi_opf); | 1907 | const int is_sync = op_is_sync(bio->bi_opf); |
| 1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); | 1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); |
| 1909 | struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; | 1909 | struct blk_mq_alloc_data data = { .flags = 0}; |
| 1910 | struct request *rq; | 1910 | struct request *rq; |
| 1911 | struct blk_plug *plug; | 1911 | struct blk_plug *plug; |
| 1912 | struct request *same_queue_rq = NULL; | 1912 | struct request *same_queue_rq = NULL; |
| @@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1928 | 1928 | ||
| 1929 | rq_qos_throttle(q, bio); | 1929 | rq_qos_throttle(q, bio); |
| 1930 | 1930 | ||
| 1931 | data.cmd_flags = bio->bi_opf; | ||
| 1931 | rq = blk_mq_get_request(q, bio, &data); | 1932 | rq = blk_mq_get_request(q, bio, &data); |
| 1932 | if (unlikely(!rq)) { | 1933 | if (unlikely(!rq)) { |
| 1933 | rq_qos_cleanup(q, bio); | 1934 | rq_qos_cleanup(q, bio); |
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index f0c56649775f..fd166fbb0f65 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
| @@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) | |||
| 597 | rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); | 597 | rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); |
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | void wbt_issue(struct rq_qos *rqos, struct request *rq) | 600 | static void wbt_issue(struct rq_qos *rqos, struct request *rq) |
| 601 | { | 601 | { |
| 602 | struct rq_wb *rwb = RQWB(rqos); | 602 | struct rq_wb *rwb = RQWB(rqos); |
| 603 | 603 | ||
| @@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq) | |||
| 617 | } | 617 | } |
| 618 | } | 618 | } |
| 619 | 619 | ||
| 620 | void wbt_requeue(struct rq_qos *rqos, struct request *rq) | 620 | static void wbt_requeue(struct rq_qos *rqos, struct request *rq) |
| 621 | { | 621 | { |
| 622 | struct rq_wb *rwb = RQWB(rqos); | 622 | struct rq_wb *rwb = RQWB(rqos); |
| 623 | if (!rwb_enabled(rwb)) | 623 | if (!rwb_enabled(rwb)) |
diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 6651e713c45d..5564e73266a6 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c | |||
| @@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 539 | ictx = skcipher_instance_ctx(inst); | 539 | ictx = skcipher_instance_ctx(inst); |
| 540 | 540 | ||
| 541 | /* Stream cipher, e.g. "xchacha12" */ | 541 | /* Stream cipher, e.g. "xchacha12" */ |
| 542 | crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, | ||
| 543 | skcipher_crypto_instance(inst)); | ||
| 542 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, | 544 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, |
| 543 | 0, crypto_requires_sync(algt->type, | 545 | 0, crypto_requires_sync(algt->type, |
| 544 | algt->mask)); | 546 | algt->mask)); |
| @@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 547 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); | 549 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); |
| 548 | 550 | ||
| 549 | /* Block cipher, e.g. "aes" */ | 551 | /* Block cipher, e.g. "aes" */ |
| 552 | crypto_set_spawn(&ictx->blockcipher_spawn, | ||
| 553 | skcipher_crypto_instance(inst)); | ||
| 550 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, | 554 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, |
| 551 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); | 555 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); |
| 552 | if (err) | 556 | if (err) |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 37f54d1b2f66..4be293a4b5f0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, | |||
| 58 | return -EINVAL; | 58 | return -EINVAL; |
| 59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
| 60 | return -EINVAL; | 60 | return -EINVAL; |
| 61 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 61 | |
| 62 | /* | ||
| 63 | * RTA_OK() didn't align the rtattr's payload when validating that it | ||
| 64 | * fits in the buffer. Yet, the keys should start on the next 4-byte | ||
| 65 | * aligned boundary. To avoid confusion, require that the rtattr | ||
| 66 | * payload be exactly the param struct, which has a 4-byte aligned size. | ||
| 67 | */ | ||
| 68 | if (RTA_PAYLOAD(rta) != sizeof(*param)) | ||
| 62 | return -EINVAL; | 69 | return -EINVAL; |
| 70 | BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); | ||
| 63 | 71 | ||
| 64 | param = RTA_DATA(rta); | 72 | param = RTA_DATA(rta); |
| 65 | keys->enckeylen = be32_to_cpu(param->enckeylen); | 73 | keys->enckeylen = be32_to_cpu(param->enckeylen); |
| 66 | 74 | ||
| 67 | key += RTA_ALIGN(rta->rta_len); | 75 | key += rta->rta_len; |
| 68 | keylen -= RTA_ALIGN(rta->rta_len); | 76 | keylen -= rta->rta_len; |
| 69 | 77 | ||
| 70 | if (keylen < keys->enckeylen) | 78 | if (keylen < keys->enckeylen) |
| 71 | return -EINVAL; | 79 | return -EINVAL; |
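The authenc.c hunk above tightens how the key blob is parsed: the rtattr payload must now be exactly the size of the param struct (whose size is 4-byte aligned by construction), and the key material that follows is located with the raw rta_len rather than RTA_ALIGN(rta->rta_len). A rough, self-contained userspace sketch of the same blob layout using the <linux/rtnetlink.h> macros; the param struct and the attribute-type value here are illustrative stand-ins rather than the kernel's internal definitions:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <linux/rtnetlink.h>

/* Stand-in for the kernel's param struct: one big-endian key length. */
struct authenc_key_param {
	uint32_t enckeylen;
};

int main(void)
{
	unsigned char blob[64] __attribute__((aligned(4)));
	struct rtattr *rta = (struct rtattr *)blob;
	struct authenc_key_param *param;
	unsigned char *keys;
	size_t keylen;

	/* Build a blob: rtattr header + param, followed by raw key bytes. */
	rta->rta_type = 1;			/* stands in for the param attribute type */
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = htonl(16);
	keys = blob + rta->rta_len;		/* rta_len is already 4-byte aligned here */
	memset(keys, 0xaa, 16 + 20);		/* 16-byte enc key + 20-byte auth key */
	keylen = rta->rta_len + 16 + 20;

	/* The check from the patch: payload must be exactly the param struct. */
	if (RTA_PAYLOAD(rta) != (int)sizeof(*param)) {
		fprintf(stderr, "unexpected payload size\n");
		return 1;
	}
	printf("enckeylen=%u, remaining key bytes=%zu\n",
	       ntohl(param->enckeylen), keylen - rta->rta_len);
	return 0;
}
```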
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 80a25cc04aec..4741fe89ba2c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
| @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | |||
| 279 | struct aead_request *req = areq->data; | 279 | struct aead_request *req = areq->data; |
| 280 | 280 | ||
| 281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); | 281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); |
| 282 | aead_request_complete(req, err); | 282 | authenc_esn_request_complete(req, err); |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) | 285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) |
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad..c0cf87ae7ef6 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c | |||
| @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) | |||
| 100 | 100 | ||
| 101 | for (i = 0; i <= 63; i++) { | 101 | for (i = 0; i <= 63; i++) { |
| 102 | 102 | ||
| 103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); | 103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); |
| 104 | 104 | ||
| 105 | ss2 = ss1 ^ rol32(a, 12); | 105 | ss2 = ss1 ^ rol32(a, 12); |
| 106 | 106 | ||
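The sm3_generic.c hunk above masks the rotation count with `i & 31`: the loop index runs to 63, and rotating a 32-bit word by 32 or more via shifts is undefined behaviour in C, while masking keeps the count in range and matches the usual reading of the SM3 specification (rotation counts taken modulo the word size). A small self-contained sketch using a local rol32 helper rather than the kernel's:

```c
#include <stdio.h>
#include <stdint.h>

/* Rotate left that is well defined for any count, by reducing it mod 32. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	shift &= 31;
	if (!shift)
		return word;
	return (word << shift) | (word >> (32 - shift));
}

int main(void)
{
	uint32_t t = 0x79cc4519;	/* SM3's T constant for the early rounds */
	unsigned int i;

	/* Counts 0, 16, 32, 48: 32 and 0 give identical results (mod 32). */
	for (i = 0; i < 64; i += 16)
		printf("rol32(T, %2u & 31) = 0x%08x\n", i, rol32(t, i & 31));
	return 0;
}
```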
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7c6afc111d76..bb857421c2e8 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
| @@ -41,7 +41,8 @@ acpi-y += ec.o | |||
| 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
| 42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o | 42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o |
| 43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o | 43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o |
| 44 | acpi-y += acpi_lpss.o acpi_apd.o | 44 | acpi-$(CONFIG_PCI) += acpi_lpss.o |
| 45 | acpi-y += acpi_apd.o | ||
| 45 | acpi-y += acpi_platform.o | 46 | acpi-y += acpi_platform.o |
| 46 | acpi-y += acpi_pnp.o | 47 | acpi-y += acpi_pnp.o |
| 47 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o | 48 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 99d820a693a8..5c093ce01bcd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1054,18 +1054,6 @@ void __init acpi_early_init(void) | |||
| 1054 | goto error0; | 1054 | goto error0; |
| 1055 | } | 1055 | } |
| 1056 | 1056 | ||
| 1057 | /* | ||
| 1058 | * ACPI 2.0 requires the EC driver to be loaded and work before | ||
| 1059 | * the EC device is found in the namespace (i.e. before | ||
| 1060 | * acpi_load_tables() is called). | ||
| 1061 | * | ||
| 1062 | * This is accomplished by looking for the ECDT table, and getting | ||
| 1063 | * the EC parameters out of that. | ||
| 1064 | * | ||
| 1065 | * Ignore the result. Not having an ECDT is not fatal. | ||
| 1066 | */ | ||
| 1067 | status = acpi_ec_ecdt_probe(); | ||
| 1068 | |||
| 1069 | #ifdef CONFIG_X86 | 1057 | #ifdef CONFIG_X86 |
| 1070 | if (!acpi_ioapic) { | 1058 | if (!acpi_ioapic) { |
| 1071 | /* compatible (0) means level (3) */ | 1059 | /* compatible (0) means level (3) */ |
| @@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void) | |||
| 1142 | goto error1; | 1130 | goto error1; |
| 1143 | } | 1131 | } |
| 1144 | 1132 | ||
| 1133 | /* | ||
| 1134 | * ACPI 2.0 requires the EC driver to be loaded and work before the EC | ||
| 1135 | * device is found in the namespace. | ||
| 1136 | * | ||
| 1137 | * This is accomplished by looking for the ECDT table and getting the EC | ||
| 1138 | * parameters out of that. | ||
| 1139 | * | ||
| 1140 | * Do that before calling acpi_initialize_objects() which may trigger EC | ||
| 1141 | * address space accesses. | ||
| 1142 | */ | ||
| 1143 | acpi_ec_ecdt_probe(); | ||
| 1144 | |||
| 1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); | 1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); |
| 1146 | if (ACPI_FAILURE(status)) { | 1146 | if (ACPI_FAILURE(status)) { |
| 1147 | printk(KERN_ERR PREFIX | 1147 | printk(KERN_ERR PREFIX |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 7e6952edb5b0..6a9e1fb8913a 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -81,7 +81,11 @@ void acpi_debugfs_init(void); | |||
| 81 | #else | 81 | #else |
| 82 | static inline void acpi_debugfs_init(void) { return; } | 82 | static inline void acpi_debugfs_init(void) { return; } |
| 83 | #endif | 83 | #endif |
| 84 | #ifdef CONFIG_PCI | ||
| 84 | void acpi_lpss_init(void); | 85 | void acpi_lpss_init(void); |
| 86 | #else | ||
| 87 | static inline void acpi_lpss_init(void) {} | ||
| 88 | #endif | ||
| 85 | 89 | ||
| 86 | void acpi_apd_init(void); | 90 | void acpi_apd_init(void); |
| 87 | 91 | ||
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index a22e2f2bbb75..c7afb1f223f7 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <acpi/nfit.h> | 26 | #include <acpi/nfit.h> |
| 27 | #include "intel.h" | 27 | #include "intel.h" |
| 28 | #include "nfit.h" | 28 | #include "nfit.h" |
| 29 | #include "intel.h" | ||
| 30 | 29 | ||
| 31 | /* | 30 | /* |
| 32 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is | 31 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is |
| @@ -82,12 +81,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) | |||
| 82 | } | 81 | } |
| 83 | EXPORT_SYMBOL(to_nfit_uuid); | 82 | EXPORT_SYMBOL(to_nfit_uuid); |
| 84 | 83 | ||
| 85 | static struct acpi_nfit_desc *to_acpi_nfit_desc( | ||
| 86 | struct nvdimm_bus_descriptor *nd_desc) | ||
| 87 | { | ||
| 88 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); | ||
| 89 | } | ||
| 90 | |||
| 91 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) | 84 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) |
| 92 | { | 85 | { |
| 93 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; | 86 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| @@ -420,10 +413,40 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) | |||
| 420 | return true; | 413 | return true; |
| 421 | } | 414 | } |
| 422 | 415 | ||
| 416 | static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, | ||
| 417 | struct nd_cmd_pkg *call_pkg) | ||
| 418 | { | ||
| 419 | if (call_pkg) { | ||
| 420 | int i; | ||
| 421 | |||
| 422 | if (nfit_mem && nfit_mem->family != call_pkg->nd_family) | ||
| 423 | return -ENOTTY; | ||
| 424 | |||
| 425 | for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) | ||
| 426 | if (call_pkg->nd_reserved2[i]) | ||
| 427 | return -EINVAL; | ||
| 428 | return call_pkg->nd_command; | ||
| 429 | } | ||
| 430 | |||
| 431 | /* In the !call_pkg case, bus commands == bus functions */ | ||
| 432 | if (!nfit_mem) | ||
| 433 | return cmd; | ||
| 434 | |||
| 435 | /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ | ||
| 436 | if (nfit_mem->family == NVDIMM_FAMILY_INTEL) | ||
| 437 | return cmd; | ||
| 438 | |||
| 439 | /* | ||
| 440 | * Force function number validation to fail since 0 is never | ||
| 441 | * published as a valid function in dsm_mask. | ||
| 442 | */ | ||
| 443 | return 0; | ||
| 444 | } | ||
| 445 | |||
| 423 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | 446 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, |
| 424 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) | 447 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) |
| 425 | { | 448 | { |
| 426 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 449 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 427 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 450 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 428 | union acpi_object in_obj, in_buf, *out_obj; | 451 | union acpi_object in_obj, in_buf, *out_obj; |
| 429 | const struct nd_cmd_desc *desc = NULL; | 452 | const struct nd_cmd_desc *desc = NULL; |
| @@ -433,29 +456,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
| 433 | unsigned long cmd_mask, dsm_mask; | 456 | unsigned long cmd_mask, dsm_mask; |
| 434 | u32 offset, fw_status = 0; | 457 | u32 offset, fw_status = 0; |
| 435 | acpi_handle handle; | 458 | acpi_handle handle; |
| 436 | unsigned int func; | ||
| 437 | const guid_t *guid; | 459 | const guid_t *guid; |
| 438 | int rc, i; | 460 | int func, rc, i; |
| 439 | 461 | ||
| 440 | if (cmd_rc) | 462 | if (cmd_rc) |
| 441 | *cmd_rc = -EINVAL; | 463 | *cmd_rc = -EINVAL; |
| 442 | func = cmd; | ||
| 443 | if (cmd == ND_CMD_CALL) { | ||
| 444 | call_pkg = buf; | ||
| 445 | func = call_pkg->nd_command; | ||
| 446 | 464 | ||
| 447 | for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) | 465 | if (cmd == ND_CMD_CALL) |
| 448 | if (call_pkg->nd_reserved2[i]) | 466 | call_pkg = buf; |
| 449 | return -EINVAL; | 467 | func = cmd_to_func(nfit_mem, cmd, call_pkg); |
| 450 | } | 468 | if (func < 0) |
| 469 | return func; | ||
| 451 | 470 | ||
| 452 | if (nvdimm) { | 471 | if (nvdimm) { |
| 453 | struct acpi_device *adev = nfit_mem->adev; | 472 | struct acpi_device *adev = nfit_mem->adev; |
| 454 | 473 | ||
| 455 | if (!adev) | 474 | if (!adev) |
| 456 | return -ENOTTY; | 475 | return -ENOTTY; |
| 457 | if (call_pkg && nfit_mem->family != call_pkg->nd_family) | ||
| 458 | return -ENOTTY; | ||
| 459 | 476 | ||
| 460 | dimm_name = nvdimm_name(nvdimm); | 477 | dimm_name = nvdimm_name(nvdimm); |
| 461 | cmd_name = nvdimm_cmd_name(cmd); | 478 | cmd_name = nvdimm_cmd_name(cmd); |
| @@ -469,9 +486,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
| 469 | 486 | ||
| 470 | cmd_name = nvdimm_bus_cmd_name(cmd); | 487 | cmd_name = nvdimm_bus_cmd_name(cmd); |
| 471 | cmd_mask = nd_desc->cmd_mask; | 488 | cmd_mask = nd_desc->cmd_mask; |
| 472 | dsm_mask = cmd_mask; | 489 | dsm_mask = nd_desc->bus_dsm_mask; |
| 473 | if (cmd == ND_CMD_CALL) | ||
| 474 | dsm_mask = nd_desc->bus_dsm_mask; | ||
| 475 | desc = nd_cmd_bus_desc(cmd); | 490 | desc = nd_cmd_bus_desc(cmd); |
| 476 | guid = to_nfit_uuid(NFIT_DEV_BUS); | 491 | guid = to_nfit_uuid(NFIT_DEV_BUS); |
| 477 | handle = adev->handle; | 492 | handle = adev->handle; |
| @@ -481,7 +496,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | |||
| 481 | if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) | 496 | if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) |
| 482 | return -ENOTTY; | 497 | return -ENOTTY; |
| 483 | 498 | ||
| 484 | if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) | 499 | /* |
| 500 | * Check for a valid command. For ND_CMD_CALL, we also have to | ||
| 501 | * make sure that the DSM function is supported. | ||
| 502 | */ | ||
| 503 | if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) | ||
| 504 | return -ENOTTY; | ||
| 505 | else if (!test_bit(cmd, &cmd_mask)) | ||
| 485 | return -ENOTTY; | 506 | return -ENOTTY; |
| 486 | 507 | ||
| 487 | in_obj.type = ACPI_TYPE_PACKAGE; | 508 | in_obj.type = ACPI_TYPE_PACKAGE; |
| @@ -725,6 +746,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
| 725 | struct acpi_nfit_memory_map *memdev; | 746 | struct acpi_nfit_memory_map *memdev; |
| 726 | struct acpi_nfit_desc *acpi_desc; | 747 | struct acpi_nfit_desc *acpi_desc; |
| 727 | struct nfit_mem *nfit_mem; | 748 | struct nfit_mem *nfit_mem; |
| 749 | u16 physical_id; | ||
| 728 | 750 | ||
| 729 | mutex_lock(&acpi_desc_lock); | 751 | mutex_lock(&acpi_desc_lock); |
| 730 | list_for_each_entry(acpi_desc, &acpi_descs, list) { | 752 | list_for_each_entry(acpi_desc, &acpi_descs, list) { |
| @@ -732,10 +754,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
| 732 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | 754 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 733 | memdev = __to_nfit_memdev(nfit_mem); | 755 | memdev = __to_nfit_memdev(nfit_mem); |
| 734 | if (memdev->device_handle == device_handle) { | 756 | if (memdev->device_handle == device_handle) { |
| 757 | *flags = memdev->flags; | ||
| 758 | physical_id = memdev->physical_id; | ||
| 735 | mutex_unlock(&acpi_desc->init_mutex); | 759 | mutex_unlock(&acpi_desc->init_mutex); |
| 736 | mutex_unlock(&acpi_desc_lock); | 760 | mutex_unlock(&acpi_desc_lock); |
| 737 | *flags = memdev->flags; | 761 | return physical_id; |
| 738 | return memdev->physical_id; | ||
| 739 | } | 762 | } |
| 740 | } | 763 | } |
| 741 | mutex_unlock(&acpi_desc->init_mutex); | 764 | mutex_unlock(&acpi_desc->init_mutex); |
| @@ -1892,6 +1915,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | |||
| 1892 | return 0; | 1915 | return 0; |
| 1893 | } | 1916 | } |
| 1894 | 1917 | ||
| 1918 | /* | ||
| 1919 | * Function 0 is the command interrogation function, don't | ||
| 1920 | * export it to potential userspace use, and enable it to be | ||
| 1921 | * used as an error value in acpi_nfit_ctl(). | ||
| 1922 | */ | ||
| 1923 | dsm_mask &= ~1UL; | ||
| 1924 | |||
| 1895 | guid = to_nfit_uuid(nfit_mem->family); | 1925 | guid = to_nfit_uuid(nfit_mem->family); |
| 1896 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) | 1926 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) |
| 1897 | if (acpi_check_dsm(adev_dimm->handle, guid, | 1927 | if (acpi_check_dsm(adev_dimm->handle, guid, |
| @@ -2085,11 +2115,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) | |||
| 2085 | if (!nvdimm) | 2115 | if (!nvdimm) |
| 2086 | continue; | 2116 | continue; |
| 2087 | 2117 | ||
| 2088 | rc = nvdimm_security_setup_events(nvdimm); | ||
| 2089 | if (rc < 0) | ||
| 2090 | dev_warn(acpi_desc->dev, | ||
| 2091 | "security event setup failed: %d\n", rc); | ||
| 2092 | |||
| 2093 | nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); | 2118 | nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); |
| 2094 | if (nfit_kernfs) | 2119 | if (nfit_kernfs) |
| 2095 | nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, | 2120 | nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, |
| @@ -2269,7 +2294,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, | |||
| 2269 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); | 2294 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); |
| 2270 | if (!nd_set) | 2295 | if (!nd_set) |
| 2271 | return -ENOMEM; | 2296 | return -ENOMEM; |
| 2272 | ndr_desc->nd_set = nd_set; | ||
| 2273 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); | 2297 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); |
| 2274 | 2298 | ||
| 2275 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); | 2299 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); |
| @@ -3405,7 +3429,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init); | |||
| 3405 | 3429 | ||
| 3406 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | 3430 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) |
| 3407 | { | 3431 | { |
| 3408 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3432 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 3409 | struct device *dev = acpi_desc->dev; | 3433 | struct device *dev = acpi_desc->dev; |
| 3410 | 3434 | ||
| 3411 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ | 3435 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
| @@ -3422,7 +3446,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
| 3422 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, | 3446 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
| 3423 | struct nvdimm *nvdimm, unsigned int cmd) | 3447 | struct nvdimm *nvdimm, unsigned int cmd) |
| 3424 | { | 3448 | { |
| 3425 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3449 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 3426 | 3450 | ||
| 3427 | if (nvdimm) | 3451 | if (nvdimm) |
| 3428 | return 0; | 3452 | return 0; |
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 850b2927b4e7..f70de71f79d6 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c | |||
| @@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm, | |||
| 146 | 146 | ||
| 147 | static void nvdimm_invalidate_cache(void); | 147 | static void nvdimm_invalidate_cache(void); |
| 148 | 148 | ||
| 149 | static int intel_security_unlock(struct nvdimm *nvdimm, | 149 | static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, |
| 150 | const struct nvdimm_key_data *key_data) | 150 | const struct nvdimm_key_data *key_data) |
| 151 | { | 151 | { |
| 152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| @@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm, | |||
| 227 | return 0; | 227 | return 0; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | static int intel_security_erase(struct nvdimm *nvdimm, | 230 | static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, |
| 231 | const struct nvdimm_key_data *key, | 231 | const struct nvdimm_key_data *key, |
| 232 | enum nvdimm_passphrase_type ptype) | 232 | enum nvdimm_passphrase_type ptype) |
| 233 | { | 233 | { |
| @@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm, | |||
| 276 | return 0; | 276 | return 0; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | static int intel_security_query_overwrite(struct nvdimm *nvdimm) | 279 | static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) |
| 280 | { | 280 | { |
| 281 | int rc; | 281 | int rc; |
| 282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| @@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm) | |||
| 313 | return 0; | 313 | return 0; |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static int intel_security_overwrite(struct nvdimm *nvdimm, | 316 | static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, |
| 317 | const struct nvdimm_key_data *nkey) | 317 | const struct nvdimm_key_data *nkey) |
| 318 | { | 318 | { |
| 319 | int rc; | 319 | int rc; |
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 7496b10532aa..6a2185eb66c5 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/kdev_t.h> | 11 | #include <linux/kdev_t.h> |
| 12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| 13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
| 14 | #include <linux/namei.h> | ||
| 14 | #include <linux/magic.h> | 15 | #include <linux/magic.h> |
| 15 | #include <linux/major.h> | 16 | #include <linux/major.h> |
| 16 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
| @@ -20,6 +21,7 @@ | |||
| 20 | #include <linux/parser.h> | 21 | #include <linux/parser.h> |
| 21 | #include <linux/radix-tree.h> | 22 | #include <linux/radix-tree.h> |
| 22 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
| 24 | #include <linux/seq_file.h> | ||
| 23 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 24 | #include <linux/spinlock_types.h> | 26 | #include <linux/spinlock_types.h> |
| 25 | #include <linux/stddef.h> | 27 | #include <linux/stddef.h> |
| @@ -30,7 +32,7 @@ | |||
| 30 | #include <linux/xarray.h> | 32 | #include <linux/xarray.h> |
| 31 | #include <uapi/asm-generic/errno-base.h> | 33 | #include <uapi/asm-generic/errno-base.h> |
| 32 | #include <uapi/linux/android/binder.h> | 34 | #include <uapi/linux/android/binder.h> |
| 33 | #include <uapi/linux/android/binder_ctl.h> | 35 | #include <uapi/linux/android/binderfs.h> |
| 34 | 36 | ||
| 35 | #include "binder_internal.h" | 37 | #include "binder_internal.h" |
| 36 | 38 | ||
| @@ -39,14 +41,32 @@ | |||
| 39 | #define INODE_OFFSET 3 | 41 | #define INODE_OFFSET 3 |
| 40 | #define INTSTRLEN 21 | 42 | #define INTSTRLEN 21 |
| 41 | #define BINDERFS_MAX_MINOR (1U << MINORBITS) | 43 | #define BINDERFS_MAX_MINOR (1U << MINORBITS) |
| 42 | 44 | /* Ensure that the initial ipc namespace always has devices available. */ | |
| 43 | static struct vfsmount *binderfs_mnt; | 45 | #define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) |
| 44 | 46 | ||
| 45 | static dev_t binderfs_dev; | 47 | static dev_t binderfs_dev; |
| 46 | static DEFINE_MUTEX(binderfs_minors_mutex); | 48 | static DEFINE_MUTEX(binderfs_minors_mutex); |
| 47 | static DEFINE_IDA(binderfs_minors); | 49 | static DEFINE_IDA(binderfs_minors); |
| 48 | 50 | ||
| 49 | /** | 51 | /** |
| 52 | * binderfs_mount_opts - mount options for binderfs | ||
| 53 | * @max: maximum number of allocatable binderfs binder devices | ||
| 54 | */ | ||
| 55 | struct binderfs_mount_opts { | ||
| 56 | int max; | ||
| 57 | }; | ||
| 58 | |||
| 59 | enum { | ||
| 60 | Opt_max, | ||
| 61 | Opt_err | ||
| 62 | }; | ||
| 63 | |||
| 64 | static const match_table_t tokens = { | ||
| 65 | { Opt_max, "max=%d" }, | ||
| 66 | { Opt_err, NULL } | ||
| 67 | }; | ||
| 68 | |||
| 69 | /** | ||
| 50 | * binderfs_info - information about a binderfs mount | 70 | * binderfs_info - information about a binderfs mount |
| 51 | * @ipc_ns: The ipc namespace the binderfs mount belongs to. | 71 | * @ipc_ns: The ipc namespace the binderfs mount belongs to. |
| 52 | * @control_dentry: This records the dentry of this binderfs mount | 72 | * @control_dentry: This records the dentry of this binderfs mount |
| @@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors); | |||
| 55 | * created. | 75 | * created. |
| 56 | * @root_gid: gid that needs to be used when a new binder device is | 76 | * @root_gid: gid that needs to be used when a new binder device is |
| 57 | * created. | 77 | * created. |
| 78 | * @mount_opts: The mount options in use. | ||
| 79 | * @device_count: The current number of allocated binder devices. | ||
| 58 | */ | 80 | */ |
| 59 | struct binderfs_info { | 81 | struct binderfs_info { |
| 60 | struct ipc_namespace *ipc_ns; | 82 | struct ipc_namespace *ipc_ns; |
| 61 | struct dentry *control_dentry; | 83 | struct dentry *control_dentry; |
| 62 | kuid_t root_uid; | 84 | kuid_t root_uid; |
| 63 | kgid_t root_gid; | 85 | kgid_t root_gid; |
| 64 | 86 | struct binderfs_mount_opts mount_opts; | |
| 87 | int device_count; | ||
| 65 | }; | 88 | }; |
| 66 | 89 | ||
| 67 | static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) | 90 | static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) |
| @@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode) | |||
| 84 | * @userp: buffer to copy information about new device for userspace to | 107 | * @userp: buffer to copy information about new device for userspace to |
| 85 | * @req: struct binderfs_device as copied from userspace | 108 | * @req: struct binderfs_device as copied from userspace |
| 86 | * | 109 | * |
| 87 | * This function allocated a new binder_device and reserves a new minor | 110 | * This function allocates a new binder_device and reserves a new minor |
| 88 | * number for it. | 111 | * number for it. |
| 89 | * Minor numbers are limited and tracked globally in binderfs_minors. The | 112 | * Minor numbers are limited and tracked globally in binderfs_minors. The |
| 90 | * function will stash a struct binder_device for the specific binder | 113 | * function will stash a struct binder_device for the specific binder |
| @@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode, | |||
| 100 | struct binderfs_device *req) | 123 | struct binderfs_device *req) |
| 101 | { | 124 | { |
| 102 | int minor, ret; | 125 | int minor, ret; |
| 103 | struct dentry *dentry, *dup, *root; | 126 | struct dentry *dentry, *root; |
| 104 | struct binder_device *device; | 127 | struct binder_device *device; |
| 105 | size_t name_len = BINDERFS_MAX_NAME + 1; | ||
| 106 | char *name = NULL; | 128 | char *name = NULL; |
| 129 | size_t name_len; | ||
| 107 | struct inode *inode = NULL; | 130 | struct inode *inode = NULL; |
| 108 | struct super_block *sb = ref_inode->i_sb; | 131 | struct super_block *sb = ref_inode->i_sb; |
| 109 | struct binderfs_info *info = sb->s_fs_info; | 132 | struct binderfs_info *info = sb->s_fs_info; |
| 133 | #if defined(CONFIG_IPC_NS) | ||
| 134 | bool use_reserve = (info->ipc_ns == &init_ipc_ns); | ||
| 135 | #else | ||
| 136 | bool use_reserve = true; | ||
| 137 | #endif | ||
| 110 | 138 | ||
| 111 | /* Reserve new minor number for the new device. */ | 139 | /* Reserve new minor number for the new device. */ |
| 112 | mutex_lock(&binderfs_minors_mutex); | 140 | mutex_lock(&binderfs_minors_mutex); |
| 113 | minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); | 141 | if (++info->device_count <= info->mount_opts.max) |
| 114 | mutex_unlock(&binderfs_minors_mutex); | 142 | minor = ida_alloc_max(&binderfs_minors, |
| 115 | if (minor < 0) | 143 | use_reserve ? BINDERFS_MAX_MINOR : |
| 144 | BINDERFS_MAX_MINOR_CAPPED, | ||
| 145 | GFP_KERNEL); | ||
| 146 | else | ||
| 147 | minor = -ENOSPC; | ||
| 148 | if (minor < 0) { | ||
| 149 | --info->device_count; | ||
| 150 | mutex_unlock(&binderfs_minors_mutex); | ||
| 116 | return minor; | 151 | return minor; |
| 152 | } | ||
| 153 | mutex_unlock(&binderfs_minors_mutex); | ||
| 117 | 154 | ||
| 118 | ret = -ENOMEM; | 155 | ret = -ENOMEM; |
| 119 | device = kzalloc(sizeof(*device), GFP_KERNEL); | 156 | device = kzalloc(sizeof(*device), GFP_KERNEL); |
| @@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode, | |||
| 132 | inode->i_uid = info->root_uid; | 169 | inode->i_uid = info->root_uid; |
| 133 | inode->i_gid = info->root_gid; | 170 | inode->i_gid = info->root_gid; |
| 134 | 171 | ||
| 135 | name = kmalloc(name_len, GFP_KERNEL); | 172 | req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ |
| 173 | name_len = strlen(req->name); | ||
| 174 | /* Make sure to include terminating NUL byte */ | ||
| 175 | name = kmemdup(req->name, name_len + 1, GFP_KERNEL); | ||
| 136 | if (!name) | 176 | if (!name) |
| 137 | goto err; | 177 | goto err; |
| 138 | 178 | ||
| 139 | strscpy(name, req->name, name_len); | ||
| 140 | |||
| 141 | device->binderfs_inode = inode; | 179 | device->binderfs_inode = inode; |
| 142 | device->context.binder_context_mgr_uid = INVALID_UID; | 180 | device->context.binder_context_mgr_uid = INVALID_UID; |
| 143 | device->context.name = name; | 181 | device->context.name = name; |
| @@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode, | |||
| 156 | 194 | ||
| 157 | root = sb->s_root; | 195 | root = sb->s_root; |
| 158 | inode_lock(d_inode(root)); | 196 | inode_lock(d_inode(root)); |
| 159 | dentry = d_alloc_name(root, name); | 197 | |
| 160 | if (!dentry) { | 198 | /* look it up */ |
| 199 | dentry = lookup_one_len(name, root, name_len); | ||
| 200 | if (IS_ERR(dentry)) { | ||
| 161 | inode_unlock(d_inode(root)); | 201 | inode_unlock(d_inode(root)); |
| 162 | ret = -ENOMEM; | 202 | ret = PTR_ERR(dentry); |
| 163 | goto err; | 203 | goto err; |
| 164 | } | 204 | } |
| 165 | 205 | ||
| 166 | /* Verify that the name userspace gave us is not already in use. */ | 206 | if (d_really_is_positive(dentry)) { |
| 167 | dup = d_lookup(root, &dentry->d_name); | 207 | /* already exists */ |
| 168 | if (dup) { | 208 | dput(dentry); |
| 169 | if (d_really_is_positive(dup)) { | 209 | inode_unlock(d_inode(root)); |
| 170 | dput(dup); | 210 | ret = -EEXIST; |
| 171 | dput(dentry); | 211 | goto err; |
| 172 | inode_unlock(d_inode(root)); | ||
| 173 | ret = -EEXIST; | ||
| 174 | goto err; | ||
| 175 | } | ||
| 176 | dput(dup); | ||
| 177 | } | 212 | } |
| 178 | 213 | ||
| 179 | inode->i_private = device; | 214 | inode->i_private = device; |
| 180 | d_add(dentry, inode); | 215 | d_instantiate(dentry, inode); |
| 181 | fsnotify_create(root->d_inode, dentry); | 216 | fsnotify_create(root->d_inode, dentry); |
| 182 | inode_unlock(d_inode(root)); | 217 | inode_unlock(d_inode(root)); |
| 183 | 218 | ||
| @@ -187,6 +222,7 @@ err: | |||
| 187 | kfree(name); | 222 | kfree(name); |
| 188 | kfree(device); | 223 | kfree(device); |
| 189 | mutex_lock(&binderfs_minors_mutex); | 224 | mutex_lock(&binderfs_minors_mutex); |
| 225 | --info->device_count; | ||
| 190 | ida_free(&binderfs_minors, minor); | 226 | ida_free(&binderfs_minors, minor); |
| 191 | mutex_unlock(&binderfs_minors_mutex); | 227 | mutex_unlock(&binderfs_minors_mutex); |
| 192 | iput(inode); | 228 | iput(inode); |
| @@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, | |||
| 232 | static void binderfs_evict_inode(struct inode *inode) | 268 | static void binderfs_evict_inode(struct inode *inode) |
| 233 | { | 269 | { |
| 234 | struct binder_device *device = inode->i_private; | 270 | struct binder_device *device = inode->i_private; |
| 271 | struct binderfs_info *info = BINDERFS_I(inode); | ||
| 235 | 272 | ||
| 236 | clear_inode(inode); | 273 | clear_inode(inode); |
| 237 | 274 | ||
| @@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode) | |||
| 239 | return; | 276 | return; |
| 240 | 277 | ||
| 241 | mutex_lock(&binderfs_minors_mutex); | 278 | mutex_lock(&binderfs_minors_mutex); |
| 279 | --info->device_count; | ||
| 242 | ida_free(&binderfs_minors, device->miscdev.minor); | 280 | ida_free(&binderfs_minors, device->miscdev.minor); |
| 243 | mutex_unlock(&binderfs_minors_mutex); | 281 | mutex_unlock(&binderfs_minors_mutex); |
| 244 | 282 | ||
| @@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode) | |||
| 246 | kfree(device); | 284 | kfree(device); |
| 247 | } | 285 | } |
| 248 | 286 | ||
| 287 | /** | ||
| 288 | * binderfs_parse_mount_opts - parse binderfs mount options | ||
| 289 | * @data: options to set (can be NULL in which case defaults are used) | ||
| 290 | */ | ||
| 291 | static int binderfs_parse_mount_opts(char *data, | ||
| 292 | struct binderfs_mount_opts *opts) | ||
| 293 | { | ||
| 294 | char *p; | ||
| 295 | opts->max = BINDERFS_MAX_MINOR; | ||
| 296 | |||
| 297 | while ((p = strsep(&data, ",")) != NULL) { | ||
| 298 | substring_t args[MAX_OPT_ARGS]; | ||
| 299 | int token; | ||
| 300 | int max_devices; | ||
| 301 | |||
| 302 | if (!*p) | ||
| 303 | continue; | ||
| 304 | |||
| 305 | token = match_token(p, tokens, args); | ||
| 306 | switch (token) { | ||
| 307 | case Opt_max: | ||
| 308 | if (match_int(&args[0], &max_devices) || | ||
| 309 | (max_devices < 0 || | ||
| 310 | (max_devices > BINDERFS_MAX_MINOR))) | ||
| 311 | return -EINVAL; | ||
| 312 | |||
| 313 | opts->max = max_devices; | ||
| 314 | break; | ||
| 315 | default: | ||
| 316 | pr_err("Invalid mount options\n"); | ||
| 317 | return -EINVAL; | ||
| 318 | } | ||
| 319 | } | ||
| 320 | |||
| 321 | return 0; | ||
| 322 | } | ||
| 323 | |||
| 324 | static int binderfs_remount(struct super_block *sb, int *flags, char *data) | ||
| 325 | { | ||
| 326 | struct binderfs_info *info = sb->s_fs_info; | ||
| 327 | return binderfs_parse_mount_opts(data, &info->mount_opts); | ||
| 328 | } | ||
| 329 | |||
| 330 | static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) | ||
| 331 | { | ||
| 332 | struct binderfs_info *info; | ||
| 333 | |||
| 334 | info = root->d_sb->s_fs_info; | ||
| 335 | if (info->mount_opts.max <= BINDERFS_MAX_MINOR) | ||
| 336 | seq_printf(seq, ",max=%d", info->mount_opts.max); | ||
| 337 | |||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 249 | static const struct super_operations binderfs_super_ops = { | 341 | static const struct super_operations binderfs_super_ops = { |
| 250 | .statfs = simple_statfs, | 342 | .evict_inode = binderfs_evict_inode, |
| 251 | .evict_inode = binderfs_evict_inode, | 343 | .remount_fs = binderfs_remount, |
| 344 | .show_options = binderfs_show_mount_opts, | ||
| 345 | .statfs = simple_statfs, | ||
| 252 | }; | 346 | }; |
| 253 | 347 | ||
| 348 | static inline bool is_binderfs_control_device(const struct dentry *dentry) | ||
| 349 | { | ||
| 350 | struct binderfs_info *info = dentry->d_sb->s_fs_info; | ||
| 351 | return info->control_dentry == dentry; | ||
| 352 | } | ||
| 353 | |||
| 254 | static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 354 | static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
| 255 | struct inode *new_dir, struct dentry *new_dentry, | 355 | struct inode *new_dir, struct dentry *new_dentry, |
| 256 | unsigned int flags) | 356 | unsigned int flags) |
| 257 | { | 357 | { |
| 258 | struct inode *inode = d_inode(old_dentry); | 358 | if (is_binderfs_control_device(old_dentry) || |
| 259 | 359 | is_binderfs_control_device(new_dentry)) | |
| 260 | /* binderfs doesn't support directories. */ | ||
| 261 | if (d_is_dir(old_dentry)) | ||
| 262 | return -EPERM; | 360 | return -EPERM; |
| 263 | 361 | ||
| 264 | if (flags & ~RENAME_NOREPLACE) | 362 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags); |
| 265 | return -EINVAL; | ||
| 266 | |||
| 267 | if (!simple_empty(new_dentry)) | ||
| 268 | return -ENOTEMPTY; | ||
| 269 | |||
| 270 | if (d_really_is_positive(new_dentry)) | ||
| 271 | simple_unlink(new_dir, new_dentry); | ||
| 272 | |||
| 273 | old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime = | ||
| 274 | new_dir->i_mtime = inode->i_ctime = current_time(old_dir); | ||
| 275 | |||
| 276 | return 0; | ||
| 277 | } | 363 | } |
| 278 | 364 | ||
| 279 | static int binderfs_unlink(struct inode *dir, struct dentry *dentry) | 365 | static int binderfs_unlink(struct inode *dir, struct dentry *dentry) |
| 280 | { | 366 | { |
| 281 | /* | 367 | if (is_binderfs_control_device(dentry)) |
| 282 | * The control dentry is only ever touched during mount so checking it | ||
| 283 | * here should not require us to take lock. | ||
| 284 | */ | ||
| 285 | if (BINDERFS_I(dir)->control_dentry == dentry) | ||
| 286 | return -EPERM; | 368 | return -EPERM; |
| 287 | 369 | ||
| 288 | return simple_unlink(dir, dentry); | 370 | return simple_unlink(dir, dentry); |
| @@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
| 318 | if (!device) | 400 | if (!device) |
| 319 | return -ENOMEM; | 401 | return -ENOMEM; |
| 320 | 402 | ||
| 321 | inode_lock(d_inode(root)); | ||
| 322 | |||
| 323 | /* If we have already created a binder-control node, return. */ | 403 | /* If we have already created a binder-control node, return. */ |
| 324 | if (info->control_dentry) { | 404 | if (info->control_dentry) { |
| 325 | ret = 0; | 405 | ret = 0; |
| @@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
| 358 | inode->i_private = device; | 438 | inode->i_private = device; |
| 359 | info->control_dentry = dentry; | 439 | info->control_dentry = dentry; |
| 360 | d_add(dentry, inode); | 440 | d_add(dentry, inode); |
| 361 | inode_unlock(d_inode(root)); | ||
| 362 | 441 | ||
| 363 | return 0; | 442 | return 0; |
| 364 | 443 | ||
| 365 | out: | 444 | out: |
| 366 | inode_unlock(d_inode(root)); | ||
| 367 | kfree(device); | 445 | kfree(device); |
| 368 | iput(inode); | 446 | iput(inode); |
| 369 | 447 | ||
| @@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = { | |||
| 378 | 456 | ||
| 379 | static int binderfs_fill_super(struct super_block *sb, void *data, int silent) | 457 | static int binderfs_fill_super(struct super_block *sb, void *data, int silent) |
| 380 | { | 458 | { |
| 459 | int ret; | ||
| 381 | struct binderfs_info *info; | 460 | struct binderfs_info *info; |
| 382 | int ret = -ENOMEM; | ||
| 383 | struct inode *inode = NULL; | 461 | struct inode *inode = NULL; |
| 384 | struct ipc_namespace *ipc_ns = sb->s_fs_info; | ||
| 385 | |||
| 386 | get_ipc_ns(ipc_ns); | ||
| 387 | 462 | ||
| 388 | sb->s_blocksize = PAGE_SIZE; | 463 | sb->s_blocksize = PAGE_SIZE; |
| 389 | sb->s_blocksize_bits = PAGE_SHIFT; | 464 | sb->s_blocksize_bits = PAGE_SHIFT; |
| @@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 405 | sb->s_op = &binderfs_super_ops; | 480 | sb->s_op = &binderfs_super_ops; |
| 406 | sb->s_time_gran = 1; | 481 | sb->s_time_gran = 1; |
| 407 | 482 | ||
| 408 | info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); | 483 | sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); |
| 409 | if (!info) | 484 | if (!sb->s_fs_info) |
| 410 | goto err_without_dentry; | 485 | return -ENOMEM; |
| 486 | info = sb->s_fs_info; | ||
| 487 | |||
| 488 | info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); | ||
| 489 | |||
| 490 | ret = binderfs_parse_mount_opts(data, &info->mount_opts); | ||
| 491 | if (ret) | ||
| 492 | return ret; | ||
| 411 | 493 | ||
| 412 | info->ipc_ns = ipc_ns; | ||
| 413 | info->root_gid = make_kgid(sb->s_user_ns, 0); | 494 | info->root_gid = make_kgid(sb->s_user_ns, 0); |
| 414 | if (!gid_valid(info->root_gid)) | 495 | if (!gid_valid(info->root_gid)) |
| 415 | info->root_gid = GLOBAL_ROOT_GID; | 496 | info->root_gid = GLOBAL_ROOT_GID; |
| @@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 417 | if (!uid_valid(info->root_uid)) | 498 | if (!uid_valid(info->root_uid)) |
| 418 | info->root_uid = GLOBAL_ROOT_UID; | 499 | info->root_uid = GLOBAL_ROOT_UID; |
| 419 | 500 | ||
| 420 | sb->s_fs_info = info; | ||
| 421 | |||
| 422 | inode = new_inode(sb); | 501 | inode = new_inode(sb); |
| 423 | if (!inode) | 502 | if (!inode) |
| 424 | goto err_without_dentry; | 503 | return -ENOMEM; |
| 425 | 504 | ||
| 426 | inode->i_ino = FIRST_INODE; | 505 | inode->i_ino = FIRST_INODE; |
| 427 | inode->i_fop = &simple_dir_operations; | 506 | inode->i_fop = &simple_dir_operations; |
| @@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 432 | 511 | ||
| 433 | sb->s_root = d_make_root(inode); | 512 | sb->s_root = d_make_root(inode); |
| 434 | if (!sb->s_root) | 513 | if (!sb->s_root) |
| 435 | goto err_without_dentry; | 514 | return -ENOMEM; |
| 436 | |||
| 437 | ret = binderfs_binder_ctl_create(sb); | ||
| 438 | if (ret) | ||
| 439 | goto err_with_dentry; | ||
| 440 | |||
| 441 | return 0; | ||
| 442 | |||
| 443 | err_with_dentry: | ||
| 444 | dput(sb->s_root); | ||
| 445 | sb->s_root = NULL; | ||
| 446 | |||
| 447 | err_without_dentry: | ||
| 448 | put_ipc_ns(ipc_ns); | ||
| 449 | iput(inode); | ||
| 450 | kfree(info); | ||
| 451 | |||
| 452 | return ret; | ||
| 453 | } | ||
| 454 | |||
| 455 | static int binderfs_test_super(struct super_block *sb, void *data) | ||
| 456 | { | ||
| 457 | struct binderfs_info *info = sb->s_fs_info; | ||
| 458 | |||
| 459 | if (info) | ||
| 460 | return info->ipc_ns == data; | ||
| 461 | |||
| 462 | return 0; | ||
| 463 | } | ||
| 464 | 515 | ||
| 465 | static int binderfs_set_super(struct super_block *sb, void *data) | 516 | return binderfs_binder_ctl_create(sb); |
| 466 | { | ||
| 467 | sb->s_fs_info = data; | ||
| 468 | return set_anon_super(sb, NULL); | ||
| 469 | } | 517 | } |
| 470 | 518 | ||
| 471 | static struct dentry *binderfs_mount(struct file_system_type *fs_type, | 519 | static struct dentry *binderfs_mount(struct file_system_type *fs_type, |
| 472 | int flags, const char *dev_name, | 520 | int flags, const char *dev_name, |
| 473 | void *data) | 521 | void *data) |
| 474 | { | 522 | { |
| 475 | struct super_block *sb; | 523 | return mount_nodev(fs_type, flags, data, binderfs_fill_super); |
| 476 | struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; | ||
| 477 | |||
| 478 | if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN)) | ||
| 479 | return ERR_PTR(-EPERM); | ||
| 480 | |||
| 481 | sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super, | ||
| 482 | flags, ipc_ns->user_ns, ipc_ns); | ||
| 483 | if (IS_ERR(sb)) | ||
| 484 | return ERR_CAST(sb); | ||
| 485 | |||
| 486 | if (!sb->s_root) { | ||
| 487 | int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0); | ||
| 488 | if (ret) { | ||
| 489 | deactivate_locked_super(sb); | ||
| 490 | return ERR_PTR(ret); | ||
| 491 | } | ||
| 492 | |||
| 493 | sb->s_flags |= SB_ACTIVE; | ||
| 494 | } | ||
| 495 | |||
| 496 | return dget(sb->s_root); | ||
| 497 | } | 524 | } |
| 498 | 525 | ||
| 499 | static void binderfs_kill_super(struct super_block *sb) | 526 | static void binderfs_kill_super(struct super_block *sb) |
| 500 | { | 527 | { |
| 501 | struct binderfs_info *info = sb->s_fs_info; | 528 | struct binderfs_info *info = sb->s_fs_info; |
| 502 | 529 | ||
| 530 | kill_litter_super(sb); | ||
| 531 | |||
| 503 | if (info && info->ipc_ns) | 532 | if (info && info->ipc_ns) |
| 504 | put_ipc_ns(info->ipc_ns); | 533 | put_ipc_ns(info->ipc_ns); |
| 505 | 534 | ||
| 506 | kfree(info); | 535 | kfree(info); |
| 507 | kill_litter_super(sb); | ||
| 508 | } | 536 | } |
| 509 | 537 | ||
| 510 | static struct file_system_type binder_fs_type = { | 538 | static struct file_system_type binder_fs_type = { |
| @@ -530,14 +558,6 @@ static int __init init_binderfs(void) | |||
| 530 | return ret; | 558 | return ret; |
| 531 | } | 559 | } |
| 532 | 560 | ||
| 533 | binderfs_mnt = kern_mount(&binder_fs_type); | ||
| 534 | if (IS_ERR(binderfs_mnt)) { | ||
| 535 | ret = PTR_ERR(binderfs_mnt); | ||
| 536 | binderfs_mnt = NULL; | ||
| 537 | unregister_filesystem(&binder_fs_type); | ||
| 538 | unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); | ||
| 539 | } | ||
| 540 | |||
| 541 | return ret; | 561 | return ret; |
| 542 | } | 562 | } |
| 543 | 563 | ||
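The binderfs hunks above switch the device-name copy to kmemdup() after forcing NUL termination, and replace the d_alloc_name()/d_lookup() duplicate check with lookup_one_len() plus d_instantiate() under the parent inode lock. A minimal sketch of the name-copy pattern only, with illustrative sizes and names (not the driver's BINDERFS_MAX_NAME or its function):

#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_MAX_NAME 255                 /* illustrative limit */

/* Duplicate a fixed-size, possibly unterminated name supplied by userspace. */
static char *example_dup_name(char *req_name)
{
	size_t name_len;

	req_name[EXAMPLE_MAX_NAME] = '\0';   /* force NUL termination before measuring */
	name_len = strlen(req_name);

	/* kmemdup() copies name_len + 1 bytes, so the terminating NUL travels with the copy. */
	return kmemdup(req_name, name_len + 1, GFP_KERNEL);
}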
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 4ca7a6b4eaae..8218db17ebdb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
| @@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers" | |||
| 1091 | 1091 | ||
| 1092 | config PATA_ACPI | 1092 | config PATA_ACPI |
| 1093 | tristate "ACPI firmware driver for PATA" | 1093 | tristate "ACPI firmware driver for PATA" |
| 1094 | depends on ATA_ACPI && ATA_BMDMA | 1094 | depends on ATA_ACPI && ATA_BMDMA && PCI |
| 1095 | help | 1095 | help |
| 1096 | This option enables an ACPI method driver which drives | 1096 | This option enables an ACPI method driver which drives |
| 1097 | motherboard PATA controller interfaces through the ACPI | 1097 | motherboard PATA controller interfaces through the ACPI |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 8cc9c429ad95..9e7fc302430f 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c | |||
| @@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = { | |||
| 915 | .sg_tablesize = MAX_DCMDS, | 915 | .sg_tablesize = MAX_DCMDS, |
| 916 | /* We may not need that strict one */ | 916 | /* We may not need that strict one */ |
| 917 | .dma_boundary = ATA_DMA_BOUNDARY, | 917 | .dma_boundary = ATA_DMA_BOUNDARY, |
| 918 | /* Not sure what the real max is but we know it's less than 64K, let's | ||
| 919 | * use 64K minus 256 | ||
| 920 | */ | ||
| 921 | .max_segment_size = MAX_DBDMA_SEG, | ||
| 918 | .slave_configure = pata_macio_slave_config, | 922 | .slave_configure = pata_macio_slave_config, |
| 919 | }; | 923 | }; |
| 920 | 924 | ||
| @@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv, | |||
| 1044 | /* Make sure we have sane initial timings in the cache */ | 1048 | /* Make sure we have sane initial timings in the cache */ |
| 1045 | pata_macio_default_timings(priv); | 1049 | pata_macio_default_timings(priv); |
| 1046 | 1050 | ||
| 1047 | /* Not sure what the real max is but we know it's less than 64K, let's | ||
| 1048 | * use 64K minus 256 | ||
| 1049 | */ | ||
| 1050 | dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG); | ||
| 1051 | |||
| 1052 | /* Allocate libata host for 1 port */ | 1051 | /* Allocate libata host for 1 port */ |
| 1053 | memset(&pinfo, 0, sizeof(struct ata_port_info)); | 1052 | memset(&pinfo, 0, sizeof(struct ata_port_info)); |
| 1054 | pmac_macio_calc_timing_masks(priv, &pinfo); | 1053 | pmac_macio_calc_timing_masks(priv, &pinfo); |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index e0bcf9b2dab0..174e84ce4379 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
| @@ -245,8 +245,15 @@ struct inic_port_priv { | |||
| 245 | 245 | ||
| 246 | static struct scsi_host_template inic_sht = { | 246 | static struct scsi_host_template inic_sht = { |
| 247 | ATA_BASE_SHT(DRV_NAME), | 247 | ATA_BASE_SHT(DRV_NAME), |
| 248 | .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ | 248 | .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ |
| 249 | .dma_boundary = INIC_DMA_BOUNDARY, | 249 | |
| 250 | /* | ||
| 251 | * This controller is braindamaged. dma_boundary is 0xffff like others | ||
| 252 | * but it will lock up the whole machine HARD if 65536 byte PRD entry | ||
| 253 | * is fed. Reduce maximum segment size. | ||
| 254 | */ | ||
| 255 | .dma_boundary = INIC_DMA_BOUNDARY, | ||
| 256 | .max_segment_size = 65536 - 512, | ||
| 250 | }; | 257 | }; |
| 251 | 258 | ||
| 252 | static const int scr_map[] = { | 259 | static const int scr_map[] = { |
| @@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 868 | return rc; | 875 | return rc; |
| 869 | } | 876 | } |
| 870 | 877 | ||
| 871 | /* | ||
| 872 | * This controller is braindamaged. dma_boundary is 0xffff | ||
| 873 | * like others but it will lock up the whole machine HARD if | ||
| 874 | * 65536 byte PRD entry is fed. Reduce maximum segment size. | ||
| 875 | */ | ||
| 876 | rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512); | ||
| 877 | if (rc) { | ||
| 878 | dev_err(&pdev->dev, "failed to set the maximum segment size\n"); | ||
| 879 | return rc; | ||
| 880 | } | ||
| 881 | |||
| 882 | rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); | 878 | rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); |
| 883 | if (rc) { | 879 | if (rc) { |
| 884 | dev_err(&pdev->dev, "failed to initialize controller\n"); | 880 | dev_err(&pdev->dev, "failed to initialize controller\n"); |
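Both the pata_macio and sata_inic162x hunks above retire a per-device dma_set_max_seg_size() call and express the limit as a .max_segment_size field in the scsi_host_template, so the cap is applied by the SCSI/libata core instead of each driver's init path. A hedged sketch of the template field; "example" is a made-up driver name and the constant mirrors the comment in the sata_inic162x hunk:

#include <scsi/scsi_host.h>
#include <linux/libata.h>

static struct scsi_host_template example_sht = {
	ATA_BASE_SHT("example"),
	.sg_tablesize     = LIBATA_MAX_PRD,
	.dma_boundary     = ATA_DMA_BOUNDARY,
	/* Keep every DMA segment below 64K for hardware that hangs on 65536-byte entries. */
	.max_segment_size = 65536 - 512,
};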
diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 2e9d1cfe3aeb..211607986134 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c | |||
| @@ -718,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) | |||
| 718 | instead of '/ 512', use '>> 9' to prevent a call | 718 | instead of '/ 512', use '>> 9' to prevent a call |
| 719 | to divdu3 on x86 platforms | 719 | to divdu3 on x86 platforms |
| 720 | */ | 720 | */ |
| 721 | rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; | 721 | rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; |
| 722 | 722 | ||
| 723 | if (rate_cps < 10) | 723 | if (rate_cps < 10) |
| 724 | rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ | 724 | rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ |
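The he.c change widens the shifted constant: the type of a shift expression comes from its operands, not from the variable it is assigned to, so a plain `1 << exp` is computed as a 32-bit int and overflows for large exponents even though rate_cps is unsigned long long. A standalone illustration of the rule (not driver code; exp is a placeholder value, and the patch itself promotes to unsigned long):

unsigned int exp = 40;
unsigned long long a = 1 << exp;     /* shift done in int: overflows / undefined for exp >= 31 */
unsigned long long b = 1ULL << exp;  /* shift done in 64 bits, well defined for any exp < 64 */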
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 1bd1145ad8b5..330c1f7e9665 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
| @@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) | |||
| 108 | * suppress pointless writes. | 108 | * suppress pointless writes. |
| 109 | */ | 109 | */ |
| 110 | for (i = 0; i < d->chip->num_regs; i++) { | 110 | for (i = 0; i < d->chip->num_regs; i++) { |
| 111 | if (!d->chip->mask_base) | ||
| 112 | continue; | ||
| 113 | |||
| 111 | reg = d->chip->mask_base + | 114 | reg = d->chip->mask_base + |
| 112 | (i * map->reg_stride * d->irq_reg_stride); | 115 | (i * map->reg_stride * d->irq_reg_stride); |
| 113 | if (d->chip->mask_invert) { | 116 | if (d->chip->mask_invert) { |
| @@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) | |||
| 258 | const struct regmap_irq_type *t = &irq_data->type; | 261 | const struct regmap_irq_type *t = &irq_data->type; |
| 259 | 262 | ||
| 260 | if ((t->types_supported & type) != type) | 263 | if ((t->types_supported & type) != type) |
| 261 | return -ENOTSUPP; | 264 | return 0; |
| 262 | 265 | ||
| 263 | reg = t->type_reg_offset / map->reg_stride; | 266 | reg = t->type_reg_offset / map->reg_stride; |
| 264 | 267 | ||
| @@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
| 588 | /* Mask all the interrupts by default */ | 591 | /* Mask all the interrupts by default */ |
| 589 | for (i = 0; i < chip->num_regs; i++) { | 592 | for (i = 0; i < chip->num_regs; i++) { |
| 590 | d->mask_buf[i] = d->mask_buf_def[i]; | 593 | d->mask_buf[i] = d->mask_buf_def[i]; |
| 594 | if (!chip->mask_base) | ||
| 595 | continue; | ||
| 596 | |||
| 591 | reg = chip->mask_base + | 597 | reg = chip->mask_base + |
| 592 | (i * map->reg_stride * d->irq_reg_stride); | 598 | (i * map->reg_stride * d->irq_reg_stride); |
| 593 | if (chip->mask_invert) | 599 | if (chip->mask_invert) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 08696f5f00bb..7c9a949e876b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) | |||
| 288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); | 288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
| 289 | set_capacity(nbd->disk, config->bytesize >> 9); | 289 | set_capacity(nbd->disk, config->bytesize >> 9); |
| 290 | if (bdev) { | 290 | if (bdev) { |
| 291 | if (bdev->bd_disk) | 291 | if (bdev->bd_disk) { |
| 292 | bd_set_size(bdev, config->bytesize); | 292 | bd_set_size(bdev, config->bytesize); |
| 293 | else | 293 | set_blocksize(bdev, config->blksize); |
| 294 | } else | ||
| 294 | bdev->bd_invalidated = 1; | 295 | bdev->bd_invalidated = 1; |
| 295 | bdput(bdev); | 296 | bdput(bdev); |
| 296 | } | 297 | } |
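The nbd hunk adds a set_blocksize() call next to bd_set_size() so the block device's soft block size tracks the negotiated nbd block size rather than only its capacity. A minimal sketch of the pairing; the wrapper name is invented, only the two helpers are real:

#include <linux/blkdev.h>

/* Keep both the capacity and the soft block size of a bdev in sync on resize. */
static void example_update_bdev(struct block_device *bdev,
				loff_t bytesize, unsigned int blksize)
{
	bd_set_size(bdev, bytesize);    /* capacity in bytes */
	set_blocksize(bdev, blksize);   /* block size used for buffered/page-cache I/O */
}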
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index a74ce885b541..c518659b4d9f 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/moduleparam.h> | 32 | #include <linux/moduleparam.h> |
| 33 | #include <linux/workqueue.h> | 33 | #include <linux/workqueue.h> |
| 34 | #include <linux/uuid.h> | 34 | #include <linux/uuid.h> |
| 35 | #include <linux/nospec.h> | ||
| 35 | 36 | ||
| 36 | #define IPMI_DRIVER_VERSION "39.2" | 37 | #define IPMI_DRIVER_VERSION "39.2" |
| 37 | 38 | ||
| @@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data, | |||
| 62 | { } | 63 | { } |
| 63 | #endif | 64 | #endif |
| 64 | 65 | ||
| 65 | static int initialized; | 66 | static bool initialized; |
| 67 | static bool drvregistered; | ||
| 66 | 68 | ||
| 67 | enum ipmi_panic_event_op { | 69 | enum ipmi_panic_event_op { |
| 68 | IPMI_SEND_PANIC_EVENT_NONE, | 70 | IPMI_SEND_PANIC_EVENT_NONE, |
| @@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex); | |||
| 612 | 614 | ||
| 613 | static LIST_HEAD(ipmi_interfaces); | 615 | static LIST_HEAD(ipmi_interfaces); |
| 614 | static DEFINE_MUTEX(ipmi_interfaces_mutex); | 616 | static DEFINE_MUTEX(ipmi_interfaces_mutex); |
| 615 | DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); | 617 | struct srcu_struct ipmi_interfaces_srcu; |
| 616 | 618 | ||
| 617 | /* | 619 | /* |
| 618 | * List of watchers that want to know when smi's are added and deleted. | 620 | * List of watchers that want to know when smi's are added and deleted. |
| @@ -720,7 +722,15 @@ struct watcher_entry { | |||
| 720 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | 722 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) |
| 721 | { | 723 | { |
| 722 | struct ipmi_smi *intf; | 724 | struct ipmi_smi *intf; |
| 723 | int index; | 725 | int index, rv; |
| 726 | |||
| 727 | /* | ||
| 728 | * Make sure the driver is actually initialized, this handles | ||
| 729 | * problems with initialization order. | ||
| 730 | */ | ||
| 731 | rv = ipmi_init_msghandler(); | ||
| 732 | if (rv) | ||
| 733 | return rv; | ||
| 724 | 734 | ||
| 725 | mutex_lock(&smi_watchers_mutex); | 735 | mutex_lock(&smi_watchers_mutex); |
| 726 | 736 | ||
| @@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) | |||
| 884 | 894 | ||
| 885 | if (user) { | 895 | if (user) { |
| 886 | user->handler->ipmi_recv_hndl(msg, user->handler_data); | 896 | user->handler->ipmi_recv_hndl(msg, user->handler_data); |
| 887 | release_ipmi_user(msg->user, index); | 897 | release_ipmi_user(user, index); |
| 888 | } else { | 898 | } else { |
| 889 | /* User went away, give up. */ | 899 | /* User went away, give up. */ |
| 890 | ipmi_free_recv_msg(msg); | 900 | ipmi_free_recv_msg(msg); |
| @@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num, | |||
| 1076 | { | 1086 | { |
| 1077 | unsigned long flags; | 1087 | unsigned long flags; |
| 1078 | struct ipmi_user *new_user; | 1088 | struct ipmi_user *new_user; |
| 1079 | int rv = 0, index; | 1089 | int rv, index; |
| 1080 | struct ipmi_smi *intf; | 1090 | struct ipmi_smi *intf; |
| 1081 | 1091 | ||
| 1082 | /* | 1092 | /* |
| @@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num, | |||
| 1094 | * Make sure the driver is actually initialized, this handles | 1104 | * Make sure the driver is actually initialized, this handles |
| 1095 | * problems with initialization order. | 1105 | * problems with initialization order. |
| 1096 | */ | 1106 | */ |
| 1097 | if (!initialized) { | 1107 | rv = ipmi_init_msghandler(); |
| 1098 | rv = ipmi_init_msghandler(); | 1108 | if (rv) |
| 1099 | if (rv) | 1109 | return rv; |
| 1100 | return rv; | ||
| 1101 | |||
| 1102 | /* | ||
| 1103 | * The init code doesn't return an error if it was turned | ||
| 1104 | * off, but it won't initialize. Check that. | ||
| 1105 | */ | ||
| 1106 | if (!initialized) | ||
| 1107 | return -ENODEV; | ||
| 1108 | } | ||
| 1109 | 1110 | ||
| 1110 | new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); | 1111 | new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); |
| 1111 | if (!new_user) | 1112 | if (!new_user) |
| @@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info); | |||
| 1183 | static void free_user(struct kref *ref) | 1184 | static void free_user(struct kref *ref) |
| 1184 | { | 1185 | { |
| 1185 | struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); | 1186 | struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); |
| 1187 | cleanup_srcu_struct(&user->release_barrier); | ||
| 1186 | kfree(user); | 1188 | kfree(user); |
| 1187 | } | 1189 | } |
| 1188 | 1190 | ||
| @@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user) | |||
| 1259 | { | 1261 | { |
| 1260 | _ipmi_destroy_user(user); | 1262 | _ipmi_destroy_user(user); |
| 1261 | 1263 | ||
| 1262 | cleanup_srcu_struct(&user->release_barrier); | ||
| 1263 | kref_put(&user->refcount, free_user); | 1264 | kref_put(&user->refcount, free_user); |
| 1264 | 1265 | ||
| 1265 | return 0; | 1266 | return 0; |
| @@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user, | |||
| 1298 | if (!user) | 1299 | if (!user) |
| 1299 | return -ENODEV; | 1300 | return -ENODEV; |
| 1300 | 1301 | ||
| 1301 | if (channel >= IPMI_MAX_CHANNELS) | 1302 | if (channel >= IPMI_MAX_CHANNELS) { |
| 1302 | rv = -EINVAL; | 1303 | rv = -EINVAL; |
| 1303 | else | 1304 | } else { |
| 1305 | channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); | ||
| 1304 | user->intf->addrinfo[channel].address = address; | 1306 | user->intf->addrinfo[channel].address = address; |
| 1307 | } | ||
| 1305 | release_ipmi_user(user, index); | 1308 | release_ipmi_user(user, index); |
| 1306 | 1309 | ||
| 1307 | return rv; | 1310 | return rv; |
| @@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user, | |||
| 1318 | if (!user) | 1321 | if (!user) |
| 1319 | return -ENODEV; | 1322 | return -ENODEV; |
| 1320 | 1323 | ||
| 1321 | if (channel >= IPMI_MAX_CHANNELS) | 1324 | if (channel >= IPMI_MAX_CHANNELS) { |
| 1322 | rv = -EINVAL; | 1325 | rv = -EINVAL; |
| 1323 | else | 1326 | } else { |
| 1327 | channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); | ||
| 1324 | *address = user->intf->addrinfo[channel].address; | 1328 | *address = user->intf->addrinfo[channel].address; |
| 1329 | } | ||
| 1325 | release_ipmi_user(user, index); | 1330 | release_ipmi_user(user, index); |
| 1326 | 1331 | ||
| 1327 | return rv; | 1332 | return rv; |
| @@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user, | |||
| 1338 | if (!user) | 1343 | if (!user) |
| 1339 | return -ENODEV; | 1344 | return -ENODEV; |
| 1340 | 1345 | ||
| 1341 | if (channel >= IPMI_MAX_CHANNELS) | 1346 | if (channel >= IPMI_MAX_CHANNELS) { |
| 1342 | rv = -EINVAL; | 1347 | rv = -EINVAL; |
| 1343 | else | 1348 | } else { |
| 1349 | channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); | ||
| 1344 | user->intf->addrinfo[channel].lun = LUN & 0x3; | 1350 | user->intf->addrinfo[channel].lun = LUN & 0x3; |
| 1351 | } | ||
| 1345 | release_ipmi_user(user, index); | 1352 | release_ipmi_user(user, index); |
| 1346 | 1353 | ||
| 1347 | return rv; | 1354 | return rv; |
| @@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user, | |||
| 1358 | if (!user) | 1365 | if (!user) |
| 1359 | return -ENODEV; | 1366 | return -ENODEV; |
| 1360 | 1367 | ||
| 1361 | if (channel >= IPMI_MAX_CHANNELS) | 1368 | if (channel >= IPMI_MAX_CHANNELS) { |
| 1362 | rv = -EINVAL; | 1369 | rv = -EINVAL; |
| 1363 | else | 1370 | } else { |
| 1371 | channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); | ||
| 1364 | *address = user->intf->addrinfo[channel].lun; | 1372 | *address = user->intf->addrinfo[channel].lun; |
| 1373 | } | ||
| 1365 | release_ipmi_user(user, index); | 1374 | release_ipmi_user(user, index); |
| 1366 | 1375 | ||
| 1367 | return rv; | 1376 | return rv; |
| @@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf, | |||
| 2184 | { | 2193 | { |
| 2185 | if (addr->channel >= IPMI_MAX_CHANNELS) | 2194 | if (addr->channel >= IPMI_MAX_CHANNELS) |
| 2186 | return -EINVAL; | 2195 | return -EINVAL; |
| 2196 | addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); | ||
| 2187 | *lun = intf->addrinfo[addr->channel].lun; | 2197 | *lun = intf->addrinfo[addr->channel].lun; |
| 2188 | *saddr = intf->addrinfo[addr->channel].address; | 2198 | *saddr = intf->addrinfo[addr->channel].address; |
| 2189 | return 0; | 2199 | return 0; |
| @@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
| 3291 | * Make sure the driver is actually initialized, this handles | 3301 | * Make sure the driver is actually initialized, this handles |
| 3292 | * problems with initialization order. | 3302 | * problems with initialization order. |
| 3293 | */ | 3303 | */ |
| 3294 | if (!initialized) { | 3304 | rv = ipmi_init_msghandler(); |
| 3295 | rv = ipmi_init_msghandler(); | 3305 | if (rv) |
| 3296 | if (rv) | 3306 | return rv; |
| 3297 | return rv; | ||
| 3298 | /* | ||
| 3299 | * The init code doesn't return an error if it was turned | ||
| 3300 | * off, but it won't initialize. Check that. | ||
| 3301 | */ | ||
| 3302 | if (!initialized) | ||
| 3303 | return -ENODEV; | ||
| 3304 | } | ||
| 3305 | 3307 | ||
| 3306 | intf = kzalloc(sizeof(*intf), GFP_KERNEL); | 3308 | intf = kzalloc(sizeof(*intf), GFP_KERNEL); |
| 3307 | if (!intf) | 3309 | if (!intf) |
| @@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this, | |||
| 5017 | return NOTIFY_DONE; | 5019 | return NOTIFY_DONE; |
| 5018 | } | 5020 | } |
| 5019 | 5021 | ||
| 5022 | /* Must be called with ipmi_interfaces_mutex held. */ | ||
| 5023 | static int ipmi_register_driver(void) | ||
| 5024 | { | ||
| 5025 | int rv; | ||
| 5026 | |||
| 5027 | if (drvregistered) | ||
| 5028 | return 0; | ||
| 5029 | |||
| 5030 | rv = driver_register(&ipmidriver.driver); | ||
| 5031 | if (rv) | ||
| 5032 | pr_err("Could not register IPMI driver\n"); | ||
| 5033 | else | ||
| 5034 | drvregistered = true; | ||
| 5035 | return rv; | ||
| 5036 | } | ||
| 5037 | |||
| 5020 | static struct notifier_block panic_block = { | 5038 | static struct notifier_block panic_block = { |
| 5021 | .notifier_call = panic_event, | 5039 | .notifier_call = panic_event, |
| 5022 | .next = NULL, | 5040 | .next = NULL, |
| @@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void) | |||
| 5027 | { | 5045 | { |
| 5028 | int rv; | 5046 | int rv; |
| 5029 | 5047 | ||
| 5048 | mutex_lock(&ipmi_interfaces_mutex); | ||
| 5049 | rv = ipmi_register_driver(); | ||
| 5050 | if (rv) | ||
| 5051 | goto out; | ||
| 5030 | if (initialized) | 5052 | if (initialized) |
| 5031 | return 0; | 5053 | goto out; |
| 5032 | |||
| 5033 | rv = driver_register(&ipmidriver.driver); | ||
| 5034 | if (rv) { | ||
| 5035 | pr_err("Could not register IPMI driver\n"); | ||
| 5036 | return rv; | ||
| 5037 | } | ||
| 5038 | 5054 | ||
| 5039 | pr_info("version " IPMI_DRIVER_VERSION "\n"); | 5055 | init_srcu_struct(&ipmi_interfaces_srcu); |
| 5040 | 5056 | ||
| 5041 | timer_setup(&ipmi_timer, ipmi_timeout, 0); | 5057 | timer_setup(&ipmi_timer, ipmi_timeout, 0); |
| 5042 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); | 5058 | mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); |
| 5043 | 5059 | ||
| 5044 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); | 5060 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); |
| 5045 | 5061 | ||
| 5046 | initialized = 1; | 5062 | initialized = true; |
| 5047 | 5063 | ||
| 5048 | return 0; | 5064 | out: |
| 5065 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 5066 | return rv; | ||
| 5049 | } | 5067 | } |
| 5050 | 5068 | ||
| 5051 | static int __init ipmi_init_msghandler_mod(void) | 5069 | static int __init ipmi_init_msghandler_mod(void) |
| 5052 | { | 5070 | { |
| 5053 | ipmi_init_msghandler(); | 5071 | int rv; |
| 5054 | return 0; | 5072 | |
| 5073 | pr_info("version " IPMI_DRIVER_VERSION "\n"); | ||
| 5074 | |||
| 5075 | mutex_lock(&ipmi_interfaces_mutex); | ||
| 5076 | rv = ipmi_register_driver(); | ||
| 5077 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 5078 | |||
| 5079 | return rv; | ||
| 5055 | } | 5080 | } |
| 5056 | 5081 | ||
| 5057 | static void __exit cleanup_ipmi(void) | 5082 | static void __exit cleanup_ipmi(void) |
| 5058 | { | 5083 | { |
| 5059 | int count; | 5084 | int count; |
| 5060 | 5085 | ||
| 5061 | if (!initialized) | 5086 | if (initialized) { |
| 5062 | return; | 5087 | atomic_notifier_chain_unregister(&panic_notifier_list, |
| 5063 | 5088 | &panic_block); | |
| 5064 | atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); | ||
| 5065 | 5089 | ||
| 5066 | /* | 5090 | /* |
| 5067 | * This can't be called if any interfaces exist, so no worry | 5091 | * This can't be called if any interfaces exist, so no worry |
| 5068 | * about shutting down the interfaces. | 5092 | * about shutting down the interfaces. |
| 5069 | */ | 5093 | */ |
| 5070 | 5094 | ||
| 5071 | /* | 5095 | /* |
| 5072 | * Tell the timer to stop, then wait for it to stop. This | 5096 | * Tell the timer to stop, then wait for it to stop. This |
| 5073 | * avoids problems with race conditions removing the timer | 5097 | * avoids problems with race conditions removing the timer |
| 5074 | * here. | 5098 | * here. |
| 5075 | */ | 5099 | */ |
| 5076 | atomic_inc(&stop_operation); | 5100 | atomic_inc(&stop_operation); |
| 5077 | del_timer_sync(&ipmi_timer); | 5101 | del_timer_sync(&ipmi_timer); |
| 5078 | 5102 | ||
| 5079 | driver_unregister(&ipmidriver.driver); | 5103 | initialized = false; |
| 5080 | 5104 | ||
| 5081 | initialized = 0; | 5105 | /* Check for buffer leaks. */ |
| 5106 | count = atomic_read(&smi_msg_inuse_count); | ||
| 5107 | if (count != 0) | ||
| 5108 | pr_warn("SMI message count %d at exit\n", count); | ||
| 5109 | count = atomic_read(&recv_msg_inuse_count); | ||
| 5110 | if (count != 0) | ||
| 5111 | pr_warn("recv message count %d at exit\n", count); | ||
| 5082 | 5112 | ||
| 5083 | /* Check for buffer leaks. */ | 5113 | cleanup_srcu_struct(&ipmi_interfaces_srcu); |
| 5084 | count = atomic_read(&smi_msg_inuse_count); | 5114 | } |
| 5085 | if (count != 0) | 5115 | if (drvregistered) |
| 5086 | pr_warn("SMI message count %d at exit\n", count); | 5116 | driver_unregister(&ipmidriver.driver); |
| 5087 | count = atomic_read(&recv_msg_inuse_count); | ||
| 5088 | if (count != 0) | ||
| 5089 | pr_warn("recv message count %d at exit\n", count); | ||
| 5090 | } | 5117 | } |
| 5091 | module_exit(cleanup_ipmi); | 5118 | module_exit(cleanup_ipmi); |
| 5092 | 5119 | ||
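The message-handler rework above makes ipmi_init_msghandler() idempotent and callable from every registration entry point: it takes ipmi_interfaces_mutex, registers the platform driver at most once (tracked by drvregistered), and performs the remaining one-time setup only while initialized is still false. A compressed sketch of that "initialize lazily, under a lock, at most once" shape; all identifiers here are illustrative, not the driver's:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_init_mutex);
static bool example_initialized;

static int example_do_setup(void)
{
	return 0;                          /* stand-in for timer/notifier/SRCU setup */
}

/* Safe to call from every entry point; the real work happens at most once. */
static int example_init_once(void)
{
	int rv = 0;

	mutex_lock(&example_init_mutex);
	if (example_initialized)
		goto out;                  /* already initialized, nothing to do */

	rv = example_do_setup();
	if (rv)
		goto out;
	example_initialized = true;
out:
	mutex_unlock(&example_init_mutex);
	return rv;
}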
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index ca9528c4f183..b7a1ae2afaea 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
| @@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
| 632 | 632 | ||
| 633 | /* Remove the multi-part read marker. */ | 633 | /* Remove the multi-part read marker. */ |
| 634 | len -= 2; | 634 | len -= 2; |
| 635 | data += 2; | ||
| 635 | for (i = 0; i < len; i++) | 636 | for (i = 0; i < len; i++) |
| 636 | ssif_info->data[i] = data[i+2]; | 637 | ssif_info->data[i] = data[i]; |
| 637 | ssif_info->multi_len = len; | 638 | ssif_info->multi_len = len; |
| 638 | ssif_info->multi_pos = 1; | 639 | ssif_info->multi_pos = 1; |
| 639 | 640 | ||
| @@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
| 661 | } | 662 | } |
| 662 | 663 | ||
| 663 | blocknum = data[0]; | 664 | blocknum = data[0]; |
| 665 | len--; | ||
| 666 | data++; | ||
| 667 | |||
| 668 | if (blocknum != 0xff && len != 31) { | ||
| 669 | /* All blocks but the last must have 31 data bytes. */ | ||
| 670 | result = -EIO; | ||
| 671 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) | ||
| 672 | pr_info("Received middle message <31\n"); | ||
| 664 | 673 | ||
| 665 | if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { | 674 | goto continue_op; |
| 675 | } | ||
| 676 | |||
| 677 | if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) { | ||
| 666 | /* Received message too big, abort the operation. */ | 678 | /* Received message too big, abort the operation. */ |
| 667 | result = -E2BIG; | 679 | result = -E2BIG; |
| 668 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) | 680 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) |
| @@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
| 671 | goto continue_op; | 683 | goto continue_op; |
| 672 | } | 684 | } |
| 673 | 685 | ||
| 674 | /* Remove the blocknum from the data. */ | ||
| 675 | len--; | ||
| 676 | for (i = 0; i < len; i++) | 686 | for (i = 0; i < len; i++) |
| 677 | ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; | 687 | ssif_info->data[i + ssif_info->multi_len] = data[i]; |
| 678 | ssif_info->multi_len += len; | 688 | ssif_info->multi_len += len; |
| 679 | if (blocknum == 0xff) { | 689 | if (blocknum == 0xff) { |
| 680 | /* End of read */ | 690 | /* End of read */ |
| 681 | len = ssif_info->multi_len; | 691 | len = ssif_info->multi_len; |
| 682 | data = ssif_info->data; | 692 | data = ssif_info->data; |
| 683 | } else if (blocknum + 1 != ssif_info->multi_pos) { | 693 | } else if (blocknum != ssif_info->multi_pos) { |
| 684 | /* | 694 | /* |
| 685 | * Out of sequence block, just abort. Block | 695 | * Out of sequence block, just abort. Block |
| 686 | * numbers start at zero for the second block, | 696 | * numbers start at zero for the second block, |
| @@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
| 707 | } | 717 | } |
| 708 | } | 718 | } |
| 709 | 719 | ||
| 720 | continue_op: | ||
| 710 | if (result < 0) { | 721 | if (result < 0) { |
| 711 | ssif_inc_stat(ssif_info, receive_errors); | 722 | ssif_inc_stat(ssif_info, receive_errors); |
| 712 | } else { | 723 | } else { |
| @@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
| 714 | ssif_inc_stat(ssif_info, received_message_parts); | 725 | ssif_inc_stat(ssif_info, received_message_parts); |
| 715 | } | 726 | } |
| 716 | 727 | ||
| 717 | |||
| 718 | continue_op: | ||
| 719 | if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) | 728 | if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) |
| 720 | pr_info("DONE 1: state = %d, result=%d\n", | 729 | pr_info("DONE 1: state = %d, result=%d\n", |
| 721 | ssif_info->ssif_state, result); | 730 | ssif_info->ssif_state, result); |
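The SSIF change strips the block number before copying, rejects any middle block that does not carry exactly 31 data bytes, and moves the continue_op label ahead of the statistics update so an early validation failure is still counted as a receive error. A small sketch of the middle-block validation only; the constants mirror the hunk, the function itself is illustrative:

#include <linux/errno.h>
#include <linux/types.h>

/* Validate one block of a multi-part SSIF read before appending it. */
static int example_check_block(u8 blocknum, size_t len,
			       size_t received_so_far, size_t max_total)
{
	if (blocknum != 0xff && len != 31)
		return -EIO;     /* every block but the final one carries exactly 31 data bytes */
	if (received_so_far + len > max_total)
		return -E2BIG;   /* reassembled message would exceed the allowed maximum */
	return 0;
}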
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index b5e3103c1175..e43c876a9223 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c | |||
| @@ -59,6 +59,7 @@ | |||
| 59 | #include <linux/mutex.h> | 59 | #include <linux/mutex.h> |
| 60 | #include <linux/delay.h> | 60 | #include <linux/delay.h> |
| 61 | #include <linux/serial_8250.h> | 61 | #include <linux/serial_8250.h> |
| 62 | #include <linux/nospec.h> | ||
| 62 | #include "smapi.h" | 63 | #include "smapi.h" |
| 63 | #include "mwavedd.h" | 64 | #include "mwavedd.h" |
| 64 | #include "3780i.h" | 65 | #include "3780i.h" |
| @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
| 289 | ipcnum); | 290 | ipcnum); |
| 290 | return -EINVAL; | 291 | return -EINVAL; |
| 291 | } | 292 | } |
| 293 | ipcnum = array_index_nospec(ipcnum, | ||
| 294 | ARRAY_SIZE(pDrvData->IPCs)); | ||
| 292 | PRINTK_3(TRACE_MWAVE, | 295 | PRINTK_3(TRACE_MWAVE, |
| 293 | "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" | 296 | "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" |
| 294 | " ipcnum %x entry usIntCount %x\n", | 297 | " ipcnum %x entry usIntCount %x\n", |
| @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
| 317 | " Invalid ipcnum %x\n", ipcnum); | 320 | " Invalid ipcnum %x\n", ipcnum); |
| 318 | return -EINVAL; | 321 | return -EINVAL; |
| 319 | } | 322 | } |
| 323 | ipcnum = array_index_nospec(ipcnum, | ||
| 324 | ARRAY_SIZE(pDrvData->IPCs)); | ||
| 320 | PRINTK_3(TRACE_MWAVE, | 325 | PRINTK_3(TRACE_MWAVE, |
| 321 | "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" | 326 | "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" |
| 322 | " ipcnum %x, usIntCount %x\n", | 327 | " ipcnum %x, usIntCount %x\n", |
| @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | |||
| 383 | ipcnum); | 388 | ipcnum); |
| 384 | return -EINVAL; | 389 | return -EINVAL; |
| 385 | } | 390 | } |
| 391 | ipcnum = array_index_nospec(ipcnum, | ||
| 392 | ARRAY_SIZE(pDrvData->IPCs)); | ||
| 386 | mutex_lock(&mwave_mutex); | 393 | mutex_lock(&mwave_mutex); |
| 387 | if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { | 394 | if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { |
| 388 | pDrvData->IPCs[ipcnum].bIsEnabled = false; | 395 | pDrvData->IPCs[ipcnum].bIsEnabled = false; |
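The mwave hunks apply the same hardening used in ipmi_msghandler above: after the bounds check on a user-controlled index, array_index_nospec() clamps the value so it cannot be used to read past the array under speculation. A minimal sketch of the idiom; the array, index, and consumer names are placeholders, only array_index_nospec() and ARRAY_SIZE() are real:

#include <linux/nospec.h>

/* ipcnum comes from userspace; drv_data->ipcs is a fixed-size array. */
if (ipcnum >= ARRAY_SIZE(drv_data->ipcs))
	return -EINVAL;
ipcnum = array_index_nospec(ipcnum, ARRAY_SIZE(drv_data->ipcs));
handle_ipc(&drv_data->ipcs[ipcnum]);    /* hypothetical consumer of the now-clamped index */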
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index e5b2fe80eab4..d2f0bb5ba47e 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
| @@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX | |||
| 293 | source "drivers/clk/actions/Kconfig" | 293 | source "drivers/clk/actions/Kconfig" |
| 294 | source "drivers/clk/bcm/Kconfig" | 294 | source "drivers/clk/bcm/Kconfig" |
| 295 | source "drivers/clk/hisilicon/Kconfig" | 295 | source "drivers/clk/hisilicon/Kconfig" |
| 296 | source "drivers/clk/imx/Kconfig" | ||
| 297 | source "drivers/clk/imgtec/Kconfig" | 296 | source "drivers/clk/imgtec/Kconfig" |
| 298 | source "drivers/clk/imx/Kconfig" | 297 | source "drivers/clk/imx/Kconfig" |
| 299 | source "drivers/clk/ingenic/Kconfig" | 298 | source "drivers/clk/ingenic/Kconfig" |
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 5b393e711e94..7d16ab0784ec 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c | |||
| @@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index) | |||
| 262 | 262 | ||
| 263 | if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) | 263 | if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) |
| 264 | src = VC5_PRIM_SRC_SHDN_EN_XTAL; | 264 | src = VC5_PRIM_SRC_SHDN_EN_XTAL; |
| 265 | if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) | 265 | else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) |
| 266 | src = VC5_PRIM_SRC_SHDN_EN_CLKIN; | 266 | src = VC5_PRIM_SRC_SHDN_EN_CLKIN; |
| 267 | else /* Invalid; should have been caught by vc5_probe() */ | ||
| 268 | return -EINVAL; | ||
| 267 | } | 269 | } |
| 268 | 270 | ||
| 269 | return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); | 271 | return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); |
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 75d13c0eff12..6ccdbedb02f3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
| @@ -2779,7 +2779,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) | |||
| 2779 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); | 2779 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); |
| 2780 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); | 2780 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); |
| 2781 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); | 2781 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); |
| 2782 | seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); | 2782 | seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); |
| 2783 | seq_printf(s, "\"duty_cycle\": %u", | 2783 | seq_printf(s, "\"duty_cycle\": %u", |
| 2784 | clk_core_get_scaled_duty_cycle(c, 100000)); | 2784 | clk_core_get_scaled_duty_cycle(c, 100000)); |
| 2785 | } | 2785 | } |
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c index 99c2508de8e5..fb6edf1b8aa2 100644 --- a/drivers/clk/imx/clk-imx8qxp-lpcg.c +++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c | |||
| @@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev) | |||
| 169 | return -ENODEV; | 169 | return -ENODEV; |
| 170 | 170 | ||
| 171 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 171 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 172 | if (!res) | ||
| 173 | return -EINVAL; | ||
| 172 | base = devm_ioremap(dev, res->start, resource_size(res)); | 174 | base = devm_ioremap(dev, res->start, resource_size(res)); |
| 173 | if (!base) | 175 | if (!base) |
| 174 | return -ENOMEM; | 176 | return -ENOMEM; |
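The i.MX8QXP LPCG fix checks the result of platform_get_resource() before handing it to devm_ioremap(), since a missing memory resource returns NULL rather than an error pointer. Sketch of the checked probe sequence under that assumption (pdev and the error codes mirror the hunk):

struct resource *res;
void __iomem *base;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
	return -EINVAL;     /* no memory region was described for the device */

base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!base)
	return -ENOMEM;     /* mapping the region failed */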
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 1b1ba54e33dd..1c04575c118f 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig | |||
| @@ -215,6 +215,7 @@ config MSM_MMCC_8996 | |||
| 215 | 215 | ||
| 216 | config MSM_GCC_8998 | 216 | config MSM_GCC_8998 |
| 217 | tristate "MSM8998 Global Clock Controller" | 217 | tristate "MSM8998 Global Clock Controller" |
| 218 | select QCOM_GDSC | ||
| 218 | help | 219 | help |
| 219 | Support for the global clock controller on msm8998 devices. | 220 | Support for the global clock controller on msm8998 devices. |
| 220 | Say Y if you want to use peripheral devices such as UART, SPI, | 221 | Say Y if you want to use peripheral devices such as UART, SPI, |
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 2d5d8b43727e..c4d0b6f6abf2 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c | |||
| @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, | |||
| 43 | /* Read mdiv and fdiv from the fdbck register */ | 43 | /* Read mdiv and fdiv from the fdbck register */ |
| 44 | reg = readl(socfpgaclk->hw.reg + 0x4); | 44 | reg = readl(socfpgaclk->hw.reg + 0x4); |
| 45 | mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; | 45 | mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; |
| 46 | vco_freq = (unsigned long long)parent_rate * (mdiv + 6); | 46 | vco_freq = (unsigned long long)vco_freq * (mdiv + 6); |
| 47 | 47 | ||
| 48 | return (unsigned long)vco_freq; | 48 | return (unsigned long)vco_freq; |
| 49 | } | 49 | } |
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5b238fc314ac..8281dfbf38c2 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c | |||
| @@ -12,17 +12,17 @@ | |||
| 12 | 12 | ||
| 13 | #include "stratix10-clk.h" | 13 | #include "stratix10-clk.h" |
| 14 | 14 | ||
| 15 | static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", | 15 | static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", |
| 16 | "f2s_free_clk",}; | 16 | "f2s-free-clk",}; |
| 17 | static const char * const cntr_mux[] = { "main_pll", "periph_pll", | 17 | static const char * const cntr_mux[] = { "main_pll", "periph_pll", |
| 18 | "osc1", "cb_intosc_hs_div2_clk", | 18 | "osc1", "cb-intosc-hs-div2-clk", |
| 19 | "f2s_free_clk"}; | 19 | "f2s-free-clk"}; |
| 20 | static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; | 20 | static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; |
| 21 | 21 | ||
| 22 | static const char * const noc_free_mux[] = {"main_noc_base_clk", | 22 | static const char * const noc_free_mux[] = {"main_noc_base_clk", |
| 23 | "peri_noc_base_clk", | 23 | "peri_noc_base_clk", |
| 24 | "osc1", "cb_intosc_hs_div2_clk", | 24 | "osc1", "cb-intosc-hs-div2-clk", |
| 25 | "f2s_free_clk"}; | 25 | "f2s-free-clk"}; |
| 26 | 26 | ||
| 27 | static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; | 27 | static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; |
| 28 | static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; | 28 | static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; |
| @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" | |||
| 33 | static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; | 33 | static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; |
| 34 | static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; | 34 | static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; |
| 35 | 35 | ||
| 36 | static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; | 36 | static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; |
| 37 | static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; | 37 | static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; |
| 38 | static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; | 38 | static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; |
| 39 | 39 | ||
| 40 | static const char * const mpu_free_mux[] = {"main_mpu_base_clk", | 40 | static const char * const mpu_free_mux[] = {"main_mpu_base_clk", |
| 41 | "peri_mpu_base_clk", | 41 | "peri_mpu_base_clk", |
| 42 | "osc1", "cb_intosc_hs_div2_clk", | 42 | "osc1", "cb-intosc-hs-div2-clk", |
| 43 | "f2s_free_clk"}; | 43 | "f2s-free-clk"}; |
| 44 | 44 | ||
| 45 | /* clocks in AO (always on) controller */ | 45 | /* clocks in AO (always on) controller */ |
| 46 | static const struct stratix10_pll_clock s10_pll_clks[] = { | 46 | static const struct stratix10_pll_clock s10_pll_clks[] = { |
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c index 269d3595758b..edc31bb56674 100644 --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | |||
| @@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev) | |||
| 133 | struct tegra_dfll_soc_data *soc; | 133 | struct tegra_dfll_soc_data *soc; |
| 134 | 134 | ||
| 135 | soc = tegra_dfll_unregister(pdev); | 135 | soc = tegra_dfll_unregister(pdev); |
| 136 | if (IS_ERR(soc)) | 136 | if (IS_ERR(soc)) { |
| 137 | dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", | 137 | dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", |
| 138 | PTR_ERR(soc)); | 138 | PTR_ERR(soc)); |
| 139 | return PTR_ERR(soc); | ||
| 140 | } | ||
| 139 | 141 | ||
| 140 | tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); | 142 | tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); |
| 141 | 143 | ||
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index f65cc0ff76ab..b0908ec62f73 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c | |||
| @@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np) | |||
| 669 | if (ret) | 669 | if (ret) |
| 670 | return ret; | 670 | return ret; |
| 671 | 671 | ||
| 672 | zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) * | 672 | zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx), |
| 673 | clock_max_idx, GFP_KERNEL); | 673 | GFP_KERNEL); |
| 674 | if (!zynqmp_data) | 674 | if (!zynqmp_data) |
| 675 | return -ENOMEM; | 675 | return -ENOMEM; |
| 676 | 676 | ||
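The zynqmp hunk replaces the open-coded size arithmetic (which multiplied by the wrong element size) with struct_size(), which computes the size of a structure ending in a flexible array member and checks the multiplication for overflow. A hedged sketch of the idiom with a made-up structure; n stands in for the element count:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_clk_table {
	unsigned int num;
	struct clk_hw *hws[];            /* flexible array member */
};

/* struct_size(t, hws, n) == sizeof(*t) + n * sizeof(t->hws[0]), with overflow checking. */
struct example_clk_table *t = kzalloc(struct_size(t, hws, n), GFP_KERNEL);
if (!t)
	return -ENOMEM;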
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5a90075f719d..0be55fcc19ba 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU | |||
| 692 | depends on ARCH_BCM_IPROC | 692 | depends on ARCH_BCM_IPROC |
| 693 | depends on MAILBOX | 693 | depends on MAILBOX |
| 694 | default m | 694 | default m |
| 695 | select CRYPTO_AUTHENC | ||
| 695 | select CRYPTO_DES | 696 | select CRYPTO_DES |
| 696 | select CRYPTO_MD5 | 697 | select CRYPTO_MD5 |
| 697 | select CRYPTO_SHA1 | 698 | select CRYPTO_SHA1 |
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c9393ffb70ed..5567cbda2798 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
| @@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2845 | struct spu_hw *spu = &iproc_priv.spu; | 2845 | struct spu_hw *spu = &iproc_priv.spu; |
| 2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); | 2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); |
| 2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | 2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); |
| 2848 | struct rtattr *rta = (void *)key; | 2848 | struct crypto_authenc_keys keys; |
| 2849 | struct crypto_authenc_key_param *param; | 2849 | int ret; |
| 2850 | const u8 *origkey = key; | ||
| 2851 | const unsigned int origkeylen = keylen; | ||
| 2852 | |||
| 2853 | int ret = 0; | ||
| 2854 | 2850 | ||
| 2855 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, | 2851 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, |
| 2856 | keylen); | 2852 | keylen); |
| 2857 | flow_dump(" key: ", key, keylen); | 2853 | flow_dump(" key: ", key, keylen); |
| 2858 | 2854 | ||
| 2859 | if (!RTA_OK(rta, keylen)) | 2855 | ret = crypto_authenc_extractkeys(&keys, key, keylen); |
| 2860 | goto badkey; | 2856 | if (ret) |
| 2861 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
| 2862 | goto badkey; | ||
| 2863 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 2864 | goto badkey; | 2857 | goto badkey; |
| 2865 | 2858 | ||
| 2866 | param = RTA_DATA(rta); | 2859 | if (keys.enckeylen > MAX_KEY_SIZE || |
| 2867 | ctx->enckeylen = be32_to_cpu(param->enckeylen); | 2860 | keys.authkeylen > MAX_KEY_SIZE) |
| 2868 | |||
| 2869 | key += RTA_ALIGN(rta->rta_len); | ||
| 2870 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 2871 | |||
| 2872 | if (keylen < ctx->enckeylen) | ||
| 2873 | goto badkey; | ||
| 2874 | if (ctx->enckeylen > MAX_KEY_SIZE) | ||
| 2875 | goto badkey; | 2861 | goto badkey; |
| 2876 | 2862 | ||
| 2877 | ctx->authkeylen = keylen - ctx->enckeylen; | 2863 | ctx->enckeylen = keys.enckeylen; |
| 2878 | 2864 | ctx->authkeylen = keys.authkeylen; | |
| 2879 | if (ctx->authkeylen > MAX_KEY_SIZE) | ||
| 2880 | goto badkey; | ||
| 2881 | 2865 | ||
| 2882 | memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); | 2866 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
| 2883 | /* May end up padding auth key. So make sure it's zeroed. */ | 2867 | /* May end up padding auth key. So make sure it's zeroed. */ |
| 2884 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); | 2868 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); |
| 2885 | memcpy(ctx->authkey, key, ctx->authkeylen); | 2869 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
| 2886 | 2870 | ||
| 2887 | switch (ctx->alg->cipher_info.alg) { | 2871 | switch (ctx->alg->cipher_info.alg) { |
| 2888 | case CIPHER_ALG_DES: | 2872 | case CIPHER_ALG_DES: |
| @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2890 | u32 tmp[DES_EXPKEY_WORDS]; | 2874 | u32 tmp[DES_EXPKEY_WORDS]; |
| 2891 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; | 2875 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; |
| 2892 | 2876 | ||
| 2893 | if (des_ekey(tmp, key) == 0) { | 2877 | if (des_ekey(tmp, keys.enckey) == 0) { |
| 2894 | if (crypto_aead_get_flags(cipher) & | 2878 | if (crypto_aead_get_flags(cipher) & |
| 2895 | CRYPTO_TFM_REQ_WEAK_KEY) { | 2879 | CRYPTO_TFM_REQ_WEAK_KEY) { |
| 2896 | crypto_aead_set_flags(cipher, flags); | 2880 | crypto_aead_set_flags(cipher, flags); |
| @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2905 | break; | 2889 | break; |
| 2906 | case CIPHER_ALG_3DES: | 2890 | case CIPHER_ALG_3DES: |
| 2907 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { | 2891 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { |
| 2908 | const u32 *K = (const u32 *)key; | 2892 | const u32 *K = (const u32 *)keys.enckey; |
| 2909 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; | 2893 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; |
| 2910 | 2894 | ||
| 2911 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | 2895 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
| @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2956 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 2940 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
| 2957 | ctx->fallback_cipher->base.crt_flags |= | 2941 | ctx->fallback_cipher->base.crt_flags |= |
| 2958 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; | 2942 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; |
| 2959 | ret = | 2943 | ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); |
| 2960 | crypto_aead_setkey(ctx->fallback_cipher, origkey, | ||
| 2961 | origkeylen); | ||
| 2962 | if (ret) { | 2944 | if (ret) { |
| 2963 | flow_log(" fallback setkey() returned:%d\n", ret); | 2945 | flow_log(" fallback setkey() returned:%d\n", ret); |
| 2964 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 2946 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
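This driver, like the ccree change further down, replaces open-coded parsing of the authenc() key blob with crypto_authenc_extractkeys(). The blob carries an rtattr-style header with the big-endian cipher-key length, then the authentication key, then the cipher key. A rough userspace sketch of that split follows; the 8-byte header size and field offsets are simplified assumptions for illustration, and real drivers must call crypto_authenc_extractkeys(), which also validates the rtattr fields:

    #include <stdint.h>

    struct authenc_keys_sketch {
        const uint8_t *authkey;
        const uint8_t *enckey;
        uint32_t authkeylen;
        uint32_t enckeylen;
    };

    /* Simplified parser: 4-byte rtattr header + 4-byte big-endian enckeylen,
     * followed by the auth key and then the cipher key. */
    static int extract_keys_sketch(struct authenc_keys_sketch *keys,
                                   const uint8_t *key, uint32_t keylen)
    {
        const uint32_t hdrlen = 8;
        uint32_t enckeylen;

        if (keylen < hdrlen)
            return -1;
        enckeylen = ((uint32_t)key[4] << 24) | ((uint32_t)key[5] << 16) |
                    ((uint32_t)key[6] << 8)  |  (uint32_t)key[7];

        key += hdrlen;
        keylen -= hdrlen;
        if (enckeylen > keylen)
            return -1;

        keys->enckeylen  = enckeylen;
        keys->authkeylen = keylen - enckeylen;
        keys->authkey    = key;
        keys->enckey     = key + keys->authkeylen;
        return 0;
    }

    int main(void)
    {
        /* 8-byte header (enckeylen = 16), 20-byte auth key, 16-byte enc key */
        uint8_t blob[8 + 20 + 16] = { 0, 0, 0, 0, 0, 0, 0, 16 };
        struct authenc_keys_sketch keys;

        return extract_keys_sketch(&keys, blob, sizeof(blob));
    }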
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 92e593e2069a..80ae69f906fb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void) | |||
| 3476 | * Skip algorithms requiring message digests | 3476 | * Skip algorithms requiring message digests |
| 3477 | * if MD or MD size is not supported by device. | 3477 | * if MD or MD size is not supported by device. |
| 3478 | */ | 3478 | */ |
| 3479 | if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && | 3479 | if (is_mdha(c2_alg_sel) && |
| 3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) | 3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) |
| 3481 | continue; | 3481 | continue; |
| 3482 | 3482 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 81712aa5d0f2..bb1a2cdf1951 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1072 | 1072 | ||
| 1073 | desc = edesc->hw_desc; | 1073 | desc = edesc->hw_desc; |
| 1074 | 1074 | ||
| 1075 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | 1075 | if (buflen) { |
| 1076 | if (dma_mapping_error(jrdev, state->buf_dma)) { | 1076 | state->buf_dma = dma_map_single(jrdev, buf, buflen, |
| 1077 | dev_err(jrdev, "unable to map src\n"); | 1077 | DMA_TO_DEVICE); |
| 1078 | goto unmap; | 1078 | if (dma_mapping_error(jrdev, state->buf_dma)) { |
| 1079 | } | 1079 | dev_err(jrdev, "unable to map src\n"); |
| 1080 | goto unmap; | ||
| 1081 | } | ||
| 1080 | 1082 | ||
| 1081 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | 1083 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
| 1084 | } | ||
| 1082 | 1085 | ||
| 1083 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1086 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
| 1084 | digestsize); | 1087 | digestsize); |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index ec10230178c5..4b6854bf896a 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -1155,6 +1155,7 @@ | |||
| 1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) | 1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) |
| 1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) | 1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) |
| 1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) | 1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) |
| 1158 | #define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) | ||
| 1158 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) | 1159 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) |
| 1159 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) | 1160 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) |
| 1160 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) | 1161 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) |
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 67ea94079837..8c6b83e02a70 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
| @@ -7,6 +7,9 @@ | |||
| 7 | 7 | ||
| 8 | #ifndef CAAM_ERROR_H | 8 | #ifndef CAAM_ERROR_H |
| 9 | #define CAAM_ERROR_H | 9 | #define CAAM_ERROR_H |
| 10 | |||
| 11 | #include "desc.h" | ||
| 12 | |||
| 10 | #define CAAM_ERROR_STR_MAX 302 | 13 | #define CAAM_ERROR_STR_MAX 302 |
| 11 | 14 | ||
| 12 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | 15 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); |
| @@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | |||
| 17 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 20 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, |
| 18 | int rowsize, int groupsize, struct scatterlist *sg, | 21 | int rowsize, int groupsize, struct scatterlist *sg, |
| 19 | size_t tlen, bool ascii); | 22 | size_t tlen, bool ascii); |
| 23 | |||
| 24 | static inline bool is_mdha(u32 algtype) | ||
| 25 | { | ||
| 26 | return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == | ||
| 27 | OP_ALG_CHA_MDHA; | ||
| 28 | } | ||
| 20 | #endif /* CAAM_ERROR_H */ | 29 | #endif /* CAAM_ERROR_H */ |
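The new is_mdha() helper replaces the magic 0x40 comparison in caamalg.c above: every message-digest selector shares the 0x4x class, so clearing the low selector nibble identifies the MDHA block no matter which digest was requested. A self-contained sketch with illustrative constants (the shift and masks mirror desc.h but are not authoritative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ALGSEL_SHIFT    16
    #define ALGSEL_MASK     (0xffu << ALGSEL_SHIFT)
    #define ALGSEL_SUBMASK  (0x0fu << ALGSEL_SHIFT)
    #define CHA_MDHA        (0x40u << ALGSEL_SHIFT)   /* message digest class */
    #define ALGSEL_MD5      (0x40u << ALGSEL_SHIFT)
    #define ALGSEL_SHA224   (0x42u << ALGSEL_SHIFT)
    #define ALGSEL_AES      (0x10u << ALGSEL_SHIFT)   /* assumed non-MDHA value */

    static bool is_mdha_sketch(uint32_t algtype)
    {
        /* Keep the selector field, drop its low nibble: MD5, SHA1, SHA224,
         * ... all collapse to the MDHA class value. */
        return (algtype & ALGSEL_MASK & ~ALGSEL_SUBMASK) == CHA_MDHA;
    }

    int main(void)
    {
        assert(is_mdha_sketch(ALGSEL_MD5));
        assert(is_mdha_sketch(ALGSEL_SHA224));
        assert(!is_mdha_sketch(ALGSEL_AES));
        return 0;
    }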
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index e34e4df8fd24..fe070d75c842 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
| @@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq) | |||
| 567 | 567 | ||
| 568 | /* ORH error code */ | 568 | /* ORH error code */ |
| 569 | err = READ_ONCE(*sr->resp.orh) & 0xff; | 569 | err = READ_ONCE(*sr->resp.orh) & 0xff; |
| 570 | softreq_destroy(sr); | ||
| 571 | 570 | ||
| 572 | if (sr->callback) | 571 | if (sr->callback) |
| 573 | sr->callback(sr->cb_arg, err); | 572 | sr->callback(sr->cb_arg, err); |
| 573 | softreq_destroy(sr); | ||
| 574 | 574 | ||
| 575 | req_completed++; | 575 | req_completed++; |
| 576 | } | 576 | } |
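The reordering above fixes a use-after-free: softreq_destroy() frees the request, so the old code read sr->callback and sr->cb_arg from freed memory. The safe pattern, as a tiny sketch with made-up types:

    #include <stdlib.h>

    struct softreq_sketch {
        void (*callback)(void *arg, int err);
        void *cb_arg;
    };

    static void complete_and_destroy(struct softreq_sketch *sr, int err)
    {
        /* Use sr while it is still valid, then free it. Freeing first and
         * touching sr afterwards is the bug the patch removes. */
        if (sr->callback)
            sr->callback(sr->cb_arg, err);
        free(sr);
    }

    static void done_sketch(void *arg, int err)
    {
        (void)arg;
        (void)err;
    }

    int main(void)
    {
        struct softreq_sketch *sr = calloc(1, sizeof(*sr));

        if (!sr)
            return 1;
        sr->callback = done_sketch;
        complete_and_destroy(sr, 0);
        return 0;
    }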
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index f2643cda45db..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c | |||
| @@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 549 | unsigned int keylen) | 549 | unsigned int keylen) |
| 550 | { | 550 | { |
| 551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 552 | struct rtattr *rta = (struct rtattr *)key; | ||
| 553 | struct cc_crypto_req cc_req = {}; | 552 | struct cc_crypto_req cc_req = {}; |
| 554 | struct crypto_authenc_key_param *param; | ||
| 555 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; | 553 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; |
| 556 | int rc = -EINVAL; | ||
| 557 | unsigned int seq_len = 0; | 554 | unsigned int seq_len = 0; |
| 558 | struct device *dev = drvdata_to_dev(ctx->drvdata); | 555 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
| 556 | const u8 *enckey, *authkey; | ||
| 557 | int rc; | ||
| 559 | 558 | ||
| 560 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", | 559 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", |
| 561 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); | 560 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); |
| @@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 563 | /* STAT_PHASE_0: Init and sanity checks */ | 562 | /* STAT_PHASE_0: Init and sanity checks */ |
| 564 | 563 | ||
| 565 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ | 564 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ |
| 566 | if (!RTA_OK(rta, keylen)) | 565 | struct crypto_authenc_keys keys; |
| 567 | goto badkey; | 566 | |
| 568 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 567 | rc = crypto_authenc_extractkeys(&keys, key, keylen); |
| 569 | goto badkey; | 568 | if (rc) |
| 570 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 571 | goto badkey; | ||
| 572 | param = RTA_DATA(rta); | ||
| 573 | ctx->enc_keylen = be32_to_cpu(param->enckeylen); | ||
| 574 | key += RTA_ALIGN(rta->rta_len); | ||
| 575 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 576 | if (keylen < ctx->enc_keylen) | ||
| 577 | goto badkey; | 569 | goto badkey; |
| 578 | ctx->auth_keylen = keylen - ctx->enc_keylen; | 570 | enckey = keys.enckey; |
| 571 | authkey = keys.authkey; | ||
| 572 | ctx->enc_keylen = keys.enckeylen; | ||
| 573 | ctx->auth_keylen = keys.authkeylen; | ||
| 579 | 574 | ||
| 580 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { | 575 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
| 581 | /* the nonce is stored in bytes at end of key */ | 576 | /* the nonce is stored in bytes at end of key */ |
| 577 | rc = -EINVAL; | ||
| 582 | if (ctx->enc_keylen < | 578 | if (ctx->enc_keylen < |
| 583 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) | 579 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) |
| 584 | goto badkey; | 580 | goto badkey; |
| 585 | /* Copy nonce from last 4 bytes in CTR key to | 581 | /* Copy nonce from last 4 bytes in CTR key to |
| 586 | * first 4 bytes in CTR IV | 582 | * first 4 bytes in CTR IV |
| 587 | */ | 583 | */ |
| 588 | memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + | 584 | memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - |
| 589 | ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, | 585 | CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); |
| 590 | CTR_RFC3686_NONCE_SIZE); | ||
| 591 | /* Set CTR key size */ | 586 | /* Set CTR key size */ |
| 592 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; | 587 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; |
| 593 | } | 588 | } |
| 594 | } else { /* non-authenc - has just one key */ | 589 | } else { /* non-authenc - has just one key */ |
| 590 | enckey = key; | ||
| 591 | authkey = NULL; | ||
| 595 | ctx->enc_keylen = keylen; | 592 | ctx->enc_keylen = keylen; |
| 596 | ctx->auth_keylen = 0; | 593 | ctx->auth_keylen = 0; |
| 597 | } | 594 | } |
| @@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 603 | /* STAT_PHASE_1: Copy key to ctx */ | 600 | /* STAT_PHASE_1: Copy key to ctx */ |
| 604 | 601 | ||
| 605 | /* Get key material */ | 602 | /* Get key material */ |
| 606 | memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); | 603 | memcpy(ctx->enckey, enckey, ctx->enc_keylen); |
| 607 | if (ctx->enc_keylen == 24) | 604 | if (ctx->enc_keylen == 24) |
| 608 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); | 605 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); |
| 609 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { | 606 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { |
| 610 | memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); | 607 | memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, |
| 608 | ctx->auth_keylen); | ||
| 611 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ | 609 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ |
| 612 | rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); | 610 | rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); |
| 613 | if (rc) | 611 | if (rc) |
| 614 | goto badkey; | 612 | goto badkey; |
| 615 | } | 613 | } |
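With the keys now split by crypto_authenc_extractkeys(), the rfc3686 nonce has to be peeled off the tail of the cipher key itself rather than computed from offsets into the combined blob. A minimal sketch of that split, using the same size constants the hunk checks against (function name and error convention are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define AES_MIN_KEY_SIZE        16
    #define CTR_RFC3686_NONCE_SIZE  4

    static int split_ctr_key_sketch(const uint8_t *enckey, size_t enc_keylen,
                                    uint8_t nonce[CTR_RFC3686_NONCE_SIZE],
                                    size_t *aes_keylen)
    {
        if (enc_keylen < AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
            return -1;
        /* The 4-byte nonce rides at the end of the cipher key. */
        memcpy(nonce, enckey + enc_keylen - CTR_RFC3686_NONCE_SIZE,
               CTR_RFC3686_NONCE_SIZE);
        *aes_keylen = enc_keylen - CTR_RFC3686_NONCE_SIZE;
        return 0;
    }

    int main(void)
    {
        uint8_t key[20] = { 0 };        /* 16-byte AES key + 4-byte nonce */
        uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
        size_t aes_keylen;

        return split_ctr_key_sketch(key, sizeof(key), nonce, &aes_keylen);
    }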
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 45e20707cef8..f8e2c5c3f4eb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1361 | struct talitos_private *priv = dev_get_drvdata(dev); | 1361 | struct talitos_private *priv = dev_get_drvdata(dev); |
| 1362 | bool is_sec1 = has_ftr_sec1(priv); | 1362 | bool is_sec1 = has_ftr_sec1(priv); |
| 1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; | 1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; |
| 1364 | void *err; | ||
| 1365 | 1364 | ||
| 1366 | if (cryptlen + authsize > max_len) { | 1365 | if (cryptlen + authsize > max_len) { |
| 1367 | dev_err(dev, "length exceeds h/w max limit\n"); | 1366 | dev_err(dev, "length exceeds h/w max limit\n"); |
| 1368 | return ERR_PTR(-EINVAL); | 1367 | return ERR_PTR(-EINVAL); |
| 1369 | } | 1368 | } |
| 1370 | 1369 | ||
| 1371 | if (ivsize) | ||
| 1372 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
| 1373 | |||
| 1374 | if (!dst || dst == src) { | 1370 | if (!dst || dst == src) { |
| 1375 | src_len = assoclen + cryptlen + authsize; | 1371 | src_len = assoclen + cryptlen + authsize; |
| 1376 | src_nents = sg_nents_for_len(src, src_len); | 1372 | src_nents = sg_nents_for_len(src, src_len); |
| 1377 | if (src_nents < 0) { | 1373 | if (src_nents < 0) { |
| 1378 | dev_err(dev, "Invalid number of src SG.\n"); | 1374 | dev_err(dev, "Invalid number of src SG.\n"); |
| 1379 | err = ERR_PTR(-EINVAL); | 1375 | return ERR_PTR(-EINVAL); |
| 1380 | goto error_sg; | ||
| 1381 | } | 1376 | } |
| 1382 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1377 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1383 | dst_nents = dst ? src_nents : 0; | 1378 | dst_nents = dst ? src_nents : 0; |
| @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1387 | src_nents = sg_nents_for_len(src, src_len); | 1382 | src_nents = sg_nents_for_len(src, src_len); |
| 1388 | if (src_nents < 0) { | 1383 | if (src_nents < 0) { |
| 1389 | dev_err(dev, "Invalid number of src SG.\n"); | 1384 | dev_err(dev, "Invalid number of src SG.\n"); |
| 1390 | err = ERR_PTR(-EINVAL); | 1385 | return ERR_PTR(-EINVAL); |
| 1391 | goto error_sg; | ||
| 1392 | } | 1386 | } |
| 1393 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1387 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1394 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); | 1388 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); |
| 1395 | dst_nents = sg_nents_for_len(dst, dst_len); | 1389 | dst_nents = sg_nents_for_len(dst, dst_len); |
| 1396 | if (dst_nents < 0) { | 1390 | if (dst_nents < 0) { |
| 1397 | dev_err(dev, "Invalid number of dst SG.\n"); | 1391 | dev_err(dev, "Invalid number of dst SG.\n"); |
| 1398 | err = ERR_PTR(-EINVAL); | 1392 | return ERR_PTR(-EINVAL); |
| 1399 | goto error_sg; | ||
| 1400 | } | 1393 | } |
| 1401 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1394 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
| 1402 | } | 1395 | } |
| @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1423 | /* if it's an ahash, add space for a second desc next to the first one */ | 1416 | /* if it's an ahash, add space for a second desc next to the first one */ |
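(The allocation layout this enables is sketched after this hunk.)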
| 1424 | if (is_sec1 && !dst) | 1417 | if (is_sec1 && !dst) |
| 1425 | alloc_len += sizeof(struct talitos_desc); | 1418 | alloc_len += sizeof(struct talitos_desc); |
| 1419 | alloc_len += ivsize; | ||
| 1426 | 1420 | ||
| 1427 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1421 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
| 1428 | if (!edesc) { | 1422 | if (!edesc) |
| 1429 | err = ERR_PTR(-ENOMEM); | 1423 | return ERR_PTR(-ENOMEM); |
| 1430 | goto error_sg; | 1424 | if (ivsize) { |
| 1425 | iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); | ||
| 1426 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
| 1431 | } | 1427 | } |
| 1432 | memset(&edesc->desc, 0, sizeof(edesc->desc)); | 1428 | memset(&edesc->desc, 0, sizeof(edesc->desc)); |
| 1433 | 1429 | ||
| @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1445 | DMA_BIDIRECTIONAL); | 1441 | DMA_BIDIRECTIONAL); |
| 1446 | } | 1442 | } |
| 1447 | return edesc; | 1443 | return edesc; |
| 1448 | error_sg: | ||
| 1449 | if (iv_dma) | ||
| 1450 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
| 1451 | return err; | ||
| 1452 | } | 1444 | } |
| 1453 | 1445 | ||
| 1454 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | 1446 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
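Mapping the IV before any of the length checks meant every early return leaked a DMA mapping; the patch defers the mapping until the descriptor is allocated and stashes the IV in spare bytes at the tail of that single allocation. The layout, as a hedged userspace sketch (types, sizes and names are placeholders):

    #include <stdlib.h>
    #include <string.h>

    struct edesc_sketch {
        size_t alloc_len;
        /* hardware descriptor, link tables, ... then ivsize bytes of IV */
    };

    static struct edesc_sketch *edesc_alloc_sketch(const void *iv, size_t ivsize,
                                                   size_t base_len)
    {
        /* Reserve room for the IV at the end of the one allocation. */
        size_t alloc_len = base_len + ivsize;
        struct edesc_sketch *edesc = malloc(alloc_len);

        if (!edesc)
            return NULL;        /* nothing mapped yet, so nothing to undo */
        edesc->alloc_len = alloc_len;
        if (ivsize)
            memcpy((char *)edesc + alloc_len - ivsize, iv, ivsize);
        /* only now would the driver dma_map_single() the copied IV */
        return edesc;
    }

    int main(void)
    {
        unsigned char iv[16] = { 0 };
        struct edesc_sketch *e;
        int ret;

        e = edesc_alloc_sketch(iv, sizeof(iv),
                               sizeof(struct edesc_sketch) + 64);
        ret = e ? 0 : 1;
        free(e);
        return ret;
    }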
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h index 4213cb0bb2a7..f8664bac9fa8 100644 --- a/drivers/edac/altera_edac.h +++ b/drivers/edac/altera_edac.h | |||
| @@ -295,8 +295,8 @@ struct altr_sdram_mc_data { | |||
| 295 | #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 | 295 | #define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 |
| 296 | 296 | ||
| 297 | /* Sticky registers for Uncorrected Errors */ | 297 | /* Sticky registers for Uncorrected Errors */ |
| 298 | #define S10_SYSMGR_UE_VAL_OFST 0x120 | 298 | #define S10_SYSMGR_UE_VAL_OFST 0x220 |
| 299 | #define S10_SYSMGR_UE_ADDR_OFST 0x124 | 299 | #define S10_SYSMGR_UE_ADDR_OFST 0x224 |
| 300 | 300 | ||
| 301 | #define S10_DDR0_IRQ_MASK BIT(16) | 301 | #define S10_DDR0_IRQ_MASK BIT(16) |
| 302 | 302 | ||
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 09b845e90114..a785ffd5af89 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
| @@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) | |||
| 1144 | if (device->is_local) | 1144 | if (device->is_local) |
| 1145 | return -ENODEV; | 1145 | return -ENODEV; |
| 1146 | 1146 | ||
| 1147 | if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) | ||
| 1148 | WARN_ON(dma_set_max_seg_size(device->card->device, | ||
| 1149 | SBP2_MAX_SEG_SIZE)); | ||
| 1150 | |||
| 1151 | shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); | 1147 | shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); |
| 1152 | if (shost == NULL) | 1148 | if (shost == NULL) |
| 1153 | return -ENOMEM; | 1149 | return -ENOMEM; |
| @@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = { | |||
| 1610 | .eh_abort_handler = sbp2_scsi_abort, | 1606 | .eh_abort_handler = sbp2_scsi_abort, |
| 1611 | .this_id = -1, | 1607 | .this_id = -1, |
| 1612 | .sg_tablesize = SG_ALL, | 1608 | .sg_tablesize = SG_ALL, |
| 1609 | .max_segment_size = SBP2_MAX_SEG_SIZE, | ||
| 1613 | .can_queue = 1, | 1610 | .can_queue = 1, |
| 1614 | .sdev_attrs = sbp2_scsi_sysfs_attrs, | 1611 | .sdev_attrs = sbp2_scsi_sysfs_attrs, |
| 1615 | }; | 1612 | }; |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 83617fdc661d..0dc96419efe3 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
| @@ -289,7 +289,7 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg) | |||
| 289 | return pca953x_check_register(chip, reg, bank); | 289 | return pca953x_check_register(chip, reg, bank); |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | const struct regmap_config pca953x_i2c_regmap = { | 292 | static const struct regmap_config pca953x_i2c_regmap = { |
| 293 | .reg_bits = 8, | 293 | .reg_bits = 8, |
| 294 | .val_bits = 8, | 294 | .val_bits = 8, |
| 295 | 295 | ||
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 48534bda73d3..259cf6ab969b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
| 357 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | 357 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); |
| 358 | 358 | ||
| 359 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { | 359 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { |
| 360 | struct gpio_desc *desc; | ||
| 361 | |||
| 362 | if (event->irq_requested) { | 360 | if (event->irq_requested) { |
| 363 | if (event->irq_is_wake) | 361 | if (event->irq_is_wake) |
| 364 | disable_irq_wake(event->irq); | 362 | disable_irq_wake(event->irq); |
| @@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
| 366 | free_irq(event->irq, event); | 364 | free_irq(event->irq, event); |
| 367 | } | 365 | } |
| 368 | 366 | ||
| 369 | desc = event->desc; | ||
| 370 | if (WARN_ON(IS_ERR(desc))) | ||
| 371 | continue; | ||
| 372 | gpiochip_unlock_as_irq(chip, event->pin); | 367 | gpiochip_unlock_as_irq(chip, event->pin); |
| 373 | gpiochip_free_own_desc(desc); | 368 | gpiochip_free_own_desc(event->desc); |
| 374 | list_del(&event->node); | 369 | list_del(&event->node); |
| 375 | kfree(event); | 370 | kfree(event); |
| 376 | } | 371 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a028661d9e20..92b11de19581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
| @@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { | |||
| 576 | { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 576 | { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 577 | { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 577 | { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 578 | { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, | 578 | { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, |
| 579 | { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
| 579 | { 0, 0, 0, 0, 0 }, | 580 | { 0, 0, 0, 0, 0 }, |
| 580 | }; | 581 | }; |
| 581 | 582 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dafc645b2e4e..b083b219b1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
| @@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
| 531 | struct drm_gem_object *obj; | 531 | struct drm_gem_object *obj; |
| 532 | struct amdgpu_framebuffer *amdgpu_fb; | 532 | struct amdgpu_framebuffer *amdgpu_fb; |
| 533 | int ret; | 533 | int ret; |
| 534 | int height; | ||
| 535 | struct amdgpu_device *adev = dev->dev_private; | ||
| 536 | int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0); | ||
| 537 | int pitch = mode_cmd->pitches[0] / cpp; | ||
| 538 | |||
| 539 | pitch = amdgpu_align_pitch(adev, pitch, cpp, false); | ||
| 540 | if (mode_cmd->pitches[0] != pitch) { | ||
| 541 | DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n", | ||
| 542 | pitch, mode_cmd->pitches[0]); | ||
| 543 | return ERR_PTR(-EINVAL); | ||
| 544 | } | ||
| 545 | 534 | ||
| 546 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); | 535 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); |
| 547 | if (obj == NULL) { | 536 | if (obj == NULL) { |
| @@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
| 556 | return ERR_PTR(-EINVAL); | 545 | return ERR_PTR(-EINVAL); |
| 557 | } | 546 | } |
| 558 | 547 | ||
| 559 | height = ALIGN(mode_cmd->height, 8); | ||
| 560 | if (obj->size < pitch * height) { | ||
| 561 | DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n", | ||
| 562 | pitch * height, obj->size); | ||
| 563 | return ERR_PTR(-EINVAL); | ||
| 564 | } | ||
| 565 | |||
| 566 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); | 548 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); |
| 567 | if (amdgpu_fb == NULL) { | 549 | if (amdgpu_fb == NULL) { |
| 568 | drm_gem_object_put_unlocked(obj); | 550 | drm_gem_object_put_unlocked(obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index fbf0ee5201c3..c3613604a4f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | config HSA_AMD | 5 | config HSA_AMD |
| 6 | bool "HSA kernel driver for AMD GPU devices" | 6 | bool "HSA kernel driver for AMD GPU devices" |
| 7 | depends on DRM_AMDGPU && X86_64 | 7 | depends on DRM_AMDGPU && (X86_64 || ARM64) |
| 8 | imply AMD_IOMMU_V2 | 8 | imply AMD_IOMMU_V2 if X86_64 |
| 9 | select MMU_NOTIFIER | 9 | select MMU_NOTIFIER |
| 10 | help | 10 | help |
| 11 | Enable this if you want to use HSA features on AMD GPU devices. | 11 | Enable this if you want to use HSA features on AMD GPU devices. |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b7bc7d7d048f..5d85ff341385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
| @@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
| 863 | return 0; | 863 | return 0; |
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | #ifdef CONFIG_X86_64 | ||
| 866 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | 867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, |
| 867 | uint32_t *num_entries, | 868 | uint32_t *num_entries, |
| 868 | struct crat_subtype_iolink *sub_type_hdr) | 869 | struct crat_subtype_iolink *sub_type_hdr) |
| @@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | |||
| 905 | 906 | ||
| 906 | return 0; | 907 | return 0; |
| 907 | } | 908 | } |
| 909 | #endif | ||
| 908 | 910 | ||
| 909 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU | 911 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU |
| 910 | * | 912 | * |
| @@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 920 | struct crat_subtype_generic *sub_type_hdr; | 922 | struct crat_subtype_generic *sub_type_hdr; |
| 921 | int avail_size = *size; | 923 | int avail_size = *size; |
| 922 | int numa_node_id; | 924 | int numa_node_id; |
| 925 | #ifdef CONFIG_X86_64 | ||
| 923 | uint32_t entries = 0; | 926 | uint32_t entries = 0; |
| 927 | #endif | ||
| 924 | int ret = 0; | 928 | int ret = 0; |
| 925 | 929 | ||
| 926 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) | 930 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) |
| @@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 982 | sub_type_hdr->length); | 986 | sub_type_hdr->length); |
| 983 | 987 | ||
| 984 | /* Fill in Subtype: IO Link */ | 988 | /* Fill in Subtype: IO Link */ |
| 989 | #ifdef CONFIG_X86_64 | ||
| 985 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, | 990 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, |
| 986 | &entries, | 991 | &entries, |
| 987 | (struct crat_subtype_iolink *)sub_type_hdr); | 992 | (struct crat_subtype_iolink *)sub_type_hdr); |
| @@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 992 | 997 | ||
| 993 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + | 998 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + |
| 994 | sub_type_hdr->length * entries); | 999 | sub_type_hdr->length * entries); |
| 1000 | #else | ||
| 1001 | pr_info("IO link not available for non x86 platforms\n"); | ||
| 1002 | #endif | ||
| 995 | 1003 | ||
| 996 | crat_table->num_domains++; | 1004 | crat_table->num_domains++; |
| 997 | } | 1005 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5f5b2acedbac..09da91644f9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) | |||
| 1093 | * the GPU device is not already present in the topology device | 1093 | * the GPU device is not already present in the topology device |
| 1094 | * list then return NULL. This means a new topology device has to | 1094 | * list then return NULL. This means a new topology device has to |
| 1095 | * be created for this GPU. | 1095 | * be created for this GPU. |
| 1096 | * TODO: Rather than assiging @gpu to first topology device withtout | ||
| 1097 | * gpu attached, it will better to have more stringent check. | ||
| 1098 | */ | 1096 | */ |
| 1099 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | 1097 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) |
| 1100 | { | 1098 | { |
| @@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | |||
| 1102 | struct kfd_topology_device *out_dev = NULL; | 1100 | struct kfd_topology_device *out_dev = NULL; |
| 1103 | 1101 | ||
| 1104 | down_write(&topology_lock); | 1102 | down_write(&topology_lock); |
| 1105 | list_for_each_entry(dev, &topology_device_list, list) | 1103 | list_for_each_entry(dev, &topology_device_list, list) { |
| 1104 | /* Discrete GPUs need their own topology device list | ||
| 1105 | * entries. Don't assign them to CPU/APU nodes. | ||
| 1106 | */ | ||
| 1107 | if (!gpu->device_info->needs_iommu_device && | ||
| 1108 | dev->node_props.cpu_cores_count) | ||
| 1109 | continue; | ||
| 1110 | |||
| 1106 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { | 1111 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { |
| 1107 | dev->gpu = gpu; | 1112 | dev->gpu = gpu; |
| 1108 | out_dev = dev; | 1113 | out_dev = dev; |
| 1109 | break; | 1114 | break; |
| 1110 | } | 1115 | } |
| 1116 | } | ||
| 1111 | up_write(&topology_lock); | 1117 | up_write(&topology_lock); |
| 1112 | return out_dev; | 1118 | return out_dev; |
| 1113 | } | 1119 | } |
| @@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) | |||
| 1392 | 1398 | ||
| 1393 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | 1399 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) |
| 1394 | { | 1400 | { |
| 1395 | const struct cpuinfo_x86 *cpuinfo; | ||
| 1396 | int first_cpu_of_numa_node; | 1401 | int first_cpu_of_numa_node; |
| 1397 | 1402 | ||
| 1398 | if (!cpumask || cpumask == cpu_none_mask) | 1403 | if (!cpumask || cpumask == cpu_none_mask) |
| @@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | |||
| 1400 | first_cpu_of_numa_node = cpumask_first(cpumask); | 1405 | first_cpu_of_numa_node = cpumask_first(cpumask); |
| 1401 | if (first_cpu_of_numa_node >= nr_cpu_ids) | 1406 | if (first_cpu_of_numa_node >= nr_cpu_ids) |
| 1402 | return -1; | 1407 | return -1; |
| 1403 | cpuinfo = &cpu_data(first_cpu_of_numa_node); | 1408 | #ifdef CONFIG_X86_64 |
| 1404 | 1409 | return cpu_data(first_cpu_of_numa_node).apicid; | |
| 1405 | return cpuinfo->apicid; | 1410 | #else |
| 1411 | return first_cpu_of_numa_node; | ||
| 1412 | #endif | ||
| 1406 | } | 1413 | } |
| 1407 | 1414 | ||
| 1408 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor | 1415 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 34f35e9a3c46..f4fa40c387d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |||
| 1772 | + caps.min_input_signal * 0x101; | 1772 | + caps.min_input_signal * 0x101; |
| 1773 | 1773 | ||
| 1774 | if (dc_link_set_backlight_level(dm->backlight_link, | 1774 | if (dc_link_set_backlight_level(dm->backlight_link, |
| 1775 | brightness, 0, 0)) | 1775 | brightness, 0)) |
| 1776 | return 0; | 1776 | return 0; |
| 1777 | else | 1777 | else |
| 1778 | return 1; | 1778 | return 1; |
| @@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && | 5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && |
| 5935 | !new_crtc_state->color_mgmt_changed && | 5935 | !new_crtc_state->color_mgmt_changed && |
| 5936 | !new_crtc_state->vrr_enabled) | 5936 | old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) |
| 5937 | continue; | 5937 | continue; |
| 5938 | 5938 | ||
| 5939 | if (!new_crtc_state->enable) | 5939 | if (!new_crtc_state->enable) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 52deacf39841..b0265dbebd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link) | |||
| 2190 | 2190 | ||
| 2191 | bool dc_link_set_backlight_level(const struct dc_link *link, | 2191 | bool dc_link_set_backlight_level(const struct dc_link *link, |
| 2192 | uint32_t backlight_pwm_u16_16, | 2192 | uint32_t backlight_pwm_u16_16, |
| 2193 | uint32_t frame_ramp, | 2193 | uint32_t frame_ramp) |
| 2194 | const struct dc_stream_state *stream) | ||
| 2195 | { | 2194 | { |
| 2196 | struct dc *core_dc = link->ctx->dc; | 2195 | struct dc *core_dc = link->ctx->dc; |
| 2197 | struct abm *abm = core_dc->res_pool->abm; | 2196 | struct abm *abm = core_dc->res_pool->abm; |
| @@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
| 2206 | (abm->funcs->set_backlight_level_pwm == NULL)) | 2205 | (abm->funcs->set_backlight_level_pwm == NULL)) |
| 2207 | return false; | 2206 | return false; |
| 2208 | 2207 | ||
| 2209 | if (stream) | ||
| 2210 | ((struct dc_stream_state *)stream)->bl_pwm_level = | ||
| 2211 | backlight_pwm_u16_16; | ||
| 2212 | |||
| 2213 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); | 2208 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); |
| 2214 | 2209 | ||
| 2215 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", | 2210 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
| @@ -2637,11 +2632,6 @@ void core_link_enable_stream( | |||
| 2637 | 2632 | ||
| 2638 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | 2633 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) |
| 2639 | enable_stream_features(pipe_ctx); | 2634 | enable_stream_features(pipe_ctx); |
| 2640 | |||
| 2641 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | ||
| 2642 | pipe_ctx->stream->bl_pwm_level, | ||
| 2643 | 0, | ||
| 2644 | pipe_ctx->stream); | ||
| 2645 | } | 2635 | } |
| 2646 | 2636 | ||
| 2647 | } | 2637 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
| @@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
| 146 | */ | 146 | */ |
| 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, |
| 148 | uint32_t backlight_pwm_u16_16, | 148 | uint32_t backlight_pwm_u16_16, |
| 149 | uint32_t frame_ramp, | 149 | uint32_t frame_ramp); |
| 150 | const struct dc_stream_state *stream); | ||
| 151 | 150 | ||
| 152 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 151 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
| 153 | 152 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
| @@ -91,7 +91,6 @@ struct dc_stream_state { | |||
| 91 | 91 | ||
| 92 | /* DMCU info */ | 92 | /* DMCU info */ |
| 93 | unsigned int abm_level; | 93 | unsigned int abm_level; |
| 94 | unsigned int bl_pwm_level; | ||
| 95 | 94 | ||
| 96 | /* from core_stream struct */ | 95 | /* from core_stream struct */ |
| 97 | struct dc_context *ctx; | 96 | struct dc_context *ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4bf24758217f..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
| @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | |||
| 1000 | 1000 | ||
| 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); | 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); |
| 1002 | 1002 | ||
| 1003 | if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | 1003 | if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) |
| 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ |
| 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); |
| 1006 | /* un-mute audio */ | 1006 | /* un-mute audio */ |
| @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( |
| 1018 | pipe_ctx->stream_res.stream_enc, true); | 1018 | pipe_ctx->stream_res.stream_enc, true); |
| 1019 | if (pipe_ctx->stream_res.audio) { | 1019 | if (pipe_ctx->stream_res.audio) { |
| 1020 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
| 1021 | |||
| 1020 | if (option != KEEP_ACQUIRED_RESOURCE || | 1022 | if (option != KEEP_ACQUIRED_RESOURCE || |
| 1021 | !dc->debug.az_endpoint_mute_only) { | 1023 | !dc->debug.az_endpoint_mute_only) { |
| 1022 | /*only disable az_endpoint if power down or free*/ | 1024 | /*only disable az_endpoint if power down or free*/ |
| @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1036 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); | 1038 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); |
| 1037 | pipe_ctx->stream_res.audio = NULL; | 1039 | pipe_ctx->stream_res.audio = NULL; |
| 1038 | } | 1040 | } |
| 1041 | if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | ||
| 1042 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | ||
| 1043 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | ||
| 1039 | 1044 | ||
| 1040 | /* TODO: notify audio driver for if audio modes list changed | 1045 | /* TODO: notify audio driver for if audio modes list changed |
| 1041 | * add audio mode list change flag */ | 1046 | * add audio mode list change flag */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | |||
| @@ -463,7 +463,7 @@ void dpp1_set_cursor_position( | |||
| 463 | if (src_y_offset >= (int)param->viewport.height) | 463 | if (src_y_offset >= (int)param->viewport.height) |
| 464 | cur_en = 0; /* not visible beyond bottom edge*/ | 464 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 465 | 465 | ||
| 466 | if (src_y_offset < 0) | 466 | if (src_y_offset + (int)height <= 0) |
| 467 | cur_en = 0; /* not visible beyond top edge*/ | 467 | cur_en = 0; /* not visible beyond top edge*/ |
| 468 | 468 | ||
| 469 | REG_UPDATE(CURSOR0_CONTROL, | 469 | REG_UPDATE(CURSOR0_CONTROL, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
| @@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( | |||
| 1140 | if (src_y_offset >= (int)param->viewport.height) | 1140 | if (src_y_offset >= (int)param->viewport.height) |
| 1141 | cur_en = 0; /* not visible beyond bottom edge*/ | 1141 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 1142 | 1142 | ||
| 1143 | if (src_y_offset < 0) //+ (int)hubp->curs_attr.height | 1143 | if (src_y_offset + (int)hubp->curs_attr.height <= 0) |
| 1144 | cur_en = 0; /* not visible beyond top edge*/ | 1144 | cur_en = 0; /* not visible beyond top edge*/ |
| 1145 | 1145 | ||
| 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) | 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) |
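Both cursor hunks above (DPP and HUBP) fix the same off-by-height check: a cursor whose top edge sits above the viewport is still partially visible as long as its bottom edge is below y == 0, so it must only be disabled once the whole rectangle has scrolled off the top. A small sketch of the corrected bound:

    #include <assert.h>
    #include <stdbool.h>

    /* A cursor is hidden only when it lies entirely above the viewport,
     * i.e. its bottom edge (src_y_offset + height) is at or above y == 0. */
    static bool cursor_visible_at_top(int src_y_offset, int height)
    {
        return src_y_offset + height > 0;
    }

    int main(void)
    {
        assert(cursor_visible_at_top(-10, 64));   /* partially visible: keep on */
        assert(!cursor_visible_at_top(-64, 64));  /* fully above viewport: hide */
        return 0;
    }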
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91e015e14355..58a12ddf12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
| @@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface( | |||
| 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) | 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) |
| 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2358 | 2358 | tg = pipe_ctx->stream_res.tg; | |
| 2359 | /* Skip inactive pipes and ones already updated */ | 2359 | /* Skip inactive pipes and ones already updated */ |
| 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream |
| 2361 | || !pipe_ctx->plane_state) | 2361 | || !pipe_ctx->plane_state |
| 2362 | || !tg->funcs->is_tg_enabled(tg)) | ||
| 2362 | continue; | 2363 | continue; |
| 2363 | 2364 | ||
| 2364 | pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); | 2365 | tg->funcs->lock(tg); |
| 2365 | 2366 | ||
| 2366 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( | 2367 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( |
| 2367 | pipe_ctx->plane_res.hubp, | 2368 | pipe_ctx->plane_res.hubp, |
| 2368 | &pipe_ctx->dlg_regs, | 2369 | &pipe_ctx->dlg_regs, |
| 2369 | &pipe_ctx->ttu_regs); | 2370 | &pipe_ctx->ttu_regs); |
| 2370 | } | ||
| 2371 | |||
| 2372 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 2373 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 2374 | 2371 | ||
| 2375 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2372 | tg->funcs->unlock(tg); |
| 2376 | || !pipe_ctx->plane_state) | 2373 | } |
| 2377 | continue; | ||
| 2378 | |||
| 2379 | dcn10_pipe_control_lock(dc, pipe_ctx, false); | ||
| 2380 | } | ||
| 2381 | 2374 | ||
| 2382 | if (num_planes == 0) | 2375 | if (num_planes == 0) |
| 2383 | false_optc_underflow_wa(dc, stream, tg); | 2376 | false_optc_underflow_wa(dc, stream, tg); |
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c | |||
| @@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le | |||
| 57 | #define NUM_POWER_FN_SEGS 8 | 57 | #define NUM_POWER_FN_SEGS 8 |
| 58 | #define NUM_BL_CURVE_SEGS 16 | 58 | #define NUM_BL_CURVE_SEGS 16 |
| 59 | 59 | ||
| 60 | #pragma pack(push, 1) | ||
| 60 | /* NOTE: iRAM is 256B in size */ | 61 | /* NOTE: iRAM is 256B in size */ |
| 61 | struct iram_table_v_2 { | 62 | struct iram_table_v_2 { |
| 62 | /* flags */ | 63 | /* flags */ |
| @@ -100,6 +101,7 @@ struct iram_table_v_2 { | |||
| 100 | uint8_t dummy8; /* 0xfe */ | 101 | uint8_t dummy8; /* 0xfe */ |
| 101 | uint8_t dummy9; /* 0xff */ | 102 | uint8_t dummy9; /* 0xff */ |
| 102 | }; | 103 | }; |
| 104 | #pragma pack(pop) | ||
| 103 | 105 | ||
| 104 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) | 106 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) |
| 105 | { | 107 | { |
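Wrapping the iRAM table in #pragma pack(push, 1) ... #pragma pack(pop) matters because the table must match the 256-byte firmware layout byte for byte; without packing the compiler is free to pad between mixed-width fields. A small standalone sketch of the effect (the fields are illustrative, not the real iram_table_v_2 contents):

    #include <stdint.h>
    #include <stdio.h>

    #pragma pack(push, 1)
    struct packed_example {
        uint8_t  flags;
        uint16_t threshold;   /* would otherwise be padded to a 2-byte boundary */
        uint8_t  level;
    };
    #pragma pack(pop)

    struct unpacked_example {
        uint8_t  flags;
        uint16_t threshold;
        uint8_t  level;
    };

    int main(void)
    {
        /* Typically prints 4 vs 6 bytes on common ABIs. */
        printf("packed: %zu bytes, unpacked: %zu bytes\n",
               sizeof(struct packed_example), sizeof(struct unpacked_example));
        return 0;
    }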
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index b8747a5c9204..99d596dc0e89 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "vega10_pptable.h" | 32 | #include "vega10_pptable.h" |
| 33 | 33 | ||
| 34 | #define NUM_DSPCLK_LEVELS 8 | 34 | #define NUM_DSPCLK_LEVELS 8 |
| 35 | #define VEGA10_ENGINECLOCK_HARDMAX 198000 | ||
| 35 | 36 | ||
| 36 | static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, | 37 | static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, |
| 37 | enum phm_platform_caps cap) | 38 | enum phm_platform_caps cap) |
| @@ -258,7 +259,26 @@ static int init_over_drive_limits( | |||
| 258 | struct pp_hwmgr *hwmgr, | 259 | struct pp_hwmgr *hwmgr, |
| 259 | const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) | 260 | const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) |
| 260 | { | 261 | { |
| 261 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | 262 | const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = |
| 263 | (const ATOM_Vega10_GFXCLK_Dependency_Table *) | ||
| 264 | (((unsigned long) powerplay_table) + | ||
| 265 | le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); | ||
| 266 | bool is_acg_enabled = false; | ||
| 267 | ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; | ||
| 268 | |||
| 269 | if (gfxclk_dep_table->ucRevId == 1) { | ||
| 270 | patom_record_v2 = | ||
| 271 | (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; | ||
| 272 | is_acg_enabled = | ||
| 273 | (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable; | ||
| 274 | } | ||
| 275 | |||
| 276 | if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX && | ||
| 277 | !is_acg_enabled) | ||
| 278 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | ||
| 279 | VEGA10_ENGINECLOCK_HARDMAX; | ||
| 280 | else | ||
| 281 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | ||
| 262 | le32_to_cpu(powerplay_table->ulMaxODEngineClock); | 282 | le32_to_cpu(powerplay_table->ulMaxODEngineClock); |
| 263 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = | 283 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = |
| 264 | le32_to_cpu(powerplay_table->ulMaxODMemoryClock); | 284 | le32_to_cpu(powerplay_table->ulMaxODMemoryClock); |
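The logic added above reduces to a simple rule: honour the power-play table's maximum overdrive engine clock only when the last GFXCLK dependency entry reports ACG enabled, otherwise cap it at the hard maximum. A compact sketch of that decision (the constant's unit follows whatever the table uses and is an assumption here):

    #include <stdbool.h>
    #include <stdint.h>

    #define ENGINECLOCK_HARDMAX_SKETCH 198000u

    static uint32_t overdrive_engine_clock_sketch(uint32_t table_max,
                                                  bool acg_enabled)
    {
        if (table_max > ENGINECLOCK_HARDMAX_SKETCH && !acg_enabled)
            return ENGINECLOCK_HARDMAX_SKETCH;
        return table_max;
    }

    int main(void)
    {
        /* A table value above the hard max is only honoured with ACG on. */
        return overdrive_engine_clock_sketch(200000u, false)
                        == ENGINECLOCK_HARDMAX_SKETCH ? 0 : 1;
    }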
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
| @@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 753 | return 0; | 753 | return 0; |
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) | ||
| 757 | { | ||
| 758 | uint32_t result; | ||
| 759 | |||
| 760 | PP_ASSERT_WITH_CODE( | ||
| 761 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, | ||
| 762 | "[Run_ACG_BTC] Attempt to run ACG BTC failed!", | ||
| 763 | return -EINVAL); | ||
| 764 | |||
| 765 | result = smum_get_argument(hwmgr); | ||
| 766 | PP_ASSERT_WITH_CODE(result == 1, | ||
| 767 | "Failed to run ACG BTC!", return -EINVAL); | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 756 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) | 772 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) |
| 757 | { | 773 | { |
| 758 | struct vega12_hwmgr *data = | 774 | struct vega12_hwmgr *data = |
| @@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
| 931 | "Failed to initialize SMC table!", | 947 | "Failed to initialize SMC table!", |
| 932 | result = tmp_result); | 948 | result = tmp_result); |
| 933 | 949 | ||
| 950 | tmp_result = vega12_run_acg_btc(hwmgr); | ||
| 951 | PP_ASSERT_WITH_CODE(!tmp_result, | ||
| 952 | "Failed to run ACG BTC!", | ||
| 953 | result = tmp_result); | ||
| 954 | |||
| 934 | result = vega12_enable_all_smu_features(hwmgr); | 955 | result = vega12_enable_all_smu_features(hwmgr); |
| 935 | PP_ASSERT_WITH_CODE(!result, | 956 | PP_ASSERT_WITH_CODE(!result, |
| 936 | "Failed to enable all smu features!", | 957 | "Failed to enable all smu features!", |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
| 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2802 | MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
| 2802 | return 0; | 2803 | return 0; |
| 2803 | } | 2804 | } |
| 2804 | 2805 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h | |||
| @@ -41,7 +41,7 @@ struct intel_gvt_mpt { | |||
| 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
| 42 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
| 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
| 44 | void (*detach_vgpu)(unsigned long handle); | 44 | void (*detach_vgpu)(void *vgpu); |
| 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); | 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); |
| 46 | unsigned long (*from_virt_to_mfn)(void *p); | 46 | unsigned long (*from_virt_to_mfn)(void *p); |
| 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); | 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 996 | { | 996 | { |
| 997 | unsigned int index; | 997 | unsigned int index; |
| 998 | u64 virtaddr; | 998 | u64 virtaddr; |
| 999 | unsigned long req_size, pgoff = 0; | 999 | unsigned long req_size, pgoff, req_start; |
| 1000 | pgprot_t pg_prot; | 1000 | pgprot_t pg_prot; |
| 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
| 1002 | 1002 | ||
| @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 1014 | pg_prot = vma->vm_page_prot; | 1014 | pg_prot = vma->vm_page_prot; |
| 1015 | virtaddr = vma->vm_start; | 1015 | virtaddr = vma->vm_start; |
| 1016 | req_size = vma->vm_end - vma->vm_start; | 1016 | req_size = vma->vm_end - vma->vm_start; |
| 1017 | pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; | 1017 | pgoff = vma->vm_pgoff & |
| 1018 | ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); | ||
| 1019 | req_start = pgoff << PAGE_SHIFT; | ||
| 1020 | |||
| 1021 | if (!intel_vgpu_in_aperture(vgpu, req_start)) | ||
| 1022 | return -EINVAL; | ||
| 1023 | if (req_start + req_size > | ||
| 1024 | vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) | ||
| 1025 | return -EINVAL; | ||
| 1026 | |||
| 1027 | pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; | ||
| 1018 | 1028 | ||
| 1019 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); | 1029 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); |
| 1020 | } | 1030 | } |
| @@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) | |||
| 1662 | return 0; | 1672 | return 0; |
| 1663 | } | 1673 | } |
| 1664 | 1674 | ||
| 1665 | static void kvmgt_detach_vgpu(unsigned long handle) | 1675 | static void kvmgt_detach_vgpu(void *p_vgpu) |
| 1666 | { | 1676 | { |
| 1667 | /* nothing to do here */ | 1677 | int i; |
| 1678 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; | ||
| 1679 | |||
| 1680 | if (!vgpu->vdev.region) | ||
| 1681 | return; | ||
| 1682 | |||
| 1683 | for (i = 0; i < vgpu->vdev.num_regions; i++) | ||
| 1684 | if (vgpu->vdev.region[i].ops->release) | ||
| 1685 | vgpu->vdev.region[i].ops->release(vgpu, | ||
| 1686 | &vgpu->vdev.region[i]); | ||
| 1687 | vgpu->vdev.num_regions = 0; | ||
| 1688 | kfree(vgpu->vdev.region); | ||
| 1689 | vgpu->vdev.region = NULL; | ||
| 1668 | } | 1690 | } |
| 1669 | 1691 | ||
| 1670 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | 1692 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) |
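The first kvmgt.c hunk above stops mapping the aperture base unconditionally: the user-supplied vm_pgoff is masked down to an offset within the BAR index, and both ends of the requested range are validated against the vGPU's aperture slice before remap_pfn_range() runs. A hedged sketch of that bounds check (structure and helper names are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT_SKETCH 12

    struct vgpu_sketch {
        uint64_t aperture_offset;       /* start of this vGPU's slice */
        uint64_t aperture_size;
    };

    static bool mmap_range_ok_sketch(const struct vgpu_sketch *vgpu,
                                     uint64_t pgoff, uint64_t req_size)
    {
        uint64_t req_start = pgoff << PAGE_SHIFT_SKETCH;

        if (req_start < vgpu->aperture_offset)
            return false;
        if (req_start + req_size >
            vgpu->aperture_offset + vgpu->aperture_size)
            return false;
        return true;
    }

    int main(void)
    {
        struct vgpu_sketch v = { .aperture_offset = 0x100000,
                                 .aperture_size  = 0x100000 };

        /* In range: page 0x100 (1 MiB) for 4 KiB. Out of range: 2 MiB. */
        return (mmap_range_ok_sketch(&v, 0x100, 0x1000) &&
                !mmap_range_ok_sketch(&v, 0x200, 0x1000)) ? 0 : 1;
    }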
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h | |||
| @@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) | |||
| 101 | if (!intel_gvt_host.mpt->detach_vgpu) | 101 | if (!intel_gvt_host.mpt->detach_vgpu) |
| 102 | return; | 102 | return; |
| 103 | 103 | ||
| 104 | intel_gvt_host.mpt->detach_vgpu(vgpu->handle); | 104 | intel_gvt_host.mpt->detach_vgpu(vgpu); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) | 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 5567ddc7760f..55bb7885e228 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 332 | 332 | ||
| 333 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); | 333 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); |
| 334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); | 334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); |
| 335 | |||
| 336 | wa_ctx->indirect_ctx.obj = NULL; | ||
| 337 | wa_ctx->indirect_ctx.shadow_va = NULL; | ||
| 335 | } | 338 | } |
| 336 | 339 | ||
| 337 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | 340 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, |
| @@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 911 | 914 | ||
| 912 | list_del_init(&workload->list); | 915 | list_del_init(&workload->list); |
| 913 | 916 | ||
| 914 | if (!workload->status) { | ||
| 915 | release_shadow_batch_buffer(workload); | ||
| 916 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
| 917 | } | ||
| 918 | |||
| 919 | if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { | 917 | if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { |
| 920 | /* if workload->status is not successful means HW GPU | 918 | /* if workload->status is not successful means HW GPU |
| 921 | * has occurred GPU hang or something wrong with i915/GVT, | 919 | * has occurred GPU hang or something wrong with i915/GVT, |
| @@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) | |||
| 1283 | { | 1281 | { |
| 1284 | struct intel_vgpu_submission *s = &workload->vgpu->submission; | 1282 | struct intel_vgpu_submission *s = &workload->vgpu->submission; |
| 1285 | 1283 | ||
| 1284 | release_shadow_batch_buffer(workload); | ||
| 1285 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
| 1286 | |||
| 1286 | if (workload->shadow_mm) | 1287 | if (workload->shadow_mm) |
| 1287 | intel_vgpu_mm_put(workload->shadow_mm); | 1288 | intel_vgpu_mm_put(workload->shadow_mm); |
| 1288 | 1289 | ||
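The scheduler.c hunks above move release_shadow_batch_buffer()/release_shadow_wa_ctx() into intel_vgpu_destroy_workload() and clear the wa_ctx pointers after freeing, so cleanup happens exactly once no matter how the workload completed. A small sketch of that idempotent-release shape, with hypothetical names:

/* Idempotent release: clearing the pointers after freeing makes a second
 * call (e.g. from an unconditional destroy path) a harmless no-op. */
struct shadow_ctx {
	void *obj;		/* pinned object, or NULL once released */
	void *shadow_va;	/* CPU mapping of that object */
};

static void release_shadow(struct shadow_ctx *ctx)
{
	if (!ctx->obj)
		return;		/* never allocated, or already released */

	/* ...unpin/unmap and drop the reference on ctx->obj here... */

	ctx->obj = NULL;
	ctx->shadow_va = NULL;
}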
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4796f40a6d4f..eab9341a5152 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) | |||
| 303 | */ | 303 | */ |
| 304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { | 304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { |
| 305 | prio |= I915_PRIORITY_NEWCLIENT; | 305 | prio |= I915_PRIORITY_NEWCLIENT; |
| 306 | active->sched.attr.priority = prio; | ||
| 306 | list_move_tail(&active->sched.link, | 307 | list_move_tail(&active->sched.link, |
| 307 | i915_sched_lookup_priolist(engine, prio)); | 308 | i915_sched_lookup_priolist(engine, prio)); |
| 308 | } | 309 | } |
| @@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
| 645 | int i; | 646 | int i; |
| 646 | 647 | ||
| 647 | priolist_for_each_request_consume(rq, rn, p, i) { | 648 | priolist_for_each_request_consume(rq, rn, p, i) { |
| 649 | GEM_BUG_ON(last && | ||
| 650 | need_preempt(engine, last, rq_prio(rq))); | ||
| 651 | |||
| 648 | /* | 652 | /* |
| 649 | * Can we combine this request with the current port? | 653 | * Can we combine this request with the current port? |
| 650 | * It has to be the same context/ringbuffer and not | 654 | * It has to be the same context/ringbuffer and not |
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
| @@ -46,7 +46,6 @@ struct meson_crtc { | |||
| 46 | struct drm_crtc base; | 46 | struct drm_crtc base; |
| 47 | struct drm_pending_vblank_event *event; | 47 | struct drm_pending_vblank_event *event; |
| 48 | struct meson_drm *priv; | 48 | struct meson_drm *priv; |
| 49 | bool enabled; | ||
| 50 | }; | 49 | }; |
| 51 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) | 50 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) |
| 52 | 51 | ||
| @@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { | |||
| 82 | 81 | ||
| 83 | }; | 82 | }; |
| 84 | 83 | ||
| 85 | static void meson_crtc_enable(struct drm_crtc *crtc) | 84 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, |
| 85 | struct drm_crtc_state *old_state) | ||
| 86 | { | 86 | { |
| 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 88 | struct drm_crtc_state *crtc_state = crtc->state; | 88 | struct drm_crtc_state *crtc_state = crtc->state; |
| @@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) | |||
| 108 | 108 | ||
| 109 | drm_crtc_vblank_on(crtc); | 109 | drm_crtc_vblank_on(crtc); |
| 110 | 110 | ||
| 111 | meson_crtc->enabled = true; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
| 115 | struct drm_crtc_state *old_state) | ||
| 116 | { | ||
| 117 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
| 118 | struct meson_drm *priv = meson_crtc->priv; | ||
| 119 | |||
| 120 | DRM_DEBUG_DRIVER("\n"); | ||
| 121 | |||
| 122 | if (!meson_crtc->enabled) | ||
| 123 | meson_crtc_enable(crtc); | ||
| 124 | |||
| 125 | priv->viu.osd1_enabled = true; | 111 | priv->viu.osd1_enabled = true; |
| 126 | } | 112 | } |
| 127 | 113 | ||
| @@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 153 | 139 | ||
| 154 | crtc->state->event = NULL; | 140 | crtc->state->event = NULL; |
| 155 | } | 141 | } |
| 156 | |||
| 157 | meson_crtc->enabled = false; | ||
| 158 | } | 142 | } |
| 159 | 143 | ||
| 160 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | 144 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, |
| @@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | |||
| 163 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 147 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 164 | unsigned long flags; | 148 | unsigned long flags; |
| 165 | 149 | ||
| 166 | if (crtc->state->enable && !meson_crtc->enabled) | ||
| 167 | meson_crtc_enable(crtc); | ||
| 168 | |||
| 169 | if (crtc->state->event) { | 150 | if (crtc->state->event) { |
| 170 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | 151 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); |
| 171 | 152 | ||
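The meson_crtc.c change drops the driver-private "enabled" flag and folds meson_crtc_enable() into the .atomic_enable callback; the atomic helpers already call enable/disable at the right points, so no manual bookkeeping is needed. A minimal sketch of that helper wiring, assuming hypothetical my_crtc_* callbacks (kernels of this era pass the old CRTC state to both hooks):

#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

static void my_crtc_atomic_enable(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_state)
{
	/* program the hardware from crtc->state, then turn vblanks on */
	drm_crtc_vblank_on(crtc);
}

static void my_crtc_atomic_disable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	drm_crtc_vblank_off(crtc);
}

static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
	.atomic_enable	= my_crtc_atomic_enable,
	.atomic_disable	= my_crtc_atomic_disable,
};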
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
| @@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { | |||
| 75 | .fb_create = drm_gem_fb_create, | 75 | .fb_create = drm_gem_fb_create, |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { | ||
| 79 | .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, | ||
| 80 | }; | ||
| 81 | |||
| 78 | static irqreturn_t meson_irq(int irq, void *arg) | 82 | static irqreturn_t meson_irq(int irq, void *arg) |
| 79 | { | 83 | { |
| 80 | struct drm_device *dev = arg; | 84 | struct drm_device *dev = arg; |
| @@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | |||
| 266 | drm->mode_config.max_width = 3840; | 270 | drm->mode_config.max_width = 3840; |
| 267 | drm->mode_config.max_height = 2160; | 271 | drm->mode_config.max_height = 2160; |
| 268 | drm->mode_config.funcs = &meson_mode_config_funcs; | 272 | drm->mode_config.funcs = &meson_mode_config_funcs; |
| 273 | drm->mode_config.helper_private = &meson_mode_config_helpers; | ||
| 269 | 274 | ||
| 270 | /* Hardware Initialization */ | 275 | /* Hardware Initialization */ |
| 271 | 276 | ||
| @@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, | |||
| 388 | remote_node = of_graph_get_remote_port_parent(ep); | 393 | remote_node = of_graph_get_remote_port_parent(ep); |
| 389 | if (!remote_node || | 394 | if (!remote_node || |
| 390 | remote_node == parent || /* Ignore parent endpoint */ | 395 | remote_node == parent || /* Ignore parent endpoint */ |
| 391 | !of_device_is_available(remote_node)) | 396 | !of_device_is_available(remote_node)) { |
| 397 | of_node_put(remote_node); | ||
| 392 | continue; | 398 | continue; |
| 399 | } | ||
| 393 | 400 | ||
| 394 | count += meson_probe_remote(pdev, match, remote, remote_node); | 401 | count += meson_probe_remote(pdev, match, remote, remote_node); |
| 395 | 402 | ||
| @@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) | |||
| 408 | 415 | ||
| 409 | for_each_endpoint_of_node(np, ep) { | 416 | for_each_endpoint_of_node(np, ep) { |
| 410 | remote = of_graph_get_remote_port_parent(ep); | 417 | remote = of_graph_get_remote_port_parent(ep); |
| 411 | if (!remote || !of_device_is_available(remote)) | 418 | if (!remote || !of_device_is_available(remote)) { |
| 419 | of_node_put(remote); | ||
| 412 | continue; | 420 | continue; |
| 421 | } | ||
| 413 | 422 | ||
| 414 | count += meson_probe_remote(pdev, &match, np, remote); | 423 | count += meson_probe_remote(pdev, &match, np, remote); |
| 424 | of_node_put(remote); | ||
| 415 | } | 425 | } |
| 416 | 426 | ||
| 417 | if (count && !match) | 427 | if (count && !match) |
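The meson_drv.c hunks (like the rockchip and sun4i ones further down) fix OF node reference leaks: of_graph_get_remote_port_parent() returns a node with its refcount raised, so every exit path, including the early continue, must drop it. A self-contained sketch of the pattern with a hypothetical count_available_remotes() helper; of_node_put(NULL) is a documented no-op, which keeps the error path simple:

#include <linux/of.h>
#include <linux/of_graph.h>

static int count_available_remotes(struct device_node *np)
{
	struct device_node *ep, *remote;
	int count = 0;

	for_each_endpoint_of_node(np, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote || !of_device_is_available(remote)) {
			of_node_put(remote);	/* safe even when remote is NULL */
			continue;
		}

		count++;
		of_node_put(remote);		/* drop the reference in the normal path too */
	}

	return count;
}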
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 5beb83d1cf87..ce1b3cc4bf6d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c | |||
| @@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) | |||
| 944 | np = dev_pm_opp_get_of_node(opp); | 944 | np = dev_pm_opp_get_of_node(opp); |
| 945 | 945 | ||
| 946 | if (np) { | 946 | if (np) { |
| 947 | of_property_read_u32(np, "qcom,level", &val); | 947 | of_property_read_u32(np, "opp-level", &val); |
| 948 | of_node_put(np); | 948 | of_node_put(np); |
| 949 | } | 949 | } |
| 950 | 950 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2e4372ef17a3..2cfee1a4fe0b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 765 | adreno_gpu->rev = config->rev; | 765 | adreno_gpu->rev = config->rev; |
| 766 | 766 | ||
| 767 | adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; | 767 | adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; |
| 768 | adreno_gpu_config.irqname = "kgsl_3d0_irq"; | ||
| 769 | 768 | ||
| 770 | adreno_gpu_config.va_start = SZ_16M; | 769 | adreno_gpu_config.va_start = SZ_16M; |
| 771 | adreno_gpu_config.va_end = 0xffffffff; | 770 | adreno_gpu_config.va_end = 0xffffffff; |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index fd75870eb17f..6aefcd6db46b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | |||
| @@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, | |||
| 365 | &pdpu->pipe_qos_cfg); | 365 | &pdpu->pipe_qos_cfg); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) | ||
| 369 | { | ||
| 370 | struct dpu_plane *pdpu = to_dpu_plane(plane); | ||
| 371 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); | ||
| 372 | |||
| 373 | if (!pdpu->is_rt_pipe) | ||
| 374 | return; | ||
| 375 | |||
| 376 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | ||
| 377 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); | ||
| 378 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | ||
| 379 | } | ||
| 380 | |||
| 381 | /** | 368 | /** |
| 382 | * _dpu_plane_set_ot_limit - set OT limit for the given plane | 369 | * _dpu_plane_set_ot_limit - set OT limit for the given plane |
| 383 | * @plane: Pointer to drm plane | 370 | * @plane: Pointer to drm plane |
| @@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane) | |||
| 1248 | } | 1235 | } |
| 1249 | 1236 | ||
| 1250 | #ifdef CONFIG_DEBUG_FS | 1237 | #ifdef CONFIG_DEBUG_FS |
| 1238 | static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) | ||
| 1239 | { | ||
| 1240 | struct dpu_plane *pdpu = to_dpu_plane(plane); | ||
| 1241 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); | ||
| 1242 | |||
| 1243 | if (!pdpu->is_rt_pipe) | ||
| 1244 | return; | ||
| 1245 | |||
| 1246 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | ||
| 1247 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); | ||
| 1248 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | static ssize_t _dpu_plane_danger_read(struct file *file, | 1251 | static ssize_t _dpu_plane_danger_read(struct file *file, |
| 1252 | char __user *buff, size_t count, loff_t *ppos) | 1252 | char __user *buff, size_t count, loff_t *ppos) |
| 1253 | { | 1253 | { |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9cd6a96c6bf2..927e5d86f7c1 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
| @@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, | |||
| 250 | void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | 250 | void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, |
| 251 | struct msm_gem_vma *vma); | 251 | struct msm_gem_vma *vma); |
| 252 | int msm_gem_map_vma(struct msm_gem_address_space *aspace, | 252 | int msm_gem_map_vma(struct msm_gem_address_space *aspace, |
| 253 | struct msm_gem_vma *vma, struct sg_table *sgt, int npages); | 253 | struct msm_gem_vma *vma, int prot, |
| 254 | struct sg_table *sgt, int npages); | ||
| 254 | void msm_gem_close_vma(struct msm_gem_address_space *aspace, | 255 | void msm_gem_close_vma(struct msm_gem_address_space *aspace, |
| 255 | struct msm_gem_vma *vma); | 256 | struct msm_gem_vma *vma); |
| 256 | 257 | ||
| @@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo, | |||
| 333 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, | 334 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
| 334 | struct dma_buf *dmabuf, struct sg_table *sgt); | 335 | struct dma_buf *dmabuf, struct sg_table *sgt); |
| 335 | 336 | ||
| 337 | __printf(2, 3) | ||
| 336 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); | 338 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); |
| 337 | 339 | ||
| 338 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, | 340 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, |
| @@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); | |||
| 396 | int msm_debugfs_late_init(struct drm_device *dev); | 398 | int msm_debugfs_late_init(struct drm_device *dev); |
| 397 | int msm_rd_debugfs_init(struct drm_minor *minor); | 399 | int msm_rd_debugfs_init(struct drm_minor *minor); |
| 398 | void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); | 400 | void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); |
| 401 | __printf(3, 4) | ||
| 399 | void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | 402 | void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, |
| 400 | const char *fmt, ...); | 403 | const char *fmt, ...); |
| 401 | int msm_perf_debugfs_init(struct drm_minor *minor); | 404 | int msm_perf_debugfs_init(struct drm_minor *minor); |
| 402 | void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); | 405 | void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); |
| 403 | #else | 406 | #else |
| 404 | static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } | 407 | static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } |
| 408 | __printf(3, 4) | ||
| 405 | static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | 409 | static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, |
| 406 | const char *fmt, ...) {} | 410 | const char *fmt, ...) {} |
| 407 | static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} | 411 | static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} |
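The msm_drv.h hunks add __printf() annotations to the varargs helpers so the compiler checks format strings at every call site. A small sketch of how the attribute is applied, using a hypothetical my_obj_set_name():

#include <stdarg.h>
#include <linux/kernel.h>

struct my_obj {
	char name[32];
};

/* __printf(2, 3): argument 2 is the format string, varargs begin at 3,
 * so gcc/clang warn about mismatched format specifiers in callers. */
__printf(2, 3)
static void my_obj_set_name(struct my_obj *obj, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(obj->name, sizeof(obj->name), fmt, ap);
	va_end(ap);
}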
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 51a95da694d8..c8886d3071fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, | |||
| 391 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 391 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
| 392 | struct msm_gem_vma *vma; | 392 | struct msm_gem_vma *vma; |
| 393 | struct page **pages; | 393 | struct page **pages; |
| 394 | int prot = IOMMU_READ; | ||
| 395 | |||
| 396 | if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) | ||
| 397 | prot |= IOMMU_WRITE; | ||
| 394 | 398 | ||
| 395 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); | 399 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); |
| 396 | 400 | ||
| @@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, | |||
| 405 | if (IS_ERR(pages)) | 409 | if (IS_ERR(pages)) |
| 406 | return PTR_ERR(pages); | 410 | return PTR_ERR(pages); |
| 407 | 411 | ||
| 408 | return msm_gem_map_vma(aspace, vma, msm_obj->sgt, | 412 | return msm_gem_map_vma(aspace, vma, prot, |
| 409 | obj->size >> PAGE_SHIFT); | 413 | msm_obj->sgt, obj->size >> PAGE_SHIFT); |
| 410 | } | 414 | } |
| 411 | 415 | ||
| 412 | /* get iova and pin it. Should have a matching put */ | 416 | /* get iova and pin it. Should have a matching put */ |
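msm_gem.c now derives the IOMMU protection bits from the buffer-object flags instead of always mapping read/write, so buffers marked GPU-readonly are mapped without IOMMU_WRITE. A minimal sketch of that flag translation, with MY_BO_GPU_READONLY standing in for the driver's MSM_BO_GPU_READONLY bit:

#include <linux/iommu.h>
#include <linux/types.h>

#define MY_BO_GPU_READONLY	0x00000001	/* stand-in for MSM_BO_GPU_READONLY */

static int bo_iommu_prot(u32 bo_flags)
{
	int prot = IOMMU_READ;

	if (!(bo_flags & MY_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;	/* writable unless the BO is GPU-readonly */

	return prot;
}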
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 557360788084..49c04829cf34 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c | |||
| @@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | |||
| 68 | 68 | ||
| 69 | int | 69 | int |
| 70 | msm_gem_map_vma(struct msm_gem_address_space *aspace, | 70 | msm_gem_map_vma(struct msm_gem_address_space *aspace, |
| 71 | struct msm_gem_vma *vma, struct sg_table *sgt, int npages) | 71 | struct msm_gem_vma *vma, int prot, |
| 72 | struct sg_table *sgt, int npages) | ||
| 72 | { | 73 | { |
| 73 | unsigned size = npages << PAGE_SHIFT; | 74 | unsigned size = npages << PAGE_SHIFT; |
| 74 | int ret = 0; | 75 | int ret = 0; |
| @@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, | |||
| 86 | 87 | ||
| 87 | if (aspace->mmu) | 88 | if (aspace->mmu) |
| 88 | ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, | 89 | ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, |
| 89 | size, IOMMU_READ | IOMMU_WRITE); | 90 | size, prot); |
| 90 | 91 | ||
| 91 | if (ret) | 92 | if (ret) |
| 92 | vma->mapped = false; | 93 | vma->mapped = false; |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5f3eff304355..10babd18e286 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 900 | } | 900 | } |
| 901 | 901 | ||
| 902 | /* Get Interrupt: */ | 902 | /* Get Interrupt: */ |
| 903 | gpu->irq = platform_get_irq_byname(pdev, config->irqname); | 903 | gpu->irq = platform_get_irq(pdev, 0); |
| 904 | if (gpu->irq < 0) { | 904 | if (gpu->irq < 0) { |
| 905 | ret = gpu->irq; | 905 | ret = gpu->irq; |
| 906 | DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); | 906 | DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index efb49bb64191..ca17086f72c9 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h | |||
| @@ -31,7 +31,6 @@ struct msm_gpu_state; | |||
| 31 | 31 | ||
| 32 | struct msm_gpu_config { | 32 | struct msm_gpu_config { |
| 33 | const char *ioname; | 33 | const char *ioname; |
| 34 | const char *irqname; | ||
| 35 | uint64_t va_start; | 34 | uint64_t va_start; |
| 36 | uint64_t va_end; | 35 | uint64_t va_end; |
| 37 | unsigned int nr_rings; | 36 | unsigned int nr_rings; |
| @@ -63,7 +62,7 @@ struct msm_gpu_funcs { | |||
| 63 | struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); | 62 | struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); |
| 64 | void (*recover)(struct msm_gpu *gpu); | 63 | void (*recover)(struct msm_gpu *gpu); |
| 65 | void (*destroy)(struct msm_gpu *gpu); | 64 | void (*destroy)(struct msm_gpu *gpu); |
| 66 | #ifdef CONFIG_DEBUG_FS | 65 | #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) |
| 67 | /* show GPU status in debugfs: */ | 66 | /* show GPU status in debugfs: */ |
| 68 | void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, | 67 | void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, |
| 69 | struct drm_printer *p); | 68 | struct drm_printer *p); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 90e9d0a48dc0..d21172933d92 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
| 115 | char *fptr = &fifo->buf[fifo->head]; | 115 | char *fptr = &fifo->buf[fifo->head]; |
| 116 | int n; | 116 | int n; |
| 117 | 117 | ||
| 118 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 118 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); |
| 119 | if (!rd->open) | ||
| 120 | return; | ||
| 119 | 121 | ||
| 120 | /* Note that smp_load_acquire() is not strictly required | 122 | /* Note that smp_load_acquire() is not strictly required |
| 121 | * as CIRC_SPACE_TO_END() does not access the tail more | 123 | * as CIRC_SPACE_TO_END() does not access the tail more |
| @@ -213,7 +215,10 @@ out: | |||
| 213 | static int rd_release(struct inode *inode, struct file *file) | 215 | static int rd_release(struct inode *inode, struct file *file) |
| 214 | { | 216 | { |
| 215 | struct msm_rd_state *rd = inode->i_private; | 217 | struct msm_rd_state *rd = inode->i_private; |
| 218 | |||
| 216 | rd->open = false; | 219 | rd->open = false; |
| 220 | wake_up_all(&rd->fifo_event); | ||
| 221 | |||
| 217 | return 0; | 222 | return 0; |
| 218 | } | 223 | } |
| 219 | 224 | ||
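The msm_rd.c writer used to sleep in wait_event() until FIFO space appeared; once the reader closes the debugfs file nothing drains the FIFO, so the wait must also end when rd->open goes false, and the release path must wake sleepers. A sketch of the pattern with hypothetical my_fifo types (waitqueue initialization via init_waitqueue_head() is assumed elsewhere):

#include <linux/wait.h>

struct my_fifo {
	wait_queue_head_t event;
	bool open;
	/* ...circular buffer head/tail... */
};

static bool my_fifo_has_space(struct my_fifo *f)
{
	return true;	/* placeholder for the real space check */
}

static void my_fifo_blocking_write(struct my_fifo *f)
{
	/* wake on free space OR on the reader going away */
	wait_event(f->event, my_fifo_has_space(f) || !f->open);
	if (!f->open)
		return;		/* reader is gone, drop the data */

	/* ...copy into the FIFO and wake the reader... */
}

static void my_fifo_release(struct my_fifo *f)
{
	f->open = false;
	wake_up_all(&f->event);	/* unblock any writer stuck above */
}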
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
| @@ -2435,6 +2435,38 @@ nv140_chipset = { | |||
| 2435 | }; | 2435 | }; |
| 2436 | 2436 | ||
| 2437 | static const struct nvkm_device_chip | 2437 | static const struct nvkm_device_chip |
| 2438 | nv162_chipset = { | ||
| 2439 | .name = "TU102", | ||
| 2440 | .bar = tu104_bar_new, | ||
| 2441 | .bios = nvkm_bios_new, | ||
| 2442 | .bus = gf100_bus_new, | ||
| 2443 | .devinit = tu104_devinit_new, | ||
| 2444 | .fault = tu104_fault_new, | ||
| 2445 | .fb = gv100_fb_new, | ||
| 2446 | .fuse = gm107_fuse_new, | ||
| 2447 | .gpio = gk104_gpio_new, | ||
| 2448 | .i2c = gm200_i2c_new, | ||
| 2449 | .ibus = gm200_ibus_new, | ||
| 2450 | .imem = nv50_instmem_new, | ||
| 2451 | .ltc = gp102_ltc_new, | ||
| 2452 | .mc = tu104_mc_new, | ||
| 2453 | .mmu = tu104_mmu_new, | ||
| 2454 | .pci = gp100_pci_new, | ||
| 2455 | .pmu = gp102_pmu_new, | ||
| 2456 | .therm = gp100_therm_new, | ||
| 2457 | .timer = gk20a_timer_new, | ||
| 2458 | .top = gk104_top_new, | ||
| 2459 | .ce[0] = tu104_ce_new, | ||
| 2460 | .ce[1] = tu104_ce_new, | ||
| 2461 | .ce[2] = tu104_ce_new, | ||
| 2462 | .ce[3] = tu104_ce_new, | ||
| 2463 | .ce[4] = tu104_ce_new, | ||
| 2464 | .disp = tu104_disp_new, | ||
| 2465 | .dma = gv100_dma_new, | ||
| 2466 | .fifo = tu104_fifo_new, | ||
| 2467 | }; | ||
| 2468 | |||
| 2469 | static const struct nvkm_device_chip | ||
| 2438 | nv164_chipset = { | 2470 | nv164_chipset = { |
| 2439 | .name = "TU104", | 2471 | .name = "TU104", |
| 2440 | .bar = tu104_bar_new, | 2472 | .bar = tu104_bar_new, |
| @@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, | |||
| 2950 | case 0x138: device->chip = &nv138_chipset; break; | 2982 | case 0x138: device->chip = &nv138_chipset; break; |
| 2951 | case 0x13b: device->chip = &nv13b_chipset; break; | 2983 | case 0x13b: device->chip = &nv13b_chipset; break; |
| 2952 | case 0x140: device->chip = &nv140_chipset; break; | 2984 | case 0x140: device->chip = &nv140_chipset; break; |
| 2985 | case 0x162: device->chip = &nv162_chipset; break; | ||
| 2953 | case 0x164: device->chip = &nv164_chipset; break; | 2986 | case 0x164: device->chip = &nv164_chipset; break; |
| 2954 | case 0x166: device->chip = &nv166_chipset; break; | 2987 | case 0x166: device->chip = &nv166_chipset; break; |
| 2955 | default: | 2988 | default: |
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
| @@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = { | |||
| 250 | #if defined(CONFIG_DEBUG_FS) | 250 | #if defined(CONFIG_DEBUG_FS) |
| 251 | .debugfs_init = qxl_debugfs_init, | 251 | .debugfs_init = qxl_debugfs_init, |
| 252 | #endif | 252 | #endif |
| 253 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 254 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 255 | .gem_prime_export = drm_gem_prime_export, | 253 | .gem_prime_export = drm_gem_prime_export, |
| 256 | .gem_prime_import = drm_gem_prime_import, | 254 | .gem_prime_import = drm_gem_prime_import, |
| 257 | .gem_prime_pin = qxl_gem_prime_pin, | 255 | .gem_prime_pin = qxl_gem_prime_pin, |
| 258 | .gem_prime_unpin = qxl_gem_prime_unpin, | 256 | .gem_prime_unpin = qxl_gem_prime_unpin, |
| 259 | .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, | ||
| 260 | .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, | ||
| 261 | .gem_prime_vmap = qxl_gem_prime_vmap, | 257 | .gem_prime_vmap = qxl_gem_prime_vmap, |
| 262 | .gem_prime_vunmap = qxl_gem_prime_vunmap, | 258 | .gem_prime_vunmap = qxl_gem_prime_vunmap, |
| 263 | .gem_prime_mmap = qxl_gem_prime_mmap, | 259 | .gem_prime_mmap = qxl_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c | |||
| @@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 38 | WARN_ONCE(1, "not implemented"); | 38 | WARN_ONCE(1, "not implemented"); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 42 | { | ||
| 43 | WARN_ONCE(1, "not implemented"); | ||
| 44 | return ERR_PTR(-ENOSYS); | ||
| 45 | } | ||
| 46 | |||
| 47 | struct drm_gem_object *qxl_gem_prime_import_sg_table( | ||
| 48 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 49 | struct sg_table *table) | ||
| 50 | { | ||
| 51 | WARN_ONCE(1, "not implemented"); | ||
| 52 | return ERR_PTR(-ENOSYS); | ||
| 53 | } | ||
| 54 | |||
| 55 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) | 41 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) |
| 56 | { | 42 | { |
| 57 | WARN_ONCE(1, "not implemented"); | 43 | WARN_ONCE(1, "not implemented"); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..37f93022a106 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
| @@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, | |||
| 113 | child_count++; | 113 | child_count++; |
| 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, | 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, |
| 115 | &panel, &bridge); | 115 | &panel, &bridge); |
| 116 | if (!ret) | 116 | if (!ret) { |
| 117 | of_node_put(endpoint); | ||
| 117 | break; | 118 | break; |
| 119 | } | ||
| 118 | } | 120 | } |
| 119 | 121 | ||
| 120 | of_node_put(port); | 122 | of_node_put(port); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
| @@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, | |||
| 786 | remote = of_graph_get_remote_port_parent(ep); | 786 | remote = of_graph_get_remote_port_parent(ep); |
| 787 | if (!remote) | 787 | if (!remote) |
| 788 | continue; | 788 | continue; |
| 789 | of_node_put(remote); | ||
| 789 | 790 | ||
| 790 | /* does this node match any registered engines? */ | 791 | /* does this node match any registered engines? */ |
| 791 | list_for_each_entry(frontend, &drv->frontend_list, list) { | 792 | list_for_each_entry(frontend, &drv->frontend_list, list) { |
| 792 | if (remote == frontend->node) { | 793 | if (remote == frontend->node) { |
| 793 | of_node_put(remote); | ||
| 794 | of_node_put(port); | 794 | of_node_put(port); |
| 795 | of_node_put(ep); | ||
| 795 | return frontend; | 796 | return frontend; |
| 796 | } | 797 | } |
| 797 | } | 798 | } |
| 798 | } | 799 | } |
| 799 | 800 | of_node_put(port); | |
| 800 | return ERR_PTR(-EINVAL); | 801 | return ERR_PTR(-EINVAL); |
| 801 | } | 802 | } |
| 802 | 803 | ||
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 061d2e0d9011..416da5376701 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | |||
| @@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) | |||
| 92 | val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); | 92 | val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
| 93 | val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; | 93 | val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; |
| 94 | writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); | 94 | writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
| 95 | |||
| 96 | clk_disable_unprepare(hdmi->tmds_clk); | ||
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | static void sun4i_hdmi_enable(struct drm_encoder *encoder) | 99 | static void sun4i_hdmi_enable(struct drm_encoder *encoder) |
| @@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder) | |||
| 102 | 104 | ||
| 103 | DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); | 105 | DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); |
| 104 | 106 | ||
| 107 | clk_prepare_enable(hdmi->tmds_clk); | ||
| 108 | |||
| 105 | sun4i_hdmi_setup_avi_infoframes(hdmi, mode); | 109 | sun4i_hdmi_setup_avi_infoframes(hdmi, mode); |
| 106 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); | 110 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); |
| 107 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); | 111 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); |
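sun4i_hdmi_enc.c gates the TMDS clock with the encoder: clk_prepare_enable() when the output is enabled, clk_disable_unprepare() when it is disabled, so the clock does not keep running while the port is off. A sketch of the pairing, with hypothetical my_encoder_* helpers:

#include <linux/clk.h>

static int my_encoder_enable(struct clk *tmds_clk)
{
	int ret;

	ret = clk_prepare_enable(tmds_clk);	/* prepare + enable in one call */
	if (ret)
		return ret;

	/* ...program infoframes and turn the video output on... */
	return 0;
}

static void my_encoder_disable(struct clk *tmds_clk)
{
	/* ...turn the video output off first... */
	clk_disable_unprepare(tmds_clk);	/* undo clk_prepare_enable() */
}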
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c | |||
| @@ -127,14 +127,10 @@ static struct drm_driver driver = { | |||
| 127 | #if defined(CONFIG_DEBUG_FS) | 127 | #if defined(CONFIG_DEBUG_FS) |
| 128 | .debugfs_init = virtio_gpu_debugfs_init, | 128 | .debugfs_init = virtio_gpu_debugfs_init, |
| 129 | #endif | 129 | #endif |
| 130 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 131 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 132 | .gem_prime_export = drm_gem_prime_export, | 130 | .gem_prime_export = drm_gem_prime_export, |
| 133 | .gem_prime_import = drm_gem_prime_import, | 131 | .gem_prime_import = drm_gem_prime_import, |
| 134 | .gem_prime_pin = virtgpu_gem_prime_pin, | 132 | .gem_prime_pin = virtgpu_gem_prime_pin, |
| 135 | .gem_prime_unpin = virtgpu_gem_prime_unpin, | 133 | .gem_prime_unpin = virtgpu_gem_prime_unpin, |
| 136 | .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, | ||
| 137 | .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, | ||
| 138 | .gem_prime_vmap = virtgpu_gem_prime_vmap, | 134 | .gem_prime_vmap = virtgpu_gem_prime_vmap, |
| 139 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, | 135 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, |
| 140 | .gem_prime_mmap = virtgpu_gem_prime_mmap, | 136 | .gem_prime_mmap = virtgpu_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
| @@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); | |||
| 372 | /* virtgpu_prime.c */ | 372 | /* virtgpu_prime.c */ |
| 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); | 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); |
| 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); | 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); |
| 375 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
| 376 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 377 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 378 | struct sg_table *sgt); | ||
| 379 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); | 375 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); |
| 380 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 376 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
| 381 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, | 377 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c | |||
| @@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 39 | WARN_ONCE(1, "not implemented"); | 39 | WARN_ONCE(1, "not implemented"); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 43 | { | ||
| 44 | WARN_ONCE(1, "not implemented"); | ||
| 45 | return ERR_PTR(-ENODEV); | ||
| 46 | } | ||
| 47 | |||
| 48 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 49 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 50 | struct sg_table *table) | ||
| 51 | { | ||
| 52 | WARN_ONCE(1, "not implemented"); | ||
| 53 | return ERR_PTR(-ENODEV); | ||
| 54 | } | ||
| 55 | |||
| 56 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) | 42 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) |
| 57 | { | 43 | { |
| 58 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); | 44 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); |
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
| @@ -21,6 +21,7 @@ config VGA_SWITCHEROO | |||
| 21 | bool "Laptop Hybrid Graphics - GPU switching support" | 21 | bool "Laptop Hybrid Graphics - GPU switching support" |
| 22 | depends on X86 | 22 | depends on X86 |
| 23 | depends on ACPI | 23 | depends on ACPI |
| 24 | depends on PCI | ||
| 24 | select VGA_ARB | 25 | select VGA_ARB |
| 25 | help | 26 | help |
| 26 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer | 27 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f41d5fe51abe..9993b692598f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type) | |||
| 125 | { | 125 | { |
| 126 | struct hid_collection *collection; | 126 | struct hid_collection *collection; |
| 127 | unsigned usage; | 127 | unsigned usage; |
| 128 | int collection_index; | ||
| 128 | 129 | ||
| 129 | usage = parser->local.usage[0]; | 130 | usage = parser->local.usage[0]; |
| 130 | 131 | ||
| @@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type) | |||
| 167 | parser->collection_stack[parser->collection_stack_ptr++] = | 168 | parser->collection_stack[parser->collection_stack_ptr++] = |
| 168 | parser->device->maxcollection; | 169 | parser->device->maxcollection; |
| 169 | 170 | ||
| 170 | collection = parser->device->collection + | 171 | collection_index = parser->device->maxcollection++; |
| 171 | parser->device->maxcollection++; | 172 | collection = parser->device->collection + collection_index; |
| 172 | collection->type = type; | 173 | collection->type = type; |
| 173 | collection->usage = usage; | 174 | collection->usage = usage; |
| 174 | collection->level = parser->collection_stack_ptr - 1; | 175 | collection->level = parser->collection_stack_ptr - 1; |
| 175 | collection->parent = parser->active_collection; | 176 | collection->parent_idx = (collection->level == 0) ? -1 : |
| 176 | parser->active_collection = collection; | 177 | parser->collection_stack[collection->level - 1]; |
| 177 | 178 | ||
| 178 | if (type == HID_COLLECTION_APPLICATION) | 179 | if (type == HID_COLLECTION_APPLICATION) |
| 179 | parser->device->maxapplication++; | 180 | parser->device->maxapplication++; |
| @@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser) | |||
| 192 | return -EINVAL; | 193 | return -EINVAL; |
| 193 | } | 194 | } |
| 194 | parser->collection_stack_ptr--; | 195 | parser->collection_stack_ptr--; |
| 195 | if (parser->active_collection) | ||
| 196 | parser->active_collection = parser->active_collection->parent; | ||
| 197 | return 0; | 196 | return 0; |
| 198 | } | 197 | } |
| 199 | 198 | ||
| @@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid, | |||
| 1006 | usage = &field->usage[i]; | 1005 | usage = &field->usage[i]; |
| 1007 | 1006 | ||
| 1008 | collection = &hid->collection[usage->collection_index]; | 1007 | collection = &hid->collection[usage->collection_index]; |
| 1009 | while (collection && collection != multiplier_collection) | 1008 | while (collection->parent_idx != -1 && |
| 1010 | collection = collection->parent; | 1009 | collection != multiplier_collection) |
| 1010 | collection = &hid->collection[collection->parent_idx]; | ||
| 1011 | 1011 | ||
| 1012 | if (collection || multiplier_collection == NULL) | 1012 | if (collection->parent_idx != -1 || |
| 1013 | multiplier_collection == NULL) | ||
| 1013 | usage->resolution_multiplier = effective_multiplier; | 1014 | usage->resolution_multiplier = effective_multiplier; |
| 1014 | 1015 | ||
| 1015 | } | 1016 | } |
| @@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid, | |||
| 1044 | * applicable fields later. | 1045 | * applicable fields later. |
| 1045 | */ | 1046 | */ |
| 1046 | multiplier_collection = &hid->collection[multiplier->usage->collection_index]; | 1047 | multiplier_collection = &hid->collection[multiplier->usage->collection_index]; |
| 1047 | while (multiplier_collection && | 1048 | while (multiplier_collection->parent_idx != -1 && |
| 1048 | multiplier_collection->type != HID_COLLECTION_LOGICAL) | 1049 | multiplier_collection->type != HID_COLLECTION_LOGICAL) |
| 1049 | multiplier_collection = multiplier_collection->parent; | 1050 | multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; |
| 1050 | 1051 | ||
| 1051 | effective_multiplier = hid_calculate_multiplier(hid, multiplier); | 1052 | effective_multiplier = hid_calculate_multiplier(hid, multiplier); |
| 1052 | 1053 | ||
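The hid-core.c hunks replace the collection->parent pointer with a parent_idx into hid->collection[], with -1 marking a root. The collection array grows as the report descriptor is parsed, and array indices stay valid across reallocation where raw pointers would dangle. A standalone sketch of walking ancestors by index, with hypothetical node/is_self_or_descendant names:

#include <stdbool.h>

struct node {
	int parent_idx;		/* index of the parent in the same array, -1 for a root */
	int type;
};

/* true if 'idx' is 'ancestor' itself or has it among its ancestors */
static bool is_self_or_descendant(const struct node *nodes, int idx, int ancestor)
{
	while (nodes[idx].parent_idx != -1) {
		if (idx == ancestor)
			return true;
		idx = nodes[idx].parent_idx;
	}

	return idx == ancestor;
}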
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 518fa76414f5..24f846d67478 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -461,6 +461,9 @@ | |||
| 461 | #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a | 461 | #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a |
| 462 | #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 | 462 | #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 |
| 463 | 463 | ||
| 464 | #define I2C_VENDOR_ID_GOODIX 0x27c6 | ||
| 465 | #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 | ||
| 466 | |||
| 464 | #define USB_VENDOR_ID_GOODTOUCH 0x1aad | 467 | #define USB_VENDOR_ID_GOODTOUCH 0x1aad |
| 465 | #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f | 468 | #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f |
| 466 | 469 | ||
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 8555ce7e737b..c5edfa966343 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c | |||
| @@ -179,6 +179,8 @@ static const struct i2c_hid_quirks { | |||
| 179 | I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, | 179 | I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, |
| 180 | { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, | 180 | { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, |
| 181 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | 181 | I2C_HID_QUIRK_NO_RUNTIME_PM }, |
| 182 | { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0, | ||
| 183 | I2C_HID_QUIRK_NO_RUNTIME_PM }, | ||
| 182 | { 0, 0 } | 184 | { 0, 0 } |
| 183 | }; | 185 | }; |
| 184 | 186 | ||
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index ce0ba2062723..bea4c9850247 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
| @@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel) | |||
| 701 | int vmbus_disconnect_ring(struct vmbus_channel *channel) | 701 | int vmbus_disconnect_ring(struct vmbus_channel *channel) |
| 702 | { | 702 | { |
| 703 | struct vmbus_channel *cur_channel, *tmp; | 703 | struct vmbus_channel *cur_channel, *tmp; |
| 704 | unsigned long flags; | ||
| 705 | LIST_HEAD(list); | ||
| 706 | int ret; | 704 | int ret; |
| 707 | 705 | ||
| 708 | if (channel->primary_channel != NULL) | 706 | if (channel->primary_channel != NULL) |
| 709 | return -EINVAL; | 707 | return -EINVAL; |
| 710 | 708 | ||
| 711 | /* Snapshot the list of subchannels */ | 709 | list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) { |
| 712 | spin_lock_irqsave(&channel->lock, flags); | ||
| 713 | list_splice_init(&channel->sc_list, &list); | ||
| 714 | spin_unlock_irqrestore(&channel->lock, flags); | ||
| 715 | |||
| 716 | list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) { | ||
| 717 | if (cur_channel->rescind) | 710 | if (cur_channel->rescind) |
| 718 | wait_for_completion(&cur_channel->rescind_event); | 711 | wait_for_completion(&cur_channel->rescind_event); |
| 719 | 712 | ||
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 5301fef16c31..7c6349a50ef1 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c | |||
| @@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, | |||
| 888 | pfn_cnt -= pgs_ol; | 888 | pfn_cnt -= pgs_ol; |
| 889 | /* | 889 | /* |
| 890 | * Check if the corresponding memory block is already | 890 | * Check if the corresponding memory block is already |
| 891 | * online by checking its last previously backed page. | 891 | * online. It is possible to observe struct pages still |
| 892 | * In case it is we need to bring rest (which was not | 892 | * being uninitialized here so check section instead. |
| 893 | * backed previously) online too. | 893 | * In case the section is online we need to bring the |
| 894 | * rest of pfns (which were not backed previously) | ||
| 895 | * online too. | ||
| 894 | */ | 896 | */ |
| 895 | if (start_pfn > has->start_pfn && | 897 | if (start_pfn > has->start_pfn && |
| 896 | !PageReserved(pfn_to_page(start_pfn - 1))) | 898 | online_section_nr(pfn_to_section_nr(start_pfn))) |
| 897 | hv_bring_pgs_online(has, start_pfn, pgs_ol); | 899 | hv_bring_pgs_online(has, start_pfn, pgs_ol); |
| 898 | 900 | ||
| 899 | } | 901 | } |
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 64d0c85d5161..1f1a55e07733 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
| @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, | |||
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | /* Get various debug metrics for the specified ring buffer. */ | 166 | /* Get various debug metrics for the specified ring buffer. */ |
| 167 | void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | 167 | int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, |
| 168 | struct hv_ring_buffer_debug_info *debug_info) | 168 | struct hv_ring_buffer_debug_info *debug_info) |
| 169 | { | 169 | { |
| 170 | u32 bytes_avail_towrite; | 170 | u32 bytes_avail_towrite; |
| 171 | u32 bytes_avail_toread; | 171 | u32 bytes_avail_toread; |
| 172 | 172 | ||
| 173 | if (ring_info->ring_buffer) { | 173 | if (!ring_info->ring_buffer) |
| 174 | hv_get_ringbuffer_availbytes(ring_info, | 174 | return -EINVAL; |
| 175 | &bytes_avail_toread, | 175 | |
| 176 | &bytes_avail_towrite); | 176 | hv_get_ringbuffer_availbytes(ring_info, |
| 177 | 177 | &bytes_avail_toread, | |
| 178 | debug_info->bytes_avail_toread = bytes_avail_toread; | 178 | &bytes_avail_towrite); |
| 179 | debug_info->bytes_avail_towrite = bytes_avail_towrite; | 179 | debug_info->bytes_avail_toread = bytes_avail_toread; |
| 180 | debug_info->current_read_index = | 180 | debug_info->bytes_avail_towrite = bytes_avail_towrite; |
| 181 | ring_info->ring_buffer->read_index; | 181 | debug_info->current_read_index = ring_info->ring_buffer->read_index; |
| 182 | debug_info->current_write_index = | 182 | debug_info->current_write_index = ring_info->ring_buffer->write_index; |
| 183 | ring_info->ring_buffer->write_index; | 183 | debug_info->current_interrupt_mask |
| 184 | debug_info->current_interrupt_mask = | 184 | = ring_info->ring_buffer->interrupt_mask; |
| 185 | ring_info->ring_buffer->interrupt_mask; | 185 | return 0; |
| 186 | } | ||
| 187 | } | 186 | } |
| 188 | EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); | 187 | EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); |
| 189 | 188 | ||
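hv_ringbuffer_get_debuginfo() now reports -EINVAL for an uninitialized ring instead of silently leaving the output untouched, and the sysfs readers in vmbus_drv.c below propagate that error. A sketch of the shape, with hypothetical ring_state/show_read_index names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct ring_state {
	bool initialized;
	u32 read_index;
	u32 write_index;
};

static int ring_get_debuginfo(const struct ring_state *ring, u32 *rd, u32 *wr)
{
	if (!ring->initialized)
		return -EINVAL;		/* early return instead of one big if-block */

	*rd = ring->read_index;
	*wr = ring->write_index;
	return 0;
}

/* a caller such as a sysfs show routine simply forwards the error */
static ssize_t show_read_index(const struct ring_state *ring, char *buf)
{
	u32 rd, wr;
	int ret;

	ret = ring_get_debuginfo(ring, &rd, &wr);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%u\n", rd);
}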
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index d0ff65675292..403fee01572c 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, | |||
| 313 | { | 313 | { |
| 314 | struct hv_device *hv_dev = device_to_hv_device(dev); | 314 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 315 | struct hv_ring_buffer_debug_info outbound; | 315 | struct hv_ring_buffer_debug_info outbound; |
| 316 | int ret; | ||
| 316 | 317 | ||
| 317 | if (!hv_dev->channel) | 318 | if (!hv_dev->channel) |
| 318 | return -ENODEV; | 319 | return -ENODEV; |
| 319 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 320 | |
| 320 | return -EINVAL; | 321 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, |
| 321 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | 322 | &outbound); |
| 323 | if (ret < 0) | ||
| 324 | return ret; | ||
| 325 | |||
| 322 | return sprintf(buf, "%d\n", outbound.current_interrupt_mask); | 326 | return sprintf(buf, "%d\n", outbound.current_interrupt_mask); |
| 323 | } | 327 | } |
| 324 | static DEVICE_ATTR_RO(out_intr_mask); | 328 | static DEVICE_ATTR_RO(out_intr_mask); |
| @@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev, | |||
| 328 | { | 332 | { |
| 329 | struct hv_device *hv_dev = device_to_hv_device(dev); | 333 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 330 | struct hv_ring_buffer_debug_info outbound; | 334 | struct hv_ring_buffer_debug_info outbound; |
| 335 | int ret; | ||
| 331 | 336 | ||
| 332 | if (!hv_dev->channel) | 337 | if (!hv_dev->channel) |
| 333 | return -ENODEV; | 338 | return -ENODEV; |
| 334 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 339 | |
| 335 | return -EINVAL; | 340 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, |
| 336 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | 341 | &outbound); |
| 342 | if (ret < 0) | ||
| 343 | return ret; | ||
| 337 | return sprintf(buf, "%d\n", outbound.current_read_index); | 344 | return sprintf(buf, "%d\n", outbound.current_read_index); |
| 338 | } | 345 | } |
| 339 | static DEVICE_ATTR_RO(out_read_index); | 346 | static DEVICE_ATTR_RO(out_read_index); |
| @@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, | |||
| 344 | { | 351 | { |
| 345 | struct hv_device *hv_dev = device_to_hv_device(dev); | 352 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 346 | struct hv_ring_buffer_debug_info outbound; | 353 | struct hv_ring_buffer_debug_info outbound; |
| 354 | int ret; | ||
| 347 | 355 | ||
| 348 | if (!hv_dev->channel) | 356 | if (!hv_dev->channel) |
| 349 | return -ENODEV; | 357 | return -ENODEV; |
| 350 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 358 | |
| 351 | return -EINVAL; | 359 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, |
| 352 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | 360 | &outbound); |
| 361 | if (ret < 0) | ||
| 362 | return ret; | ||
| 353 | return sprintf(buf, "%d\n", outbound.current_write_index); | 363 | return sprintf(buf, "%d\n", outbound.current_write_index); |
| 354 | } | 364 | } |
| 355 | static DEVICE_ATTR_RO(out_write_index); | 365 | static DEVICE_ATTR_RO(out_write_index); |
| @@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, | |||
| 360 | { | 370 | { |
| 361 | struct hv_device *hv_dev = device_to_hv_device(dev); | 371 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 362 | struct hv_ring_buffer_debug_info outbound; | 372 | struct hv_ring_buffer_debug_info outbound; |
| 373 | int ret; | ||
| 363 | 374 | ||
| 364 | if (!hv_dev->channel) | 375 | if (!hv_dev->channel) |
| 365 | return -ENODEV; | 376 | return -ENODEV; |
| 366 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 377 | |
| 367 | return -EINVAL; | 378 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, |
| 368 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | 379 | &outbound); |
| 380 | if (ret < 0) | ||
| 381 | return ret; | ||
| 369 | return sprintf(buf, "%d\n", outbound.bytes_avail_toread); | 382 | return sprintf(buf, "%d\n", outbound.bytes_avail_toread); |
| 370 | } | 383 | } |
| 371 | static DEVICE_ATTR_RO(out_read_bytes_avail); | 384 | static DEVICE_ATTR_RO(out_read_bytes_avail); |
| @@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, | |||
| 376 | { | 389 | { |
| 377 | struct hv_device *hv_dev = device_to_hv_device(dev); | 390 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 378 | struct hv_ring_buffer_debug_info outbound; | 391 | struct hv_ring_buffer_debug_info outbound; |
| 392 | int ret; | ||
| 379 | 393 | ||
| 380 | if (!hv_dev->channel) | 394 | if (!hv_dev->channel) |
| 381 | return -ENODEV; | 395 | return -ENODEV; |
| 382 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 396 | |
| 383 | return -EINVAL; | 397 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, |
| 384 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | 398 | &outbound); |
| 399 | if (ret < 0) | ||
| 400 | return ret; | ||
| 385 | return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); | 401 | return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); |
| 386 | } | 402 | } |
| 387 | static DEVICE_ATTR_RO(out_write_bytes_avail); | 403 | static DEVICE_ATTR_RO(out_write_bytes_avail); |
| @@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, | |||
| 391 | { | 407 | { |
| 392 | struct hv_device *hv_dev = device_to_hv_device(dev); | 408 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 393 | struct hv_ring_buffer_debug_info inbound; | 409 | struct hv_ring_buffer_debug_info inbound; |
| 410 | int ret; | ||
| 394 | 411 | ||
| 395 | if (!hv_dev->channel) | 412 | if (!hv_dev->channel) |
| 396 | return -ENODEV; | 413 | return -ENODEV; |
| 397 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 414 | |
| 398 | return -EINVAL; | 415 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); |
| 399 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | 416 | if (ret < 0) |
| 417 | return ret; | ||
| 418 | |||
| 400 | return sprintf(buf, "%d\n", inbound.current_interrupt_mask); | 419 | return sprintf(buf, "%d\n", inbound.current_interrupt_mask); |
| 401 | } | 420 | } |
| 402 | static DEVICE_ATTR_RO(in_intr_mask); | 421 | static DEVICE_ATTR_RO(in_intr_mask); |
| @@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, | |||
| 406 | { | 425 | { |
| 407 | struct hv_device *hv_dev = device_to_hv_device(dev); | 426 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 408 | struct hv_ring_buffer_debug_info inbound; | 427 | struct hv_ring_buffer_debug_info inbound; |
| 428 | int ret; | ||
| 409 | 429 | ||
| 410 | if (!hv_dev->channel) | 430 | if (!hv_dev->channel) |
| 411 | return -ENODEV; | 431 | return -ENODEV; |
| 412 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 432 | |
| 413 | return -EINVAL; | 433 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); |
| 414 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | 434 | if (ret < 0) |
| 435 | return ret; | ||
| 436 | |||
| 415 | return sprintf(buf, "%d\n", inbound.current_read_index); | 437 | return sprintf(buf, "%d\n", inbound.current_read_index); |
| 416 | } | 438 | } |
| 417 | static DEVICE_ATTR_RO(in_read_index); | 439 | static DEVICE_ATTR_RO(in_read_index); |
| @@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, | |||
| 421 | { | 443 | { |
| 422 | struct hv_device *hv_dev = device_to_hv_device(dev); | 444 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 423 | struct hv_ring_buffer_debug_info inbound; | 445 | struct hv_ring_buffer_debug_info inbound; |
| 446 | int ret; | ||
| 424 | 447 | ||
| 425 | if (!hv_dev->channel) | 448 | if (!hv_dev->channel) |
| 426 | return -ENODEV; | 449 | return -ENODEV; |
| 427 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 450 | |
| 428 | return -EINVAL; | 451 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); |
| 429 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | 452 | if (ret < 0) |
| 453 | return ret; | ||
| 454 | |||
| 430 | return sprintf(buf, "%d\n", inbound.current_write_index); | 455 | return sprintf(buf, "%d\n", inbound.current_write_index); |
| 431 | } | 456 | } |
| 432 | static DEVICE_ATTR_RO(in_write_index); | 457 | static DEVICE_ATTR_RO(in_write_index); |
| @@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, | |||
| 437 | { | 462 | { |
| 438 | struct hv_device *hv_dev = device_to_hv_device(dev); | 463 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 439 | struct hv_ring_buffer_debug_info inbound; | 464 | struct hv_ring_buffer_debug_info inbound; |
| 465 | int ret; | ||
| 440 | 466 | ||
| 441 | if (!hv_dev->channel) | 467 | if (!hv_dev->channel) |
| 442 | return -ENODEV; | 468 | return -ENODEV; |
| 443 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 469 | |
| 444 | return -EINVAL; | 470 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); |
| 445 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | 471 | if (ret < 0) |
| 472 | return ret; | ||
| 473 | |||
| 446 | return sprintf(buf, "%d\n", inbound.bytes_avail_toread); | 474 | return sprintf(buf, "%d\n", inbound.bytes_avail_toread); |
| 447 | } | 475 | } |
| 448 | static DEVICE_ATTR_RO(in_read_bytes_avail); | 476 | static DEVICE_ATTR_RO(in_read_bytes_avail); |
| @@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, | |||
| 453 | { | 481 | { |
| 454 | struct hv_device *hv_dev = device_to_hv_device(dev); | 482 | struct hv_device *hv_dev = device_to_hv_device(dev); |
| 455 | struct hv_ring_buffer_debug_info inbound; | 483 | struct hv_ring_buffer_debug_info inbound; |
| 484 | int ret; | ||
| 456 | 485 | ||
| 457 | if (!hv_dev->channel) | 486 | if (!hv_dev->channel) |
| 458 | return -ENODEV; | 487 | return -ENODEV; |
| 459 | if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | 488 | |
| 460 | return -EINVAL; | 489 | ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); |
| 461 | hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | 490 | if (ret < 0) |
| 491 | return ret; | ||
| 492 | |||
| 462 | return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); | 493 | return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); |
| 463 | } | 494 | } |
| 464 | static DEVICE_ATTR_RO(in_write_bytes_avail); | 495 | static DEVICE_ATTR_RO(in_write_bytes_avail); |
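
The Hyper-V sysfs hunks above all make the same change: hv_ringbuffer_get_debuginfo() now returns an error code, and every *_show() attribute must propagate it instead of formatting a structure that was never filled in. A minimal userspace-style sketch of that pattern, with hypothetical types standing in for the Hyper-V ones:

#include <errno.h>
#include <stdio.h>

struct ring_debug_info {
	int current_read_index;
};

/* Hypothetical stand-in for hv_ringbuffer_get_debuginfo(): it may fail. */
static int get_debuginfo(const int *ring, struct ring_debug_info *out)
{
	if (!ring)
		return -EINVAL;
	out->current_read_index = *ring;
	return 0;
}

static int show_read_index(const int *ring, char *buf, size_t len)
{
	struct ring_debug_info info;
	int ret = get_debuginfo(ring, &info);

	if (ret < 0)
		return ret;	/* propagate instead of printing an unfilled struct */
	return snprintf(buf, len, "%d\n", info.current_read_index);
}

int main(void)
{
	char buf[16];
	int ring = 7;

	return show_read_index(&ring, buf, sizeof(buf)) < 0;
}
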
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 0e30fa00204c..f9b8e3e23a8e 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c | |||
| @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, | |||
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | rv = lm80_read_value(client, LM80_REG_FANDIV); | 395 | rv = lm80_read_value(client, LM80_REG_FANDIV); |
| 396 | if (rv < 0) | 396 | if (rv < 0) { |
| 397 | mutex_unlock(&data->update_lock); | ||
| 397 | return rv; | 398 | return rv; |
| 399 | } | ||
| 398 | reg = (rv & ~(3 << (2 * (nr + 1)))) | 400 | reg = (rv & ~(3 << (2 * (nr + 1)))) |
| 399 | | (data->fan_div[nr] << (2 * (nr + 1))); | 401 | | (data->fan_div[nr] << (2 * (nr + 1))); |
| 400 | lm80_write_value(client, LM80_REG_FANDIV, reg); | 402 | lm80_write_value(client, LM80_REG_FANDIV, reg); |
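
The lm80 hunk is the usual unlock-on-error rule: once update_lock is held, the early return added for a failed register read must drop it, otherwise the next caller blocks forever. A hedged pthread sketch of the same shape (the register access is a stub, not the lm80 API):

#include <pthread.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub standing in for lm80_read_value(); negative means failure. */
static int read_reg(int reg)
{
	return reg >= 0 ? 0x55 : -5;
}

static int set_fan_div(int reg)
{
	int rv;

	pthread_mutex_lock(&update_lock);
	rv = read_reg(reg);
	if (rv < 0) {
		pthread_mutex_unlock(&update_lock);	/* the unlock the fix adds */
		return rv;
	}
	/* ... modify and write back the divider bits ... */
	pthread_mutex_unlock(&update_lock);
	return 0;
}

int main(void)
{
	return set_fan_div(0);
}
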
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c3040079b1cb..4adec4ab7d06 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -44,8 +44,8 @@ | |||
| 44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 | 44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 |
| 45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 | 45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 |
| 46 | * (0xd451) | 46 | * (0xd451) |
| 47 | * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 | 47 | * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 |
| 48 | * (0xd459) | 48 | * (0xd429) |
| 49 | * | 49 | * |
| 50 | * #temp lists the number of monitored temperature sources (first value) plus | 50 | * #temp lists the number of monitored temperature sources (first value) plus |
| 51 | * the number of directly connectable temperature sensors (second value). | 51 | * the number of directly connectable temperature sensors (second value). |
| @@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); | |||
| 138 | #define SIO_NCT6795_ID 0xd350 | 138 | #define SIO_NCT6795_ID 0xd350 |
| 139 | #define SIO_NCT6796_ID 0xd420 | 139 | #define SIO_NCT6796_ID 0xd420 |
| 140 | #define SIO_NCT6797_ID 0xd450 | 140 | #define SIO_NCT6797_ID 0xd450 |
| 141 | #define SIO_NCT6798_ID 0xd458 | 141 | #define SIO_NCT6798_ID 0xd428 |
| 142 | #define SIO_ID_MASK 0xFFF8 | 142 | #define SIO_ID_MASK 0xFFF8 |
| 143 | 143 | ||
| 144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; | 144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; |
| @@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) | |||
| 4508 | 4508 | ||
| 4509 | if (data->kind == nct6791 || data->kind == nct6792 || | 4509 | if (data->kind == nct6791 || data->kind == nct6792 || |
| 4510 | data->kind == nct6793 || data->kind == nct6795 || | 4510 | data->kind == nct6793 || data->kind == nct6795 || |
| 4511 | data->kind == nct6796) | 4511 | data->kind == nct6796 || data->kind == nct6797 || |
| 4512 | data->kind == nct6798) | ||
| 4512 | nct6791_enable_io_mapping(sioreg); | 4513 | nct6791_enable_io_mapping(sioreg); |
| 4513 | 4514 | ||
| 4514 | superio_exit(sioreg); | 4515 | superio_exit(sioreg); |
| @@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | |||
| 4644 | 4645 | ||
| 4645 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || | 4646 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || |
| 4646 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || | 4647 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || |
| 4647 | sio_data->kind == nct6796) | 4648 | sio_data->kind == nct6796 || sio_data->kind == nct6797 || |
| 4649 | sio_data->kind == nct6798) | ||
| 4648 | nct6791_enable_io_mapping(sioaddr); | 4650 | nct6791_enable_io_mapping(sioaddr); |
| 4649 | 4651 | ||
| 4650 | superio_exit(sioaddr); | 4652 | superio_exit(sioaddr); |
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 423903f87955..391118c8aae8 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
| @@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev, | |||
| 380 | val *= 1000000ULL; | 380 | val *= 1000000ULL; |
| 381 | break; | 381 | break; |
| 382 | case 2: | 382 | case 2: |
| 383 | val = get_unaligned_be32(&power->update_tag) * | 383 | val = (u64)get_unaligned_be32(&power->update_tag) * |
| 384 | occ->powr_sample_time_us; | 384 | occ->powr_sample_time_us; |
| 385 | break; | 385 | break; |
| 386 | case 3: | 386 | case 3: |
| 387 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 387 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
| @@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev, | |||
| 425 | &power->update_tag); | 425 | &power->update_tag); |
| 426 | break; | 426 | break; |
| 427 | case 2: | 427 | case 2: |
| 428 | val = get_unaligned_be32(&power->update_tag) * | 428 | val = (u64)get_unaligned_be32(&power->update_tag) * |
| 429 | occ->powr_sample_time_us; | 429 | occ->powr_sample_time_us; |
| 430 | break; | 430 | break; |
| 431 | case 3: | 431 | case 3: |
| 432 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 432 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
| @@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 463 | &power->system.update_tag); | 463 | &power->system.update_tag); |
| 464 | break; | 464 | break; |
| 465 | case 2: | 465 | case 2: |
| 466 | val = get_unaligned_be32(&power->system.update_tag) * | 466 | val = (u64)get_unaligned_be32(&power->system.update_tag) * |
| 467 | occ->powr_sample_time_us; | 467 | occ->powr_sample_time_us; |
| 468 | break; | 468 | break; |
| 469 | case 3: | 469 | case 3: |
| 470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; | 470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; |
| @@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 477 | &power->proc.update_tag); | 477 | &power->proc.update_tag); |
| 478 | break; | 478 | break; |
| 479 | case 6: | 479 | case 6: |
| 480 | val = get_unaligned_be32(&power->proc.update_tag) * | 480 | val = (u64)get_unaligned_be32(&power->proc.update_tag) * |
| 481 | occ->powr_sample_time_us; | 481 | occ->powr_sample_time_us; |
| 482 | break; | 482 | break; |
| 483 | case 7: | 483 | case 7: |
| 484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; | 484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; |
| @@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 491 | &power->vdd.update_tag); | 491 | &power->vdd.update_tag); |
| 492 | break; | 492 | break; |
| 493 | case 10: | 493 | case 10: |
| 494 | val = get_unaligned_be32(&power->vdd.update_tag) * | 494 | val = (u64)get_unaligned_be32(&power->vdd.update_tag) * |
| 495 | occ->powr_sample_time_us; | 495 | occ->powr_sample_time_us; |
| 496 | break; | 496 | break; |
| 497 | case 11: | 497 | case 11: |
| 498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; | 498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; |
| @@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 505 | &power->vdn.update_tag); | 505 | &power->vdn.update_tag); |
| 506 | break; | 506 | break; |
| 507 | case 14: | 507 | case 14: |
| 508 | val = get_unaligned_be32(&power->vdn.update_tag) * | 508 | val = (u64)get_unaligned_be32(&power->vdn.update_tag) * |
| 509 | occ->powr_sample_time_us; | 509 | occ->powr_sample_time_us; |
| 510 | break; | 510 | break; |
| 511 | case 15: | 511 | case 15: |
| 512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; | 512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; |
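
Every occ_show_power_* hunk fixes the same promotion bug: a u32 update tag multiplied by a u32 sample time is evaluated in 32 bits and wraps before being stored into the 64-bit result; casting one operand to u64 forces a 64-bit multiply. A standalone illustration, with values chosen only to make the wrap visible:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t update_tag = 5000000;		/* ~5e6 samples		*/
	uint32_t sample_time_us = 1000;		/* 1 ms per sample	*/

	uint64_t wrong = update_tag * sample_time_us;		 /* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)update_tag * sample_time_us; /* 64-bit multiply	   */

	printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
	return 0;
}
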
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 8844c9565d2a..7053be59ad2e 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c | |||
| @@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = { | |||
| 88 | .data = (void *)2 | 88 | .data = (void *)2 |
| 89 | }, | 89 | }, |
| 90 | { | 90 | { |
| 91 | .compatible = "ti,tmp422", | 91 | .compatible = "ti,tmp442", |
| 92 | .data = (void *)3 | 92 | .data = (void *)3 |
| 93 | }, | 93 | }, |
| 94 | { }, | 94 | { }, |
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b532e2c9cf5c..f8c00b94817f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c | |||
| @@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, | |||
| 901 | master->regs + | 901 | master->regs + |
| 902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
| 903 | 903 | ||
| 904 | if (!old_dyn_addr) | ||
| 905 | return 0; | ||
| 906 | |||
| 907 | master->addrs[data->index] = dev->info.dyn_addr; | 904 | master->addrs[data->index] = dev->info.dyn_addr; |
| 908 | 905 | ||
| 909 | return 0; | 906 | return 0; |
| @@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) | |||
| 925 | return -ENOMEM; | 922 | return -ENOMEM; |
| 926 | 923 | ||
| 927 | data->index = pos; | 924 | data->index = pos; |
| 928 | master->addrs[pos] = dev->info.dyn_addr; | 925 | master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr; |
| 929 | master->free_pos &= ~BIT(pos); | 926 | master->free_pos &= ~BIT(pos); |
| 930 | i3c_dev_set_master_data(dev, data); | 927 | i3c_dev_set_master_data(dev, data); |
| 931 | 928 | ||
| 932 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), | 929 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), |
| 933 | master->regs + | 930 | master->regs + |
| 934 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 931 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
| 935 | 932 | ||
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index bbd79b8b1a80..8889a4fdb454 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c | |||
| @@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) | |||
| 1556 | return PTR_ERR(master->pclk); | 1556 | return PTR_ERR(master->pclk); |
| 1557 | 1557 | ||
| 1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); | 1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); |
| 1559 | if (IS_ERR(master->pclk)) | 1559 | if (IS_ERR(master->sysclk)) |
| 1560 | return PTR_ERR(master->pclk); | 1560 | return PTR_ERR(master->sysclk); |
| 1561 | 1561 | ||
| 1562 | irq = platform_get_irq(pdev, 0); | 1562 | irq = platform_get_irq(pdev, 0); |
| 1563 | if (irq < 0) | 1563 | if (irq < 0) |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 4c8c7a620d08..a5dc13576394 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
| @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) | |||
| 544 | drive->proc = proc_mkdir(drive->name, parent); | 544 | drive->proc = proc_mkdir(drive->name, parent); |
| 545 | if (drive->proc) { | 545 | if (drive->proc) { |
| 546 | ide_add_proc_entries(drive->proc, generic_drive_entries, drive); | 546 | ide_add_proc_entries(drive->proc, generic_drive_entries, drive); |
| 547 | proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, | 547 | proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, |
| 548 | drive->proc, &ide_settings_proc_fops, | 548 | drive->proc, &ide_settings_proc_fops, |
| 549 | drive); | 549 | drive); |
| 550 | } | 550 | } |
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index cafb1dcadc48..9d984f2a8ba7 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
| @@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev) | |||
| 142 | stepconfig |= STEPCONFIG_MODE_SWCNT; | 142 | stepconfig |= STEPCONFIG_MODE_SWCNT; |
| 143 | 143 | ||
| 144 | tiadc_writel(adc_dev, REG_STEPCONFIG(steps), | 144 | tiadc_writel(adc_dev, REG_STEPCONFIG(steps), |
| 145 | stepconfig | STEPCONFIG_INP(chan)); | 145 | stepconfig | STEPCONFIG_INP(chan) | |
| 146 | STEPCONFIG_INM_ADCREFM | | ||
| 147 | STEPCONFIG_RFP_VREFP | | ||
| 148 | STEPCONFIG_RFM_VREFN); | ||
| 146 | 149 | ||
| 147 | if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { | 150 | if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { |
| 148 | dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", | 151 | dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 63a7cc00bae0..84f077b2b90a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, | |||
| 494 | id_priv->id.route.addr.dev_addr.transport = | 494 | id_priv->id.route.addr.dev_addr.transport = |
| 495 | rdma_node_get_transport(cma_dev->device->node_type); | 495 | rdma_node_get_transport(cma_dev->device->node_type); |
| 496 | list_add_tail(&id_priv->list, &cma_dev->id_list); | 496 | list_add_tail(&id_priv->list, &cma_dev->id_list); |
| 497 | rdma_restrack_kadd(&id_priv->res); | 497 | if (id_priv->res.kern_name) |
| 498 | rdma_restrack_kadd(&id_priv->res); | ||
| 499 | else | ||
| 500 | rdma_restrack_uadd(&id_priv->res); | ||
| 498 | } | 501 | } |
| 499 | 502 | ||
| 500 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, | 503 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e600fc23ae62..3c97a8b6bf1e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c | |||
| @@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, | |||
| 584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, | 584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, |
| 585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) | 585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) |
| 586 | goto err; | 586 | goto err; |
| 587 | if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && | ||
| 588 | nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, | ||
| 589 | pd->unsafe_global_rkey)) | ||
| 590 | goto err; | ||
| 591 | 587 | ||
| 592 | if (fill_res_name_pid(msg, res)) | 588 | if (fill_res_name_pid(msg, res)) |
| 593 | goto err; | 589 | goto err; |
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index be6b8e1257d0..69f8db66925e 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h | |||
| @@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj, | |||
| 106 | enum uverbs_obj_access access, | 106 | enum uverbs_obj_access access, |
| 107 | bool commit); | 107 | bool commit); |
| 108 | 108 | ||
| 109 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); | ||
| 110 | |||
| 109 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 111 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
| 110 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 112 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
| 111 | 113 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6b12cc5f97b2..3317300ab036 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, | |||
| 60 | { | 60 | { |
| 61 | int ret; | 61 | int ret; |
| 62 | 62 | ||
| 63 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
| 64 | return uverbs_copy_to_struct_or_zero( | ||
| 65 | attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); | ||
| 66 | |||
| 63 | if (copy_to_user(attrs->ucore.outbuf, resp, | 67 | if (copy_to_user(attrs->ucore.outbuf, resp, |
| 64 | min(attrs->ucore.outlen, resp_len))) | 68 | min(attrs->ucore.outlen, resp_len))) |
| 65 | return -EFAULT; | 69 | return -EFAULT; |
| @@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) | |||
| 1181 | goto out_put; | 1185 | goto out_put; |
| 1182 | } | 1186 | } |
| 1183 | 1187 | ||
| 1188 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
| 1189 | ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); | ||
| 1190 | |||
| 1184 | ret = 0; | 1191 | ret = 0; |
| 1185 | 1192 | ||
| 1186 | out_put: | 1193 | out_put: |
| @@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) | |||
| 2012 | return -ENOMEM; | 2019 | return -ENOMEM; |
| 2013 | 2020 | ||
| 2014 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); | 2021 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); |
| 2015 | if (!qp) | 2022 | if (!qp) { |
| 2023 | ret = -EINVAL; | ||
| 2016 | goto out; | 2024 | goto out; |
| 2025 | } | ||
| 2017 | 2026 | ||
| 2018 | is_ud = qp->qp_type == IB_QPT_UD; | 2027 | is_ud = qp->qp_type == IB_QPT_UD; |
| 2019 | sg_ind = 0; | 2028 | sg_ind = 0; |
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c81ff698052..0ca04d224015 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c | |||
| @@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, | |||
| 144 | 0, uattr->len - len); | 144 | 0, uattr->len - len); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, | ||
| 148 | const struct uverbs_attr *attr) | ||
| 149 | { | ||
| 150 | struct bundle_priv *pbundle = | ||
| 151 | container_of(bundle, struct bundle_priv, bundle); | ||
| 152 | u16 flags; | ||
| 153 | |||
| 154 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | ||
| 155 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
| 156 | if (put_user(flags, | ||
| 157 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
| 158 | return -EFAULT; | ||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | |||
| 147 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, | 162 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, |
| 148 | const struct uverbs_api_attr *attr_uapi, | 163 | const struct uverbs_api_attr *attr_uapi, |
| 149 | struct uverbs_objs_arr_attr *attr, | 164 | struct uverbs_objs_arr_attr *attr, |
| @@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, | |||
| 456 | } | 471 | } |
| 457 | 472 | ||
| 458 | /* | 473 | /* |
| 474 | * Until the drivers are revised to use the bundle directly we have to | ||
| 475 | * assume that the driver wrote to its UHW_OUT and flag userspace | ||
| 476 | * appropriately. | ||
| 477 | */ | ||
| 478 | if (!ret && pbundle->method_elm->has_udata) { | ||
| 479 | const struct uverbs_attr *attr = | ||
| 480 | uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); | ||
| 481 | |||
| 482 | if (!IS_ERR(attr)) | ||
| 483 | ret = uverbs_set_output(&pbundle->bundle, attr); | ||
| 484 | } | ||
| 485 | |||
| 486 | /* | ||
| 459 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can | 487 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can |
| 460 | * not invoke the method because the request is not supported. No | 488 | * not invoke the method because the request is not supported. No |
| 461 | * other cases should return this code. | 489 | * other cases should return this code. |
| @@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, | |||
| 706 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | 734 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, |
| 707 | const void *from, size_t size) | 735 | const void *from, size_t size) |
| 708 | { | 736 | { |
| 709 | struct bundle_priv *pbundle = | ||
| 710 | container_of(bundle, struct bundle_priv, bundle); | ||
| 711 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 737 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
| 712 | u16 flags; | ||
| 713 | size_t min_size; | 738 | size_t min_size; |
| 714 | 739 | ||
| 715 | if (IS_ERR(attr)) | 740 | if (IS_ERR(attr)) |
| @@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | |||
| 719 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) | 744 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) |
| 720 | return -EFAULT; | 745 | return -EFAULT; |
| 721 | 746 | ||
| 722 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | 747 | return uverbs_set_output(bundle, attr); |
| 723 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
| 724 | if (put_user(flags, | ||
| 725 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
| 726 | return -EFAULT; | ||
| 727 | |||
| 728 | return 0; | ||
| 729 | } | 748 | } |
| 730 | EXPORT_SYMBOL(uverbs_copy_to); | 749 | EXPORT_SYMBOL(uverbs_copy_to); |
| 731 | 750 | ||
| 751 | |||
| 752 | /* | ||
| 753 | * This is only used if the caller has directly used copy_to_user to write the | ||
| 754 | * data. It signals to user space that the buffer is filled in. | ||
| 755 | */ | ||
| 756 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) | ||
| 757 | { | ||
| 758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | ||
| 759 | |||
| 760 | if (IS_ERR(attr)) | ||
| 761 | return PTR_ERR(attr); | ||
| 762 | |||
| 763 | return uverbs_set_output(bundle, attr); | ||
| 764 | } | ||
| 765 | |||
| 732 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, | 766 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, |
| 733 | size_t idx, s64 lower_bound, u64 upper_bound, | 767 | size_t idx, s64 lower_bound, u64 upper_bound, |
| 734 | s64 *def_val) | 768 | s64 *def_val) |
| @@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, | |||
| 757 | { | 791 | { |
| 758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 792 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
| 759 | 793 | ||
| 760 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), | 794 | if (size < attr->ptr_attr.len) { |
| 761 | attr->ptr_attr.len)) | 795 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, |
| 762 | return -EFAULT; | 796 | attr->ptr_attr.len - size)) |
| 797 | return -EFAULT; | ||
| 798 | } | ||
| 763 | return uverbs_copy_to(bundle, idx, from, size); | 799 | return uverbs_copy_to(bundle, idx, from, size); |
| 764 | } | 800 | } |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb0007aa0c27..2890a77339e1 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
| 690 | 690 | ||
| 691 | buf += sizeof(hdr); | 691 | buf += sizeof(hdr); |
| 692 | 692 | ||
| 693 | memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); | ||
| 693 | bundle.ufile = file; | 694 | bundle.ufile = file; |
| 694 | if (!method_elm->is_ex) { | 695 | if (!method_elm->is_ex) { |
| 695 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); | 696 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82cb6b71ac7c..e3e9dd54caa2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
| @@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
| 534 | { | 534 | { |
| 535 | struct mthca_ucontext *context; | 535 | struct mthca_ucontext *context; |
| 536 | 536 | ||
| 537 | qp = kmalloc(sizeof *qp, GFP_KERNEL); | 537 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
| 538 | if (!qp) | 538 | if (!qp) |
| 539 | return ERR_PTR(-ENOMEM); | 539 | return ERR_PTR(-ENOMEM); |
| 540 | 540 | ||
| @@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
| 600 | if (udata) | 600 | if (udata) |
| 601 | return ERR_PTR(-EINVAL); | 601 | return ERR_PTR(-EINVAL); |
| 602 | 602 | ||
| 603 | qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); | 603 | qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); |
| 604 | if (!qp) | 604 | if (!qp) |
| 605 | return ERR_PTR(-ENOMEM); | 605 | return ERR_PTR(-ENOMEM); |
| 606 | 606 | ||
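
The mthca hunks swap kmalloc() for kzalloc() when allocating the QP structures, presumably so that fields which are only filled in on some paths start out as zero/NULL instead of heap garbage that later error handling might act on. A trivial userspace analogue of the habit:

#include <stdlib.h>

struct qp {
	int state;
	void *umem;	/* only set for userspace QPs */
};

static struct qp *alloc_qp(void)
{
	/* calloc() plays the role of kzalloc(): every field starts zeroed. */
	return calloc(1, sizeof(struct qp));
}

int main(void)
{
	struct qp *qp = alloc_qp();
	int stale = qp && qp->umem != NULL;	/* always 0 with calloc() */

	free(qp);
	return stale;
}
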
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997e..3c633ab58052 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
| @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) | |||
| 427 | 427 | ||
| 428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) | 428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) |
| 429 | { | 429 | { |
| 430 | return (enum pvrdma_wr_opcode)op; | 430 | switch (op) { |
| 431 | case IB_WR_RDMA_WRITE: | ||
| 432 | return PVRDMA_WR_RDMA_WRITE; | ||
| 433 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
| 434 | return PVRDMA_WR_RDMA_WRITE_WITH_IMM; | ||
| 435 | case IB_WR_SEND: | ||
| 436 | return PVRDMA_WR_SEND; | ||
| 437 | case IB_WR_SEND_WITH_IMM: | ||
| 438 | return PVRDMA_WR_SEND_WITH_IMM; | ||
| 439 | case IB_WR_RDMA_READ: | ||
| 440 | return PVRDMA_WR_RDMA_READ; | ||
| 441 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
| 442 | return PVRDMA_WR_ATOMIC_CMP_AND_SWP; | ||
| 443 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
| 444 | return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; | ||
| 445 | case IB_WR_LSO: | ||
| 446 | return PVRDMA_WR_LSO; | ||
| 447 | case IB_WR_SEND_WITH_INV: | ||
| 448 | return PVRDMA_WR_SEND_WITH_INV; | ||
| 449 | case IB_WR_RDMA_READ_WITH_INV: | ||
| 450 | return PVRDMA_WR_RDMA_READ_WITH_INV; | ||
| 451 | case IB_WR_LOCAL_INV: | ||
| 452 | return PVRDMA_WR_LOCAL_INV; | ||
| 453 | case IB_WR_REG_MR: | ||
| 454 | return PVRDMA_WR_FAST_REG_MR; | ||
| 455 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | ||
| 456 | return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; | ||
| 457 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: | ||
| 458 | return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; | ||
| 459 | case IB_WR_REG_SIG_MR: | ||
| 460 | return PVRDMA_WR_REG_SIG_MR; | ||
| 461 | default: | ||
| 462 | return PVRDMA_WR_ERROR; | ||
| 463 | } | ||
| 431 | } | 464 | } |
| 432 | 465 | ||
| 433 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( | 466 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( |
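
The pvrdma.h change replaces a blind cast between the IB and device opcode enums with an explicit switch, so any IB opcode the device does not know maps to PVRDMA_WR_ERROR, which the post_send hunk below then rejects, instead of whatever numeric value happens to line up. The pattern, reduced to made-up enums:

enum ib_op { IB_SEND, IB_WRITE, IB_NEW_FANCY_OP };
enum hw_op { HW_ERROR = -1, HW_SEND = 0, HW_WRITE = 1 };

/* Map explicitly; unknown values become a sentinel the caller can refuse. */
static enum hw_op ib_to_hw(enum ib_op op)
{
	switch (op) {
	case IB_SEND:
		return HW_SEND;
	case IB_WRITE:
		return HW_WRITE;
	default:
		return HW_ERROR;	/* a cast would silently pass 2 through */
	}
}

int main(void)
{
	return ib_to_hw(IB_NEW_FANCY_OP) == HW_ERROR ? 0 : 1;
}
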
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3acf74cbe266..1ec3646087ba 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
| @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, | |||
| 721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | 721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) |
| 722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; | 722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; |
| 723 | 723 | ||
| 724 | if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { | ||
| 725 | *bad_wr = wr; | ||
| 726 | ret = -EINVAL; | ||
| 727 | goto out; | ||
| 728 | } | ||
| 729 | |||
| 724 | switch (qp->ibqp.qp_type) { | 730 | switch (qp->ibqp.qp_type) { |
| 725 | case IB_QPT_GSI: | 731 | case IB_QPT_GSI: |
| 726 | case IB_QPT_UD: | 732 | case IB_QPT_UD: |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index cfc8b94527b9..aa4e431cbcd3 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -252,6 +252,8 @@ static const struct xpad_device { | |||
| 252 | { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, | 252 | { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, |
| 253 | { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, | 253 | { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, |
| 254 | { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, | 254 | { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, |
| 255 | { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, | ||
| 256 | { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, | ||
| 255 | { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, | 257 | { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, |
| 256 | { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 258 | { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
| 257 | { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, | 259 | { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, |
| @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { | |||
| 428 | XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ | 430 | XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ |
| 429 | XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ | 431 | XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ |
| 430 | XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ | 432 | XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ |
| 433 | XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ | ||
| 431 | XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ | 434 | XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ |
| 432 | XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ | 435 | XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ |
| 433 | XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ | 436 | XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 8ec483e8688b..26ec603fe220 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/init.h> | 39 | #include <linux/init.h> |
| 40 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
| 41 | #include <linux/miscdevice.h> | 41 | #include <linux/miscdevice.h> |
| 42 | #include <linux/overflow.h> | ||
| 42 | #include <linux/input/mt.h> | 43 | #include <linux/input/mt.h> |
| 43 | #include "../input-compat.h" | 44 | #include "../input-compat.h" |
| 44 | 45 | ||
| @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) | |||
| 405 | static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | 406 | static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, |
| 406 | const struct input_absinfo *abs) | 407 | const struct input_absinfo *abs) |
| 407 | { | 408 | { |
| 408 | int min, max; | 409 | int min, max, range; |
| 409 | 410 | ||
| 410 | min = abs->minimum; | 411 | min = abs->minimum; |
| 411 | max = abs->maximum; | 412 | max = abs->maximum; |
| @@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | |||
| 417 | return -EINVAL; | 418 | return -EINVAL; |
| 418 | } | 419 | } |
| 419 | 420 | ||
| 420 | if (abs->flat > max - min) { | 421 | if (!check_sub_overflow(max, min, &range) && abs->flat > range) { |
| 421 | printk(KERN_DEBUG | 422 | printk(KERN_DEBUG |
| 422 | "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", | 423 | "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", |
| 423 | UINPUT_NAME, code, abs->flat, min, max); | 424 | UINPUT_NAME, code, abs->flat, min, max); |
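
The uinput fix wraps the max - min subtraction in check_sub_overflow(), so the range used in the abs->flat comparison is computed only when it actually fits in an int; with limits like INT_MAX and INT_MIN the old subtraction overflowed. In userspace the same check is the compiler builtin the kernel helper wraps:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the fixed condition: only compare when max - min does not overflow. */
static bool flat_out_of_range(int min, int max, int flat)
{
	int range;

	return !__builtin_sub_overflow(max, min, &range) && flat > range;
}

int main(void)
{
	printf("%d\n", flat_out_of_range(0, 5, 10));			/* 1: flat exceeds range */
	printf("%d\n", flat_out_of_range(INT_MIN, INT_MAX, 10));	/* 0: check skipped	 */
	return 0;
}
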
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index b36084710f69..bae08226e3d9 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c | |||
| @@ -195,6 +195,8 @@ static int olpc_apsp_probe(struct platform_device *pdev) | |||
| 195 | if (!priv) | 195 | if (!priv) |
| 196 | return -ENOMEM; | 196 | return -ENOMEM; |
| 197 | 197 | ||
| 198 | priv->dev = &pdev->dev; | ||
| 199 | |||
| 198 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 200 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 199 | priv->base = devm_ioremap_resource(&pdev->dev, res); | 201 | priv->base = devm_ioremap_resource(&pdev->dev, res); |
| 200 | if (IS_ERR(priv->base)) { | 202 | if (IS_ERR(priv->base)) { |
| @@ -248,7 +250,6 @@ static int olpc_apsp_probe(struct platform_device *pdev) | |||
| 248 | goto err_irq; | 250 | goto err_irq; |
| 249 | } | 251 | } |
| 250 | 252 | ||
| 251 | priv->dev = &pdev->dev; | ||
| 252 | device_init_wakeup(priv->dev, 1); | 253 | device_init_wakeup(priv->dev, 1); |
| 253 | platform_set_drvdata(pdev, priv); | 254 | platform_set_drvdata(pdev, priv); |
| 254 | 255 | ||
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index af6027cc7bbf..068dbbc610fc 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
| @@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06 | |||
| 698 | 698 | ||
| 699 | config TOUCHSCREEN_RASPBERRYPI_FW | 699 | config TOUCHSCREEN_RASPBERRYPI_FW |
| 700 | tristate "Raspberry Pi's firmware base touch screen support" | 700 | tristate "Raspberry Pi's firmware base touch screen support" |
| 701 | depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST | 701 | depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST) |
| 702 | help | 702 | help |
| 703 | Say Y here if you have the official Raspberry Pi 7 inch screen on | 703 | Say Y here if you have the official Raspberry Pi 7 inch screen on |
| 704 | your system. | 704 | your system. |
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index d8947b28db2d..f04a6df65eb8 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
| @@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
| 224 | * If we have reason to believe the IOMMU driver missed the initial | 224 | * If we have reason to believe the IOMMU driver missed the initial |
| 225 | * probe for dev, replay it to get things in order. | 225 | * probe for dev, replay it to get things in order. |
| 226 | */ | 226 | */ |
| 227 | if (dev->bus && !device_iommu_mapped(dev)) | 227 | if (!err && dev->bus && !device_iommu_mapped(dev)) |
| 228 | err = iommu_probe_device(dev); | 228 | err = iommu_probe_device(dev); |
| 229 | 229 | ||
| 230 | /* Ignore all other errors apart from EPROBE_DEFER */ | 230 | /* Ignore all other errors apart from EPROBE_DEFER */ |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index db20e992a40f..7f2a45445b00 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -2399,13 +2399,14 @@ static void its_free_device(struct its_device *its_dev) | |||
| 2399 | kfree(its_dev); | 2399 | kfree(its_dev); |
| 2400 | } | 2400 | } |
| 2401 | 2401 | ||
| 2402 | static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | 2402 | static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) |
| 2403 | { | 2403 | { |
| 2404 | int idx; | 2404 | int idx; |
| 2405 | 2405 | ||
| 2406 | idx = find_first_zero_bit(dev->event_map.lpi_map, | 2406 | idx = bitmap_find_free_region(dev->event_map.lpi_map, |
| 2407 | dev->event_map.nr_lpis); | 2407 | dev->event_map.nr_lpis, |
| 2408 | if (idx == dev->event_map.nr_lpis) | 2408 | get_count_order(nvecs)); |
| 2409 | if (idx < 0) | ||
| 2409 | return -ENOSPC; | 2410 | return -ENOSPC; |
| 2410 | 2411 | ||
| 2411 | *hwirq = dev->event_map.lpi_base + idx; | 2412 | *hwirq = dev->event_map.lpi_base + idx; |
| @@ -2501,21 +2502,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 2501 | int err; | 2502 | int err; |
| 2502 | int i; | 2503 | int i; |
| 2503 | 2504 | ||
| 2504 | for (i = 0; i < nr_irqs; i++) { | 2505 | err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); |
| 2505 | err = its_alloc_device_irq(its_dev, &hwirq); | 2506 | if (err) |
| 2506 | if (err) | 2507 | return err; |
| 2507 | return err; | ||
| 2508 | 2508 | ||
| 2509 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); | 2509 | for (i = 0; i < nr_irqs; i++) { |
| 2510 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | ||
| 2510 | if (err) | 2511 | if (err) |
| 2511 | return err; | 2512 | return err; |
| 2512 | 2513 | ||
| 2513 | irq_domain_set_hwirq_and_chip(domain, virq + i, | 2514 | irq_domain_set_hwirq_and_chip(domain, virq + i, |
| 2514 | hwirq, &its_irq_chip, its_dev); | 2515 | hwirq + i, &its_irq_chip, its_dev); |
| 2515 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); | 2516 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); |
| 2516 | pr_debug("ID:%d pID:%d vID:%d\n", | 2517 | pr_debug("ID:%d pID:%d vID:%d\n", |
| 2517 | (int)(hwirq - its_dev->event_map.lpi_base), | 2518 | (int)(hwirq + i - its_dev->event_map.lpi_base), |
| 2518 | (int) hwirq, virq + i); | 2519 | (int)(hwirq + i), virq + i); |
| 2519 | } | 2520 | } |
| 2520 | 2521 | ||
| 2521 | return 0; | 2522 | return 0; |
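
The ITS allocator change reserves all nvecs events at once: bitmap_find_free_region() hands back a naturally aligned, power-of-two block of bits, so the hwirq + i values programmed in the loop are guaranteed to be consecutive, where the old per-vector find_first_zero_bit() only ever found one slot at a time. A toy single-word version of that allocation, showing the shape rather than the kernel helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Find a free, naturally aligned block of (1 << order) bits in a 64-bit
 * map, mark it allocated and return its first bit index, or -1.
 */
static int find_free_region(uint64_t *map, int bits, int order)
{
	int size = 1 << order;

	for (int start = 0; start + size <= bits; start += size) {
		uint64_t mask = (size < 64 ? (UINT64_C(1) << size) - 1
					   : ~UINT64_C(0)) << start;

		if ((*map & mask) == 0) {
			*map |= mask;
			return start;
		}
	}
	return -1;
}

int main(void)
{
	uint64_t lpi_map = 0x5;	/* events 0 and 2 already in use */
	int base = find_free_region(&lpi_map, 64, 2);	/* 4 consecutive events */

	printf("base=%d map=%#llx\n", base, (unsigned long long)lpi_map);	/* base=4 */
	return 0;
}
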
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index ad70e7c416e3..fbfa7ff6deb1 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c | |||
| @@ -24,7 +24,7 @@ struct mbi_range { | |||
| 24 | unsigned long *bm; | 24 | unsigned long *bm; |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
| 27 | static struct mutex mbi_lock; | 27 | static DEFINE_MUTEX(mbi_lock); |
| 28 | static phys_addr_t mbi_phys_base; | 28 | static phys_addr_t mbi_phys_base; |
| 29 | static struct mbi_range *mbi_ranges; | 29 | static struct mbi_range *mbi_ranges; |
| 30 | static unsigned int mbi_range_nr; | 30 | static unsigned int mbi_range_nr; |
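
The MBI fix is purely an initialization bug: a bare "static struct mutex mbi_lock;" is never passed through mutex_init(), so the first mutex_lock() operates on a zeroed structure whose wait list was never set up; DEFINE_MUTEX() declares and statically initializes the lock in one step. The pthread analogue of the safe idiom (glibc happens to accept a zeroed pthread mutex, which is exactly the kind of luck the kernel type does not offer):

#include <pthread.h>

/* Analogue of DEFINE_MUTEX(mbi_lock): declaration and initializer together. */
static pthread_mutex_t mbi_lock = PTHREAD_MUTEX_INITIALIZER;

static int range_nr;

static int take_range(void)
{
	int nr;

	pthread_mutex_lock(&mbi_lock);
	nr = range_nr++;
	pthread_mutex_unlock(&mbi_lock);
	return nr;
}

int main(void)
{
	return take_range();
}
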
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c index e9256dee1a45..8b81271c823c 100644 --- a/drivers/irqchip/irq-madera.c +++ b/drivers/irqchip/irq-madera.c | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 10 | #include <linux/gpio.h> | ||
| 11 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
| 12 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
| 13 | #include <linux/irqdomain.h> | 12 | #include <linux/irqdomain.h> |
| @@ -16,7 +15,6 @@ | |||
| 16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 17 | #include <linux/of.h> | 16 | #include <linux/of.h> |
| 18 | #include <linux/of_device.h> | 17 | #include <linux/of_device.h> |
| 19 | #include <linux/of_gpio.h> | ||
| 20 | #include <linux/of_irq.h> | 18 | #include <linux/of_irq.h> |
| 21 | #include <linux/irqchip/irq-madera.h> | 19 | #include <linux/irqchip/irq-madera.h> |
| 22 | #include <linux/mfd/madera/core.h> | 20 | #include <linux/mfd/madera/core.h> |
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 6edfd4bfa169..a93296b9b45d 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c | |||
| @@ -822,6 +822,7 @@ out_unmap: | |||
| 822 | static const struct irq_domain_ops stm32_exti_h_domain_ops = { | 822 | static const struct irq_domain_ops stm32_exti_h_domain_ops = { |
| 823 | .alloc = stm32_exti_h_domain_alloc, | 823 | .alloc = stm32_exti_h_domain_alloc, |
| 824 | .free = irq_domain_free_irqs_common, | 824 | .free = irq_domain_free_irqs_common, |
| 825 | .xlate = irq_domain_xlate_twocell, | ||
| 825 | }; | 826 | }; |
| 826 | 827 | ||
| 827 | static int | 828 | static int |
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 4ac378e48902..40ca1e8fa09f 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c | |||
| @@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) | |||
| 423 | int i, j; | 423 | int i, j; |
| 424 | 424 | ||
| 425 | for (j = 0; j < AVM_MAXVERSION; j++) | 425 | for (j = 0; j < AVM_MAXVERSION; j++) |
| 426 | cinfo->version[j] = "\0\0" + 1; | 426 | cinfo->version[j] = ""; |
| 427 | for (i = 0, j = 0; | 427 | for (i = 0, j = 0; |
| 428 | j < AVM_MAXVERSION && i < cinfo->versionlen; | 428 | j < AVM_MAXVERSION && i < cinfo->versionlen; |
| 429 | j++, i += cinfo->versionbuf[i] + 1) | 429 | j++, i += cinfo->versionbuf[i] + 1) |
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 6d05946b445e..124ff530da82 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c | |||
| @@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw) | |||
| 262 | struct dchannel *dch = &hw->dch; | 262 | struct dchannel *dch = &hw->dch; |
| 263 | int i; | 263 | int i; |
| 264 | 264 | ||
| 265 | phi = kzalloc(sizeof(struct ph_info) + | 265 | phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC); |
| 266 | dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC); | ||
| 267 | phi->dch.ch.protocol = hw->protocol; | 266 | phi->dch.ch.protocol = hw->protocol; |
| 268 | phi->dch.ch.Flags = dch->Flags; | 267 | phi->dch.ch.Flags = dch->Flags; |
| 269 | phi->dch.state = dch->state; | 268 | phi->dch.state = dch->state; |
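
The hfcsusb hunk replaces the open-coded sizeof(header) + n * sizeof(element) with struct_size(), which sizes a structure ending in a flexible array and saturates on multiplication overflow rather than wrapping to a short allocation. A freestanding sketch of the arithmetic it guards; the overflow-checked helper below is illustrative, not the kernel macro, and the struct fields are simplified:

#include <stdint.h>
#include <stdlib.h>

struct ph_info_ch { int protocol; int flags; };

struct ph_info {
	int state;
	int nrbchan;
	struct ph_info_ch bch[];	/* flexible array member */
};

/* Illustrative analogue of struct_size(phi, bch, n): checked header + n elements. */
static size_t ph_info_size(size_t n, int *ok)
{
	size_t bytes;

	*ok = !__builtin_mul_overflow(n, sizeof(struct ph_info_ch), &bytes) &&
	      !__builtin_add_overflow(bytes, sizeof(struct ph_info), &bytes);
	return *ok ? bytes : SIZE_MAX;
}

int main(void)
{
	int ok;
	size_t len = ph_info_size(4, &ok);
	struct ph_info *phi = ok ? calloc(1, len) : NULL;
	int ret = phi ? 0 : 1;

	free(phi);
	return ret;
}
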
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 1b2239c1d569..dc1cded716c1 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
| @@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 1437 | { | 1437 | { |
| 1438 | modem_info *info = (modem_info *) tty->driver_data; | 1438 | modem_info *info = (modem_info *) tty->driver_data; |
| 1439 | 1439 | ||
| 1440 | mutex_lock(&modem_info_mutex); | ||
| 1440 | if (!old_termios) | 1441 | if (!old_termios) |
| 1441 | isdn_tty_change_speed(info); | 1442 | isdn_tty_change_speed(info); |
| 1442 | else { | 1443 | else { |
| 1443 | if (tty->termios.c_cflag == old_termios->c_cflag && | 1444 | if (tty->termios.c_cflag == old_termios->c_cflag && |
| 1444 | tty->termios.c_ispeed == old_termios->c_ispeed && | 1445 | tty->termios.c_ispeed == old_termios->c_ispeed && |
| 1445 | tty->termios.c_ospeed == old_termios->c_ospeed) | 1446 | tty->termios.c_ospeed == old_termios->c_ospeed) { |
| 1447 | mutex_unlock(&modem_info_mutex); | ||
| 1446 | return; | 1448 | return; |
| 1449 | } | ||
| 1447 | isdn_tty_change_speed(info); | 1450 | isdn_tty_change_speed(info); |
| 1448 | } | 1451 | } |
| 1452 | mutex_unlock(&modem_info_mutex); | ||
| 1449 | } | 1453 | } |
| 1450 | 1454 | ||
| 1451 | /* | 1455 | /* |
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index a2e74feee2b2..fd64df5a57a5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
| @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) | |||
| 318 | 318 | ||
| 319 | /* Let the programs run for couple of ms and check the engine status */ | 319 | /* Let the programs run for couple of ms and check the engine status */ |
| 320 | usleep_range(3000, 6000); | 320 | usleep_range(3000, 6000); |
| 321 | lp55xx_read(chip, LP5523_REG_STATUS, &status); | 321 | ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); |
| 322 | if (ret) | ||
| 323 | return ret; | ||
| 322 | status &= LP5523_ENG_STATUS_MASK; | 324 | status &= LP5523_ENG_STATUS_MASK; |
| 323 | 325 | ||
| 324 | if (status != LP5523_ENG_STATUS_MASK) { | 326 | if (status != LP5523_ENG_STATUS_MASK) { |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0ff22159a0ca..47d4e0d30bf0 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
| @@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key | |||
| 2414 | * capi:cipher_api_spec-iv:ivopts | 2414 | * capi:cipher_api_spec-iv:ivopts |
| 2415 | */ | 2415 | */ |
| 2416 | tmp = &cipher_in[strlen("capi:")]; | 2416 | tmp = &cipher_in[strlen("capi:")]; |
| 2417 | cipher_api = strsep(&tmp, "-"); | 2417 | |
| 2418 | *ivmode = strsep(&tmp, ":"); | 2418 | /* Separate IV options if present, it can contain another '-' in hash name */ |
| 2419 | *ivopts = tmp; | 2419 | *ivopts = strrchr(tmp, ':'); |
| 2420 | if (*ivopts) { | ||
| 2421 | **ivopts = '\0'; | ||
| 2422 | (*ivopts)++; | ||
| 2423 | } | ||
| 2424 | /* Parse IV mode */ | ||
| 2425 | *ivmode = strrchr(tmp, '-'); | ||
| 2426 | if (*ivmode) { | ||
| 2427 | **ivmode = '\0'; | ||
| 2428 | (*ivmode)++; | ||
| 2429 | } | ||
| 2430 | /* The rest is crypto API spec */ | ||
| 2431 | cipher_api = tmp; | ||
| 2420 | 2432 | ||
| 2421 | if (*ivmode && !strcmp(*ivmode, "lmk")) | 2433 | if (*ivmode && !strcmp(*ivmode, "lmk")) |
| 2422 | cc->tfms_count = 64; | 2434 | cc->tfms_count = 64; |
| @@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key | |||
| 2486 | goto bad_mem; | 2498 | goto bad_mem; |
| 2487 | 2499 | ||
| 2488 | chainmode = strsep(&tmp, "-"); | 2500 | chainmode = strsep(&tmp, "-"); |
| 2489 | *ivopts = strsep(&tmp, "-"); | 2501 | *ivmode = strsep(&tmp, ":"); |
| 2490 | *ivmode = strsep(&*ivopts, ":"); | 2502 | *ivopts = tmp; |
| 2491 | |||
| 2492 | if (tmp) | ||
| 2493 | DMWARN("Ignoring unexpected additional cipher options"); | ||
| 2494 | 2503 | ||
| 2495 | /* | 2504 | /* |
| 2496 | * For compatibility with the original dm-crypt mapping format, if | 2505 | * For compatibility with the original dm-crypt mapping format, if |
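
The dm-crypt parser rework handles a '-' appearing inside the pieces themselves: the crypto API spec can contain one (for instance an Adiantum spec such as xchacha12,aes-adiantum), and per the comment in the hunk the hash name in the IV options can as well. Splitting from the right with strrchr() peels off :ivopts first, then -ivmode, and whatever remains is the cipher API string. A small standalone sketch of that parse:

#include <stdio.h>
#include <string.h>

/*
 * Right-to-left parse of "cipher_api[-ivmode[:ivopts]]", mirroring the
 * dm-crypt change: strip ":ivopts" first, then "-ivmode", and what is
 * left is the crypto API spec even if it contains '-' itself.
 */
static void parse_capi(char *tmp)
{
	char *ivopts = strrchr(tmp, ':');
	char *ivmode;

	if (ivopts)
		*ivopts++ = '\0';
	ivmode = strrchr(tmp, '-');
	if (ivmode)
		*ivmode++ = '\0';

	printf("api=%s ivmode=%s ivopts=%s\n",
	       tmp, ivmode ? ivmode : "(none)", ivopts ? ivopts : "(none)");
}

int main(void)
{
	char s[] = "xchacha12,aes-adiantum-plain64";

	parse_capi(s);	/* api=xchacha12,aes-adiantum ivmode=plain64 ivopts=(none) */
	return 0;
}
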
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 20b0776e39ef..ed3caceaed07 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
| @@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td, | |||
| 1678 | return r; | 1678 | return r; |
| 1679 | } | 1679 | } |
| 1680 | 1680 | ||
| 1681 | int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) | 1681 | int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) |
| 1682 | { | 1682 | { |
| 1683 | int r; | 1683 | int r; |
| 1684 | uint32_t ref_count; | 1684 | uint32_t ref_count; |
| @@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu | |||
| 1686 | down_read(&pmd->root_lock); | 1686 | down_read(&pmd->root_lock); |
| 1687 | r = dm_sm_get_count(pmd->data_sm, b, &ref_count); | 1687 | r = dm_sm_get_count(pmd->data_sm, b, &ref_count); |
| 1688 | if (!r) | 1688 | if (!r) |
| 1689 | *result = (ref_count != 0); | 1689 | *result = (ref_count > 1); |
| 1690 | up_read(&pmd->root_lock); | 1690 | up_read(&pmd->root_lock); |
| 1691 | 1691 | ||
| 1692 | return r; | 1692 | return r; |
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 35e954ea20a9..f6be0d733c20 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h | |||
| @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, | |||
| 195 | 195 | ||
| 196 | int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); | 196 | int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); |
| 197 | 197 | ||
| 198 | int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); | 198 | int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); |
| 199 | 199 | ||
| 200 | int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); | 200 | int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); |
| 201 | int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); | 201 | int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index dadd9696340c..ca8af21bf644 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | |||
| 1048 | * passdown we have to check that these blocks are now unused. | 1048 | * passdown we have to check that these blocks are now unused. |
| 1049 | */ | 1049 | */ |
| 1050 | int r = 0; | 1050 | int r = 0; |
| 1051 | bool used = true; | 1051 | bool shared = true; |
| 1052 | struct thin_c *tc = m->tc; | 1052 | struct thin_c *tc = m->tc; |
| 1053 | struct pool *pool = tc->pool; | 1053 | struct pool *pool = tc->pool; |
| 1054 | dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; | 1054 | dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; |
| @@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | |||
| 1058 | while (b != end) { | 1058 | while (b != end) { |
| 1059 | /* find start of unmapped run */ | 1059 | /* find start of unmapped run */ |
| 1060 | for (; b < end; b++) { | 1060 | for (; b < end; b++) { |
| 1061 | r = dm_pool_block_is_used(pool->pmd, b, &used); | 1061 | r = dm_pool_block_is_shared(pool->pmd, b, &shared); |
| 1062 | if (r) | 1062 | if (r) |
| 1063 | goto out; | 1063 | goto out; |
| 1064 | 1064 | ||
| 1065 | if (!used) | 1065 | if (!shared) |
| 1066 | break; | 1066 | break; |
| 1067 | } | 1067 | } |
| 1068 | 1068 | ||
| @@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | |||
| 1071 | 1071 | ||
| 1072 | /* find end of run */ | 1072 | /* find end of run */ |
| 1073 | for (e = b + 1; e != end; e++) { | 1073 | for (e = b + 1; e != end; e++) { |
| 1074 | r = dm_pool_block_is_used(pool->pmd, e, &used); | 1074 | r = dm_pool_block_is_shared(pool->pmd, e, &shared); |
| 1075 | if (r) | 1075 | if (r) |
| 1076 | goto out; | 1076 | goto out; |
| 1077 | 1077 | ||
| 1078 | if (used) | 1078 | if (shared) |
| 1079 | break; | 1079 | break; |
| 1080 | } | 1080 | } |
| 1081 | 1081 | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d67c95ef8d7e..2b53c3841b53 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, | |||
| 1320 | 1320 | ||
| 1321 | __bio_clone_fast(clone, bio); | 1321 | __bio_clone_fast(clone, bio); |
| 1322 | 1322 | ||
| 1323 | if (unlikely(bio_integrity(bio) != NULL)) { | 1323 | if (bio_integrity(bio)) { |
| 1324 | int r; | 1324 | int r; |
| 1325 | 1325 | ||
| 1326 | if (unlikely(!dm_target_has_integrity(tio->ti->type) && | 1326 | if (unlikely(!dm_target_has_integrity(tio->ti->type) && |
| @@ -1336,11 +1336,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, | |||
| 1336 | return r; | 1336 | return r; |
| 1337 | } | 1337 | } |
| 1338 | 1338 | ||
| 1339 | bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); | 1339 | bio_trim(clone, sector - clone->bi_iter.bi_sector, len); |
| 1340 | clone->bi_iter.bi_size = to_bytes(len); | ||
| 1341 | |||
| 1342 | if (unlikely(bio_integrity(bio) != NULL)) | ||
| 1343 | bio_integrity_trim(clone); | ||
| 1344 | 1340 | ||
| 1345 | return 0; | 1341 | return 0; |
| 1346 | } | 1342 | } |
| @@ -1588,6 +1584,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md, | |||
| 1588 | ci->sector = bio->bi_iter.bi_sector; | 1584 | ci->sector = bio->bi_iter.bi_sector; |
| 1589 | } | 1585 | } |
| 1590 | 1586 | ||
| 1587 | #define __dm_part_stat_sub(part, field, subnd) \ | ||
| 1588 | (part_stat_get(part, field) -= (subnd)) | ||
| 1589 | |||
| 1591 | /* | 1590 | /* |
| 1592 | * Entry point to split a bio into clones and submit them to the targets. | 1591 | * Entry point to split a bio into clones and submit them to the targets. |
| 1593 | */ | 1592 | */ |
| @@ -1642,7 +1641,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, | |||
| 1642 | struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, | 1641 | struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, |
| 1643 | GFP_NOIO, &md->queue->bio_split); | 1642 | GFP_NOIO, &md->queue->bio_split); |
| 1644 | ci.io->orig_bio = b; | 1643 | ci.io->orig_bio = b; |
| 1644 | |||
| 1645 | /* | ||
| 1646 | * Adjust IO stats for each split, otherwise upon queue | ||
| 1647 | * reentry there will be redundant IO accounting. | ||
| 1648 | * NOTE: this is a stop-gap fix, a proper fix involves | ||
| 1649 | * significant refactoring of DM core's bio splitting | ||
| 1650 | * (by eliminating DM's splitting and just using bio_split) | ||
| 1651 | */ | ||
| 1652 | part_stat_lock(); | ||
| 1653 | __dm_part_stat_sub(&dm_disk(md)->part0, | ||
| 1654 | sectors[op_stat_group(bio_op(bio))], ci.sector_count); | ||
| 1655 | part_stat_unlock(); | ||
| 1656 | |||
| 1645 | bio_chain(b, bio); | 1657 | bio_chain(b, bio); |
| 1658 | trace_block_split(md->queue, b, bio->bi_iter.bi_sector); | ||
| 1646 | ret = generic_make_request(bio); | 1659 | ret = generic_make_request(bio); |
| 1647 | break; | 1660 | break; |
| 1648 | } | 1661 | } |
| @@ -1713,6 +1726,15 @@ out: | |||
| 1713 | return ret; | 1726 | return ret; |
| 1714 | } | 1727 | } |
| 1715 | 1728 | ||
| 1729 | static blk_qc_t dm_process_bio(struct mapped_device *md, | ||
| 1730 | struct dm_table *map, struct bio *bio) | ||
| 1731 | { | ||
| 1732 | if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) | ||
| 1733 | return __process_bio(md, map, bio); | ||
| 1734 | else | ||
| 1735 | return __split_and_process_bio(md, map, bio); | ||
| 1736 | } | ||
| 1737 | |||
| 1716 | static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) | 1738 | static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) |
| 1717 | { | 1739 | { |
| 1718 | struct mapped_device *md = q->queuedata; | 1740 | struct mapped_device *md = q->queuedata; |
| @@ -1733,10 +1755,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) | |||
| 1733 | return ret; | 1755 | return ret; |
| 1734 | } | 1756 | } |
| 1735 | 1757 | ||
| 1736 | if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) | 1758 | ret = dm_process_bio(md, map, bio); |
| 1737 | ret = __process_bio(md, map, bio); | ||
| 1738 | else | ||
| 1739 | ret = __split_and_process_bio(md, map, bio); | ||
| 1740 | 1759 | ||
| 1741 | dm_put_live_table(md, srcu_idx); | 1760 | dm_put_live_table(md, srcu_idx); |
| 1742 | return ret; | 1761 | return ret; |
| @@ -2415,9 +2434,9 @@ static void dm_wq_work(struct work_struct *work) | |||
| 2415 | break; | 2434 | break; |
| 2416 | 2435 | ||
| 2417 | if (dm_request_based(md)) | 2436 | if (dm_request_based(md)) |
| 2418 | generic_make_request(c); | 2437 | (void) generic_make_request(c); |
| 2419 | else | 2438 | else |
| 2420 | __split_and_process_bio(md, map, c); | 2439 | (void) dm_process_bio(md, map, c); |
| 2421 | } | 2440 | } |
| 2422 | 2441 | ||
| 2423 | dm_put_live_table(md, srcu_idx); | 2442 | dm_put_live_table(md, srcu_idx); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index fd4af4de03b4..05ffffb8b769 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -207,15 +207,10 @@ static bool create_on_open = true; | |||
| 207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
| 208 | struct mddev *mddev) | 208 | struct mddev *mddev) |
| 209 | { | 209 | { |
| 210 | struct bio *b; | ||
| 211 | |||
| 212 | if (!mddev || !bioset_initialized(&mddev->bio_set)) | 210 | if (!mddev || !bioset_initialized(&mddev->bio_set)) |
| 213 | return bio_alloc(gfp_mask, nr_iovecs); | 211 | return bio_alloc(gfp_mask, nr_iovecs); |
| 214 | 212 | ||
| 215 | b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); | 213 | return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); |
| 216 | if (!b) | ||
| 217 | return NULL; | ||
| 218 | return b; | ||
| 219 | } | 214 | } |
| 220 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); | 215 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); |
| 221 | 216 | ||
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index d01821a6906a..89d9c4c21037 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c | |||
| @@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) | |||
| 807 | struct vb2_v4l2_buffer *vbuf; | 807 | struct vb2_v4l2_buffer *vbuf; |
| 808 | unsigned long flags; | 808 | unsigned long flags; |
| 809 | 809 | ||
| 810 | cancel_delayed_work_sync(&dev->work_run); | 810 | if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) |
| 811 | cancel_delayed_work_sync(&dev->work_run); | ||
| 812 | |||
| 811 | for (;;) { | 813 | for (;;) { |
| 812 | if (V4L2_TYPE_IS_OUTPUT(q->type)) | 814 | if (V4L2_TYPE_IS_OUTPUT(q->type)) |
| 813 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); | 815 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); |
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 1441a73ce64c..90aad465f9ed 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c | |||
| @@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
| 287 | const struct v4l2_window *win; | 287 | const struct v4l2_window *win; |
| 288 | const struct v4l2_sdr_format *sdr; | 288 | const struct v4l2_sdr_format *sdr; |
| 289 | const struct v4l2_meta_format *meta; | 289 | const struct v4l2_meta_format *meta; |
| 290 | u32 planes; | ||
| 290 | unsigned i; | 291 | unsigned i; |
| 291 | 292 | ||
| 292 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); | 293 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); |
| @@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
| 317 | prt_names(mp->field, v4l2_field_names), | 318 | prt_names(mp->field, v4l2_field_names), |
| 318 | mp->colorspace, mp->num_planes, mp->flags, | 319 | mp->colorspace, mp->num_planes, mp->flags, |
| 319 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); | 320 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); |
| 320 | for (i = 0; i < mp->num_planes; i++) | 321 | planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); |
| 322 | for (i = 0; i < planes; i++) | ||
| 321 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, | 323 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, |
| 322 | mp->plane_fmt[i].bytesperline, | 324 | mp->plane_fmt[i].bytesperline, |
| 323 | mp->plane_fmt[i].sizeimage); | 325 | mp->plane_fmt[i].sizeimage); |
| @@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1551 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) | 1553 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) |
| 1552 | break; | 1554 | break; |
| 1553 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1556 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1557 | break; | ||
| 1554 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1558 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1559 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1560 | bytesperline); | ||
| 1556 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); | 1561 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); |
| 1557 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1562 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
| 1558 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) | 1563 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) |
| @@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1581 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) | 1586 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) |
| 1582 | break; | 1587 | break; |
| 1583 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1588 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1589 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1590 | break; | ||
| 1584 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1591 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1585 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1592 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1593 | bytesperline); | ||
| 1586 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); | 1594 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); |
| 1587 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1595 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
| 1588 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) | 1596 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) |
| @@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1648 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) | 1656 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) |
| 1649 | break; | 1657 | break; |
| 1650 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1658 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1659 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1660 | break; | ||
| 1651 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1661 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1652 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1662 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1663 | bytesperline); | ||
| 1653 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); | 1664 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); |
| 1654 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1665 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
| 1655 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) | 1666 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) |
| @@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1678 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) | 1689 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) |
| 1679 | break; | 1690 | break; |
| 1680 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1691 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1692 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1693 | break; | ||
| 1681 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1694 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1682 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1695 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1696 | bytesperline); | ||
| 1683 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); | 1697 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); |
| 1684 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1698 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
| 1685 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) | 1699 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) |
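The v4l2-ioctl.c changes clamp the userspace-supplied num_planes to VIDEO_MAX_PLANES before indexing plane_fmt[], both when printing the format and before the CLEAR_AFTER_FIELD loops. Here is a small stand-alone C sketch of the same bounds-clamping idea; the names are hypothetical, and only the value 8 for VIDEO_MAX_PLANES matches the kernel:

```c
/* Minimal user-space sketch of the bounds-clamping idea: never trust a
 * count copied from userspace when indexing a fixed-size array. */
#include <stdio.h>
#include <stdint.h>

#define VIDEO_MAX_PLANES 8
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct plane_fmt { uint32_t bytesperline; uint32_t sizeimage; };

static void print_planes(const struct plane_fmt *fmt, uint32_t num_planes)
{
	/* Clamp before the loop, as the v4l_print_format() fix does. */
	uint32_t planes = MIN(num_planes, VIDEO_MAX_PLANES);
	uint32_t i;

	for (i = 0; i < planes; i++)
		printf("plane %u: bytesperline=%u sizeimage=%u\n",
		       i, fmt[i].bytesperline, fmt[i].sizeimage);
}

int main(void)
{
	struct plane_fmt fmt[VIDEO_MAX_PLANES] = { { 1920, 1920 * 1080 } };

	print_planes(fmt, 200);	/* bogus count from userspace stays harmless */
	return 0;
}
```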
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 8c5dfdce4326..f461460a2aeb 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
| @@ -102,6 +102,7 @@ config MFD_AAT2870_CORE | |||
| 102 | config MFD_AT91_USART | 102 | config MFD_AT91_USART |
| 103 | tristate "AT91 USART Driver" | 103 | tristate "AT91 USART Driver" |
| 104 | select MFD_CORE | 104 | select MFD_CORE |
| 105 | depends on ARCH_AT91 || COMPILE_TEST | ||
| 105 | help | 106 | help |
| 106 | Select this to get support for AT91 USART IP. This is a wrapper | 107 | Select this to get support for AT91 USART IP. This is a wrapper |
| 107 | over at91-usart-serial driver and usart-spi-driver. Only one function | 108 | over at91-usart-serial driver and usart-spi-driver. Only one function |
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 30d09d177171..11ab17f64c64 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c | |||
| @@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank, | |||
| 261 | mutex_unlock(&ab8500->lock); | 261 | mutex_unlock(&ab8500->lock); |
| 262 | dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); | 262 | dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); |
| 263 | 263 | ||
| 264 | return ret; | 264 | return (ret < 0) ? ret : 0; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | static int ab8500_get_register(struct device *dev, u8 bank, | 267 | static int ab8500_get_register(struct device *dev, u8 bank, |
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index e1450a56fc07..3c97f2c0fdfe 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c | |||
| @@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = { | |||
| 641 | 641 | ||
| 642 | static const struct mfd_cell axp223_cells[] = { | 642 | static const struct mfd_cell axp223_cells[] = { |
| 643 | { | 643 | { |
| 644 | .name = "axp221-pek", | 644 | .name = "axp221-pek", |
| 645 | .num_resources = ARRAY_SIZE(axp22x_pek_resources), | 645 | .num_resources = ARRAY_SIZE(axp22x_pek_resources), |
| 646 | .resources = axp22x_pek_resources, | 646 | .resources = axp22x_pek_resources, |
| 647 | }, { | 647 | }, { |
| 648 | .name = "axp22x-adc", | 648 | .name = "axp22x-adc", |
| 649 | .of_compatible = "x-powers,axp221-adc", | 649 | .of_compatible = "x-powers,axp221-adc", |
| @@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = { | |||
| 651 | .name = "axp20x-battery-power-supply", | 651 | .name = "axp20x-battery-power-supply", |
| 652 | .of_compatible = "x-powers,axp221-battery-power-supply", | 652 | .of_compatible = "x-powers,axp221-battery-power-supply", |
| 653 | }, { | 653 | }, { |
| 654 | .name = "axp20x-regulator", | 654 | .name = "axp20x-regulator", |
| 655 | }, { | 655 | }, { |
| 656 | .name = "axp20x-ac-power-supply", | 656 | .name = "axp20x-ac-power-supply", |
| 657 | .of_compatible = "x-powers,axp221-ac-power-supply", | 657 | .of_compatible = "x-powers,axp221-ac-power-supply", |
| @@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = { | |||
| 667 | 667 | ||
| 668 | static const struct mfd_cell axp152_cells[] = { | 668 | static const struct mfd_cell axp152_cells[] = { |
| 669 | { | 669 | { |
| 670 | .name = "axp20x-pek", | 670 | .name = "axp20x-pek", |
| 671 | .num_resources = ARRAY_SIZE(axp152_pek_resources), | 671 | .num_resources = ARRAY_SIZE(axp152_pek_resources), |
| 672 | .resources = axp152_pek_resources, | 672 | .resources = axp152_pek_resources, |
| 673 | }, | 673 | }, |
| 674 | }; | 674 | }; |
| 675 | 675 | ||
| @@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = { | |||
| 698 | 698 | ||
| 699 | static const struct mfd_cell axp288_cells[] = { | 699 | static const struct mfd_cell axp288_cells[] = { |
| 700 | { | 700 | { |
| 701 | .name = "axp288_adc", | 701 | .name = "axp288_adc", |
| 702 | .num_resources = ARRAY_SIZE(axp288_adc_resources), | 702 | .num_resources = ARRAY_SIZE(axp288_adc_resources), |
| 703 | .resources = axp288_adc_resources, | 703 | .resources = axp288_adc_resources, |
| 704 | }, | 704 | }, { |
| 705 | { | 705 | .name = "axp288_extcon", |
| 706 | .name = "axp288_extcon", | 706 | .num_resources = ARRAY_SIZE(axp288_extcon_resources), |
| 707 | .num_resources = ARRAY_SIZE(axp288_extcon_resources), | 707 | .resources = axp288_extcon_resources, |
| 708 | .resources = axp288_extcon_resources, | 708 | }, { |
| 709 | }, | 709 | .name = "axp288_charger", |
| 710 | { | 710 | .num_resources = ARRAY_SIZE(axp288_charger_resources), |
| 711 | .name = "axp288_charger", | 711 | .resources = axp288_charger_resources, |
| 712 | .num_resources = ARRAY_SIZE(axp288_charger_resources), | 712 | }, { |
| 713 | .resources = axp288_charger_resources, | 713 | .name = "axp288_fuel_gauge", |
| 714 | }, | 714 | .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), |
| 715 | { | 715 | .resources = axp288_fuel_gauge_resources, |
| 716 | .name = "axp288_fuel_gauge", | 716 | }, { |
| 717 | .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), | 717 | .name = "axp221-pek", |
| 718 | .resources = axp288_fuel_gauge_resources, | 718 | .num_resources = ARRAY_SIZE(axp288_power_button_resources), |
| 719 | }, | 719 | .resources = axp288_power_button_resources, |
| 720 | { | 720 | }, { |
| 721 | .name = "axp221-pek", | 721 | .name = "axp288_pmic_acpi", |
| 722 | .num_resources = ARRAY_SIZE(axp288_power_button_resources), | ||
| 723 | .resources = axp288_power_button_resources, | ||
| 724 | }, | ||
| 725 | { | ||
| 726 | .name = "axp288_pmic_acpi", | ||
| 727 | }, | 722 | }, |
| 728 | }; | 723 | }; |
| 729 | 724 | ||
| 730 | static const struct mfd_cell axp803_cells[] = { | 725 | static const struct mfd_cell axp803_cells[] = { |
| 731 | { | 726 | { |
| 732 | .name = "axp221-pek", | 727 | .name = "axp221-pek", |
| 733 | .num_resources = ARRAY_SIZE(axp803_pek_resources), | 728 | .num_resources = ARRAY_SIZE(axp803_pek_resources), |
| 734 | .resources = axp803_pek_resources, | 729 | .resources = axp803_pek_resources, |
| 730 | }, { | ||
| 731 | .name = "axp20x-gpio", | ||
| 732 | .of_compatible = "x-powers,axp813-gpio", | ||
| 733 | }, { | ||
| 734 | .name = "axp813-adc", | ||
| 735 | .of_compatible = "x-powers,axp813-adc", | ||
| 736 | }, { | ||
| 737 | .name = "axp20x-battery-power-supply", | ||
| 738 | .of_compatible = "x-powers,axp813-battery-power-supply", | ||
| 739 | }, { | ||
| 740 | .name = "axp20x-ac-power-supply", | ||
| 741 | .of_compatible = "x-powers,axp813-ac-power-supply", | ||
| 742 | .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), | ||
| 743 | .resources = axp20x_ac_power_supply_resources, | ||
| 735 | }, | 744 | }, |
| 736 | { .name = "axp20x-regulator" }, | 745 | { .name = "axp20x-regulator" }, |
| 737 | }; | 746 | }; |
| 738 | 747 | ||
| 739 | static const struct mfd_cell axp806_self_working_cells[] = { | 748 | static const struct mfd_cell axp806_self_working_cells[] = { |
| 740 | { | 749 | { |
| 741 | .name = "axp221-pek", | 750 | .name = "axp221-pek", |
| 742 | .num_resources = ARRAY_SIZE(axp806_pek_resources), | 751 | .num_resources = ARRAY_SIZE(axp806_pek_resources), |
| 743 | .resources = axp806_pek_resources, | 752 | .resources = axp806_pek_resources, |
| 744 | }, | 753 | }, |
| 745 | { .name = "axp20x-regulator" }, | 754 | { .name = "axp20x-regulator" }, |
| 746 | }; | 755 | }; |
| 747 | 756 | ||
| 748 | static const struct mfd_cell axp806_cells[] = { | 757 | static const struct mfd_cell axp806_cells[] = { |
| 749 | { | 758 | { |
| 750 | .id = 2, | 759 | .id = 2, |
| 751 | .name = "axp20x-regulator", | 760 | .name = "axp20x-regulator", |
| 752 | }, | 761 | }, |
| 753 | }; | 762 | }; |
| 754 | 763 | ||
| 755 | static const struct mfd_cell axp809_cells[] = { | 764 | static const struct mfd_cell axp809_cells[] = { |
| 756 | { | 765 | { |
| 757 | .name = "axp221-pek", | 766 | .name = "axp221-pek", |
| 758 | .num_resources = ARRAY_SIZE(axp809_pek_resources), | 767 | .num_resources = ARRAY_SIZE(axp809_pek_resources), |
| 759 | .resources = axp809_pek_resources, | 768 | .resources = axp809_pek_resources, |
| 760 | }, { | 769 | }, { |
| 761 | .id = 1, | 770 | .id = 1, |
| 762 | .name = "axp20x-regulator", | 771 | .name = "axp20x-regulator", |
| 763 | }, | 772 | }, |
| 764 | }; | 773 | }; |
| 765 | 774 | ||
| 766 | static const struct mfd_cell axp813_cells[] = { | 775 | static const struct mfd_cell axp813_cells[] = { |
| 767 | { | 776 | { |
| 768 | .name = "axp221-pek", | 777 | .name = "axp221-pek", |
| 769 | .num_resources = ARRAY_SIZE(axp803_pek_resources), | 778 | .num_resources = ARRAY_SIZE(axp803_pek_resources), |
| 770 | .resources = axp803_pek_resources, | 779 | .resources = axp803_pek_resources, |
| 771 | }, { | 780 | }, { |
| 772 | .name = "axp20x-regulator", | 781 | .name = "axp20x-regulator", |
| 773 | }, { | 782 | }, { |
| 774 | .name = "axp20x-gpio", | 783 | .name = "axp20x-gpio", |
| 775 | .of_compatible = "x-powers,axp813-gpio", | 784 | .of_compatible = "x-powers,axp813-gpio", |
| 776 | }, { | 785 | }, { |
| 777 | .name = "axp813-adc", | 786 | .name = "axp813-adc", |
| 778 | .of_compatible = "x-powers,axp813-adc", | 787 | .of_compatible = "x-powers,axp813-adc", |
| 779 | }, { | 788 | }, { |
| 780 | .name = "axp20x-battery-power-supply", | 789 | .name = "axp20x-battery-power-supply", |
| 781 | .of_compatible = "x-powers,axp813-battery-power-supply", | 790 | .of_compatible = "x-powers,axp813-battery-power-supply", |
| 791 | }, { | ||
| 792 | .name = "axp20x-ac-power-supply", | ||
| 793 | .of_compatible = "x-powers,axp813-ac-power-supply", | ||
| 794 | .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources), | ||
| 795 | .resources = axp20x_ac_power_supply_resources, | ||
| 782 | }, | 796 | }, |
| 783 | }; | 797 | }; |
| 784 | 798 | ||
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c index 503979c81dae..fab3cdc27ed6 100644 --- a/drivers/mfd/bd9571mwv.c +++ b/drivers/mfd/bd9571mwv.c | |||
| @@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = { | |||
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { | 61 | static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { |
| 62 | regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC), | ||
| 62 | regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), | 63 | regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), |
| 63 | regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), | 64 | regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), |
| 64 | regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), | 65 | regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), |
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index b99a194ce5a4..2d0fee488c5a 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c | |||
| @@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev) | |||
| 499 | 499 | ||
| 500 | cros_ec_debugfs_remove(ec); | 500 | cros_ec_debugfs_remove(ec); |
| 501 | 501 | ||
| 502 | mfd_remove_devices(ec->dev); | ||
| 502 | cdev_del(&ec->cdev); | 503 | cdev_del(&ec->cdev); |
| 503 | device_unregister(&ec->class_dev); | 504 | device_unregister(&ec->class_dev); |
| 504 | return 0; | 505 | return 0; |
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5970b8def548..aec20e1c7d3d 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c | |||
| @@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = { | |||
| 2584 | .irq_unmask = prcmu_irq_unmask, | 2584 | .irq_unmask = prcmu_irq_unmask, |
| 2585 | }; | 2585 | }; |
| 2586 | 2586 | ||
| 2587 | static __init char *fw_project_name(u32 project) | 2587 | static char *fw_project_name(u32 project) |
| 2588 | { | 2588 | { |
| 2589 | switch (project) { | 2589 | switch (project) { |
| 2590 | case PRCMU_FW_PROJECT_U8500: | 2590 | case PRCMU_FW_PROJECT_U8500: |
| @@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size) | |||
| 2732 | INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); | 2732 | INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); |
| 2733 | } | 2733 | } |
| 2734 | 2734 | ||
| 2735 | static void __init init_prcm_registers(void) | 2735 | static void init_prcm_registers(void) |
| 2736 | { | 2736 | { |
| 2737 | u32 val; | 2737 | u32 val; |
| 2738 | 2738 | ||
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c index ca829f85672f..2713de989f05 100644 --- a/drivers/mfd/exynos-lpass.c +++ b/drivers/mfd/exynos-lpass.c | |||
| @@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass) | |||
| 82 | LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); | 82 | LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); |
| 83 | 83 | ||
| 84 | regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, | 84 | regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, |
| 85 | LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); | 85 | LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S | |
| 86 | LPASS_INTR_UART); | ||
| 86 | 87 | ||
| 87 | exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); | 88 | exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); |
| 88 | exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); | 89 | exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); |
| 89 | exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); | 90 | exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); |
| 91 | exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET); | ||
| 90 | } | 92 | } |
| 91 | 93 | ||
| 92 | static void exynos_lpass_disable(struct exynos_lpass *lpass) | 94 | static void exynos_lpass_disable(struct exynos_lpass *lpass) |
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c index 440030cecbbd..2a77988d0462 100644 --- a/drivers/mfd/madera-core.c +++ b/drivers/mfd/madera-core.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
| 16 | #include <linux/mfd/core.h> | 16 | #include <linux/mfd/core.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/mutex.h> | ||
| 18 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
| 19 | #include <linux/of.h> | 20 | #include <linux/of.h> |
| 20 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
| @@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera) | |||
| 155 | usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, | 156 | usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, |
| 156 | MADERA_BOOT_POLL_INTERVAL_USEC); | 157 | MADERA_BOOT_POLL_INTERVAL_USEC); |
| 157 | regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); | 158 | regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); |
| 158 | }; | 159 | } |
| 159 | 160 | ||
| 160 | if (!(val & MADERA_BOOT_DONE_STS1)) { | 161 | if (!(val & MADERA_BOOT_DONE_STS1)) { |
| 161 | dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); | 162 | dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); |
| @@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera) | |||
| 357 | 358 | ||
| 358 | dev_set_drvdata(madera->dev, madera); | 359 | dev_set_drvdata(madera->dev, madera); |
| 359 | BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); | 360 | BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); |
| 361 | mutex_init(&madera->dapm_ptr_lock); | ||
| 362 | |||
| 360 | madera_set_micbias_info(madera); | 363 | madera_set_micbias_info(madera); |
| 361 | 364 | ||
| 362 | /* | 365 | /* |
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index d8217366ed36..d8ddd1a6f304 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c | |||
| @@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip, | |||
| 280 | 280 | ||
| 281 | for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { | 281 | for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { |
| 282 | sprintf(fps_name, "fps%d", fps_id); | 282 | sprintf(fps_name, "fps%d", fps_id); |
| 283 | if (!strcmp(fps_np->name, fps_name)) | 283 | if (of_node_name_eq(fps_np, fps_name)) |
| 284 | break; | 284 | break; |
| 285 | } | 285 | } |
| 286 | 286 | ||
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index f475e848252f..d0bf50e3568d 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c | |||
| @@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, | |||
| 274 | 274 | ||
| 275 | mc13xxx->adcflags |= MC13XXX_ADC_WORKING; | 275 | mc13xxx->adcflags |= MC13XXX_ADC_WORKING; |
| 276 | 276 | ||
| 277 | mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); | 277 | ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); |
| 278 | if (ret) | ||
| 279 | goto out; | ||
| 278 | 280 | ||
| 279 | adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | | 281 | adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | |
| 280 | MC13XXX_ADC0_CHRGRAWDIV; | 282 | MC13XXX_ADC0_CHRGRAWDIV; |
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index 77b64bd64df3..ab24e176ef44 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c | |||
| @@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev) | |||
| 329 | 329 | ||
| 330 | default: | 330 | default: |
| 331 | dev_err(&pdev->dev, "unsupported chip: %d\n", id); | 331 | dev_err(&pdev->dev, "unsupported chip: %d\n", id); |
| 332 | ret = -ENODEV; | 332 | return -ENODEV; |
| 333 | break; | ||
| 334 | } | 333 | } |
| 335 | 334 | ||
| 336 | if (ret) { | 335 | if (ret) { |
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c index 52fafea06067..8d420c37b2a6 100644 --- a/drivers/mfd/qcom_rpm.c +++ b/drivers/mfd/qcom_rpm.c | |||
| @@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev) | |||
| 638 | return -EFAULT; | 638 | return -EFAULT; |
| 639 | } | 639 | } |
| 640 | 640 | ||
| 641 | writel(fw_version[0], RPM_CTRL_REG(rpm, 0)); | ||
| 642 | writel(fw_version[1], RPM_CTRL_REG(rpm, 1)); | ||
| 643 | writel(fw_version[2], RPM_CTRL_REG(rpm, 2)); | ||
| 644 | |||
| 641 | dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], | 645 | dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], |
| 642 | fw_version[1], | 646 | fw_version[1], |
| 643 | fw_version[2]); | 647 | fw_version[2]); |
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index 2a8369657e38..26c7b63e008a 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c | |||
| @@ -109,7 +109,7 @@ struct rave_sp_reply { | |||
| 109 | /** | 109 | /** |
| 110 | * struct rave_sp_checksum - Variant specific checksum implementation details | 110 | * struct rave_sp_checksum - Variant specific checksum implementation details |
| 111 | * | 111 | * |
| 112 | * @length: Caculated checksum length | 112 | * @length: Calculated checksum length |
| 113 | * @subroutine: Utilized checksum algorithm implementation | 113 | * @subroutine: Utilized checksum algorithm implementation |
| 114 | */ | 114 | */ |
| 115 | struct rave_sp_checksum { | 115 | struct rave_sp_checksum { |
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 566caca4efd8..7569a4be0608 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c | |||
| @@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata, | |||
| 1302 | pdata->autosleep = (pdata->autosleep_timeout) ? true : false; | 1302 | pdata->autosleep = (pdata->autosleep_timeout) ? true : false; |
| 1303 | 1303 | ||
| 1304 | for_each_child_of_node(np, child) { | 1304 | for_each_child_of_node(np, child) { |
| 1305 | if (!strcmp(child->name, "stmpe_gpio")) { | 1305 | if (of_node_name_eq(child, "stmpe_gpio")) { |
| 1306 | pdata->blocks |= STMPE_BLOCK_GPIO; | 1306 | pdata->blocks |= STMPE_BLOCK_GPIO; |
| 1307 | } else if (!strcmp(child->name, "stmpe_keypad")) { | 1307 | } else if (of_node_name_eq(child, "stmpe_keypad")) { |
| 1308 | pdata->blocks |= STMPE_BLOCK_KEYPAD; | 1308 | pdata->blocks |= STMPE_BLOCK_KEYPAD; |
| 1309 | } else if (!strcmp(child->name, "stmpe_touchscreen")) { | 1309 | } else if (of_node_name_eq(child, "stmpe_touchscreen")) { |
| 1310 | pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; | 1310 | pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; |
| 1311 | } else if (!strcmp(child->name, "stmpe_adc")) { | 1311 | } else if (of_node_name_eq(child, "stmpe_adc")) { |
| 1312 | pdata->blocks |= STMPE_BLOCK_ADC; | 1312 | pdata->blocks |= STMPE_BLOCK_ADC; |
| 1313 | } else if (!strcmp(child->name, "stmpe_pwm")) { | 1313 | } else if (of_node_name_eq(child, "stmpe_pwm")) { |
| 1314 | pdata->blocks |= STMPE_BLOCK_PWM; | 1314 | pdata->blocks |= STMPE_BLOCK_PWM; |
| 1315 | } else if (!strcmp(child->name, "stmpe_rotator")) { | 1315 | } else if (of_node_name_eq(child, "stmpe_rotator")) { |
| 1316 | pdata->blocks |= STMPE_BLOCK_ROTATOR; | 1316 | pdata->blocks |= STMPE_BLOCK_ROTATOR; |
| 1317 | } | 1317 | } |
| 1318 | } | 1318 | } |
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index c2d47d78705b..fd111296b959 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c | |||
| @@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev) | |||
| 264 | cell->pdata_size = sizeof(tscadc); | 264 | cell->pdata_size = sizeof(tscadc); |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, | 267 | err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, |
| 268 | tscadc->used_cells, NULL, 0, NULL); | 268 | tscadc->cells, tscadc->used_cells, NULL, |
| 269 | 0, NULL); | ||
| 269 | if (err < 0) | 270 | if (err < 0) |
| 270 | goto err_disable_clk; | 271 | goto err_disable_clk; |
| 271 | 272 | ||
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 910f569ff77c..8bcdecf494d0 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c | |||
| @@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client, | |||
| 235 | 235 | ||
| 236 | mutex_init(&tps->tps_lock); | 236 | mutex_init(&tps->tps_lock); |
| 237 | 237 | ||
| 238 | ret = regmap_add_irq_chip(tps->regmap, tps->irq, | 238 | ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq, |
| 239 | IRQF_ONESHOT, 0, &tps65218_irq_chip, | 239 | IRQF_ONESHOT, 0, &tps65218_irq_chip, |
| 240 | &tps->irq_data); | 240 | &tps->irq_data); |
| 241 | if (ret < 0) | 241 | if (ret < 0) |
| 242 | return ret; | 242 | return ret; |
| 243 | 243 | ||
| @@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client, | |||
| 253 | ARRAY_SIZE(tps65218_cells), NULL, 0, | 253 | ARRAY_SIZE(tps65218_cells), NULL, 0, |
| 254 | regmap_irq_get_domain(tps->irq_data)); | 254 | regmap_irq_get_domain(tps->irq_data)); |
| 255 | 255 | ||
| 256 | if (ret < 0) | ||
| 257 | goto err_irq; | ||
| 258 | |||
| 259 | return 0; | ||
| 260 | |||
| 261 | err_irq: | ||
| 262 | regmap_del_irq_chip(tps->irq, tps->irq_data); | ||
| 263 | |||
| 264 | return ret; | 256 | return ret; |
| 265 | } | 257 | } |
| 266 | 258 | ||
| 267 | static int tps65218_remove(struct i2c_client *client) | ||
| 268 | { | ||
| 269 | struct tps65218 *tps = i2c_get_clientdata(client); | ||
| 270 | |||
| 271 | regmap_del_irq_chip(tps->irq, tps->irq_data); | ||
| 272 | |||
| 273 | return 0; | ||
| 274 | } | ||
| 275 | |||
| 276 | static const struct i2c_device_id tps65218_id_table[] = { | 259 | static const struct i2c_device_id tps65218_id_table[] = { |
| 277 | { "tps65218", TPS65218 }, | 260 | { "tps65218", TPS65218 }, |
| 278 | { }, | 261 | { }, |
| @@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = { | |||
| 285 | .of_match_table = of_tps65218_match_table, | 268 | .of_match_table = of_tps65218_match_table, |
| 286 | }, | 269 | }, |
| 287 | .probe = tps65218_probe, | 270 | .probe = tps65218_probe, |
| 288 | .remove = tps65218_remove, | ||
| 289 | .id_table = tps65218_id_table, | 271 | .id_table = tps65218_id_table, |
| 290 | }; | 272 | }; |
| 291 | 273 | ||
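The tps65218.c change swaps regmap_add_irq_chip() for the device-managed devm_regmap_add_irq_chip(), which is why the err_irq unwind label and the whole tps65218_remove() callback can be deleted. A hedged, non-buildable kernel-style sketch of that pattern follows; my_probe, my_chip, my_irq_chip and my_cells are made-up names, while the regmap and mfd calls themselves are real APIs:

```c
/* Illustrative kernel-style fragment, not a complete driver. */
static int my_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct my_chip *chip = i2c_get_clientdata(client);
	int ret;

	/* Teardown is bound to &client->dev, so no matching *_del call. */
	ret = devm_regmap_add_irq_chip(&client->dev, chip->regmap,
				       client->irq, IRQF_ONESHOT, 0,
				       &my_irq_chip, &chip->irq_data);
	if (ret < 0)
		return ret;

	/* A later failure needs no manual regmap_del_irq_chip() either. */
	return mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO, my_cells,
			       ARRAY_SIZE(my_cells), NULL, 0,
			       regmap_irq_get_domain(chip->irq_data));
}
```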
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b89379782741..9c7925ca13cf 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
| @@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client) | |||
| 592 | return 0; | 592 | return 0; |
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) | ||
| 596 | { | ||
| 597 | struct tps6586x *tps6586x = dev_get_drvdata(dev); | ||
| 598 | |||
| 599 | if (tps6586x->client->irq) | ||
| 600 | disable_irq(tps6586x->client->irq); | ||
| 601 | |||
| 602 | return 0; | ||
| 603 | } | ||
| 604 | |||
| 605 | static int __maybe_unused tps6586x_i2c_resume(struct device *dev) | ||
| 606 | { | ||
| 607 | struct tps6586x *tps6586x = dev_get_drvdata(dev); | ||
| 608 | |||
| 609 | if (tps6586x->client->irq) | ||
| 610 | enable_irq(tps6586x->client->irq); | ||
| 611 | |||
| 612 | return 0; | ||
| 613 | } | ||
| 614 | |||
| 615 | static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, | ||
| 616 | tps6586x_i2c_resume); | ||
| 617 | |||
| 595 | static const struct i2c_device_id tps6586x_id_table[] = { | 618 | static const struct i2c_device_id tps6586x_id_table[] = { |
| 596 | { "tps6586x", 0 }, | 619 | { "tps6586x", 0 }, |
| 597 | { }, | 620 | { }, |
| @@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = { | |||
| 602 | .driver = { | 625 | .driver = { |
| 603 | .name = "tps6586x", | 626 | .name = "tps6586x", |
| 604 | .of_match_table = of_match_ptr(tps6586x_of_match), | 627 | .of_match_table = of_match_ptr(tps6586x_of_match), |
| 628 | .pm = &tps6586x_pm_ops, | ||
| 605 | }, | 629 | }, |
| 606 | .probe = tps6586x_i2c_probe, | 630 | .probe = tps6586x_i2c_probe, |
| 607 | .remove = tps6586x_i2c_remove, | 631 | .remove = tps6586x_i2c_remove, |
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 4be3d239da9e..299016bc46d9 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c | |||
| @@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base, | |||
| 979 | * letting it generate the right frequencies for USB, MADC, and | 979 | * letting it generate the right frequencies for USB, MADC, and |
| 980 | * other purposes. | 980 | * other purposes. |
| 981 | */ | 981 | */ |
| 982 | static inline int __init protect_pm_master(void) | 982 | static inline int protect_pm_master(void) |
| 983 | { | 983 | { |
| 984 | int e = 0; | 984 | int e = 0; |
| 985 | 985 | ||
| @@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void) | |||
| 988 | return e; | 988 | return e; |
| 989 | } | 989 | } |
| 990 | 990 | ||
| 991 | static inline int __init unprotect_pm_master(void) | 991 | static inline int unprotect_pm_master(void) |
| 992 | { | 992 | { |
| 993 | int e = 0; | 993 | int e = 0; |
| 994 | 994 | ||
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 1ee68bd440fb..16c6e2accfaa 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c | |||
| @@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = { | |||
| 1618 | { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ | 1618 | { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ |
| 1619 | { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ | 1619 | { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ |
| 1620 | { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ | 1620 | { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ |
| 1621 | { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */ | ||
| 1621 | { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ | 1622 | { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ |
| 1622 | { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ | 1623 | { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ |
| 1623 | { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ | 1624 | { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ |
| @@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) | |||
| 2869 | case ARIZONA_ASRC_ENABLE: | 2870 | case ARIZONA_ASRC_ENABLE: |
| 2870 | case ARIZONA_ASRC_STATUS: | 2871 | case ARIZONA_ASRC_STATUS: |
| 2871 | case ARIZONA_ASRC_RATE1: | 2872 | case ARIZONA_ASRC_RATE1: |
| 2873 | case ARIZONA_ASRC_RATE2: | ||
| 2872 | case ARIZONA_ISRC_1_CTRL_1: | 2874 | case ARIZONA_ISRC_1_CTRL_1: |
| 2873 | case ARIZONA_ISRC_1_CTRL_2: | 2875 | case ARIZONA_ISRC_1_CTRL_2: |
| 2874 | case ARIZONA_ISRC_1_CTRL_3: | 2876 | case ARIZONA_ISRC_1_CTRL_3: |
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index b8aaa684c397..2ed23c99f59f 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c | |||
| @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, | |||
| 820 | * | 820 | * |
| 821 | * Return: | 821 | * Return: |
| 822 | * 0 - Success | 822 | * 0 - Success |
| 823 | * Non-zero - Failure | ||
| 823 | */ | 824 | */ |
| 824 | static int ibmvmc_open(struct inode *inode, struct file *file) | 825 | static int ibmvmc_open(struct inode *inode, struct file *file) |
| 825 | { | 826 | { |
| 826 | struct ibmvmc_file_session *session; | 827 | struct ibmvmc_file_session *session; |
| 827 | int rc = 0; | ||
| 828 | 828 | ||
| 829 | pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, | 829 | pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, |
| 830 | (unsigned long)inode, (unsigned long)file, | 830 | (unsigned long)inode, (unsigned long)file, |
| 831 | ibmvmc.state); | 831 | ibmvmc.state); |
| 832 | 832 | ||
| 833 | session = kzalloc(sizeof(*session), GFP_KERNEL); | 833 | session = kzalloc(sizeof(*session), GFP_KERNEL); |
| 834 | if (!session) | ||
| 835 | return -ENOMEM; | ||
| 836 | |||
| 834 | session->file = file; | 837 | session->file = file; |
| 835 | file->private_data = session; | 838 | file->private_data = session; |
| 836 | 839 | ||
| 837 | return rc; | 840 | return 0; |
| 838 | } | 841 | } |
| 839 | 842 | ||
| 840 | /** | 843 | /** |
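The ibmvmc_open() fix above stops dereferencing a kzalloc() result that may be NULL and documents the non-zero failure return. The same check in a tiny stand-alone C sketch, with hypothetical names and plain calloc() standing in for kzalloc():

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct session { void *file; };

/* Returns 0 on success, -ENOMEM when the allocation fails. */
static int open_session(void *file, struct session **out)
{
	struct session *s = calloc(1, sizeof(*s));	/* kzalloc() analogue */

	if (!s)
		return -ENOMEM;	/* bail out before touching the pointer */

	s->file = file;
	*out = s;
	return 0;
}

int main(void)
{
	struct session *s = NULL;
	int rc = open_session((void *)0x1, &s);

	printf("open_session: %s\n", rc ? strerror(-rc) : "ok");
	if (!rc)
		free(s);
	return 0;
}
```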
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 78c26cebf5d4..8f7616557c97 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 1187 | dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; | 1187 | dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; |
| 1188 | 1188 | ||
| 1189 | if (dma_setup_res->status) { | 1189 | if (dma_setup_res->status) { |
| 1190 | dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", | 1190 | u8 status = dma_setup_res->status; |
| 1191 | dma_setup_res->status, | 1191 | |
| 1192 | mei_hbm_status_str(dma_setup_res->status)); | 1192 | if (status == MEI_HBMS_NOT_ALLOWED) { |
| 1193 | dev_dbg(dev->dev, "hbm: dma setup not allowed\n"); | ||
| 1194 | } else { | ||
| 1195 | dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", | ||
| 1196 | status, | ||
| 1197 | mei_hbm_status_str(status)); | ||
| 1198 | } | ||
| 1193 | dev->hbm_f_dr_supported = 0; | 1199 | dev->hbm_f_dr_supported = 0; |
| 1194 | mei_dmam_ring_free(dev); | 1200 | mei_dmam_ring_free(dev); |
| 1195 | } | 1201 | } |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index e4b10b2d1a08..23739a60517f 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
| @@ -127,6 +127,8 @@ | |||
| 127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ | 127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ |
| 128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ | 128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ |
| 129 | 129 | ||
| 130 | #define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ | ||
| 131 | |||
| 130 | #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ | 132 | #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ |
| 131 | 133 | ||
| 132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | 134 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 73ace2d59dea..e89497f858ae 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
| 88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, | 88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, |
| 89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, | 89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, |
| 90 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, | 90 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, |
| 91 | {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, | 91 | {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, |
| 92 | 92 | ||
| 93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, | 93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, |
| 94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, | 94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, |
| 95 | 95 | ||
| 96 | {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, | ||
| 97 | |||
| 96 | {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, | 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, |
| 97 | 99 | ||
| 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | 100 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, |
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 6b212c8b78e7..2bfa3a903bf9 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c | |||
| @@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, | |||
| 394 | struct _vop_vdev *vdev = to_vopvdev(dev); | 394 | struct _vop_vdev *vdev = to_vopvdev(dev); |
| 395 | struct vop_device *vpdev = vdev->vpdev; | 395 | struct vop_device *vpdev = vdev->vpdev; |
| 396 | struct mic_device_ctrl __iomem *dc = vdev->dc; | 396 | struct mic_device_ctrl __iomem *dc = vdev->dc; |
| 397 | int i, err, retry; | 397 | int i, err, retry, queue_idx = 0; |
| 398 | 398 | ||
| 399 | /* We must have this many virtqueues. */ | 399 | /* We must have this many virtqueues. */ |
| 400 | if (nvqs > ioread8(&vdev->desc->num_vq)) | 400 | if (nvqs > ioread8(&vdev->desc->num_vq)) |
| 401 | return -ENOENT; | 401 | return -ENOENT; |
| 402 | 402 | ||
| 403 | for (i = 0; i < nvqs; ++i) { | 403 | for (i = 0; i < nvqs; ++i) { |
| 404 | if (!names[i]) { | ||
| 405 | vqs[i] = NULL; | ||
| 406 | continue; | ||
| 407 | } | ||
| 408 | |||
| 404 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", | 409 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", |
| 405 | __func__, i, names[i]); | 410 | __func__, i, names[i]); |
| 406 | vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], | 411 | vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], |
| 407 | ctx ? ctx[i] : false); | 412 | ctx ? ctx[i] : false); |
| 408 | if (IS_ERR(vqs[i])) { | 413 | if (IS_ERR(vqs[i])) { |
| 409 | err = PTR_ERR(vqs[i]); | 414 | err = PTR_ERR(vqs[i]); |
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index 595ac065b401..95ff7c5a1dfb 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c | |||
| @@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context) | |||
| 70 | struct resource r; | 70 | struct resource r; |
| 71 | 71 | ||
| 72 | if (acpi_dev_resource_io(res, &r)) { | 72 | if (acpi_dev_resource_io(res, &r)) { |
| 73 | #ifdef CONFIG_HAS_IOPORT_MAP | ||
| 73 | base = ioport_map(r.start, resource_size(&r)); | 74 | base = ioport_map(r.start, resource_size(&r)); |
| 74 | return AE_OK; | 75 | return AE_OK; |
| 76 | #else | ||
| 77 | return AE_ERROR; | ||
| 78 | #endif | ||
| 75 | } else if (acpi_dev_resource_memory(res, &r)) { | 79 | } else if (acpi_dev_resource_memory(res, &r)) { |
| 76 | base = ioremap(r.start, resource_size(&r)); | 80 | base = ioremap(r.start, resource_size(&r)); |
| 77 | return AE_OK; | 81 | return AE_OK; |
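The pvpanic.c hunk guards the ioport_map() call with CONFIG_HAS_IOPORT_MAP, since that symbol only exists on architectures that can map port I/O. A minimal stand-alone sketch of the same compile-time fallback idea; the HAVE_PORT_IO macro and the functions here are hypothetical:

```c
#include <stdio.h>

/* Pretend "port I/O mapping" is only available when HAVE_PORT_IO is
 * defined, the way ioport_map() only exists under CONFIG_HAS_IOPORT_MAP. */
#ifdef HAVE_PORT_IO
static int map_port_resource(unsigned long start, unsigned long len)
{
	printf("mapped I/O port range %#lx+%lu\n", start, len);
	return 0;			/* AE_OK analogue */
}
#endif

static int walk_resource(unsigned long start, unsigned long len, int is_port)
{
	if (is_port) {
#ifdef HAVE_PORT_IO
		return map_port_resource(start, len);
#else
		return -1;		/* AE_ERROR analogue: reject, don't call */
#endif
	}
	printf("mapped MMIO range %#lx+%lu\n", start, len);
	return 0;
}

int main(void)
{
	return walk_resource(0x505, 1, 1) == 0 ? 0 : 1;
}
```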
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index f57f5de54206..cf58ccaf22d5 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
| @@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host) | |||
| 234 | if (device_property_read_bool(dev, "broken-cd")) | 234 | if (device_property_read_bool(dev, "broken-cd")) |
| 235 | host->caps |= MMC_CAP_NEEDS_POLL; | 235 | host->caps |= MMC_CAP_NEEDS_POLL; |
| 236 | 236 | ||
| 237 | ret = mmc_gpiod_request_cd(host, "cd", 0, true, | 237 | ret = mmc_gpiod_request_cd(host, "cd", 0, false, |
| 238 | cd_debounce_delay_ms * 1000, | 238 | cd_debounce_delay_ms * 1000, |
| 239 | &cd_gpio_invert); | 239 | &cd_gpio_invert); |
| 240 | if (!ret) | 240 | if (!ret) |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index e26b8145efb3..a44ec8bb5418 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
| @@ -116,7 +116,7 @@ config MMC_RICOH_MMC | |||
| 116 | 116 | ||
| 117 | config MMC_SDHCI_ACPI | 117 | config MMC_SDHCI_ACPI |
| 118 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" | 118 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" |
| 119 | depends on MMC_SDHCI && ACPI | 119 | depends on MMC_SDHCI && ACPI && PCI |
| 120 | select IOSF_MBI if X86 | 120 | select IOSF_MBI if X86 |
| 121 | help | 121 | help |
| 122 | This selects support for ACPI enumerated SDHCI controllers, | 122 | This selects support for ACPI enumerated SDHCI controllers, |
| @@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP | |||
| 978 | tristate "TI SDHCI Controller Support" | 978 | tristate "TI SDHCI Controller Support" |
| 979 | depends on MMC_SDHCI_PLTFM && OF | 979 | depends on MMC_SDHCI_PLTFM && OF |
| 980 | select THERMAL | 980 | select THERMAL |
| 981 | select TI_SOC_THERMAL | 981 | imply TI_SOC_THERMAL |
| 982 | help | 982 | help |
| 983 | This selects the Secure Digital Host Controller Interface (SDHCI) | 983 | This selects the Secure Digital Host Controller Interface (SDHCI) |
| 984 | support present in TI's DRA7 SOCs. The controller supports | 984 | support present in TI's DRA7 SOCs. The controller supports |
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c index ed8f2254b66a..aa38b1a8017e 100644 --- a/drivers/mmc/host/dw_mmc-bluefield.c +++ b/drivers/mmc/host/dw_mmc-bluefield.c | |||
| @@ -1,11 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2018 Mellanox Technologies. | 3 | * Copyright (C) 2018 Mellanox Technologies. |
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | */ | 4 | */ |
| 10 | 5 | ||
| 11 | #include <linux/bitfield.h> | 6 | #include <linux/bitfield.h> |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c2690c1a50ff..f19ec60bcbdc 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
| @@ -179,6 +179,8 @@ struct meson_host { | |||
| 179 | struct sd_emmc_desc *descs; | 179 | struct sd_emmc_desc *descs; |
| 180 | dma_addr_t descs_dma_addr; | 180 | dma_addr_t descs_dma_addr; |
| 181 | 181 | ||
| 182 | int irq; | ||
| 183 | |||
| 182 | bool vqmmc_enabled; | 184 | bool vqmmc_enabled; |
| 183 | }; | 185 | }; |
| 184 | 186 | ||
| @@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, | |||
| 738 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) | 740 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
| 739 | { | 741 | { |
| 740 | struct meson_host *host = mmc_priv(mmc); | 742 | struct meson_host *host = mmc_priv(mmc); |
| 743 | int adj = 0; | ||
| 744 | |||
| 745 | /* enable signal resampling w/o delay */ | ||
| 746 | adj = ADJUST_ADJ_EN; | ||
| 747 | writel(adj, host->regs + host->data->adjust); | ||
| 741 | 748 | ||
| 742 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | 749 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
| 743 | } | 750 | } |
| @@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 768 | if (!IS_ERR(mmc->supply.vmmc)) | 775 | if (!IS_ERR(mmc->supply.vmmc)) |
| 769 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); | 776 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
| 770 | 777 | ||
| 778 | /* disable signal resampling */ | ||
| 779 | writel(0, host->regs + host->data->adjust); | ||
| 780 | |||
| 771 | /* Reset rx phase */ | 781 | /* Reset rx phase */ |
| 772 | clk_set_phase(host->rx_clk, 0); | 782 | clk_set_phase(host->rx_clk, 0); |
| 773 | 783 | ||
| @@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc) | |||
| 1166 | 1176 | ||
| 1167 | static void meson_mmc_cfg_init(struct meson_host *host) | 1177 | static void meson_mmc_cfg_init(struct meson_host *host) |
| 1168 | { | 1178 | { |
| 1169 | u32 cfg = 0, adj = 0; | 1179 | u32 cfg = 0; |
| 1170 | 1180 | ||
| 1171 | cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, | 1181 | cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, |
| 1172 | ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); | 1182 | ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); |
| @@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host) | |||
| 1177 | cfg |= CFG_ERR_ABORT; | 1187 | cfg |= CFG_ERR_ABORT; |
| 1178 | 1188 | ||
| 1179 | writel(cfg, host->regs + SD_EMMC_CFG); | 1189 | writel(cfg, host->regs + SD_EMMC_CFG); |
| 1180 | |||
| 1181 | /* enable signal resampling w/o delay */ | ||
| 1182 | adj = ADJUST_ADJ_EN; | ||
| 1183 | writel(adj, host->regs + host->data->adjust); | ||
| 1184 | } | 1190 | } |
| 1185 | 1191 | ||
| 1186 | static int meson_mmc_card_busy(struct mmc_host *mmc) | 1192 | static int meson_mmc_card_busy(struct mmc_host *mmc) |
| @@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
| 1231 | struct resource *res; | 1237 | struct resource *res; |
| 1232 | struct meson_host *host; | 1238 | struct meson_host *host; |
| 1233 | struct mmc_host *mmc; | 1239 | struct mmc_host *mmc; |
| 1234 | int ret, irq; | 1240 | int ret; |
| 1235 | 1241 | ||
| 1236 | mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); | 1242 | mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); |
| 1237 | if (!mmc) | 1243 | if (!mmc) |
| @@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
| 1276 | goto free_host; | 1282 | goto free_host; |
| 1277 | } | 1283 | } |
| 1278 | 1284 | ||
| 1279 | irq = platform_get_irq(pdev, 0); | 1285 | host->irq = platform_get_irq(pdev, 0); |
| 1280 | if (irq <= 0) { | 1286 | if (host->irq <= 0) { |
| 1281 | dev_err(&pdev->dev, "failed to get interrupt resource.\n"); | 1287 | dev_err(&pdev->dev, "failed to get interrupt resource.\n"); |
| 1282 | ret = -EINVAL; | 1288 | ret = -EINVAL; |
| 1283 | goto free_host; | 1289 | goto free_host; |
| @@ -1331,9 +1337,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
| 1331 | writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, | 1337 | writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, |
| 1332 | host->regs + SD_EMMC_IRQ_EN); | 1338 | host->regs + SD_EMMC_IRQ_EN); |
| 1333 | 1339 | ||
| 1334 | ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, | 1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, |
| 1335 | meson_mmc_irq_thread, IRQF_SHARED, | 1341 | meson_mmc_irq_thread, IRQF_SHARED, NULL, host); |
| 1336 | NULL, host); | ||
| 1337 | if (ret) | 1342 | if (ret) |
| 1338 | goto err_init_clk; | 1343 | goto err_init_clk; |
| 1339 | 1344 | ||
| @@ -1351,7 +1356,7 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
| 1351 | if (host->bounce_buf == NULL) { | 1356 | if (host->bounce_buf == NULL) { |
| 1352 | dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); | 1357 | dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); |
| 1353 | ret = -ENOMEM; | 1358 | ret = -ENOMEM; |
| 1354 | goto err_init_clk; | 1359 | goto err_free_irq; |
| 1355 | } | 1360 | } |
| 1356 | 1361 | ||
| 1357 | host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, | 1362 | host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, |
| @@ -1370,6 +1375,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
| 1370 | err_bounce_buf: | 1375 | err_bounce_buf: |
| 1371 | dma_free_coherent(host->dev, host->bounce_buf_size, | 1376 | dma_free_coherent(host->dev, host->bounce_buf_size, |
| 1372 | host->bounce_buf, host->bounce_dma_addr); | 1377 | host->bounce_buf, host->bounce_dma_addr); |
| 1378 | err_free_irq: | ||
| 1379 | free_irq(host->irq, host); | ||
| 1373 | err_init_clk: | 1380 | err_init_clk: |
| 1374 | clk_disable_unprepare(host->mmc_clk); | 1381 | clk_disable_unprepare(host->mmc_clk); |
| 1375 | err_core_clk: | 1382 | err_core_clk: |
| @@ -1387,6 +1394,7 @@ static int meson_mmc_remove(struct platform_device *pdev) | |||
| 1387 | 1394 | ||
| 1388 | /* disable interrupts */ | 1395 | /* disable interrupts */ |
| 1389 | writel(0, host->regs + SD_EMMC_IRQ_EN); | 1396 | writel(0, host->regs + SD_EMMC_IRQ_EN); |
| 1397 | free_irq(host->irq, host); | ||
| 1390 | 1398 | ||
| 1391 | dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, | 1399 | dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, |
| 1392 | host->descs, host->descs_dma_addr); | 1400 | host->descs, host->descs_dma_addr); |
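The meson-gx-mmc.c change moves from devm_request_threaded_irq() to an explicit request_threaded_irq()/free_irq() pair and stores host->irq, so the handler is released before the memory and clocks it touches are torn down. A hedged kernel-style fragment of that teardown ordering; my_remove and the MY_* macros are hypothetical, the calls are real APIs:

```c
/* Illustrative kernel-style fragment: free the IRQ explicitly in
 * .remove() so the handler can no longer run once the registers and
 * descriptor memory behind it start being torn down. */
static int my_remove(struct platform_device *pdev)
{
	struct my_host *host = platform_get_drvdata(pdev);

	writel(0, host->regs + MY_IRQ_EN);	/* mask the hardware first */
	free_irq(host->irq, host);		/* then drop the handler */

	dma_free_coherent(host->dev, MY_DESC_LEN,
			  host->descs, host->descs_dma);
	clk_disable_unprepare(host->clk);
	return 0;
}
```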
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 0db99057c44f..9d12c06c7fd6 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c | |||
| @@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev) | |||
| 296 | 296 | ||
| 297 | iproc_host->data = iproc_data; | 297 | iproc_host->data = iproc_data; |
| 298 | 298 | ||
| 299 | mmc_of_parse(host->mmc); | 299 | ret = mmc_of_parse(host->mmc); |
| 300 | if (ret) | ||
| 301 | goto err; | ||
| 302 | |||
| 300 | sdhci_get_property(pdev); | 303 | sdhci_get_property(pdev); |
| 301 | 304 | ||
| 302 | host->mmc->caps |= iproc_host->data->mmc_caps; | 305 | host->mmc->caps |= iproc_host->data->mmc_caps; |
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index eebac35304c6..6e8edc9375dd 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
| @@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali) | |||
| 1322 | } | 1322 | } |
| 1323 | 1323 | ||
| 1324 | /* clk rate info is needed for setup_data_interface */ | 1324 | /* clk rate info is needed for setup_data_interface */ |
| 1325 | if (denali->clk_rate && denali->clk_x_rate) | 1325 | if (!denali->clk_rate || !denali->clk_x_rate) |
| 1326 | chip->options |= NAND_KEEP_TIMINGS; | 1326 | chip->options |= NAND_KEEP_TIMINGS; |
| 1327 | 1327 | ||
| 1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; | 1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; |
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 325b4414dccc..c9149a37f8f0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c | |||
| @@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf, | |||
| 593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); | 593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | /* fsmc_select_chip - assert or deassert nCE */ | ||
| 597 | static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) | ||
| 598 | { | ||
| 599 | u32 pc = readl(host->regs_va + FSMC_PC); | ||
| 600 | |||
| 601 | if (!assert) | ||
| 602 | writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
| 603 | else | ||
| 604 | writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
| 605 | |||
| 606 | /* | ||
| 607 | * nCE line changes must be applied before returning from this | ||
| 608 | * function. | ||
| 609 | */ | ||
| 610 | mb(); | ||
| 611 | } | ||
| 612 | |||
| 613 | /* | 596 | /* |
| 614 | * fsmc_exec_op - hook called by the core to execute NAND operations | 597 | * fsmc_exec_op - hook called by the core to execute NAND operations |
| 615 | * | 598 | * |
| @@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
| 627 | 610 | ||
| 628 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); | 611 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); |
| 629 | 612 | ||
| 630 | fsmc_ce_ctrl(host, true); | ||
| 631 | |||
| 632 | for (op_id = 0; op_id < op->ninstrs; op_id++) { | 613 | for (op_id = 0; op_id < op->ninstrs; op_id++) { |
| 633 | instr = &op->instrs[op_id]; | 614 | instr = &op->instrs[op_id]; |
| 634 | 615 | ||
| @@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
| 686 | } | 667 | } |
| 687 | } | 668 | } |
| 688 | 669 | ||
| 689 | fsmc_ce_ctrl(host, false); | ||
| 690 | |||
| 691 | return ret; | 670 | return ret; |
| 692 | } | 671 | } |
| 693 | 672 | ||
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c index f92ae5aa2a54..9526d5b23c80 100644 --- a/drivers/mtd/nand/raw/jz4740_nand.c +++ b/drivers/mtd/nand/raw/jz4740_nand.c | |||
| @@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, | |||
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, | 262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, |
| 263 | const char *name, struct resource **res, void *__iomem *base) | 263 | const char *name, struct resource **res, void __iomem **base) |
| 264 | { | 264 | { |
| 265 | int ret; | 265 | int ret; |
| 266 | 266 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6371958dd170..edb1c023a753 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -519,7 +519,7 @@ config NET_FAILOVER | |||
| 519 | and destroy a failover master netdev and manages a primary and | 519 | and destroy a failover master netdev and manages a primary and |
| 520 | standby slave netdevs that get registered via the generic failover | 520 | standby slave netdevs that get registered via the generic failover |
| 521 | infrastructure. This can be used by paravirtual drivers to enable | 521 | infrastructure. This can be used by paravirtual drivers to enable |
| 522 | an alternate low latency datapath. It alsoenables live migration of | 522 | an alternate low latency datapath. It also enables live migration of |
| 523 | a VM with direct attached VF by failing over to the paravirtual | 523 | a VM with direct attached VF by failing over to the paravirtual |
| 524 | datapath when the VF is unplugged. | 524 | datapath when the VF is unplugged. |
| 525 | 525 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a9d597f28023..485462d3087f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -1963,6 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
| 1963 | if (!bond_has_slaves(bond)) { | 1963 | if (!bond_has_slaves(bond)) { |
| 1964 | bond_set_carrier(bond); | 1964 | bond_set_carrier(bond); |
| 1965 | eth_hw_addr_random(bond_dev); | 1965 | eth_hw_addr_random(bond_dev); |
| 1966 | bond->nest_level = SINGLE_DEPTH_NESTING; | ||
| 1967 | } else { | ||
| 1968 | bond->nest_level = dev_get_nest_level(bond_dev) + 1; | ||
| 1966 | } | 1969 | } |
| 1967 | 1970 | ||
| 1968 | unblock_netpoll_tx(); | 1971 | unblock_netpoll_tx(); |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3b3f88ffab53..c05e4d50d43d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
| @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); | |||
| 480 | struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) | 480 | struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) |
| 481 | { | 481 | { |
| 482 | struct can_priv *priv = netdev_priv(dev); | 482 | struct can_priv *priv = netdev_priv(dev); |
| 483 | struct sk_buff *skb = priv->echo_skb[idx]; | ||
| 484 | struct canfd_frame *cf; | ||
| 485 | 483 | ||
| 486 | if (idx >= priv->echo_skb_max) { | 484 | if (idx >= priv->echo_skb_max) { |
| 487 | netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", | 485 | netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", |
| @@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 | |||
| 489 | return NULL; | 487 | return NULL; |
| 490 | } | 488 | } |
| 491 | 489 | ||
| 492 | if (!skb) { | 490 | if (priv->echo_skb[idx]) { |
| 493 | netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", | 491 | /* Using "struct canfd_frame::len" for the frame |
| 494 | __func__, idx); | 492 | * length is supported on both CAN and CANFD frames. |
| 495 | return NULL; | 493 | */ |
| 496 | } | 494 | struct sk_buff *skb = priv->echo_skb[idx]; |
| 495 | struct canfd_frame *cf = (struct canfd_frame *)skb->data; | ||
| 496 | u8 len = cf->len; | ||
| 497 | 497 | ||
| 498 | /* Using "struct canfd_frame::len" for the frame | 498 | *len_ptr = len; |
| 499 | * length is supported on both CAN and CANFD frames. | 499 | priv->echo_skb[idx] = NULL; |
| 500 | */ | ||
| 501 | cf = (struct canfd_frame *)skb->data; | ||
| 502 | *len_ptr = cf->len; | ||
| 503 | priv->echo_skb[idx] = NULL; | ||
| 504 | 500 | ||
| 505 | return skb; | 501 | return skb; |
| 502 | } | ||
| 503 | |||
| 504 | return NULL; | ||
| 506 | } | 505 | } |
| 507 | 506 | ||
| 508 | /* | 507 | /* |
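Note on the __can_get_echo_skb() rework above: the array priv->echo_skb[] is now only dereferenced after idx has passed the bounds check, and the frame length is handed back through len_ptr. A rough sketch of how a driver's TX-completion path might consume the returned skb; the idx variable and the stats updates are assumptions for illustration, not taken from this patch:

	u8 len;
	struct sk_buff *skb = __can_get_echo_skb(dev, idx, &len);

	if (skb) {
		/* account the echoed frame and return it to the stack */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
		netif_rx(skb);
	}
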
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0f36eafe3ac1..1c66fb2ad76b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 1106 | } | 1106 | } |
| 1107 | } else { | 1107 | } else { |
| 1108 | /* clear and invalidate unused mailboxes first */ | 1108 | /* clear and invalidate unused mailboxes first */ |
| 1109 | for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { | 1109 | for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) { |
| 1110 | mb = flexcan_get_mb(priv, i); | 1110 | mb = flexcan_get_mb(priv, i); |
| 1111 | priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, | 1111 | priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, |
| 1112 | &mb->can_ctrl); | 1112 | &mb->can_ctrl); |
| @@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) | |||
| 1432 | gpr_np = of_find_node_by_phandle(phandle); | 1432 | gpr_np = of_find_node_by_phandle(phandle); |
| 1433 | if (!gpr_np) { | 1433 | if (!gpr_np) { |
| 1434 | dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); | 1434 | dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); |
| 1435 | return PTR_ERR(gpr_np); | 1435 | return -ENODEV; |
| 1436 | } | 1436 | } |
| 1437 | 1437 | ||
| 1438 | priv = netdev_priv(dev); | 1438 | priv = netdev_priv(dev); |
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 3b12e2dcff31..8a5111f9414c 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
| 9 | #include <linux/export.h> | 9 | #include <linux/export.h> |
| 10 | #include <linux/gpio.h> | ||
| 11 | #include <linux/gpio/consumer.h> | 10 | #include <linux/gpio/consumer.h> |
| 12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 13 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| @@ -15,7 +14,6 @@ | |||
| 15 | #include <linux/phy.h> | 14 | #include <linux/phy.h> |
| 16 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
| 17 | #include <linux/if_bridge.h> | 16 | #include <linux/if_bridge.h> |
| 18 | #include <linux/of_gpio.h> | ||
| 19 | #include <linux/of_net.h> | 17 | #include <linux/of_net.h> |
| 20 | #include <net/dsa.h> | 18 | #include <net/dsa.h> |
| 21 | #include <net/switchdev.h> | 19 | #include <net/switchdev.h> |
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 74547f43b938..a8a2c728afba 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
| 21 | #include <linux/of_gpio.h> | ||
| 22 | #include <linux/of_mdio.h> | 21 | #include <linux/of_mdio.h> |
| 23 | #include <linux/of_net.h> | 22 | #include <linux/of_net.h> |
| 24 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8a517d8fb9d1..8dca2c949e73 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
| @@ -2403,6 +2403,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) | |||
| 2403 | return mv88e6xxx_g1_stats_clear(chip); | 2403 | return mv88e6xxx_g1_stats_clear(chip); |
| 2404 | } | 2404 | } |
| 2405 | 2405 | ||
| 2406 | /* The mv88e6390 has some hidden registers used for debug and | ||
| 2407 | * development. The errata also makes use of them. | ||
| 2408 | */ | ||
| 2409 | static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port, | ||
| 2410 | int reg, u16 val) | ||
| 2411 | { | ||
| 2412 | u16 ctrl; | ||
| 2413 | int err; | ||
| 2414 | |||
| 2415 | err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT, | ||
| 2416 | PORT_RESERVED_1A, val); | ||
| 2417 | if (err) | ||
| 2418 | return err; | ||
| 2419 | |||
| 2420 | ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE | | ||
| 2421 | PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | | ||
| 2422 | reg; | ||
| 2423 | |||
| 2424 | return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
| 2425 | PORT_RESERVED_1A, ctrl); | ||
| 2426 | } | ||
| 2427 | |||
| 2428 | static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip) | ||
| 2429 | { | ||
| 2430 | return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
| 2431 | PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY); | ||
| 2432 | } | ||
| 2433 | |||
| 2434 | |||
| 2435 | static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port, | ||
| 2436 | int reg, u16 *val) | ||
| 2437 | { | ||
| 2438 | u16 ctrl; | ||
| 2439 | int err; | ||
| 2440 | |||
| 2441 | ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ | | ||
| 2442 | PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT | | ||
| 2443 | reg; | ||
| 2444 | |||
| 2445 | err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT, | ||
| 2446 | PORT_RESERVED_1A, ctrl); | ||
| 2447 | if (err) | ||
| 2448 | return err; | ||
| 2449 | |||
| 2450 | err = mv88e6390_hidden_wait(chip); | ||
| 2451 | if (err) | ||
| 2452 | return err; | ||
| 2453 | |||
| 2454 | return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT, | ||
| 2455 | PORT_RESERVED_1A, val); | ||
| 2456 | } | ||
| 2457 | |||
| 2458 | /* Check if the errata has already been applied. */ | ||
| 2459 | static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) | ||
| 2460 | { | ||
| 2461 | int port; | ||
| 2462 | int err; | ||
| 2463 | u16 val; | ||
| 2464 | |||
| 2465 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
| 2466 | err = mv88e6390_hidden_read(chip, port, 0, &val); | ||
| 2467 | if (err) { | ||
| 2468 | dev_err(chip->dev, | ||
| 2469 | "Error reading hidden register: %d\n", err); | ||
| 2470 | return false; | ||
| 2471 | } | ||
| 2472 | if (val != 0x01c0) | ||
| 2473 | return false; | ||
| 2474 | } | ||
| 2475 | |||
| 2476 | return true; | ||
| 2477 | } | ||
| 2478 | |||
| 2479 | /* The 6390 copper ports have an errata which require poking magic | ||
| 2480 | * values into undocumented hidden registers and then performing a | ||
| 2481 | * software reset. | ||
| 2482 | */ | ||
| 2483 | static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) | ||
| 2484 | { | ||
| 2485 | int port; | ||
| 2486 | int err; | ||
| 2487 | |||
| 2488 | if (mv88e6390_setup_errata_applied(chip)) | ||
| 2489 | return 0; | ||
| 2490 | |||
| 2491 | /* Set the ports into blocking mode */ | ||
| 2492 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
| 2493 | err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED); | ||
| 2494 | if (err) | ||
| 2495 | return err; | ||
| 2496 | } | ||
| 2497 | |||
| 2498 | for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { | ||
| 2499 | err = mv88e6390_hidden_write(chip, port, 0, 0x01c0); | ||
| 2500 | if (err) | ||
| 2501 | return err; | ||
| 2502 | } | ||
| 2503 | |||
| 2504 | return mv88e6xxx_software_reset(chip); | ||
| 2505 | } | ||
| 2506 | |||
| 2406 | static int mv88e6xxx_setup(struct dsa_switch *ds) | 2507 | static int mv88e6xxx_setup(struct dsa_switch *ds) |
| 2407 | { | 2508 | { |
| 2408 | struct mv88e6xxx_chip *chip = ds->priv; | 2509 | struct mv88e6xxx_chip *chip = ds->priv; |
| @@ -2415,6 +2516,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) | |||
| 2415 | 2516 | ||
| 2416 | mutex_lock(&chip->reg_lock); | 2517 | mutex_lock(&chip->reg_lock); |
| 2417 | 2518 | ||
| 2519 | if (chip->info->ops->setup_errata) { | ||
| 2520 | err = chip->info->ops->setup_errata(chip); | ||
| 2521 | if (err) | ||
| 2522 | goto unlock; | ||
| 2523 | } | ||
| 2524 | |||
| 2418 | /* Cache the cmode of each port. */ | 2525 | /* Cache the cmode of each port. */ |
| 2419 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { | 2526 | for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { |
| 2420 | if (chip->info->ops->port_get_cmode) { | 2527 | if (chip->info->ops->port_get_cmode) { |
| @@ -3226,6 +3333,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { | |||
| 3226 | 3333 | ||
| 3227 | static const struct mv88e6xxx_ops mv88e6190_ops = { | 3334 | static const struct mv88e6xxx_ops mv88e6190_ops = { |
| 3228 | /* MV88E6XXX_FAMILY_6390 */ | 3335 | /* MV88E6XXX_FAMILY_6390 */ |
| 3336 | .setup_errata = mv88e6390_setup_errata, | ||
| 3229 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3337 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3230 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3338 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3231 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3339 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
| @@ -3269,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { | |||
| 3269 | 3377 | ||
| 3270 | static const struct mv88e6xxx_ops mv88e6190x_ops = { | 3378 | static const struct mv88e6xxx_ops mv88e6190x_ops = { |
| 3271 | /* MV88E6XXX_FAMILY_6390 */ | 3379 | /* MV88E6XXX_FAMILY_6390 */ |
| 3380 | .setup_errata = mv88e6390_setup_errata, | ||
| 3272 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3381 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3273 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3382 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3274 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3383 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
| @@ -3312,6 +3421,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { | |||
| 3312 | 3421 | ||
| 3313 | static const struct mv88e6xxx_ops mv88e6191_ops = { | 3422 | static const struct mv88e6xxx_ops mv88e6191_ops = { |
| 3314 | /* MV88E6XXX_FAMILY_6390 */ | 3423 | /* MV88E6XXX_FAMILY_6390 */ |
| 3424 | .setup_errata = mv88e6390_setup_errata, | ||
| 3315 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3425 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3316 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3426 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3317 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3427 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
| @@ -3404,6 +3514,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { | |||
| 3404 | 3514 | ||
| 3405 | static const struct mv88e6xxx_ops mv88e6290_ops = { | 3515 | static const struct mv88e6xxx_ops mv88e6290_ops = { |
| 3406 | /* MV88E6XXX_FAMILY_6390 */ | 3516 | /* MV88E6XXX_FAMILY_6390 */ |
| 3517 | .setup_errata = mv88e6390_setup_errata, | ||
| 3407 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3518 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3408 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3519 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3409 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3520 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
| @@ -3709,6 +3820,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { | |||
| 3709 | 3820 | ||
| 3710 | static const struct mv88e6xxx_ops mv88e6390_ops = { | 3821 | static const struct mv88e6xxx_ops mv88e6390_ops = { |
| 3711 | /* MV88E6XXX_FAMILY_6390 */ | 3822 | /* MV88E6XXX_FAMILY_6390 */ |
| 3823 | .setup_errata = mv88e6390_setup_errata, | ||
| 3712 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3824 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3713 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3825 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3714 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3826 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
| @@ -3756,6 +3868,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { | |||
| 3756 | 3868 | ||
| 3757 | static const struct mv88e6xxx_ops mv88e6390x_ops = { | 3869 | static const struct mv88e6xxx_ops mv88e6390x_ops = { |
| 3758 | /* MV88E6XXX_FAMILY_6390 */ | 3870 | /* MV88E6XXX_FAMILY_6390 */ |
| 3871 | .setup_errata = mv88e6390_setup_errata, | ||
| 3759 | .irl_init_all = mv88e6390_g2_irl_init_all, | 3872 | .irl_init_all = mv88e6390_g2_irl_init_all, |
| 3760 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, | 3873 | .get_eeprom = mv88e6xxx_g2_get_eeprom8, |
| 3761 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, | 3874 | .set_eeprom = mv88e6xxx_g2_set_eeprom8, |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index f9ecb7872d32..546651d8c3e1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h | |||
| @@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus { | |||
| 300 | }; | 300 | }; |
| 301 | 301 | ||
| 302 | struct mv88e6xxx_ops { | 302 | struct mv88e6xxx_ops { |
| 303 | /* Switch Setup Errata, called early in the switch setup to | ||
| 304 | * allow any errata actions to be performed | ||
| 305 | */ | ||
| 306 | int (*setup_errata)(struct mv88e6xxx_chip *chip); | ||
| 307 | |||
| 303 | int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); | 308 | int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); |
| 304 | int (*ip_pri_map)(struct mv88e6xxx_chip *chip); | 309 | int (*ip_pri_map)(struct mv88e6xxx_chip *chip); |
| 305 | 310 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 0d81866d0e4a..e583641de758 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h | |||
| @@ -251,6 +251,16 @@ | |||
| 251 | /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ | 251 | /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ |
| 252 | #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 | 252 | #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 |
| 253 | 253 | ||
| 254 | /* Offset 0x1a: Magic undocumented errata register */ | ||
| 255 | #define PORT_RESERVED_1A 0x1a | ||
| 256 | #define PORT_RESERVED_1A_BUSY BIT(15) | ||
| 257 | #define PORT_RESERVED_1A_WRITE BIT(14) | ||
| 258 | #define PORT_RESERVED_1A_READ 0 | ||
| 259 | #define PORT_RESERVED_1A_PORT_SHIFT 5 | ||
| 260 | #define PORT_RESERVED_1A_BLOCK (0xf << 10) | ||
| 261 | #define PORT_RESERVED_1A_CTRL_PORT 4 | ||
| 262 | #define PORT_RESERVED_1A_DATA_PORT 5 | ||
| 263 | |||
| 254 | int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, | 264 | int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, |
| 255 | u16 *val); | 265 | u16 *val); |
| 256 | int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, | 266 | int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, |
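The hidden-register helpers added in chip.c drive an undocumented control/data register pair, both living at offset 0x1a: the control register is reached through port address 4 and the data register through port address 5. The data word goes out first, then a control word carrying BUSY plus the direction, block, port and register fields. A minimal sketch of how that control word is assembled from the PORT_RESERVED_1A_* defines above; the helper name is made up for illustration:

	/* Illustrative only: build the hidden-register control word. */
	static u16 hidden_ctrl(int port, int reg, bool write)
	{
		u16 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_BLOCK |
			   (port << PORT_RESERVED_1A_PORT_SHIFT) | reg;

		if (write)
			ctrl |= PORT_RESERVED_1A_WRITE;
		/* PORT_RESERVED_1A_READ is 0, so a read leaves the bit clear */

		return ctrl;
	}
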
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c index b4b839a1d095..ad41ec63cc9f 100644 --- a/drivers/net/dsa/realtek-smi.c +++ b/drivers/net/dsa/realtek-smi.c | |||
| @@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) | |||
| 347 | struct device_node *mdio_np; | 347 | struct device_node *mdio_np; |
| 348 | int ret; | 348 | int ret; |
| 349 | 349 | ||
| 350 | mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, | 350 | mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); |
| 351 | "realtek,smi-mdio"); | ||
| 352 | if (!mdio_np) { | 351 | if (!mdio_np) { |
| 353 | dev_err(smi->dev, "no MDIO bus node\n"); | 352 | dev_err(smi->dev, "no MDIO bus node\n"); |
| 354 | return -ENODEV; | 353 | return -ENODEV; |
| 355 | } | 354 | } |
| 356 | 355 | ||
| 357 | smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); | 356 | smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); |
| 358 | if (!smi->slave_mii_bus) | 357 | if (!smi->slave_mii_bus) { |
| 359 | return -ENOMEM; | 358 | ret = -ENOMEM; |
| 359 | goto err_put_node; | ||
| 360 | } | ||
| 360 | smi->slave_mii_bus->priv = smi; | 361 | smi->slave_mii_bus->priv = smi; |
| 361 | smi->slave_mii_bus->name = "SMI slave MII"; | 362 | smi->slave_mii_bus->name = "SMI slave MII"; |
| 362 | smi->slave_mii_bus->read = realtek_smi_mdio_read; | 363 | smi->slave_mii_bus->read = realtek_smi_mdio_read; |
| @@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) | |||
| 371 | if (ret) { | 372 | if (ret) { |
| 372 | dev_err(smi->dev, "unable to register MDIO bus %s\n", | 373 | dev_err(smi->dev, "unable to register MDIO bus %s\n", |
| 373 | smi->slave_mii_bus->id); | 374 | smi->slave_mii_bus->id); |
| 374 | of_node_put(mdio_np); | 375 | goto err_put_node; |
| 375 | } | 376 | } |
| 376 | 377 | ||
| 377 | return 0; | 378 | return 0; |
| 379 | |||
| 380 | err_put_node: | ||
| 381 | of_node_put(mdio_np); | ||
| 382 | |||
| 383 | return ret; | ||
| 378 | } | 384 | } |
| 379 | 385 | ||
| 380 | static int realtek_smi_probe(struct platform_device *pdev) | 386 | static int realtek_smi_probe(struct platform_device *pdev) |
| @@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) | |||
| 457 | struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); | 463 | struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); |
| 458 | 464 | ||
| 459 | dsa_unregister_switch(smi->ds); | 465 | dsa_unregister_switch(smi->ds); |
| 466 | if (smi->slave_mii_bus) | ||
| 467 | of_node_put(smi->slave_mii_bus->dev.of_node); | ||
| 460 | gpiod_set_value(smi->reset, 1); | 468 | gpiod_set_value(smi->reset, 1); |
| 461 | 469 | ||
| 462 | return 0; | 470 | return 0; |
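The realtek-smi change replaces the early returns after of_get_compatible_child() with a single err_put_node label, so the node reference taken by the lookup is dropped on every failure path (and again at remove time for the registered bus). A minimal sketch of the same unwind pattern; the compatible string and the do_setup() helper are placeholders, not from the driver:

	static int setup_example(struct device *dev)
	{
		struct device_node *np;
		int ret;

		np = of_get_compatible_child(dev->of_node, "vendor,example");
		if (!np)
			return -ENODEV;

		ret = do_setup(dev, np);	/* placeholder for the real work */
		if (ret)
			goto err_put_node;

		return 0;

	err_put_node:
		of_node_put(np);
		return ret;
	}
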
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 02921d877c08..aa1d1f5339d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
| @@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) | |||
| 714 | 714 | ||
| 715 | phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, | 715 | phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, |
| 716 | priv->phy_iface); | 716 | priv->phy_iface); |
| 717 | if (IS_ERR(phydev)) | 717 | if (IS_ERR(phydev)) { |
| 718 | netdev_err(dev, "Could not attach to PHY\n"); | 718 | netdev_err(dev, "Could not attach to PHY\n"); |
| 719 | phydev = NULL; | ||
| 720 | } | ||
| 719 | 721 | ||
| 720 | } else { | 722 | } else { |
| 721 | int ret; | 723 | int ret; |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d272dc6984ac..b40d4377cc71 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
| @@ -431,8 +431,6 @@ | |||
| 431 | #define MAC_MDIOSCAR_PA_WIDTH 5 | 431 | #define MAC_MDIOSCAR_PA_WIDTH 5 |
| 432 | #define MAC_MDIOSCAR_RA_INDEX 0 | 432 | #define MAC_MDIOSCAR_RA_INDEX 0 |
| 433 | #define MAC_MDIOSCAR_RA_WIDTH 16 | 433 | #define MAC_MDIOSCAR_RA_WIDTH 16 |
| 434 | #define MAC_MDIOSCAR_REG_INDEX 0 | ||
| 435 | #define MAC_MDIOSCAR_REG_WIDTH 21 | ||
| 436 | #define MAC_MDIOSCCDR_BUSY_INDEX 22 | 434 | #define MAC_MDIOSCCDR_BUSY_INDEX 22 |
| 437 | #define MAC_MDIOSCCDR_BUSY_WIDTH 1 | 435 | #define MAC_MDIOSCCDR_BUSY_WIDTH 1 |
| 438 | #define MAC_MDIOSCCDR_CMD_INDEX 16 | 436 | #define MAC_MDIOSCCDR_CMD_INDEX 16 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 1e929a1e4ca7..4666084eda16 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
| @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, | |||
| 1284 | } | 1284 | } |
| 1285 | } | 1285 | } |
| 1286 | 1286 | ||
| 1287 | static unsigned int xgbe_create_mdio_sca(int port, int reg) | ||
| 1288 | { | ||
| 1289 | unsigned int mdio_sca, da; | ||
| 1290 | |||
| 1291 | da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; | ||
| 1292 | |||
| 1293 | mdio_sca = 0; | ||
| 1294 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); | ||
| 1295 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); | ||
| 1296 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); | ||
| 1297 | |||
| 1298 | return mdio_sca; | ||
| 1299 | } | ||
| 1300 | |||
| 1287 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | 1301 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, |
| 1288 | int reg, u16 val) | 1302 | int reg, u16 val) |
| 1289 | { | 1303 | { |
| @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | |||
| 1291 | 1305 | ||
| 1292 | reinit_completion(&pdata->mdio_complete); | 1306 | reinit_completion(&pdata->mdio_complete); |
| 1293 | 1307 | ||
| 1294 | mdio_sca = 0; | 1308 | mdio_sca = xgbe_create_mdio_sca(addr, reg); |
| 1295 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
| 1296 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
| 1297 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | 1309 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
| 1298 | 1310 | ||
| 1299 | mdio_sccd = 0; | 1311 | mdio_sccd = 0; |
| @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | |||
| 1317 | 1329 | ||
| 1318 | reinit_completion(&pdata->mdio_complete); | 1330 | reinit_completion(&pdata->mdio_complete); |
| 1319 | 1331 | ||
| 1320 | mdio_sca = 0; | 1332 | mdio_sca = xgbe_create_mdio_sca(addr, reg); |
| 1321 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
| 1322 | XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
| 1323 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | 1333 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); |
| 1324 | 1334 | ||
| 1325 | mdio_sccd = 0; | 1335 | mdio_sccd = 0; |
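xgbe_create_mdio_sca() factors out the address composition that the read and write paths previously duplicated, and adds clause-45 handling: when MII_ADDR_C45 is set in reg, the MMD device address sits in bits 16-20 and the register number in the low 16 bits, so da = reg >> 16 picks up the device address (the narrow DA field write should then drop the flag bit). A small illustration of that encoding; the macro name is invented for the example:

	#include <linux/mii.h>		/* MII_ADDR_C45 is bit 30 */

	/* Illustrative clause-45 register encoding, not a driver API: */
	#define EXAMPLE_C45_REG(devad, regnum) \
		(MII_ADDR_C45 | ((devad) << 16) | ((regnum) & 0xffff))

	/*
	 * e.g. MMD 1 (PMA/PMD), register 0:
	 *   reg       = EXAMPLE_C45_REG(1, 0)
	 *   reg >> 16 = 0x4001, the DA field keeps the low bits (0x01)
	 *   reg & 0xffff -> RA = 0
	 */
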
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4ab6eb3baefc..6a512871176b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -5601,7 +5601,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
| 5601 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | | 5601 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
| 5602 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; | 5602 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
| 5603 | if (bp->flags & BNXT_FLAG_CHIP_P5) | 5603 | if (bp->flags & BNXT_FLAG_CHIP_P5) |
| 5604 | flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; | 5604 | flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | |
| 5605 | FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; | ||
| 5605 | else | 5606 | else |
| 5606 | flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; | 5607 | flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; |
| 5607 | } | 5608 | } |
| @@ -6221,9 +6222,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, | |||
| 6221 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; | 6222 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; |
| 6222 | rmem->depth = 1; | 6223 | rmem->depth = 1; |
| 6223 | rmem->nr_pages = MAX_CTX_PAGES; | 6224 | rmem->nr_pages = MAX_CTX_PAGES; |
| 6224 | if (i == (nr_tbls - 1)) | 6225 | if (i == (nr_tbls - 1)) { |
| 6225 | rmem->nr_pages = ctx_pg->nr_pages % | 6226 | int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; |
| 6226 | MAX_CTX_PAGES; | 6227 | |
| 6228 | if (rem) | ||
| 6229 | rmem->nr_pages = rem; | ||
| 6230 | } | ||
| 6227 | rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); | 6231 | rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); |
| 6228 | if (rc) | 6232 | if (rc) |
| 6229 | break; | 6233 | break; |
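The bnxt_alloc_ctx_pg_tbls() hunk fixes the sizing of the last page table: when ctx_pg->nr_pages is an exact multiple of MAX_CTX_PAGES the remainder is zero, and the old code would have sized the final table at zero pages. A worked example with a hypothetical MAX_CTX_PAGES of 8 (the real constant differs):

	/*
	 * nr_pages = 16, MAX_CTX_PAGES = 8 (hypothetical):
	 *   old: last table sized 16 % 8 = 0 pages  -> broken
	 *   new: remainder is 0, keep the full 8    -> correct
	 * nr_pages = 17:
	 *   both: last table gets 17 % 8 = 1 page
	 */
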
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index f1aaac8e6268..0a0995894ddb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | |||
| @@ -386,8 +386,8 @@ struct hwrm_err_output { | |||
| 386 | #define HWRM_VERSION_MAJOR 1 | 386 | #define HWRM_VERSION_MAJOR 1 |
| 387 | #define HWRM_VERSION_MINOR 10 | 387 | #define HWRM_VERSION_MINOR 10 |
| 388 | #define HWRM_VERSION_UPDATE 0 | 388 | #define HWRM_VERSION_UPDATE 0 |
| 389 | #define HWRM_VERSION_RSVD 33 | 389 | #define HWRM_VERSION_RSVD 35 |
| 390 | #define HWRM_VERSION_STR "1.10.0.33" | 390 | #define HWRM_VERSION_STR "1.10.0.35" |
| 391 | 391 | ||
| 392 | /* hwrm_ver_get_input (size:192b/24B) */ | 392 | /* hwrm_ver_get_input (size:192b/24B) */ |
| 393 | struct hwrm_ver_get_input { | 393 | struct hwrm_ver_get_input { |
| @@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input { | |||
| 1184 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL | 1184 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL |
| 1185 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL | 1185 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL |
| 1186 | #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL | 1186 | #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL |
| 1187 | #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL | ||
| 1187 | __le32 enables; | 1188 | __le32 enables; |
| 1188 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL | 1189 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL |
| 1189 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL | 1190 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b126926ef7f5..66cc7927061a 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -1738,12 +1738,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) | |||
| 1738 | *skb = nskb; | 1738 | *skb = nskb; |
| 1739 | } | 1739 | } |
| 1740 | 1740 | ||
| 1741 | if (padlen) { | 1741 | if (padlen > ETH_FCS_LEN) |
| 1742 | if (padlen >= ETH_FCS_LEN) | 1742 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); |
| 1743 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); | ||
| 1744 | else | ||
| 1745 | skb_trim(*skb, ETH_FCS_LEN - padlen); | ||
| 1746 | } | ||
| 1747 | 1743 | ||
| 1748 | add_fcs: | 1744 | add_fcs: |
| 1749 | /* set FCS to packet */ | 1745 | /* set FCS to packet */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 85f22c286680..89db739b7819 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
| @@ -2381,7 +2381,7 @@ no_mem: | |||
| 2381 | lro_add_page(adap, qs, fl, | 2381 | lro_add_page(adap, qs, fl, |
| 2382 | G_RSPD_LEN(len), | 2382 | G_RSPD_LEN(len), |
| 2383 | flags & F_RSPD_EOP); | 2383 | flags & F_RSPD_EOP); |
| 2384 | goto next_fl; | 2384 | goto next_fl; |
| 2385 | } | 2385 | } |
| 2386 | 2386 | ||
| 2387 | skb = get_packet_pg(adap, fl, q, | 2387 | skb = get_packet_pg(adap, fl, q, |
| @@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap) | |||
| 3214 | for (i = 0; i < SGE_QSETS; ++i) { | 3214 | for (i = 0; i < SGE_QSETS; ++i) { |
| 3215 | struct sge_qset *q = &adap->sge.qs[i]; | 3215 | struct sge_qset *q = &adap->sge.qs[i]; |
| 3216 | 3216 | ||
| 3217 | if (q->tx_reclaim_timer.function) | 3217 | if (q->tx_reclaim_timer.function) |
| 3218 | mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | 3218 | mod_timer(&q->tx_reclaim_timer, |
| 3219 | jiffies + TX_RECLAIM_PERIOD); | ||
| 3219 | 3220 | ||
| 3220 | if (q->rx_reclaim_timer.function) | 3221 | if (q->rx_reclaim_timer.function) |
| 3221 | mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); | 3222 | mod_timer(&q->rx_reclaim_timer, |
| 3223 | jiffies + RX_RECLAIM_PERIOD); | ||
| 3222 | } | 3224 | } |
| 3223 | } | 3225 | } |
| 3224 | 3226 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 080918af773c..0a9f2c596624 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | |||
| @@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter) | |||
| 1082 | CH_WARN(adapter, "found newer FW version(%u.%u), " | 1082 | CH_WARN(adapter, "found newer FW version(%u.%u), " |
| 1083 | "driver compiled for version %u.%u\n", major, minor, | 1083 | "driver compiled for version %u.%u\n", major, minor, |
| 1084 | FW_VERSION_MAJOR, FW_VERSION_MINOR); | 1084 | FW_VERSION_MAJOR, FW_VERSION_MINOR); |
| 1085 | return 0; | 1085 | return 0; |
| 1086 | } | 1086 | } |
| 1087 | return -EINVAL; | 1087 | return -EINVAL; |
| 1088 | } | 1088 | } |
| @@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter) | |||
| 3619 | 3619 | ||
| 3620 | static int init_parity(struct adapter *adap) | 3620 | static int init_parity(struct adapter *adap) |
| 3621 | { | 3621 | { |
| 3622 | int i, err, addr; | 3622 | int i, err, addr; |
| 3623 | 3623 | ||
| 3624 | if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) | 3624 | if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) |
| 3625 | return -EBUSY; | 3625 | return -EBUSY; |
| @@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter) | |||
| 3806 | p->phy.ops->power_down(&p->phy, 1); | 3806 | p->phy.ops->power_down(&p->phy, 1); |
| 3807 | } | 3807 | } |
| 3808 | 3808 | ||
| 3809 | return 0; | 3809 | return 0; |
| 3810 | } | 3810 | } |
| 3811 | 3811 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 9f9d6cae39d5..58a039c3224a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | |||
| @@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter) | |||
| 378 | int err; | 378 | int err; |
| 379 | 379 | ||
| 380 | memset(&c, 0, sizeof(c)); | 380 | memset(&c, 0, sizeof(c)); |
| 381 | c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | | 381 | c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | |
| 382 | FW_CMD_REQUEST_F | | 382 | FW_CMD_REQUEST_F | |
| 383 | FW_CMD_WRITE_F | | 383 | FW_CMD_WRITE_F | |
| 384 | FW_PTP_CMD_PORTID_V(0)); | 384 | FW_PTP_CMD_PORTID_V(0)); |
| 385 | c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); | 385 | c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); |
| 386 | c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; | 386 | c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; |
| 387 | 387 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 9a6065a3fa46..c041f44324db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | |||
| @@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) | |||
| 78 | unsigned long flags; | 78 | unsigned long flags; |
| 79 | 79 | ||
| 80 | spin_lock_irqsave(&bmap->lock, flags); | 80 | spin_lock_irqsave(&bmap->lock, flags); |
| 81 | __clear_bit(msix_idx, bmap->msix_bmap); | 81 | __clear_bit(msix_idx, bmap->msix_bmap); |
| 82 | spin_unlock_irqrestore(&bmap->lock, flags); | 82 | spin_unlock_irqrestore(&bmap->lock, flags); |
| 83 | } | 83 | } |
| 84 | 84 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8c34292a0ec..2b03f6187a24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap, | |||
| 3794 | /* If we have version number support, then check to see if the adapter | 3794 | /* If we have version number support, then check to see if the adapter |
| 3795 | * already has up-to-date PHY firmware loaded. | 3795 | * already has up-to-date PHY firmware loaded. |
| 3796 | */ | 3796 | */ |
| 3797 | if (phy_fw_version) { | 3797 | if (phy_fw_version) { |
| 3798 | new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); | 3798 | new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); |
| 3799 | ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); | 3799 | ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); |
| 3800 | if (ret < 0) | 3800 | if (ret < 0) |
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index f53090cde041..dfebc30c4841 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
| @@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
| 2051 | bool nonlinear = skb_is_nonlinear(skb); | 2051 | bool nonlinear = skb_is_nonlinear(skb); |
| 2052 | struct rtnl_link_stats64 *percpu_stats; | 2052 | struct rtnl_link_stats64 *percpu_stats; |
| 2053 | struct dpaa_percpu_priv *percpu_priv; | 2053 | struct dpaa_percpu_priv *percpu_priv; |
| 2054 | struct netdev_queue *txq; | ||
| 2054 | struct dpaa_priv *priv; | 2055 | struct dpaa_priv *priv; |
| 2055 | struct qm_fd fd; | 2056 | struct qm_fd fd; |
| 2056 | int offset = 0; | 2057 | int offset = 0; |
| @@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
| 2100 | if (unlikely(err < 0)) | 2101 | if (unlikely(err < 0)) |
| 2101 | goto skb_to_fd_failed; | 2102 | goto skb_to_fd_failed; |
| 2102 | 2103 | ||
| 2104 | txq = netdev_get_tx_queue(net_dev, queue_mapping); | ||
| 2105 | |||
| 2106 | /* LLTX requires to do our own update of trans_start */ | ||
| 2107 | txq->trans_start = jiffies; | ||
| 2108 | |||
| 2103 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { | 2109 | if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { |
| 2104 | fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); | 2110 | fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); |
| 2105 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 2111 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index 809a155eb193..f6d244c663fd 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig | |||
| @@ -9,8 +9,9 @@ config FSL_DPAA2_ETH | |||
| 9 | 9 | ||
| 10 | config FSL_DPAA2_PTP_CLOCK | 10 | config FSL_DPAA2_PTP_CLOCK |
| 11 | tristate "Freescale DPAA2 PTP Clock" | 11 | tristate "Freescale DPAA2 PTP Clock" |
| 12 | depends on FSL_DPAA2_ETH && POSIX_TIMERS | 12 | depends on FSL_DPAA2_ETH |
| 13 | select PTP_1588_CLOCK | 13 | imply PTP_1588_CLOCK |
| 14 | default y | ||
| 14 | help | 15 | help |
| 15 | This driver adds support for using the DPAA2 1588 timer module | 16 | This driver adds support for using the DPAA2 1588 timer module |
| 16 | as a PTP clock. | 17 | as a PTP clock. |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ae0f88bce9aa..2370dc204202 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev) | |||
| 3467 | if (ret) | 3467 | if (ret) |
| 3468 | goto failed_clk_ipg; | 3468 | goto failed_clk_ipg; |
| 3469 | 3469 | ||
| 3470 | fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); | 3470 | fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); |
| 3471 | if (!IS_ERR(fep->reg_phy)) { | 3471 | if (!IS_ERR(fep->reg_phy)) { |
| 3472 | ret = regulator_enable(fep->reg_phy); | 3472 | ret = regulator_enable(fep->reg_phy); |
| 3473 | if (ret) { | 3473 | if (ret) { |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index ad1779fc410e..a78bfafd212c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
| @@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
| 147 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | 147 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); |
| 148 | int i; | 148 | int i; |
| 149 | 149 | ||
| 150 | vf_cb->mac_cb = NULL; | ||
| 151 | |||
| 152 | kfree(vf_cb); | ||
| 153 | |||
| 154 | for (i = 0; i < handle->q_num; i++) | 150 | for (i = 0; i < handle->q_num; i++) |
| 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 151 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
| 152 | |||
| 153 | kfree(vf_cb); | ||
| 156 | } | 154 | } |
| 157 | 155 | ||
| 158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | 156 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5748d3f722f6..5b33238c6680 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | |||
| 1170 | if (!h->phy_dev) | 1170 | if (!h->phy_dev) |
| 1171 | return 0; | 1171 | return 0; |
| 1172 | 1172 | ||
| 1173 | ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); | ||
| 1174 | linkmode_and(phy_dev->supported, phy_dev->supported, supported); | ||
| 1175 | linkmode_copy(phy_dev->advertising, phy_dev->supported); | ||
| 1176 | |||
| 1177 | if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
| 1178 | phy_dev->autoneg = false; | ||
| 1179 | |||
| 1173 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { | 1180 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { |
| 1174 | phy_dev->dev_flags = 0; | 1181 | phy_dev->dev_flags = 0; |
| 1175 | 1182 | ||
| @@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | |||
| 1181 | if (unlikely(ret)) | 1188 | if (unlikely(ret)) |
| 1182 | return -ENODEV; | 1189 | return -ENODEV; |
| 1183 | 1190 | ||
| 1184 | ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); | ||
| 1185 | linkmode_and(phy_dev->supported, phy_dev->supported, supported); | ||
| 1186 | linkmode_copy(phy_dev->advertising, phy_dev->supported); | ||
| 1187 | |||
| 1188 | if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
| 1189 | phy_dev->autoneg = false; | ||
| 1190 | |||
| 1191 | if (h->phy_if == PHY_INTERFACE_MODE_SGMII) | ||
| 1192 | phy_stop(phy_dev); | ||
| 1193 | |||
| 1194 | return 0; | 1191 | return 0; |
| 1195 | } | 1192 | } |
| 1196 | 1193 | ||
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 098d8764c0ea..dd71d5db7274 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
| @@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) | |||
| 1313 | unsigned long lpar_rc; | 1313 | unsigned long lpar_rc; |
| 1314 | u16 mss = 0; | 1314 | u16 mss = 0; |
| 1315 | 1315 | ||
| 1316 | restart_poll: | ||
| 1317 | while (frames_processed < budget) { | 1316 | while (frames_processed < budget) { |
| 1318 | if (!ibmveth_rxq_pending_buffer(adapter)) | 1317 | if (!ibmveth_rxq_pending_buffer(adapter)) |
| 1319 | break; | 1318 | break; |
| @@ -1401,7 +1400,6 @@ restart_poll: | |||
| 1401 | napi_reschedule(napi)) { | 1400 | napi_reschedule(napi)) { |
| 1402 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1401 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
| 1403 | VIO_IRQ_DISABLE); | 1402 | VIO_IRQ_DISABLE); |
| 1404 | goto restart_poll; | ||
| 1405 | } | 1403 | } |
| 1406 | } | 1404 | } |
| 1407 | 1405 | ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 31fb76ee9d82..a1246e89aad4 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig | |||
| @@ -159,7 +159,7 @@ config IXGBE | |||
| 159 | tristate "Intel(R) 10GbE PCI Express adapters support" | 159 | tristate "Intel(R) 10GbE PCI Express adapters support" |
| 160 | depends on PCI | 160 | depends on PCI |
| 161 | select MDIO | 161 | select MDIO |
| 162 | select MDIO_DEVICE | 162 | select PHYLIB |
| 163 | imply PTP_1588_CLOCK | 163 | imply PTP_1588_CLOCK |
| 164 | ---help--- | 164 | ---help--- |
| 165 | This driver supports Intel(R) 10GbE PCI Express family of | 165 | This driver supports Intel(R) 10GbE PCI Express family of |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index fe1592ae8769..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
| @@ -515,7 +515,7 @@ struct igb_adapter { | |||
| 515 | /* OS defined structs */ | 515 | /* OS defined structs */ |
| 516 | struct pci_dev *pdev; | 516 | struct pci_dev *pdev; |
| 517 | 517 | ||
| 518 | struct mutex stats64_lock; | 518 | spinlock_t stats64_lock; |
| 519 | struct rtnl_link_stats64 stats64; | 519 | struct rtnl_link_stats64 stats64; |
| 520 | 520 | ||
| 521 | /* structs defined in e1000_hw.h */ | 521 | /* structs defined in e1000_hw.h */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7426060b678f..c57671068245 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
| @@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
| 2295 | int i, j; | 2295 | int i, j; |
| 2296 | char *p; | 2296 | char *p; |
| 2297 | 2297 | ||
| 2298 | mutex_lock(&adapter->stats64_lock); | 2298 | spin_lock(&adapter->stats64_lock); |
| 2299 | igb_update_stats(adapter); | 2299 | igb_update_stats(adapter); |
| 2300 | 2300 | ||
| 2301 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { | 2301 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { |
| @@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
| 2338 | } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); | 2338 | } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); |
| 2339 | i += IGB_RX_QUEUE_STATS_LEN; | 2339 | i += IGB_RX_QUEUE_STATS_LEN; |
| 2340 | } | 2340 | } |
| 2341 | mutex_unlock(&adapter->stats64_lock); | 2341 | spin_unlock(&adapter->stats64_lock); |
| 2342 | } | 2342 | } |
| 2343 | 2343 | ||
| 2344 | static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | 2344 | static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 87bdf1604ae2..7137e7f9c7f3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter) | |||
| 2203 | del_timer_sync(&adapter->phy_info_timer); | 2203 | del_timer_sync(&adapter->phy_info_timer); |
| 2204 | 2204 | ||
| 2205 | /* record the stats before reset*/ | 2205 | /* record the stats before reset*/ |
| 2206 | mutex_lock(&adapter->stats64_lock); | 2206 | spin_lock(&adapter->stats64_lock); |
| 2207 | igb_update_stats(adapter); | 2207 | igb_update_stats(adapter); |
| 2208 | mutex_unlock(&adapter->stats64_lock); | 2208 | spin_unlock(&adapter->stats64_lock); |
| 2209 | 2209 | ||
| 2210 | adapter->link_speed = 0; | 2210 | adapter->link_speed = 0; |
| 2211 | adapter->link_duplex = 0; | 2211 | adapter->link_duplex = 0; |
| @@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
| 3840 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | 3840 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
| 3841 | 3841 | ||
| 3842 | spin_lock_init(&adapter->nfc_lock); | 3842 | spin_lock_init(&adapter->nfc_lock); |
| 3843 | mutex_init(&adapter->stats64_lock); | 3843 | spin_lock_init(&adapter->stats64_lock); |
| 3844 | #ifdef CONFIG_PCI_IOV | 3844 | #ifdef CONFIG_PCI_IOV |
| 3845 | switch (hw->mac.type) { | 3845 | switch (hw->mac.type) { |
| 3846 | case e1000_82576: | 3846 | case e1000_82576: |
| @@ -5406,9 +5406,9 @@ no_wait: | |||
| 5406 | } | 5406 | } |
| 5407 | } | 5407 | } |
| 5408 | 5408 | ||
| 5409 | mutex_lock(&adapter->stats64_lock); | 5409 | spin_lock(&adapter->stats64_lock); |
| 5410 | igb_update_stats(adapter); | 5410 | igb_update_stats(adapter); |
| 5411 | mutex_unlock(&adapter->stats64_lock); | 5411 | spin_unlock(&adapter->stats64_lock); |
| 5412 | 5412 | ||
| 5413 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5413 | for (i = 0; i < adapter->num_tx_queues; i++) { |
| 5414 | struct igb_ring *tx_ring = adapter->tx_ring[i]; | 5414 | struct igb_ring *tx_ring = adapter->tx_ring[i]; |
| @@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev, | |||
| 6235 | { | 6235 | { |
| 6236 | struct igb_adapter *adapter = netdev_priv(netdev); | 6236 | struct igb_adapter *adapter = netdev_priv(netdev); |
| 6237 | 6237 | ||
| 6238 | mutex_lock(&adapter->stats64_lock); | 6238 | spin_lock(&adapter->stats64_lock); |
| 6239 | igb_update_stats(adapter); | 6239 | igb_update_stats(adapter); |
| 6240 | memcpy(stats, &adapter->stats64, sizeof(*stats)); | 6240 | memcpy(stats, &adapter->stats64, sizeof(*stats)); |
| 6241 | mutex_unlock(&adapter->stats64_lock); | 6241 | spin_unlock(&adapter->stats64_lock); |
| 6242 | } | 6242 | } |
| 6243 | 6243 | ||
| 6244 | /** | 6244 | /** |
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 742f0c1f60df..6d55e3d0b7ea 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c | |||
| @@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 825 | if (!cgx->cgx_cmd_workq) { | 825 | if (!cgx->cgx_cmd_workq) { |
| 826 | dev_err(dev, "alloc workqueue failed for cgx cmd"); | 826 | dev_err(dev, "alloc workqueue failed for cgx cmd"); |
| 827 | err = -ENOMEM; | 827 | err = -ENOMEM; |
| 828 | goto err_release_regions; | 828 | goto err_free_irq_vectors; |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | list_add(&cgx->cgx_list, &cgx_list); | 831 | list_add(&cgx->cgx_list, &cgx_list); |
| @@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 841 | err_release_lmac: | 841 | err_release_lmac: |
| 842 | cgx_lmac_exit(cgx); | 842 | cgx_lmac_exit(cgx); |
| 843 | list_del(&cgx->cgx_list); | 843 | list_del(&cgx->cgx_list); |
| 844 | err_free_irq_vectors: | ||
| 845 | pci_free_irq_vectors(pdev); | ||
| 844 | err_release_regions: | 846 | err_release_regions: |
| 845 | pci_release_regions(pdev); | 847 | pci_release_regions(pdev); |
| 846 | err_disable_device: | 848 | err_disable_device: |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index fe9653fa8aea..49f926b7a91c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
| 258 | 258 | ||
| 259 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 259 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
| 260 | 260 | ||
| 261 | if (dev->phydev->link) | ||
| 262 | netif_carrier_on(dev); | ||
| 263 | else | ||
| 264 | netif_carrier_off(dev); | ||
| 265 | |||
| 266 | if (!of_phy_is_fixed_link(mac->of_node)) | 261 | if (!of_phy_is_fixed_link(mac->of_node)) |
| 267 | phy_print_status(dev->phydev); | 262 | phy_print_status(dev->phydev); |
| 268 | } | 263 | } |
| @@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev) | |||
| 347 | if (mtk_phy_connect_node(eth, mac, np)) | 342 | if (mtk_phy_connect_node(eth, mac, np)) |
| 348 | goto err_phy; | 343 | goto err_phy; |
| 349 | 344 | ||
| 350 | dev->phydev->autoneg = AUTONEG_ENABLE; | ||
| 351 | dev->phydev->speed = 0; | ||
| 352 | dev->phydev->duplex = 0; | ||
| 353 | |||
| 354 | phy_set_max_speed(dev->phydev, SPEED_1000); | ||
| 355 | phy_support_asym_pause(dev->phydev); | ||
| 356 | linkmode_copy(dev->phydev->advertising, dev->phydev->supported); | ||
| 357 | linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, | ||
| 358 | dev->phydev->advertising); | ||
| 359 | phy_start_aneg(dev->phydev); | ||
| 360 | |||
| 361 | of_node_put(np); | 345 | of_node_put(np); |
| 362 | 346 | ||
| 363 | return 0; | 347 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index db909b6069b5..65f8a4b6ed0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c | |||
| @@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) | |||
| 306 | 306 | ||
| 307 | if (entries_per_copy < entries) { | 307 | if (entries_per_copy < entries) { |
| 308 | for (i = 0; i < entries / entries_per_copy; i++) { | 308 | for (i = 0; i < entries / entries_per_copy; i++) { |
| 309 | err = copy_to_user(buf, init_ents, PAGE_SIZE); | 309 | err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ? |
| 310 | -EFAULT : 0; | ||
| 310 | if (err) | 311 | if (err) |
| 311 | goto out; | 312 | goto out; |
| 312 | 313 | ||
| 313 | buf += PAGE_SIZE; | 314 | buf += PAGE_SIZE; |
| 314 | } | 315 | } |
| 315 | } else { | 316 | } else { |
| 316 | err = copy_to_user(buf, init_ents, entries * cqe_size); | 317 | err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? |
| 318 | -EFAULT : 0; | ||
| 317 | } | 319 | } |
| 318 | 320 | ||
| 319 | out: | 321 | out: |
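The mlx4_init_user_cqes() change addresses two things: copy_to_user() returns the number of bytes it could not copy rather than an errno, so a nonzero result is now mapped to -EFAULT, and the destination pointer gains the __user cast that sparse expects. The idiom in isolation, with generic buffer names:

	/* copy_to_user() returns bytes left uncopied; nonzero means fault */
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
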
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 7df728f1e5b5..6e501af0e532 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, | |||
| 2067 | { | 2067 | { |
| 2068 | struct mlx4_cmd_mailbox *mailbox; | 2068 | struct mlx4_cmd_mailbox *mailbox; |
| 2069 | __be32 *outbox; | 2069 | __be32 *outbox; |
| 2070 | u64 qword_field; | ||
| 2070 | u32 dword_field; | 2071 | u32 dword_field; |
| 2071 | int err; | 2072 | u16 word_field; |
| 2072 | u8 byte_field; | 2073 | u8 byte_field; |
| 2074 | int err; | ||
| 2073 | static const u8 a0_dmfs_query_hw_steering[] = { | 2075 | static const u8 a0_dmfs_query_hw_steering[] = { |
| 2074 | [0] = MLX4_STEERING_DMFS_A0_DEFAULT, | 2076 | [0] = MLX4_STEERING_DMFS_A0_DEFAULT, |
| 2075 | [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, | 2077 | [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, |
| @@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, | |||
| 2097 | 2099 | ||
| 2098 | /* QPC/EEC/CQC/EQC/RDMARC attributes */ | 2100 | /* QPC/EEC/CQC/EQC/RDMARC attributes */ |
| 2099 | 2101 | ||
| 2100 | MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); | 2102 | MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET); |
| 2101 | MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); | 2103 | param->qpc_base = qword_field & ~((u64)0x1f); |
| 2102 | MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); | 2104 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET); |
| 2103 | MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); | 2105 | param->log_num_qps = byte_field & 0x1f; |
| 2104 | MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); | 2106 | MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET); |
| 2105 | MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); | 2107 | param->srqc_base = qword_field & ~((u64)0x1f); |
| 2106 | MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); | 2108 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET); |
| 2107 | MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); | 2109 | param->log_num_srqs = byte_field & 0x1f; |
| 2108 | MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); | 2110 | MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET); |
| 2109 | MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); | 2111 | param->cqc_base = qword_field & ~((u64)0x1f); |
| 2110 | MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); | 2112 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET); |
| 2111 | MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); | 2113 | param->log_num_cqs = byte_field & 0x1f; |
| 2112 | MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); | 2114 | MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET); |
| 2115 | param->altc_base = qword_field; | ||
| 2116 | MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET); | ||
| 2117 | param->auxc_base = qword_field; | ||
| 2118 | MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET); | ||
| 2119 | param->eqc_base = qword_field & ~((u64)0x1f); | ||
| 2120 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET); | ||
| 2121 | param->log_num_eqs = byte_field & 0x1f; | ||
| 2122 | MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); | ||
| 2123 | param->num_sys_eqs = word_field & 0xfff; | ||
| 2124 | MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET); | ||
| 2125 | param->rdmarc_base = qword_field & ~((u64)0x1f); | ||
| 2126 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET); | ||
| 2127 | param->log_rd_per_qp = byte_field & 0x7; | ||
| 2113 | 2128 | ||
| 2114 | MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); | 2129 | MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); |
| 2115 | if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { | 2130 | if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { |
| @@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, | |||
| 2128 | /* steering attributes */ | 2143 | /* steering attributes */ |
| 2129 | if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { | 2144 | if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { |
| 2130 | MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); | 2145 | MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); |
| 2131 | MLX4_GET(param->log_mc_entry_sz, outbox, | 2146 | MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); |
| 2132 | INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); | 2147 | param->log_mc_entry_sz = byte_field & 0x1f; |
| 2133 | MLX4_GET(param->log_mc_table_sz, outbox, | 2148 | MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); |
| 2134 | INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); | 2149 | param->log_mc_table_sz = byte_field & 0x1f; |
| 2135 | MLX4_GET(byte_field, outbox, | 2150 | MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET); |
| 2136 | INIT_HCA_FS_A0_OFFSET); | ||
| 2137 | param->dmfs_high_steer_mode = | 2151 | param->dmfs_high_steer_mode = |
| 2138 | a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; | 2152 | a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; |
| 2139 | } else { | 2153 | } else { |
| 2140 | MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); | 2154 | MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); |
| 2141 | MLX4_GET(param->log_mc_entry_sz, outbox, | 2155 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); |
| 2142 | INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); | 2156 | param->log_mc_entry_sz = byte_field & 0x1f; |
| 2143 | MLX4_GET(param->log_mc_hash_sz, outbox, | 2157 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); |
| 2144 | INIT_HCA_LOG_MC_HASH_SZ_OFFSET); | 2158 | param->log_mc_hash_sz = byte_field & 0x1f; |
| 2145 | MLX4_GET(param->log_mc_table_sz, outbox, | 2159 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); |
| 2146 | INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); | 2160 | param->log_mc_table_sz = byte_field & 0x1f; |
| 2147 | } | 2161 | } |
| 2148 | 2162 | ||
| 2149 | /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ | 2163 | /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ |
| @@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, | |||
| 2167 | /* TPT attributes */ | 2181 | /* TPT attributes */ |
| 2168 | 2182 | ||
| 2169 | MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); | 2183 | MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); |
| 2170 | MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); | 2184 | MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET); |
| 2171 | MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); | 2185 | param->mw_enabled = byte_field >> 7; |
| 2186 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); | ||
| 2187 | param->log_mpt_sz = byte_field & 0x3f; | ||
| 2172 | MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); | 2188 | MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); |
| 2173 | MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); | 2189 | MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); |
| 2174 | 2190 | ||
| 2175 | /* UAR attributes */ | 2191 | /* UAR attributes */ |
| 2176 | 2192 | ||
| 2177 | MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); | 2193 | MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); |
| 2178 | MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); | 2194 | MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); |
| 2195 | param->log_uar_sz = byte_field & 0xf; | ||
| 2179 | 2196 | ||
| 2180 | /* phv_check enable */ | 2197 | /* phv_check enable */ |
| 2181 | MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); | 2198 | MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); |
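The QUERY_HCA hunk above stops copying firmware outbox words straight into struct members; it reads each word into a scratch variable and masks it down to the architected bits (5-bit log counts, a 12-bit EQ count, 32-byte-aligned base addresses), so reserved bits set by newer firmware cannot leak into the parsed parameters. A minimal userspace sketch of that extract-and-mask pattern follows; the outbox layout, offsets, and helper names are invented for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: pull a field out of a command outbox and keep just
 * the architected bits, as the hunk above does with byte_field & 0x1f and
 * qword_field & ~((u64)0x1f). The outbox layout, offsets and helper names
 * are invented for this example.
 */
static uint8_t get_u8(const uint8_t *outbox, size_t off)
{
	return outbox[off];
}

static uint64_t get_u64_be(const uint8_t *outbox, size_t off)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | outbox[off + i];
	return v;
}

int main(void)
{
	uint8_t outbox[64];

	memset(outbox, 0xff, sizeof(outbox));	/* firmware may set reserved bits */

	unsigned int log_num_qps = get_u8(outbox, 0x10) & 0x1f;	/* 5-bit log field */
	uint64_t srqc_base = get_u64_be(outbox, 0x18) & ~0x1fULL;	/* 32-byte-aligned base */

	printf("log_num_qps=%u srqc_base=%#llx\n",
	       log_num_qps, (unsigned long long)srqc_base);
	return 0;
}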
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 4b4351141b94..d89a3da89e5a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c | |||
| @@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu | |||
| 57 | int i; | 57 | int i; |
| 58 | 58 | ||
| 59 | if (chunk->nsg > 0) | 59 | if (chunk->nsg > 0) |
| 60 | pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, | 60 | dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, |
| 61 | PCI_DMA_BIDIRECTIONAL); | 61 | DMA_BIDIRECTIONAL); |
| 62 | 62 | ||
| 63 | for (i = 0; i < chunk->npages; ++i) | 63 | for (i = 0; i < chunk->npages; ++i) |
| 64 | __free_pages(sg_page(&chunk->mem[i]), | 64 | __free_pages(sg_page(&chunk->sg[i]), |
| 65 | get_order(chunk->mem[i].length)); | 65 | get_order(chunk->sg[i].length)); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) | 68 | static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) |
| @@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk * | |||
| 71 | 71 | ||
| 72 | for (i = 0; i < chunk->npages; ++i) | 72 | for (i = 0; i < chunk->npages; ++i) |
| 73 | dma_free_coherent(&dev->persist->pdev->dev, | 73 | dma_free_coherent(&dev->persist->pdev->dev, |
| 74 | chunk->mem[i].length, | 74 | chunk->buf[i].size, |
| 75 | lowmem_page_address(sg_page(&chunk->mem[i])), | 75 | chunk->buf[i].addr, |
| 76 | sg_dma_address(&chunk->mem[i])); | 76 | chunk->buf[i].dma_addr); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) | 79 | void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) |
| @@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, | |||
| 111 | return 0; | 111 | return 0; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, | 114 | static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, |
| 115 | int order, gfp_t gfp_mask) | 115 | int order, gfp_t gfp_mask) |
| 116 | { | 116 | { |
| 117 | void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, | 117 | buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order, |
| 118 | &sg_dma_address(mem), gfp_mask); | 118 | &buf->dma_addr, gfp_mask); |
| 119 | if (!buf) | 119 | if (!buf->addr) |
| 120 | return -ENOMEM; | 120 | return -ENOMEM; |
| 121 | 121 | ||
| 122 | if (offset_in_page(buf)) { | 122 | if (offset_in_page(buf->addr)) { |
| 123 | dma_free_coherent(dev, PAGE_SIZE << order, | 123 | dma_free_coherent(dev, PAGE_SIZE << order, buf->addr, |
| 124 | buf, sg_dma_address(mem)); | 124 | buf->dma_addr); |
| 125 | return -ENOMEM; | 125 | return -ENOMEM; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | sg_set_buf(mem, buf, PAGE_SIZE << order); | 128 | buf->size = PAGE_SIZE << order; |
| 129 | sg_dma_len(mem) = PAGE_SIZE << order; | ||
| 130 | return 0; | 129 | return 0; |
| 131 | } | 130 | } |
| 132 | 131 | ||
| @@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
| 159 | 158 | ||
| 160 | while (npages > 0) { | 159 | while (npages > 0) { |
| 161 | if (!chunk) { | 160 | if (!chunk) { |
| 162 | chunk = kmalloc_node(sizeof(*chunk), | 161 | chunk = kzalloc_node(sizeof(*chunk), |
| 163 | gfp_mask & ~(__GFP_HIGHMEM | | 162 | gfp_mask & ~(__GFP_HIGHMEM | |
| 164 | __GFP_NOWARN), | 163 | __GFP_NOWARN), |
| 165 | dev->numa_node); | 164 | dev->numa_node); |
| 166 | if (!chunk) { | 165 | if (!chunk) { |
| 167 | chunk = kmalloc(sizeof(*chunk), | 166 | chunk = kzalloc(sizeof(*chunk), |
| 168 | gfp_mask & ~(__GFP_HIGHMEM | | 167 | gfp_mask & ~(__GFP_HIGHMEM | |
| 169 | __GFP_NOWARN)); | 168 | __GFP_NOWARN)); |
| 170 | if (!chunk) | 169 | if (!chunk) |
| 171 | goto fail; | 170 | goto fail; |
| 172 | } | 171 | } |
| 172 | chunk->coherent = coherent; | ||
| 173 | 173 | ||
| 174 | sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); | 174 | if (!coherent) |
| 175 | chunk->npages = 0; | 175 | sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN); |
| 176 | chunk->nsg = 0; | ||
| 177 | list_add_tail(&chunk->list, &icm->chunk_list); | 176 | list_add_tail(&chunk->list, &icm->chunk_list); |
| 178 | } | 177 | } |
| 179 | 178 | ||
| @@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
| 186 | 185 | ||
| 187 | if (coherent) | 186 | if (coherent) |
| 188 | ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, | 187 | ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, |
| 189 | &chunk->mem[chunk->npages], | 188 | &chunk->buf[chunk->npages], |
| 190 | cur_order, mask); | 189 | cur_order, mask); |
| 191 | else | 190 | else |
| 192 | ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], | 191 | ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], |
| 193 | cur_order, mask, | 192 | cur_order, mask, |
| 194 | dev->numa_node); | 193 | dev->numa_node); |
| 195 | 194 | ||
| @@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
| 205 | if (coherent) | 204 | if (coherent) |
| 206 | ++chunk->nsg; | 205 | ++chunk->nsg; |
| 207 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | 206 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { |
| 208 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, | 207 | chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, |
| 209 | chunk->npages, | 208 | chunk->sg, chunk->npages, |
| 210 | PCI_DMA_BIDIRECTIONAL); | 209 | DMA_BIDIRECTIONAL); |
| 211 | 210 | ||
| 212 | if (chunk->nsg <= 0) | 211 | if (chunk->nsg <= 0) |
| 213 | goto fail; | 212 | goto fail; |
| @@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
| 220 | } | 219 | } |
| 221 | 220 | ||
| 222 | if (!coherent && chunk) { | 221 | if (!coherent && chunk) { |
| 223 | chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, | 222 | chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg, |
| 224 | chunk->npages, | 223 | chunk->npages, DMA_BIDIRECTIONAL); |
| 225 | PCI_DMA_BIDIRECTIONAL); | ||
| 226 | 224 | ||
| 227 | if (chunk->nsg <= 0) | 225 | if (chunk->nsg <= 0) |
| 228 | goto fail; | 226 | goto fail; |
| @@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, | |||
| 320 | u64 idx; | 318 | u64 idx; |
| 321 | struct mlx4_icm_chunk *chunk; | 319 | struct mlx4_icm_chunk *chunk; |
| 322 | struct mlx4_icm *icm; | 320 | struct mlx4_icm *icm; |
| 323 | struct page *page = NULL; | 321 | void *addr = NULL; |
| 324 | 322 | ||
| 325 | if (!table->lowmem) | 323 | if (!table->lowmem) |
| 326 | return NULL; | 324 | return NULL; |
| @@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, | |||
| 336 | 334 | ||
| 337 | list_for_each_entry(chunk, &icm->chunk_list, list) { | 335 | list_for_each_entry(chunk, &icm->chunk_list, list) { |
| 338 | for (i = 0; i < chunk->npages; ++i) { | 336 | for (i = 0; i < chunk->npages; ++i) { |
| 337 | dma_addr_t dma_addr; | ||
| 338 | size_t len; | ||
| 339 | |||
| 340 | if (table->coherent) { | ||
| 341 | len = chunk->buf[i].size; | ||
| 342 | dma_addr = chunk->buf[i].dma_addr; | ||
| 343 | addr = chunk->buf[i].addr; | ||
| 344 | } else { | ||
| 345 | struct page *page; | ||
| 346 | |||
| 347 | len = sg_dma_len(&chunk->sg[i]); | ||
| 348 | dma_addr = sg_dma_address(&chunk->sg[i]); | ||
| 349 | |||
| 350 | /* XXX: we should never do this for highmem | ||
| 351 | * allocation. This function either needs | ||
| 352 | * to be split, or the kernel virtual address | ||
| 353 | * return needs to be made optional. | ||
| 354 | */ | ||
| 355 | page = sg_page(&chunk->sg[i]); | ||
| 356 | addr = lowmem_page_address(page); | ||
| 357 | } | ||
| 358 | |||
| 339 | if (dma_handle && dma_offset >= 0) { | 359 | if (dma_handle && dma_offset >= 0) { |
| 340 | if (sg_dma_len(&chunk->mem[i]) > dma_offset) | 360 | if (len > dma_offset) |
| 341 | *dma_handle = sg_dma_address(&chunk->mem[i]) + | 361 | *dma_handle = dma_addr + dma_offset; |
| 342 | dma_offset; | 362 | dma_offset -= len; |
| 343 | dma_offset -= sg_dma_len(&chunk->mem[i]); | ||
| 344 | } | 363 | } |
| 364 | |||
| 345 | /* | 365 | /* |
| 346 | * DMA mapping can merge pages but not split them, | 366 | * DMA mapping can merge pages but not split them, |
| 347 | * so if we found the page, dma_handle has already | 367 | * so if we found the page, dma_handle has already |
| 348 | * been assigned to. | 368 | * been assigned to. |
| 349 | */ | 369 | */ |
| 350 | if (chunk->mem[i].length > offset) { | 370 | if (len > offset) |
| 351 | page = sg_page(&chunk->mem[i]); | ||
| 352 | goto out; | 371 | goto out; |
| 353 | } | 372 | offset -= len; |
| 354 | offset -= chunk->mem[i].length; | ||
| 355 | } | 373 | } |
| 356 | } | 374 | } |
| 357 | 375 | ||
| 376 | addr = NULL; | ||
| 358 | out: | 377 | out: |
| 359 | mutex_unlock(&table->mutex); | 378 | mutex_unlock(&table->mutex); |
| 360 | return page ? lowmem_page_address(page) + offset : NULL; | 379 | return addr ? addr + offset : NULL; |
| 361 | } | 380 | } |
| 362 | 381 | ||
| 363 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, | 382 | int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index c9169a490557..d199874b1c07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h | |||
| @@ -47,11 +47,21 @@ enum { | |||
| 47 | MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, | 47 | MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, |
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | struct mlx4_icm_buf { | ||
| 51 | void *addr; | ||
| 52 | size_t size; | ||
| 53 | dma_addr_t dma_addr; | ||
| 54 | }; | ||
| 55 | |||
| 50 | struct mlx4_icm_chunk { | 56 | struct mlx4_icm_chunk { |
| 51 | struct list_head list; | 57 | struct list_head list; |
| 52 | int npages; | 58 | int npages; |
| 53 | int nsg; | 59 | int nsg; |
| 54 | struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; | 60 | bool coherent; |
| 61 | union { | ||
| 62 | struct scatterlist sg[MLX4_ICM_CHUNK_LEN]; | ||
| 63 | struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN]; | ||
| 64 | }; | ||
| 55 | }; | 65 | }; |
| 56 | 66 | ||
| 57 | struct mlx4_icm { | 67 | struct mlx4_icm { |
| @@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) | |||
| 114 | 124 | ||
| 115 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) | 125 | static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) |
| 116 | { | 126 | { |
| 117 | return sg_dma_address(&iter->chunk->mem[iter->page_idx]); | 127 | if (iter->chunk->coherent) |
| 128 | return iter->chunk->buf[iter->page_idx].dma_addr; | ||
| 129 | else | ||
| 130 | return sg_dma_address(&iter->chunk->sg[iter->page_idx]); | ||
| 118 | } | 131 | } |
| 119 | 132 | ||
| 120 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) | 133 | static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) |
| 121 | { | 134 | { |
| 122 | return sg_dma_len(&iter->chunk->mem[iter->page_idx]); | 135 | if (iter->chunk->coherent) |
| 136 | return iter->chunk->buf[iter->page_idx].size; | ||
| 137 | else | ||
| 138 | return sg_dma_len(&iter->chunk->sg[iter->page_idx]); | ||
| 123 | } | 139 | } |
| 124 | 140 | ||
| 125 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); | 141 | int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); |
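The icm.c/icm.h hunks above replace the single scatterlist array with a union of scatterlist entries and plain coherent buffers, selected per chunk by the new coherent flag, so coherent allocations no longer have to be dressed up as struct scatterlist. A rough userspace model of that tagged-union accessor pattern is sketched below; the structure layout and field names are stand-ins, not the driver's.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Rough model of the icm.h change: a chunk carries either scatterlist-like
 * entries or coherent buffers in a union, and the per-page accessors branch
 * on chunk->coherent. Types below are invented for the example.
 */
struct sg_like  { uint64_t dma_addr; size_t dma_len; };
struct buf_like { void *addr; size_t size; uint64_t dma_addr; };

#define CHUNK_LEN 4

struct chunk {
	bool coherent;
	union {
		struct sg_like  sg[CHUNK_LEN];
		struct buf_like buf[CHUNK_LEN];
	};
};

static uint64_t chunk_dma_addr(const struct chunk *c, int i)
{
	return c->coherent ? c->buf[i].dma_addr : c->sg[i].dma_addr;
}

static size_t chunk_size(const struct chunk *c, int i)
{
	return c->coherent ? c->buf[i].size : c->sg[i].dma_len;
}

int main(void)
{
	struct chunk c = { .coherent = true };

	c.buf[0] = (struct buf_like){ .addr = NULL, .size = 4096, .dma_addr = 0x1000 };
	printf("page 0: dma=%#llx size=%zu\n",
	       (unsigned long long)chunk_dma_addr(&c, 0), chunk_size(&c, 0));
	return 0;
}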
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c9df08133718..3bbccead2f63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -844,9 +844,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
| 844 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, | 844 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, |
| 845 | Autoneg); | 845 | Autoneg); |
| 846 | 846 | ||
| 847 | if (get_fec_supported_advertised(mdev, link_ksettings)) | 847 | err = get_fec_supported_advertised(mdev, link_ksettings); |
| 848 | if (err) { | ||
| 848 | netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", | 849 | netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", |
| 849 | __func__, err); | 850 | __func__, err); |
| 851 | err = 0; /* don't fail caps query because of FEC error */ | ||
| 852 | } | ||
| 850 | 853 | ||
| 851 | if (!an_disable_admin) | 854 | if (!an_disable_admin) |
| 852 | ethtool_link_ksettings_add_link_mode(link_ksettings, | 855 | ethtool_link_ksettings_add_link_mode(link_ksettings, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 96cc0c6a4014..04736212a21c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv { | |||
| 58 | struct list_head list; | 58 | struct list_head list; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); | 61 | static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, |
| 62 | struct net_device *netdev); | ||
| 62 | 63 | ||
| 63 | static void mlx5e_rep_get_drvinfo(struct net_device *dev, | 64 | static void mlx5e_rep_get_drvinfo(struct net_device *dev, |
| 64 | struct ethtool_drvinfo *drvinfo) | 65 | struct ethtool_drvinfo *drvinfo) |
| @@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) | |||
| 179 | 180 | ||
| 180 | s->tx_packets += sq_stats->packets; | 181 | s->tx_packets += sq_stats->packets; |
| 181 | s->tx_bytes += sq_stats->bytes; | 182 | s->tx_bytes += sq_stats->bytes; |
| 183 | s->tx_queue_dropped += sq_stats->dropped; | ||
| 182 | } | 184 | } |
| 183 | } | 185 | } |
| 184 | } | 186 | } |
| @@ -663,7 +665,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) | |||
| 663 | struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; | 665 | struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; |
| 664 | 666 | ||
| 665 | list_for_each_entry_safe(cb_priv, temp, head, list) { | 667 | list_for_each_entry_safe(cb_priv, temp, head, list) { |
| 666 | mlx5e_rep_indr_unregister_block(cb_priv->netdev); | 668 | mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev); |
| 667 | kfree(cb_priv); | 669 | kfree(cb_priv); |
| 668 | } | 670 | } |
| 669 | } | 671 | } |
| @@ -735,7 +737,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, | |||
| 735 | 737 | ||
| 736 | err = tcf_block_cb_register(f->block, | 738 | err = tcf_block_cb_register(f->block, |
| 737 | mlx5e_rep_indr_setup_block_cb, | 739 | mlx5e_rep_indr_setup_block_cb, |
| 738 | netdev, indr_priv, f->extack); | 740 | indr_priv, indr_priv, f->extack); |
| 739 | if (err) { | 741 | if (err) { |
| 740 | list_del(&indr_priv->list); | 742 | list_del(&indr_priv->list); |
| 741 | kfree(indr_priv); | 743 | kfree(indr_priv); |
| @@ -743,14 +745,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, | |||
| 743 | 745 | ||
| 744 | return err; | 746 | return err; |
| 745 | case TC_BLOCK_UNBIND: | 747 | case TC_BLOCK_UNBIND: |
| 748 | indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); | ||
| 749 | if (!indr_priv) | ||
| 750 | return -ENOENT; | ||
| 751 | |||
| 746 | tcf_block_cb_unregister(f->block, | 752 | tcf_block_cb_unregister(f->block, |
| 747 | mlx5e_rep_indr_setup_block_cb, | 753 | mlx5e_rep_indr_setup_block_cb, |
| 748 | netdev); | 754 | indr_priv); |
| 749 | indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); | 755 | list_del(&indr_priv->list); |
| 750 | if (indr_priv) { | 756 | kfree(indr_priv); |
| 751 | list_del(&indr_priv->list); | ||
| 752 | kfree(indr_priv); | ||
| 753 | } | ||
| 754 | 757 | ||
| 755 | return 0; | 758 | return 0; |
| 756 | default: | 759 | default: |
| @@ -779,7 +782,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, | |||
| 779 | 782 | ||
| 780 | err = __tc_indr_block_cb_register(netdev, rpriv, | 783 | err = __tc_indr_block_cb_register(netdev, rpriv, |
| 781 | mlx5e_rep_indr_setup_tc_cb, | 784 | mlx5e_rep_indr_setup_tc_cb, |
| 782 | netdev); | 785 | rpriv); |
| 783 | if (err) { | 786 | if (err) { |
| 784 | struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); | 787 | struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); |
| 785 | 788 | ||
| @@ -789,10 +792,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, | |||
| 789 | return err; | 792 | return err; |
| 790 | } | 793 | } |
| 791 | 794 | ||
| 792 | static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) | 795 | static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, |
| 796 | struct net_device *netdev) | ||
| 793 | { | 797 | { |
| 794 | __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, | 798 | __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, |
| 795 | netdev); | 799 | rpriv); |
| 796 | } | 800 | } |
| 797 | 801 | ||
| 798 | static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, | 802 | static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, |
| @@ -811,7 +815,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, | |||
| 811 | mlx5e_rep_indr_register_block(rpriv, netdev); | 815 | mlx5e_rep_indr_register_block(rpriv, netdev); |
| 812 | break; | 816 | break; |
| 813 | case NETDEV_UNREGISTER: | 817 | case NETDEV_UNREGISTER: |
| 814 | mlx5e_rep_indr_unregister_block(netdev); | 818 | mlx5e_rep_indr_unregister_block(rpriv, netdev); |
| 815 | break; | 819 | break; |
| 816 | } | 820 | } |
| 817 | return NOTIFY_OK; | 821 | return NOTIFY_OK; |
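The en_rep.c hunks above make the indirect TC block register and unregister paths pass the same private object as the callback cookie (and look the priv up before unregistering), because an unregister only removes an entry whose cookie matches the one supplied at registration. The toy registry below is invented purely to show why mismatched cookies leave stale registrations behind; it is not the TC block API.

#include <stdio.h>

/* Invented mini-registry: registration is keyed by an opaque cookie, so
 * unregistering with a different pointer silently removes nothing.
 */
#define MAX_CBS 8

static const void *registry[MAX_CBS];

static int register_cb(const void *ident)
{
	for (int i = 0; i < MAX_CBS; i++)
		if (!registry[i]) {
			registry[i] = ident;
			return 0;
		}
	return -1;
}

static int unregister_cb(const void *ident)
{
	for (int i = 0; i < MAX_CBS; i++)
		if (registry[i] == ident) {	/* must match the register cookie */
			registry[i] = NULL;
			return 0;
		}
	return -1;				/* wrong cookie: nothing removed */
}

int main(void)
{
	int priv = 0, netdev = 0;

	register_cb(&priv);
	printf("unregister with netdev cookie: %d\n", unregister_cb(&netdev));
	printf("unregister with priv cookie:   %d\n", unregister_cb(&priv));
	return 0;
}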
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1d0bb5ff8c26..f86e4804e83e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) | |||
| 732 | ((struct ipv6hdr *)ip_p)->nexthdr; | 732 | ((struct ipv6hdr *)ip_p)->nexthdr; |
| 733 | } | 733 | } |
| 734 | 734 | ||
| 735 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | ||
| 736 | |||
| 735 | static inline void mlx5e_handle_csum(struct net_device *netdev, | 737 | static inline void mlx5e_handle_csum(struct net_device *netdev, |
| 736 | struct mlx5_cqe64 *cqe, | 738 | struct mlx5_cqe64 *cqe, |
| 737 | struct mlx5e_rq *rq, | 739 | struct mlx5e_rq *rq, |
| @@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 754 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) | 756 | if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) |
| 755 | goto csum_unnecessary; | 757 | goto csum_unnecessary; |
| 756 | 758 | ||
| 759 | /* CQE csum doesn't cover padding octets in short ethernet | ||
| 760 | * frames. And the pad field is appended prior to calculating | ||
| 761 | * and appending the FCS field. | ||
| 762 | * | ||
| 763 | * Detecting these padded frames requires to verify and parse | ||
| 764 | * IP headers, so we simply force all those small frames to be | ||
| 765 | * CHECKSUM_UNNECESSARY even if they are not padded. | ||
| 766 | */ | ||
| 767 | if (short_frame(skb->len)) | ||
| 768 | goto csum_unnecessary; | ||
| 769 | |||
| 757 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { | 770 | if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { |
| 758 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) | 771 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) |
| 759 | goto csum_unnecessary; | 772 | goto csum_unnecessary; |
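The en_rx.c hunk above adds a short_frame() check: for frames no longer than ETH_ZLEN + ETH_FCS_LEN the hardware checksum may cover padding octets, so such frames are forced down the CHECKSUM_UNNECESSARY path rather than trusting CHECKSUM_COMPLETE. The standalone sketch below just evaluates that threshold for a few lengths; the constants mirror the kernel's ETH_ZLEN (60) and ETH_FCS_LEN (4).

#include <stdbool.h>
#include <stdio.h>

#define ETH_ZLEN	60	/* minimum frame length without FCS */
#define ETH_FCS_LEN	4	/* frame check sequence */

static bool short_frame(unsigned int len)
{
	return len <= ETH_ZLEN + ETH_FCS_LEN;
}

int main(void)
{
	unsigned int lens[] = { 42, 64, 65, 1514 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%4u -> %s\n", lens[i],
		       short_frame(lens[i]) ? "csum unnecessary" : "csum complete path");
	return 0;
}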
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 080ddd1942ec..b9a25aed5d11 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig | |||
| @@ -78,6 +78,7 @@ config MLXSW_SPECTRUM | |||
| 78 | depends on IPV6 || IPV6=n | 78 | depends on IPV6 || IPV6=n |
| 79 | depends on NET_IPGRE || NET_IPGRE=n | 79 | depends on NET_IPGRE || NET_IPGRE=n |
| 80 | depends on IPV6_GRE || IPV6_GRE=n | 80 | depends on IPV6_GRE || IPV6_GRE=n |
| 81 | depends on VXLAN || VXLAN=n | ||
| 81 | select GENERIC_ALLOCATOR | 82 | select GENERIC_ALLOCATOR |
| 82 | select PARMAN | 83 | select PARMAN |
| 83 | select OBJAGG | 84 | select OBJAGG |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 66b8098c6fd2..a2321fe8d6a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c | |||
| @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) | |||
| 604 | u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); | 604 | u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); |
| 605 | u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); | 605 | u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); |
| 606 | u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); | 606 | u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); |
| 607 | char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; | ||
| 608 | |||
| 609 | memcpy(ncqe, cqe, q->elem_size); | ||
| 610 | mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
| 607 | 611 | ||
| 608 | if (sendq) { | 612 | if (sendq) { |
| 609 | struct mlxsw_pci_queue *sdq; | 613 | struct mlxsw_pci_queue *sdq; |
| 610 | 614 | ||
| 611 | sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); | 615 | sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); |
| 612 | mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, | 616 | mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, |
| 613 | wqe_counter, cqe); | 617 | wqe_counter, ncqe); |
| 614 | q->u.cq.comp_sdq_count++; | 618 | q->u.cq.comp_sdq_count++; |
| 615 | } else { | 619 | } else { |
| 616 | struct mlxsw_pci_queue *rdq; | 620 | struct mlxsw_pci_queue *rdq; |
| 617 | 621 | ||
| 618 | rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); | 622 | rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); |
| 619 | mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, | 623 | mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, |
| 620 | wqe_counter, q->u.cq.v, cqe); | 624 | wqe_counter, q->u.cq.v, ncqe); |
| 621 | q->u.cq.comp_rdq_count++; | 625 | q->u.cq.comp_rdq_count++; |
| 622 | } | 626 | } |
| 623 | if (++items == credits) | 627 | if (++items == credits) |
| 624 | break; | 628 | break; |
| 625 | } | 629 | } |
| 626 | if (items) { | 630 | if (items) |
| 627 | mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
| 628 | mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); | 631 | mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); |
| 629 | } | ||
| 630 | } | 632 | } |
| 631 | 633 | ||
| 632 | static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) | 634 | static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) |
| @@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, | |||
| 1365 | u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); | 1367 | u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); |
| 1366 | 1368 | ||
| 1367 | if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) | 1369 | if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) |
| 1368 | break; | 1370 | return 0; |
| 1369 | cond_resched(); | 1371 | cond_resched(); |
| 1370 | } while (time_before(jiffies, end)); | 1372 | } while (time_before(jiffies, end)); |
| 1371 | return 0; | 1373 | return -EBUSY; |
| 1372 | } | 1374 | } |
| 1373 | 1375 | ||
| 1374 | static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) | 1376 | static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) |
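The pci.c CQ tasklet hunk above copies each completion entry into a local ncqe buffer before ringing the consumer doorbell, since once the slot is handed back the hardware may overwrite it while the driver is still parsing it (the reset hunk likewise now returns -EBUSY on timeout instead of silently reporting success). Below is a hedged userspace sketch of that snapshot-then-release ordering; the ring layout and sizes are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented ring: copy the element out before releasing the slot, then work
 * only on the private copy, mirroring the ncqe copy in the hunk above.
 */
#define CQE_SIZE_MAX 32

struct ring {
	uint8_t elem[16][CQE_SIZE_MAX];
	unsigned int consumer;
};

static void release_slot(struct ring *r)
{
	/* after this point the producer may overwrite r->elem[consumer] */
	r->consumer = (r->consumer + 1) % 16;
}

static void process_one(struct ring *r, size_t elem_size)
{
	uint8_t ncqe[CQE_SIZE_MAX];

	memcpy(ncqe, r->elem[r->consumer], elem_size);	/* snapshot first */
	release_slot(r);				/* then give the slot back */

	printf("first byte of private copy: %u\n", (unsigned)ncqe[0]);
}

int main(void)
{
	struct ring r = { .elem[0][0] = 42 };

	process_one(&r, 16);
	return 0;
}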
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index bb99f6d41fe0..ffee38e36ce8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #define MLXSW_PCI_SW_RESET 0xF0010 | 28 | #define MLXSW_PCI_SW_RESET 0xF0010 |
| 29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) | 29 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) |
| 30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 | 30 | #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 |
| 31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 | 31 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 |
| 32 | #define MLXSW_PCI_FW_READY 0xA1844 | 32 | #define MLXSW_PCI_FW_READY 0xA1844 |
| 33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF | 33 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF |
| @@ -53,6 +53,7 @@ | |||
| 53 | #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ | 53 | #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ |
| 54 | #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ | 54 | #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ |
| 55 | #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ | 55 | #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ |
| 56 | #define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE | ||
| 56 | #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ | 57 | #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ |
| 57 | #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) | 58 | #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) |
| 58 | #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) | 59 | #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index eed1045e4d96..32519c93df17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -5005,12 +5005,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, | |||
| 5005 | lower_dev, | 5005 | lower_dev, |
| 5006 | upper_dev); | 5006 | upper_dev); |
| 5007 | } else if (netif_is_lag_master(upper_dev)) { | 5007 | } else if (netif_is_lag_master(upper_dev)) { |
| 5008 | if (info->linking) | 5008 | if (info->linking) { |
| 5009 | err = mlxsw_sp_port_lag_join(mlxsw_sp_port, | 5009 | err = mlxsw_sp_port_lag_join(mlxsw_sp_port, |
| 5010 | upper_dev); | 5010 | upper_dev); |
| 5011 | else | 5011 | } else { |
| 5012 | mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, | ||
| 5013 | false); | ||
| 5012 | mlxsw_sp_port_lag_leave(mlxsw_sp_port, | 5014 | mlxsw_sp_port_lag_leave(mlxsw_sp_port, |
| 5013 | upper_dev); | 5015 | upper_dev); |
| 5016 | } | ||
| 5014 | } else if (netif_is_ovs_master(upper_dev)) { | 5017 | } else if (netif_is_ovs_master(upper_dev)) { |
| 5015 | if (info->linking) | 5018 | if (info->linking) |
| 5016 | err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); | 5019 | err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index b0f2d8e8ded0..ac222833a5cf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | |||
| @@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, | |||
| 72 | act_set = mlxsw_afa_block_first_set(rulei->act_block); | 72 | act_set = mlxsw_afa_block_first_set(rulei->act_block); |
| 73 | mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); | 73 | mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); |
| 74 | 74 | ||
| 75 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); | 75 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); |
| 76 | if (err) | ||
| 77 | goto err_ptce2_write; | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | |||
| 81 | err_ptce2_write: | ||
| 82 | cregion->ops->entry_remove(cregion, centry); | ||
| 83 | return err; | ||
| 76 | } | 84 | } |
| 77 | 85 | ||
| 78 | static void | 86 | static void |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 1c19feefa5f2..2941967e1cc5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | |||
| @@ -1022,7 +1022,6 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion, | |||
| 1022 | { | 1022 | { |
| 1023 | struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; | 1023 | struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; |
| 1024 | 1024 | ||
| 1025 | ASSERT_RTNL(); | ||
| 1026 | objagg_obj_put(aregion->erp_table->objagg, objagg_obj); | 1025 | objagg_obj_put(aregion->erp_table->objagg, objagg_obj); |
| 1027 | } | 1026 | } |
| 1028 | 1027 | ||
| @@ -1054,7 +1053,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, | |||
| 1054 | const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); | 1053 | const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); |
| 1055 | unsigned int erp_bank; | 1054 | unsigned int erp_bank; |
| 1056 | 1055 | ||
| 1057 | ASSERT_RTNL(); | ||
| 1058 | if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) | 1056 | if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) |
| 1059 | return; | 1057 | return; |
| 1060 | 1058 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 055cc6943b34..9d9aa28684af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | |||
| @@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { | |||
| 997 | static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { | 997 | static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { |
| 998 | .type = MLXSW_SP_FID_TYPE_DUMMY, | 998 | .type = MLXSW_SP_FID_TYPE_DUMMY, |
| 999 | .fid_size = sizeof(struct mlxsw_sp_fid), | 999 | .fid_size = sizeof(struct mlxsw_sp_fid), |
| 1000 | .start_index = MLXSW_SP_RFID_BASE - 1, | 1000 | .start_index = VLAN_N_VID - 1, |
| 1001 | .end_index = MLXSW_SP_RFID_BASE - 1, | 1001 | .end_index = VLAN_N_VID - 1, |
| 1002 | .ops = &mlxsw_sp_fid_dummy_ops, | 1002 | .ops = &mlxsw_sp_fid_dummy_ops, |
| 1003 | }; | 1003 | }; |
| 1004 | 1004 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 0a31fff2516e..fb1c48c698f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | |||
| @@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, | |||
| 816 | ops = nve->nve_ops_arr[params->type]; | 816 | ops = nve->nve_ops_arr[params->type]; |
| 817 | 817 | ||
| 818 | if (!ops->can_offload(nve, params->dev, extack)) | 818 | if (!ops->can_offload(nve, params->dev, extack)) |
| 819 | return -EOPNOTSUPP; | 819 | return -EINVAL; |
| 820 | 820 | ||
| 821 | memset(&config, 0, sizeof(config)); | 821 | memset(&config, 0, sizeof(config)); |
| 822 | ops->nve_config(nve, params->dev, &config); | 822 | ops->nve_config(nve, params->dev, &config); |
| 823 | if (nve->num_nve_tunnels && | 823 | if (nve->num_nve_tunnels && |
| 824 | memcmp(&config, &nve->config, sizeof(config))) { | 824 | memcmp(&config, &nve->config, sizeof(config))) { |
| 825 | NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); | 825 | NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); |
| 826 | return -EOPNOTSUPP; | 826 | return -EINVAL; |
| 827 | } | 827 | } |
| 828 | 828 | ||
| 829 | err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); | 829 | err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 1bd2c6e15f8d..c772109b638d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -1078,8 +1078,7 @@ static int | |||
| 1078 | mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, | 1078 | mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, |
| 1079 | struct mlxsw_sp_bridge_port *bridge_port, | 1079 | struct mlxsw_sp_bridge_port *bridge_port, |
| 1080 | u16 vid, bool is_untagged, bool is_pvid, | 1080 | u16 vid, bool is_untagged, bool is_pvid, |
| 1081 | struct netlink_ext_ack *extack, | 1081 | struct netlink_ext_ack *extack) |
| 1082 | struct switchdev_trans *trans) | ||
| 1083 | { | 1082 | { |
| 1084 | u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); | 1083 | u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); |
| 1085 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1084 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
| @@ -1095,9 +1094,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1095 | mlxsw_sp_port_vlan->bridge_port != bridge_port) | 1094 | mlxsw_sp_port_vlan->bridge_port != bridge_port) |
| 1096 | return -EEXIST; | 1095 | return -EEXIST; |
| 1097 | 1096 | ||
| 1098 | if (switchdev_trans_ph_prepare(trans)) | ||
| 1099 | return 0; | ||
| 1100 | |||
| 1101 | if (!mlxsw_sp_port_vlan) { | 1097 | if (!mlxsw_sp_port_vlan) { |
| 1102 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, | 1098 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, |
| 1103 | vid); | 1099 | vid); |
| @@ -1188,6 +1184,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1188 | return err; | 1184 | return err; |
| 1189 | } | 1185 | } |
| 1190 | 1186 | ||
| 1187 | if (switchdev_trans_ph_commit(trans)) | ||
| 1188 | return 0; | ||
| 1189 | |||
| 1191 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1190 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
| 1192 | if (WARN_ON(!bridge_port)) | 1191 | if (WARN_ON(!bridge_port)) |
| 1193 | return -EINVAL; | 1192 | return -EINVAL; |
| @@ -1200,7 +1199,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1200 | 1199 | ||
| 1201 | err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, | 1200 | err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, |
| 1202 | vid, flag_untagged, | 1201 | vid, flag_untagged, |
| 1203 | flag_pvid, extack, trans); | 1202 | flag_pvid, extack); |
| 1204 | if (err) | 1203 | if (err) |
| 1205 | return err; | 1204 | return err; |
| 1206 | } | 1205 | } |
| @@ -1234,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, | |||
| 1234 | static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) | 1233 | static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) |
| 1235 | { | 1234 | { |
| 1236 | return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : | 1235 | return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : |
| 1237 | MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; | 1236 | MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG; |
| 1238 | } | 1237 | } |
| 1239 | 1238 | ||
| 1240 | static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) | 1239 | static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) |
| @@ -1291,7 +1290,7 @@ out: | |||
| 1291 | static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1290 | static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 1292 | const char *mac, u16 fid, bool adding, | 1291 | const char *mac, u16 fid, bool adding, |
| 1293 | enum mlxsw_reg_sfd_rec_action action, | 1292 | enum mlxsw_reg_sfd_rec_action action, |
| 1294 | bool dynamic) | 1293 | enum mlxsw_reg_sfd_rec_policy policy) |
| 1295 | { | 1294 | { |
| 1296 | char *sfd_pl; | 1295 | char *sfd_pl; |
| 1297 | u8 num_rec; | 1296 | u8 num_rec; |
| @@ -1302,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1302 | return -ENOMEM; | 1301 | return -ENOMEM; |
| 1303 | 1302 | ||
| 1304 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1303 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
| 1305 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1304 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); |
| 1306 | mac, fid, action, local_port); | ||
| 1307 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | 1305 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); |
| 1308 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1306 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
| 1309 | if (err) | 1307 | if (err) |
| @@ -1322,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1322 | bool dynamic) | 1320 | bool dynamic) |
| 1323 | { | 1321 | { |
| 1324 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, | 1322 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, |
| 1325 | MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); | 1323 | MLXSW_REG_SFD_REC_ACTION_NOP, |
| 1324 | mlxsw_sp_sfd_rec_policy(dynamic)); | ||
| 1326 | } | 1325 | } |
| 1327 | 1326 | ||
| 1328 | int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, | 1327 | int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, |
| @@ -1330,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, | |||
| 1330 | { | 1329 | { |
| 1331 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, | 1330 | return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, |
| 1332 | MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, | 1331 | MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, |
| 1333 | false); | 1332 | MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); |
| 1334 | } | 1333 | } |
| 1335 | 1334 | ||
| 1336 | static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | 1335 | static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, |
| @@ -1808,7 +1807,7 @@ static void | |||
| 1808 | mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1807 | mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, |
| 1809 | struct mlxsw_sp_bridge_port *bridge_port, u16 vid) | 1808 | struct mlxsw_sp_bridge_port *bridge_port, u16 vid) |
| 1810 | { | 1809 | { |
| 1811 | u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; | 1810 | u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; |
| 1812 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1811 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
| 1813 | 1812 | ||
| 1814 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); | 1813 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); |
| @@ -3207,7 +3206,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, | |||
| 3207 | struct mlxsw_sp_bridge_device *bridge_device, | 3206 | struct mlxsw_sp_bridge_device *bridge_device, |
| 3208 | const struct net_device *vxlan_dev, u16 vid, | 3207 | const struct net_device *vxlan_dev, u16 vid, |
| 3209 | bool flag_untagged, bool flag_pvid, | 3208 | bool flag_untagged, bool flag_pvid, |
| 3210 | struct switchdev_trans *trans, | ||
| 3211 | struct netlink_ext_ack *extack) | 3209 | struct netlink_ext_ack *extack) |
| 3212 | { | 3210 | { |
| 3213 | struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); | 3211 | struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); |
| @@ -3225,9 +3223,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, | |||
| 3225 | mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) | 3223 | mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) |
| 3226 | return -EINVAL; | 3224 | return -EINVAL; |
| 3227 | 3225 | ||
| 3228 | if (switchdev_trans_ph_prepare(trans)) | ||
| 3229 | return 0; | ||
| 3230 | |||
| 3231 | if (!netif_running(vxlan_dev)) | 3226 | if (!netif_running(vxlan_dev)) |
| 3232 | return 0; | 3227 | return 0; |
| 3233 | 3228 | ||
| @@ -3345,6 +3340,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, | |||
| 3345 | 3340 | ||
| 3346 | port_obj_info->handled = true; | 3341 | port_obj_info->handled = true; |
| 3347 | 3342 | ||
| 3343 | if (switchdev_trans_ph_commit(trans)) | ||
| 3344 | return 0; | ||
| 3345 | |||
| 3348 | bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); | 3346 | bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); |
| 3349 | if (!bridge_device) | 3347 | if (!bridge_device) |
| 3350 | return -EINVAL; | 3348 | return -EINVAL; |
| @@ -3358,8 +3356,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, | |||
| 3358 | err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, | 3356 | err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, |
| 3359 | vxlan_dev, vid, | 3357 | vxlan_dev, vid, |
| 3360 | flag_untagged, | 3358 | flag_untagged, |
| 3361 | flag_pvid, trans, | 3359 | flag_pvid, extack); |
| 3362 | extack); | ||
| 3363 | if (err) | 3360 | if (err) |
| 3364 | return err; | 3361 | return err; |
| 3365 | } | 3362 | } |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 20c9377e99cb..310807ef328b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c | |||
| @@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) | |||
| 962 | 962 | ||
| 963 | memset(&ksettings, 0, sizeof(ksettings)); | 963 | memset(&ksettings, 0, sizeof(ksettings)); |
| 964 | phy_ethtool_get_link_ksettings(netdev, &ksettings); | 964 | phy_ethtool_get_link_ksettings(netdev, &ksettings); |
| 965 | local_advertisement = phy_read(phydev, MII_ADVERTISE); | 965 | local_advertisement = |
| 966 | if (local_advertisement < 0) | 966 | linkmode_adv_to_mii_adv_t(phydev->advertising); |
| 967 | return; | 967 | remote_advertisement = |
| 968 | 968 | linkmode_adv_to_mii_adv_t(phydev->lp_advertising); | |
| 969 | remote_advertisement = phy_read(phydev, MII_LPA); | ||
| 970 | if (remote_advertisement < 0) | ||
| 971 | return; | ||
| 972 | 969 | ||
| 973 | lan743x_phy_update_flowcontrol(adapter, | 970 | lan743x_phy_update_flowcontrol(adapter, |
| 974 | ksettings.base.duplex, | 971 | ksettings.base.duplex, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 90afd514ffe1..d9237c65a838 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
| @@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, | |||
| 1619 | cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); | 1619 | cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); |
| 1620 | rx_prod.bd_prod = cpu_to_le16(bd_prod); | 1620 | rx_prod.bd_prod = cpu_to_le16(bd_prod); |
| 1621 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); | 1621 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); |
| 1622 | |||
| 1623 | /* Make sure chain element is updated before ringing the doorbell */ | ||
| 1624 | dma_wmb(); | ||
| 1625 | |||
| 1622 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); | 1626 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); |
| 1623 | } | 1627 | } |
| 1624 | 1628 | ||
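The qed_ll2.c hunk above inserts dma_wmb() so the producer indices written into the chain element are visible before the doorbell write that tells the device to fetch them. As a rough userspace analogue (no real device involved), the sketch below uses a C11 release fence in place of dma_wmb(); the structure and doorbell encoding are made up.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Invented producer-update helper: descriptor words first, fence, then the
 * "doorbell" store, so a consumer that sees the doorbell also sees the data.
 */
struct rx_prod { uint16_t bd_prod; uint16_t cqe_prod; };

static struct rx_prod shadow;
static _Atomic uint32_t doorbell;

static void post_producer_update(uint16_t bd, uint16_t cqe)
{
	shadow.bd_prod  = bd;
	shadow.cqe_prod = cqe;

	/* order the descriptor update before the doorbell write */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&doorbell, ((uint32_t)cqe << 16) | bd,
			      memory_order_relaxed);
}

int main(void)
{
	post_producer_update(5, 7);
	printf("doorbell=%#x\n", (unsigned)atomic_load(&doorbell));
	return 0;
}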
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 298930d39b79..abb94c543aa2 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -205,6 +205,8 @@ enum cfg_version { | |||
| 205 | }; | 205 | }; |
| 206 | 206 | ||
| 207 | static const struct pci_device_id rtl8169_pci_tbl[] = { | 207 | static const struct pci_device_id rtl8169_pci_tbl[] = { |
| 208 | { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, | ||
| 209 | { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, | ||
| 208 | { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, | 210 | { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, |
| 209 | { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, | 211 | { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, |
| 210 | { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, | 212 | { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, |
| @@ -706,6 +708,7 @@ module_param(use_dac, int, 0); | |||
| 706 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); | 708 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
| 707 | module_param_named(debug, debug.msg_enable, int, 0); | 709 | module_param_named(debug, debug.msg_enable, int, 0); |
| 708 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); | 710 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); |
| 711 | MODULE_SOFTDEP("pre: realtek"); | ||
| 709 | MODULE_LICENSE("GPL"); | 712 | MODULE_LICENSE("GPL"); |
| 710 | MODULE_FIRMWARE(FIRMWARE_8168D_1); | 713 | MODULE_FIRMWARE(FIRMWARE_8168D_1); |
| 711 | MODULE_FIRMWARE(FIRMWARE_8168D_2); | 714 | MODULE_FIRMWARE(FIRMWARE_8168D_2); |
| @@ -1679,11 +1682,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp) | |||
| 1679 | 1682 | ||
| 1680 | static bool rtl8169_update_counters(struct rtl8169_private *tp) | 1683 | static bool rtl8169_update_counters(struct rtl8169_private *tp) |
| 1681 | { | 1684 | { |
| 1685 | u8 val = RTL_R8(tp, ChipCmd); | ||
| 1686 | |||
| 1682 | /* | 1687 | /* |
| 1683 | * Some chips are unable to dump tally counters when the receiver | 1688 | * Some chips are unable to dump tally counters when the receiver |
| 1684 | * is disabled. | 1689 | * is disabled. If 0xff chip may be in a PCI power-save state. |
| 1685 | */ | 1690 | */ |
| 1686 | if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) | 1691 | if (!(val & CmdRxEnb) || val == 0xff) |
| 1687 | return true; | 1692 | return true; |
| 1688 | 1693 | ||
| 1689 | return rtl8169_do_counters(tp, CounterDump); | 1694 | return rtl8169_do_counters(tp, CounterDump); |
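The rtl8169_update_counters() hunk above reads ChipCmd once and also skips the tally-counter dump when the register reads back as 0xff, which usually means the chip is in a PCI power-save state or otherwise unreachable. A tiny sketch of that guard follows; the bit value used for the receive-enable flag is a placeholder, not taken from the driver's register definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_RX_ENB 0x08	/* placeholder for the receiver-enable bit */

static bool should_dump_counters(uint8_t chip_cmd)
{
	/* receiver off, or register reads as all ones: skip the dump */
	if (!(chip_cmd & CMD_RX_ENB) || chip_cmd == 0xff)
		return false;
	return true;
}

int main(void)
{
	printf("rx off : %d\n", should_dump_counters(0x00));
	printf("all 1s : %d\n", should_dump_counters(0xff));
	printf("rx on  : %d\n", should_dump_counters(CMD_RX_ENB));
	return 0;
}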
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ffc1ada4e6da..d28c8f9ca55b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) | |||
| 343 | int i; | 343 | int i; |
| 344 | 344 | ||
| 345 | priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + | 345 | priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + |
| 346 | ETH_HLEN + VLAN_HLEN; | 346 | ETH_HLEN + VLAN_HLEN + sizeof(__sum16); |
| 347 | 347 | ||
| 348 | /* Allocate RX and TX skb rings */ | 348 | /* Allocate RX and TX skb rings */ |
| 349 | priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], | 349 | priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], |
| @@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb) | |||
| 524 | { | 524 | { |
| 525 | u8 *hw_csum; | 525 | u8 *hw_csum; |
| 526 | 526 | ||
| 527 | /* The hardware checksum is 2 bytes appended to packet data */ | 527 | /* The hardware checksum is contained in sizeof(__sum16) (2) bytes |
| 528 | if (unlikely(skb->len < 2)) | 528 | * appended to packet data |
| 529 | */ | ||
| 530 | if (unlikely(skb->len < sizeof(__sum16))) | ||
| 529 | return; | 531 | return; |
| 530 | hw_csum = skb_tail_pointer(skb) - 2; | 532 | hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); |
| 531 | skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); | 533 | skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); |
| 532 | skb->ip_summed = CHECKSUM_COMPLETE; | 534 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 533 | skb_trim(skb, skb->len - 2); | 535 | skb_trim(skb, skb->len - sizeof(__sum16)); |
| 534 | } | 536 | } |
| 535 | 537 | ||
| 536 | /* Packet receive function for Ethernet AVB */ | 538 | /* Packet receive function for Ethernet AVB */ |
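The ravb hunks above size the RX buffer and the checksum-trailer handling in terms of sizeof(__sum16) rather than a bare 2, making the relationship to the appended hardware checksum explicit. The userspace snippet below mimics reading and trimming such a 2-byte trailer; the frame contents are fabricated and only the tail arithmetic is the point.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t frame[64 + sizeof(uint16_t)];
	size_t len = sizeof(frame);

	memset(frame, 0, sizeof(frame));
	frame[len - 2] = 0x34;		/* little-endian checksum trailer */
	frame[len - 1] = 0x12;

	if (len < sizeof(uint16_t))	/* too short to even hold the trailer */
		return 0;

	const uint8_t *hw_csum = frame + len - sizeof(uint16_t);
	uint16_t csum = (uint16_t)hw_csum[0] | ((uint16_t)hw_csum[1] << 8);

	len -= sizeof(uint16_t);	/* skb_trim() equivalent */
	printf("csum=%#x payload_len=%zu\n", (unsigned)csum, len);
	return 0;
}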
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b6a50058bb8d..2f2bda68d861 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { | |||
| 6046 | { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, | 6046 | { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, |
| 6047 | { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } | 6047 | { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } |
| 6048 | }; | 6048 | }; |
| 6049 | #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) | ||
| 6049 | 6050 | ||
| 6050 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | 6051 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, |
| 6051 | struct efx_mcdi_mtd_partition *part, | 6052 | struct efx_mcdi_mtd_partition *part, |
| 6052 | unsigned int type) | 6053 | unsigned int type, |
| 6054 | unsigned long *found) | ||
| 6053 | { | 6055 | { |
| 6054 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); | 6056 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); |
| 6055 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); | 6057 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); |
| 6056 | const struct efx_ef10_nvram_type_info *info; | 6058 | const struct efx_ef10_nvram_type_info *info; |
| 6057 | size_t size, erase_size, outlen; | 6059 | size_t size, erase_size, outlen; |
| 6060 | int type_idx = 0; | ||
| 6058 | bool protected; | 6061 | bool protected; |
| 6059 | int rc; | 6062 | int rc; |
| 6060 | 6063 | ||
| 6061 | for (info = efx_ef10_nvram_types; ; info++) { | 6064 | for (type_idx = 0; ; type_idx++) { |
| 6062 | if (info == | 6065 | if (type_idx == EF10_NVRAM_PARTITION_COUNT) |
| 6063 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) | ||
| 6064 | return -ENODEV; | 6066 | return -ENODEV; |
| 6067 | info = efx_ef10_nvram_types + type_idx; | ||
| 6065 | if ((type & ~info->type_mask) == info->type) | 6068 | if ((type & ~info->type_mask) == info->type) |
| 6066 | break; | 6069 | break; |
| 6067 | } | 6070 | } |
| @@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |||
| 6074 | if (protected) | 6077 | if (protected) |
| 6075 | return -ENODEV; /* hide it */ | 6078 | return -ENODEV; /* hide it */ |
| 6076 | 6079 | ||
| 6080 | /* If we've already exposed a partition of this type, hide this | ||
| 6081 | * duplicate. All operations on MTDs are keyed by the type anyway, | ||
| 6082 | * so we can't act on the duplicate. | ||
| 6083 | */ | ||
| 6084 | if (__test_and_set_bit(type_idx, found)) | ||
| 6085 | return -EEXIST; | ||
| 6086 | |||
| 6077 | part->nvram_type = type; | 6087 | part->nvram_type = type; |
| 6078 | 6088 | ||
| 6079 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); | 6089 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); |
| @@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |||
| 6105 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | 6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
| 6106 | { | 6116 | { |
| 6107 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | 6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
| 6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); | ||
| 6108 | struct efx_mcdi_mtd_partition *parts; | 6119 | struct efx_mcdi_mtd_partition *parts; |
| 6109 | size_t outlen, n_parts_total, i, n_parts; | 6120 | size_t outlen, n_parts_total, i, n_parts; |
| 6110 | unsigned int type; | 6121 | unsigned int type; |
| @@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx) | |||
| 6133 | for (i = 0; i < n_parts_total; i++) { | 6144 | for (i = 0; i < n_parts_total; i++) { |
| 6134 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, | 6145 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, |
| 6135 | i); | 6146 | i); |
| 6136 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); | 6147 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type, |
| 6137 | if (rc == 0) | 6148 | found); |
| 6138 | n_parts++; | 6149 | if (rc == -EEXIST || rc == -ENODEV) |
| 6139 | else if (rc != -ENODEV) | 6150 | continue; |
| 6151 | if (rc) | ||
| 6140 | goto fail; | 6152 | goto fail; |
| 6153 | n_parts++; | ||
| 6141 | } | 6154 | } |
| 6142 | 6155 | ||
| 6143 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); | 6156 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); |
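The ef10.c hunks above track already-exposed NVRAM partition types in a bitmap and return -EEXIST for duplicates, which the probe loop then skips instead of failing the whole MTD probe. The sketch below shows the same test-and-set idea on a plain unsigned long; the partition type numbers are arbitrary.

#include <stdbool.h>
#include <stdio.h>

static bool test_and_set(unsigned long *map, unsigned int bit)
{
	unsigned long mask = 1UL << bit;
	bool was_set = *map & mask;

	*map |= mask;
	return was_set;
}

int main(void)
{
	unsigned long found = 0;
	unsigned int scanned[] = { 3, 7, 3, 9, 7 };
	unsigned int exposed = 0;

	for (unsigned int i = 0; i < sizeof(scanned) / sizeof(scanned[0]); i++) {
		if (test_and_set(&found, scanned[i]))
			continue;	/* duplicate type: hide it, don't error out */
		exposed++;
	}
	printf("exposed %u of %zu partitions\n",
	       exposed, sizeof(scanned) / sizeof(scanned[0]));
	return 0;
}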
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 6c5092e7771c..c5e25580a43f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | |||
| @@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
| 263 | struct stmmac_extra_stats *x, u32 chan) | 263 | struct stmmac_extra_stats *x, u32 chan) |
| 264 | { | 264 | { |
| 265 | u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); | 265 | u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); |
| 266 | u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); | ||
| 266 | int ret = 0; | 267 | int ret = 0; |
| 267 | 268 | ||
| 268 | /* ABNORMAL interrupts */ | 269 | /* ABNORMAL interrupts */ |
| @@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
| 282 | x->normal_irq_n++; | 283 | x->normal_irq_n++; |
| 283 | 284 | ||
| 284 | if (likely(intr_status & XGMAC_RI)) { | 285 | if (likely(intr_status & XGMAC_RI)) { |
| 285 | u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); | 286 | if (likely(intr_en & XGMAC_RIE)) { |
| 286 | if (likely(value & XGMAC_RIE)) { | ||
| 287 | x->rx_normal_irq_n++; | 287 | x->rx_normal_irq_n++; |
| 288 | ret |= handle_rx; | 288 | ret |= handle_rx; |
| 289 | } | 289 | } |
| @@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, | |||
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | /* Clear interrupts */ | 297 | /* Clear interrupts */ |
| 298 | writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); | 298 | writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan)); |
| 299 | 299 | ||
| 300 | return ret; | 300 | return ret; |
| 301 | } | 301 | } |
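The dwxgmac2 change above stops acking the whole status register with ~0 and instead clears only the bits that are both enabled and pending, so events whose interrupt is currently masked stay latched. A hedged, driver-agnostic sketch of that ack pattern (register offsets are placeholders, not dwxgmac2 definitions):

#include <linux/io.h>
#include <linux/types.h>

static void ack_enabled_irqs(void __iomem *base, u32 status_off, u32 int_en_off)
{
        u32 status = readl(base + status_off);
        u32 enabled = readl(base + int_en_off);

        /* Write back only enabled & pending bits; masked events remain
         * latched until their handler re-enables and services them.
         */
        writel(enabled & status, base + status_off);
}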
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0c4ab3444cc3..5afba69981cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3517,27 +3517,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget) | |||
| 3517 | struct stmmac_channel *ch = | 3517 | struct stmmac_channel *ch = |
| 3518 | container_of(napi, struct stmmac_channel, napi); | 3518 | container_of(napi, struct stmmac_channel, napi); |
| 3519 | struct stmmac_priv *priv = ch->priv_data; | 3519 | struct stmmac_priv *priv = ch->priv_data; |
| 3520 | int work_done = 0, work_rem = budget; | 3520 | int work_done, rx_done = 0, tx_done = 0; |
| 3521 | u32 chan = ch->index; | 3521 | u32 chan = ch->index; |
| 3522 | 3522 | ||
| 3523 | priv->xstats.napi_poll++; | 3523 | priv->xstats.napi_poll++; |
| 3524 | 3524 | ||
| 3525 | if (ch->has_tx) { | 3525 | if (ch->has_tx) |
| 3526 | int done = stmmac_tx_clean(priv, work_rem, chan); | 3526 | tx_done = stmmac_tx_clean(priv, budget, chan); |
| 3527 | if (ch->has_rx) | ||
| 3528 | rx_done = stmmac_rx(priv, budget, chan); | ||
| 3527 | 3529 | ||
| 3528 | work_done += done; | 3530 | work_done = max(rx_done, tx_done); |
| 3529 | work_rem -= done; | 3531 | work_done = min(work_done, budget); |
| 3530 | } | ||
| 3531 | |||
| 3532 | if (ch->has_rx) { | ||
| 3533 | int done = stmmac_rx(priv, work_rem, chan); | ||
| 3534 | 3532 | ||
| 3535 | work_done += done; | 3533 | if (work_done < budget && napi_complete_done(napi, work_done)) { |
| 3536 | work_rem -= done; | 3534 | int stat; |
| 3537 | } | ||
| 3538 | 3535 | ||
| 3539 | if (work_done < budget && napi_complete_done(napi, work_done)) | ||
| 3540 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | 3536 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); |
| 3537 | stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, | ||
| 3538 | &priv->xstats, chan); | ||
| 3539 | if (stat && napi_reschedule(napi)) | ||
| 3540 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
| 3541 | } | ||
| 3541 | 3542 | ||
| 3542 | return work_done; | 3543 | return work_done; |
| 3543 | } | 3544 | } |
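The reworked stmmac poll cleans RX and TX with the full budget, reports min(max(rx, tx), budget), and, after napi_complete_done(), re-reads the DMA status so an event that raced with completion leads to an immediate reschedule with the IRQ masked again. A hedged sketch of that ordering follows; the example_* helpers are placeholders, not stmmac functions.

#include <linux/kernel.h>
#include <linux/netdevice.h>

int example_clean_rx(int budget);       /* placeholder */
int example_clean_tx(int budget);       /* placeholder */
void example_irq_enable(void);          /* placeholder */
void example_irq_disable(void);         /* placeholder */
bool example_irq_pending(void);         /* placeholder */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int rx_done = example_clean_rx(budget);
        int tx_done = example_clean_tx(budget);
        int work_done = min(max(rx_done, tx_done), budget);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                example_irq_enable();
                /* Close the race: if new work arrived while completing,
                 * mask the IRQ again and let NAPI run another round.
                 */
                if (example_irq_pending() && napi_reschedule(napi))
                        example_irq_disable();
        }

        return work_done;
}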
| @@ -4160,6 +4161,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 4160 | return ret; | 4161 | return ret; |
| 4161 | } | 4162 | } |
| 4162 | 4163 | ||
| 4164 | /* Rx Watchdog is available in the COREs newer than the 3.40. | ||
| 4165 | * In some case, for example on bugged HW this feature | ||
| 4166 | * has to be disable and this can be done by passing the | ||
| 4167 | * riwt_off field from the platform. | ||
| 4168 | */ | ||
| 4169 | if (((priv->synopsys_id >= DWMAC_CORE_3_50) || | ||
| 4170 | (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { | ||
| 4171 | priv->use_riwt = 1; | ||
| 4172 | dev_info(priv->device, | ||
| 4173 | "Enable RX Mitigation via HW Watchdog Timer\n"); | ||
| 4174 | } | ||
| 4175 | |||
| 4163 | return 0; | 4176 | return 0; |
| 4164 | } | 4177 | } |
| 4165 | 4178 | ||
| @@ -4292,18 +4305,6 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4292 | if (flow_ctrl) | 4305 | if (flow_ctrl) |
| 4293 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ | 4306 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ |
| 4294 | 4307 | ||
| 4295 | /* Rx Watchdog is available in the COREs newer than the 3.40. | ||
| 4296 | * In some case, for example on bugged HW this feature | ||
| 4297 | * has to be disable and this can be done by passing the | ||
| 4298 | * riwt_off field from the platform. | ||
| 4299 | */ | ||
| 4300 | if (((priv->synopsys_id >= DWMAC_CORE_3_50) || | ||
| 4301 | (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { | ||
| 4302 | priv->use_riwt = 1; | ||
| 4303 | dev_info(priv->device, | ||
| 4304 | "Enable RX Mitigation via HW Watchdog Timer\n"); | ||
| 4305 | } | ||
| 4306 | |||
| 4307 | /* Setup channels NAPI */ | 4308 | /* Setup channels NAPI */ |
| 4308 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); | 4309 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
| 4309 | 4310 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index c54a50dbd5ac..d819e8eaba12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
| @@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev, | |||
| 299 | */ | 299 | */ |
| 300 | static void stmmac_pci_remove(struct pci_dev *pdev) | 300 | static void stmmac_pci_remove(struct pci_dev *pdev) |
| 301 | { | 301 | { |
| 302 | int i; | ||
| 303 | |||
| 302 | stmmac_dvr_remove(&pdev->dev); | 304 | stmmac_dvr_remove(&pdev->dev); |
| 305 | |||
| 306 | for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { | ||
| 307 | if (pci_resource_len(pdev, i) == 0) | ||
| 308 | continue; | ||
| 309 | pcim_iounmap_regions(pdev, BIT(i)); | ||
| 310 | break; | ||
| 311 | } | ||
| 312 | |||
| 303 | pci_disable_device(pdev); | 313 | pci_disable_device(pdev); |
| 304 | } | 314 | } |
| 305 | 315 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 531294f4978b..58ea18af9813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
| @@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv, | |||
| 301 | /* Queue 0 is not AVB capable */ | 301 | /* Queue 0 is not AVB capable */ |
| 302 | if (queue <= 0 || queue >= tx_queues_count) | 302 | if (queue <= 0 || queue >= tx_queues_count) |
| 303 | return -EINVAL; | 303 | return -EINVAL; |
| 304 | if (!priv->dma_cap.av) | ||
| 305 | return -EOPNOTSUPP; | ||
| 304 | if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) | 306 | if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) |
| 305 | return -EOPNOTSUPP; | 307 | return -EOPNOTSUPP; |
| 306 | 308 | ||
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 9020b084b953..7ec4eb74fe21 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
| @@ -1,22 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. | 2 | /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2004 Sun Microsystems Inc. | 4 | * Copyright (C) 2004 Sun Microsystems Inc. |
| 5 | * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) | 5 | * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License as | ||
| 9 | * published by the Free Software Foundation; either version 2 of the | ||
| 10 | * License, or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
| 19 | * | ||
| 20 | * This driver uses the sungem driver (c) David Miller | 7 | * This driver uses the sungem driver (c) David Miller |
| 21 | * (davem@redhat.com) as its basis. | 8 | * (davem@redhat.com) as its basis. |
| 22 | * | 9 | * |
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 13f3860496a8..ae5f05f03f88 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h | |||
| @@ -1,23 +1,10 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
| 2 | /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ | 2 | /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ |
| 3 | * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. | 3 | * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2004 Sun Microsystems Inc. | 5 | * Copyright (C) 2004 Sun Microsystems Inc. |
| 6 | * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) | 6 | * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License as | ||
| 10 | * published by the Free Software Foundation; either version 2 of the | ||
| 11 | * License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
| 20 | * | ||
| 21 | * vendor id: 0x108E (Sun Microsystems, Inc.) | 8 | * vendor id: 0x108E (Sun Microsystems, Inc.) |
| 22 | * device id: 0xabba (Cassini) | 9 | * device id: 0xabba (Cassini) |
| 23 | * revision ids: 0x01 = Cassini | 10 | * revision ids: 0x01 = Cassini |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ef6f766f6389..e859ae2e42d5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -144,6 +144,8 @@ struct hv_netvsc_packet { | |||
| 144 | u32 total_data_buflen; | 144 | u32 total_data_buflen; |
| 145 | }; | 145 | }; |
| 146 | 146 | ||
| 147 | #define NETVSC_HASH_KEYLEN 40 | ||
| 148 | |||
| 147 | struct netvsc_device_info { | 149 | struct netvsc_device_info { |
| 148 | unsigned char mac_adr[ETH_ALEN]; | 150 | unsigned char mac_adr[ETH_ALEN]; |
| 149 | u32 num_chn; | 151 | u32 num_chn; |
| @@ -151,6 +153,8 @@ struct netvsc_device_info { | |||
| 151 | u32 recv_sections; | 153 | u32 recv_sections; |
| 152 | u32 send_section_size; | 154 | u32 send_section_size; |
| 153 | u32 recv_section_size; | 155 | u32 recv_section_size; |
| 156 | |||
| 157 | u8 rss_key[NETVSC_HASH_KEYLEN]; | ||
| 154 | }; | 158 | }; |
| 155 | 159 | ||
| 156 | enum rndis_device_state { | 160 | enum rndis_device_state { |
| @@ -160,8 +164,6 @@ enum rndis_device_state { | |||
| 160 | RNDIS_DEV_DATAINITIALIZED, | 164 | RNDIS_DEV_DATAINITIALIZED, |
| 161 | }; | 165 | }; |
| 162 | 166 | ||
| 163 | #define NETVSC_HASH_KEYLEN 40 | ||
| 164 | |||
| 165 | struct rndis_device { | 167 | struct rndis_device { |
| 166 | struct net_device *ndev; | 168 | struct net_device *ndev; |
| 167 | 169 | ||
| @@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net, | |||
| 209 | void netvsc_channel_cb(void *context); | 211 | void netvsc_channel_cb(void *context); |
| 210 | int netvsc_poll(struct napi_struct *napi, int budget); | 212 | int netvsc_poll(struct napi_struct *napi, int budget); |
| 211 | 213 | ||
| 212 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); | 214 | int rndis_set_subchannel(struct net_device *ndev, |
| 215 | struct netvsc_device *nvdev, | ||
| 216 | struct netvsc_device_info *dev_info); | ||
| 213 | int rndis_filter_open(struct netvsc_device *nvdev); | 217 | int rndis_filter_open(struct netvsc_device *nvdev); |
| 214 | int rndis_filter_close(struct netvsc_device *nvdev); | 218 | int rndis_filter_close(struct netvsc_device *nvdev); |
| 215 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | 219 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
| @@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type { | |||
| 1177 | 1181 | ||
| 1178 | enum rndis_per_pkt_info_interal_type { | 1182 | enum rndis_per_pkt_info_interal_type { |
| 1179 | RNDIS_PKTINFO_ID = 1, | 1183 | RNDIS_PKTINFO_ID = 1, |
| 1180 | /* Add more memebers here */ | 1184 | /* Add more members here */ |
| 1181 | 1185 | ||
| 1182 | RNDIS_PKTINFO_MAX | 1186 | RNDIS_PKTINFO_MAX |
| 1183 | }; | 1187 | }; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 922054c1d544..813d195bbd57 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w) | |||
| 84 | 84 | ||
| 85 | rdev = nvdev->extension; | 85 | rdev = nvdev->extension; |
| 86 | if (rdev) { | 86 | if (rdev) { |
| 87 | ret = rndis_set_subchannel(rdev->ndev, nvdev); | 87 | ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL); |
| 88 | if (ret == 0) { | 88 | if (ret == 0) { |
| 89 | netif_device_attach(rdev->ndev); | 89 | netif_device_attach(rdev->ndev); |
| 90 | } else { | 90 | } else { |
| @@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context) | |||
| 1331 | prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); | 1331 | prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); |
| 1332 | 1332 | ||
| 1333 | if (napi_schedule_prep(&nvchan->napi)) { | 1333 | if (napi_schedule_prep(&nvchan->napi)) { |
| 1334 | /* disable interupts from host */ | 1334 | /* disable interrupts from host */ |
| 1335 | hv_begin_read(rbi); | 1335 | hv_begin_read(rbi); |
| 1336 | 1336 | ||
| 1337 | __napi_schedule_irqoff(&nvchan->napi); | 1337 | __napi_schedule_irqoff(&nvchan->napi); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 91ed15ea5883..256adbd044f5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, | |||
| 370 | { | 370 | { |
| 371 | int j = 0; | 371 | int j = 0; |
| 372 | 372 | ||
| 373 | /* Deal with compund pages by ignoring unused part | 373 | /* Deal with compound pages by ignoring unused part |
| 374 | * of the page. | 374 | * of the page. |
| 375 | */ | 375 | */ |
| 376 | page += (offset >> PAGE_SHIFT); | 376 | page += (offset >> PAGE_SHIFT); |
| @@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net, | |||
| 858 | } | 858 | } |
| 859 | } | 859 | } |
| 860 | 860 | ||
| 861 | /* Alloc struct netvsc_device_info, and initialize it from either existing | ||
| 862 | * struct netvsc_device, or from default values. | ||
| 863 | */ | ||
| 864 | static struct netvsc_device_info *netvsc_devinfo_get | ||
| 865 | (struct netvsc_device *nvdev) | ||
| 866 | { | ||
| 867 | struct netvsc_device_info *dev_info; | ||
| 868 | |||
| 869 | dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC); | ||
| 870 | |||
| 871 | if (!dev_info) | ||
| 872 | return NULL; | ||
| 873 | |||
| 874 | if (nvdev) { | ||
| 875 | dev_info->num_chn = nvdev->num_chn; | ||
| 876 | dev_info->send_sections = nvdev->send_section_cnt; | ||
| 877 | dev_info->send_section_size = nvdev->send_section_size; | ||
| 878 | dev_info->recv_sections = nvdev->recv_section_cnt; | ||
| 879 | dev_info->recv_section_size = nvdev->recv_section_size; | ||
| 880 | |||
| 881 | memcpy(dev_info->rss_key, nvdev->extension->rss_key, | ||
| 882 | NETVSC_HASH_KEYLEN); | ||
| 883 | } else { | ||
| 884 | dev_info->num_chn = VRSS_CHANNEL_DEFAULT; | ||
| 885 | dev_info->send_sections = NETVSC_DEFAULT_TX; | ||
| 886 | dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE; | ||
| 887 | dev_info->recv_sections = NETVSC_DEFAULT_RX; | ||
| 888 | dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE; | ||
| 889 | } | ||
| 890 | |||
| 891 | return dev_info; | ||
| 892 | } | ||
| 893 | |||
| 861 | static int netvsc_detach(struct net_device *ndev, | 894 | static int netvsc_detach(struct net_device *ndev, |
| 862 | struct netvsc_device *nvdev) | 895 | struct netvsc_device *nvdev) |
| 863 | { | 896 | { |
| @@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev, | |||
| 909 | return PTR_ERR(nvdev); | 942 | return PTR_ERR(nvdev); |
| 910 | 943 | ||
| 911 | if (nvdev->num_chn > 1) { | 944 | if (nvdev->num_chn > 1) { |
| 912 | ret = rndis_set_subchannel(ndev, nvdev); | 945 | ret = rndis_set_subchannel(ndev, nvdev, dev_info); |
| 913 | 946 | ||
| 914 | /* if unavailable, just proceed with one queue */ | 947 | /* if unavailable, just proceed with one queue */ |
| 915 | if (ret) { | 948 | if (ret) { |
| @@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net, | |||
| 943 | struct net_device_context *net_device_ctx = netdev_priv(net); | 976 | struct net_device_context *net_device_ctx = netdev_priv(net); |
| 944 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 977 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
| 945 | unsigned int orig, count = channels->combined_count; | 978 | unsigned int orig, count = channels->combined_count; |
| 946 | struct netvsc_device_info device_info; | 979 | struct netvsc_device_info *device_info; |
| 947 | int ret; | 980 | int ret; |
| 948 | 981 | ||
| 949 | /* We do not support separate count for rx, tx, or other */ | 982 | /* We do not support separate count for rx, tx, or other */ |
| @@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net, | |||
| 962 | 995 | ||
| 963 | orig = nvdev->num_chn; | 996 | orig = nvdev->num_chn; |
| 964 | 997 | ||
| 965 | memset(&device_info, 0, sizeof(device_info)); | 998 | device_info = netvsc_devinfo_get(nvdev); |
| 966 | device_info.num_chn = count; | 999 | |
| 967 | device_info.send_sections = nvdev->send_section_cnt; | 1000 | if (!device_info) |
| 968 | device_info.send_section_size = nvdev->send_section_size; | 1001 | return -ENOMEM; |
| 969 | device_info.recv_sections = nvdev->recv_section_cnt; | 1002 | |
| 970 | device_info.recv_section_size = nvdev->recv_section_size; | 1003 | device_info->num_chn = count; |
| 971 | 1004 | ||
| 972 | ret = netvsc_detach(net, nvdev); | 1005 | ret = netvsc_detach(net, nvdev); |
| 973 | if (ret) | 1006 | if (ret) |
| 974 | return ret; | 1007 | goto out; |
| 975 | 1008 | ||
| 976 | ret = netvsc_attach(net, &device_info); | 1009 | ret = netvsc_attach(net, device_info); |
| 977 | if (ret) { | 1010 | if (ret) { |
| 978 | device_info.num_chn = orig; | 1011 | device_info->num_chn = orig; |
| 979 | if (netvsc_attach(net, &device_info)) | 1012 | if (netvsc_attach(net, device_info)) |
| 980 | netdev_err(net, "restoring channel setting failed\n"); | 1013 | netdev_err(net, "restoring channel setting failed\n"); |
| 981 | } | 1014 | } |
| 982 | 1015 | ||
| 1016 | out: | ||
| 1017 | kfree(device_info); | ||
| 983 | return ret; | 1018 | return ret; |
| 984 | } | 1019 | } |
| 985 | 1020 | ||
| @@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
| 1048 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); | 1083 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); |
| 1049 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); | 1084 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
| 1050 | int orig_mtu = ndev->mtu; | 1085 | int orig_mtu = ndev->mtu; |
| 1051 | struct netvsc_device_info device_info; | 1086 | struct netvsc_device_info *device_info; |
| 1052 | int ret = 0; | 1087 | int ret = 0; |
| 1053 | 1088 | ||
| 1054 | if (!nvdev || nvdev->destroy) | 1089 | if (!nvdev || nvdev->destroy) |
| 1055 | return -ENODEV; | 1090 | return -ENODEV; |
| 1056 | 1091 | ||
| 1092 | device_info = netvsc_devinfo_get(nvdev); | ||
| 1093 | |||
| 1094 | if (!device_info) | ||
| 1095 | return -ENOMEM; | ||
| 1096 | |||
| 1057 | /* Change MTU of underlying VF netdev first. */ | 1097 | /* Change MTU of underlying VF netdev first. */ |
| 1058 | if (vf_netdev) { | 1098 | if (vf_netdev) { |
| 1059 | ret = dev_set_mtu(vf_netdev, mtu); | 1099 | ret = dev_set_mtu(vf_netdev, mtu); |
| 1060 | if (ret) | 1100 | if (ret) |
| 1061 | return ret; | 1101 | goto out; |
| 1062 | } | 1102 | } |
| 1063 | 1103 | ||
| 1064 | memset(&device_info, 0, sizeof(device_info)); | ||
| 1065 | device_info.num_chn = nvdev->num_chn; | ||
| 1066 | device_info.send_sections = nvdev->send_section_cnt; | ||
| 1067 | device_info.send_section_size = nvdev->send_section_size; | ||
| 1068 | device_info.recv_sections = nvdev->recv_section_cnt; | ||
| 1069 | device_info.recv_section_size = nvdev->recv_section_size; | ||
| 1070 | |||
| 1071 | ret = netvsc_detach(ndev, nvdev); | 1104 | ret = netvsc_detach(ndev, nvdev); |
| 1072 | if (ret) | 1105 | if (ret) |
| 1073 | goto rollback_vf; | 1106 | goto rollback_vf; |
| 1074 | 1107 | ||
| 1075 | ndev->mtu = mtu; | 1108 | ndev->mtu = mtu; |
| 1076 | 1109 | ||
| 1077 | ret = netvsc_attach(ndev, &device_info); | 1110 | ret = netvsc_attach(ndev, device_info); |
| 1078 | if (ret) | 1111 | if (!ret) |
| 1079 | goto rollback; | 1112 | goto out; |
| 1080 | |||
| 1081 | return 0; | ||
| 1082 | 1113 | ||
| 1083 | rollback: | ||
| 1084 | /* Attempt rollback to original MTU */ | 1114 | /* Attempt rollback to original MTU */ |
| 1085 | ndev->mtu = orig_mtu; | 1115 | ndev->mtu = orig_mtu; |
| 1086 | 1116 | ||
| 1087 | if (netvsc_attach(ndev, &device_info)) | 1117 | if (netvsc_attach(ndev, device_info)) |
| 1088 | netdev_err(ndev, "restoring mtu failed\n"); | 1118 | netdev_err(ndev, "restoring mtu failed\n"); |
| 1089 | rollback_vf: | 1119 | rollback_vf: |
| 1090 | if (vf_netdev) | 1120 | if (vf_netdev) |
| 1091 | dev_set_mtu(vf_netdev, orig_mtu); | 1121 | dev_set_mtu(vf_netdev, orig_mtu); |
| 1092 | 1122 | ||
| 1123 | out: | ||
| 1124 | kfree(device_info); | ||
| 1093 | return ret; | 1125 | return ret; |
| 1094 | } | 1126 | } |
| 1095 | 1127 | ||
| @@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev, | |||
| 1674 | { | 1706 | { |
| 1675 | struct net_device_context *ndevctx = netdev_priv(ndev); | 1707 | struct net_device_context *ndevctx = netdev_priv(ndev); |
| 1676 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); | 1708 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
| 1677 | struct netvsc_device_info device_info; | 1709 | struct netvsc_device_info *device_info; |
| 1678 | struct ethtool_ringparam orig; | 1710 | struct ethtool_ringparam orig; |
| 1679 | u32 new_tx, new_rx; | 1711 | u32 new_tx, new_rx; |
| 1680 | int ret = 0; | 1712 | int ret = 0; |
| @@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev, | |||
| 1694 | new_rx == orig.rx_pending) | 1726 | new_rx == orig.rx_pending) |
| 1695 | return 0; /* no change */ | 1727 | return 0; /* no change */ |
| 1696 | 1728 | ||
| 1697 | memset(&device_info, 0, sizeof(device_info)); | 1729 | device_info = netvsc_devinfo_get(nvdev); |
| 1698 | device_info.num_chn = nvdev->num_chn; | 1730 | |
| 1699 | device_info.send_sections = new_tx; | 1731 | if (!device_info) |
| 1700 | device_info.send_section_size = nvdev->send_section_size; | 1732 | return -ENOMEM; |
| 1701 | device_info.recv_sections = new_rx; | 1733 | |
| 1702 | device_info.recv_section_size = nvdev->recv_section_size; | 1734 | device_info->send_sections = new_tx; |
| 1735 | device_info->recv_sections = new_rx; | ||
| 1703 | 1736 | ||
| 1704 | ret = netvsc_detach(ndev, nvdev); | 1737 | ret = netvsc_detach(ndev, nvdev); |
| 1705 | if (ret) | 1738 | if (ret) |
| 1706 | return ret; | 1739 | goto out; |
| 1707 | 1740 | ||
| 1708 | ret = netvsc_attach(ndev, &device_info); | 1741 | ret = netvsc_attach(ndev, device_info); |
| 1709 | if (ret) { | 1742 | if (ret) { |
| 1710 | device_info.send_sections = orig.tx_pending; | 1743 | device_info->send_sections = orig.tx_pending; |
| 1711 | device_info.recv_sections = orig.rx_pending; | 1744 | device_info->recv_sections = orig.rx_pending; |
| 1712 | 1745 | ||
| 1713 | if (netvsc_attach(ndev, &device_info)) | 1746 | if (netvsc_attach(ndev, device_info)) |
| 1714 | netdev_err(ndev, "restoring ringparam failed"); | 1747 | netdev_err(ndev, "restoring ringparam failed"); |
| 1715 | } | 1748 | } |
| 1716 | 1749 | ||
| 1750 | out: | ||
| 1751 | kfree(device_info); | ||
| 1717 | return ret; | 1752 | return ret; |
| 1718 | } | 1753 | } |
| 1719 | 1754 | ||
| @@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
| 2088 | if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) | 2123 | if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) |
| 2089 | return NOTIFY_DONE; | 2124 | return NOTIFY_DONE; |
| 2090 | 2125 | ||
| 2091 | /* if syntihetic interface is a different namespace, | 2126 | /* if synthetic interface is a different namespace, |
| 2092 | * then move the VF to that namespace; join will be | 2127 | * then move the VF to that namespace; join will be |
| 2093 | * done again in that context. | 2128 | * done again in that context. |
| 2094 | */ | 2129 | */ |
| @@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2167 | { | 2202 | { |
| 2168 | struct net_device *net = NULL; | 2203 | struct net_device *net = NULL; |
| 2169 | struct net_device_context *net_device_ctx; | 2204 | struct net_device_context *net_device_ctx; |
| 2170 | struct netvsc_device_info device_info; | 2205 | struct netvsc_device_info *device_info = NULL; |
| 2171 | struct netvsc_device *nvdev; | 2206 | struct netvsc_device *nvdev; |
| 2172 | int ret = -ENOMEM; | 2207 | int ret = -ENOMEM; |
| 2173 | 2208 | ||
| @@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2214 | netif_set_real_num_rx_queues(net, 1); | 2249 | netif_set_real_num_rx_queues(net, 1); |
| 2215 | 2250 | ||
| 2216 | /* Notify the netvsc driver of the new device */ | 2251 | /* Notify the netvsc driver of the new device */ |
| 2217 | memset(&device_info, 0, sizeof(device_info)); | 2252 | device_info = netvsc_devinfo_get(NULL); |
| 2218 | device_info.num_chn = VRSS_CHANNEL_DEFAULT; | 2253 | |
| 2219 | device_info.send_sections = NETVSC_DEFAULT_TX; | 2254 | if (!device_info) { |
| 2220 | device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; | 2255 | ret = -ENOMEM; |
| 2221 | device_info.recv_sections = NETVSC_DEFAULT_RX; | 2256 | goto devinfo_failed; |
| 2222 | device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; | 2257 | } |
| 2223 | 2258 | ||
| 2224 | nvdev = rndis_filter_device_add(dev, &device_info); | 2259 | nvdev = rndis_filter_device_add(dev, device_info); |
| 2225 | if (IS_ERR(nvdev)) { | 2260 | if (IS_ERR(nvdev)) { |
| 2226 | ret = PTR_ERR(nvdev); | 2261 | ret = PTR_ERR(nvdev); |
| 2227 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); | 2262 | netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); |
| 2228 | goto rndis_failed; | 2263 | goto rndis_failed; |
| 2229 | } | 2264 | } |
| 2230 | 2265 | ||
| 2231 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2266 | memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN); |
| 2232 | 2267 | ||
| 2233 | /* We must get rtnl lock before scheduling nvdev->subchan_work, | 2268 | /* We must get rtnl lock before scheduling nvdev->subchan_work, |
| 2234 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait | 2269 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait |
| @@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2236 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() | 2271 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() |
| 2237 | * -> ... -> device_add() -> ... -> __device_attach() can't get | 2272 | * -> ... -> device_add() -> ... -> __device_attach() can't get |
| 2238 | * the device lock, so all the subchannels can't be processed -- | 2273 | * the device lock, so all the subchannels can't be processed -- |
| 2239 | * finally netvsc_subchan_work() hangs for ever. | 2274 | * finally netvsc_subchan_work() hangs forever. |
| 2240 | */ | 2275 | */ |
| 2241 | rtnl_lock(); | 2276 | rtnl_lock(); |
| 2242 | 2277 | ||
| @@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2266 | 2301 | ||
| 2267 | list_add(&net_device_ctx->list, &netvsc_dev_list); | 2302 | list_add(&net_device_ctx->list, &netvsc_dev_list); |
| 2268 | rtnl_unlock(); | 2303 | rtnl_unlock(); |
| 2304 | |||
| 2305 | kfree(device_info); | ||
| 2269 | return 0; | 2306 | return 0; |
| 2270 | 2307 | ||
| 2271 | register_failed: | 2308 | register_failed: |
| 2272 | rtnl_unlock(); | 2309 | rtnl_unlock(); |
| 2273 | rndis_filter_device_remove(dev, nvdev); | 2310 | rndis_filter_device_remove(dev, nvdev); |
| 2274 | rndis_failed: | 2311 | rndis_failed: |
| 2312 | kfree(device_info); | ||
| 2313 | devinfo_failed: | ||
| 2275 | free_percpu(net_device_ctx->vf_stats); | 2314 | free_percpu(net_device_ctx->vf_stats); |
| 2276 | no_stats: | 2315 | no_stats: |
| 2277 | hv_set_drvdata(dev, NULL); | 2316 | hv_set_drvdata(dev, NULL); |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8b537a049c1e..73b60592de06 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -774,8 +774,8 @@ cleanup: | |||
| 774 | return ret; | 774 | return ret; |
| 775 | } | 775 | } |
| 776 | 776 | ||
| 777 | int rndis_filter_set_rss_param(struct rndis_device *rdev, | 777 | static int rndis_set_rss_param_msg(struct rndis_device *rdev, |
| 778 | const u8 *rss_key) | 778 | const u8 *rss_key, u16 flag) |
| 779 | { | 779 | { |
| 780 | struct net_device *ndev = rdev->ndev; | 780 | struct net_device *ndev = rdev->ndev; |
| 781 | struct rndis_request *request; | 781 | struct rndis_request *request; |
| @@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, | |||
| 804 | rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; | 804 | rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; |
| 805 | rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; | 805 | rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; |
| 806 | rssp->hdr.size = sizeof(struct ndis_recv_scale_param); | 806 | rssp->hdr.size = sizeof(struct ndis_recv_scale_param); |
| 807 | rssp->flag = 0; | 807 | rssp->flag = flag; |
| 808 | rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | | 808 | rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | |
| 809 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | | 809 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | |
| 810 | NDIS_HASH_TCP_IPV6; | 810 | NDIS_HASH_TCP_IPV6; |
| @@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, | |||
| 829 | 829 | ||
| 830 | wait_for_completion(&request->wait_event); | 830 | wait_for_completion(&request->wait_event); |
| 831 | set_complete = &request->response_msg.msg.set_complete; | 831 | set_complete = &request->response_msg.msg.set_complete; |
| 832 | if (set_complete->status == RNDIS_STATUS_SUCCESS) | 832 | if (set_complete->status == RNDIS_STATUS_SUCCESS) { |
| 833 | memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); | 833 | if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) && |
| 834 | else { | 834 | !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) |
| 835 | memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); | ||
| 836 | |||
| 837 | } else { | ||
| 835 | netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", | 838 | netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", |
| 836 | set_complete->status); | 839 | set_complete->status); |
| 837 | ret = -EINVAL; | 840 | ret = -EINVAL; |
| @@ -842,6 +845,16 @@ cleanup: | |||
| 842 | return ret; | 845 | return ret; |
| 843 | } | 846 | } |
| 844 | 847 | ||
| 848 | int rndis_filter_set_rss_param(struct rndis_device *rdev, | ||
| 849 | const u8 *rss_key) | ||
| 850 | { | ||
| 851 | /* Disable RSS before change */ | ||
| 852 | rndis_set_rss_param_msg(rdev, rss_key, | ||
| 853 | NDIS_RSS_PARAM_FLAG_DISABLE_RSS); | ||
| 854 | |||
| 855 | return rndis_set_rss_param_msg(rdev, rss_key, 0); | ||
| 856 | } | ||
| 857 | |||
| 845 | static int rndis_filter_query_device_link_status(struct rndis_device *dev, | 858 | static int rndis_filter_query_device_link_status(struct rndis_device *dev, |
| 846 | struct netvsc_device *net_device) | 859 | struct netvsc_device *net_device) |
| 847 | { | 860 | { |
| @@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) | |||
| 1121 | * This breaks overlap of processing the host message for the | 1134 | * This breaks overlap of processing the host message for the |
| 1122 | * new primary channel with the initialization of sub-channels. | 1135 | * new primary channel with the initialization of sub-channels. |
| 1123 | */ | 1136 | */ |
| 1124 | int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) | 1137 | int rndis_set_subchannel(struct net_device *ndev, |
| 1138 | struct netvsc_device *nvdev, | ||
| 1139 | struct netvsc_device_info *dev_info) | ||
| 1125 | { | 1140 | { |
| 1126 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; | 1141 | struct nvsp_message *init_packet = &nvdev->channel_init_pkt; |
| 1127 | struct net_device_context *ndev_ctx = netdev_priv(ndev); | 1142 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
| @@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) | |||
| 1161 | wait_event(nvdev->subchan_open, | 1176 | wait_event(nvdev->subchan_open, |
| 1162 | atomic_read(&nvdev->open_chn) == nvdev->num_chn); | 1177 | atomic_read(&nvdev->open_chn) == nvdev->num_chn); |
| 1163 | 1178 | ||
| 1164 | /* ignore failues from setting rss parameters, still have channels */ | 1179 | /* ignore failures from setting rss parameters, still have channels */ |
| 1165 | rndis_filter_set_rss_param(rdev, netvsc_hash_key); | 1180 | if (dev_info) |
| 1181 | rndis_filter_set_rss_param(rdev, dev_info->rss_key); | ||
| 1182 | else | ||
| 1183 | rndis_filter_set_rss_param(rdev, netvsc_hash_key); | ||
| 1166 | 1184 | ||
| 1167 | netif_set_real_num_tx_queues(ndev, nvdev->num_chn); | 1185 | netif_set_real_num_tx_queues(ndev, nvdev->num_chn); |
| 1168 | netif_set_real_num_rx_queues(ndev, nvdev->num_chn); | 1186 | netif_set_real_num_rx_queues(ndev, nvdev->num_chn); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index fc726ce4c164..6d067176320f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w) | |||
| 337 | 337 | ||
| 338 | if (src) | 338 | if (src) |
| 339 | dev_put(src->dev); | 339 | dev_put(src->dev); |
| 340 | kfree_skb(skb); | 340 | consume_skb(skb); |
| 341 | } | 341 | } |
| 342 | } | 342 | } |
| 343 | 343 | ||
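The macvlan one-liner above matters for diagnostics rather than correctness: consume_skb() and kfree_skb() both free the buffer, but only kfree_skb() is reported as a drop by drop-monitor and the kfree_skb tracepoint. A small illustration of when each applies:

#include <linux/skbuff.h>

static void finish_skb(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* normal completion, not a drop */
        else
                kfree_skb(skb);         /* genuine drop, counted as one */
}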
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c index 8ebe7f5484ae..f14ba5366b91 100644 --- a/drivers/net/phy/asix.c +++ b/drivers/net/phy/asix.c | |||
| @@ -1,13 +1,7 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* Driver for Asix PHYs | 2 | /* Driver for Asix PHYs |
| 3 | * | 3 | * |
| 4 | * Author: Michael Schmitz <schmitzmic@gmail.com> | 4 | * Author: Michael Schmitz <schmitzmic@gmail.com> |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License as published by the | ||
| 8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 9 | * option) any later version. | ||
| 10 | * | ||
| 11 | */ | 5 | */ |
| 12 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
| 13 | #include <linux/errno.h> | 7 | #include <linux/errno.h> |
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 1b350183bffb..a271239748f2 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c | |||
| @@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = { | |||
| 197 | .phy_id = PHY_ID_BCM8706, | 197 | .phy_id = PHY_ID_BCM8706, |
| 198 | .phy_id_mask = 0xffffffff, | 198 | .phy_id_mask = 0xffffffff, |
| 199 | .name = "Broadcom BCM8706", | 199 | .name = "Broadcom BCM8706", |
| 200 | .features = PHY_10GBIT_FEC_FEATURES, | ||
| 200 | .config_init = bcm87xx_config_init, | 201 | .config_init = bcm87xx_config_init, |
| 201 | .config_aneg = bcm87xx_config_aneg, | 202 | .config_aneg = bcm87xx_config_aneg, |
| 202 | .read_status = bcm87xx_read_status, | 203 | .read_status = bcm87xx_read_status, |
| @@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = { | |||
| 208 | .phy_id = PHY_ID_BCM8727, | 209 | .phy_id = PHY_ID_BCM8727, |
| 209 | .phy_id_mask = 0xffffffff, | 210 | .phy_id_mask = 0xffffffff, |
| 210 | .name = "Broadcom BCM8727", | 211 | .name = "Broadcom BCM8727", |
| 212 | .features = PHY_10GBIT_FEC_FEATURES, | ||
| 211 | .config_init = bcm87xx_config_init, | 213 | .config_init = bcm87xx_config_init, |
| 212 | .config_aneg = bcm87xx_config_aneg, | 214 | .config_aneg = bcm87xx_config_aneg, |
| 213 | .read_status = bcm87xx_read_status, | 215 | .read_status = bcm87xx_read_status, |
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index 8022cd317f62..1a4d04afb7f0 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c | |||
| @@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = { | |||
| 88 | .phy_id = PHY_ID_CS4340, | 88 | .phy_id = PHY_ID_CS4340, |
| 89 | .phy_id_mask = 0xffffffff, | 89 | .phy_id_mask = 0xffffffff, |
| 90 | .name = "Cortina CS4340", | 90 | .name = "Cortina CS4340", |
| 91 | .features = PHY_10GBIT_FEATURES, | ||
| 91 | .config_init = gen10g_config_init, | 92 | .config_init = gen10g_config_init, |
| 92 | .config_aneg = gen10g_config_aneg, | 93 | .config_aneg = gen10g_config_aneg, |
| 93 | .read_status = cortina_read_status, | 94 | .read_status = cortina_read_status, |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a9c7c7f41b0c..2e12f982534f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
| @@ -1046,6 +1046,39 @@ static int m88e1145_config_init(struct phy_device *phydev) | |||
| 1046 | return 0; | 1046 | return 0; |
| 1047 | } | 1047 | } |
| 1048 | 1048 | ||
| 1049 | /* The VOD can be out of specification on link up. Poke an | ||
| 1050 | * undocumented register, in an undocumented page, with a magic value | ||
| 1051 | * to fix this. | ||
| 1052 | */ | ||
| 1053 | static int m88e6390_errata(struct phy_device *phydev) | ||
| 1054 | { | ||
| 1055 | int err; | ||
| 1056 | |||
| 1057 | err = phy_write(phydev, MII_BMCR, | ||
| 1058 | BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); | ||
| 1059 | if (err) | ||
| 1060 | return err; | ||
| 1061 | |||
| 1062 | usleep_range(300, 400); | ||
| 1063 | |||
| 1064 | err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); | ||
| 1065 | if (err) | ||
| 1066 | return err; | ||
| 1067 | |||
| 1068 | return genphy_soft_reset(phydev); | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | static int m88e6390_config_aneg(struct phy_device *phydev) | ||
| 1072 | { | ||
| 1073 | int err; | ||
| 1074 | |||
| 1075 | err = m88e6390_errata(phydev); | ||
| 1076 | if (err) | ||
| 1077 | return err; | ||
| 1078 | |||
| 1079 | return m88e1510_config_aneg(phydev); | ||
| 1080 | } | ||
| 1081 | |||
| 1049 | /** | 1082 | /** |
| 1050 | * fiber_lpa_mod_linkmode_lpa_t | 1083 | * fiber_lpa_mod_linkmode_lpa_t |
| 1051 | * @advertising: the linkmode advertisement settings | 1084 | * @advertising: the linkmode advertisement settings |
| @@ -1402,7 +1435,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, | |||
| 1402 | * before enabling it if !phy_interrupt_is_valid() | 1435 | * before enabling it if !phy_interrupt_is_valid() |
| 1403 | */ | 1436 | */ |
| 1404 | if (!phy_interrupt_is_valid(phydev)) | 1437 | if (!phy_interrupt_is_valid(phydev)) |
| 1405 | phy_read(phydev, MII_M1011_IEVENT); | 1438 | __phy_read(phydev, MII_M1011_IEVENT); |
| 1406 | 1439 | ||
| 1407 | /* Enable the WOL interrupt */ | 1440 | /* Enable the WOL interrupt */ |
| 1408 | err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, | 1441 | err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, |
| @@ -2283,7 +2316,7 @@ static struct phy_driver marvell_drivers[] = { | |||
| 2283 | .features = PHY_GBIT_FEATURES, | 2316 | .features = PHY_GBIT_FEATURES, |
| 2284 | .probe = m88e6390_probe, | 2317 | .probe = m88e6390_probe, |
| 2285 | .config_init = &marvell_config_init, | 2318 | .config_init = &marvell_config_init, |
| 2286 | .config_aneg = &m88e1510_config_aneg, | 2319 | .config_aneg = &m88e6390_config_aneg, |
| 2287 | .read_status = &marvell_read_status, | 2320 | .read_status = &marvell_read_status, |
| 2288 | .ack_interrupt = &marvell_ack_interrupt, | 2321 | .ack_interrupt = &marvell_ack_interrupt, |
| 2289 | .config_intr = &marvell_config_intr, | 2322 | .config_intr = &marvell_config_intr, |
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c index b03fedd6c1d8..287f3ccf1da1 100644 --- a/drivers/net/phy/mdio-hisi-femac.c +++ b/drivers/net/phy/mdio-hisi-femac.c | |||
| @@ -1,20 +1,8 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /* | 2 | /* |
| 2 | * Hisilicon Fast Ethernet MDIO Bus Driver | 3 | * Hisilicon Fast Ethernet MDIO Bus Driver |
| 3 | * | 4 | * |
| 4 | * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. | 5 | * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 18 | */ | 6 | */ |
| 19 | 7 | ||
| 20 | #include <linux/clk.h> | 8 | #include <linux/clk.h> |
| @@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver); | |||
| 163 | 151 | ||
| 164 | MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); | 152 | MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); |
| 165 | MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); | 153 | MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); |
| 166 | MODULE_LICENSE("GPL v2"); | 154 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2e59a8419b17..66b9cfe692fc 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
| @@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | |||
| 390 | if (IS_ERR(gpiod)) { | 390 | if (IS_ERR(gpiod)) { |
| 391 | dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", | 391 | dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", |
| 392 | bus->id); | 392 | bus->id); |
| 393 | device_del(&bus->dev); | ||
| 393 | return PTR_ERR(gpiod); | 394 | return PTR_ERR(gpiod); |
| 394 | } else if (gpiod) { | 395 | } else if (gpiod) { |
| 395 | bus->reset_gpiod = gpiod; | 396 | bus->reset_gpiod = gpiod; |
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index b03bcf2c388a..3ddaf9595697 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c | |||
| @@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = { | |||
| 233 | .name = "Meson GXL Internal PHY", | 233 | .name = "Meson GXL Internal PHY", |
| 234 | .features = PHY_BASIC_FEATURES, | 234 | .features = PHY_BASIC_FEATURES, |
| 235 | .flags = PHY_IS_INTERNAL, | 235 | .flags = PHY_IS_INTERNAL, |
| 236 | .soft_reset = genphy_soft_reset, | ||
| 236 | .config_init = meson_gxl_config_init, | 237 | .config_init = meson_gxl_config_init, |
| 237 | .aneg_done = genphy_aneg_done, | 238 | .aneg_done = genphy_aneg_done, |
| 238 | .read_status = meson_gxl_read_status, | 239 | .read_status = meson_gxl_read_status, |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index c33384710d26..b1f959935f50 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -1070,6 +1070,7 @@ static struct phy_driver ksphy_driver[] = { | |||
| 1070 | .driver_data = &ksz9021_type, | 1070 | .driver_data = &ksz9021_type, |
| 1071 | .probe = kszphy_probe, | 1071 | .probe = kszphy_probe, |
| 1072 | .config_init = ksz9031_config_init, | 1072 | .config_init = ksz9031_config_init, |
| 1073 | .soft_reset = genphy_soft_reset, | ||
| 1073 | .read_status = ksz9031_read_status, | 1074 | .read_status = ksz9031_read_status, |
| 1074 | .ack_interrupt = kszphy_ack_interrupt, | 1075 | .ack_interrupt = kszphy_ack_interrupt, |
| 1075 | .config_intr = kszphy_config_intr, | 1076 | .config_intr = kszphy_config_intr, |
| @@ -1098,6 +1099,7 @@ static struct phy_driver ksphy_driver[] = { | |||
| 1098 | .phy_id = PHY_ID_KSZ8873MLL, | 1099 | .phy_id = PHY_ID_KSZ8873MLL, |
| 1099 | .phy_id_mask = MICREL_PHY_ID_MASK, | 1100 | .phy_id_mask = MICREL_PHY_ID_MASK, |
| 1100 | .name = "Micrel KSZ8873MLL Switch", | 1101 | .name = "Micrel KSZ8873MLL Switch", |
| 1102 | .features = PHY_BASIC_FEATURES, | ||
| 1101 | .config_init = kszphy_config_init, | 1103 | .config_init = kszphy_config_init, |
| 1102 | .config_aneg = ksz8873mll_config_aneg, | 1104 | .config_aneg = ksz8873mll_config_aneg, |
| 1103 | .read_status = ksz8873mll_read_status, | 1105 | .read_status = ksz8873mll_read_status, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d33e7b3caf03..189cd2048c3a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -543,13 +543,6 @@ int phy_start_aneg(struct phy_device *phydev) | |||
| 543 | 543 | ||
| 544 | mutex_lock(&phydev->lock); | 544 | mutex_lock(&phydev->lock); |
| 545 | 545 | ||
| 546 | if (!__phy_is_started(phydev)) { | ||
| 547 | WARN(1, "called from state %s\n", | ||
| 548 | phy_state_to_str(phydev->state)); | ||
| 549 | err = -EBUSY; | ||
| 550 | goto out_unlock; | ||
| 551 | } | ||
| 552 | |||
| 553 | if (AUTONEG_DISABLE == phydev->autoneg) | 546 | if (AUTONEG_DISABLE == phydev->autoneg) |
| 554 | phy_sanitize_settings(phydev); | 547 | phy_sanitize_settings(phydev); |
| 555 | 548 | ||
| @@ -560,11 +553,13 @@ int phy_start_aneg(struct phy_device *phydev) | |||
| 560 | if (err < 0) | 553 | if (err < 0) |
| 561 | goto out_unlock; | 554 | goto out_unlock; |
| 562 | 555 | ||
| 563 | if (phydev->autoneg == AUTONEG_ENABLE) { | 556 | if (__phy_is_started(phydev)) { |
| 564 | err = phy_check_link_status(phydev); | 557 | if (phydev->autoneg == AUTONEG_ENABLE) { |
| 565 | } else { | 558 | err = phy_check_link_status(phydev); |
| 566 | phydev->state = PHY_FORCING; | 559 | } else { |
| 567 | phydev->link_timeout = PHY_FORCE_TIMEOUT; | 560 | phydev->state = PHY_FORCING; |
| 561 | phydev->link_timeout = PHY_FORCE_TIMEOUT; | ||
| 562 | } | ||
| 568 | } | 563 | } |
| 569 | 564 | ||
| 570 | out_unlock: | 565 | out_unlock: |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 51990002d495..46c86725a693 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features); | |||
| 61 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; | 61 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
| 62 | EXPORT_SYMBOL_GPL(phy_10gbit_features); | 62 | EXPORT_SYMBOL_GPL(phy_10gbit_features); |
| 63 | 63 | ||
| 64 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; | ||
| 65 | EXPORT_SYMBOL_GPL(phy_10gbit_fec_features); | ||
| 66 | |||
| 64 | static const int phy_basic_ports_array[] = { | 67 | static const int phy_basic_ports_array[] = { |
| 65 | ETHTOOL_LINK_MODE_Autoneg_BIT, | 68 | ETHTOOL_LINK_MODE_Autoneg_BIT, |
| 66 | ETHTOOL_LINK_MODE_TP_BIT, | 69 | ETHTOOL_LINK_MODE_TP_BIT, |
| @@ -109,6 +112,11 @@ const int phy_10gbit_features_array[1] = { | |||
| 109 | }; | 112 | }; |
| 110 | EXPORT_SYMBOL_GPL(phy_10gbit_features_array); | 113 | EXPORT_SYMBOL_GPL(phy_10gbit_features_array); |
| 111 | 114 | ||
| 115 | const int phy_10gbit_fec_features_array[1] = { | ||
| 116 | ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, | ||
| 117 | }; | ||
| 118 | EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); | ||
| 119 | |||
| 112 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; | 120 | __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
| 113 | EXPORT_SYMBOL_GPL(phy_10gbit_full_features); | 121 | EXPORT_SYMBOL_GPL(phy_10gbit_full_features); |
| 114 | 122 | ||
| @@ -191,6 +199,10 @@ static void features_init(void) | |||
| 191 | linkmode_set_bit_array(phy_10gbit_full_features_array, | 199 | linkmode_set_bit_array(phy_10gbit_full_features_array, |
| 192 | ARRAY_SIZE(phy_10gbit_full_features_array), | 200 | ARRAY_SIZE(phy_10gbit_full_features_array), |
| 193 | phy_10gbit_full_features); | 201 | phy_10gbit_full_features); |
| 202 | /* 10G FEC only */ | ||
| 203 | linkmode_set_bit_array(phy_10gbit_fec_features_array, | ||
| 204 | ARRAY_SIZE(phy_10gbit_fec_features_array), | ||
| 205 | phy_10gbit_fec_features); | ||
| 194 | } | 206 | } |
| 195 | 207 | ||
| 196 | void phy_device_free(struct phy_device *phydev) | 208 | void phy_device_free(struct phy_device *phydev) |
| @@ -2243,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) | |||
| 2243 | { | 2255 | { |
| 2244 | int retval; | 2256 | int retval; |
| 2245 | 2257 | ||
| 2258 | if (WARN_ON(!new_driver->features)) { | ||
| 2259 | pr_err("%s: Driver features are missing\n", new_driver->name); | ||
| 2260 | return -EINVAL; | ||
| 2261 | } | ||
| 2262 | |||
| 2246 | new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; | 2263 | new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; |
| 2247 | new_driver->mdiodrv.driver.name = new_driver->name; | 2264 | new_driver->mdiodrv.driver.name = new_driver->name; |
| 2248 | new_driver->mdiodrv.driver.bus = &mdio_bus_type; | 2265 | new_driver->mdiodrv.driver.bus = &mdio_bus_type; |
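With the WARN_ON() added to phy_driver_register() above, a phy_driver entry without a .features mask is now rejected at registration time, which is why the other hunks in this diff add explicit masks to bcm87xx, cortina, micrel and teranetics. A skeleton of what a conforming entry looks like; the IDs are made up, and the callbacks are the generic 10G helpers already referenced elsewhere in this diff.

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_10g_driver[] = {
{
        .phy_id         = 0x00112233,           /* placeholder OUI/model */
        .phy_id_mask    = 0xffffffff,
        .name           = "Example 10G PHY",
        .features       = PHY_10GBIT_FEATURES,  /* now mandatory */
        .soft_reset     = gen10g_no_soft_reset,
        .config_init    = gen10g_config_init,
        .config_aneg    = gen10g_config_aneg,
} };

module_phy_driver(example_10g_driver);

MODULE_DESCRIPTION("Example PHY driver skeleton");
MODULE_LICENSE("GPL");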
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c index f1da70b9b55f..95abf7072f32 100644 --- a/drivers/net/phy/rockchip.c +++ b/drivers/net/phy/rockchip.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /** | 2 | /** |
| 2 | * drivers/net/phy/rockchip.c | 3 | * drivers/net/phy/rockchip.c |
| 3 | * | 4 | * |
| @@ -6,12 +7,6 @@ | |||
| 6 | * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd | 7 | * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd |
| 7 | * | 8 | * |
| 8 | * David Wu <david.wu@rock-chips.com> | 9 | * David Wu <david.wu@rock-chips.com> |
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License as published by | ||
| 12 | * the Free Software Foundation; either version 2 of the License, or | ||
| 13 | * (at your option) any later version. | ||
| 14 | * | ||
| 15 | */ | 10 | */ |
| 16 | 11 | ||
| 17 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
| @@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl); | |||
| 229 | 224 | ||
| 230 | MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); | 225 | MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); |
| 231 | MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); | 226 | MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); |
| 232 | MODULE_LICENSE("GPL v2"); | 227 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 22f3bdd8206c..91247182bc52 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c | |||
| @@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = { | |||
| 80 | .phy_id = PHY_ID_TN2020, | 80 | .phy_id = PHY_ID_TN2020, |
| 81 | .phy_id_mask = 0xffffffff, | 81 | .phy_id_mask = 0xffffffff, |
| 82 | .name = "Teranetics TN2020", | 82 | .name = "Teranetics TN2020", |
| 83 | .features = PHY_10GBIT_FEATURES, | ||
| 83 | .soft_reset = gen10g_no_soft_reset, | 84 | .soft_reset = gen10g_no_soft_reset, |
| 84 | .aneg_done = teranetics_aneg_done, | 85 | .aneg_done = teranetics_aneg_done, |
| 85 | .config_init = gen10g_config_init, | 86 | .config_init = gen10g_config_init, |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 62dc564b251d..f22639f0116a 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
| @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 445 | if (pskb_trim_rcsum(skb, len)) | 445 | if (pskb_trim_rcsum(skb, len)) |
| 446 | goto drop; | 446 | goto drop; |
| 447 | 447 | ||
| 448 | ph = pppoe_hdr(skb); | ||
| 448 | pn = pppoe_pernet(dev_net(dev)); | 449 | pn = pppoe_pernet(dev_net(dev)); |
| 449 | 450 | ||
| 450 | /* Note that get_item does a sock_hold(), so sk_pppox(po) | 451 | /* Note that get_item does a sock_hold(), so sk_pppox(po) |
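The pppoe fix above re-reads the header pointer because pskb_trim_rcsum() can reallocate the skb head, leaving any pointer computed before the call dangling. The general rule, sketched with minimal error handling:

#include <linux/if_pppox.h>
#include <linux/skbuff.h>

static int handle_pppoe(struct sk_buff *skb, unsigned int len)
{
        struct pppoe_hdr *ph;

        if (pskb_trim_rcsum(skb, len))
                return -1;              /* drop on trim failure */

        /* Re-read after any helper that may move the skb head
         * (pskb_trim_rcsum(), pskb_may_pull(), ...).
         */
        ph = pppoe_hdr(skb);
        return ph->code;
}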
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a4fdad475594..18656c4094b3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
| 856 | err = 0; | 856 | err = 0; |
| 857 | } | 857 | } |
| 858 | 858 | ||
| 859 | rcu_assign_pointer(tfile->tun, tun); | ||
| 860 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | ||
| 861 | tun->numqueues++; | ||
| 862 | |||
| 863 | if (tfile->detached) { | 859 | if (tfile->detached) { |
| 864 | tun_enable_queue(tfile); | 860 | tun_enable_queue(tfile); |
| 865 | } else { | 861 | } else { |
| @@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
| 876 | * refcnt. | 872 | * refcnt. |
| 877 | */ | 873 | */ |
| 878 | 874 | ||
| 875 | /* Publish tfile->tun and tun->tfiles only after we've fully | ||
| 876 | * initialized tfile; otherwise we risk using half-initialized | ||
| 877 | * object. | ||
| 878 | */ | ||
| 879 | rcu_assign_pointer(tfile->tun, tun); | ||
| 880 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | ||
| 881 | tun->numqueues++; | ||
| 879 | out: | 882 | out: |
| 880 | return err; | 883 | return err; |
| 881 | } | 884 | } |
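The tun change above delays rcu_assign_pointer() until the queue file is fully initialised, so a lockless reader that dereferences the pointer can never observe a half-built object. A toy version of that publish-after-init rule with invented types:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_item {
        int a;
        int b;
};

static struct example_item __rcu *example_global;

static int example_publish(void)
{
        struct example_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
                return -ENOMEM;

        it->a = 1;                      /* initialise everything first ... */
        it->b = 2;
        rcu_assign_pointer(example_global, it); /* ... then publish */
        return 0;
}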
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 57f1c94fca0b..820a2fe7d027 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c | |||
| @@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = { | |||
| 1287 | 1287 | ||
| 1288 | #undef ASIX112_DESC | 1288 | #undef ASIX112_DESC |
| 1289 | 1289 | ||
| 1290 | static const struct driver_info trendnet_info = { | ||
| 1291 | .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", | ||
| 1292 | .bind = aqc111_bind, | ||
| 1293 | .unbind = aqc111_unbind, | ||
| 1294 | .status = aqc111_status, | ||
| 1295 | .link_reset = aqc111_link_reset, | ||
| 1296 | .reset = aqc111_reset, | ||
| 1297 | .stop = aqc111_stop, | ||
| 1298 | .flags = FLAG_ETHER | FLAG_FRAMING_AX | | ||
| 1299 | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, | ||
| 1300 | .rx_fixup = aqc111_rx_fixup, | ||
| 1301 | .tx_fixup = aqc111_tx_fixup, | ||
| 1302 | }; | ||
| 1303 | |||
| 1290 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) | 1304 | static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) |
| 1291 | { | 1305 | { |
| 1292 | struct usbnet *dev = usb_get_intfdata(intf); | 1306 | struct usbnet *dev = usb_get_intfdata(intf); |
| @@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = { | |||
| 1440 | {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, | 1454 | {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, |
| 1441 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, | 1455 | {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, |
| 1442 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, | 1456 | {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, |
| 1457 | {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, | ||
| 1443 | { },/* END */ | 1458 | { },/* END */ |
| 1444 | }; | 1459 | }; |
| 1445 | MODULE_DEVICE_TABLE(usb, products); | 1460 | MODULE_DEVICE_TABLE(usb, products); |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b654f05b2ccd..3d93993e74da 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
| @@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 739 | asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); | 739 | asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); |
| 740 | chipcode &= AX_CHIPCODE_MASK; | 740 | chipcode &= AX_CHIPCODE_MASK; |
| 741 | 741 | ||
| 742 | (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : | 742 | ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : |
| 743 | ax88772a_hw_reset(dev, 0); | 743 | ax88772a_hw_reset(dev, 0); |
| 744 | |||
| 745 | if (ret < 0) { | ||
| 746 | netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret); | ||
| 747 | return ret; | ||
| 748 | } | ||
| 744 | 749 | ||
| 745 | /* Read PHYID register *AFTER* the PHY was reset properly */ | 750 | /* Read PHYID register *AFTER* the PHY was reset properly */ |
| 746 | phyid = asix_get_phyid(dev); | 751 | phyid = asix_get_phyid(dev); |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index b3b3c05903a1..5512a1038721 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 179 | * probed with) and a slave/data interface; union | 179 | * probed with) and a slave/data interface; union |
| 180 | * descriptors sort this all out. | 180 | * descriptors sort this all out. |
| 181 | */ | 181 | */ |
| 182 | info->control = usb_ifnum_to_if(dev->udev, | 182 | info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0); |
| 183 | info->u->bMasterInterface0); | 183 | info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0); |
| 184 | info->data = usb_ifnum_to_if(dev->udev, | ||
| 185 | info->u->bSlaveInterface0); | ||
| 186 | if (!info->control || !info->data) { | 184 | if (!info->control || !info->data) { |
| 187 | dev_dbg(&intf->dev, | 185 | dev_dbg(&intf->dev, |
| 188 | "master #%u/%p slave #%u/%p\n", | 186 | "master #%u/%p slave #%u/%p\n", |
| @@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 216 | /* a data interface altsetting does the real i/o */ | 214 | /* a data interface altsetting does the real i/o */ |
| 217 | d = &info->data->cur_altsetting->desc; | 215 | d = &info->data->cur_altsetting->desc; |
| 218 | if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { | 216 | if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { |
| 219 | dev_dbg(&intf->dev, "slave class %u\n", | 217 | dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass); |
| 220 | d->bInterfaceClass); | ||
| 221 | goto bad_desc; | 218 | goto bad_desc; |
| 222 | } | 219 | } |
| 223 | skip: | 220 | skip: |
| 224 | if ( rndis && | 221 | if (rndis && header.usb_cdc_acm_descriptor && |
| 225 | header.usb_cdc_acm_descriptor && | 222 | header.usb_cdc_acm_descriptor->bmCapabilities) { |
| 226 | header.usb_cdc_acm_descriptor->bmCapabilities) { | 223 | dev_dbg(&intf->dev, |
| 227 | dev_dbg(&intf->dev, | 224 | "ACM capabilities %02x, not really RNDIS?\n", |
| 228 | "ACM capabilities %02x, not really RNDIS?\n", | 225 | header.usb_cdc_acm_descriptor->bmCapabilities); |
| 229 | header.usb_cdc_acm_descriptor->bmCapabilities); | 226 | goto bad_desc; |
| 230 | goto bad_desc; | ||
| 231 | } | 227 | } |
| 232 | 228 | ||
| 233 | if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { | 229 | if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { |
| @@ -238,7 +234,7 @@ skip: | |||
| 238 | } | 234 | } |
| 239 | 235 | ||
| 240 | if (header.usb_cdc_mdlm_desc && | 236 | if (header.usb_cdc_mdlm_desc && |
| 241 | memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { | 237 | memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { |
| 242 | dev_dbg(&intf->dev, "GUID doesn't match\n"); | 238 | dev_dbg(&intf->dev, "GUID doesn't match\n"); |
| 243 | goto bad_desc; | 239 | goto bad_desc; |
| 244 | } | 240 | } |
| @@ -302,7 +298,7 @@ skip: | |||
| 302 | if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { | 298 | if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { |
| 303 | struct usb_endpoint_descriptor *desc; | 299 | struct usb_endpoint_descriptor *desc; |
| 304 | 300 | ||
| 305 | dev->status = &info->control->cur_altsetting->endpoint [0]; | 301 | dev->status = &info->control->cur_altsetting->endpoint[0]; |
| 306 | desc = &dev->status->desc; | 302 | desc = &dev->status->desc; |
| 307 | if (!usb_endpoint_is_int_in(desc) || | 303 | if (!usb_endpoint_is_int_in(desc) || |
| 308 | (le16_to_cpu(desc->wMaxPacketSize) | 304 | (le16_to_cpu(desc->wMaxPacketSize) |
| @@ -847,6 +843,14 @@ static const struct usb_device_id products[] = { | |||
| 847 | .driver_info = 0, | 843 | .driver_info = 0, |
| 848 | }, | 844 | }, |
| 849 | 845 | ||
| 846 | /* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */ | ||
| 847 | { | ||
| 848 | USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM, | ||
| 849 | USB_CDC_SUBCLASS_ETHERNET, | ||
| 850 | USB_CDC_PROTO_NONE), | ||
| 851 | .driver_info = 0, | ||
| 852 | }, | ||
| 853 | |||
| 850 | /* WHITELIST!!! | 854 | /* WHITELIST!!! |
| 851 | * | 855 | * |
| 852 | * CDC Ether uses two interfaces, not necessarily consecutive. | 856 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 774e1ff01c9a..735ad838e2ba 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev) | |||
| 123 | dev->addr_len = 0; | 123 | dev->addr_len = 0; |
| 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 124 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
| 125 | dev->netdev_ops = &qmimux_netdev_ops; | 125 | dev->netdev_ops = &qmimux_netdev_ops; |
| 126 | dev->mtu = 1500; | ||
| 126 | dev->needs_free_netdev = true; | 127 | dev->needs_free_netdev = true; |
| 127 | } | 128 | } |
| 128 | 129 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 023725086046..8fadd8eaf601 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, | |||
| 1330 | return stats.packets; | 1330 | return stats.packets; |
| 1331 | } | 1331 | } |
| 1332 | 1332 | ||
| 1333 | static void free_old_xmit_skbs(struct send_queue *sq) | 1333 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
| 1334 | { | 1334 | { |
| 1335 | struct sk_buff *skb; | 1335 | struct sk_buff *skb; |
| 1336 | unsigned int len; | 1336 | unsigned int len; |
| @@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) | |||
| 1343 | bytes += skb->len; | 1343 | bytes += skb->len; |
| 1344 | packets++; | 1344 | packets++; |
| 1345 | 1345 | ||
| 1346 | dev_consume_skb_any(skb); | 1346 | napi_consume_skb(skb, in_napi); |
| 1347 | } | 1347 | } |
| 1348 | 1348 | ||
| 1349 | /* Avoid overhead when no packets have been processed | 1349 | /* Avoid overhead when no packets have been processed |
| @@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) | |||
| 1369 | return; | 1369 | return; |
| 1370 | 1370 | ||
| 1371 | if (__netif_tx_trylock(txq)) { | 1371 | if (__netif_tx_trylock(txq)) { |
| 1372 | free_old_xmit_skbs(sq); | 1372 | free_old_xmit_skbs(sq, true); |
| 1373 | __netif_tx_unlock(txq); | 1373 | __netif_tx_unlock(txq); |
| 1374 | } | 1374 | } |
| 1375 | 1375 | ||
| @@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) | |||
| 1445 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); | 1445 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); |
| 1446 | 1446 | ||
| 1447 | __netif_tx_lock(txq, raw_smp_processor_id()); | 1447 | __netif_tx_lock(txq, raw_smp_processor_id()); |
| 1448 | free_old_xmit_skbs(sq); | 1448 | free_old_xmit_skbs(sq, true); |
| 1449 | __netif_tx_unlock(txq); | 1449 | __netif_tx_unlock(txq); |
| 1450 | 1450 | ||
| 1451 | virtqueue_napi_complete(napi, sq->vq, 0); | 1451 | virtqueue_napi_complete(napi, sq->vq, 0); |
| @@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1514 | bool use_napi = sq->napi.weight; | 1514 | bool use_napi = sq->napi.weight; |
| 1515 | 1515 | ||
| 1516 | /* Free up any pending old buffers before queueing new ones. */ | 1516 | /* Free up any pending old buffers before queueing new ones. */ |
| 1517 | free_old_xmit_skbs(sq); | 1517 | free_old_xmit_skbs(sq, false); |
| 1518 | 1518 | ||
| 1519 | if (use_napi && kick) | 1519 | if (use_napi && kick) |
| 1520 | virtqueue_enable_cb_delayed(sq->vq); | 1520 | virtqueue_enable_cb_delayed(sq->vq); |
| @@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1557 | if (!use_napi && | 1557 | if (!use_napi && |
| 1558 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { | 1558 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
| 1559 | /* More just got used, free them then recheck. */ | 1559 | /* More just got used, free them then recheck. */ |
| 1560 | free_old_xmit_skbs(sq); | 1560 | free_old_xmit_skbs(sq, false); |
| 1561 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { | 1561 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
| 1562 | netif_start_subqueue(dev, qnum); | 1562 | netif_start_subqueue(dev, qnum); |
| 1563 | virtqueue_disable_cb(sq->vq); | 1563 | virtqueue_disable_cb(sq->vq); |
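The virtio_net change threads an in_napi flag down to free_old_xmit_skbs() so completed skbs are handed to napi_consume_skb(), which can batch frees when running in NAPI context. A rough userspace model of that immediate-versus-batched freeing decision follows; it is only a sketch, and the names and batch size are made up.

#include <stdlib.h>

#define BATCH 8

/* Tiny cache standing in for the per-cpu skb free batch. */
static void *cache[BATCH];
static int cached;

static void flush_cache(void)
{
        while (cached)
                free(cache[--cached]);
}

/*
 * Toy model of napi_consume_skb(): outside NAPI context the buffer is freed
 * immediately; inside NAPI it is parked in the cache and released in one
 * batch, which is what makes completion processing cheaper.
 */
static void consume_buf(void *buf, int in_napi)
{
        if (!in_napi) {
                free(buf);
                return;
        }
        if (cached == BATCH)
                flush_cache();
        cache[cached++] = buf;
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                consume_buf(malloc(64), 1);     /* NAPI poll path: batched */
        consume_buf(malloc(64), 0);             /* start_xmit path: immediate */
        flush_cache();
        return 0;
}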
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index be6485428198..66d889d54e58 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
| @@ -1056,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = { | |||
| 1056 | .ndo_tx_timeout = uhdlc_tx_timeout, | 1056 | .ndo_tx_timeout = uhdlc_tx_timeout, |
| 1057 | }; | 1057 | }; |
| 1058 | 1058 | ||
| 1059 | static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr) | ||
| 1060 | { | ||
| 1061 | struct device_node *np; | ||
| 1062 | struct platform_device *pdev; | ||
| 1063 | struct resource *res; | ||
| 1064 | static int siram_init_flag; | ||
| 1065 | int ret = 0; | ||
| 1066 | |||
| 1067 | np = of_find_compatible_node(NULL, NULL, name); | ||
| 1068 | if (!np) | ||
| 1069 | return -EINVAL; | ||
| 1070 | |||
| 1071 | pdev = of_find_device_by_node(np); | ||
| 1072 | if (!pdev) { | ||
| 1073 | pr_err("%pOFn: failed to lookup pdev\n", np); | ||
| 1074 | of_node_put(np); | ||
| 1075 | return -EINVAL; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | of_node_put(np); | ||
| 1079 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1080 | if (!res) { | ||
| 1081 | ret = -EINVAL; | ||
| 1082 | goto error_put_device; | ||
| 1083 | } | ||
| 1084 | *ptr = ioremap(res->start, resource_size(res)); | ||
| 1085 | if (!*ptr) { | ||
| 1086 | ret = -ENOMEM; | ||
| 1087 | goto error_put_device; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | /* We've remapped the addresses, and we don't need the device any | ||
| 1091 | * more, so we should release it. | ||
| 1092 | */ | ||
| 1093 | put_device(&pdev->dev); | ||
| 1094 | |||
| 1095 | if (init_flag && siram_init_flag == 0) { | ||
| 1096 | memset_io(*ptr, 0, resource_size(res)); | ||
| 1097 | siram_init_flag = 1; | ||
| 1098 | } | ||
| 1099 | return 0; | ||
| 1100 | |||
| 1101 | error_put_device: | ||
| 1102 | put_device(&pdev->dev); | ||
| 1103 | |||
| 1104 | return ret; | ||
| 1105 | } | ||
| 1106 | |||
| 1059 | static int ucc_hdlc_probe(struct platform_device *pdev) | 1107 | static int ucc_hdlc_probe(struct platform_device *pdev) |
| 1060 | { | 1108 | { |
| 1061 | struct device_node *np = pdev->dev.of_node; | 1109 | struct device_node *np = pdev->dev.of_node; |
| @@ -1150,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
| 1150 | ret = ucc_of_parse_tdm(np, utdm, ut_info); | 1198 | ret = ucc_of_parse_tdm(np, utdm, ut_info); |
| 1151 | if (ret) | 1199 | if (ret) |
| 1152 | goto free_utdm; | 1200 | goto free_utdm; |
| 1201 | |||
| 1202 | ret = hdlc_map_iomem("fsl,t1040-qe-si", 0, | ||
| 1203 | (void __iomem **)&utdm->si_regs); | ||
| 1204 | if (ret) | ||
| 1205 | goto free_utdm; | ||
| 1206 | ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1, | ||
| 1207 | (void __iomem **)&utdm->siram); | ||
| 1208 | if (ret) | ||
| 1209 | goto unmap_si_regs; | ||
| 1153 | } | 1210 | } |
| 1154 | 1211 | ||
| 1155 | if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) | 1212 | if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) |
| @@ -1158,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
| 1158 | ret = uhdlc_init(uhdlc_priv); | 1215 | ret = uhdlc_init(uhdlc_priv); |
| 1159 | if (ret) { | 1216 | if (ret) { |
| 1160 | dev_err(&pdev->dev, "Failed to init uhdlc\n"); | 1217 | dev_err(&pdev->dev, "Failed to init uhdlc\n"); |
| 1161 | goto free_utdm; | 1218 | goto undo_uhdlc_init; |
| 1162 | } | 1219 | } |
| 1163 | 1220 | ||
| 1164 | dev = alloc_hdlcdev(uhdlc_priv); | 1221 | dev = alloc_hdlcdev(uhdlc_priv); |
| @@ -1187,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) | |||
| 1187 | free_dev: | 1244 | free_dev: |
| 1188 | free_netdev(dev); | 1245 | free_netdev(dev); |
| 1189 | undo_uhdlc_init: | 1246 | undo_uhdlc_init: |
| 1247 | iounmap(utdm->siram); | ||
| 1248 | unmap_si_regs: | ||
| 1249 | iounmap(utdm->si_regs); | ||
| 1190 | free_utdm: | 1250 | free_utdm: |
| 1191 | if (uhdlc_priv->tsa) | 1251 | if (uhdlc_priv->tsa) |
| 1192 | kfree(utdm); | 1252 | kfree(utdm); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 3a4b8786f7ea..320edcac4699 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2761 | BIT(NL80211_CHAN_WIDTH_160); | 2761 | BIT(NL80211_CHAN_WIDTH_160); |
| 2762 | } | 2762 | } |
| 2763 | 2763 | ||
| 2764 | if (!n_limits) { | ||
| 2765 | err = -EINVAL; | ||
| 2766 | goto failed_hw; | ||
| 2767 | } | ||
| 2768 | |||
| 2764 | data->if_combination.n_limits = n_limits; | 2769 | data->if_combination.n_limits = n_limits; |
| 2765 | data->if_combination.max_interfaces = 2048; | 2770 | data->if_combination.max_interfaces = 2048; |
| 2766 | data->if_combination.limits = data->if_limits; | 2771 | data->if_combination.limits = data->if_limits; |
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 64b218699656..3a93e4d9828b 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c | |||
| @@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, | |||
| 530 | SET_NETDEV_DEV(dev, &priv->lowerdev->dev); | 530 | SET_NETDEV_DEV(dev, &priv->lowerdev->dev); |
| 531 | dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); | 531 | dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); |
| 532 | 532 | ||
| 533 | if (!dev->ieee80211_ptr) | 533 | if (!dev->ieee80211_ptr) { |
| 534 | err = -ENOMEM; | ||
| 534 | goto remove_handler; | 535 | goto remove_handler; |
| 536 | } | ||
| 535 | 537 | ||
| 536 | dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; | 538 | dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; |
| 537 | dev->ieee80211_ptr->wiphy = common_wiphy; | 539 | dev->ieee80211_ptr->wiphy = common_wiphy; |
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 0cf58cabc9ed..3cf50274fadb 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c | |||
| @@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev) | |||
| 26 | struct nvdimm_drvdata *ndd; | 26 | struct nvdimm_drvdata *ndd; |
| 27 | int rc; | 27 | int rc; |
| 28 | 28 | ||
| 29 | rc = nvdimm_security_setup_events(dev); | ||
| 30 | if (rc < 0) { | ||
| 31 | dev_err(dev, "security event setup failed: %d\n", rc); | ||
| 32 | return rc; | ||
| 33 | } | ||
| 34 | |||
| 29 | rc = nvdimm_check_config_data(dev); | 35 | rc = nvdimm_check_config_data(dev); |
| 30 | if (rc) { | 36 | if (rc) { |
| 31 | /* not required for non-aliased nvdimm, ex. NVDIMM-N */ | 37 | /* not required for non-aliased nvdimm, ex. NVDIMM-N */ |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 553aa78abeee..91b9abbf689c 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
| @@ -585,13 +585,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, | |||
| 585 | } | 585 | } |
| 586 | EXPORT_SYMBOL_GPL(__nvdimm_create); | 586 | EXPORT_SYMBOL_GPL(__nvdimm_create); |
| 587 | 587 | ||
| 588 | int nvdimm_security_setup_events(struct nvdimm *nvdimm) | 588 | static void shutdown_security_notify(void *data) |
| 589 | { | 589 | { |
| 590 | nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd, | 590 | struct nvdimm *nvdimm = data; |
| 591 | "security"); | 591 | |
| 592 | sysfs_put(nvdimm->sec.overwrite_state); | ||
| 593 | } | ||
| 594 | |||
| 595 | int nvdimm_security_setup_events(struct device *dev) | ||
| 596 | { | ||
| 597 | struct nvdimm *nvdimm = to_nvdimm(dev); | ||
| 598 | |||
| 599 | if (nvdimm->sec.state < 0 || !nvdimm->sec.ops | ||
| 600 | || !nvdimm->sec.ops->overwrite) | ||
| 601 | return 0; | ||
| 602 | nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security"); | ||
| 592 | if (!nvdimm->sec.overwrite_state) | 603 | if (!nvdimm->sec.overwrite_state) |
| 593 | return -ENODEV; | 604 | return -ENOMEM; |
| 594 | return 0; | 605 | |
| 606 | return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm); | ||
| 595 | } | 607 | } |
| 596 | EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); | 608 | EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); |
| 597 | 609 | ||
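The dimm_devs.c hunk swaps manual teardown of the "security" sysfs dirent reference for devm_add_action_or_reset(), so the reference is dropped automatically when the device goes away, and immediately if registering the action fails. The standalone sketch below imitates that add-action-or-reset pattern in plain C; all names are invented and it is not the nvdimm code.

#include <stdio.h>
#include <stdlib.h>

/* One deferred cleanup action; a tiny stand-in for the devm action list. */
struct cleanup_action {
        void (*fn)(void *data);
        void *data;
        struct cleanup_action *next;
};

static struct cleanup_action *actions;

/*
 * Analogue of devm_add_action_or_reset(): if registering the action fails,
 * the action runs immediately, so the caller never has to undo by hand.
 */
static int add_action_or_reset(void (*fn)(void *), void *data)
{
        struct cleanup_action *a = malloc(sizeof(*a));

        if (!a) {
                fn(data);               /* the "or_reset" half */
                return -1;
        }
        a->fn = fn;
        a->data = data;
        a->next = actions;
        actions = a;
        return 0;
}

/* Analogue of device teardown: run every registered action in reverse order. */
static void release_all(void)
{
        while (actions) {
                struct cleanup_action *a = actions;

                actions = a->next;
                a->fn(a->data);
                free(a);
        }
}

static void drop_reference(void *data)
{
        printf("dropping reference on %s\n", (const char *)data);
}

int main(void)
{
        add_action_or_reset(drop_reference, "security sysfs dirent");
        release_all();
        return 0;
}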
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 2b2cf4e554d3..e5ffd5733540 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
| @@ -54,12 +54,12 @@ struct nvdimm { | |||
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | static inline enum nvdimm_security_state nvdimm_security_state( | 56 | static inline enum nvdimm_security_state nvdimm_security_state( |
| 57 | struct nvdimm *nvdimm, bool master) | 57 | struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) |
| 58 | { | 58 | { |
| 59 | if (!nvdimm->sec.ops) | 59 | if (!nvdimm->sec.ops) |
| 60 | return -ENXIO; | 60 | return -ENXIO; |
| 61 | 61 | ||
| 62 | return nvdimm->sec.ops->state(nvdimm, master); | 62 | return nvdimm->sec.ops->state(nvdimm, ptype); |
| 63 | } | 63 | } |
| 64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); | 64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); |
| 65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) | 65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) |
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index cfde992684e7..379bf4305e61 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
| @@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, | |||
| 250 | void nvdimm_set_aliasing(struct device *dev); | 250 | void nvdimm_set_aliasing(struct device *dev); |
| 251 | void nvdimm_set_locked(struct device *dev); | 251 | void nvdimm_set_locked(struct device *dev); |
| 252 | void nvdimm_clear_locked(struct device *dev); | 252 | void nvdimm_clear_locked(struct device *dev); |
| 253 | int nvdimm_security_setup_events(struct device *dev); | ||
| 253 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) | 254 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) |
| 254 | int nvdimm_security_unlock(struct device *dev); | 255 | int nvdimm_security_unlock(struct device *dev); |
| 255 | #else | 256 | #else |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index df4b3a6db51b..b9fff3b8ed1b 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
| @@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
| 545 | timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); | 545 | timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); |
| 546 | ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + | 546 | ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + |
| 547 | ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); | 547 | ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); |
| 548 | if (!(ctrl->anacap & (1 << 6))) | 548 | ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); |
| 549 | ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); | ||
| 550 | 549 | ||
| 551 | if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { | 550 | if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { |
| 552 | dev_err(ctrl->device, | 551 | dev_err(ctrl->device, |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index deb1a66bf117..9bc585415d9b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) | |||
| 2041 | return ret; | 2041 | return ret; |
| 2042 | } | 2042 | } |
| 2043 | 2043 | ||
| 2044 | /* irq_queues covers admin queue */ | ||
| 2044 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | 2045 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) |
| 2045 | { | 2046 | { |
| 2046 | unsigned int this_w_queues = write_queues; | 2047 | unsigned int this_w_queues = write_queues; |
| 2047 | 2048 | ||
| 2049 | WARN_ON(!irq_queues); | ||
| 2050 | |||
| 2048 | /* | 2051 | /* |
| 2049 | * Setup read/write queue split | 2052 | * Setup read/write queue split, assign admin queue one independent |
| 2053 | * irq vector if irq_queues is > 1. | ||
| 2050 | */ | 2054 | */ |
| 2051 | if (irq_queues == 1) { | 2055 | if (irq_queues <= 2) { |
| 2052 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; | 2056 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
| 2053 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2057 | dev->io_queues[HCTX_TYPE_READ] = 0; |
| 2054 | return; | 2058 | return; |
| @@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | |||
| 2056 | 2060 | ||
| 2057 | /* | 2061 | /* |
| 2058 | * If 'write_queues' is set, ensure it leaves room for at least | 2062 | * If 'write_queues' is set, ensure it leaves room for at least |
| 2059 | * one read queue | 2063 | * one read queue and one admin queue |
| 2060 | */ | 2064 | */ |
| 2061 | if (this_w_queues >= irq_queues) | 2065 | if (this_w_queues >= irq_queues) |
| 2062 | this_w_queues = irq_queues - 1; | 2066 | this_w_queues = irq_queues - 2; |
| 2063 | 2067 | ||
| 2064 | /* | 2068 | /* |
| 2065 | * If 'write_queues' is set to zero, reads and writes will share | 2069 | * If 'write_queues' is set to zero, reads and writes will share |
| 2066 | * a queue set. | 2070 | * a queue set. |
| 2067 | */ | 2071 | */ |
| 2068 | if (!this_w_queues) { | 2072 | if (!this_w_queues) { |
| 2069 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; | 2073 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; |
| 2070 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2074 | dev->io_queues[HCTX_TYPE_READ] = 0; |
| 2071 | } else { | 2075 | } else { |
| 2072 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; | 2076 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; |
| 2073 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; | 2077 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; |
| 2074 | } | 2078 | } |
| 2075 | } | 2079 | } |
| 2076 | 2080 | ||
| @@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
| 2095 | this_p_queues = nr_io_queues - 1; | 2099 | this_p_queues = nr_io_queues - 1; |
| 2096 | irq_queues = 1; | 2100 | irq_queues = 1; |
| 2097 | } else { | 2101 | } else { |
| 2098 | irq_queues = nr_io_queues - this_p_queues; | 2102 | irq_queues = nr_io_queues - this_p_queues + 1; |
| 2099 | } | 2103 | } |
| 2100 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; | 2104 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; |
| 2101 | 2105 | ||
| @@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
| 2115 | * If we got a failure and we're down to asking for just | 2119 | * If we got a failure and we're down to asking for just |
| 2116 | * 1 + 1 queues, just ask for a single vector. We'll share | 2120 | * 1 + 1 queues, just ask for a single vector. We'll share |
| 2117 | * that between the single IO queue and the admin queue. | 2121 | * that between the single IO queue and the admin queue. |
| 2122 | * Otherwise, we assign one independent vector to admin queue. | ||
| 2118 | */ | 2123 | */ |
| 2119 | if (result >= 0 && irq_queues > 1) | 2124 | if (irq_queues > 1) |
| 2120 | irq_queues = irq_sets[0] + irq_sets[1] + 1; | 2125 | irq_queues = irq_sets[0] + irq_sets[1] + 1; |
| 2121 | 2126 | ||
| 2122 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, | 2127 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, |
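The nvme/pci.c change makes irq_queues account for the admin queue, which now gets its own vector whenever more than two vectors are available. The small program below models the resulting read/write split arithmetic for a couple of sample inputs; it is a simplified sketch of nvme_calc_io_queues(), not the driver function.

#include <stdio.h>

/*
 * Simplified model of the patched nvme_calc_io_queues(): irq_queues now
 * includes the admin queue, which gets its own vector whenever more than
 * two vectors are available. This is arithmetic only, not the driver code.
 */
static void calc_io_queues(unsigned int irq_queues, unsigned int write_queues,
                           unsigned int *ndefault, unsigned int *nread)
{
        if (irq_queues <= 2) {
                *ndefault = 1;          /* single vector shared with admin */
                *nread = 0;
                return;
        }

        /* Leave room for at least one read queue and the admin queue. */
        if (write_queues >= irq_queues)
                write_queues = irq_queues - 2;

        if (!write_queues) {
                *ndefault = irq_queues - 1;     /* reads and writes share */
                *nread = 0;
        } else {
                *ndefault = write_queues;
                *nread = irq_queues - write_queues - 1;
        }
}

int main(void)
{
        unsigned int d, r;

        calc_io_queues(8, 0, &d, &r);
        printf("8 vectors, write_queues=0 -> default=%u read=%u\n", d, r);
        calc_io_queues(8, 3, &d, &r);
        printf("8 vectors, write_queues=3 -> default=%u read=%u\n", d, r);
        calc_io_queues(2, 0, &d, &r);
        printf("2 vectors, write_queues=0 -> default=%u read=%u\n", d, r);
        return 0;
}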
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0a2fd2949ad7..52abc3a6de12 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
| @@ -119,6 +119,7 @@ struct nvme_rdma_ctrl { | |||
| 119 | 119 | ||
| 120 | struct nvme_ctrl ctrl; | 120 | struct nvme_ctrl ctrl; |
| 121 | bool use_inline_data; | 121 | bool use_inline_data; |
| 122 | u32 io_queues[HCTX_MAX_TYPES]; | ||
| 122 | }; | 123 | }; |
| 123 | 124 | ||
| 124 | static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) | 125 | static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) |
| @@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) | |||
| 165 | static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) | 166 | static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) |
| 166 | { | 167 | { |
| 167 | return nvme_rdma_queue_idx(queue) > | 168 | return nvme_rdma_queue_idx(queue) > |
| 168 | queue->ctrl->ctrl.opts->nr_io_queues + | 169 | queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + |
| 169 | queue->ctrl->ctrl.opts->nr_write_queues; | 170 | queue->ctrl->io_queues[HCTX_TYPE_READ]; |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 172 | static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) | 173 | static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) |
| @@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
| 661 | nr_io_queues = min_t(unsigned int, nr_io_queues, | 662 | nr_io_queues = min_t(unsigned int, nr_io_queues, |
| 662 | ibdev->num_comp_vectors); | 663 | ibdev->num_comp_vectors); |
| 663 | 664 | ||
| 664 | nr_io_queues += min(opts->nr_write_queues, num_online_cpus()); | 665 | if (opts->nr_write_queues) { |
| 665 | nr_io_queues += min(opts->nr_poll_queues, num_online_cpus()); | 666 | ctrl->io_queues[HCTX_TYPE_DEFAULT] = |
| 667 | min(opts->nr_write_queues, nr_io_queues); | ||
| 668 | nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT]; | ||
| 669 | } else { | ||
| 670 | ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues; | ||
| 671 | } | ||
| 672 | |||
| 673 | ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues; | ||
| 674 | |||
| 675 | if (opts->nr_poll_queues) { | ||
| 676 | ctrl->io_queues[HCTX_TYPE_POLL] = | ||
| 677 | min(opts->nr_poll_queues, num_online_cpus()); | ||
| 678 | nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL]; | ||
| 679 | } | ||
| 666 | 680 | ||
| 667 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | 681 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); |
| 668 | if (ret) | 682 | if (ret) |
| @@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return | |||
| 1689 | nvme_rdma_timeout(struct request *rq, bool reserved) | 1703 | nvme_rdma_timeout(struct request *rq, bool reserved) |
| 1690 | { | 1704 | { |
| 1691 | struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); | 1705 | struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); |
| 1706 | struct nvme_rdma_queue *queue = req->queue; | ||
| 1707 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; | ||
| 1692 | 1708 | ||
| 1693 | dev_warn(req->queue->ctrl->ctrl.device, | 1709 | dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", |
| 1694 | "I/O %d QID %d timeout, reset controller\n", | 1710 | rq->tag, nvme_rdma_queue_idx(queue)); |
| 1695 | rq->tag, nvme_rdma_queue_idx(req->queue)); | ||
| 1696 | 1711 | ||
| 1697 | /* queue error recovery */ | 1712 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) { |
| 1698 | nvme_rdma_error_recovery(req->queue->ctrl); | 1713 | /* |
| 1714 | * Teardown immediately if controller times out while starting | ||
| 1715 | * or we are already started error recovery. all outstanding | ||
| 1716 | * requests are completed on shutdown, so we return BLK_EH_DONE. | ||
| 1717 | */ | ||
| 1718 | flush_work(&ctrl->err_work); | ||
| 1719 | nvme_rdma_teardown_io_queues(ctrl, false); | ||
| 1720 | nvme_rdma_teardown_admin_queue(ctrl, false); | ||
| 1721 | return BLK_EH_DONE; | ||
| 1722 | } | ||
| 1699 | 1723 | ||
| 1700 | /* fail with DNR on cmd timeout */ | 1724 | dev_warn(ctrl->ctrl.device, "starting error recovery\n"); |
| 1701 | nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; | 1725 | nvme_rdma_error_recovery(ctrl); |
| 1702 | 1726 | ||
| 1703 | return BLK_EH_DONE; | 1727 | return BLK_EH_RESET_TIMER; |
| 1704 | } | 1728 | } |
| 1705 | 1729 | ||
| 1706 | static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | 1730 | static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
| @@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) | |||
| 1779 | struct nvme_rdma_ctrl *ctrl = set->driver_data; | 1803 | struct nvme_rdma_ctrl *ctrl = set->driver_data; |
| 1780 | 1804 | ||
| 1781 | set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; | 1805 | set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; |
| 1782 | set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues; | 1806 | set->map[HCTX_TYPE_DEFAULT].nr_queues = |
| 1807 | ctrl->io_queues[HCTX_TYPE_DEFAULT]; | ||
| 1808 | set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ]; | ||
| 1783 | if (ctrl->ctrl.opts->nr_write_queues) { | 1809 | if (ctrl->ctrl.opts->nr_write_queues) { |
| 1784 | /* separate read/write queues */ | 1810 | /* separate read/write queues */ |
| 1785 | set->map[HCTX_TYPE_DEFAULT].nr_queues = | ||
| 1786 | ctrl->ctrl.opts->nr_write_queues; | ||
| 1787 | set->map[HCTX_TYPE_READ].queue_offset = | 1811 | set->map[HCTX_TYPE_READ].queue_offset = |
| 1788 | ctrl->ctrl.opts->nr_write_queues; | 1812 | ctrl->io_queues[HCTX_TYPE_DEFAULT]; |
| 1789 | } else { | 1813 | } else { |
| 1790 | /* mixed read/write queues */ | 1814 | /* mixed read/write queues */ |
| 1791 | set->map[HCTX_TYPE_DEFAULT].nr_queues = | ||
| 1792 | ctrl->ctrl.opts->nr_io_queues; | ||
| 1793 | set->map[HCTX_TYPE_READ].queue_offset = 0; | 1815 | set->map[HCTX_TYPE_READ].queue_offset = 0; |
| 1794 | } | 1816 | } |
| 1795 | blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], | 1817 | blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], |
| @@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) | |||
| 1799 | 1821 | ||
| 1800 | if (ctrl->ctrl.opts->nr_poll_queues) { | 1822 | if (ctrl->ctrl.opts->nr_poll_queues) { |
| 1801 | set->map[HCTX_TYPE_POLL].nr_queues = | 1823 | set->map[HCTX_TYPE_POLL].nr_queues = |
| 1802 | ctrl->ctrl.opts->nr_poll_queues; | 1824 | ctrl->io_queues[HCTX_TYPE_POLL]; |
| 1803 | set->map[HCTX_TYPE_POLL].queue_offset = | 1825 | set->map[HCTX_TYPE_POLL].queue_offset = |
| 1804 | ctrl->ctrl.opts->nr_io_queues; | 1826 | ctrl->io_queues[HCTX_TYPE_DEFAULT]; |
| 1805 | if (ctrl->ctrl.opts->nr_write_queues) | 1827 | if (ctrl->ctrl.opts->nr_write_queues) |
| 1806 | set->map[HCTX_TYPE_POLL].queue_offset += | 1828 | set->map[HCTX_TYPE_POLL].queue_offset += |
| 1807 | ctrl->ctrl.opts->nr_write_queues; | 1829 | ctrl->io_queues[HCTX_TYPE_READ]; |
| 1808 | blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); | 1830 | blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); |
| 1809 | } | 1831 | } |
| 1810 | return 0; | 1832 | return 0; |
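The rdma.c change sizes and offsets the blk-mq queue maps from the controller's per-type io_queues[] counts instead of the raw connect options, laying the default, read, and poll sets out back to back. The tiny example below just works through that offset arithmetic with invented counts; it is not the driver code.

#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, TYPE_MAX };

/*
 * Simplified model of the patched nvme_rdma_map_queues(): the per-type
 * queue counts come from a ctrl->io_queues[]-style array, and each map's
 * queue_offset is the sum of the sets laid out before it.
 */
int main(void)
{
        unsigned int io_queues[TYPE_MAX] = { 4, 4, 2 };
        unsigned int offset[TYPE_MAX];

        offset[TYPE_DEFAULT] = 0;
        offset[TYPE_READ] = io_queues[TYPE_DEFAULT];
        offset[TYPE_POLL] = io_queues[TYPE_DEFAULT] + io_queues[TYPE_READ];

        for (int t = 0; t < TYPE_MAX; t++)
                printf("type %d: %u queues at offset %u\n",
                       t, io_queues[t], offset[t]);
        return 0;
}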
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 265a0543b381..5f0a00425242 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c | |||
| @@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved) | |||
| 1948 | struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; | 1948 | struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; |
| 1949 | struct nvme_tcp_cmd_pdu *pdu = req->pdu; | 1949 | struct nvme_tcp_cmd_pdu *pdu = req->pdu; |
| 1950 | 1950 | ||
| 1951 | dev_dbg(ctrl->ctrl.device, | 1951 | dev_warn(ctrl->ctrl.device, |
| 1952 | "queue %d: timeout request %#x type %d\n", | 1952 | "queue %d: timeout request %#x type %d\n", |
| 1953 | nvme_tcp_queue_id(req->queue), rq->tag, | 1953 | nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); |
| 1954 | pdu->hdr.type); | ||
| 1955 | 1954 | ||
| 1956 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) { | 1955 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) { |
| 1957 | union nvme_result res = {}; | 1956 | /* |
| 1958 | 1957 | * Teardown immediately if controller times out while starting | |
| 1959 | nvme_req(rq)->flags |= NVME_REQ_CANCELLED; | 1958 | * or we have already started error recovery. All outstanding |
| 1960 | nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res); | 1959 | * requests are completed on shutdown, so we return BLK_EH_DONE. |
| 1960 | */ | ||
| 1961 | flush_work(&ctrl->err_work); | ||
| 1962 | nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); | ||
| 1963 | nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); | ||
| 1961 | return BLK_EH_DONE; | 1964 | return BLK_EH_DONE; |
| 1962 | } | 1965 | } |
| 1963 | 1966 | ||
| 1964 | /* queue error recovery */ | 1967 | dev_warn(ctrl->ctrl.device, "starting error recovery\n"); |
| 1965 | nvme_tcp_error_recovery(&ctrl->ctrl); | 1968 | nvme_tcp_error_recovery(&ctrl->ctrl); |
| 1966 | 1969 | ||
| 1967 | return BLK_EH_RESET_TIMER; | 1970 | return BLK_EH_RESET_TIMER; |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a8d23eb80192..a884e3a0e8af 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
| @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); | |||
| 139 | static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); | 139 | static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); |
| 140 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); | 140 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); |
| 141 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); | 141 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); |
| 142 | static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, | ||
| 143 | struct nvmet_rdma_rsp *r); | ||
| 144 | static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, | ||
| 145 | struct nvmet_rdma_rsp *r); | ||
| 142 | 146 | ||
| 143 | static const struct nvmet_fabrics_ops nvmet_rdma_ops; | 147 | static const struct nvmet_fabrics_ops nvmet_rdma_ops; |
| 144 | 148 | ||
| @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) | |||
| 182 | spin_unlock_irqrestore(&queue->rsps_lock, flags); | 186 | spin_unlock_irqrestore(&queue->rsps_lock, flags); |
| 183 | 187 | ||
| 184 | if (unlikely(!rsp)) { | 188 | if (unlikely(!rsp)) { |
| 185 | rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); | 189 | int ret; |
| 190 | |||
| 191 | rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); | ||
| 186 | if (unlikely(!rsp)) | 192 | if (unlikely(!rsp)) |
| 187 | return NULL; | 193 | return NULL; |
| 194 | ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); | ||
| 195 | if (unlikely(ret)) { | ||
| 196 | kfree(rsp); | ||
| 197 | return NULL; | ||
| 198 | } | ||
| 199 | |||
| 188 | rsp->allocated = true; | 200 | rsp->allocated = true; |
| 189 | } | 201 | } |
| 190 | 202 | ||
| @@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) | |||
| 197 | unsigned long flags; | 209 | unsigned long flags; |
| 198 | 210 | ||
| 199 | if (unlikely(rsp->allocated)) { | 211 | if (unlikely(rsp->allocated)) { |
| 212 | nvmet_rdma_free_rsp(rsp->queue->dev, rsp); | ||
| 200 | kfree(rsp); | 213 | kfree(rsp); |
| 201 | return; | 214 | return; |
| 202 | } | 215 | } |
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 44b37b202e39..ad0df786fe93 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c | |||
| @@ -1089,7 +1089,7 @@ out: | |||
| 1089 | 1089 | ||
| 1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) | 1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) |
| 1091 | { | 1091 | { |
| 1092 | int result; | 1092 | int result = 0; |
| 1093 | 1093 | ||
| 1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) | 1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) |
| 1095 | return 0; | 1095 | return 0; |
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index a09c1c3cf831..49b16f76d78e 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c | |||
| @@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np) | |||
| 207 | 207 | ||
| 208 | if (!of_node_check_flag(np, OF_OVERLAY)) { | 208 | if (!of_node_check_flag(np, OF_OVERLAY)) { |
| 209 | np->name = __of_get_property(np, "name", NULL); | 209 | np->name = __of_get_property(np, "name", NULL); |
| 210 | np->type = __of_get_property(np, "device_type", NULL); | ||
| 211 | if (!np->name) | 210 | if (!np->name) |
| 212 | np->name = "<NULL>"; | 211 | np->name = "<NULL>"; |
| 213 | if (!np->type) | ||
| 214 | np->type = "<NULL>"; | ||
| 215 | 212 | ||
| 216 | phandle = __of_get_property(np, "phandle", &sz); | 213 | phandle = __of_get_property(np, "phandle", &sz); |
| 217 | if (!phandle) | 214 | if (!phandle) |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7099c652c6a5..9cc1461aac7d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -314,12 +314,8 @@ static bool populate_node(const void *blob, | |||
| 314 | populate_properties(blob, offset, mem, np, pathp, dryrun); | 314 | populate_properties(blob, offset, mem, np, pathp, dryrun); |
| 315 | if (!dryrun) { | 315 | if (!dryrun) { |
| 316 | np->name = of_get_property(np, "name", NULL); | 316 | np->name = of_get_property(np, "name", NULL); |
| 317 | np->type = of_get_property(np, "device_type", NULL); | ||
| 318 | |||
| 319 | if (!np->name) | 317 | if (!np->name) |
| 320 | np->name = "<NULL>"; | 318 | np->name = "<NULL>"; |
| 321 | if (!np->type) | ||
| 322 | np->type = "<NULL>"; | ||
| 323 | } | 319 | } |
| 324 | 320 | ||
| 325 | *pnp = np; | 321 | *pnp = np; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2b5ac43a5690..c423e94baf0f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
| @@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, | |||
| 423 | 423 | ||
| 424 | tchild->parent = target->np; | 424 | tchild->parent = target->np; |
| 425 | tchild->name = __of_get_property(node, "name", NULL); | 425 | tchild->name = __of_get_property(node, "name", NULL); |
| 426 | tchild->type = __of_get_property(node, "device_type", NULL); | ||
| 427 | 426 | ||
| 428 | if (!tchild->name) | 427 | if (!tchild->name) |
| 429 | tchild->name = "<NULL>"; | 428 | tchild->name = "<NULL>"; |
| 430 | if (!tchild->type) | ||
| 431 | tchild->type = "<NULL>"; | ||
| 432 | 429 | ||
| 433 | /* ignore obsolete "linux,phandle" */ | 430 | /* ignore obsolete "linux,phandle" */ |
| 434 | phandle = __of_get_property(node, "phandle", &size); | 431 | phandle = __of_get_property(node, "phandle", &size); |
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index d3185063d369..7eda43c66c91 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c | |||
| @@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
| 155 | dp->parent = parent; | 155 | dp->parent = parent; |
| 156 | 156 | ||
| 157 | dp->name = of_pdt_get_one_property(node, "name"); | 157 | dp->name = of_pdt_get_one_property(node, "name"); |
| 158 | dp->type = of_pdt_get_one_property(node, "device_type"); | ||
| 159 | dp->phandle = node; | 158 | dp->phandle = node; |
| 160 | 159 | ||
| 161 | dp->properties = of_pdt_build_prop_list(node); | 160 | dp->properties = of_pdt_build_prop_list(node); |
diff --git a/drivers/of/property.c b/drivers/of/property.c index 08430031bd28..8631efa1daa1 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, | |||
| 806 | 806 | ||
| 807 | if (!of_device_is_available(remote)) { | 807 | if (!of_device_is_available(remote)) { |
| 808 | pr_debug("not available for remote node\n"); | 808 | pr_debug("not available for remote node\n"); |
| 809 | of_node_put(remote); | ||
| 809 | return NULL; | 810 | return NULL; |
| 810 | } | 811 | } |
| 811 | 812 | ||
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 4310c7a4212e..2ab92409210a 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
| @@ -21,13 +21,14 @@ menuconfig PCI | |||
| 21 | support for PCI-X and the foundations for PCI Express support. | 21 | support for PCI-X and the foundations for PCI Express support. |
| 22 | Say 'Y' here unless you know what you are doing. | 22 | Say 'Y' here unless you know what you are doing. |
| 23 | 23 | ||
| 24 | if PCI | ||
| 25 | |||
| 24 | config PCI_DOMAINS | 26 | config PCI_DOMAINS |
| 25 | bool | 27 | bool |
| 26 | depends on PCI | 28 | depends on PCI |
| 27 | 29 | ||
| 28 | config PCI_DOMAINS_GENERIC | 30 | config PCI_DOMAINS_GENERIC |
| 29 | bool | 31 | bool |
| 30 | depends on PCI | ||
| 31 | select PCI_DOMAINS | 32 | select PCI_DOMAINS |
| 32 | 33 | ||
| 33 | config PCI_SYSCALL | 34 | config PCI_SYSCALL |
| @@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig" | |||
| 37 | 38 | ||
| 38 | config PCI_MSI | 39 | config PCI_MSI |
| 39 | bool "Message Signaled Interrupts (MSI and MSI-X)" | 40 | bool "Message Signaled Interrupts (MSI and MSI-X)" |
| 40 | depends on PCI | ||
| 41 | select GENERIC_MSI_IRQ | 41 | select GENERIC_MSI_IRQ |
| 42 | help | 42 | help |
| 43 | This allows device drivers to enable MSI (Message Signaled | 43 | This allows device drivers to enable MSI (Message Signaled |
| @@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN | |||
| 59 | config PCI_QUIRKS | 59 | config PCI_QUIRKS |
| 60 | default y | 60 | default y |
| 61 | bool "Enable PCI quirk workarounds" if EXPERT | 61 | bool "Enable PCI quirk workarounds" if EXPERT |
| 62 | depends on PCI | ||
| 63 | help | 62 | help |
| 64 | This enables workarounds for various PCI chipset bugs/quirks. | 63 | This enables workarounds for various PCI chipset bugs/quirks. |
| 65 | Disable this only if your target machine is unaffected by PCI | 64 | Disable this only if your target machine is unaffected by PCI |
| @@ -67,7 +66,7 @@ config PCI_QUIRKS | |||
| 67 | 66 | ||
| 68 | config PCI_DEBUG | 67 | config PCI_DEBUG |
| 69 | bool "PCI Debugging" | 68 | bool "PCI Debugging" |
| 70 | depends on PCI && DEBUG_KERNEL | 69 | depends on DEBUG_KERNEL |
| 71 | help | 70 | help |
| 72 | Say Y here if you want the PCI core to produce a bunch of debug | 71 | Say Y here if you want the PCI core to produce a bunch of debug |
| 73 | messages to the system log. Select this if you are having a | 72 | messages to the system log. Select this if you are having a |
| @@ -77,7 +76,6 @@ config PCI_DEBUG | |||
| 77 | 76 | ||
| 78 | config PCI_REALLOC_ENABLE_AUTO | 77 | config PCI_REALLOC_ENABLE_AUTO |
| 79 | bool "Enable PCI resource re-allocation detection" | 78 | bool "Enable PCI resource re-allocation detection" |
| 80 | depends on PCI | ||
| 81 | depends on PCI_IOV | 79 | depends on PCI_IOV |
| 82 | help | 80 | help |
| 83 | Say Y here if you want the PCI core to detect if PCI resource | 81 | Say Y here if you want the PCI core to detect if PCI resource |
| @@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO | |||
| 90 | 88 | ||
| 91 | config PCI_STUB | 89 | config PCI_STUB |
| 92 | tristate "PCI Stub driver" | 90 | tristate "PCI Stub driver" |
| 93 | depends on PCI | ||
| 94 | help | 91 | help |
| 95 | Say Y or M here if you want be able to reserve a PCI device | 92 | Say Y or M here if you want be able to reserve a PCI device |
| 96 | when it is going to be assigned to a guest operating system. | 93 | when it is going to be assigned to a guest operating system. |
| @@ -99,7 +96,6 @@ config PCI_STUB | |||
| 99 | 96 | ||
| 100 | config PCI_PF_STUB | 97 | config PCI_PF_STUB |
| 101 | tristate "PCI PF Stub driver" | 98 | tristate "PCI PF Stub driver" |
| 102 | depends on PCI | ||
| 103 | depends on PCI_IOV | 99 | depends on PCI_IOV |
| 104 | help | 100 | help |
| 105 | Say Y or M here if you want to enable support for devices that | 101 | Say Y or M here if you want to enable support for devices that |
| @@ -111,7 +107,7 @@ config PCI_PF_STUB | |||
| 111 | 107 | ||
| 112 | config XEN_PCIDEV_FRONTEND | 108 | config XEN_PCIDEV_FRONTEND |
| 113 | tristate "Xen PCI Frontend" | 109 | tristate "Xen PCI Frontend" |
| 114 | depends on PCI && X86 && XEN | 110 | depends on X86 && XEN |
| 115 | select PCI_XEN | 111 | select PCI_XEN |
| 116 | select XEN_XENBUS_FRONTEND | 112 | select XEN_XENBUS_FRONTEND |
| 117 | default y | 113 | default y |
| @@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL | |||
| 133 | 129 | ||
| 134 | config PCI_IOV | 130 | config PCI_IOV |
| 135 | bool "PCI IOV support" | 131 | bool "PCI IOV support" |
| 136 | depends on PCI | ||
| 137 | select PCI_ATS | 132 | select PCI_ATS |
| 138 | help | 133 | help |
| 139 | I/O Virtualization is a PCI feature supported by some devices | 134 | I/O Virtualization is a PCI feature supported by some devices |
| @@ -144,7 +139,6 @@ config PCI_IOV | |||
| 144 | 139 | ||
| 145 | config PCI_PRI | 140 | config PCI_PRI |
| 146 | bool "PCI PRI support" | 141 | bool "PCI PRI support" |
| 147 | depends on PCI | ||
| 148 | select PCI_ATS | 142 | select PCI_ATS |
| 149 | help | 143 | help |
| 150 | PRI is the PCI Page Request Interface. It allows PCI devices that are | 144 | PRI is the PCI Page Request Interface. It allows PCI devices that are |
| @@ -154,7 +148,6 @@ config PCI_PRI | |||
| 154 | 148 | ||
| 155 | config PCI_PASID | 149 | config PCI_PASID |
| 156 | bool "PCI PASID support" | 150 | bool "PCI PASID support" |
| 157 | depends on PCI | ||
| 158 | select PCI_ATS | 151 | select PCI_ATS |
| 159 | help | 152 | help |
| 160 | Process Address Space Identifiers (PASIDs) can be used by PCI devices | 153 | Process Address Space Identifiers (PASIDs) can be used by PCI devices |
| @@ -167,7 +160,7 @@ config PCI_PASID | |||
| 167 | 160 | ||
| 168 | config PCI_P2PDMA | 161 | config PCI_P2PDMA |
| 169 | bool "PCI peer-to-peer transfer support" | 162 | bool "PCI peer-to-peer transfer support" |
| 170 | depends on PCI && ZONE_DEVICE | 163 | depends on ZONE_DEVICE |
| 171 | select GENERIC_ALLOCATOR | 164 | select GENERIC_ALLOCATOR |
| 172 | help | 165 | help |
| 173 | Enables drivers to do PCI peer-to-peer transactions to and from | 166 | Enables drivers to do PCI peer-to-peer transactions to and from |
| @@ -184,12 +177,11 @@ config PCI_P2PDMA | |||
| 184 | 177 | ||
| 185 | config PCI_LABEL | 178 | config PCI_LABEL |
| 186 | def_bool y if (DMI || ACPI) | 179 | def_bool y if (DMI || ACPI) |
| 187 | depends on PCI | ||
| 188 | select NLS | 180 | select NLS |
| 189 | 181 | ||
| 190 | config PCI_HYPERV | 182 | config PCI_HYPERV |
| 191 | tristate "Hyper-V PCI Frontend" | 183 | tristate "Hyper-V PCI Frontend" |
| 192 | depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 | 184 | depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 |
| 193 | help | 185 | help |
| 194 | The PCI device frontend driver allows the kernel to import arbitrary | 186 | The PCI device frontend driver allows the kernel to import arbitrary |
| 195 | PCI devices from a PCI backend to support PCI driver domains. | 187 | PCI devices from a PCI backend to support PCI driver domains. |
| @@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig" | |||
| 198 | source "drivers/pci/controller/Kconfig" | 190 | source "drivers/pci/controller/Kconfig" |
| 199 | source "drivers/pci/endpoint/Kconfig" | 191 | source "drivers/pci/endpoint/Kconfig" |
| 200 | source "drivers/pci/switch/Kconfig" | 192 | source "drivers/pci/switch/Kconfig" |
| 193 | |||
| 194 | endif | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7a1c8a09efa5..4c0b47867258 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1168 | const struct irq_affinity *affd) | 1168 | const struct irq_affinity *affd) |
| 1169 | { | 1169 | { |
| 1170 | static const struct irq_affinity msi_default_affd; | 1170 | static const struct irq_affinity msi_default_affd; |
| 1171 | int vecs = -ENOSPC; | 1171 | int msix_vecs = -ENOSPC; |
| 1172 | int msi_vecs = -ENOSPC; | ||
| 1172 | 1173 | ||
| 1173 | if (flags & PCI_IRQ_AFFINITY) { | 1174 | if (flags & PCI_IRQ_AFFINITY) { |
| 1174 | if (!affd) | 1175 | if (!affd) |
| @@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1179 | } | 1180 | } |
| 1180 | 1181 | ||
| 1181 | if (flags & PCI_IRQ_MSIX) { | 1182 | if (flags & PCI_IRQ_MSIX) { |
| 1182 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1183 | msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, |
| 1183 | affd); | 1184 | max_vecs, affd); |
| 1184 | if (vecs > 0) | 1185 | if (msix_vecs > 0) |
| 1185 | return vecs; | 1186 | return msix_vecs; |
| 1186 | } | 1187 | } |
| 1187 | 1188 | ||
| 1188 | if (flags & PCI_IRQ_MSI) { | 1189 | if (flags & PCI_IRQ_MSI) { |
| 1189 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); | 1190 | msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, |
| 1190 | if (vecs > 0) | 1191 | affd); |
| 1191 | return vecs; | 1192 | if (msi_vecs > 0) |
| 1193 | return msi_vecs; | ||
| 1192 | } | 1194 | } |
| 1193 | 1195 | ||
| 1194 | /* use legacy irq if allowed */ | 1196 | /* use legacy irq if allowed */ |
| @@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1199 | } | 1201 | } |
| 1200 | } | 1202 | } |
| 1201 | 1203 | ||
| 1202 | return vecs; | 1204 | if (msix_vecs == -ENOSPC) |
| 1205 | return -ENOSPC; | ||
| 1206 | return msi_vecs; | ||
| 1203 | } | 1207 | } |
| 1204 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); | 1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); |
| 1205 | 1209 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..c25acace7d91 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str) | |||
| 6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { | 6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { |
| 6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); | 6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); |
| 6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { | 6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
| 6198 | disable_acs_redir_param = str + 18; | 6198 | disable_acs_redir_param = |
| 6199 | kstrdup(str + 18, GFP_KERNEL); | ||
| 6199 | } else { | 6200 | } else { |
| 6200 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 6201 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
| 6201 | str); | 6202 | str); |
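The pci.c change duplicates the disable_acs_redir= value with kstrdup() because the boot command line it points into is init data and is freed after boot, leaving a dangling pointer. The userspace sketch below shows the same duplicate-before-the-buffer-goes-away idea; the option name is reused for flavour, everything else is invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *saved_param;

/*
 * Analogue of the pci_setup() fix: the buffer handed to the parser only
 * lives for the duration of the call (like __initdata), so the option value
 * must be duplicated instead of stored as a pointer into it.
 */
static void parse_option(const char *str)
{
        const char *prefix = "disable_acs_redir=";

        if (!strncmp(str, prefix, strlen(prefix)))
                saved_param = strdup(str + strlen(prefix));
}

int main(void)
{
        char cmdline[64];

        snprintf(cmdline, sizeof(cmdline), "disable_acs_redir=0000:01:00.0");
        parse_option(cmdline);

        /* Clobber the original buffer; the duplicated copy is unaffected. */
        memset(cmdline, 0, sizeof(cmdline));
        printf("saved: %s\n", saved_param ? saved_param : "(none)");

        free(saved_param);
        return 0;
}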
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c index 6fd6e07ab345..09a77e556ece 100644 --- a/drivers/phy/qualcomm/phy-ath79-usb.c +++ b/drivers/phy/qualcomm/phy-ath79-usb.c | |||
| @@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy) | |||
| 31 | 31 | ||
| 32 | err = reset_control_deassert(priv->reset); | 32 | err = reset_control_deassert(priv->reset); |
| 33 | if (err && priv->no_suspend_override) | 33 | if (err && priv->no_suspend_override) |
| 34 | reset_control_assert(priv->no_suspend_override); | 34 | reset_control_deassert(priv->no_suspend_override); |
| 35 | 35 | ||
| 36 | return err; | 36 | return err; |
| 37 | } | 37 | } |
| @@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev) | |||
| 69 | if (!priv) | 69 | if (!priv) |
| 70 | return -ENOMEM; | 70 | return -ENOMEM; |
| 71 | 71 | ||
| 72 | priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); | 72 | priv->reset = devm_reset_control_get(&pdev->dev, "phy"); |
| 73 | if (IS_ERR(priv->reset)) | 73 | if (IS_ERR(priv->reset)) |
| 74 | return PTR_ERR(priv->reset); | 74 | return PTR_ERR(priv->reset); |
| 75 | 75 | ||
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index f137e0107764..c4709ed7fb0e 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig | |||
| @@ -82,6 +82,7 @@ config PHY_TI_GMII_SEL | |||
| 82 | default y if TI_CPSW=y | 82 | default y if TI_CPSW=y |
| 83 | depends on TI_CPSW || COMPILE_TEST | 83 | depends on TI_CPSW || COMPILE_TEST |
| 84 | select GENERIC_PHY | 84 | select GENERIC_PHY |
| 85 | select REGMAP | ||
| 85 | default m | 86 | default m |
| 86 | help | 87 | help |
| 87 | This driver supports configuring of the TI CPSW Port mode depending on | 88 | This driver supports configuring of the TI CPSW Port mode depending on |
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 77fdaa551977..a52c5bb35033 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c | |||
| @@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev, | |||
| 204 | 204 | ||
| 205 | if (args->args_count < 1) | 205 | if (args->args_count < 1) |
| 206 | return ERR_PTR(-EINVAL); | 206 | return ERR_PTR(-EINVAL); |
| 207 | if (!priv || !priv->if_phys) | ||
| 208 | return ERR_PTR(-ENODEV); | ||
| 207 | if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && | 209 | if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && |
| 208 | args->args_count < 2) | 210 | args->args_count < 2) |
| 209 | return ERR_PTR(-EINVAL); | 211 | return ERR_PTR(-EINVAL); |
| 210 | if (!priv || !priv->if_phys) | ||
| 211 | return ERR_PTR(-ENODEV); | ||
| 212 | if (phy_id > priv->soc_data->num_ports) | 212 | if (phy_id > priv->soc_data->num_ports) |
| 213 | return ERR_PTR(-EINVAL); | 213 | return ERR_PTR(-EINVAL); |
| 214 | if (phy_id != priv->if_phys[phy_id - 1].id) | 214 | if (phy_id != priv->if_phys[phy_id - 1].id) |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e3b62c2ee8d1..5e2109c54c7c 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL | |||
| 1009 | 1009 | ||
| 1010 | config INTEL_IPS | 1010 | config INTEL_IPS |
| 1011 | tristate "Intel Intelligent Power Sharing" | 1011 | tristate "Intel Intelligent Power Sharing" |
| 1012 | depends on ACPI | 1012 | depends on ACPI && PCI |
| 1013 | ---help--- | 1013 | ---help--- |
| 1014 | Intel Calpella platforms support dynamic power sharing between the | 1014 | Intel Calpella platforms support dynamic power sharing between the |
| 1015 | CPU and GPU, maximizing performance in a given TDP. This driver, | 1015 | CPU and GPU, maximizing performance in a given TDP. This driver, |
| @@ -1135,7 +1135,7 @@ config SAMSUNG_Q10 | |||
| 1135 | 1135 | ||
| 1136 | config APPLE_GMUX | 1136 | config APPLE_GMUX |
| 1137 | tristate "Apple Gmux Driver" | 1137 | tristate "Apple Gmux Driver" |
| 1138 | depends on ACPI | 1138 | depends on ACPI && PCI |
| 1139 | depends on PNP | 1139 | depends on PNP |
| 1140 | depends on BACKLIGHT_CLASS_DEVICE | 1140 | depends on BACKLIGHT_CLASS_DEVICE |
| 1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE | 1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE |
| @@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT | |||
| 1174 | 1174 | ||
| 1175 | config INTEL_PMC_IPC | 1175 | config INTEL_PMC_IPC |
| 1176 | tristate "Intel PMC IPC Driver" | 1176 | tristate "Intel PMC IPC Driver" |
| 1177 | depends on ACPI | 1177 | depends on ACPI && PCI |
| 1178 | ---help--- | 1178 | ---help--- |
| 1179 | This driver provides support for PMC control on some Intel platforms. | 1179 | This driver provides support for PMC control on some Intel platforms. |
| 1180 | The PMC is an ARC processor which defines IPC commands for communication | 1180 | The PMC is an ARC processor which defines IPC commands for communication |
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 797fab33bb98..7cbea796652a 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
| @@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
| 224 | extoff = NULL; | 224 | extoff = NULL; |
| 225 | break; | 225 | break; |
| 226 | } | 226 | } |
| 227 | if (extoff->n_samples > PTP_MAX_SAMPLES) { | 227 | if (extoff->n_samples > PTP_MAX_SAMPLES |
| 228 | || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) { | ||
| 228 | err = -EINVAL; | 229 | err = -EINVAL; |
| 229 | break; | 230 | break; |
| 230 | } | 231 | } |
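The ptp_chardev change rejects requests whose reserved words are non-zero, in addition to bounding n_samples, so the reserved space can later be given meaning without silently changing behaviour for existing binaries. Assuming the object behind extoff is the UAPI struct ptp_sys_offset_extended, a user-space caller now has to zero the structure before filling it in; a minimal, hypothetical sketch:

    #include <linux/ptp_clock.h>
    #include <sys/ioctl.h>
    #include <stdio.h>

    /* hypothetical caller: fd is an already-open /dev/ptpN descriptor */
    static void sample_offsets(int fd)
    {
            struct ptp_sys_offset_extended req = { 0 };  /* rsv[] must be zero */

            req.n_samples = 5;
            if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) < 0)
                    perror("PTP_SYS_OFFSET_EXTENDED");
    }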
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 183fc42a510a..2d7cd344f3bf 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c | |||
| @@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, | |||
| 153 | const bool * ctx, | 153 | const bool * ctx, |
| 154 | struct irq_affinity *desc) | 154 | struct irq_affinity *desc) |
| 155 | { | 155 | { |
| 156 | int i, ret; | 156 | int i, ret, queue_idx = 0; |
| 157 | 157 | ||
| 158 | for (i = 0; i < nvqs; ++i) { | 158 | for (i = 0; i < nvqs; ++i) { |
| 159 | vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], | 159 | if (!names[i]) { |
| 160 | vqs[i] = NULL; | ||
| 161 | continue; | ||
| 162 | } | ||
| 163 | |||
| 164 | vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
| 160 | ctx ? ctx[i] : false); | 165 | ctx ? ctx[i] : false); |
| 161 | if (IS_ERR(vqs[i])) { | 166 | if (IS_ERR(vqs[i])) { |
| 162 | ret = PTR_ERR(vqs[i]); | 167 | ret = PTR_ERR(vqs[i]); |
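The remoteproc find_vqs change (the virtio_ccw hunk below gets the same treatment) lets a virtio driver leave holes in the names[] array: a NULL name means no virtqueue is wanted in that slot, and only named entries consume a device-side queue index. Roughly, the loop now has this shape, with the error unwinding kept as before:

    int i, ret, queue_idx = 0;

    for (i = 0; i < nvqs; ++i) {
            if (!names[i]) {                /* caller skipped this slot */
                    vqs[i] = NULL;
                    continue;
            }

            /* only real queues advance the device-side index */
            vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
                                ctx ? ctx[i] : false);
            if (IS_ERR(vqs[i])) {
                    ret = PTR_ERR(vqs[i]);
                    break;                  /* then unwind already-created vqs */
            }
    }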
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 194ffd5c8580..039b2074db7e 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
| @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
| 60 | 60 | ||
| 61 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | 61 | static void __ref sclp_cpu_change_notify(struct work_struct *work) |
| 62 | { | 62 | { |
| 63 | lock_device_hotplug(); | ||
| 63 | smp_rescan_cpus(); | 64 | smp_rescan_cpus(); |
| 65 | unlock_device_hotplug(); | ||
| 64 | } | 66 | } |
| 65 | 67 | ||
| 66 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | 68 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index fc9dbad476c0..ae1d56da671d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
| @@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 635 | { | 635 | { |
| 636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); | 636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); |
| 637 | unsigned long *indicatorp = NULL; | 637 | unsigned long *indicatorp = NULL; |
| 638 | int ret, i; | 638 | int ret, i, queue_idx = 0; |
| 639 | struct ccw1 *ccw; | 639 | struct ccw1 *ccw; |
| 640 | 640 | ||
| 641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); | 641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
| @@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 643 | return -ENOMEM; | 643 | return -ENOMEM; |
| 644 | 644 | ||
| 645 | for (i = 0; i < nvqs; ++i) { | 645 | for (i = 0; i < nvqs; ++i) { |
| 646 | vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], | 646 | if (!names[i]) { |
| 647 | ctx ? ctx[i] : false, ccw); | 647 | vqs[i] = NULL; |
| 648 | continue; | ||
| 649 | } | ||
| 650 | |||
| 651 | vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], | ||
| 652 | names[i], ctx ? ctx[i] : false, | ||
| 653 | ccw); | ||
| 648 | if (IS_ERR(vqs[i])) { | 654 | if (IS_ERR(vqs[i])) { |
| 649 | ret = PTR_ERR(vqs[i]); | 655 | ret = PTR_ERR(vqs[i]); |
| 650 | vqs[i] = NULL; | 656 | vqs[i] = NULL; |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 634ddb90e7aa..7e56a11836c1 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
| @@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1747 | shost->max_sectors = (shost->sg_tablesize * 8) + 112; | 1747 | shost->max_sectors = (shost->sg_tablesize * 8) + 112; |
| 1748 | } | 1748 | } |
| 1749 | 1749 | ||
| 1750 | error = dma_set_max_seg_size(&pdev->dev, | 1750 | if (aac->adapter_info.options & AAC_OPT_NEW_COMM) |
| 1751 | (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? | 1751 | shost->max_segment_size = shost->max_sectors << 9; |
| 1752 | (shost->max_sectors << 9) : 65536); | 1752 | else |
| 1753 | if (error) | 1753 | shost->max_segment_size = 65536; |
| 1754 | goto out_deinit; | ||
| 1755 | 1754 | ||
| 1756 | /* | 1755 | /* |
| 1757 | * Firmware printf works only with older firmware. | 1756 | * Firmware printf works only with older firmware. |
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c index 8a004036e3d7..9bd2bd8dc2be 100644 --- a/drivers/scsi/csiostor/csio_attr.c +++ b/drivers/scsi/csiostor/csio_attr.c | |||
| @@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable) | |||
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); | 596 | fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); |
| 597 | ln->fc_vport = fc_vport; | ||
| 597 | 598 | ||
| 598 | if (csio_fcoe_alloc_vnp(hw, ln)) | 599 | if (csio_fcoe_alloc_vnp(hw, ln)) |
| 599 | goto error; | 600 | goto error; |
| 600 | 601 | ||
| 601 | *(struct csio_lnode **)fc_vport->dd_data = ln; | 602 | *(struct csio_lnode **)fc_vport->dd_data = ln; |
| 602 | ln->fc_vport = fc_vport; | ||
| 603 | if (!fc_vport->node_name) | 603 | if (!fc_vport->node_name) |
| 604 | fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); | 604 | fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); |
| 605 | if (!fc_vport->port_name) | 605 | if (!fc_vport->port_name) |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 8a20411699d9..75e1273a44b3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
| @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, | |||
| 1144 | } | 1144 | } |
| 1145 | 1145 | ||
| 1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | 1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, |
| 1147 | unsigned int tid, int pg_idx, bool reply) | 1147 | unsigned int tid, int pg_idx) |
| 1148 | { | 1148 | { |
| 1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
| 1150 | GFP_KERNEL); | 1150 | GFP_KERNEL); |
| @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
| 1160 | req = (struct cpl_set_tcb_field *)skb->head; | 1160 | req = (struct cpl_set_tcb_field *)skb->head; |
| 1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
| 1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 1163 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1163 | req->reply = V_NO_REPLY(1); |
| 1164 | req->cpu_idx = 0; | 1164 | req->cpu_idx = 0; |
| 1165 | req->word = htons(31); | 1165 | req->word = htons(31); |
| 1166 | req->mask = cpu_to_be64(0xF0000000); | 1166 | req->mask = cpu_to_be64(0xF0000000); |
| @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
| 1177 | * @tid: connection id | 1177 | * @tid: connection id |
| 1178 | * @hcrc: header digest enabled | 1178 | * @hcrc: header digest enabled |
| 1179 | * @dcrc: data digest enabled | 1179 | * @dcrc: data digest enabled |
| 1180 | * @reply: request reply from h/w | ||
| 1181 | * set up the iscsi digest settings for a connection identified by tid | 1180 | * set up the iscsi digest settings for a connection identified by tid |
| 1182 | */ | 1181 | */ |
| 1183 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 1182 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
| 1184 | int hcrc, int dcrc, int reply) | 1183 | int hcrc, int dcrc) |
| 1185 | { | 1184 | { |
| 1186 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1185 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
| 1187 | GFP_KERNEL); | 1186 | GFP_KERNEL); |
| @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 1197 | req = (struct cpl_set_tcb_field *)skb->head; | 1196 | req = (struct cpl_set_tcb_field *)skb->head; |
| 1198 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1197 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
| 1199 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1198 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 1200 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1199 | req->reply = V_NO_REPLY(1); |
| 1201 | req->cpu_idx = 0; | 1200 | req->cpu_idx = 0; |
| 1202 | req->word = htons(31); | 1201 | req->word = htons(31); |
| 1203 | req->mask = cpu_to_be64(0x0F000000); | 1202 | req->mask = cpu_to_be64(0x0F000000); |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 49f8028ac524..d26f50af00ea 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 1548 | struct cxgbi_sock *csk; | 1548 | struct cxgbi_sock *csk; |
| 1549 | 1549 | ||
| 1550 | csk = lookup_tid(t, tid); | 1550 | csk = lookup_tid(t, tid); |
| 1551 | if (!csk) | 1551 | if (!csk) { |
| 1552 | pr_err("can't find conn. for tid %u.\n", tid); | 1552 | pr_err("can't find conn. for tid %u.\n", tid); |
| 1553 | return; | ||
| 1554 | } | ||
| 1553 | 1555 | ||
| 1554 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 1556 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 1555 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", | 1557 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", |
| 1556 | csk, csk->state, csk->flags, csk->tid, rpl->status); | 1558 | csk, csk->state, csk->flags, csk->tid, rpl->status); |
| 1557 | 1559 | ||
| 1558 | if (rpl->status != CPL_ERR_NONE) | 1560 | if (rpl->status != CPL_ERR_NONE) { |
| 1559 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", | 1561 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", |
| 1560 | csk, tid, rpl->status); | 1562 | csk, tid, rpl->status); |
| 1563 | csk->err = -EINVAL; | ||
| 1564 | } | ||
| 1565 | |||
| 1566 | complete(&csk->cmpl); | ||
| 1561 | 1567 | ||
| 1562 | __kfree_skb(skb); | 1568 | __kfree_skb(skb); |
| 1563 | } | 1569 | } |
| @@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, | |||
| 1983 | } | 1989 | } |
| 1984 | 1990 | ||
| 1985 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | 1991 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, |
| 1986 | int pg_idx, bool reply) | 1992 | int pg_idx) |
| 1987 | { | 1993 | { |
| 1988 | struct sk_buff *skb; | 1994 | struct sk_buff *skb; |
| 1989 | struct cpl_set_tcb_field *req; | 1995 | struct cpl_set_tcb_field *req; |
| @@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
| 1999 | req = (struct cpl_set_tcb_field *)skb->head; | 2005 | req = (struct cpl_set_tcb_field *)skb->head; |
| 2000 | INIT_TP_WR(req, csk->tid); | 2006 | INIT_TP_WR(req, csk->tid); |
| 2001 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | 2007 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); |
| 2002 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2008 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
| 2003 | req->word_cookie = htons(0); | 2009 | req->word_cookie = htons(0); |
| 2004 | req->mask = cpu_to_be64(0x3 << 8); | 2010 | req->mask = cpu_to_be64(0x3 << 8); |
| 2005 | req->val = cpu_to_be64(pg_idx << 8); | 2011 | req->val = cpu_to_be64(pg_idx << 8); |
| @@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2008 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2014 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 2009 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); | 2015 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); |
| 2010 | 2016 | ||
| 2017 | reinit_completion(&csk->cmpl); | ||
| 2011 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2018 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
| 2012 | return 0; | 2019 | wait_for_completion(&csk->cmpl); |
| 2020 | |||
| 2021 | return csk->err; | ||
| 2013 | } | 2022 | } |
| 2014 | 2023 | ||
| 2015 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 2024 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
| 2016 | int hcrc, int dcrc, int reply) | 2025 | int hcrc, int dcrc) |
| 2017 | { | 2026 | { |
| 2018 | struct sk_buff *skb; | 2027 | struct sk_buff *skb; |
| 2019 | struct cpl_set_tcb_field *req; | 2028 | struct cpl_set_tcb_field *req; |
| @@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2031 | req = (struct cpl_set_tcb_field *)skb->head; | 2040 | req = (struct cpl_set_tcb_field *)skb->head; |
| 2032 | INIT_TP_WR(req, tid); | 2041 | INIT_TP_WR(req, tid); |
| 2033 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 2042 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 2034 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2043 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
| 2035 | req->word_cookie = htons(0); | 2044 | req->word_cookie = htons(0); |
| 2036 | req->mask = cpu_to_be64(0x3 << 4); | 2045 | req->mask = cpu_to_be64(0x3 << 4); |
| 2037 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | | 2046 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | |
| @@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2041 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2050 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 2042 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); | 2051 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); |
| 2043 | 2052 | ||
| 2053 | reinit_completion(&csk->cmpl); | ||
| 2044 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2054 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
| 2045 | return 0; | 2055 | wait_for_completion(&csk->cmpl); |
| 2056 | |||
| 2057 | return csk->err; | ||
| 2046 | } | 2058 | } |
| 2047 | 2059 | ||
| 2048 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) | 2060 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 75f876409fb9..245742557c03 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
| @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) | |||
| 573 | skb_queue_head_init(&csk->receive_queue); | 573 | skb_queue_head_init(&csk->receive_queue); |
| 574 | skb_queue_head_init(&csk->write_queue); | 574 | skb_queue_head_init(&csk->write_queue); |
| 575 | timer_setup(&csk->retry_timer, NULL, 0); | 575 | timer_setup(&csk->retry_timer, NULL, 0); |
| 576 | init_completion(&csk->cmpl); | ||
| 576 | rwlock_init(&csk->callback_lock); | 577 | rwlock_init(&csk->callback_lock); |
| 577 | csk->cdev = cdev; | 578 | csk->cdev = cdev; |
| 578 | csk->flags = 0; | 579 | csk->flags = 0; |
| @@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | |||
| 2251 | if (!err && conn->hdrdgst_en) | 2252 | if (!err && conn->hdrdgst_en) |
| 2252 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2253 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
| 2253 | conn->hdrdgst_en, | 2254 | conn->hdrdgst_en, |
| 2254 | conn->datadgst_en, 0); | 2255 | conn->datadgst_en); |
| 2255 | break; | 2256 | break; |
| 2256 | case ISCSI_PARAM_DATADGST_EN: | 2257 | case ISCSI_PARAM_DATADGST_EN: |
| 2257 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2258 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
| 2258 | if (!err && conn->datadgst_en) | 2259 | if (!err && conn->datadgst_en) |
| 2259 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2260 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
| 2260 | conn->hdrdgst_en, | 2261 | conn->hdrdgst_en, |
| 2261 | conn->datadgst_en, 0); | 2262 | conn->datadgst_en); |
| 2262 | break; | 2263 | break; |
| 2263 | case ISCSI_PARAM_MAX_R2T: | 2264 | case ISCSI_PARAM_MAX_R2T: |
| 2264 | return iscsi_tcp_set_max_r2t(conn, buf); | 2265 | return iscsi_tcp_set_max_r2t(conn, buf); |
| @@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, | |||
| 2384 | 2385 | ||
| 2385 | ppm = csk->cdev->cdev2ppm(csk->cdev); | 2386 | ppm = csk->cdev->cdev2ppm(csk->cdev); |
| 2386 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, | 2387 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, |
| 2387 | ppm->tformat.pgsz_idx_dflt, 0); | 2388 | ppm->tformat.pgsz_idx_dflt); |
| 2388 | if (err < 0) | 2389 | if (err < 0) |
| 2389 | return err; | 2390 | return err; |
| 2390 | 2391 | ||
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 5d5d8b50d842..1917ff57651d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
| @@ -149,6 +149,7 @@ struct cxgbi_sock { | |||
| 149 | struct sk_buff_head receive_queue; | 149 | struct sk_buff_head receive_queue; |
| 150 | struct sk_buff_head write_queue; | 150 | struct sk_buff_head write_queue; |
| 151 | struct timer_list retry_timer; | 151 | struct timer_list retry_timer; |
| 152 | struct completion cmpl; | ||
| 152 | int err; | 153 | int err; |
| 153 | rwlock_t callback_lock; | 154 | rwlock_t callback_lock; |
| 154 | void *user_data; | 155 | void *user_data; |
| @@ -490,9 +491,9 @@ struct cxgbi_device { | |||
| 490 | struct cxgbi_ppm *, | 491 | struct cxgbi_ppm *, |
| 491 | struct cxgbi_task_tag_info *); | 492 | struct cxgbi_task_tag_info *); |
| 492 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, | 493 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, |
| 493 | unsigned int, int, int, int); | 494 | unsigned int, int, int); |
| 494 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, | 495 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, |
| 495 | unsigned int, int, bool); | 496 | unsigned int, int); |
| 496 | 497 | ||
| 497 | void (*csk_release_offload_resources)(struct cxgbi_sock *); | 498 | void (*csk_release_offload_resources)(struct cxgbi_sock *); |
| 498 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); | 499 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); |
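Taken together, the cxgb3i/cxgb4i/libcxgbi hunks turn the page-index and digest setup into a synchronous handshake: the cxgb4i requests now ask the hardware for a reply (NO_REPLY_V(0)), do_set_tcb_rpl() records any failure in csk->err and completes csk->cmpl, and the setup routines wait on that completion instead of unconditionally returning 0 (cxgb3i keeps its no-reply behaviour, so its reply argument is simply dropped). The resulting cxgb4i send path, sketched with names from the hunks:

    req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
    /* ... rest of the CPL_SET_TCB_FIELD request built as before ... */

    reinit_completion(&csk->cmpl);
    cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
    wait_for_completion(&csk->cmpl);   /* completed by do_set_tcb_rpl() */

    return csk->err;                   /* -EINVAL if SET_TCB_RPL reported an error */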
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e2420a810e99..c92b3822c408 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | |||
| @@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; | 2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; |
| 2508 | } | 2508 | } |
| 2509 | 2509 | ||
| 2510 | if (hisi_hba->prot_mask) { | ||
| 2511 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
| 2512 | prot_mask); | ||
| 2513 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
| 2514 | } | ||
| 2515 | |||
| 2510 | rc = scsi_add_host(shost, dev); | 2516 | rc = scsi_add_host(shost, dev); |
| 2511 | if (rc) | 2517 | if (rc) |
| 2512 | goto err_out_ha; | 2518 | goto err_out_ha; |
| @@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2519 | if (rc) | 2525 | if (rc) |
| 2520 | goto err_out_register_ha; | 2526 | goto err_out_register_ha; |
| 2521 | 2527 | ||
| 2522 | if (hisi_hba->prot_mask) { | ||
| 2523 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
| 2524 | prot_mask); | ||
| 2525 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
| 2526 | } | ||
| 2527 | |||
| 2528 | scsi_scan_host(shost); | 2528 | scsi_scan_host(shost); |
| 2529 | 2529 | ||
| 2530 | return 0; | 2530 | return 0; |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68b90c4f79a3..1727d0c71b12 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
| @@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
| 576 | shost->max_lun = ~0; | 576 | shost->max_lun = ~0; |
| 577 | shost->max_cmd_len = MAX_COMMAND_SIZE; | 577 | shost->max_cmd_len = MAX_COMMAND_SIZE; |
| 578 | 578 | ||
| 579 | /* turn on DIF support */ | ||
| 580 | scsi_host_set_prot(shost, | ||
| 581 | SHOST_DIF_TYPE1_PROTECTION | | ||
| 582 | SHOST_DIF_TYPE2_PROTECTION | | ||
| 583 | SHOST_DIF_TYPE3_PROTECTION); | ||
| 584 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); | ||
| 585 | |||
| 579 | err = scsi_add_host(shost, &pdev->dev); | 586 | err = scsi_add_host(shost, &pdev->dev); |
| 580 | if (err) | 587 | if (err) |
| 581 | goto err_shost; | 588 | goto err_shost; |
| @@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 663 | goto err_host_alloc; | 670 | goto err_host_alloc; |
| 664 | } | 671 | } |
| 665 | pci_info->hosts[i] = h; | 672 | pci_info->hosts[i] = h; |
| 666 | |||
| 667 | /* turn on DIF support */ | ||
| 668 | scsi_host_set_prot(to_shost(h), | ||
| 669 | SHOST_DIF_TYPE1_PROTECTION | | ||
| 670 | SHOST_DIF_TYPE2_PROTECTION | | ||
| 671 | SHOST_DIF_TYPE3_PROTECTION); | ||
| 672 | scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); | ||
| 673 | } | 673 | } |
| 674 | 674 | ||
| 675 | err = isci_setup_interrupts(pdev); | 675 | err = isci_setup_interrupts(pdev); |
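Both the hisi_sas and isci hunks move the DIF/DIX capability setup ahead of scsi_add_host(), so the protection capabilities are already advertised when the midlayer sets the host up. The resulting probe-time ordering, taken from the isci hunk:

    /* declare protection support first ... */
    scsi_host_set_prot(shost,
                       SHOST_DIF_TYPE1_PROTECTION |
                       SHOST_DIF_TYPE2_PROTECTION |
                       SHOST_DIF_TYPE3_PROTECTION);
    scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

    /* ... then register the host */
    err = scsi_add_host(shost, &pdev->dev);
    if (err)
            goto err_shost;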
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 4c66b19e6199..8c9f79042228 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
| @@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) | |||
| 297 | lport); | 297 | lport); |
| 298 | 298 | ||
| 299 | /* release any threads waiting for the unreg to complete */ | 299 | /* release any threads waiting for the unreg to complete */ |
| 300 | complete(&lport->lport_unreg_done); | 300 | if (lport->vport->localport) |
| 301 | complete(lport->lport_unreg_cmp); | ||
| 301 | } | 302 | } |
| 302 | 303 | ||
| 303 | /* lpfc_nvme_remoteport_delete | 304 | /* lpfc_nvme_remoteport_delete |
| @@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) | |||
| 2545 | */ | 2546 | */ |
| 2546 | void | 2547 | void |
| 2547 | lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, | 2548 | lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, |
| 2548 | struct lpfc_nvme_lport *lport) | 2549 | struct lpfc_nvme_lport *lport, |
| 2550 | struct completion *lport_unreg_cmp) | ||
| 2549 | { | 2551 | { |
| 2550 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 2552 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 2551 | u32 wait_tmo; | 2553 | u32 wait_tmo; |
| @@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, | |||
| 2557 | */ | 2559 | */ |
| 2558 | wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); | 2560 | wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); |
| 2559 | while (true) { | 2561 | while (true) { |
| 2560 | ret = wait_for_completion_timeout(&lport->lport_unreg_done, | 2562 | ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); |
| 2561 | wait_tmo); | ||
| 2562 | if (unlikely(!ret)) { | 2563 | if (unlikely(!ret)) { |
| 2563 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, | 2564 | lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, |
| 2564 | "6176 Lport %p Localport %p wait " | 2565 | "6176 Lport %p Localport %p wait " |
| @@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) | |||
| 2592 | struct lpfc_nvme_lport *lport; | 2593 | struct lpfc_nvme_lport *lport; |
| 2593 | struct lpfc_nvme_ctrl_stat *cstat; | 2594 | struct lpfc_nvme_ctrl_stat *cstat; |
| 2594 | int ret; | 2595 | int ret; |
| 2596 | DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); | ||
| 2595 | 2597 | ||
| 2596 | if (vport->nvmei_support == 0) | 2598 | if (vport->nvmei_support == 0) |
| 2597 | return; | 2599 | return; |
| 2598 | 2600 | ||
| 2599 | localport = vport->localport; | 2601 | localport = vport->localport; |
| 2600 | vport->localport = NULL; | ||
| 2601 | lport = (struct lpfc_nvme_lport *)localport->private; | 2602 | lport = (struct lpfc_nvme_lport *)localport->private; |
| 2602 | cstat = lport->cstat; | 2603 | cstat = lport->cstat; |
| 2603 | 2604 | ||
| @@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) | |||
| 2608 | /* lport's rport list is clear. Unregister | 2609 | /* lport's rport list is clear. Unregister |
| 2609 | * lport and release resources. | 2610 | * lport and release resources. |
| 2610 | */ | 2611 | */ |
| 2611 | init_completion(&lport->lport_unreg_done); | 2612 | lport->lport_unreg_cmp = &lport_unreg_cmp; |
| 2612 | ret = nvme_fc_unregister_localport(localport); | 2613 | ret = nvme_fc_unregister_localport(localport); |
| 2613 | 2614 | ||
| 2614 | /* Wait for completion. This either blocks | 2615 | /* Wait for completion. This either blocks |
| 2615 | * indefinitely or succeeds | 2616 | * indefinitely or succeeds |
| 2616 | */ | 2617 | */ |
| 2617 | lpfc_nvme_lport_unreg_wait(vport, lport); | 2618 | lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); |
| 2619 | vport->localport = NULL; | ||
| 2618 | kfree(cstat); | 2620 | kfree(cstat); |
| 2619 | 2621 | ||
| 2620 | /* Regardless of the unregister upcall response, clear | 2622 | /* Regardless of the unregister upcall response, clear |
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index cfd4719be25c..b234d0298994 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h | |||
| @@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat { | |||
| 50 | /* Declare nvme-based local and remote port definitions. */ | 50 | /* Declare nvme-based local and remote port definitions. */ |
| 51 | struct lpfc_nvme_lport { | 51 | struct lpfc_nvme_lport { |
| 52 | struct lpfc_vport *vport; | 52 | struct lpfc_vport *vport; |
| 53 | struct completion lport_unreg_done; | 53 | struct completion *lport_unreg_cmp; |
| 54 | /* Add stats counters here */ | 54 | /* Add stats counters here */ |
| 55 | struct lpfc_nvme_ctrl_stat *cstat; | 55 | struct lpfc_nvme_ctrl_stat *cstat; |
| 56 | atomic_t fc4NvmeLsRequests; | 56 | atomic_t fc4NvmeLsRequests; |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 6245f442d784..95fee83090eb 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
| @@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) | |||
| 1003 | struct lpfc_nvmet_tgtport *tport = targetport->private; | 1003 | struct lpfc_nvmet_tgtport *tport = targetport->private; |
| 1004 | 1004 | ||
| 1005 | /* release any threads waiting for the unreg to complete */ | 1005 | /* release any threads waiting for the unreg to complete */ |
| 1006 | complete(&tport->tport_unreg_done); | 1006 | if (tport->phba->targetport) |
| 1007 | complete(tport->tport_unreg_cmp); | ||
| 1007 | } | 1008 | } |
| 1008 | 1009 | ||
| 1009 | static void | 1010 | static void |
| @@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) | |||
| 1692 | struct lpfc_nvmet_tgtport *tgtp; | 1693 | struct lpfc_nvmet_tgtport *tgtp; |
| 1693 | struct lpfc_queue *wq; | 1694 | struct lpfc_queue *wq; |
| 1694 | uint32_t qidx; | 1695 | uint32_t qidx; |
| 1696 | DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp); | ||
| 1695 | 1697 | ||
| 1696 | if (phba->nvmet_support == 0) | 1698 | if (phba->nvmet_support == 0) |
| 1697 | return; | 1699 | return; |
| @@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) | |||
| 1701 | wq = phba->sli4_hba.nvme_wq[qidx]; | 1703 | wq = phba->sli4_hba.nvme_wq[qidx]; |
| 1702 | lpfc_nvmet_wqfull_flush(phba, wq, NULL); | 1704 | lpfc_nvmet_wqfull_flush(phba, wq, NULL); |
| 1703 | } | 1705 | } |
| 1704 | init_completion(&tgtp->tport_unreg_done); | 1706 | tgtp->tport_unreg_cmp = &tport_unreg_cmp; |
| 1705 | nvmet_fc_unregister_targetport(phba->targetport); | 1707 | nvmet_fc_unregister_targetport(phba->targetport); |
| 1706 | wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); | 1708 | wait_for_completion_timeout(&tport_unreg_cmp, 5); |
| 1707 | lpfc_nvmet_cleanup_io_context(phba); | 1709 | lpfc_nvmet_cleanup_io_context(phba); |
| 1708 | } | 1710 | } |
| 1709 | phba->targetport = NULL; | 1711 | phba->targetport = NULL; |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 1aaff63f1f41..0ec1082ce7ef 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | /* Used for NVME Target */ | 34 | /* Used for NVME Target */ |
| 35 | struct lpfc_nvmet_tgtport { | 35 | struct lpfc_nvmet_tgtport { |
| 36 | struct lpfc_hba *phba; | 36 | struct lpfc_hba *phba; |
| 37 | struct completion tport_unreg_done; | 37 | struct completion *tport_unreg_cmp; |
| 38 | 38 | ||
| 39 | /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ | 39 | /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ |
| 40 | atomic_t rcv_ls_req_in; | 40 | atomic_t rcv_ls_req_in; |
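The lpfc_nvme and lpfc_nvmet hunks stop embedding the unregister completion in the lport/tgtp private structures: the waiter now owns an on-stack completion, the private structure keeps only a pointer to it, and the delete callback signals it only while the port is still registered, so the wait no longer depends on the lifetime of that private allocation. The core of the pattern, using names from the hunks:

    DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

    lport->lport_unreg_cmp = &lport_unreg_cmp;      /* pointer only, not storage */
    ret = nvme_fc_unregister_localport(localport);
    lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
    vport->localport = NULL;

    /* and in the localport delete callback: */
    if (lport->vport->localport)
            complete(lport->lport_unreg_cmp);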
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 12fd74761ae0..2242e9b3ca12 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -9407,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; | 9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
| 9408 | if (phba->link_flag & LS_LOOPBACK_MODE) | 9408 | if (phba->link_flag & LS_LOOPBACK_MODE) |
| 9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); | 9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); |
| 9410 | /* fall through */ | ||
| 9410 | case CMD_XMIT_SEQUENCE64_CR: | 9411 | case CMD_XMIT_SEQUENCE64_CR: |
| 9411 | /* word3 iocb=io_tag32 wqe=reserved */ | 9412 | /* word3 iocb=io_tag32 wqe=reserved */ |
| 9412 | wqe->xmit_sequence.rsvd3 = 0; | 9413 | wqe->xmit_sequence.rsvd3 = 0; |
| @@ -13528,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
| 13528 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13529 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 13529 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13530 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 13530 | "2537 Receive Frame Truncated!!\n"); | 13531 | "2537 Receive Frame Truncated!!\n"); |
| 13532 | /* fall through */ | ||
| 13531 | case FC_STATUS_RQ_SUCCESS: | 13533 | case FC_STATUS_RQ_SUCCESS: |
| 13532 | spin_lock_irqsave(&phba->hbalock, iflags); | 13534 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13533 | lpfc_sli4_rq_release(hrq, drq); | 13535 | lpfc_sli4_rq_release(hrq, drq); |
| @@ -13937,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 13937 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13939 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 13938 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13940 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 13939 | "6126 Receive Frame Truncated!!\n"); | 13941 | "6126 Receive Frame Truncated!!\n"); |
| 13940 | /* Drop thru */ | 13942 | /* fall through */ |
| 13941 | case FC_STATUS_RQ_SUCCESS: | 13943 | case FC_STATUS_RQ_SUCCESS: |
| 13942 | spin_lock_irqsave(&phba->hbalock, iflags); | 13944 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13943 | lpfc_sli4_rq_release(hrq, drq); | 13945 | lpfc_sli4_rq_release(hrq, drq); |
| @@ -14849,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) | |||
| 14849 | eq->entry_count); | 14851 | eq->entry_count); |
| 14850 | if (eq->entry_count < 256) | 14852 | if (eq->entry_count < 256) |
| 14851 | return -EINVAL; | 14853 | return -EINVAL; |
| 14852 | /* otherwise default to smallest count (drop through) */ | 14854 | /* fall through - otherwise default to smallest count */ |
| 14853 | case 256: | 14855 | case 256: |
| 14854 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | 14856 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 14855 | LPFC_EQ_CNT_256); | 14857 | LPFC_EQ_CNT_256); |
| @@ -14980,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 14980 | LPFC_CQ_CNT_WORD7); | 14982 | LPFC_CQ_CNT_WORD7); |
| 14981 | break; | 14983 | break; |
| 14982 | } | 14984 | } |
| 14983 | /* Fall Thru */ | 14985 | /* fall through */ |
| 14984 | default: | 14986 | default: |
| 14985 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 14987 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 14986 | "0361 Unsupported CQ count: " | 14988 | "0361 Unsupported CQ count: " |
| @@ -14991,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 14991 | status = -EINVAL; | 14993 | status = -EINVAL; |
| 14992 | goto out; | 14994 | goto out; |
| 14993 | } | 14995 | } |
| 14994 | /* otherwise default to smallest count (drop through) */ | 14996 | /* fall through - otherwise default to smallest count */ |
| 14995 | case 256: | 14997 | case 256: |
| 14996 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | 14998 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
| 14997 | LPFC_CQ_CNT_256); | 14999 | LPFC_CQ_CNT_256); |
| @@ -15151,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
| 15151 | LPFC_CQ_CNT_WORD7); | 15153 | LPFC_CQ_CNT_WORD7); |
| 15152 | break; | 15154 | break; |
| 15153 | } | 15155 | } |
| 15154 | /* Fall Thru */ | 15156 | /* fall through */ |
| 15155 | default: | 15157 | default: |
| 15156 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 15158 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 15157 | "3118 Bad CQ count. (%d)\n", | 15159 | "3118 Bad CQ count. (%d)\n", |
| @@ -15160,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
| 15160 | status = -EINVAL; | 15162 | status = -EINVAL; |
| 15161 | goto out; | 15163 | goto out; |
| 15162 | } | 15164 | } |
| 15163 | /* otherwise default to smallest (drop thru) */ | 15165 | /* fall through - otherwise default to smallest */ |
| 15164 | case 256: | 15166 | case 256: |
| 15165 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, | 15167 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, |
| 15166 | &cq_set->u.request, LPFC_CQ_CNT_256); | 15168 | &cq_set->u.request, LPFC_CQ_CNT_256); |
| @@ -15432,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
| 15432 | status = -EINVAL; | 15434 | status = -EINVAL; |
| 15433 | goto out; | 15435 | goto out; |
| 15434 | } | 15436 | } |
| 15435 | /* otherwise default to smallest count (drop through) */ | 15437 | /* fall through - otherwise default to smallest count */ |
| 15436 | case 16: | 15438 | case 16: |
| 15437 | bf_set(lpfc_mq_context_ring_size, | 15439 | bf_set(lpfc_mq_context_ring_size, |
| 15438 | &mq_create_ext->u.request.context, | 15440 | &mq_create_ext->u.request.context, |
| @@ -15851,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
| 15851 | status = -EINVAL; | 15853 | status = -EINVAL; |
| 15852 | goto out; | 15854 | goto out; |
| 15853 | } | 15855 | } |
| 15854 | /* otherwise default to smallest count (drop through) */ | 15856 | /* fall through - otherwise default to smallest count */ |
| 15855 | case 512: | 15857 | case 512: |
| 15856 | bf_set(lpfc_rq_context_rqe_count, | 15858 | bf_set(lpfc_rq_context_rqe_count, |
| 15857 | &rq_create->u.request.context, | 15859 | &rq_create->u.request.context, |
| @@ -15988,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
| 15988 | status = -EINVAL; | 15990 | status = -EINVAL; |
| 15989 | goto out; | 15991 | goto out; |
| 15990 | } | 15992 | } |
| 15991 | /* otherwise default to smallest count (drop through) */ | 15993 | /* fall through - otherwise default to smallest count */ |
| 15992 | case 512: | 15994 | case 512: |
| 15993 | bf_set(lpfc_rq_context_rqe_count, | 15995 | bf_set(lpfc_rq_context_rqe_count, |
| 15994 | &rq_create->u.request.context, | 15996 | &rq_create->u.request.context, |
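The remaining lpfc_sli hunks are annotation only: each intentional switch fall-through gets a comment in the wording that GCC's -Wimplicit-fallthrough recognizes, replacing ad-hoc phrases such as "Drop thru". A generic illustration (the case labels and helpers here are placeholders, not lpfc values):

    switch (count) {
    case BIG:
            if (!big_supported)
                    return -EINVAL;
            /* fall through - otherwise default to smallest count */
    case SMALL:
            use_smallest();
            break;
    }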
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7eaa400f6328..fcbff83c0097 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) | |||
| 6236 | instance->consistent_mask_64bit = true; | 6236 | instance->consistent_mask_64bit = true; |
| 6237 | 6237 | ||
| 6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", | 6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", |
| 6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), | 6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), |
| 6240 | (instance->consistent_mask_64bit ? "63" : "32")); | 6240 | (instance->consistent_mask_64bit ? "63" : "32")); |
| 6241 | 6241 | ||
| 6242 | return 0; | 6242 | return 0; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a9a25f0eaf6f..647f48a28f85 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance) | |||
| 175 | /* | 175 | /* |
| 176 | * Check if it is our interrupt | 176 | * Check if it is our interrupt |
| 177 | */ | 177 | */ |
| 178 | status = readl(®s->outbound_intr_status); | 178 | status = megasas_readl(instance, |
| 179 | ®s->outbound_intr_status); | ||
| 179 | 180 | ||
| 180 | if (status & 1) { | 181 | if (status & 1) { |
| 181 | writel(status, ®s->outbound_intr_status); | 182 | writel(status, ®s->outbound_intr_status); |
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 4c5a3d23e010..084f2fcced0a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
| @@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) | |||
| 657 | if (dev->dev_type == SAS_SATA_DEV) { | 657 | if (dev->dev_type == SAS_SATA_DEV) { |
| 658 | pm8001_device->attached_phy = | 658 | pm8001_device->attached_phy = |
| 659 | dev->rphy->identify.phy_identifier; | 659 | dev->rphy->identify.phy_identifier; |
| 660 | flag = 1; /* directly sata*/ | 660 | flag = 1; /* directly sata */ |
| 661 | } | 661 | } |
| 662 | } /*register this device to HBA*/ | 662 | } /*register this device to HBA*/ |
| 663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); | 663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4da660c1c431..6d6d6013e35b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
| @@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
| 953 | 953 | ||
| 954 | qedi_ep = ep->dd_data; | 954 | qedi_ep = ep->dd_data; |
| 955 | if (qedi_ep->state == EP_STATE_IDLE || | 955 | if (qedi_ep->state == EP_STATE_IDLE || |
| 956 | qedi_ep->state == EP_STATE_OFLDCONN_NONE || | ||
| 956 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) | 957 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) |
| 957 | return -1; | 958 | return -1; |
| 958 | 959 | ||
| @@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 1035 | 1036 | ||
| 1036 | switch (qedi_ep->state) { | 1037 | switch (qedi_ep->state) { |
| 1037 | case EP_STATE_OFLDCONN_START: | 1038 | case EP_STATE_OFLDCONN_START: |
| 1039 | case EP_STATE_OFLDCONN_NONE: | ||
| 1038 | goto ep_release_conn; | 1040 | goto ep_release_conn; |
| 1039 | case EP_STATE_OFLDCONN_FAILED: | 1041 | case EP_STATE_OFLDCONN_FAILED: |
| 1040 | break; | 1042 | break; |
| @@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) | |||
| 1225 | 1227 | ||
| 1226 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { | 1228 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { |
| 1227 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); | 1229 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); |
| 1230 | qedi_ep->state = EP_STATE_OFLDCONN_NONE; | ||
| 1228 | ret = -EIO; | 1231 | ret = -EIO; |
| 1229 | goto set_path_exit; | 1232 | goto set_path_exit; |
| 1230 | } | 1233 | } |
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212f..892d70d54553 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h | |||
| @@ -59,6 +59,7 @@ enum { | |||
| 59 | EP_STATE_OFLDCONN_FAILED = 0x2000, | 59 | EP_STATE_OFLDCONN_FAILED = 0x2000, |
| 60 | EP_STATE_CONNECT_FAILED = 0x4000, | 60 | EP_STATE_CONNECT_FAILED = 0x4000, |
| 61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, | 61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, |
| 62 | EP_STATE_OFLDCONN_NONE = 0x10000, | ||
| 62 | }; | 63 | }; |
| 63 | 64 | ||
| 64 | struct qedi_conn; | 65 | struct qedi_conn; |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index a414f51302b7..6856dfdfa473 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
| @@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 4248 | ha->devnum = devnum; /* specifies microcode load address */ | 4248 | ha->devnum = devnum; /* specifies microcode load address */ |
| 4249 | 4249 | ||
| 4250 | #ifdef QLA_64BIT_PTR | 4250 | #ifdef QLA_64BIT_PTR |
| 4251 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { | 4251 | if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { |
| 4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { | 4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { |
| 4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " | 4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " |
| 4254 | "suitable DMA mask - aborting\n", ha->host_no); | 4254 | "suitable DMA mask - aborting\n", ha->host_no); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 26b93c563f92..d1fc4958222a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host { | |||
| 4394 | uint16_t n2n_id; | 4394 | uint16_t n2n_id; |
| 4395 | struct list_head gpnid_list; | 4395 | struct list_head gpnid_list; |
| 4396 | struct fab_scan scan; | 4396 | struct fab_scan scan; |
| 4397 | |||
| 4398 | unsigned int irq_offset; | ||
| 4397 | } scsi_qla_host_t; | 4399 | } scsi_qla_host_t; |
| 4398 | 4400 | ||
| 4399 | struct qla27xx_image_status { | 4401 | struct qla27xx_image_status { |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 30d3090842f8..8507c43b918c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
| 3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); | 3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); |
| 3447 | } | 3447 | } |
| 3448 | } | 3448 | } |
| 3449 | vha->irq_offset = desc.pre_vectors; | ||
| 3449 | ha->msix_entries = kcalloc(ha->msix_count, | 3450 | ha->msix_entries = kcalloc(ha->msix_count, |
| 3450 | sizeof(struct qla_msix_entry), | 3451 | sizeof(struct qla_msix_entry), |
| 3451 | GFP_KERNEL); | 3452 | GFP_KERNEL); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ea69dafc9774..c6ef83d0d99b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) | |||
| 6939 | if (USER_CTRL_IRQ(vha->hw)) | 6939 | if (USER_CTRL_IRQ(vha->hw)) |
| 6940 | rc = blk_mq_map_queues(qmap); | 6940 | rc = blk_mq_map_queues(qmap); |
| 6941 | else | 6941 | else |
| 6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); | 6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); |
| 6943 | return rc; | 6943 | return rc; |
| 6944 | } | 6944 | } |
| 6945 | 6945 | ||
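The qla2xxx hunks record how many MSI-X vectors precede the queue-pair vectors (desc.pre_vectors, stored as vha->irq_offset) and hand that offset to blk_mq_pci_map_queues(), so blk-mq maps hardware queues starting at the first queue-pair vector rather than at vector 0. The two halves, side by side:

    /* MSI-X setup: remember the reserved leading vectors */
    vha->irq_offset = desc.pre_vectors;

    /* ->map_queues(): skip them when mapping hw queues to CPUs */
    rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);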
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index cfdfcda28072..a77bfb224248 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
| @@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, | |||
| 7232 | 7232 | ||
| 7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, | 7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, |
| 7234 | fw_ddb_entry); | 7234 | fw_ddb_entry); |
| 7235 | if (rc) | ||
| 7236 | goto free_sess; | ||
| 7235 | 7237 | ||
| 7236 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", | 7238 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", |
| 7237 | __func__, fnode_sess->dev.kobj.name); | 7239 | __func__, fnode_sess->dev.kobj.name); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b13cc9288ba0..6d65ac584eba 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) | |||
| 1842 | blk_queue_segment_boundary(q, shost->dma_boundary); | 1842 | blk_queue_segment_boundary(q, shost->dma_boundary); |
| 1843 | dma_set_seg_boundary(dev, shost->dma_boundary); | 1843 | dma_set_seg_boundary(dev, shost->dma_boundary); |
| 1844 | 1844 | ||
| 1845 | blk_queue_max_segment_size(q, | 1845 | blk_queue_max_segment_size(q, shost->max_segment_size); |
| 1846 | min(shost->max_segment_size, dma_get_max_seg_size(dev))); | 1846 | dma_set_max_seg_size(dev, shost->max_segment_size); |
| 1847 | 1847 | ||
| 1848 | /* | 1848 | /* |
| 1849 | * Set a reasonable default alignment: The larger of 32-byte (dword), | 1849 | * Set a reasonable default alignment: The larger of 32-byte (dword), |
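Together with the aacraid hunk earlier in this series, the scsi_lib change routes a host's segment-size limit through one place: the driver states its limit in shost->max_segment_size, and __scsi_init_queue() pushes that single value into both the block layer and the device's DMA parameters instead of combining two independently configured limits. Sketch of the resulting flow:

    /* driver probe (aacraid): declare the limit on the Scsi_Host */
    if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
            shost->max_segment_size = shost->max_sectors << 9;
    else
            shost->max_segment_size = 65536;

    /* __scsi_init_queue(): propagate it to blk-mq and the DMA layer */
    blk_queue_max_segment_size(q, shost->max_segment_size);
    dma_set_max_seg_size(dev, shost->max_segment_size);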
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index a2b4179bfdf7..7639df91b110 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c | |||
| @@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, | |||
| 80 | 80 | ||
| 81 | if (err == 0) { | 81 | if (err == 0) { |
| 82 | pm_runtime_disable(dev); | 82 | pm_runtime_disable(dev); |
| 83 | pm_runtime_set_active(dev); | 83 | err = pm_runtime_set_active(dev); |
| 84 | pm_runtime_enable(dev); | 84 | pm_runtime_enable(dev); |
| 85 | |||
| 86 | /* | ||
| 87 | * Forcibly set runtime PM status of request queue to "active" | ||
| 88 | * to make sure we can again get requests from the queue | ||
| 89 | * (see also blk_pm_peek_request()). | ||
| 90 | * | ||
| 91 | * The resume hook will correct runtime PM status of the disk. | ||
| 92 | */ | ||
| 93 | if (!err && scsi_is_sdev_device(dev)) { | ||
| 94 | struct scsi_device *sdev = to_scsi_device(dev); | ||
| 95 | |||
| 96 | if (sdev->request_queue->dev) | ||
| 97 | blk_set_runtime_active(sdev->request_queue); | ||
| 98 | } | ||
| 85 | } | 99 | } |
| 86 | 100 | ||
| 87 | return err; | 101 | return err; |
| @@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, | |||
| 140 | else | 154 | else |
| 141 | fn = NULL; | 155 | fn = NULL; |
| 142 | 156 | ||
| 143 | /* | ||
| 144 | * Forcibly set runtime PM status of request queue to "active" to | ||
| 145 | * make sure we can again get requests from the queue (see also | ||
| 146 | * blk_pm_peek_request()). | ||
| 147 | * | ||
| 148 | * The resume hook will correct runtime PM status of the disk. | ||
| 149 | */ | ||
| 150 | if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) | ||
| 151 | blk_set_runtime_active(to_scsi_device(dev)->request_queue); | ||
| 152 | |||
| 153 | if (fn) { | 157 | if (fn) { |
| 154 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); | 158 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); |
| 155 | 159 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a44f52e0e8..b2da8a00ec33 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, | |||
| 206 | sp = buffer_data[0] & 0x80 ? 1 : 0; | 206 | sp = buffer_data[0] & 0x80 ? 1 : 0; |
| 207 | buffer_data[0] &= ~0x80; | 207 | buffer_data[0] &= ~0x80; |
| 208 | 208 | ||
| 209 | /* | ||
| 210 | * Ensure WP, DPOFUA, and RESERVED fields are cleared in | ||
| 211 | * received mode parameter buffer before doing MODE SELECT. | ||
| 212 | */ | ||
| 213 | data.device_specific = 0; | ||
| 214 | |||
| 209 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, | 215 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, |
| 210 | SD_MAX_RETRIES, &data, &sshdr)) { | 216 | SD_MAX_RETRIES, &data, &sshdr)) { |
| 211 | if (scsi_sense_valid(&sshdr)) | 217 | if (scsi_sense_valid(&sshdr)) |
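The sd change zeroes the device-specific parameter byte that MODE SENSE returned before the buffer is sent back with MODE SELECT; that byte carries WP and DPOFUA, which are not meaningful in MODE SELECT data and which a target may reject when echoed back, breaking cache_type writes. The relevant lines in context:

    data.device_specific = 0;           /* clear WP / DPOFUA before MODE SELECT */

    if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
                         SD_MAX_RETRIES, &data, &sshdr)) {
            /* report failure as before */
    }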
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7bde6c809442..f564af8949e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c | |||
| @@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) | |||
| 323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, | 323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, |
| 324 | struct pqi_scsi_dev *device) | 324 | struct pqi_scsi_dev *device) |
| 325 | { | 325 | { |
| 326 | return device->in_remove & !ctrl_info->in_shutdown; | 326 | return device->in_remove && !ctrl_info->in_shutdown; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static inline void pqi_schedule_rescan_worker_with_delay( | 329 | static inline void pqi_schedule_rescan_worker_with_delay( |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dd65fea07687..6d176815e6ce 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
| @@ -195,7 +195,7 @@ enum ufs_desc_def_size { | |||
| 195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, | 195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, |
| 196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, | 196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, |
| 197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, | 197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, |
| 198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, | 198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, |
| 199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, | 199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, |
| 200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, | 200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, |
| 201 | }; | 201 | }; |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9ba7671b84f8..2ddf24466a62 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -108,13 +108,19 @@ | |||
| 108 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, | 108 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, |
| 109 | const char *prefix) | 109 | const char *prefix) |
| 110 | { | 110 | { |
| 111 | u8 *regs; | 111 | u32 *regs; |
| 112 | size_t pos; | ||
| 113 | |||
| 114 | if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ | ||
| 115 | return -EINVAL; | ||
| 112 | 116 | ||
| 113 | regs = kzalloc(len, GFP_KERNEL); | 117 | regs = kzalloc(len, GFP_KERNEL); |
| 114 | if (!regs) | 118 | if (!regs) |
| 115 | return -ENOMEM; | 119 | return -ENOMEM; |
| 116 | 120 | ||
| 117 | memcpy_fromio(regs, hba->mmio_base + offset, len); | 121 | for (pos = 0; pos < len; pos += 4) |
| 122 | regs[pos / 4] = ufshcd_readl(hba, offset + pos); | ||
| 123 | |||
| 118 | ufshcd_hex_dump(prefix, regs, len); | 124 | ufshcd_hex_dump(prefix, regs, len); |
| 119 | kfree(regs); | 125 | kfree(regs); |
| 120 | 126 | ||
| @@ -8001,6 +8007,8 @@ out: | |||
| 8001 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, | 8007 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, |
| 8002 | ktime_to_us(ktime_sub(ktime_get(), start)), | 8008 | ktime_to_us(ktime_sub(ktime_get(), start)), |
| 8003 | hba->curr_dev_pwr_mode, hba->uic_link_state); | 8009 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
| 8010 | if (!ret) | ||
| 8011 | hba->is_sys_suspended = false; | ||
| 8004 | return ret; | 8012 | return ret; |
| 8005 | } | 8013 | } |
| 8006 | EXPORT_SYMBOL(ufshcd_system_resume); | 8014 | EXPORT_SYMBOL(ufshcd_system_resume); |
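Two independent ufshcd fixes sit above: ufshcd_dump_regs() now reads the register file as 32-bit words through ufshcd_readl() instead of a byte-wise memcpy_fromio(), and therefore rejects offsets or lengths that are not multiples of four; and a successful ufshcd_system_resume() now clears hba->is_sys_suspended. A hypothetical caller of the dump helper under the new rule:

    /* offset and len must both be multiples of 4 now */
    if (ufshcd_dump_regs(hba, 0x00, 0x60, "ufshcd regs: "))
            dev_err(hba->dev, "register dump failed\n");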
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c index f78c34647ca2..76480df195a8 100644 --- a/drivers/soc/fsl/qe/qe_tdm.c +++ b/drivers/soc/fsl/qe/qe_tdm.c | |||
| @@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
| 44 | const char *sprop; | 44 | const char *sprop; |
| 45 | int ret = 0; | 45 | int ret = 0; |
| 46 | u32 val; | 46 | u32 val; |
| 47 | struct resource *res; | ||
| 48 | struct device_node *np2; | ||
| 49 | static int siram_init_flag; | ||
| 50 | struct platform_device *pdev; | ||
| 51 | 47 | ||
| 52 | sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); | 48 | sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); |
| 53 | if (sprop) { | 49 | if (sprop) { |
| @@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm, | |||
| 124 | utdm->siram_entry_id = val; | 120 | utdm->siram_entry_id = val; |
| 125 | 121 | ||
| 126 | set_si_param(utdm, ut_info); | 122 | set_si_param(utdm, ut_info); |
| 127 | |||
| 128 | np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si"); | ||
| 129 | if (!np2) | ||
| 130 | return -EINVAL; | ||
| 131 | |||
| 132 | pdev = of_find_device_by_node(np2); | ||
| 133 | if (!pdev) { | ||
| 134 | pr_err("%pOFn: failed to lookup pdev\n", np2); | ||
| 135 | of_node_put(np2); | ||
| 136 | return -EINVAL; | ||
| 137 | } | ||
| 138 | |||
| 139 | of_node_put(np2); | ||
| 140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 141 | utdm->si_regs = devm_ioremap_resource(&pdev->dev, res); | ||
| 142 | if (IS_ERR(utdm->si_regs)) { | ||
| 143 | ret = PTR_ERR(utdm->si_regs); | ||
| 144 | goto err_miss_siram_property; | ||
| 145 | } | ||
| 146 | |||
| 147 | np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram"); | ||
| 148 | if (!np2) { | ||
| 149 | ret = -EINVAL; | ||
| 150 | goto err_miss_siram_property; | ||
| 151 | } | ||
| 152 | |||
| 153 | pdev = of_find_device_by_node(np2); | ||
| 154 | if (!pdev) { | ||
| 155 | ret = -EINVAL; | ||
| 156 | pr_err("%pOFn: failed to lookup pdev\n", np2); | ||
| 157 | of_node_put(np2); | ||
| 158 | goto err_miss_siram_property; | ||
| 159 | } | ||
| 160 | |||
| 161 | of_node_put(np2); | ||
| 162 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 163 | utdm->siram = devm_ioremap_resource(&pdev->dev, res); | ||
| 164 | if (IS_ERR(utdm->siram)) { | ||
| 165 | ret = PTR_ERR(utdm->siram); | ||
| 166 | goto err_miss_siram_property; | ||
| 167 | } | ||
| 168 | |||
| 169 | if (siram_init_flag == 0) { | ||
| 170 | memset_io(utdm->siram, 0, resource_size(res)); | ||
| 171 | siram_init_flag = 1; | ||
| 172 | } | ||
| 173 | |||
| 174 | return ret; | ||
| 175 | |||
| 176 | err_miss_siram_property: | ||
| 177 | devm_iounmap(&pdev->dev, utdm->si_regs); | ||
| 178 | return ret; | 123 | return ret; |
| 179 | } | 124 | } |
| 180 | EXPORT_SYMBOL(ucc_of_parse_tdm); | 125 | EXPORT_SYMBOL(ucc_of_parse_tdm); |
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index a0802de8c3a1..6f5afab7c1a1 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c | |||
| @@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf, | |||
| 248 | struct ion_dma_buf_attachment *a = attachment->priv; | 248 | struct ion_dma_buf_attachment *a = attachment->priv; |
| 249 | struct ion_buffer *buffer = dmabuf->priv; | 249 | struct ion_buffer *buffer = dmabuf->priv; |
| 250 | 250 | ||
| 251 | free_duped_table(a->table); | ||
| 252 | mutex_lock(&buffer->lock); | 251 | mutex_lock(&buffer->lock); |
| 253 | list_del(&a->list); | 252 | list_del(&a->list); |
| 254 | mutex_unlock(&buffer->lock); | 253 | mutex_unlock(&buffer->lock); |
| 254 | free_duped_table(a->table); | ||
| 255 | 255 | ||
| 256 | kfree(a); | 256 | kfree(a); |
| 257 | } | 257 | } |
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 28cbd6b3d26c..dfee6985efa6 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | |||
| @@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { | |||
| 35 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ | 35 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ |
| 36 | {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ | 36 | {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ |
| 37 | {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ | 37 | {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ |
| 38 | {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ | ||
| 38 | {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ | 39 | {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ |
| 39 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ | 40 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ |
| 40 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ | 41 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ |
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h index bcc8dfa8e672..9efb4dcb9d3a 100644 --- a/drivers/staging/rtl8723bs/include/ieee80211.h +++ b/drivers/staging/rtl8723bs/include/ieee80211.h | |||
| @@ -850,18 +850,18 @@ enum ieee80211_state { | |||
| 850 | #define IP_FMT "%pI4" | 850 | #define IP_FMT "%pI4" |
| 851 | #define IP_ARG(x) (x) | 851 | #define IP_ARG(x) (x) |
| 852 | 852 | ||
| 853 | extern __inline int is_multicast_mac_addr(const u8 *addr) | 853 | static inline int is_multicast_mac_addr(const u8 *addr) |
| 854 | { | 854 | { |
| 855 | return ((addr[0] != 0xff) && (0x01 & addr[0])); | 855 | return ((addr[0] != 0xff) && (0x01 & addr[0])); |
| 856 | } | 856 | } |
| 857 | 857 | ||
| 858 | extern __inline int is_broadcast_mac_addr(const u8 *addr) | 858 | static inline int is_broadcast_mac_addr(const u8 *addr) |
| 859 | { | 859 | { |
| 860 | return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ | 860 | return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ |
| 861 | (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); | 861 | (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); |
| 862 | } | 862 | } |
| 863 | 863 | ||
| 864 | extern __inline int is_zero_mac_addr(const u8 *addr) | 864 | static inline int is_zero_mac_addr(const u8 *addr) |
| 865 | { | 865 | { |
| 866 | return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ | 866 | return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ |
| 867 | (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); | 867 | (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 9e17ec651bde..53f5a1cb4636 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c | |||
| @@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) | |||
| 446 | static inline void | 446 | static inline void |
| 447 | remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) | 447 | remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) |
| 448 | { | 448 | { |
| 449 | event->fired = 1; | ||
| 449 | event->armed = 0; | 450 | event->armed = 0; |
| 450 | wake_up_all(wq); | 451 | wake_up_all(wq); |
| 451 | } | 452 | } |
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 70c854d939ce..3d0badc34825 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c | |||
| @@ -36,7 +36,7 @@ struct wilc_op_mode { | |||
| 36 | struct wilc_reg_frame { | 36 | struct wilc_reg_frame { |
| 37 | bool reg; | 37 | bool reg; |
| 38 | u8 reg_id; | 38 | u8 reg_id; |
| 39 | __le32 frame_type; | 39 | __le16 frame_type; |
| 40 | } __packed; | 40 | } __packed; |
| 41 | 41 | ||
| 42 | struct wilc_drv_handler { | 42 | struct wilc_drv_handler { |
| @@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, | |||
| 1744 | result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, | 1744 | result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, |
| 1745 | ARRAY_SIZE(wid_list), | 1745 | ARRAY_SIZE(wid_list), |
| 1746 | wilc_get_vif_idx(vif)); | 1746 | wilc_get_vif_idx(vif)); |
| 1747 | kfree(gtk_key); | ||
| 1748 | } else if (mode == WILC_STATION_MODE) { | 1747 | } else if (mode == WILC_STATION_MODE) { |
| 1749 | struct wid wid; | 1748 | struct wid wid; |
| 1750 | 1749 | ||
| @@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, | |||
| 1754 | wid.val = (u8 *)gtk_key; | 1753 | wid.val = (u8 *)gtk_key; |
| 1755 | result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, | 1754 | result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, |
| 1756 | wilc_get_vif_idx(vif)); | 1755 | wilc_get_vif_idx(vif)); |
| 1757 | kfree(gtk_key); | ||
| 1758 | } | 1756 | } |
| 1759 | 1757 | ||
| 1758 | kfree(gtk_key); | ||
| 1760 | return result; | 1759 | return result; |
| 1761 | } | 1760 | } |
| 1762 | 1761 | ||
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 3c5e9e030cad..489e5a5038f8 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c | |||
| @@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev) | |||
| 1252 | ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, ®); | 1252 | ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, ®); |
| 1253 | if (!ret) { | 1253 | if (!ret) { |
| 1254 | netdev_err(dev, "fail read reg 0x1118\n"); | 1254 | netdev_err(dev, "fail read reg 0x1118\n"); |
| 1255 | return ret; | 1255 | goto release; |
| 1256 | } | 1256 | } |
| 1257 | reg |= BIT(0); | 1257 | reg |= BIT(0); |
| 1258 | ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); | 1258 | ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); |
| 1259 | if (!ret) { | 1259 | if (!ret) { |
| 1260 | netdev_err(dev, "fail write reg 0x1118\n"); | 1260 | netdev_err(dev, "fail write reg 0x1118\n"); |
| 1261 | return ret; | 1261 | goto release; |
| 1262 | } | 1262 | } |
| 1263 | ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); | 1263 | ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); |
| 1264 | if (!ret) { | 1264 | if (!ret) { |
| 1265 | netdev_err(dev, "fail write reg 0xc0000\n"); | 1265 | netdev_err(dev, "fail write reg 0xc0000\n"); |
| 1266 | return ret; | 1266 | goto release; |
| 1267 | } | 1267 | } |
| 1268 | } | 1268 | } |
| 1269 | 1269 | ||
| 1270 | release: | ||
| 1270 | release_bus(wilc, WILC_BUS_RELEASE_ONLY); | 1271 | release_bus(wilc, WILC_BUS_RELEASE_ONLY); |
| 1271 | 1272 | ||
| 1272 | return ret; | 1273 | return ret; |
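The init_chip() hunk above converts early returns into jumps to a common release label so that release_bus() also runs on the error paths, not only on success. A generic sketch of the acquire/goto-unwind pattern being applied (the bus type, register helpers, and error convention here are illustrative placeholders, not the wilc1000 API):

        static int chip_setup(struct my_bus *bus)
        {
                int ret;

                acquire_bus(bus);

                ret = bus_write_reg(bus, REG_A, 0x1);
                if (ret)
                        goto release;           /* never return with the bus held */

                ret = bus_write_reg(bus, REG_B, 0x71);
                if (ret)
                        goto release;

        release:
                release_bus(bus);
                return ret;
        }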
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 984941e036c8..bd15a564fe24 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void) | |||
| 714 | sizeof(struct iscsi_queue_req), | 714 | sizeof(struct iscsi_queue_req), |
| 715 | __alignof__(struct iscsi_queue_req), 0, NULL); | 715 | __alignof__(struct iscsi_queue_req), 0, NULL); |
| 716 | if (!lio_qr_cache) { | 716 | if (!lio_qr_cache) { |
| 717 | pr_err("nable to kmem_cache_create() for" | 717 | pr_err("Unable to kmem_cache_create() for" |
| 718 | " lio_qr_cache\n"); | 718 | " lio_qr_cache\n"); |
| 719 | goto bitmap_out; | 719 | goto bitmap_out; |
| 720 | } | 720 | } |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1e6d24943565..5831e0eecea1 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -148,7 +148,7 @@ struct tcmu_dev { | |||
| 148 | size_t ring_size; | 148 | size_t ring_size; |
| 149 | 149 | ||
| 150 | struct mutex cmdr_lock; | 150 | struct mutex cmdr_lock; |
| 151 | struct list_head cmdr_queue; | 151 | struct list_head qfull_queue; |
| 152 | 152 | ||
| 153 | uint32_t dbi_max; | 153 | uint32_t dbi_max; |
| 154 | uint32_t dbi_thresh; | 154 | uint32_t dbi_thresh; |
| @@ -159,6 +159,7 @@ struct tcmu_dev { | |||
| 159 | 159 | ||
| 160 | struct timer_list cmd_timer; | 160 | struct timer_list cmd_timer; |
| 161 | unsigned int cmd_time_out; | 161 | unsigned int cmd_time_out; |
| 162 | struct list_head inflight_queue; | ||
| 162 | 163 | ||
| 163 | struct timer_list qfull_timer; | 164 | struct timer_list qfull_timer; |
| 164 | int qfull_time_out; | 165 | int qfull_time_out; |
| @@ -179,7 +180,7 @@ struct tcmu_dev { | |||
| 179 | struct tcmu_cmd { | 180 | struct tcmu_cmd { |
| 180 | struct se_cmd *se_cmd; | 181 | struct se_cmd *se_cmd; |
| 181 | struct tcmu_dev *tcmu_dev; | 182 | struct tcmu_dev *tcmu_dev; |
| 182 | struct list_head cmdr_queue_entry; | 183 | struct list_head queue_entry; |
| 183 | 184 | ||
| 184 | uint16_t cmd_id; | 185 | uint16_t cmd_id; |
| 185 | 186 | ||
| @@ -192,6 +193,7 @@ struct tcmu_cmd { | |||
| 192 | unsigned long deadline; | 193 | unsigned long deadline; |
| 193 | 194 | ||
| 194 | #define TCMU_CMD_BIT_EXPIRED 0 | 195 | #define TCMU_CMD_BIT_EXPIRED 0 |
| 196 | #define TCMU_CMD_BIT_INFLIGHT 1 | ||
| 195 | unsigned long flags; | 197 | unsigned long flags; |
| 196 | }; | 198 | }; |
| 197 | /* | 199 | /* |
| @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) | |||
| 586 | if (!tcmu_cmd) | 588 | if (!tcmu_cmd) |
| 587 | return NULL; | 589 | return NULL; |
| 588 | 590 | ||
| 589 | INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); | 591 | INIT_LIST_HEAD(&tcmu_cmd->queue_entry); |
| 590 | tcmu_cmd->se_cmd = se_cmd; | 592 | tcmu_cmd->se_cmd = se_cmd; |
| 591 | tcmu_cmd->tcmu_dev = udev; | 593 | tcmu_cmd->tcmu_dev = udev; |
| 592 | 594 | ||
| @@ -915,11 +917,13 @@ setup_timer: | |||
| 915 | return 0; | 917 | return 0; |
| 916 | 918 | ||
| 917 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); | 919 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); |
| 918 | mod_timer(timer, tcmu_cmd->deadline); | 920 | if (!timer_pending(timer)) |
| 921 | mod_timer(timer, tcmu_cmd->deadline); | ||
| 922 | |||
| 919 | return 0; | 923 | return 0; |
| 920 | } | 924 | } |
| 921 | 925 | ||
| 922 | static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | 926 | static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) |
| 923 | { | 927 | { |
| 924 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | 928 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; |
| 925 | unsigned int tmo; | 929 | unsigned int tmo; |
| @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | |||
| 942 | if (ret) | 946 | if (ret) |
| 943 | return ret; | 947 | return ret; |
| 944 | 948 | ||
| 945 | list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); | 949 | list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); |
| 946 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", | 950 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", |
| 947 | tcmu_cmd->cmd_id, udev->name); | 951 | tcmu_cmd->cmd_id, udev->name); |
| 948 | return 0; | 952 | return 0; |
| @@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
| 999 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); | 1003 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); |
| 1000 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); | 1004 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); |
| 1001 | 1005 | ||
| 1002 | if (!list_empty(&udev->cmdr_queue)) | 1006 | if (!list_empty(&udev->qfull_queue)) |
| 1003 | goto queue; | 1007 | goto queue; |
| 1004 | 1008 | ||
| 1005 | mb = udev->mb_addr; | 1009 | mb = udev->mb_addr; |
| @@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
| 1096 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); | 1100 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); |
| 1097 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 1101 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
| 1098 | 1102 | ||
| 1103 | list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); | ||
| 1104 | set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); | ||
| 1105 | |||
| 1099 | /* TODO: only if FLUSH and FUA? */ | 1106 | /* TODO: only if FLUSH and FUA? */ |
| 1100 | uio_event_notify(&udev->uio_info); | 1107 | uio_event_notify(&udev->uio_info); |
| 1101 | 1108 | ||
| 1102 | return 0; | 1109 | return 0; |
| 1103 | 1110 | ||
| 1104 | queue: | 1111 | queue: |
| 1105 | if (add_to_cmdr_queue(tcmu_cmd)) { | 1112 | if (add_to_qfull_queue(tcmu_cmd)) { |
| 1106 | *scsi_err = TCM_OUT_OF_RESOURCES; | 1113 | *scsi_err = TCM_OUT_OF_RESOURCES; |
| 1107 | return -1; | 1114 | return -1; |
| 1108 | } | 1115 | } |
| @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
| 1145 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | 1152 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) |
| 1146 | goto out; | 1153 | goto out; |
| 1147 | 1154 | ||
| 1155 | list_del_init(&cmd->queue_entry); | ||
| 1156 | |||
| 1148 | tcmu_cmd_reset_dbi_cur(cmd); | 1157 | tcmu_cmd_reset_dbi_cur(cmd); |
| 1149 | 1158 | ||
| 1150 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { | 1159 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { |
| @@ -1194,9 +1203,29 @@ out: | |||
| 1194 | tcmu_free_cmd(cmd); | 1203 | tcmu_free_cmd(cmd); |
| 1195 | } | 1204 | } |
| 1196 | 1205 | ||
| 1206 | static void tcmu_set_next_deadline(struct list_head *queue, | ||
| 1207 | struct timer_list *timer) | ||
| 1208 | { | ||
| 1209 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | ||
| 1210 | unsigned long deadline = 0; | ||
| 1211 | |||
| 1212 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { | ||
| 1213 | if (!time_after(jiffies, tcmu_cmd->deadline)) { | ||
| 1214 | deadline = tcmu_cmd->deadline; | ||
| 1215 | break; | ||
| 1216 | } | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | if (deadline) | ||
| 1220 | mod_timer(timer, deadline); | ||
| 1221 | else | ||
| 1222 | del_timer(timer); | ||
| 1223 | } | ||
| 1224 | |||
| 1197 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | 1225 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) |
| 1198 | { | 1226 | { |
| 1199 | struct tcmu_mailbox *mb; | 1227 | struct tcmu_mailbox *mb; |
| 1228 | struct tcmu_cmd *cmd; | ||
| 1200 | int handled = 0; | 1229 | int handled = 0; |
| 1201 | 1230 | ||
| 1202 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { | 1231 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { |
| @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1210 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { | 1239 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
| 1211 | 1240 | ||
| 1212 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 1241 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
| 1213 | struct tcmu_cmd *cmd; | ||
| 1214 | 1242 | ||
| 1215 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 1243 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
| 1216 | 1244 | ||
| @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1243 | /* no more pending commands */ | 1271 | /* no more pending commands */ |
| 1244 | del_timer(&udev->cmd_timer); | 1272 | del_timer(&udev->cmd_timer); |
| 1245 | 1273 | ||
| 1246 | if (list_empty(&udev->cmdr_queue)) { | 1274 | if (list_empty(&udev->qfull_queue)) { |
| 1247 | /* | 1275 | /* |
| 1248 | * no more pending or waiting commands so try to | 1276 | * no more pending or waiting commands so try to |
| 1249 | * reclaim blocks if needed. | 1277 | * reclaim blocks if needed. |
| @@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1252 | tcmu_global_max_blocks) | 1280 | tcmu_global_max_blocks) |
| 1253 | schedule_delayed_work(&tcmu_unmap_work, 0); | 1281 | schedule_delayed_work(&tcmu_unmap_work, 0); |
| 1254 | } | 1282 | } |
| 1283 | } else if (udev->cmd_time_out) { | ||
| 1284 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
| 1255 | } | 1285 | } |
| 1256 | 1286 | ||
| 1257 | return handled; | 1287 | return handled; |
| @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
| 1271 | if (!time_after(jiffies, cmd->deadline)) | 1301 | if (!time_after(jiffies, cmd->deadline)) |
| 1272 | return 0; | 1302 | return 0; |
| 1273 | 1303 | ||
| 1274 | is_running = list_empty(&cmd->cmdr_queue_entry); | 1304 | is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); |
| 1275 | se_cmd = cmd->se_cmd; | 1305 | se_cmd = cmd->se_cmd; |
| 1276 | 1306 | ||
| 1277 | if (is_running) { | 1307 | if (is_running) { |
| @@ -1287,9 +1317,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
| 1287 | * target_complete_cmd will translate this to LUN COMM FAILURE | 1317 | * target_complete_cmd will translate this to LUN COMM FAILURE |
| 1288 | */ | 1318 | */ |
| 1289 | scsi_status = SAM_STAT_CHECK_CONDITION; | 1319 | scsi_status = SAM_STAT_CHECK_CONDITION; |
| 1320 | list_del_init(&cmd->queue_entry); | ||
| 1290 | } else { | 1321 | } else { |
| 1291 | list_del_init(&cmd->cmdr_queue_entry); | 1322 | list_del_init(&cmd->queue_entry); |
| 1292 | |||
| 1293 | idr_remove(&udev->commands, id); | 1323 | idr_remove(&udev->commands, id); |
| 1294 | tcmu_free_cmd(cmd); | 1324 | tcmu_free_cmd(cmd); |
| 1295 | scsi_status = SAM_STAT_TASK_SET_FULL; | 1325 | scsi_status = SAM_STAT_TASK_SET_FULL; |
| @@ -1372,7 +1402,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 1372 | 1402 | ||
| 1373 | INIT_LIST_HEAD(&udev->node); | 1403 | INIT_LIST_HEAD(&udev->node); |
| 1374 | INIT_LIST_HEAD(&udev->timedout_entry); | 1404 | INIT_LIST_HEAD(&udev->timedout_entry); |
| 1375 | INIT_LIST_HEAD(&udev->cmdr_queue); | 1405 | INIT_LIST_HEAD(&udev->qfull_queue); |
| 1406 | INIT_LIST_HEAD(&udev->inflight_queue); | ||
| 1376 | idr_init(&udev->commands); | 1407 | idr_init(&udev->commands); |
| 1377 | 1408 | ||
| 1378 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); | 1409 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); |
| @@ -1383,7 +1414,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 1383 | return &udev->se_dev; | 1414 | return &udev->se_dev; |
| 1384 | } | 1415 | } |
| 1385 | 1416 | ||
| 1386 | static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | 1417 | static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) |
| 1387 | { | 1418 | { |
| 1388 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | 1419 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; |
| 1389 | LIST_HEAD(cmds); | 1420 | LIST_HEAD(cmds); |
| @@ -1391,15 +1422,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
| 1391 | sense_reason_t scsi_ret; | 1422 | sense_reason_t scsi_ret; |
| 1392 | int ret; | 1423 | int ret; |
| 1393 | 1424 | ||
| 1394 | if (list_empty(&udev->cmdr_queue)) | 1425 | if (list_empty(&udev->qfull_queue)) |
| 1395 | return true; | 1426 | return true; |
| 1396 | 1427 | ||
| 1397 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); | 1428 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); |
| 1398 | 1429 | ||
| 1399 | list_splice_init(&udev->cmdr_queue, &cmds); | 1430 | list_splice_init(&udev->qfull_queue, &cmds); |
| 1400 | 1431 | ||
| 1401 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { | 1432 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { |
| 1402 | list_del_init(&tcmu_cmd->cmdr_queue_entry); | 1433 | list_del_init(&tcmu_cmd->queue_entry); |
| 1403 | 1434 | ||
| 1404 | pr_debug("removing cmd %u on dev %s from queue\n", | 1435 | pr_debug("removing cmd %u on dev %s from queue\n", |
| 1405 | tcmu_cmd->cmd_id, udev->name); | 1436 | tcmu_cmd->cmd_id, udev->name); |
| @@ -1437,14 +1468,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
| 1437 | * cmd was requeued, so just put all cmds back in | 1468 | * cmd was requeued, so just put all cmds back in |
| 1438 | * the queue | 1469 | * the queue |
| 1439 | */ | 1470 | */ |
| 1440 | list_splice_tail(&cmds, &udev->cmdr_queue); | 1471 | list_splice_tail(&cmds, &udev->qfull_queue); |
| 1441 | drained = false; | 1472 | drained = false; |
| 1442 | goto done; | 1473 | break; |
| 1443 | } | 1474 | } |
| 1444 | } | 1475 | } |
| 1445 | if (list_empty(&udev->cmdr_queue)) | 1476 | |
| 1446 | del_timer(&udev->qfull_timer); | 1477 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); |
| 1447 | done: | ||
| 1448 | return drained; | 1478 | return drained; |
| 1449 | } | 1479 | } |
| 1450 | 1480 | ||
| @@ -1454,7 +1484,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) | |||
| 1454 | 1484 | ||
| 1455 | mutex_lock(&udev->cmdr_lock); | 1485 | mutex_lock(&udev->cmdr_lock); |
| 1456 | tcmu_handle_completions(udev); | 1486 | tcmu_handle_completions(udev); |
| 1457 | run_cmdr_queue(udev, false); | 1487 | run_qfull_queue(udev, false); |
| 1458 | mutex_unlock(&udev->cmdr_lock); | 1488 | mutex_unlock(&udev->cmdr_lock); |
| 1459 | 1489 | ||
| 1460 | return 0; | 1490 | return 0; |
| @@ -1982,7 +2012,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) | |||
| 1982 | /* complete IO that has executed successfully */ | 2012 | /* complete IO that has executed successfully */ |
| 1983 | tcmu_handle_completions(udev); | 2013 | tcmu_handle_completions(udev); |
| 1984 | /* fail IO waiting to be queued */ | 2014 | /* fail IO waiting to be queued */ |
| 1985 | run_cmdr_queue(udev, true); | 2015 | run_qfull_queue(udev, true); |
| 1986 | 2016 | ||
| 1987 | unlock: | 2017 | unlock: |
| 1988 | mutex_unlock(&udev->cmdr_lock); | 2018 | mutex_unlock(&udev->cmdr_lock); |
| @@ -1997,7 +2027,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
| 1997 | mutex_lock(&udev->cmdr_lock); | 2027 | mutex_lock(&udev->cmdr_lock); |
| 1998 | 2028 | ||
| 1999 | idr_for_each_entry(&udev->commands, cmd, i) { | 2029 | idr_for_each_entry(&udev->commands, cmd, i) { |
| 2000 | if (!list_empty(&cmd->cmdr_queue_entry)) | 2030 | if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) |
| 2001 | continue; | 2031 | continue; |
| 2002 | 2032 | ||
| 2003 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", | 2033 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", |
| @@ -2006,6 +2036,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
| 2006 | 2036 | ||
| 2007 | idr_remove(&udev->commands, i); | 2037 | idr_remove(&udev->commands, i); |
| 2008 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { | 2038 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { |
| 2039 | list_del_init(&cmd->queue_entry); | ||
| 2009 | if (err_level == 1) { | 2040 | if (err_level == 1) { |
| 2010 | /* | 2041 | /* |
| 2011 | * Userspace was not able to start the | 2042 | * Userspace was not able to start the |
| @@ -2666,6 +2697,10 @@ static void check_timedout_devices(void) | |||
| 2666 | 2697 | ||
| 2667 | mutex_lock(&udev->cmdr_lock); | 2698 | mutex_lock(&udev->cmdr_lock); |
| 2668 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); | 2699 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); |
| 2700 | |||
| 2701 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
| 2702 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); | ||
| 2703 | |||
| 2669 | mutex_unlock(&udev->cmdr_lock); | 2704 | mutex_unlock(&udev->cmdr_lock); |
| 2670 | 2705 | ||
| 2671 | spin_lock_bh(&timed_out_udevs_lock); | 2706 | spin_lock_bh(&timed_out_udevs_lock); |
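Two threads run through the target_core_user changes above: commands now live on either qfull_queue (waiting for ring space) or inflight_queue (handed to userspace), and the per-device timers track the deadline of the oldest pending command instead of being pushed forward by every newly queued one. At queue time mod_timer() is called only when the timer is not already pending, and after completions or expirations the timer is retargeted with the helper added in the hunk. That helper, restated with comments (verbatim from the diff apart from the comments):

        static void tcmu_set_next_deadline(struct list_head *queue,
                                           struct timer_list *timer)
        {
                struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
                unsigned long deadline = 0;

                /* the queues are filled oldest-first, so the first entry whose
                 * deadline has not yet passed is the next one due to fire */
                list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
                        if (!time_after(jiffies, tcmu_cmd->deadline)) {
                                deadline = tcmu_cmd->deadline;
                                break;
                        }
                }

                if (deadline)
                        mod_timer(timer, deadline);
                else
                        del_timer(timer);       /* nothing left to time out */
        }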
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 0582bd12a239..0ca908d12750 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | config INT340X_THERMAL | 5 | config INT340X_THERMAL |
| 6 | tristate "ACPI INT340X thermal drivers" | 6 | tristate "ACPI INT340X thermal drivers" |
| 7 | depends on X86 && ACPI | 7 | depends on X86 && ACPI && PCI |
| 8 | select THERMAL_GOV_USER_SPACE | 8 | select THERMAL_GOV_USER_SPACE |
| 9 | select ACPI_THERMAL_REL | 9 | select ACPI_THERMAL_REL |
| 10 | select ACPI_FAN | 10 | select ACPI_FAN |
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 284cf2c5a8fd..8e1cf4d789be 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c | |||
| @@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ | |||
| 84 | struct pci_dev *pci_dev; \ | 84 | struct pci_dev *pci_dev; \ |
| 85 | struct platform_device *pdev; \ | 85 | struct platform_device *pdev; \ |
| 86 | struct proc_thermal_device *proc_dev; \ | 86 | struct proc_thermal_device *proc_dev; \ |
| 87 | \ | 87 | \ |
| 88 | if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \ | ||
| 89 | dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \ | ||
| 90 | return 0; \ | ||
| 91 | } \ | ||
| 92 | \ | ||
| 88 | if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ | 93 | if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ |
| 89 | pdev = to_platform_device(dev); \ | 94 | pdev = to_platform_device(dev); \ |
| 90 | proc_dev = platform_get_drvdata(pdev); \ | 95 | proc_dev = platform_get_drvdata(pdev); \ |
| @@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev, | |||
| 298 | *priv = proc_priv; | 303 | *priv = proc_priv; |
| 299 | 304 | ||
| 300 | ret = proc_thermal_read_ppcc(proc_priv); | 305 | ret = proc_thermal_read_ppcc(proc_priv); |
| 301 | if (!ret) { | ||
| 302 | ret = sysfs_create_group(&dev->kobj, | ||
| 303 | &power_limit_attribute_group); | ||
| 304 | |||
| 305 | } | ||
| 306 | if (ret) | 306 | if (ret) |
| 307 | return ret; | 307 | return ret; |
| 308 | 308 | ||
| @@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev, | |||
| 316 | 316 | ||
| 317 | proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); | 317 | proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); |
| 318 | if (IS_ERR(proc_priv->int340x_zone)) { | 318 | if (IS_ERR(proc_priv->int340x_zone)) { |
| 319 | ret = PTR_ERR(proc_priv->int340x_zone); | 319 | return PTR_ERR(proc_priv->int340x_zone); |
| 320 | goto remove_group; | ||
| 321 | } else | 320 | } else |
| 322 | ret = 0; | 321 | ret = 0; |
| 323 | 322 | ||
| @@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev, | |||
| 331 | 330 | ||
| 332 | remove_zone: | 331 | remove_zone: |
| 333 | int340x_thermal_zone_remove(proc_priv->int340x_zone); | 332 | int340x_thermal_zone_remove(proc_priv->int340x_zone); |
| 334 | remove_group: | ||
| 335 | sysfs_remove_group(&proc_priv->dev->kobj, | ||
| 336 | &power_limit_attribute_group); | ||
| 337 | 333 | ||
| 338 | return ret; | 334 | return ret; |
| 339 | } | 335 | } |
| @@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev) | |||
| 364 | platform_set_drvdata(pdev, proc_priv); | 360 | platform_set_drvdata(pdev, proc_priv); |
| 365 | proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; | 361 | proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; |
| 366 | 362 | ||
| 367 | return 0; | 363 | dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n"); |
| 364 | |||
| 365 | return sysfs_create_group(&pdev->dev.kobj, | ||
| 366 | &power_limit_attribute_group); | ||
| 368 | } | 367 | } |
| 369 | 368 | ||
| 370 | static int int3401_remove(struct platform_device *pdev) | 369 | static int int3401_remove(struct platform_device *pdev) |
| @@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, | |||
| 423 | proc_priv->soc_dts = intel_soc_dts_iosf_init( | 422 | proc_priv->soc_dts = intel_soc_dts_iosf_init( |
| 424 | INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); | 423 | INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); |
| 425 | 424 | ||
| 426 | if (proc_priv->soc_dts && pdev->irq) { | 425 | if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { |
| 427 | ret = pci_enable_msi(pdev); | 426 | ret = pci_enable_msi(pdev); |
| 428 | if (!ret) { | 427 | if (!ret) { |
| 429 | ret = request_threaded_irq(pdev->irq, NULL, | 428 | ret = request_threaded_irq(pdev->irq, NULL, |
| @@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, | |||
| 441 | dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); | 440 | dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); |
| 442 | } | 441 | } |
| 443 | 442 | ||
| 444 | return 0; | 443 | dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n"); |
| 444 | |||
| 445 | return sysfs_create_group(&pdev->dev.kobj, | ||
| 446 | &power_limit_attribute_group); | ||
| 445 | } | 447 | } |
| 446 | 448 | ||
| 447 | static void proc_thermal_pci_remove(struct pci_dev *pdev) | 449 | static void proc_thermal_pci_remove(struct pci_dev *pdev) |
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 4164414d4c64..8bdf42bc8fc8 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c | |||
| @@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, | |||
| 597 | /* too large for caller's buffer */ | 597 | /* too large for caller's buffer */ |
| 598 | ret = -EOVERFLOW; | 598 | ret = -EOVERFLOW; |
| 599 | } else { | 599 | } else { |
| 600 | __set_current_state(TASK_RUNNING); | ||
| 600 | if (copy_to_user(buf, rbuf->buf, rbuf->count)) | 601 | if (copy_to_user(buf, rbuf->buf, rbuf->count)) |
| 601 | ret = -EFAULT; | 602 | ret = -EFAULT; |
| 602 | else | 603 | else |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 189ab1212d9a..e441221e04b9 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
| @@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up) | |||
| 1070 | 1070 | ||
| 1071 | ret = 0; | 1071 | ret = 0; |
| 1072 | } | 1072 | } |
| 1073 | } | ||
| 1074 | 1073 | ||
| 1075 | /* Initialise interrupt backoff work if required */ | 1074 | /* Initialise interrupt backoff work if required */ |
| 1076 | if (up->overrun_backoff_time_ms > 0) { | 1075 | if (up->overrun_backoff_time_ms > 0) { |
| 1077 | uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; | 1076 | uart->overrun_backoff_time_ms = |
| 1078 | INIT_DELAYED_WORK(&uart->overrun_backoff, | 1077 | up->overrun_backoff_time_ms; |
| 1079 | serial_8250_overrun_backoff_work); | 1078 | INIT_DELAYED_WORK(&uart->overrun_backoff, |
| 1080 | } else { | 1079 | serial_8250_overrun_backoff_work); |
| 1081 | uart->overrun_backoff_time_ms = 0; | 1080 | } else { |
| 1081 | uart->overrun_backoff_time_ms = 0; | ||
| 1082 | } | ||
| 1082 | } | 1083 | } |
| 1083 | 1084 | ||
| 1084 | mutex_unlock(&serial_mutex); | 1085 | mutex_unlock(&serial_mutex); |
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 241a48e5052c..debdd1b9e01a 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
| @@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 1697 | } | 1697 | } |
| 1698 | 1698 | ||
| 1699 | /* ask the core to calculate the divisor */ | 1699 | /* ask the core to calculate the divisor */ |
| 1700 | baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); | 1700 | baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4); |
| 1701 | 1701 | ||
| 1702 | spin_lock_irqsave(&sport->port.lock, flags); | 1702 | spin_lock_irqsave(&sport->port.lock, flags); |
| 1703 | 1703 | ||
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index a72d6d9fb983..38016609c7fa 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c | |||
| @@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport) | |||
| 225 | unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; | 225 | unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; |
| 226 | u32 geni_ios; | 226 | u32 geni_ios; |
| 227 | 227 | ||
| 228 | if (uart_console(uport) || !uart_cts_enabled(uport)) { | 228 | if (uart_console(uport)) { |
| 229 | mctrl |= TIOCM_CTS; | 229 | mctrl |= TIOCM_CTS; |
| 230 | } else { | 230 | } else { |
| 231 | geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); | 231 | geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); |
| @@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport, | |||
| 241 | { | 241 | { |
| 242 | u32 uart_manual_rfr = 0; | 242 | u32 uart_manual_rfr = 0; |
| 243 | 243 | ||
| 244 | if (uart_console(uport) || !uart_cts_enabled(uport)) | 244 | if (uart_console(uport)) |
| 245 | return; | 245 | return; |
| 246 | 246 | ||
| 247 | if (!(mctrl & TIOCM_RTS)) | 247 | if (!(mctrl & TIOCM_RTS)) |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index d4cca5bdaf1c..5c01bb6d1c24 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
| @@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) | |||
| 550 | int ret = 0; | 550 | int ret = 0; |
| 551 | 551 | ||
| 552 | circ = &state->xmit; | 552 | circ = &state->xmit; |
| 553 | if (!circ->buf) | 553 | port = uart_port_lock(state, flags); |
| 554 | if (!circ->buf) { | ||
| 555 | uart_port_unlock(port, flags); | ||
| 554 | return 0; | 556 | return 0; |
| 557 | } | ||
| 555 | 558 | ||
| 556 | port = uart_port_lock(state, flags); | ||
| 557 | if (port && uart_circ_chars_free(circ) != 0) { | 559 | if (port && uart_circ_chars_free(circ) != 0) { |
| 558 | circ->buf[circ->head] = c; | 560 | circ->buf[circ->head] = c; |
| 559 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); | 561 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); |
| @@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty, | |||
| 586 | return -EL3HLT; | 588 | return -EL3HLT; |
| 587 | } | 589 | } |
| 588 | 590 | ||
| 591 | port = uart_port_lock(state, flags); | ||
| 589 | circ = &state->xmit; | 592 | circ = &state->xmit; |
| 590 | if (!circ->buf) | 593 | if (!circ->buf) { |
| 594 | uart_port_unlock(port, flags); | ||
| 591 | return 0; | 595 | return 0; |
| 596 | } | ||
| 592 | 597 | ||
| 593 | port = uart_port_lock(state, flags); | ||
| 594 | while (port) { | 598 | while (port) { |
| 595 | c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); | 599 | c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); |
| 596 | if (count < c) | 600 | if (count < c) |
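In both uart_put_char() and uart_write() above, the NULL check of state->xmit.buf moves under uart_port_lock(), closing a race with a concurrent shutdown that frees the xmit buffer between the check and its use. The resulting shape, taken from the uart_put_char() hunk (uart_port_lock()/uart_port_unlock() are the file's existing local helpers; surrounding code elided):

        circ = &state->xmit;
        port = uart_port_lock(state, flags);    /* take the lock first */
        if (!circ->buf) {                       /* only then test the buffer */
                uart_port_unlock(port, flags);
                return 0;
        }
        /* the buffer is valid and stays valid while the lock is held */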
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 23c6fd238422..21ffcce16927 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) | |||
| 2189 | ld = tty_ldisc_ref_wait(tty); | 2189 | ld = tty_ldisc_ref_wait(tty); |
| 2190 | if (!ld) | 2190 | if (!ld) |
| 2191 | return -EIO; | 2191 | return -EIO; |
| 2192 | ld->ops->receive_buf(tty, &ch, &mbz, 1); | 2192 | if (ld->ops->receive_buf) |
| 2193 | ld->ops->receive_buf(tty, &ch, &mbz, 1); | ||
| 2193 | tty_ldisc_deref(ld); | 2194 | tty_ldisc_deref(ld); |
| 2194 | return 0; | 2195 | return 0; |
| 2195 | } | 2196 | } |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 41ec8e5010f3..bba75560d11e 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
| @@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, | |||
| 1272 | if (con_is_visible(vc)) | 1272 | if (con_is_visible(vc)) |
| 1273 | update_screen(vc); | 1273 | update_screen(vc); |
| 1274 | vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); | 1274 | vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); |
| 1275 | notify_update(vc); | ||
| 1275 | return err; | 1276 | return err; |
| 1276 | } | 1277 | } |
| 1277 | 1278 | ||
| @@ -2764,8 +2765,8 @@ rescan_last_byte: | |||
| 2764 | con_flush(vc, draw_from, draw_to, &draw_x); | 2765 | con_flush(vc, draw_from, draw_to, &draw_x); |
| 2765 | vc_uniscr_debug_check(vc); | 2766 | vc_uniscr_debug_check(vc); |
| 2766 | console_conditional_schedule(); | 2767 | console_conditional_schedule(); |
| 2767 | console_unlock(); | ||
| 2768 | notify_update(vc); | 2768 | notify_update(vc); |
| 2769 | console_unlock(); | ||
| 2769 | return n; | 2770 | return n; |
| 2770 | } | 2771 | } |
| 2771 | 2772 | ||
| @@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | |||
| 2884 | unsigned char c; | 2885 | unsigned char c; |
| 2885 | static DEFINE_SPINLOCK(printing_lock); | 2886 | static DEFINE_SPINLOCK(printing_lock); |
| 2886 | const ushort *start; | 2887 | const ushort *start; |
| 2887 | ushort cnt = 0; | 2888 | ushort start_x, cnt; |
| 2888 | ushort myx; | ||
| 2889 | int kmsg_console; | 2889 | int kmsg_console; |
| 2890 | 2890 | ||
| 2891 | /* console busy or not yet initialized */ | 2891 | /* console busy or not yet initialized */ |
| @@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | |||
| 2898 | if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) | 2898 | if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) |
| 2899 | vc = vc_cons[kmsg_console - 1].d; | 2899 | vc = vc_cons[kmsg_console - 1].d; |
| 2900 | 2900 | ||
| 2901 | /* read `x' only after setting currcons properly (otherwise | ||
| 2902 | the `x' macro will read the x of the foreground console). */ | ||
| 2903 | myx = vc->vc_x; | ||
| 2904 | |||
| 2905 | if (!vc_cons_allocated(fg_console)) { | 2901 | if (!vc_cons_allocated(fg_console)) { |
| 2906 | /* impossible */ | 2902 | /* impossible */ |
| 2907 | /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ | 2903 | /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ |
| @@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | |||
| 2916 | hide_cursor(vc); | 2912 | hide_cursor(vc); |
| 2917 | 2913 | ||
| 2918 | start = (ushort *)vc->vc_pos; | 2914 | start = (ushort *)vc->vc_pos; |
| 2919 | 2915 | start_x = vc->vc_x; | |
| 2920 | /* Contrived structure to try to emulate original need_wrap behaviour | 2916 | cnt = 0; |
| 2921 | * Problems caused when we have need_wrap set on '\n' character */ | ||
| 2922 | while (count--) { | 2917 | while (count--) { |
| 2923 | c = *b++; | 2918 | c = *b++; |
| 2924 | if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { | 2919 | if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { |
| 2925 | if (cnt > 0) { | 2920 | if (cnt && con_is_visible(vc)) |
| 2926 | if (con_is_visible(vc)) | 2921 | vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); |
| 2927 | vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); | 2922 | cnt = 0; |
| 2928 | vc->vc_x += cnt; | ||
| 2929 | if (vc->vc_need_wrap) | ||
| 2930 | vc->vc_x--; | ||
| 2931 | cnt = 0; | ||
| 2932 | } | ||
| 2933 | if (c == 8) { /* backspace */ | 2923 | if (c == 8) { /* backspace */ |
| 2934 | bs(vc); | 2924 | bs(vc); |
| 2935 | start = (ushort *)vc->vc_pos; | 2925 | start = (ushort *)vc->vc_pos; |
| 2936 | myx = vc->vc_x; | 2926 | start_x = vc->vc_x; |
| 2937 | continue; | 2927 | continue; |
| 2938 | } | 2928 | } |
| 2939 | if (c != 13) | 2929 | if (c != 13) |
| 2940 | lf(vc); | 2930 | lf(vc); |
| 2941 | cr(vc); | 2931 | cr(vc); |
| 2942 | start = (ushort *)vc->vc_pos; | 2932 | start = (ushort *)vc->vc_pos; |
| 2943 | myx = vc->vc_x; | 2933 | start_x = vc->vc_x; |
| 2944 | if (c == 10 || c == 13) | 2934 | if (c == 10 || c == 13) |
| 2945 | continue; | 2935 | continue; |
| 2946 | } | 2936 | } |
| 2937 | vc_uniscr_putc(vc, c); | ||
| 2947 | scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); | 2938 | scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); |
| 2948 | notify_write(vc, c); | 2939 | notify_write(vc, c); |
| 2949 | cnt++; | 2940 | cnt++; |
| 2950 | if (myx == vc->vc_cols - 1) { | 2941 | if (vc->vc_x == vc->vc_cols - 1) { |
| 2951 | vc->vc_need_wrap = 1; | ||
| 2952 | continue; | ||
| 2953 | } | ||
| 2954 | vc->vc_pos += 2; | ||
| 2955 | myx++; | ||
| 2956 | } | ||
| 2957 | if (cnt > 0) { | ||
| 2958 | if (con_is_visible(vc)) | ||
| 2959 | vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); | ||
| 2960 | vc->vc_x += cnt; | ||
| 2961 | if (vc->vc_x == vc->vc_cols) { | ||
| 2962 | vc->vc_x--; | ||
| 2963 | vc->vc_need_wrap = 1; | 2942 | vc->vc_need_wrap = 1; |
| 2943 | } else { | ||
| 2944 | vc->vc_pos += 2; | ||
| 2945 | vc->vc_x++; | ||
| 2964 | } | 2946 | } |
| 2965 | } | 2947 | } |
| 2948 | if (cnt && con_is_visible(vc)) | ||
| 2949 | vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); | ||
| 2966 | set_cursor(vc); | 2950 | set_cursor(vc); |
| 2967 | notify_update(vc); | 2951 | notify_update(vc); |
| 2968 | 2952 | ||
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index e81de9ca8729..9b45aa422e69 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c | |||
| @@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 316 | if (IS_ERR(data->usbmisc_data)) | 316 | if (IS_ERR(data->usbmisc_data)) |
| 317 | return PTR_ERR(data->usbmisc_data); | 317 | return PTR_ERR(data->usbmisc_data); |
| 318 | 318 | ||
| 319 | if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) { | 319 | if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) |
| 320 | && data->usbmisc_data) { | ||
| 320 | pdata.flags |= CI_HDRC_IMX_IS_HSIC; | 321 | pdata.flags |= CI_HDRC_IMX_IS_HSIC; |
| 321 | data->usbmisc_data->hsic = 1; | 322 | data->usbmisc_data->hsic = 1; |
| 322 | data->pinctrl = devm_pinctrl_get(dev); | 323 | data->pinctrl = devm_pinctrl_get(dev); |
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index dc7f7fd71684..c12ac56606c3 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c | |||
| @@ -119,11 +119,6 @@ static const struct attribute_group ports_group = { | |||
| 119 | .attrs = ports_attrs, | 119 | .attrs = ports_attrs, |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | static const struct attribute_group *ports_groups[] = { | ||
| 123 | &ports_group, | ||
| 124 | NULL | ||
| 125 | }; | ||
| 126 | |||
| 127 | /*************************************** | 122 | /*************************************** |
| 128 | * Adding & removing ports | 123 | * Adding & removing ports |
| 129 | ***************************************/ | 124 | ***************************************/ |
| @@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, | |||
| 307 | static int usbport_trig_activate(struct led_classdev *led_cdev) | 302 | static int usbport_trig_activate(struct led_classdev *led_cdev) |
| 308 | { | 303 | { |
| 309 | struct usbport_trig_data *usbport_data; | 304 | struct usbport_trig_data *usbport_data; |
| 305 | int err; | ||
| 310 | 306 | ||
| 311 | usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); | 307 | usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); |
| 312 | if (!usbport_data) | 308 | if (!usbport_data) |
| @@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) | |||
| 315 | 311 | ||
| 316 | /* List of ports */ | 312 | /* List of ports */ |
| 317 | INIT_LIST_HEAD(&usbport_data->ports); | 313 | INIT_LIST_HEAD(&usbport_data->ports); |
| 314 | err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group); | ||
| 315 | if (err) | ||
| 316 | goto err_free; | ||
| 318 | usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); | 317 | usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); |
| 319 | usbport_trig_update_count(usbport_data); | 318 | usbport_trig_update_count(usbport_data); |
| 320 | 319 | ||
| @@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) | |||
| 322 | usbport_data->nb.notifier_call = usbport_trig_notify; | 321 | usbport_data->nb.notifier_call = usbport_trig_notify; |
| 323 | led_set_trigger_data(led_cdev, usbport_data); | 322 | led_set_trigger_data(led_cdev, usbport_data); |
| 324 | usb_register_notify(&usbport_data->nb); | 323 | usb_register_notify(&usbport_data->nb); |
| 325 | |||
| 326 | return 0; | 324 | return 0; |
| 325 | |||
| 326 | err_free: | ||
| 327 | kfree(usbport_data); | ||
| 328 | return err; | ||
| 327 | } | 329 | } |
| 328 | 330 | ||
| 329 | static void usbport_trig_deactivate(struct led_classdev *led_cdev) | 331 | static void usbport_trig_deactivate(struct led_classdev *led_cdev) |
| @@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev) | |||
| 335 | usbport_trig_remove_port(usbport_data, port); | 337 | usbport_trig_remove_port(usbport_data, port); |
| 336 | } | 338 | } |
| 337 | 339 | ||
| 340 | sysfs_remove_group(&led_cdev->dev->kobj, &ports_group); | ||
| 341 | |||
| 338 | usb_unregister_notify(&usbport_data->nb); | 342 | usb_unregister_notify(&usbport_data->nb); |
| 339 | 343 | ||
| 340 | kfree(usbport_data); | 344 | kfree(usbport_data); |
| @@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = { | |||
| 344 | .name = "usbport", | 348 | .name = "usbport", |
| 345 | .activate = usbport_trig_activate, | 349 | .activate = usbport_trig_activate, |
| 346 | .deactivate = usbport_trig_deactivate, | 350 | .deactivate = usbport_trig_deactivate, |
| 347 | .groups = ports_groups, | ||
| 348 | }; | 351 | }; |
| 349 | 352 | ||
| 350 | static int __init usbport_trig_init(void) | 353 | static int __init usbport_trig_init(void) |
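The ledtrig-usbport change above stops relying on the trigger-wide .groups pointer and instead creates and removes the "ports" attribute group explicitly in the activate()/deactivate() callbacks, which lets activation fail cleanly when sysfs registration fails. The activate path after the patch, condensed from the hunks (the elided middle is the existing port scan and notifier registration shown in the diff):

        usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
        if (!usbport_data)
                return -ENOMEM;

        INIT_LIST_HEAD(&usbport_data->ports);
        err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
        if (err)
                goto err_free;

        /* ... scan existing USB devices, register the notifier ... */
        return 0;

 err_free:
        kfree(usbport_data);
        return err;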
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 68ad75a7460d..55ef3cc2701b 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
| @@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg) | |||
| 261 | 261 | ||
| 262 | if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { | 262 | if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { |
| 263 | dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); | 263 | dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); |
| 264 | dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); | 264 | dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); |
| 265 | dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); | 265 | dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); |
| 266 | } | 266 | } |
| 267 | } | 267 | } |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 07bd31bb2f8a..bed2ff42780b 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, | |||
| 177 | req->started = false; | 177 | req->started = false; |
| 178 | list_del(&req->list); | 178 | list_del(&req->list); |
| 179 | req->remaining = 0; | 179 | req->remaining = 0; |
| 180 | req->needs_extra_trb = false; | ||
| 180 | 181 | ||
| 181 | if (req->request.status == -EINPROGRESS) | 182 | if (req->request.status == -EINPROGRESS) |
| 182 | req->request.status = status; | 183 | req->request.status = status; |
| @@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) | |||
| 1984 | 1985 | ||
| 1985 | /* begin to receive SETUP packets */ | 1986 | /* begin to receive SETUP packets */ |
| 1986 | dwc->ep0state = EP0_SETUP_PHASE; | 1987 | dwc->ep0state = EP0_SETUP_PHASE; |
| 1988 | dwc->link_state = DWC3_LINK_STATE_SS_DIS; | ||
| 1987 | dwc3_ep0_out_start(dwc); | 1989 | dwc3_ep0_out_start(dwc); |
| 1988 | 1990 | ||
| 1989 | dwc3_gadget_enable_irq(dwc); | 1991 | dwc3_gadget_enable_irq(dwc); |
| @@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) | |||
| 3379 | dwc3_disconnect_gadget(dwc); | 3381 | dwc3_disconnect_gadget(dwc); |
| 3380 | __dwc3_gadget_stop(dwc); | 3382 | __dwc3_gadget_stop(dwc); |
| 3381 | 3383 | ||
| 3384 | synchronize_irq(dwc->irq_gadget); | ||
| 3385 | |||
| 3382 | return 0; | 3386 | return 0; |
| 3383 | } | 3387 | } |
| 3384 | 3388 | ||
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9cdef108fb1b..ed68a4860b7d 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c | |||
| @@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func( | |||
| 838 | 838 | ||
| 839 | ss = kzalloc(sizeof(*ss), GFP_KERNEL); | 839 | ss = kzalloc(sizeof(*ss), GFP_KERNEL); |
| 840 | if (!ss) | 840 | if (!ss) |
| 841 | return NULL; | 841 | return ERR_PTR(-ENOMEM); |
| 842 | 842 | ||
| 843 | ss_opts = container_of(fi, struct f_ss_opts, func_inst); | 843 | ss_opts = container_of(fi, struct f_ss_opts, func_inst); |
| 844 | 844 | ||
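source_sink_alloc_func() now reports allocation failure with ERR_PTR(-ENOMEM) instead of NULL, matching the ERR_PTR convention its callers expect: they test the result with IS_ERR() rather than for NULL, so a bare NULL would previously slip past the error check. A minimal illustration of that caller-side convention (hypothetical caller, not taken from the patch):

        struct usb_function *func;

        func = source_sink_alloc_func(fi);
        if (IS_ERR(func))
                return PTR_ERR(func);   /* propagates -ENOMEM */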
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index f26109eafdbf..66ec1fdf9fe7 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c | |||
| @@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); | |||
| 302 | MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); | 302 | MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); |
| 303 | MODULE_ALIAS("mv-ehci"); | 303 | MODULE_ALIAS("mv-ehci"); |
| 304 | MODULE_LICENSE("GPL"); | 304 | MODULE_LICENSE("GPL"); |
| 305 | MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids); | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1ab2a6191013..77ef4c481f3c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) | |||
| 1783 | int result; | 1783 | int result; |
| 1784 | u16 val; | 1784 | u16 val; |
| 1785 | 1785 | ||
| 1786 | result = usb_autopm_get_interface(serial->interface); | ||
| 1787 | if (result) | ||
| 1788 | return result; | ||
| 1789 | |||
| 1786 | val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; | 1790 | val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; |
| 1787 | result = usb_control_msg(serial->dev, | 1791 | result = usb_control_msg(serial->dev, |
| 1788 | usb_sndctrlpipe(serial->dev, 0), | 1792 | usb_sndctrlpipe(serial->dev, 0), |
| @@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode) | |||
| 1795 | val, result); | 1799 | val, result); |
| 1796 | } | 1800 | } |
| 1797 | 1801 | ||
| 1802 | usb_autopm_put_interface(serial->interface); | ||
| 1803 | |||
| 1798 | return result; | 1804 | return result; |
| 1799 | } | 1805 | } |
| 1800 | 1806 | ||
| @@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port) | |||
| 1846 | unsigned char *buf; | 1852 | unsigned char *buf; |
| 1847 | int result; | 1853 | int result; |
| 1848 | 1854 | ||
| 1855 | result = usb_autopm_get_interface(serial->interface); | ||
| 1856 | if (result) | ||
| 1857 | return result; | ||
| 1858 | |||
| 1849 | buf = kmalloc(1, GFP_KERNEL); | 1859 | buf = kmalloc(1, GFP_KERNEL); |
| 1850 | if (!buf) | 1860 | if (!buf) { |
| 1861 | usb_autopm_put_interface(serial->interface); | ||
| 1851 | return -ENOMEM; | 1862 | return -ENOMEM; |
| 1863 | } | ||
| 1852 | 1864 | ||
| 1853 | result = usb_control_msg(serial->dev, | 1865 | result = usb_control_msg(serial->dev, |
| 1854 | usb_rcvctrlpipe(serial->dev, 0), | 1866 | usb_rcvctrlpipe(serial->dev, 0), |
| @@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port) | |||
| 1863 | } | 1875 | } |
| 1864 | 1876 | ||
| 1865 | kfree(buf); | 1877 | kfree(buf); |
| 1878 | usb_autopm_put_interface(serial->interface); | ||
| 1866 | 1879 | ||
| 1867 | return result; | 1880 | return result; |
| 1868 | } | 1881 | } |
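Both ftdi_set_bitmode() and ftdi_read_cbus_pins() above now bracket their control transfers with usb_autopm_get_interface()/usb_autopm_put_interface(), so a runtime-suspended device is resumed before the bitmode/CBUS register access and the PM reference is dropped on every exit path, including the early -ENOMEM return. The pattern, condensed from the ftdi_read_cbus_pins() hunk (control-message arguments elided):

        result = usb_autopm_get_interface(serial->interface);
        if (result)
                return result;                  /* resume failed, bail out */

        buf = kmalloc(1, GFP_KERNEL);
        if (!buf) {
                usb_autopm_put_interface(serial->interface);
                return -ENOMEM;
        }

        result = usb_control_msg(serial->dev,
                                 usb_rcvctrlpipe(serial->dev, 0),
                                 /* ... request, value, index, buf, len, timeout ... */);

        kfree(buf);
        usb_autopm_put_interface(serial->interface);
        return result;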
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h index 09e21e84fc4e..a68f1fb25b8a 100644 --- a/drivers/usb/serial/keyspan_usa26msg.h +++ b/drivers/usb/serial/keyspan_usa26msg.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | usa26msg.h | 3 | usa26msg.h |
| 3 | 4 | ||
diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h index dee454c4609a..a19f3fe5d98d 100644 --- a/drivers/usb/serial/keyspan_usa28msg.h +++ b/drivers/usb/serial/keyspan_usa28msg.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | usa28msg.h | 3 | usa28msg.h |
| 3 | 4 | ||
diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h index 163b2dea2ec5..8c3970fdd868 100644 --- a/drivers/usb/serial/keyspan_usa49msg.h +++ b/drivers/usb/serial/keyspan_usa49msg.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | usa49msg.h | 3 | usa49msg.h |
| 3 | 4 | ||
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h index 20fa3e2f7187..dcf502fdbb44 100644 --- a/drivers/usb/serial/keyspan_usa67msg.h +++ b/drivers/usb/serial/keyspan_usa67msg.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | usa67msg.h | 3 | usa67msg.h |
| 3 | 4 | ||
diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h index 86708ecd8735..c4ca0f631d20 100644 --- a/drivers/usb/serial/keyspan_usa90msg.h +++ b/drivers/usb/serial/keyspan_usa90msg.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | usa90msg.h | 3 | usa90msg.h |
| 3 | 4 | ||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 98e7a5df0f6d..bb3f9aa4a909 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = { | |||
| 46 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, | 46 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, |
| 47 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, | 47 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, |
| 48 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, | 48 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, |
| 49 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) }, | ||
| 49 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, | 50 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, |
| 50 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | 51 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, |
| 51 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), | 52 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 4e2554d55362..559941ca884d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #define PL2303_VENDOR_ID 0x067b | 9 | #define PL2303_VENDOR_ID 0x067b |
| 10 | #define PL2303_PRODUCT_ID 0x2303 | 10 | #define PL2303_PRODUCT_ID 0x2303 |
| 11 | #define PL2303_PRODUCT_ID_TB 0x2304 | ||
| 11 | #define PL2303_PRODUCT_ID_RSAQ2 0x04bb | 12 | #define PL2303_PRODUCT_ID_RSAQ2 0x04bb |
| 12 | #define PL2303_PRODUCT_ID_DCU11 0x1234 | 13 | #define PL2303_PRODUCT_ID_DCU11 0x1234 |
| 13 | #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 | 14 | #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 |
| @@ -20,6 +21,7 @@ | |||
| 20 | #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 | 21 | #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 |
| 21 | #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 | 22 | #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 |
| 22 | 23 | ||
| 24 | |||
| 23 | #define ATEN_VENDOR_ID 0x0557 | 25 | #define ATEN_VENDOR_ID 0x0557 |
| 24 | #define ATEN_VENDOR_ID2 0x0547 | 26 | #define ATEN_VENDOR_ID2 0x0547 |
| 25 | #define ATEN_PRODUCT_ID 0x2008 | 27 | #define ATEN_PRODUCT_ID 0x2008 |
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4d0273508043..edbbb13d6de6 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
| @@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS); | |||
| 85 | /* Motorola Tetra driver */ | 85 | /* Motorola Tetra driver */ |
| 86 | #define MOTOROLA_TETRA_IDS() \ | 86 | #define MOTOROLA_TETRA_IDS() \ |
| 87 | { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ | 87 | { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ |
| 88 | { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ | 88 | { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ |
| 89 | { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ | ||
| 89 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); | 90 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); |
| 90 | 91 | ||
| 91 | /* Novatel Wireless GPS driver */ | 92 | /* Novatel Wireless GPS driver */ |
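Note that appending TPG2200 to MOTOROLA_TETRA_IDS() also edits the line that used to be last: inside a multi-line ID macro the former final entry needs a trailing comma and a backslash continuation, otherwise the macro ends early and the new line becomes a syntax error. Illustrative form of the rule, reusing the IDs shown above:

#define EXAMPLE_TETRA_IDS()			\
	{ USB_DEVICE(0x0cad, 0x9011) },		\
	{ USB_DEVICE(0x0cad, 0x9016) }	/* last entry: no comma, no backslash */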
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README deleted file mode 100644 index 41a2cf2e77a6..000000000000 --- a/drivers/usb/usbip/README +++ /dev/null | |||
| @@ -1,7 +0,0 @@ | |||
| 1 | TODO: | ||
| 2 | - more discussion about the protocol | ||
| 3 | - testing | ||
| 4 | - review of the userspace interface | ||
| 5 | - document the protocol | ||
| 6 | |||
| 7 | Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com> | ||
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h index 4d13e510590e..b2aa986ab9ed 100644 --- a/drivers/vfio/pci/trace.h +++ b/drivers/vfio/pci/trace.h | |||
| @@ -1,13 +1,9 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* | 2 | /* |
| 3 | * VFIO PCI mmap/mmap_fault tracepoints | 3 | * VFIO PCI mmap/mmap_fault tracepoints |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2018 IBM Corp. All rights reserved. | 5 | * Copyright (C) 2018 IBM Corp. All rights reserved. |
| 6 | * Author: Alexey Kardashevskiy <aik@ozlabs.ru> | 6 | * Author: Alexey Kardashevskiy <aik@ozlabs.ru> |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | 7 | */ |
| 12 | 8 | ||
| 13 | #undef TRACE_SYSTEM | 9 | #undef TRACE_SYSTEM |
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index 054a2cf9dd8e..32f695ffe128 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c | |||
| @@ -1,14 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* | 2 | /* |
| 3 | * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2. | 3 | * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2. |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2018 IBM Corp. All rights reserved. | 5 | * Copyright (C) 2018 IBM Corp. All rights reserved. |
| 6 | * Author: Alexey Kardashevskiy <aik@ozlabs.ru> | 6 | * Author: Alexey Kardashevskiy <aik@ozlabs.ru> |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * Register an on-GPU RAM region for cacheable access. | 8 | * Register an on-GPU RAM region for cacheable access. |
| 13 | * | 9 | * |
| 14 | * Derived from original vfio_pci_igd.c: | 10 | * Derived from original vfio_pci_igd.c: |
| @@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev, | |||
| 178 | struct vfio_pci_region *region, struct vfio_info_cap *caps) | 174 | struct vfio_pci_region *region, struct vfio_info_cap *caps) |
| 179 | { | 175 | { |
| 180 | struct vfio_pci_nvgpu_data *data = region->data; | 176 | struct vfio_pci_nvgpu_data *data = region->data; |
| 181 | struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 }; | 177 | struct vfio_region_info_cap_nvlink2_ssatgt cap = { |
| 182 | 178 | .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT, | |
| 183 | cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; | 179 | .header.version = 1, |
| 184 | cap.header.version = 1; | 180 | .tgt = data->gpu_tgt |
| 185 | cap.tgt = data->gpu_tgt; | 181 | }; |
| 186 | 182 | ||
| 187 | return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); | 183 | return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); |
| 188 | } | 184 | } |
| @@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev, | |||
| 365 | struct vfio_pci_region *region, struct vfio_info_cap *caps) | 361 | struct vfio_pci_region *region, struct vfio_info_cap *caps) |
| 366 | { | 362 | { |
| 367 | struct vfio_pci_npu2_data *data = region->data; | 363 | struct vfio_pci_npu2_data *data = region->data; |
| 368 | struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 }; | 364 | struct vfio_region_info_cap_nvlink2_ssatgt captgt = { |
| 369 | struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 }; | 365 | .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT, |
| 366 | .header.version = 1, | ||
| 367 | .tgt = data->gpu_tgt | ||
| 368 | }; | ||
| 369 | struct vfio_region_info_cap_nvlink2_lnkspd capspd = { | ||
| 370 | .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD, | ||
| 371 | .header.version = 1, | ||
| 372 | .link_speed = data->link_speed | ||
| 373 | }; | ||
| 370 | int ret; | 374 | int ret; |
| 371 | 375 | ||
| 372 | captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; | ||
| 373 | captgt.header.version = 1; | ||
| 374 | captgt.tgt = data->gpu_tgt; | ||
| 375 | |||
| 376 | capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD; | ||
| 377 | capspd.header.version = 1; | ||
| 378 | capspd.link_speed = data->link_speed; | ||
| 379 | |||
| 380 | ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); | 376 | ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); |
| 381 | if (ret) | 377 | if (ret) |
| 382 | return ret; | 378 | return ret; |
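Both hunks above replace zero-initialization followed by per-field assignments with C99 designated initializers, so each capability is fully described where it is defined and any field left unnamed is still zeroed. A generic sketch of the idiom with made-up struct and field names:

struct example_cap {
	struct { u32 id; u32 version; } header;
	u64 tgt;
};

/* before: zero-init, then assign each field */
struct example_cap cap = { 0 };
cap.header.id = 1;
cap.header.version = 1;
cap.tgt = 0x1000;

/* after: designated initializers; unnamed fields are implicitly zero */
struct example_cap cap2 = {
	.header.id	= 1,
	.header.version	= 1,
	.tgt		= 0x1000,
};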
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 36f3d0f49e60..bca86bf7189f 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net) | |||
| 1236 | if (nvq->done_idx > VHOST_NET_BATCH) | 1236 | if (nvq->done_idx > VHOST_NET_BATCH) |
| 1237 | vhost_net_signal_used(nvq); | 1237 | vhost_net_signal_used(nvq); |
| 1238 | if (unlikely(vq_log)) | 1238 | if (unlikely(vq_log)) |
| 1239 | vhost_log_write(vq, vq_log, log, vhost_len); | 1239 | vhost_log_write(vq, vq_log, log, vhost_len, |
| 1240 | vq->iov, in); | ||
| 1240 | total_len += vhost_len; | 1241 | total_len += vhost_len; |
| 1241 | if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { | 1242 | if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { |
| 1242 | vhost_poll_queue(&vq->poll); | 1243 | vhost_poll_queue(&vq->poll); |
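With a device IOTLB enabled, the addresses stored in the vhost_log array are guest IOVAs rather than guest physical addresses, so logging them directly can mark the wrong pages dirty during live migration. handle_rx() therefore now also hands the receive iovec (vq->iov) and its element count to vhost_log_write(), letting the core translate and log the pages that were actually written; the helpers doing that translation are added in the vhost.c diff below.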
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8e10ab436d1f..344684f3e2e4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
| @@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, | |||
| 1127 | struct vhost_virtqueue *vq, | 1127 | struct vhost_virtqueue *vq, |
| 1128 | struct vhost_scsi_ctx *vc) | 1128 | struct vhost_scsi_ctx *vc) |
| 1129 | { | 1129 | { |
| 1130 | struct virtio_scsi_ctrl_tmf_resp __user *resp; | ||
| 1131 | struct virtio_scsi_ctrl_tmf_resp rsp; | 1130 | struct virtio_scsi_ctrl_tmf_resp rsp; |
| 1131 | struct iov_iter iov_iter; | ||
| 1132 | int ret; | 1132 | int ret; |
| 1133 | 1133 | ||
| 1134 | pr_debug("%s\n", __func__); | 1134 | pr_debug("%s\n", __func__); |
| 1135 | memset(&rsp, 0, sizeof(rsp)); | 1135 | memset(&rsp, 0, sizeof(rsp)); |
| 1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; | 1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; |
| 1137 | resp = vq->iov[vc->out].iov_base; | 1137 | |
| 1138 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1138 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
| 1139 | if (!ret) | 1139 | |
| 1140 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
| 1141 | if (likely(ret == sizeof(rsp))) | ||
| 1140 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1142 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
| 1141 | else | 1143 | else |
| 1142 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); | 1144 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); |
| @@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs, | |||
| 1147 | struct vhost_virtqueue *vq, | 1149 | struct vhost_virtqueue *vq, |
| 1148 | struct vhost_scsi_ctx *vc) | 1150 | struct vhost_scsi_ctx *vc) |
| 1149 | { | 1151 | { |
| 1150 | struct virtio_scsi_ctrl_an_resp __user *resp; | ||
| 1151 | struct virtio_scsi_ctrl_an_resp rsp; | 1152 | struct virtio_scsi_ctrl_an_resp rsp; |
| 1153 | struct iov_iter iov_iter; | ||
| 1152 | int ret; | 1154 | int ret; |
| 1153 | 1155 | ||
| 1154 | pr_debug("%s\n", __func__); | 1156 | pr_debug("%s\n", __func__); |
| 1155 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ | 1157 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ |
| 1156 | rsp.response = VIRTIO_SCSI_S_OK; | 1158 | rsp.response = VIRTIO_SCSI_S_OK; |
| 1157 | resp = vq->iov[vc->out].iov_base; | 1159 | |
| 1158 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1160 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
| 1159 | if (!ret) | 1161 | |
| 1162 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
| 1163 | if (likely(ret == sizeof(rsp))) | ||
| 1160 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1164 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
| 1161 | else | 1165 | else |
| 1162 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); | 1166 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); |
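Both response paths above stop casting vq->iov[vc->out].iov_base to a user pointer and instead build an iov_iter over the guest-supplied iovecs and use copy_to_iter(), which handles a response split across several iovec entries and keeps the access checking inside the iov_iter machinery. A minimal sketch of the pattern with a hypothetical response struct and buffer names:

struct example_resp {
	u8 response;
} rsp = { .response = 0 };
struct iov_iter iter;

/* vecs/nvecs describe the buffers the guest expects us to fill */
iov_iter_init(&iter, READ, vecs, nvecs, sizeof(rsp));
if (copy_to_iter(&rsp, sizeof(rsp), &iter) != sizeof(rsp))
	pr_err("short copy of example response\n");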
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9f7942cbcbb2..15a216cdd507 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
| 1034 | int type, ret; | 1034 | int type, ret; |
| 1035 | 1035 | ||
| 1036 | ret = copy_from_iter(&type, sizeof(type), from); | 1036 | ret = copy_from_iter(&type, sizeof(type), from); |
| 1037 | if (ret != sizeof(type)) | 1037 | if (ret != sizeof(type)) { |
| 1038 | ret = -EINVAL; | ||
| 1038 | goto done; | 1039 | goto done; |
| 1040 | } | ||
| 1039 | 1041 | ||
| 1040 | switch (type) { | 1042 | switch (type) { |
| 1041 | case VHOST_IOTLB_MSG: | 1043 | case VHOST_IOTLB_MSG: |
| @@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
| 1054 | 1056 | ||
| 1055 | iov_iter_advance(from, offset); | 1057 | iov_iter_advance(from, offset); |
| 1056 | ret = copy_from_iter(&msg, sizeof(msg), from); | 1058 | ret = copy_from_iter(&msg, sizeof(msg), from); |
| 1057 | if (ret != sizeof(msg)) | 1059 | if (ret != sizeof(msg)) { |
| 1060 | ret = -EINVAL; | ||
| 1058 | goto done; | 1061 | goto done; |
| 1062 | } | ||
| 1059 | if (vhost_process_iotlb_msg(dev, &msg)) { | 1063 | if (vhost_process_iotlb_msg(dev, &msg)) { |
| 1060 | ret = -EFAULT; | 1064 | ret = -EFAULT; |
| 1061 | goto done; | 1065 | goto done; |
| @@ -1733,13 +1737,87 @@ static int log_write(void __user *log_base, | |||
| 1733 | return r; | 1737 | return r; |
| 1734 | } | 1738 | } |
| 1735 | 1739 | ||
| 1740 | static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) | ||
| 1741 | { | ||
| 1742 | struct vhost_umem *umem = vq->umem; | ||
| 1743 | struct vhost_umem_node *u; | ||
| 1744 | u64 start, end, l, min; | ||
| 1745 | int r; | ||
| 1746 | bool hit = false; | ||
| 1747 | |||
| 1748 | while (len) { | ||
| 1749 | min = len; | ||
| 1750 | /* More than one GPAs can be mapped into a single HVA. So | ||
| 1751 | * iterate all possible umems here to be safe. | ||
| 1752 | */ | ||
| 1753 | list_for_each_entry(u, &umem->umem_list, link) { | ||
| 1754 | if (u->userspace_addr > hva - 1 + len || | ||
| 1755 | u->userspace_addr - 1 + u->size < hva) | ||
| 1756 | continue; | ||
| 1757 | start = max(u->userspace_addr, hva); | ||
| 1758 | end = min(u->userspace_addr - 1 + u->size, | ||
| 1759 | hva - 1 + len); | ||
| 1760 | l = end - start + 1; | ||
| 1761 | r = log_write(vq->log_base, | ||
| 1762 | u->start + start - u->userspace_addr, | ||
| 1763 | l); | ||
| 1764 | if (r < 0) | ||
| 1765 | return r; | ||
| 1766 | hit = true; | ||
| 1767 | min = min(l, min); | ||
| 1768 | } | ||
| 1769 | |||
| 1770 | if (!hit) | ||
| 1771 | return -EFAULT; | ||
| 1772 | |||
| 1773 | len -= min; | ||
| 1774 | hva += min; | ||
| 1775 | } | ||
| 1776 | |||
| 1777 | return 0; | ||
| 1778 | } | ||
| 1779 | |||
| 1780 | static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | ||
| 1781 | { | ||
| 1782 | struct iovec iov[64]; | ||
| 1783 | int i, ret; | ||
| 1784 | |||
| 1785 | if (!vq->iotlb) | ||
| 1786 | return log_write(vq->log_base, vq->log_addr + used_offset, len); | ||
| 1787 | |||
| 1788 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | ||
| 1789 | len, iov, 64, VHOST_ACCESS_WO); | ||
| 1790 | if (ret) | ||
| 1791 | return ret; | ||
| 1792 | |||
| 1793 | for (i = 0; i < ret; i++) { | ||
| 1794 | ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
| 1795 | iov[i].iov_len); | ||
| 1796 | if (ret) | ||
| 1797 | return ret; | ||
| 1798 | } | ||
| 1799 | |||
| 1800 | return 0; | ||
| 1801 | } | ||
| 1802 | |||
| 1736 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | 1803 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, |
| 1737 | unsigned int log_num, u64 len) | 1804 | unsigned int log_num, u64 len, struct iovec *iov, int count) |
| 1738 | { | 1805 | { |
| 1739 | int i, r; | 1806 | int i, r; |
| 1740 | 1807 | ||
| 1741 | /* Make sure data written is seen before log. */ | 1808 | /* Make sure data written is seen before log. */ |
| 1742 | smp_wmb(); | 1809 | smp_wmb(); |
| 1810 | |||
| 1811 | if (vq->iotlb) { | ||
| 1812 | for (i = 0; i < count; i++) { | ||
| 1813 | r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
| 1814 | iov[i].iov_len); | ||
| 1815 | if (r < 0) | ||
| 1816 | return r; | ||
| 1817 | } | ||
| 1818 | return 0; | ||
| 1819 | } | ||
| 1820 | |||
| 1743 | for (i = 0; i < log_num; ++i) { | 1821 | for (i = 0; i < log_num; ++i) { |
| 1744 | u64 l = min(log[i].len, len); | 1822 | u64 l = min(log[i].len, len); |
| 1745 | r = log_write(vq->log_base, log[i].addr, l); | 1823 | r = log_write(vq->log_base, log[i].addr, l); |
| @@ -1769,9 +1847,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) | |||
| 1769 | smp_wmb(); | 1847 | smp_wmb(); |
| 1770 | /* Log used flag write. */ | 1848 | /* Log used flag write. */ |
| 1771 | used = &vq->used->flags; | 1849 | used = &vq->used->flags; |
| 1772 | log_write(vq->log_base, vq->log_addr + | 1850 | log_used(vq, (used - (void __user *)vq->used), |
| 1773 | (used - (void __user *)vq->used), | 1851 | sizeof vq->used->flags); |
| 1774 | sizeof vq->used->flags); | ||
| 1775 | if (vq->log_ctx) | 1852 | if (vq->log_ctx) |
| 1776 | eventfd_signal(vq->log_ctx, 1); | 1853 | eventfd_signal(vq->log_ctx, 1); |
| 1777 | } | 1854 | } |
| @@ -1789,9 +1866,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) | |||
| 1789 | smp_wmb(); | 1866 | smp_wmb(); |
| 1790 | /* Log avail event write */ | 1867 | /* Log avail event write */ |
| 1791 | used = vhost_avail_event(vq); | 1868 | used = vhost_avail_event(vq); |
| 1792 | log_write(vq->log_base, vq->log_addr + | 1869 | log_used(vq, (used - (void __user *)vq->used), |
| 1793 | (used - (void __user *)vq->used), | 1870 | sizeof *vhost_avail_event(vq)); |
| 1794 | sizeof *vhost_avail_event(vq)); | ||
| 1795 | if (vq->log_ctx) | 1871 | if (vq->log_ctx) |
| 1796 | eventfd_signal(vq->log_ctx, 1); | 1872 | eventfd_signal(vq->log_ctx, 1); |
| 1797 | } | 1873 | } |
| @@ -2191,10 +2267,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, | |||
| 2191 | /* Make sure data is seen before log. */ | 2267 | /* Make sure data is seen before log. */ |
| 2192 | smp_wmb(); | 2268 | smp_wmb(); |
| 2193 | /* Log used ring entry write. */ | 2269 | /* Log used ring entry write. */ |
| 2194 | log_write(vq->log_base, | 2270 | log_used(vq, ((void __user *)used - (void __user *)vq->used), |
| 2195 | vq->log_addr + | 2271 | count * sizeof *used); |
| 2196 | ((void __user *)used - (void __user *)vq->used), | ||
| 2197 | count * sizeof *used); | ||
| 2198 | } | 2272 | } |
| 2199 | old = vq->last_used_idx; | 2273 | old = vq->last_used_idx; |
| 2200 | new = (vq->last_used_idx += count); | 2274 | new = (vq->last_used_idx += count); |
| @@ -2236,9 +2310,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, | |||
| 2236 | /* Make sure used idx is seen before log. */ | 2310 | /* Make sure used idx is seen before log. */ |
| 2237 | smp_wmb(); | 2311 | smp_wmb(); |
| 2238 | /* Log used index update. */ | 2312 | /* Log used index update. */ |
| 2239 | log_write(vq->log_base, | 2313 | log_used(vq, offsetof(struct vring_used, idx), |
| 2240 | vq->log_addr + offsetof(struct vring_used, idx), | 2314 | sizeof vq->used->idx); |
| 2241 | sizeof vq->used->idx); | ||
| 2242 | if (vq->log_ctx) | 2315 | if (vq->log_ctx) |
| 2243 | eventfd_signal(vq->log_ctx, 1); | 2316 | eventfd_signal(vq->log_ctx, 1); |
| 2244 | } | 2317 | } |
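Two separate fixes are visible in this file. First, the short-copy paths in vhost_chr_write_iter() now set ret to -EINVAL before jumping to done, where previously the partial byte count from copy_from_iter() was returned and a truncated iotlb message looked like a successful write. Second, the new log_write_hva() and log_used() helpers make dirty-page logging work when a device IOTLB is in use: log_used() translates the used-ring offset into host virtual addresses with translate_desc(), and log_write_hva() then walks every umem region covering each HVA, because more than one guest physical range can map to the same HVA and every aliasing GPA has to be logged. The call sites in vhost_update_used_flags(), vhost_update_avail_event(), __vhost_add_used_n() and vhost_add_used_n() switch from raw log_write() on vq->log_addr to log_used() so the translation happens whenever vq->iotlb is set, and vhost_log_write() gains the iov/count arguments used by the net driver above.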
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 466ef7542291..1b675dad5e05 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
| @@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); | |||
| 205 | bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); | 205 | bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); |
| 206 | 206 | ||
| 207 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | 207 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, |
| 208 | unsigned int log_num, u64 len); | 208 | unsigned int log_num, u64 len, |
| 209 | struct iovec *iov, int count); | ||
| 209 | int vq_iotlb_prefetch(struct vhost_virtqueue *vq); | 210 | int vq_iotlb_prefetch(struct vhost_virtqueue *vq); |
| 210 | 211 | ||
| 211 | struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); | 212 | struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index bc42d38ae031..3fbc068eaa9b 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
| @@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) | |||
| 642 | hash_del_rcu(&vsock->hash); | 642 | hash_del_rcu(&vsock->hash); |
| 643 | 643 | ||
| 644 | vsock->guest_cid = guest_cid; | 644 | vsock->guest_cid = guest_cid; |
| 645 | hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); | 645 | hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); |
| 646 | mutex_unlock(&vhost_vsock_mutex); | 646 | mutex_unlock(&vhost_vsock_mutex); |
| 647 | 647 | ||
| 648 | return 0; | 648 | return 0; |
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 6d8dc2c77520..51e0c4be08df 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c | |||
| @@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev, | |||
| 174 | return -ENODEV; | 174 | return -ENODEV; |
| 175 | } | 175 | } |
| 176 | for_each_child_of_node(nproot, np) { | 176 | for_each_child_of_node(nproot, np) { |
| 177 | if (!of_node_cmp(np->name, name)) { | 177 | if (of_node_name_eq(np, name)) { |
| 178 | of_property_read_u32(np, "marvell,88pm860x-iset", | 178 | of_property_read_u32(np, "marvell,88pm860x-iset", |
| 179 | &iset); | 179 | &iset); |
| 180 | data->iset = PM8606_WLED_CURRENT(iset); | 180 | data->iset = PM8606_WLED_CURRENT(iset); |
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index f9ef0673a083..feb90764a811 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c | |||
| @@ -30,6 +30,7 @@ struct pwm_bl_data { | |||
| 30 | struct device *dev; | 30 | struct device *dev; |
| 31 | unsigned int lth_brightness; | 31 | unsigned int lth_brightness; |
| 32 | unsigned int *levels; | 32 | unsigned int *levels; |
| 33 | bool enabled; | ||
| 33 | struct regulator *power_supply; | 34 | struct regulator *power_supply; |
| 34 | struct gpio_desc *enable_gpio; | 35 | struct gpio_desc *enable_gpio; |
| 35 | unsigned int scale; | 36 | unsigned int scale; |
| @@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb) | |||
| 50 | int err; | 51 | int err; |
| 51 | 52 | ||
| 52 | pwm_get_state(pb->pwm, &state); | 53 | pwm_get_state(pb->pwm, &state); |
| 53 | if (state.enabled) | 54 | if (pb->enabled) |
| 54 | return; | 55 | return; |
| 55 | 56 | ||
| 56 | err = regulator_enable(pb->power_supply); | 57 | err = regulator_enable(pb->power_supply); |
| @@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb) | |||
| 65 | 66 | ||
| 66 | if (pb->enable_gpio) | 67 | if (pb->enable_gpio) |
| 67 | gpiod_set_value_cansleep(pb->enable_gpio, 1); | 68 | gpiod_set_value_cansleep(pb->enable_gpio, 1); |
| 69 | |||
| 70 | pb->enabled = true; | ||
| 68 | } | 71 | } |
| 69 | 72 | ||
| 70 | static void pwm_backlight_power_off(struct pwm_bl_data *pb) | 73 | static void pwm_backlight_power_off(struct pwm_bl_data *pb) |
| @@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) | |||
| 72 | struct pwm_state state; | 75 | struct pwm_state state; |
| 73 | 76 | ||
| 74 | pwm_get_state(pb->pwm, &state); | 77 | pwm_get_state(pb->pwm, &state); |
| 75 | if (!state.enabled) | 78 | if (!pb->enabled) |
| 76 | return; | 79 | return; |
| 77 | 80 | ||
| 78 | if (pb->enable_gpio) | 81 | if (pb->enable_gpio) |
| @@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) | |||
| 86 | pwm_apply_state(pb->pwm, &state); | 89 | pwm_apply_state(pb->pwm, &state); |
| 87 | 90 | ||
| 88 | regulator_disable(pb->power_supply); | 91 | regulator_disable(pb->power_supply); |
| 92 | pb->enabled = false; | ||
| 89 | } | 93 | } |
| 90 | 94 | ||
| 91 | static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) | 95 | static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) |
| @@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev, | |||
| 269 | memset(data, 0, sizeof(*data)); | 273 | memset(data, 0, sizeof(*data)); |
| 270 | 274 | ||
| 271 | /* | 275 | /* |
| 276 | * These values are optional and set as 0 by default, the out values | ||
| 277 | * are modified only if a valid u32 value can be decoded. | ||
| 278 | */ | ||
| 279 | of_property_read_u32(node, "post-pwm-on-delay-ms", | ||
| 280 | &data->post_pwm_on_delay); | ||
| 281 | of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); | ||
| 282 | |||
| 283 | data->enable_gpio = -EINVAL; | ||
| 284 | |||
| 285 | /* | ||
| 272 | * Determine the number of brightness levels, if this property is not | 286 | * Determine the number of brightness levels, if this property is not |
| 273 | * set a default table of brightness levels will be used. | 287 | * set a default table of brightness levels will be used. |
| 274 | */ | 288 | */ |
| @@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev, | |||
| 380 | data->max_brightness--; | 394 | data->max_brightness--; |
| 381 | } | 395 | } |
| 382 | 396 | ||
| 383 | /* | ||
| 384 | * These values are optional and set as 0 by default, the out values | ||
| 385 | * are modified only if a valid u32 value can be decoded. | ||
| 386 | */ | ||
| 387 | of_property_read_u32(node, "post-pwm-on-delay-ms", | ||
| 388 | &data->post_pwm_on_delay); | ||
| 389 | of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); | ||
| 390 | |||
| 391 | data->enable_gpio = -EINVAL; | ||
| 392 | return 0; | 397 | return 0; |
| 393 | } | 398 | } |
| 394 | 399 | ||
| @@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) | |||
| 483 | pb->check_fb = data->check_fb; | 488 | pb->check_fb = data->check_fb; |
| 484 | pb->exit = data->exit; | 489 | pb->exit = data->exit; |
| 485 | pb->dev = &pdev->dev; | 490 | pb->dev = &pdev->dev; |
| 491 | pb->enabled = false; | ||
| 486 | pb->post_pwm_on_delay = data->post_pwm_on_delay; | 492 | pb->post_pwm_on_delay = data->post_pwm_on_delay; |
| 487 | pb->pwm_off_delay = data->pwm_off_delay; | 493 | pb->pwm_off_delay = data->pwm_off_delay; |
| 488 | 494 | ||
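The backlight now tracks its own pb->enabled flag instead of asking the PWM core whether the output is enabled, so pwm_backlight_power_on() and pwm_backlight_power_off() act on the driver's notion of state even if firmware or the bootloader left the PWM running before probe. The device-tree change moves the optional post-pwm-on-delay-ms and pwm-off-delay-ms reads, together with the enable_gpio sentinel, to the top of pwm_backlight_parse_dt(), presumably so those fields are also initialized on the path that returns early when no brightness-levels table is present.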
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 09731b2f6815..c6b3bdbbdbc9 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
| @@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count) | |||
| 271 | 271 | ||
| 272 | static void vgacon_restore_screen(struct vc_data *c) | 272 | static void vgacon_restore_screen(struct vc_data *c) |
| 273 | { | 273 | { |
| 274 | c->vc_origin = c->vc_visible_origin; | ||
| 274 | vgacon_scrollback_cur->save = 0; | 275 | vgacon_scrollback_cur->save = 0; |
| 275 | 276 | ||
| 276 | if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { | 277 | if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { |
| @@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | |||
| 287 | int start, end, count, soff; | 288 | int start, end, count, soff; |
| 288 | 289 | ||
| 289 | if (!lines) { | 290 | if (!lines) { |
| 290 | c->vc_visible_origin = c->vc_origin; | 291 | vgacon_restore_screen(c); |
| 291 | vga_set_mem_top(c); | ||
| 292 | return; | 292 | return; |
| 293 | } | 293 | } |
| 294 | 294 | ||
| @@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | |||
| 298 | if (!vgacon_scrollback_cur->save) { | 298 | if (!vgacon_scrollback_cur->save) { |
| 299 | vgacon_cursor(c, CM_ERASE); | 299 | vgacon_cursor(c, CM_ERASE); |
| 300 | vgacon_save_screen(c); | 300 | vgacon_save_screen(c); |
| 301 | c->vc_origin = (unsigned long)c->vc_screenbuf; | ||
| 301 | vgacon_scrollback_cur->save = 1; | 302 | vgacon_scrollback_cur->save = 1; |
| 302 | } | 303 | } |
| 303 | 304 | ||
| @@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | |||
| 335 | int copysize; | 336 | int copysize; |
| 336 | 337 | ||
| 337 | int diff = c->vc_rows - count; | 338 | int diff = c->vc_rows - count; |
| 338 | void *d = (void *) c->vc_origin; | 339 | void *d = (void *) c->vc_visible_origin; |
| 339 | void *s = (void *) c->vc_screenbuf; | 340 | void *s = (void *) c->vc_screenbuf; |
| 340 | 341 | ||
| 341 | count *= c->vc_size_row; | 342 | count *= c->vc_size_row; |
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8976190b6c1f..bfa1360ec750 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c | |||
| @@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt) | |||
| 510 | continue; | 510 | continue; |
| 511 | } | 511 | } |
| 512 | #endif | 512 | #endif |
| 513 | |||
| 514 | if (!strncmp(options, "logo-pos:", 9)) { | ||
| 515 | options += 9; | ||
| 516 | if (!strcmp(options, "center")) | ||
| 517 | fb_center_logo = true; | ||
| 518 | continue; | ||
| 519 | } | ||
| 513 | } | 520 | } |
| 514 | return 1; | 521 | return 1; |
| 515 | } | 522 | } |
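This replaces the build-time CONFIG_FB_LOGO_CENTER option (removed from drivers/video/logo/Kconfig further down) with a runtime switch: booting with the console option fbcon=logo-pos:center sets the new fb_center_logo flag, and fbmem.c below tests that flag instead of the old #ifdef blocks when positioning the boot logo.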
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 558ed2ed3124..cb43a2258c51 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c | |||
| @@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb); | |||
| 53 | int num_registered_fb __read_mostly; | 53 | int num_registered_fb __read_mostly; |
| 54 | EXPORT_SYMBOL(num_registered_fb); | 54 | EXPORT_SYMBOL(num_registered_fb); |
| 55 | 55 | ||
| 56 | bool fb_center_logo __read_mostly; | ||
| 57 | EXPORT_SYMBOL(fb_center_logo); | ||
| 58 | |||
| 56 | static struct fb_info *get_fb_info(unsigned int idx) | 59 | static struct fb_info *get_fb_info(unsigned int idx) |
| 57 | { | 60 | { |
| 58 | struct fb_info *fb_info; | 61 | struct fb_info *fb_info; |
| @@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
| 506 | fb_set_logo(info, logo, logo_new, fb_logo.depth); | 509 | fb_set_logo(info, logo, logo_new, fb_logo.depth); |
| 507 | } | 510 | } |
| 508 | 511 | ||
| 509 | #ifdef CONFIG_FB_LOGO_CENTER | 512 | if (fb_center_logo) { |
| 510 | { | ||
| 511 | int xres = info->var.xres; | 513 | int xres = info->var.xres; |
| 512 | int yres = info->var.yres; | 514 | int yres = info->var.yres; |
| 513 | 515 | ||
| @@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
| 520 | --n; | 522 | --n; |
| 521 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; | 523 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; |
| 522 | image.dy = y ?: (yres - logo->height) / 2; | 524 | image.dy = y ?: (yres - logo->height) / 2; |
| 525 | } else { | ||
| 526 | image.dx = 0; | ||
| 527 | image.dy = y; | ||
| 523 | } | 528 | } |
| 524 | #else | 529 | |
| 525 | image.dx = 0; | ||
| 526 | image.dy = y; | ||
| 527 | #endif | ||
| 528 | image.width = logo->width; | 530 | image.width = logo->width; |
| 529 | image.height = logo->height; | 531 | image.height = logo->height; |
| 530 | 532 | ||
| @@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate) | |||
| 684 | } | 686 | } |
| 685 | 687 | ||
| 686 | height = fb_logo.logo->height; | 688 | height = fb_logo.logo->height; |
| 687 | #ifdef CONFIG_FB_LOGO_CENTER | 689 | if (fb_center_logo) |
| 688 | height += (yres - fb_logo.logo->height) / 2; | 690 | height += (yres - fb_logo.logo->height) / 2; |
| 689 | #endif | ||
| 690 | 691 | ||
| 691 | return fb_prepare_extra_logos(info, height, yres); | 692 | return fb_prepare_extra_logos(info, height, yres); |
| 692 | } | 693 | } |
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 31f769d67195..057d3cdef92e 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c | |||
| @@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, | |||
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, | 320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, |
| 321 | const char *name, unsigned long address) | 321 | unsigned long address) |
| 322 | { | 322 | { |
| 323 | struct offb_par *par = (struct offb_par *) info->par; | 323 | struct offb_par *par = (struct offb_par *) info->par; |
| 324 | 324 | ||
| 325 | if (dp && !strncmp(name, "ATY,Rage128", 11)) { | 325 | if (of_node_name_prefix(dp, "ATY,Rage128")) { |
| 326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 327 | if (par->cmap_adr) | 327 | if (par->cmap_adr) |
| 328 | par->cmap_type = cmap_r128; | 328 | par->cmap_type = cmap_r128; |
| 329 | } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) | 329 | } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || |
| 330 | || !strncmp(name, "ATY,RageM3p12A", 14))) { | 330 | of_node_name_prefix(dp, "ATY,RageM3p12A")) { |
| 331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 332 | if (par->cmap_adr) | 332 | if (par->cmap_adr) |
| 333 | par->cmap_type = cmap_M3A; | 333 | par->cmap_type = cmap_M3A; |
| 334 | } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { | 334 | } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { |
| 335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 336 | if (par->cmap_adr) | 336 | if (par->cmap_adr) |
| 337 | par->cmap_type = cmap_M3B; | 337 | par->cmap_type = cmap_M3B; |
| 338 | } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { | 338 | } else if (of_node_name_prefix(dp, "ATY,Rage6")) { |
| 339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); | 339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); |
| 340 | if (par->cmap_adr) | 340 | if (par->cmap_adr) |
| 341 | par->cmap_type = cmap_radeon; | 341 | par->cmap_type = cmap_radeon; |
| 342 | } else if (!strncmp(name, "ATY,", 4)) { | 342 | } else if (of_node_name_prefix(dp, "ATY,")) { |
| 343 | unsigned long base = address & 0xff000000UL; | 343 | unsigned long base = address & 0xff000000UL; |
| 344 | par->cmap_adr = | 344 | par->cmap_adr = |
| 345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; | 345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; |
| @@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp | |||
| 350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); | 350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); |
| 351 | if (par->cmap_adr) | 351 | if (par->cmap_adr) |
| 352 | par->cmap_type = cmap_gxt2000; | 352 | par->cmap_type = cmap_gxt2000; |
| 353 | } else if (dp && !strncmp(name, "vga,Display-", 12)) { | 353 | } else if (of_node_name_prefix(dp, "vga,Display-")) { |
| 354 | /* Look for AVIVO initialized by SLOF */ | 354 | /* Look for AVIVO initialized by SLOF */ |
| 355 | struct device_node *pciparent = of_get_parent(dp); | 355 | struct device_node *pciparent = of_get_parent(dp); |
| 356 | const u32 *vid, *did; | 356 | const u32 *vid, *did; |
| @@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, | |||
| 438 | 438 | ||
| 439 | par->cmap_type = cmap_unknown; | 439 | par->cmap_type = cmap_unknown; |
| 440 | if (depth == 8) | 440 | if (depth == 8) |
| 441 | offb_init_palette_hacks(info, dp, name, address); | 441 | offb_init_palette_hacks(info, dp, address); |
| 442 | else | 442 | else |
| 443 | fix->visual = FB_VISUAL_TRUECOLOR; | 443 | fix->visual = FB_VISUAL_TRUECOLOR; |
| 444 | 444 | ||
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 53f93616c671..8e23160ec59f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | |||
| @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) | |||
| 609 | 609 | ||
| 610 | int r = 0; | 610 | int r = 0; |
| 611 | 611 | ||
| 612 | memset(&p, 0, sizeof(p)); | ||
| 613 | |||
| 612 | switch (cmd) { | 614 | switch (cmd) { |
| 613 | case OMAPFB_SYNC_GFX: | 615 | case OMAPFB_SYNC_GFX: |
| 614 | DBG("ioctl SYNC_GFX\n"); | 616 | DBG("ioctl SYNC_GFX\n"); |
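Zeroing the ioctl parameter union up front matters because several of the OMAPFB_* cases copy one of its members back to userspace; without the memset, padding and unused bytes of the on-stack union could leak kernel stack contents. The general shape of the fix, with made-up names:

union { struct small s; struct large l; } p;

memset(&p, 0, sizeof(p));	/* no uninitialized stack bytes reach userspace */
p.s.value = compute_value();
if (copy_to_user(uarg, &p.s, sizeof(p.s)))
	return -EFAULT;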
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig index 1e972c4e88b1..d1f6196c8b9a 100644 --- a/drivers/video/logo/Kconfig +++ b/drivers/video/logo/Kconfig | |||
| @@ -10,15 +10,6 @@ menuconfig LOGO | |||
| 10 | 10 | ||
| 11 | if LOGO | 11 | if LOGO |
| 12 | 12 | ||
| 13 | config FB_LOGO_CENTER | ||
| 14 | bool "Center the logo" | ||
| 15 | depends on FB=y | ||
| 16 | help | ||
| 17 | When this option is selected, the bootup logo is centered both | ||
| 18 | horizontally and vertically. If more than one logo is displayed | ||
| 19 | due to multiple CPUs, the collected line of logos is centered | ||
| 20 | as a whole. | ||
| 21 | |||
| 22 | config FB_LOGO_EXTRA | 13 | config FB_LOGO_EXTRA |
| 23 | bool | 14 | bool |
| 24 | depends on FB=y | 15 | depends on FB=y |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 728ecd1eea30..fb12fe205f86 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -61,6 +61,10 @@ enum virtio_balloon_vq { | |||
| 61 | VIRTIO_BALLOON_VQ_MAX | 61 | VIRTIO_BALLOON_VQ_MAX |
| 62 | }; | 62 | }; |
| 63 | 63 | ||
| 64 | enum virtio_balloon_config_read { | ||
| 65 | VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, | ||
| 66 | }; | ||
| 67 | |||
| 64 | struct virtio_balloon { | 68 | struct virtio_balloon { |
| 65 | struct virtio_device *vdev; | 69 | struct virtio_device *vdev; |
| 66 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; | 70 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; |
| @@ -77,14 +81,20 @@ struct virtio_balloon { | |||
| 77 | /* Prevent updating balloon when it is being canceled. */ | 81 | /* Prevent updating balloon when it is being canceled. */ |
| 78 | spinlock_t stop_update_lock; | 82 | spinlock_t stop_update_lock; |
| 79 | bool stop_update; | 83 | bool stop_update; |
| 84 | /* Bitmap to indicate if reading the related config fields are needed */ | ||
| 85 | unsigned long config_read_bitmap; | ||
| 80 | 86 | ||
| 81 | /* The list of allocated free pages, waiting to be given back to mm */ | 87 | /* The list of allocated free pages, waiting to be given back to mm */ |
| 82 | struct list_head free_page_list; | 88 | struct list_head free_page_list; |
| 83 | spinlock_t free_page_list_lock; | 89 | spinlock_t free_page_list_lock; |
| 84 | /* The number of free page blocks on the above list */ | 90 | /* The number of free page blocks on the above list */ |
| 85 | unsigned long num_free_page_blocks; | 91 | unsigned long num_free_page_blocks; |
| 86 | /* The cmd id received from host */ | 92 | /* |
| 87 | u32 cmd_id_received; | 93 | * The cmd id received from host. |
| 94 | * Read it via virtio_balloon_cmd_id_received to get the latest value | ||
| 95 | * sent from host. | ||
| 96 | */ | ||
| 97 | u32 cmd_id_received_cache; | ||
| 88 | /* The cmd id that is actively in use */ | 98 | /* The cmd id that is actively in use */ |
| 89 | __virtio32 cmd_id_active; | 99 | __virtio32 cmd_id_active; |
| 90 | /* Buffer to store the stop sign */ | 100 | /* Buffer to store the stop sign */ |
| @@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, | |||
| 390 | return num_returned; | 400 | return num_returned; |
| 391 | } | 401 | } |
| 392 | 402 | ||
| 403 | static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) | ||
| 404 | { | ||
| 405 | if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) | ||
| 406 | return; | ||
| 407 | |||
| 408 | /* No need to queue the work if the bit was already set. */ | ||
| 409 | if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
| 410 | &vb->config_read_bitmap)) | ||
| 411 | return; | ||
| 412 | |||
| 413 | queue_work(vb->balloon_wq, &vb->report_free_page_work); | ||
| 414 | } | ||
| 415 | |||
| 393 | static void virtballoon_changed(struct virtio_device *vdev) | 416 | static void virtballoon_changed(struct virtio_device *vdev) |
| 394 | { | 417 | { |
| 395 | struct virtio_balloon *vb = vdev->priv; | 418 | struct virtio_balloon *vb = vdev->priv; |
| 396 | unsigned long flags; | 419 | unsigned long flags; |
| 397 | s64 diff = towards_target(vb); | ||
| 398 | |||
| 399 | if (diff) { | ||
| 400 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
| 401 | if (!vb->stop_update) | ||
| 402 | queue_work(system_freezable_wq, | ||
| 403 | &vb->update_balloon_size_work); | ||
| 404 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 405 | } | ||
| 406 | 420 | ||
| 407 | if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { | 421 | spin_lock_irqsave(&vb->stop_update_lock, flags); |
| 408 | virtio_cread(vdev, struct virtio_balloon_config, | 422 | if (!vb->stop_update) { |
| 409 | free_page_report_cmd_id, &vb->cmd_id_received); | 423 | queue_work(system_freezable_wq, |
| 410 | if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | 424 | &vb->update_balloon_size_work); |
| 411 | /* Pass ULONG_MAX to give back all the free pages */ | 425 | virtio_balloon_queue_free_page_work(vb); |
| 412 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
| 413 | } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
| 414 | vb->cmd_id_received != | ||
| 415 | virtio32_to_cpu(vdev, vb->cmd_id_active)) { | ||
| 416 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
| 417 | if (!vb->stop_update) { | ||
| 418 | queue_work(vb->balloon_wq, | ||
| 419 | &vb->report_free_page_work); | ||
| 420 | } | ||
| 421 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 422 | } | ||
| 423 | } | 426 | } |
| 427 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 424 | } | 428 | } |
| 425 | 429 | ||
| 426 | static void update_balloon_size(struct virtio_balloon *vb) | 430 | static void update_balloon_size(struct virtio_balloon *vb) |
| @@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) | |||
| 527 | return 0; | 531 | return 0; |
| 528 | } | 532 | } |
| 529 | 533 | ||
| 534 | static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) | ||
| 535 | { | ||
| 536 | if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
| 537 | &vb->config_read_bitmap)) | ||
| 538 | virtio_cread(vb->vdev, struct virtio_balloon_config, | ||
| 539 | free_page_report_cmd_id, | ||
| 540 | &vb->cmd_id_received_cache); | ||
| 541 | |||
| 542 | return vb->cmd_id_received_cache; | ||
| 543 | } | ||
| 544 | |||
| 530 | static int send_cmd_id_start(struct virtio_balloon *vb) | 545 | static int send_cmd_id_start(struct virtio_balloon *vb) |
| 531 | { | 546 | { |
| 532 | struct scatterlist sg; | 547 | struct scatterlist sg; |
| @@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) | |||
| 537 | while (virtqueue_get_buf(vq, &unused)) | 552 | while (virtqueue_get_buf(vq, &unused)) |
| 538 | ; | 553 | ; |
| 539 | 554 | ||
| 540 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); | 555 | vb->cmd_id_active = virtio32_to_cpu(vb->vdev, |
| 556 | virtio_balloon_cmd_id_received(vb)); | ||
| 541 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); | 557 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); |
| 542 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); | 558 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); |
| 543 | if (!err) | 559 | if (!err) |
| @@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
| 620 | * stop the reporting. | 636 | * stop the reporting. |
| 621 | */ | 637 | */ |
| 622 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); | 638 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); |
| 623 | if (cmd_id_active != vb->cmd_id_received) | 639 | if (unlikely(cmd_id_active != |
| 640 | virtio_balloon_cmd_id_received(vb))) | ||
| 624 | break; | 641 | break; |
| 625 | 642 | ||
| 626 | /* | 643 | /* |
| @@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
| 637 | return 0; | 654 | return 0; |
| 638 | } | 655 | } |
| 639 | 656 | ||
| 640 | static void report_free_page_func(struct work_struct *work) | 657 | static void virtio_balloon_report_free_page(struct virtio_balloon *vb) |
| 641 | { | 658 | { |
| 642 | int err; | 659 | int err; |
| 643 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
| 644 | report_free_page_work); | ||
| 645 | struct device *dev = &vb->vdev->dev; | 660 | struct device *dev = &vb->vdev->dev; |
| 646 | 661 | ||
| 647 | /* Start by sending the received cmd id to host with an outbuf. */ | 662 | /* Start by sending the received cmd id to host with an outbuf. */ |
| @@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) | |||
| 659 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); | 674 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); |
| 660 | } | 675 | } |
| 661 | 676 | ||
| 677 | static void report_free_page_func(struct work_struct *work) | ||
| 678 | { | ||
| 679 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
| 680 | report_free_page_work); | ||
| 681 | u32 cmd_id_received; | ||
| 682 | |||
| 683 | cmd_id_received = virtio_balloon_cmd_id_received(vb); | ||
| 684 | if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | ||
| 685 | /* Pass ULONG_MAX to give back all the free pages */ | ||
| 686 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
| 687 | } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
| 688 | cmd_id_received != | ||
| 689 | virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { | ||
| 690 | virtio_balloon_report_free_page(vb); | ||
| 691 | } | ||
| 692 | } | ||
| 693 | |||
| 662 | #ifdef CONFIG_BALLOON_COMPACTION | 694 | #ifdef CONFIG_BALLOON_COMPACTION |
| 663 | /* | 695 | /* |
| 664 | * virtballoon_migratepage - perform the balloon page migration on behalf of | 696 | * virtballoon_migratepage - perform the balloon page migration on behalf of |
| @@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
| 885 | goto out_del_vqs; | 917 | goto out_del_vqs; |
| 886 | } | 918 | } |
| 887 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); | 919 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); |
| 888 | vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; | 920 | vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; |
| 889 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, | 921 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, |
| 890 | VIRTIO_BALLOON_CMD_ID_STOP); | 922 | VIRTIO_BALLOON_CMD_ID_STOP); |
| 891 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, | 923 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, |
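The config-changed path is reorganized so that virtballoon_changed() never reads device config itself: it queues the size-update work and, if VIRTIO_BALLOON_F_FREE_PAGE_HINT was negotiated, marks the free-page command id as stale in config_read_bitmap and queues the reporting work. The worker then fetches free_page_report_cmd_id lazily through virtio_balloon_cmd_id_received(), which re-reads the config only while the bit is set and otherwise returns the cached cmd_id_received_cache. A generic sketch of that deferred-read pattern, names illustrative:

/* notifier side: mark the field dirty and kick the worker once */
if (!test_and_set_bit(EXAMPLE_CFG_DIRTY, &cfg_bitmap))
	queue_work(wq, &cfg_work);

/* worker side: touch the device only when something actually changed */
if (test_and_clear_bit(EXAMPLE_CFG_DIRTY, &cfg_bitmap))
	cfg_cache = example_read_device_config();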
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 4cd9ea5c75be..d9dd0f789279 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
| @@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 468 | { | 468 | { |
| 469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
| 470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); | 470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); |
| 471 | int i, err; | 471 | int i, err, queue_idx = 0; |
| 472 | 472 | ||
| 473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, | 473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, |
| 474 | dev_name(&vdev->dev), vm_dev); | 474 | dev_name(&vdev->dev), vm_dev); |
| @@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 476 | return err; | 476 | return err; |
| 477 | 477 | ||
| 478 | for (i = 0; i < nvqs; ++i) { | 478 | for (i = 0; i < nvqs; ++i) { |
| 479 | vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], | 479 | if (!names[i]) { |
| 480 | vqs[i] = NULL; | ||
| 481 | continue; | ||
| 482 | } | ||
| 483 | |||
| 484 | vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
| 480 | ctx ? ctx[i] : false); | 485 | ctx ? ctx[i] : false); |
| 481 | if (IS_ERR(vqs[i])) { | 486 | if (IS_ERR(vqs[i])) { |
| 482 | vm_del_vqs(vdev); | 487 | vm_del_vqs(vdev); |
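vm_find_vqs() now tolerates NULL entries in names[]: such queues get vqs[i] = NULL and do not consume a hardware queue index, because queue_idx only advances for queues that are actually set up. The same queue_idx handling is applied to the MSI-X and INTx paths in virtio_pci_common.c below, which keeps device-side queue numbering contiguous when a driver (for example the balloon with its optional free-page hinting queue) declares a virtqueue it does not use.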
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 465a6f5142cc..d0584c040c60 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 285 | { | 285 | { |
| 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 287 | u16 msix_vec; | 287 | u16 msix_vec; |
| 288 | int i, err, nvectors, allocated_vectors; | 288 | int i, err, nvectors, allocated_vectors, queue_idx = 0; |
| 289 | 289 | ||
| 290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
| 291 | if (!vp_dev->vqs) | 291 | if (!vp_dev->vqs) |
| @@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 321 | msix_vec = allocated_vectors++; | 321 | msix_vec = allocated_vectors++; |
| 322 | else | 322 | else |
| 323 | msix_vec = VP_MSIX_VQ_VECTOR; | 323 | msix_vec = VP_MSIX_VQ_VECTOR; |
| 324 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 324 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
| 325 | ctx ? ctx[i] : false, | 325 | ctx ? ctx[i] : false, |
| 326 | msix_vec); | 326 | msix_vec); |
| 327 | if (IS_ERR(vqs[i])) { | 327 | if (IS_ERR(vqs[i])) { |
| @@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
| 356 | const char * const names[], const bool *ctx) | 356 | const char * const names[], const bool *ctx) |
| 357 | { | 357 | { |
| 358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 359 | int i, err; | 359 | int i, err, queue_idx = 0; |
| 360 | 360 | ||
| 361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
| 362 | if (!vp_dev->vqs) | 362 | if (!vp_dev->vqs) |
| @@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
| 374 | vqs[i] = NULL; | 374 | vqs[i] = NULL; |
| 375 | continue; | 375 | continue; |
| 376 | } | 376 | } |
| 377 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 377 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
| 378 | ctx ? ctx[i] : false, | 378 | ctx ? ctx[i] : false, |
| 379 | VIRTIO_MSI_NO_VECTOR); | 379 | VIRTIO_MSI_NO_VECTOR); |
| 380 | if (IS_ERR(vqs[i])) { | 380 | if (IS_ERR(vqs[i])) { |
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 5c4a764717c4..81208cd3f4ec 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/watchdog.h> | 17 | #include <linux/watchdog.h> |
| 18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/mod_devicetable.h> | ||
| 20 | 21 | ||
| 21 | #include <asm/mach-ralink/ralink_regs.h> | 22 | #include <asm/mach-ralink/ralink_regs.h> |
| 22 | 23 | ||
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 98967f0a7d10..db7c57d82cfd 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/watchdog.h> | 18 | #include <linux/watchdog.h> |
| 19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
| 20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
| 21 | #include <linux/mod_devicetable.h> | ||
| 21 | 22 | ||
| 22 | #include <asm/mach-ralink/ralink_regs.h> | 23 | #include <asm/mach-ralink/ralink_regs.h> |
| 23 | 24 | ||
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c index 0d3a0fbbd7a5..52941207a12a 100644 --- a/drivers/watchdog/tqmx86_wdt.c +++ b/drivers/watchdog/tqmx86_wdt.c | |||
| @@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev) | |||
| 79 | return -ENOMEM; | 79 | return -ENOMEM; |
| 80 | 80 | ||
| 81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
| 82 | if (IS_ERR(res)) | 82 | if (!res) |
| 83 | return PTR_ERR(res); | 83 | return -ENODEV; |
| 84 | 84 | ||
| 85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, | 85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, |
| 86 | resource_size(res)); | 86 | resource_size(res)); |
| 87 | if (IS_ERR(priv->io_base)) | 87 | if (!priv->io_base) |
| 88 | return PTR_ERR(priv->io_base); | 88 | return -ENOMEM; |
| 89 | 89 | ||
| 90 | watchdog_set_drvdata(&priv->wdd, priv); | 90 | watchdog_set_drvdata(&priv->wdd, priv); |
| 91 | 91 | ||
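IS_ERR() and PTR_ERR() are only meaningful for interfaces that encode failures as ERR_PTR() values; platform_get_resource() and devm_ioport_map() both return NULL on failure, so the old checks could never fire and a failure would have led to a NULL dereference later instead of a clean error return. The fix tests for NULL and chooses explicit error codes. For contrast, a sketch of the two conventions side by side (the clock call is only an example of an ERR_PTR-style API, not part of this driver):

/* ERR_PTR-style API: the failure is encoded in the pointer itself */
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
	return PTR_ERR(clk);

/* NULL-style API: failure is NULL, pick the errno yourself */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
	return -ENODEV;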
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 93194f3e7540..117e76b2f939 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -1650,7 +1650,7 @@ void xen_callback_vector(void) | |||
| 1650 | xen_have_vector_callback = 0; | 1650 | xen_have_vector_callback = 0; |
| 1651 | return; | 1651 | return; |
| 1652 | } | 1652 | } |
| 1653 | pr_info("Xen HVM callback vector for event delivery is enabled\n"); | 1653 | pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); |
| 1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, | 1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
| 1655 | xen_hvm_callback_vector); | 1655 | xen_hvm_callback_vector); |
| 1656 | } | 1656 | } |
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 2e5d845b5091..7aa64d1b119c 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
| @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque) | |||
| 160 | 160 | ||
| 161 | /* write the data, then modify the indexes */ | 161 | /* write the data, then modify the indexes */ |
| 162 | virt_wmb(); | 162 | virt_wmb(); |
| 163 | if (ret < 0) | 163 | if (ret < 0) { |
| 164 | atomic_set(&map->read, 0); | ||
| 164 | intf->in_error = ret; | 165 | intf->in_error = ret; |
| 165 | else | 166 | } else |
| 166 | intf->in_prod = prod + ret; | 167 | intf->in_prod = prod + ret; |
| 167 | /* update the indexes, then notify the other end */ | 168 | /* update the indexes, then notify the other end */ |
| 168 | virt_wmb(); | 169 | virt_wmb(); |
| @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev, | |||
| 282 | static void pvcalls_sk_state_change(struct sock *sock) | 283 | static void pvcalls_sk_state_change(struct sock *sock) |
| 283 | { | 284 | { |
| 284 | struct sock_mapping *map = sock->sk_user_data; | 285 | struct sock_mapping *map = sock->sk_user_data; |
| 285 | struct pvcalls_data_intf *intf; | ||
| 286 | 286 | ||
| 287 | if (map == NULL) | 287 | if (map == NULL) |
| 288 | return; | 288 | return; |
| 289 | 289 | ||
| 290 | intf = map->ring; | 290 | atomic_inc(&map->read); |
| 291 | intf->in_error = -ENOTCONN; | ||
| 292 | notify_remote_via_irq(map->irq); | 291 | notify_remote_via_irq(map->irq); |
| 293 | } | 292 | } |
| 294 | 293 | ||
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 77224d8f3e6f..8a249c95c193 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
| @@ -31,6 +31,12 @@ | |||
| 31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) | 31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) |
| 32 | #define PVCALLS_FRONT_MAX_SPIN 5000 | 32 | #define PVCALLS_FRONT_MAX_SPIN 5000 |
| 33 | 33 | ||
| 34 | static struct proto pvcalls_proto = { | ||
| 35 | .name = "PVCalls", | ||
| 36 | .owner = THIS_MODULE, | ||
| 37 | .obj_size = sizeof(struct sock), | ||
| 38 | }; | ||
| 39 | |||
| 34 | struct pvcalls_bedata { | 40 | struct pvcalls_bedata { |
| 35 | struct xen_pvcalls_front_ring ring; | 41 | struct xen_pvcalls_front_ring ring; |
| 36 | grant_ref_t ref; | 42 | grant_ref_t ref; |
| @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock) | |||
| 335 | return ret; | 341 | return ret; |
| 336 | } | 342 | } |
| 337 | 343 | ||
| 344 | static void free_active_ring(struct sock_mapping *map) | ||
| 345 | { | ||
| 346 | if (!map->active.ring) | ||
| 347 | return; | ||
| 348 | |||
| 349 | free_pages((unsigned long)map->active.data.in, | ||
| 350 | map->active.ring->ring_order); | ||
| 351 | free_page((unsigned long)map->active.ring); | ||
| 352 | } | ||
| 353 | |||
| 354 | static int alloc_active_ring(struct sock_mapping *map) | ||
| 355 | { | ||
| 356 | void *bytes; | ||
| 357 | |||
| 358 | map->active.ring = (struct pvcalls_data_intf *) | ||
| 359 | get_zeroed_page(GFP_KERNEL); | ||
| 360 | if (!map->active.ring) | ||
| 361 | goto out; | ||
| 362 | |||
| 363 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
| 364 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 365 | PVCALLS_RING_ORDER); | ||
| 366 | if (!bytes) | ||
| 367 | goto out; | ||
| 368 | |||
| 369 | map->active.data.in = bytes; | ||
| 370 | map->active.data.out = bytes + | ||
| 371 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
| 372 | |||
| 373 | return 0; | ||
| 374 | |||
| 375 | out: | ||
| 376 | free_active_ring(map); | ||
| 377 | return -ENOMEM; | ||
| 378 | } | ||
| 379 | |||
| 338 | static int create_active(struct sock_mapping *map, int *evtchn) | 380 | static int create_active(struct sock_mapping *map, int *evtchn) |
| 339 | { | 381 | { |
| 340 | void *bytes; | 382 | void *bytes; |
| @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 343 | *evtchn = -1; | 385 | *evtchn = -1; |
| 344 | init_waitqueue_head(&map->active.inflight_conn_req); | 386 | init_waitqueue_head(&map->active.inflight_conn_req); |
| 345 | 387 | ||
| 346 | map->active.ring = (struct pvcalls_data_intf *) | 388 | bytes = map->active.data.in; |
| 347 | __get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
| 348 | if (map->active.ring == NULL) | ||
| 349 | goto out_error; | ||
| 350 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
| 351 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 352 | PVCALLS_RING_ORDER); | ||
| 353 | if (bytes == NULL) | ||
| 354 | goto out_error; | ||
| 355 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) | 389 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) |
| 356 | map->active.ring->ref[i] = gnttab_grant_foreign_access( | 390 | map->active.ring->ref[i] = gnttab_grant_foreign_access( |
| 357 | pvcalls_front_dev->otherend_id, | 391 | pvcalls_front_dev->otherend_id, |
| @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 361 | pvcalls_front_dev->otherend_id, | 395 | pvcalls_front_dev->otherend_id, |
| 362 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); | 396 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); |
| 363 | 397 | ||
| 364 | map->active.data.in = bytes; | ||
| 365 | map->active.data.out = bytes + | ||
| 366 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
| 367 | |||
| 368 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); | 398 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); |
| 369 | if (ret) | 399 | if (ret) |
| 370 | goto out_error; | 400 | goto out_error; |
| @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 385 | out_error: | 415 | out_error: |
| 386 | if (*evtchn >= 0) | 416 | if (*evtchn >= 0) |
| 387 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); | 417 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); |
| 388 | free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); | ||
| 389 | free_page((unsigned long)map->active.ring); | ||
| 390 | return ret; | 418 | return ret; |
| 391 | } | 419 | } |
| 392 | 420 | ||
| @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, | |||
| 406 | return PTR_ERR(map); | 434 | return PTR_ERR(map); |
| 407 | 435 | ||
| 408 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 436 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
| 437 | ret = alloc_active_ring(map); | ||
| 438 | if (ret < 0) { | ||
| 439 | pvcalls_exit_sock(sock); | ||
| 440 | return ret; | ||
| 441 | } | ||
| 409 | 442 | ||
| 410 | spin_lock(&bedata->socket_lock); | 443 | spin_lock(&bedata->socket_lock); |
| 411 | ret = get_request(bedata, &req_id); | 444 | ret = get_request(bedata, &req_id); |
| 412 | if (ret < 0) { | 445 | if (ret < 0) { |
| 413 | spin_unlock(&bedata->socket_lock); | 446 | spin_unlock(&bedata->socket_lock); |
| 447 | free_active_ring(map); | ||
| 414 | pvcalls_exit_sock(sock); | 448 | pvcalls_exit_sock(sock); |
| 415 | return ret; | 449 | return ret; |
| 416 | } | 450 | } |
| 417 | ret = create_active(map, &evtchn); | 451 | ret = create_active(map, &evtchn); |
| 418 | if (ret < 0) { | 452 | if (ret < 0) { |
| 419 | spin_unlock(&bedata->socket_lock); | 453 | spin_unlock(&bedata->socket_lock); |
| 454 | free_active_ring(map); | ||
| 420 | pvcalls_exit_sock(sock); | 455 | pvcalls_exit_sock(sock); |
| 421 | return ret; | 456 | return ret; |
| 422 | } | 457 | } |
| @@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf, | |||
| 469 | virt_mb(); | 504 | virt_mb(); |
| 470 | 505 | ||
| 471 | size = pvcalls_queued(prod, cons, array_size); | 506 | size = pvcalls_queued(prod, cons, array_size); |
| 472 | if (size >= array_size) | 507 | if (size > array_size) |
| 473 | return -EINVAL; | 508 | return -EINVAL; |
| 509 | if (size == array_size) | ||
| 510 | return 0; | ||
| 474 | if (len > array_size - size) | 511 | if (len > array_size - size) |
| 475 | len = array_size - size; | 512 | len = array_size - size; |
| 476 | 513 | ||
| @@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf, | |||
| 560 | error = intf->in_error; | 597 | error = intf->in_error; |
| 561 | /* get pointers before reading from the ring */ | 598 | /* get pointers before reading from the ring */ |
| 562 | virt_rmb(); | 599 | virt_rmb(); |
| 563 | if (error < 0) | ||
| 564 | return error; | ||
| 565 | 600 | ||
| 566 | size = pvcalls_queued(prod, cons, array_size); | 601 | size = pvcalls_queued(prod, cons, array_size); |
| 567 | masked_prod = pvcalls_mask(prod, array_size); | 602 | masked_prod = pvcalls_mask(prod, array_size); |
| 568 | masked_cons = pvcalls_mask(cons, array_size); | 603 | masked_cons = pvcalls_mask(cons, array_size); |
| 569 | 604 | ||
| 570 | if (size == 0) | 605 | if (size == 0) |
| 571 | return 0; | 606 | return error ?: size; |
| 572 | 607 | ||
| 573 | if (len > size) | 608 | if (len > size) |
| 574 | len = size; | 609 | len = size; |
| @@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 780 | } | 815 | } |
| 781 | } | 816 | } |
| 782 | 817 | ||
| 783 | spin_lock(&bedata->socket_lock); | 818 | map2 = kzalloc(sizeof(*map2), GFP_KERNEL); |
| 784 | ret = get_request(bedata, &req_id); | 819 | if (map2 == NULL) { |
| 785 | if (ret < 0) { | ||
| 786 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 820 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 787 | (void *)&map->passive.flags); | 821 | (void *)&map->passive.flags); |
| 788 | spin_unlock(&bedata->socket_lock); | 822 | pvcalls_exit_sock(sock); |
| 823 | return -ENOMEM; | ||
| 824 | } | ||
| 825 | ret = alloc_active_ring(map2); | ||
| 826 | if (ret < 0) { | ||
| 827 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | ||
| 828 | (void *)&map->passive.flags); | ||
| 829 | kfree(map2); | ||
| 789 | pvcalls_exit_sock(sock); | 830 | pvcalls_exit_sock(sock); |
| 790 | return ret; | 831 | return ret; |
| 791 | } | 832 | } |
| 792 | map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); | 833 | spin_lock(&bedata->socket_lock); |
| 793 | if (map2 == NULL) { | 834 | ret = get_request(bedata, &req_id); |
| 835 | if (ret < 0) { | ||
| 794 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 836 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 795 | (void *)&map->passive.flags); | 837 | (void *)&map->passive.flags); |
| 796 | spin_unlock(&bedata->socket_lock); | 838 | spin_unlock(&bedata->socket_lock); |
| 839 | free_active_ring(map2); | ||
| 840 | kfree(map2); | ||
| 797 | pvcalls_exit_sock(sock); | 841 | pvcalls_exit_sock(sock); |
| 798 | return -ENOMEM; | 842 | return ret; |
| 799 | } | 843 | } |
| 844 | |||
| 800 | ret = create_active(map2, &evtchn); | 845 | ret = create_active(map2, &evtchn); |
| 801 | if (ret < 0) { | 846 | if (ret < 0) { |
| 847 | free_active_ring(map2); | ||
| 802 | kfree(map2); | 848 | kfree(map2); |
| 803 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 849 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 804 | (void *)&map->passive.flags); | 850 | (void *)&map->passive.flags); |
| @@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 839 | 885 | ||
| 840 | received: | 886 | received: |
| 841 | map2->sock = newsock; | 887 | map2->sock = newsock; |
| 842 | newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); | 888 | newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); |
| 843 | if (!newsock->sk) { | 889 | if (!newsock->sk) { |
| 844 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 890 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
| 845 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; | 891 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; |
| @@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock) | |||
| 1032 | spin_lock(&bedata->socket_lock); | 1078 | spin_lock(&bedata->socket_lock); |
| 1033 | list_del(&map->list); | 1079 | list_del(&map->list); |
| 1034 | spin_unlock(&bedata->socket_lock); | 1080 | spin_unlock(&bedata->socket_lock); |
| 1035 | if (READ_ONCE(map->passive.inflight_req_id) != | 1081 | if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && |
| 1036 | PVCALLS_INVALID_ID) { | 1082 | READ_ONCE(map->passive.inflight_req_id) != 0) { |
| 1037 | pvcalls_front_free_map(bedata, | 1083 | pvcalls_front_free_map(bedata, |
| 1038 | map->passive.accept_map); | 1084 | map->passive.accept_map); |
| 1039 | } | 1085 | } |
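The pvcalls-front hunks above split the active-ring allocation out of create_active() into alloc_active_ring()/free_active_ring(), so the connect and accept paths can allocate with GFP_KERNEL before taking bedata->socket_lock, and they relax the __write_ring() sanity check so a completely full ring (queued == ring size) writes zero bytes instead of returning -EINVAL. Below is a minimal user-space sketch of that index arithmetic, assuming the usual free-running 32-bit producer/consumer counters on a power-of-two ring; the names are illustrative, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Free-running counters: unsigned subtraction handles wrap-around. */
static uint32_t queued(uint32_t prod, uint32_t cons)
{
	return prod - cons;
}

/* How many bytes a writer may copy in, mirroring the __write_ring() checks. */
static int write_room(uint32_t prod, uint32_t cons, uint32_t ring_size)
{
	uint32_t size = queued(prod, cons);

	if (size > ring_size)		/* indices inconsistent: treat as corruption */
		return -22;		/* -EINVAL */
	if (size == ring_size)		/* ring legitimately full: write nothing */
		return 0;
	return (int)(ring_size - size);
}

int main(void)
{
	printf("%d\n", write_room(4096, 0, 4096));	/* full ring -> 0, not an error */
	printf("%d\n", write_room(5000, 0, 4096));	/* corrupted indices -> -EINVAL */
	printf("%d\n", write_room(100, 0, 4096));	/* 3996 bytes of room */
	return 0;
}
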
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 989cf872b98c..bb7888429be6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
| @@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
| 645 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 645 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 646 | unsigned long attrs) | 646 | unsigned long attrs) |
| 647 | { | 647 | { |
| 648 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | 648 | #ifdef CONFIG_ARM |
| 649 | if (xen_get_dma_ops(dev)->mmap) | 649 | if (xen_get_dma_ops(dev)->mmap) |
| 650 | return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, | 650 | return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, |
| 651 | dma_addr, size, attrs); | 651 | dma_addr, size, attrs); |
| @@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | |||
| 662 | void *cpu_addr, dma_addr_t handle, size_t size, | 662 | void *cpu_addr, dma_addr_t handle, size_t size, |
| 663 | unsigned long attrs) | 663 | unsigned long attrs) |
| 664 | { | 664 | { |
| 665 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | 665 | #ifdef CONFIG_ARM |
| 666 | if (xen_get_dma_ops(dev)->get_sgtable) { | 666 | if (xen_get_dma_ops(dev)->get_sgtable) { |
| 667 | #if 0 | 667 | #if 0 |
| 668 | /* | 668 | /* |
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0568fd986821..e432bd27a2e7 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
| @@ -208,7 +208,7 @@ again: | |||
| 208 | /* The new front of the queue now owns the state variables. */ | 208 | /* The new front of the queue now owns the state variables. */ |
| 209 | next = list_entry(vnode->pending_locks.next, | 209 | next = list_entry(vnode->pending_locks.next, |
| 210 | struct file_lock, fl_u.afs.link); | 210 | struct file_lock, fl_u.afs.link); |
| 211 | vnode->lock_key = afs_file_key(next->fl_file); | 211 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
| 212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
| 213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
| 214 | goto again; | 214 | goto again; |
| @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl) | |||
| 413 | /* The new front of the queue now owns the state variables. */ | 413 | /* The new front of the queue now owns the state variables. */ |
| 414 | next = list_entry(vnode->pending_locks.next, | 414 | next = list_entry(vnode->pending_locks.next, |
| 415 | struct file_lock, fl_u.afs.link); | 415 | struct file_lock, fl_u.afs.link); |
| 416 | vnode->lock_key = afs_file_key(next->fl_file); | 416 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
| 417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
| 418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
| 419 | afs_lock_may_be_available(vnode); | 419 | afs_lock_may_be_available(vnode); |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 6b17d3620414..1a4ce07fb406 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
| @@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
| 414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | 414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { |
| 415 | valid = true; | 415 | valid = true; |
| 416 | } else { | 416 | } else { |
| 417 | vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; | ||
| 418 | vnode->cb_v_break = vnode->volume->cb_v_break; | 417 | vnode->cb_v_break = vnode->volume->cb_v_break; |
| 419 | valid = false; | 418 | valid = false; |
| 420 | } | 419 | } |
| @@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode) | |||
| 546 | #endif | 545 | #endif |
| 547 | 546 | ||
| 548 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); | 547 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); |
| 548 | key_put(vnode->lock_key); | ||
| 549 | vnode->lock_key = NULL; | ||
| 549 | _leave(""); | 550 | _leave(""); |
| 550 | } | 551 | } |
| 551 | 552 | ||
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 07bc10f076aa..d443e2bfa094 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h | |||
| @@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus { | |||
| 161 | struct yfs_xdr_u64 max_quota; | 161 | struct yfs_xdr_u64 max_quota; |
| 162 | struct yfs_xdr_u64 file_quota; | 162 | struct yfs_xdr_u64 file_quota; |
| 163 | } __packed; | 163 | } __packed; |
| 164 | |||
| 165 | enum yfs_lock_type { | ||
| 166 | yfs_LockNone = -1, | ||
| 167 | yfs_LockRead = 0, | ||
| 168 | yfs_LockWrite = 1, | ||
| 169 | yfs_LockExtend = 2, | ||
| 170 | yfs_LockRelease = 3, | ||
| 171 | yfs_LockMandatoryRead = 0x100, | ||
| 172 | yfs_LockMandatoryWrite = 0x101, | ||
| 173 | yfs_LockMandatoryExtend = 0x102, | ||
| 174 | }; | ||
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a7b44863d502..2c588f9bbbda 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls; | |||
| 23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); | 23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); |
| 24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); | 24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); |
| 25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); | 25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 26 | static void afs_delete_async_call(struct work_struct *); | ||
| 26 | static void afs_process_async_call(struct work_struct *); | 27 | static void afs_process_async_call(struct work_struct *); |
| 27 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); | 28 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 28 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); | 29 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); |
| @@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call) | |||
| 203 | } | 204 | } |
| 204 | } | 205 | } |
| 205 | 206 | ||
| 207 | static struct afs_call *afs_get_call(struct afs_call *call, | ||
| 208 | enum afs_call_trace why) | ||
| 209 | { | ||
| 210 | int u = atomic_inc_return(&call->usage); | ||
| 211 | |||
| 212 | trace_afs_call(call, why, u, | ||
| 213 | atomic_read(&call->net->nr_outstanding_calls), | ||
| 214 | __builtin_return_address(0)); | ||
| 215 | return call; | ||
| 216 | } | ||
| 217 | |||
| 206 | /* | 218 | /* |
| 207 | * Queue the call for actual work. | 219 | * Queue the call for actual work. |
| 208 | */ | 220 | */ |
| 209 | static void afs_queue_call_work(struct afs_call *call) | 221 | static void afs_queue_call_work(struct afs_call *call) |
| 210 | { | 222 | { |
| 211 | if (call->type->work) { | 223 | if (call->type->work) { |
| 212 | int u = atomic_inc_return(&call->usage); | ||
| 213 | |||
| 214 | trace_afs_call(call, afs_call_trace_work, u, | ||
| 215 | atomic_read(&call->net->nr_outstanding_calls), | ||
| 216 | __builtin_return_address(0)); | ||
| 217 | |||
| 218 | INIT_WORK(&call->work, call->type->work); | 224 | INIT_WORK(&call->work, call->type->work); |
| 219 | 225 | ||
| 226 | afs_get_call(call, afs_call_trace_work); | ||
| 220 | if (!queue_work(afs_wq, &call->work)) | 227 | if (!queue_work(afs_wq, &call->work)) |
| 221 | afs_put_call(call); | 228 | afs_put_call(call); |
| 222 | } | 229 | } |
| @@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
| 398 | } | 405 | } |
| 399 | } | 406 | } |
| 400 | 407 | ||
| 408 | /* If the call is going to be asynchronous, we need an extra ref for | ||
| 409 | * the call to hold itself so the caller need not hang on to its ref. | ||
| 410 | */ | ||
| 411 | if (call->async) | ||
| 412 | afs_get_call(call, afs_call_trace_get); | ||
| 413 | |||
| 401 | /* create a call */ | 414 | /* create a call */ |
| 402 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, | 415 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, |
| 403 | (unsigned long)call, | 416 | (unsigned long)call, |
| @@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
| 438 | goto error_do_abort; | 451 | goto error_do_abort; |
| 439 | } | 452 | } |
| 440 | 453 | ||
| 441 | /* at this point, an async call may no longer exist as it may have | 454 | /* Note that at this point, we may have received the reply or an abort |
| 442 | * already completed */ | 455 | * - and an asynchronous call may already have completed. |
| 443 | if (call->async) | 456 | */ |
| 457 | if (call->async) { | ||
| 458 | afs_put_call(call); | ||
| 444 | return -EINPROGRESS; | 459 | return -EINPROGRESS; |
| 460 | } | ||
| 445 | 461 | ||
| 446 | return afs_wait_for_call_to_complete(call, ac); | 462 | return afs_wait_for_call_to_complete(call, ac); |
| 447 | 463 | ||
| 448 | error_do_abort: | 464 | error_do_abort: |
| 449 | call->state = AFS_CALL_COMPLETE; | ||
| 450 | if (ret != -ECONNABORTED) { | 465 | if (ret != -ECONNABORTED) { |
| 451 | rxrpc_kernel_abort_call(call->net->socket, rxcall, | 466 | rxrpc_kernel_abort_call(call->net->socket, rxcall, |
| 452 | RX_USER_ABORT, ret, "KSD"); | 467 | RX_USER_ABORT, ret, "KSD"); |
| @@ -463,8 +478,24 @@ error_do_abort: | |||
| 463 | error_kill_call: | 478 | error_kill_call: |
| 464 | if (call->type->done) | 479 | if (call->type->done) |
| 465 | call->type->done(call); | 480 | call->type->done(call); |
| 466 | afs_put_call(call); | 481 | |
| 482 | /* We need to dispose of the extra ref we grabbed for an async call. | ||
| 483 | * The call, however, might be queued on afs_async_calls and we need to | ||
| 484 | * make sure we don't get any more notifications that might requeue it. | ||
| 485 | */ | ||
| 486 | if (call->rxcall) { | ||
| 487 | rxrpc_kernel_end_call(call->net->socket, call->rxcall); | ||
| 488 | call->rxcall = NULL; | ||
| 489 | } | ||
| 490 | if (call->async) { | ||
| 491 | if (cancel_work_sync(&call->async_work)) | ||
| 492 | afs_put_call(call); | ||
| 493 | afs_put_call(call); | ||
| 494 | } | ||
| 495 | |||
| 467 | ac->error = ret; | 496 | ac->error = ret; |
| 497 | call->state = AFS_CALL_COMPLETE; | ||
| 498 | afs_put_call(call); | ||
| 468 | _leave(" = %d", ret); | 499 | _leave(" = %d", ret); |
| 469 | return ret; | 500 | return ret; |
| 470 | } | 501 | } |
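The rxrpc.c hunks factor the usage-count bump into afs_get_call() and give asynchronous calls an extra reference on themselves, so the error path has to cancel any queued async work (dropping the work item's reference only if the cancel succeeded) and then drop the extra async reference before releasing the caller's own. The following stand-alone sketch illustrates that pattern with hypothetical names and user-space atomics standing in for the kernel's refcounting and cancel_work_sync(); it is not the afs code itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct call {
	atomic_int usage;
	bool async;
};

static void get_call(struct call *c)
{
	atomic_fetch_add(&c->usage, 1);
}

static void put_call(struct call *c)
{
	if (atomic_fetch_sub(&c->usage, 1) == 1) {
		printf("call freed\n");
		free(c);
	}
}

/* Hypothetical stand-in for cancel_work_sync(): true if work was dequeued. */
static bool cancel_async_work(struct call *c)
{
	(void)c;
	return false;
}

static int make_call(struct call *c, bool setup_ok)
{
	if (c->async)
		get_call(c);			/* ref the call holds on itself */

	if (!setup_ok)
		goto error;

	if (c->async) {
		put_call(c);			/* caller's ref is no longer needed */
		return 0;			/* completion path owns the call now */
	}
	/* synchronous: wait for completion, then drop the caller's ref */
	put_call(c);
	return 0;

error:
	if (c->async) {
		if (cancel_async_work(c))
			put_call(c);		/* ref owned by the cancelled work item */
		put_call(c);			/* the extra async ref taken above */
	}
	put_call(c);				/* the caller's reference */
	return -1;
}

int main(void)
{
	struct call *c = calloc(1, sizeof(*c));

	atomic_init(&c->usage, 1);		/* the caller's reference */
	c->async = true;
	return make_call(c, false) == -1 ? 0 : 1;
}
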
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 95d0761cdb34..155dc14caef9 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c | |||
| @@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, | |||
| 42 | if (vldb->fs_mask[i] & type_mask) | 42 | if (vldb->fs_mask[i] & type_mask) |
| 43 | nr_servers++; | 43 | nr_servers++; |
| 44 | 44 | ||
| 45 | slist = kzalloc(sizeof(struct afs_server_list) + | 45 | slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); |
| 46 | sizeof(struct afs_server_entry) * nr_servers, | ||
| 47 | GFP_KERNEL); | ||
| 48 | if (!slist) | 46 | if (!slist) |
| 49 | goto error; | 47 | goto error; |
| 50 | 48 | ||
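The server_list.c hunk replaces the open-coded "sizeof(header) + n * sizeof(element)" with struct_size(), whose multiplication and addition are overflow-checked and saturate to SIZE_MAX, so an oversized count makes the allocation fail rather than come back undersized. A user-space approximation of that helper is sketched below; the saturating behaviour is the point, the struct names are illustrative, and the overflow checks use the GCC/Clang builtins.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
	void *server;
	unsigned long flags;
};

struct list {
	unsigned int nr;
	struct entry servers[];		/* flexible array member */
};

static size_t checked_struct_size(size_t hdr, size_t elem, size_t n)
{
	size_t bytes, total;

	if (__builtin_mul_overflow(n, elem, &bytes) ||
	    __builtin_add_overflow(hdr, bytes, &total))
		return SIZE_MAX;	/* forces the allocator to return NULL */
	return total;
}

int main(void)
{
	printf("%zu\n", checked_struct_size(offsetof(struct list, servers),
					    sizeof(struct entry), 4));
	printf("%zu\n", checked_struct_size(offsetof(struct list, servers),
					    sizeof(struct entry), SIZE_MAX));
	return 0;
}
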
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 12658c1363ae..5aa57929e8c2 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c | |||
| @@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, | |||
| 803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); | 803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); |
| 804 | bp = xdr_encode_string(bp, name, namesz); | 804 | bp = xdr_encode_string(bp, name, namesz); |
| 805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); | 805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); |
| 806 | bp = xdr_encode_u32(bp, 0); /* ViceLockType */ | 806 | bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ |
| 807 | yfs_check_req(call, bp); | 807 | yfs_check_req(call, bp); |
| 808 | 808 | ||
| 809 | afs_use_fs_server(call, fc->cbi); | 809 | afs_use_fs_server(call, fc->cbi); |
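The yfsclient.c change pairs with the new enum in protocol_yfs.h: the file-create RPC used to encode a literal 0 for ViceLockType, which in that enum is yfs_LockRead, while "no lock requested" is -1 (yfs_LockNone). A tiny sketch of the difference as it appears on the wire, assuming plain big-endian 32-bit XDR encoding:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum yfs_lock_type {
	yfs_LockNone	= -1,
	yfs_LockRead	= 0,
	yfs_LockWrite	= 1,
};

/* XDR encodes 32-bit quantities big-endian; negative values wrap modulo 2^32. */
static uint32_t xdr_u32(int32_t v)
{
	return htonl((uint32_t)v);
}

int main(void)
{
	printf("old encoding: 0x%08x (collides with yfs_LockRead)\n",
	       (unsigned int)ntohl(xdr_u32(0)));
	printf("new encoding: 0x%08x (yfs_LockNone)\n",
	       (unsigned int)ntohl(xdr_u32(yfs_LockNone)));
	return 0;
}
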
diff --git a/fs/block_dev.c b/fs/block_dev.c index c546cdce77e6..58a4c1217fa8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) | |||
| 104 | } | 104 | } |
| 105 | EXPORT_SYMBOL(invalidate_bdev); | 105 | EXPORT_SYMBOL(invalidate_bdev); |
| 106 | 106 | ||
| 107 | static void set_init_blocksize(struct block_device *bdev) | ||
| 108 | { | ||
| 109 | unsigned bsize = bdev_logical_block_size(bdev); | ||
| 110 | loff_t size = i_size_read(bdev->bd_inode); | ||
| 111 | |||
| 112 | while (bsize < PAGE_SIZE) { | ||
| 113 | if (size & bsize) | ||
| 114 | break; | ||
| 115 | bsize <<= 1; | ||
| 116 | } | ||
| 117 | bdev->bd_block_size = bsize; | ||
| 118 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
| 119 | } | ||
| 120 | |||
| 107 | int set_blocksize(struct block_device *bdev, int size) | 121 | int set_blocksize(struct block_device *bdev, int size) |
| 108 | { | 122 | { |
| 109 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ | 123 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ |
| @@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change); | |||
| 1431 | 1445 | ||
| 1432 | void bd_set_size(struct block_device *bdev, loff_t size) | 1446 | void bd_set_size(struct block_device *bdev, loff_t size) |
| 1433 | { | 1447 | { |
| 1434 | unsigned bsize = bdev_logical_block_size(bdev); | ||
| 1435 | |||
| 1436 | inode_lock(bdev->bd_inode); | 1448 | inode_lock(bdev->bd_inode); |
| 1437 | i_size_write(bdev->bd_inode, size); | 1449 | i_size_write(bdev->bd_inode, size); |
| 1438 | inode_unlock(bdev->bd_inode); | 1450 | inode_unlock(bdev->bd_inode); |
| 1439 | while (bsize < PAGE_SIZE) { | ||
| 1440 | if (size & bsize) | ||
| 1441 | break; | ||
| 1442 | bsize <<= 1; | ||
| 1443 | } | ||
| 1444 | bdev->bd_block_size = bsize; | ||
| 1445 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
| 1446 | } | 1451 | } |
| 1447 | EXPORT_SYMBOL(bd_set_size); | 1452 | EXPORT_SYMBOL(bd_set_size); |
| 1448 | 1453 | ||
| @@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1519 | } | 1524 | } |
| 1520 | } | 1525 | } |
| 1521 | 1526 | ||
| 1522 | if (!ret) | 1527 | if (!ret) { |
| 1523 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); | 1528 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
| 1529 | set_init_blocksize(bdev); | ||
| 1530 | } | ||
| 1524 | 1531 | ||
| 1525 | /* | 1532 | /* |
| 1526 | * If the device is invalidated, rescan partition | 1533 | * If the device is invalidated, rescan partition |
| @@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1555 | goto out_clear; | 1562 | goto out_clear; |
| 1556 | } | 1563 | } |
| 1557 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); | 1564 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); |
| 1565 | set_init_blocksize(bdev); | ||
| 1558 | } | 1566 | } |
| 1559 | 1567 | ||
| 1560 | if (bdev->bd_bdi == &noop_backing_dev_info) | 1568 | if (bdev->bd_bdi == &noop_backing_dev_info) |
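The block_dev.c change moves the block-size heuristic out of bd_set_size() into a new set_init_blocksize() helper called only from __blkdev_get(), so the heuristic runs when the device is opened rather than on every capacity update. The heuristic itself is unchanged: start from the logical block size and keep doubling while the device size stays aligned, capping at the page size. A stand-alone sketch follows, with PAGE_SIZE taken as 4096 purely for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pick_blocksize(unsigned long logical_bs,
				    unsigned long long size)
{
	unsigned long bsize = logical_bs;

	while (bsize < PAGE_SIZE) {
		if (size & bsize)	/* size not a multiple of the next power of two */
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	/* odd number of 512-byte sectors: stays at 512 */
	printf("%lu\n", pick_blocksize(512, 512ULL * 1001));
	/* size is a multiple of 4096: grows to the page size */
	printf("%lu\n", pick_blocksize(512, 4096ULL * 125));
	return 0;
}
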
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0a68cf7032f5..7a2a2621f0d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | struct btrfs_trans_handle; | 35 | struct btrfs_trans_handle; |
| 36 | struct btrfs_transaction; | 36 | struct btrfs_transaction; |
| 37 | struct btrfs_pending_snapshot; | 37 | struct btrfs_pending_snapshot; |
| 38 | struct btrfs_delayed_ref_root; | ||
| 38 | extern struct kmem_cache *btrfs_trans_handle_cachep; | 39 | extern struct kmem_cache *btrfs_trans_handle_cachep; |
| 39 | extern struct kmem_cache *btrfs_bit_radix_cachep; | 40 | extern struct kmem_cache *btrfs_bit_radix_cachep; |
| 40 | extern struct kmem_cache *btrfs_path_cachep; | 41 | extern struct kmem_cache *btrfs_path_cachep; |
| @@ -786,6 +787,9 @@ enum { | |||
| 786 | * main phase. The fs_info::balance_ctl is initialized. | 787 | * main phase. The fs_info::balance_ctl is initialized. |
| 787 | */ | 788 | */ |
| 788 | BTRFS_FS_BALANCE_RUNNING, | 789 | BTRFS_FS_BALANCE_RUNNING, |
| 790 | |||
| 791 | /* Indicate that the cleaner thread is awake and doing something. */ | ||
| 792 | BTRFS_FS_CLEANER_RUNNING, | ||
| 789 | }; | 793 | }; |
| 790 | 794 | ||
| 791 | struct btrfs_fs_info { | 795 | struct btrfs_fs_info { |
| @@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2661 | unsigned long count); | 2665 | unsigned long count); |
| 2662 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, | 2666 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, |
| 2663 | unsigned long count, u64 transid, int wait); | 2667 | unsigned long count, u64 transid, int wait); |
| 2668 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, | ||
| 2669 | struct btrfs_delayed_ref_root *delayed_refs, | ||
| 2670 | struct btrfs_delayed_ref_head *head); | ||
| 2664 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); | 2671 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); |
| 2665 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | 2672 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, |
| 2666 | struct btrfs_fs_info *fs_info, u64 bytenr, | 2673 | struct btrfs_fs_info *fs_info, u64 bytenr, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8da2f380d3c0..6a2a2a951705 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg) | |||
| 1682 | while (1) { | 1682 | while (1) { |
| 1683 | again = 0; | 1683 | again = 0; |
| 1684 | 1684 | ||
| 1685 | set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
| 1686 | |||
| 1685 | /* Make the cleaner go to sleep early. */ | 1687 | /* Make the cleaner go to sleep early. */ |
| 1686 | if (btrfs_need_cleaner_sleep(fs_info)) | 1688 | if (btrfs_need_cleaner_sleep(fs_info)) |
| 1687 | goto sleep; | 1689 | goto sleep; |
| @@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg) | |||
| 1728 | */ | 1730 | */ |
| 1729 | btrfs_delete_unused_bgs(fs_info); | 1731 | btrfs_delete_unused_bgs(fs_info); |
| 1730 | sleep: | 1732 | sleep: |
| 1733 | clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
| 1731 | if (kthread_should_park()) | 1734 | if (kthread_should_park()) |
| 1732 | kthread_parkme(); | 1735 | kthread_parkme(); |
| 1733 | if (kthread_should_stop()) | 1736 | if (kthread_should_stop()) |
| @@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |||
| 4201 | spin_lock(&fs_info->ordered_root_lock); | 4204 | spin_lock(&fs_info->ordered_root_lock); |
| 4202 | } | 4205 | } |
| 4203 | spin_unlock(&fs_info->ordered_root_lock); | 4206 | spin_unlock(&fs_info->ordered_root_lock); |
| 4207 | |||
| 4208 | /* | ||
| 4209 | * We need this here because if we've been flipped read-only we won't | ||
| 4210 | * get sync() from the umount, so we need to make sure any ordered | ||
| 4211 | * extents that haven't had their dirty pages IO start writeout yet | ||
| 4212 | * actually get run and error out properly. | ||
| 4213 | */ | ||
| 4214 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); | ||
| 4204 | } | 4215 | } |
| 4205 | 4216 | ||
| 4206 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | 4217 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
| @@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
| 4265 | if (pin_bytes) | 4276 | if (pin_bytes) |
| 4266 | btrfs_pin_extent(fs_info, head->bytenr, | 4277 | btrfs_pin_extent(fs_info, head->bytenr, |
| 4267 | head->num_bytes, 1); | 4278 | head->num_bytes, 1); |
| 4279 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); | ||
| 4268 | btrfs_put_delayed_ref_head(head); | 4280 | btrfs_put_delayed_ref_head(head); |
| 4269 | cond_resched(); | 4281 | cond_resched(); |
| 4270 | spin_lock(&delayed_refs->lock); | 4282 | spin_lock(&delayed_refs->lock); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b15afeae16df..d81035b7ea7d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, | |||
| 2456 | return ret ? ret : 1; | 2456 | return ret ? ret : 1; |
| 2457 | } | 2457 | } |
| 2458 | 2458 | ||
| 2459 | static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, | 2459 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, |
| 2460 | struct btrfs_delayed_ref_head *head) | 2460 | struct btrfs_delayed_ref_root *delayed_refs, |
| 2461 | struct btrfs_delayed_ref_head *head) | ||
| 2461 | { | 2462 | { |
| 2462 | struct btrfs_fs_info *fs_info = trans->fs_info; | ||
| 2463 | struct btrfs_delayed_ref_root *delayed_refs = | ||
| 2464 | &trans->transaction->delayed_refs; | ||
| 2465 | int nr_items = 1; /* Dropping this ref head update. */ | 2463 | int nr_items = 1; /* Dropping this ref head update. */ |
| 2466 | 2464 | ||
| 2467 | if (head->total_ref_mod < 0) { | 2465 | if (head->total_ref_mod < 0) { |
| @@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, | |||
| 2544 | } | 2542 | } |
| 2545 | } | 2543 | } |
| 2546 | 2544 | ||
| 2547 | cleanup_ref_head_accounting(trans, head); | 2545 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
| 2548 | 2546 | ||
| 2549 | trace_run_delayed_ref_head(fs_info, head, 0); | 2547 | trace_run_delayed_ref_head(fs_info, head, 0); |
| 2550 | btrfs_delayed_ref_unlock(head); | 2548 | btrfs_delayed_ref_unlock(head); |
| @@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info, | |||
| 4954 | ret = 0; | 4952 | ret = 0; |
| 4955 | break; | 4953 | break; |
| 4956 | case COMMIT_TRANS: | 4954 | case COMMIT_TRANS: |
| 4955 | /* | ||
| 4956 | * If we have pending delayed iputs then we could free up a | ||
| 4957 | * bunch of pinned space, so make sure we run the iputs before | ||
| 4958 | * we do our pinned bytes check below. | ||
| 4959 | */ | ||
| 4960 | mutex_lock(&fs_info->cleaner_delayed_iput_mutex); | ||
| 4961 | btrfs_run_delayed_iputs(fs_info); | ||
| 4962 | mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); | ||
| 4963 | |||
| 4957 | ret = may_commit_transaction(fs_info, space_info); | 4964 | ret = may_commit_transaction(fs_info, space_info); |
| 4958 | break; | 4965 | break; |
| 4959 | default: | 4966 | default: |
| @@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
| 7188 | if (head->must_insert_reserved) | 7195 | if (head->must_insert_reserved) |
| 7189 | ret = 1; | 7196 | ret = 1; |
| 7190 | 7197 | ||
| 7191 | cleanup_ref_head_accounting(trans, head); | 7198 | btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); |
| 7192 | mutex_unlock(&head->mutex); | 7199 | mutex_unlock(&head->mutex); |
| 7193 | btrfs_put_delayed_ref_head(head); | 7200 | btrfs_put_delayed_ref_head(head); |
| 7194 | return ret; | 7201 | return ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43eb4535319d..5c349667c761 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -3129,9 +3129,6 @@ out: | |||
| 3129 | /* once for the tree */ | 3129 | /* once for the tree */ |
| 3130 | btrfs_put_ordered_extent(ordered_extent); | 3130 | btrfs_put_ordered_extent(ordered_extent); |
| 3131 | 3131 | ||
| 3132 | /* Try to release some metadata so we don't get an OOM but don't wait */ | ||
| 3133 | btrfs_btree_balance_dirty_nodelay(fs_info); | ||
| 3134 | |||
| 3135 | return ret; | 3132 | return ret; |
| 3136 | } | 3133 | } |
| 3137 | 3134 | ||
| @@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode) | |||
| 3254 | ASSERT(list_empty(&binode->delayed_iput)); | 3251 | ASSERT(list_empty(&binode->delayed_iput)); |
| 3255 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); | 3252 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); |
| 3256 | spin_unlock(&fs_info->delayed_iput_lock); | 3253 | spin_unlock(&fs_info->delayed_iput_lock); |
| 3254 | if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) | ||
| 3255 | wake_up_process(fs_info->cleaner_kthread); | ||
| 3257 | } | 3256 | } |
| 3258 | 3257 | ||
| 3259 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) | 3258 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) |
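Together with the BTRFS_FS_CLEANER_RUNNING flag set and cleared in disk-io.c above, this hunk makes producers that queue a delayed iput wake the cleaner thread only when it is not already running, so pinned space gets reclaimed promptly without redundant wake-ups. The sketch below is a user-space illustration of that pattern with atomics standing in for the kernel's flag bit and wake_up_process(); the names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cleaner_running = false;

/* Hypothetical stand-in for wake_up_process(fs_info->cleaner_kthread). */
static void wake_cleaner(void)
{
	puts("wake cleaner");
}

static void cleaner_iteration(void)
{
	atomic_store(&cleaner_running, true);
	/* ... run delayed iputs, delete unused block groups ... */
	atomic_store(&cleaner_running, false);
}

static void queue_delayed_iput(void)
{
	/* ... add the inode to the delayed-iput list under a lock ... */
	if (!atomic_load(&cleaner_running))
		wake_cleaner();		/* only poke the cleaner if it is idle */
}

int main(void)
{
	queue_delayed_iput();		/* cleaner idle: wake it */
	cleaner_iteration();
	return 0;
}
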
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 94c026bba2c2..bba28a5034ba 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -1035,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci) | |||
| 1035 | list_del_init(&ci->i_snap_realm_item); | 1035 | list_del_init(&ci->i_snap_realm_item); |
| 1036 | ci->i_snap_realm_counter++; | 1036 | ci->i_snap_realm_counter++; |
| 1037 | ci->i_snap_realm = NULL; | 1037 | ci->i_snap_realm = NULL; |
| 1038 | if (realm->ino == ci->i_vino.ino) | ||
| 1039 | realm->inode = NULL; | ||
| 1038 | spin_unlock(&realm->inodes_with_caps_lock); | 1040 | spin_unlock(&realm->inodes_with_caps_lock); |
| 1039 | ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, | 1041 | ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, |
| 1040 | realm); | 1042 | realm); |
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 03f4d24db8fe..9455d3aef0c3 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c | |||
| @@ -3,19 +3,6 @@ | |||
| 3 | * quota.c - CephFS quota | 3 | * quota.c - CephFS quota |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2017-2018 SUSE | 5 | * Copyright (C) 2017-2018 SUSE |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version 2 | ||
| 10 | * of the License, or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | 6 | */ |
| 20 | 7 | ||
| 21 | #include <linux/statfs.h> | 8 | #include <linux/statfs.h> |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 593fb422d0f3..e92a2fee3c57 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
| @@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
| 252 | seq_printf(m, ",ACL"); | 252 | seq_printf(m, ",ACL"); |
| 253 | #endif | 253 | #endif |
| 254 | seq_putc(m, '\n'); | 254 | seq_putc(m, '\n'); |
| 255 | seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize); | ||
| 255 | seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); | 256 | seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); |
| 256 | seq_printf(m, "Servers:"); | 257 | seq_printf(m, "Servers:"); |
| 257 | 258 | ||
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index e18915415e13..bb54ccf8481c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -1549,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) | |||
| 1549 | } | 1549 | } |
| 1550 | 1550 | ||
| 1551 | static int | 1551 | static int |
| 1552 | cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | 1552 | __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, |
| 1553 | bool malformed) | ||
| 1553 | { | 1554 | { |
| 1554 | int length; | 1555 | int length; |
| 1555 | struct cifs_readdata *rdata = mid->callback_data; | ||
| 1556 | 1556 | ||
| 1557 | length = cifs_discard_remaining_data(server); | 1557 | length = cifs_discard_remaining_data(server); |
| 1558 | dequeue_mid(mid, rdata->result); | 1558 | dequeue_mid(mid, malformed); |
| 1559 | mid->resp_buf = server->smallbuf; | 1559 | mid->resp_buf = server->smallbuf; |
| 1560 | server->smallbuf = NULL; | 1560 | server->smallbuf = NULL; |
| 1561 | return length; | 1561 | return length; |
| 1562 | } | 1562 | } |
| 1563 | 1563 | ||
| 1564 | static int | ||
| 1565 | cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
| 1566 | { | ||
| 1567 | struct cifs_readdata *rdata = mid->callback_data; | ||
| 1568 | |||
| 1569 | return __cifs_readv_discard(server, mid, rdata->result); | ||
| 1570 | } | ||
| 1571 | |||
| 1564 | int | 1572 | int |
| 1565 | cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | 1573 | cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) |
| 1566 | { | 1574 | { |
| @@ -1602,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 1602 | return -1; | 1610 | return -1; |
| 1603 | } | 1611 | } |
| 1604 | 1612 | ||
| 1613 | /* set up first two iov for signature check and to get credits */ | ||
| 1614 | rdata->iov[0].iov_base = buf; | ||
| 1615 | rdata->iov[0].iov_len = 4; | ||
| 1616 | rdata->iov[1].iov_base = buf + 4; | ||
| 1617 | rdata->iov[1].iov_len = server->total_read - 4; | ||
| 1618 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
| 1619 | rdata->iov[0].iov_base, rdata->iov[0].iov_len); | ||
| 1620 | cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", | ||
| 1621 | rdata->iov[1].iov_base, rdata->iov[1].iov_len); | ||
| 1622 | |||
| 1605 | /* Was the SMB read successful? */ | 1623 | /* Was the SMB read successful? */ |
| 1606 | rdata->result = server->ops->map_error(buf, false); | 1624 | rdata->result = server->ops->map_error(buf, false); |
| 1607 | if (rdata->result != 0) { | 1625 | if (rdata->result != 0) { |
| 1608 | cifs_dbg(FYI, "%s: server returned error %d\n", | 1626 | cifs_dbg(FYI, "%s: server returned error %d\n", |
| 1609 | __func__, rdata->result); | 1627 | __func__, rdata->result); |
| 1610 | return cifs_readv_discard(server, mid); | 1628 | /* normal error on read response */ |
| 1629 | return __cifs_readv_discard(server, mid, false); | ||
| 1611 | } | 1630 | } |
| 1612 | 1631 | ||
| 1613 | /* Is there enough to get to the rest of the READ_RSP header? */ | 1632 | /* Is there enough to get to the rest of the READ_RSP header? */ |
| @@ -1651,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 1651 | server->total_read += length; | 1670 | server->total_read += length; |
| 1652 | } | 1671 | } |
| 1653 | 1672 | ||
| 1654 | /* set up first iov for signature check */ | ||
| 1655 | rdata->iov[0].iov_base = buf; | ||
| 1656 | rdata->iov[0].iov_len = 4; | ||
| 1657 | rdata->iov[1].iov_base = buf + 4; | ||
| 1658 | rdata->iov[1].iov_len = server->total_read - 4; | ||
| 1659 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n", | ||
| 1660 | rdata->iov[0].iov_base, server->total_read); | ||
| 1661 | |||
| 1662 | /* how much data is in the response? */ | 1673 | /* how much data is in the response? */ |
| 1663 | #ifdef CONFIG_CIFS_SMB_DIRECT | 1674 | #ifdef CONFIG_CIFS_SMB_DIRECT |
| 1664 | use_rdma_mr = rdata->mr; | 1675 | use_rdma_mr = rdata->mr; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 683310f26171..8463c940e0e5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -720,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server) | |||
| 720 | return false; | 720 | return false; |
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | static inline bool | ||
| 724 | zero_credits(struct TCP_Server_Info *server) | ||
| 725 | { | ||
| 726 | int val; | ||
| 727 | |||
| 728 | spin_lock(&server->req_lock); | ||
| 729 | val = server->credits + server->echo_credits + server->oplock_credits; | ||
| 730 | if (server->in_flight == 0 && val == 0) { | ||
| 731 | spin_unlock(&server->req_lock); | ||
| 732 | return true; | ||
| 733 | } | ||
| 734 | spin_unlock(&server->req_lock); | ||
| 735 | return false; | ||
| 736 | } | ||
| 737 | |||
| 723 | static int | 738 | static int |
| 724 | cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) | 739 | cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) |
| 725 | { | 740 | { |
| @@ -732,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) | |||
| 732 | for (total_read = 0; msg_data_left(smb_msg); total_read += length) { | 747 | for (total_read = 0; msg_data_left(smb_msg); total_read += length) { |
| 733 | try_to_freeze(); | 748 | try_to_freeze(); |
| 734 | 749 | ||
| 750 | /* reconnect if no credits and no requests in flight */ | ||
| 751 | if (zero_credits(server)) { | ||
| 752 | cifs_reconnect(server); | ||
| 753 | return -ECONNABORTED; | ||
| 754 | } | ||
| 755 | |||
| 735 | if (server_unresponsive(server)) | 756 | if (server_unresponsive(server)) |
| 736 | return -ECONNABORTED; | 757 | return -ECONNABORTED; |
| 737 | if (cifs_rdma_enabled(server) && server->smbd_conn) | 758 | if (cifs_rdma_enabled(server) && server->smbd_conn) |
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index f14533da3a93..01a76bccdb8d 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c | |||
| @@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 293 | int rc; | 293 | int rc; |
| 294 | struct smb2_file_all_info *smb2_data; | 294 | struct smb2_file_all_info *smb2_data; |
| 295 | __u32 create_options = 0; | 295 | __u32 create_options = 0; |
| 296 | struct cifs_fid fid; | ||
| 297 | bool no_cached_open = tcon->nohandlecache; | ||
| 296 | 298 | ||
| 297 | *adjust_tz = false; | 299 | *adjust_tz = false; |
| 298 | *symlink = false; | 300 | *symlink = false; |
| @@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 301 | GFP_KERNEL); | 303 | GFP_KERNEL); |
| 302 | if (smb2_data == NULL) | 304 | if (smb2_data == NULL) |
| 303 | return -ENOMEM; | 305 | return -ENOMEM; |
| 306 | |||
| 307 | /* If it is a root and its handle is cached then use it */ | ||
| 308 | if (!strlen(full_path) && !no_cached_open) { | ||
| 309 | rc = open_shroot(xid, tcon, &fid); | ||
| 310 | if (rc) | ||
| 311 | goto out; | ||
| 312 | rc = SMB2_query_info(xid, tcon, fid.persistent_fid, | ||
| 313 | fid.volatile_fid, smb2_data); | ||
| 314 | close_shroot(&tcon->crfid); | ||
| 315 | if (rc) | ||
| 316 | goto out; | ||
| 317 | move_smb2_info_to_cifs(data, smb2_data); | ||
| 318 | goto out; | ||
| 319 | } | ||
| 320 | |||
| 304 | if (backup_cred(cifs_sb)) | 321 | if (backup_cred(cifs_sb)) |
| 305 | create_options |= CREATE_OPEN_BACKUP_INTENT; | 322 | create_options |= CREATE_OPEN_BACKUP_INTENT; |
| 306 | 323 | ||
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 6a9c47541c53..7b8b58fb4d3f 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
| @@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | |||
| 648 | if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) | 648 | if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) |
| 649 | return false; | 649 | return false; |
| 650 | 650 | ||
| 651 | if (rsp->sync_hdr.CreditRequest) { | ||
| 652 | spin_lock(&server->req_lock); | ||
| 653 | server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest); | ||
| 654 | spin_unlock(&server->req_lock); | ||
| 655 | wake_up(&server->request_q); | ||
| 656 | } | ||
| 657 | |||
| 651 | if (rsp->StructureSize != | 658 | if (rsp->StructureSize != |
| 652 | smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { | 659 | smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { |
| 653 | if (le16_to_cpu(rsp->StructureSize) == 44) | 660 | if (le16_to_cpu(rsp->StructureSize) == 44) |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index cf7eb891804f..153238fc4fa9 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include "cifs_ioctl.h" | 34 | #include "cifs_ioctl.h" |
| 35 | #include "smbdirect.h" | 35 | #include "smbdirect.h" |
| 36 | 36 | ||
| 37 | /* Change credits for different ops and return the total number of credits */ | ||
| 37 | static int | 38 | static int |
| 38 | change_conf(struct TCP_Server_Info *server) | 39 | change_conf(struct TCP_Server_Info *server) |
| 39 | { | 40 | { |
| @@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server) | |||
| 41 | server->oplock_credits = server->echo_credits = 0; | 42 | server->oplock_credits = server->echo_credits = 0; |
| 42 | switch (server->credits) { | 43 | switch (server->credits) { |
| 43 | case 0: | 44 | case 0: |
| 44 | return -1; | 45 | return 0; |
| 45 | case 1: | 46 | case 1: |
| 46 | server->echoes = false; | 47 | server->echoes = false; |
| 47 | server->oplocks = false; | 48 | server->oplocks = false; |
| 48 | cifs_dbg(VFS, "disabling echoes and oplocks\n"); | ||
| 49 | break; | 49 | break; |
| 50 | case 2: | 50 | case 2: |
| 51 | server->echoes = true; | 51 | server->echoes = true; |
| 52 | server->oplocks = false; | 52 | server->oplocks = false; |
| 53 | server->echo_credits = 1; | 53 | server->echo_credits = 1; |
| 54 | cifs_dbg(FYI, "disabling oplocks\n"); | ||
| 55 | break; | 54 | break; |
| 56 | default: | 55 | default: |
| 57 | server->echoes = true; | 56 | server->echoes = true; |
| @@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server) | |||
| 64 | server->echo_credits = 1; | 63 | server->echo_credits = 1; |
| 65 | } | 64 | } |
| 66 | server->credits -= server->echo_credits + server->oplock_credits; | 65 | server->credits -= server->echo_credits + server->oplock_credits; |
| 67 | return 0; | 66 | return server->credits + server->echo_credits + server->oplock_credits; |
| 68 | } | 67 | } |
| 69 | 68 | ||
| 70 | static void | 69 | static void |
| 71 | smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, | 70 | smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, |
| 72 | const int optype) | 71 | const int optype) |
| 73 | { | 72 | { |
| 74 | int *val, rc = 0; | 73 | int *val, rc = -1; |
| 74 | |||
| 75 | spin_lock(&server->req_lock); | 75 | spin_lock(&server->req_lock); |
| 76 | val = server->ops->get_credits_field(server, optype); | 76 | val = server->ops->get_credits_field(server, optype); |
| 77 | 77 | ||
| @@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, | |||
| 101 | } | 101 | } |
| 102 | spin_unlock(&server->req_lock); | 102 | spin_unlock(&server->req_lock); |
| 103 | wake_up(&server->request_q); | 103 | wake_up(&server->request_q); |
| 104 | if (rc) | 104 | |
| 105 | cifs_reconnect(server); | 105 | if (server->tcpStatus == CifsNeedReconnect) |
| 106 | return; | ||
| 107 | |||
| 108 | switch (rc) { | ||
| 109 | case -1: | ||
| 110 | /* change_conf hasn't been executed */ | ||
| 111 | break; | ||
| 112 | case 0: | ||
| 113 | cifs_dbg(VFS, "Possible client or server bug - zero credits\n"); | ||
| 114 | break; | ||
| 115 | case 1: | ||
| 116 | cifs_dbg(VFS, "disabling echoes and oplocks\n"); | ||
| 117 | break; | ||
| 118 | case 2: | ||
| 119 | cifs_dbg(FYI, "disabling oplocks\n"); | ||
| 120 | break; | ||
| 121 | default: | ||
| 122 | cifs_dbg(FYI, "add %u credits total=%d\n", add, rc); | ||
| 123 | } | ||
| 106 | } | 124 | } |
| 107 | 125 | ||
| 108 | static void | 126 | static void |
| @@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid) | |||
| 136 | { | 154 | { |
| 137 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf; | 155 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf; |
| 138 | 156 | ||
| 139 | return le16_to_cpu(shdr->CreditRequest); | 157 | if (mid->mid_state == MID_RESPONSE_RECEIVED |
| 158 | || mid->mid_state == MID_RESPONSE_MALFORMED) | ||
| 159 | return le16_to_cpu(shdr->CreditRequest); | ||
| 160 | |||
| 161 | return 0; | ||
| 140 | } | 162 | } |
| 141 | 163 | ||
| 142 | static int | 164 | static int |
| @@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, | |||
| 165 | 187 | ||
| 166 | scredits = server->credits; | 188 | scredits = server->credits; |
| 167 | /* can deadlock with reopen */ | 189 | /* can deadlock with reopen */ |
| 168 | if (scredits == 1) { | 190 | if (scredits <= 8) { |
| 169 | *num = SMB2_MAX_BUFFER_SIZE; | 191 | *num = SMB2_MAX_BUFFER_SIZE; |
| 170 | *credits = 0; | 192 | *credits = 0; |
| 171 | break; | 193 | break; |
| 172 | } | 194 | } |
| 173 | 195 | ||
| 174 | /* leave one credit for a possible reopen */ | 196 | /* leave some credits for reopen and other ops */ |
| 175 | scredits--; | 197 | scredits -= 8; |
| 176 | *num = min_t(unsigned int, size, | 198 | *num = min_t(unsigned int, size, |
| 177 | scredits * SMB2_MAX_BUFFER_SIZE); | 199 | scredits * SMB2_MAX_BUFFER_SIZE); |
| 178 | 200 | ||
| @@ -3189,11 +3211,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | |||
| 3189 | server->ops->is_status_pending(buf, server, 0)) | 3211 | server->ops->is_status_pending(buf, server, 0)) |
| 3190 | return -1; | 3212 | return -1; |
| 3191 | 3213 | ||
| 3192 | rdata->result = server->ops->map_error(buf, false); | 3214 | /* set up first two iov to get credits */ |
| 3215 | rdata->iov[0].iov_base = buf; | ||
| 3216 | rdata->iov[0].iov_len = 4; | ||
| 3217 | rdata->iov[1].iov_base = buf + 4; | ||
| 3218 | rdata->iov[1].iov_len = | ||
| 3219 | min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4; | ||
| 3220 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
| 3221 | rdata->iov[0].iov_base, rdata->iov[0].iov_len); | ||
| 3222 | cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", | ||
| 3223 | rdata->iov[1].iov_base, rdata->iov[1].iov_len); | ||
| 3224 | |||
| 3225 | rdata->result = server->ops->map_error(buf, true); | ||
| 3193 | if (rdata->result != 0) { | 3226 | if (rdata->result != 0) { |
| 3194 | cifs_dbg(FYI, "%s: server returned error %d\n", | 3227 | cifs_dbg(FYI, "%s: server returned error %d\n", |
| 3195 | __func__, rdata->result); | 3228 | __func__, rdata->result); |
| 3196 | dequeue_mid(mid, rdata->result); | 3229 | /* normal error on read response */ |
| 3230 | dequeue_mid(mid, false); | ||
| 3197 | return 0; | 3231 | return 0; |
| 3198 | } | 3232 | } |
| 3199 | 3233 | ||
| @@ -3266,14 +3300,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | |||
| 3266 | return 0; | 3300 | return 0; |
| 3267 | } | 3301 | } |
| 3268 | 3302 | ||
| 3269 | /* set up first iov for signature check */ | ||
| 3270 | rdata->iov[0].iov_base = buf; | ||
| 3271 | rdata->iov[0].iov_len = 4; | ||
| 3272 | rdata->iov[1].iov_base = buf + 4; | ||
| 3273 | rdata->iov[1].iov_len = server->vals->read_rsp_size - 4; | ||
| 3274 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
| 3275 | rdata->iov[0].iov_base, server->vals->read_rsp_size); | ||
| 3276 | |||
| 3277 | length = rdata->copy_into_pages(server, rdata, &iter); | 3303 | length = rdata->copy_into_pages(server, rdata, &iter); |
| 3278 | 3304 | ||
| 3279 | kfree(bvec); | 3305 | kfree(bvec); |
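Two themes run through the smb2ops.c hunks: change_conf() now reports the resulting credit total so smb2_add_credits() can log the interesting transitions itself, and smb2_wait_mtu_credits() keeps a reserve of eight credits for reopen and other small operations instead of a single one before sizing a large read or write. A rough user-space sketch of that carve-out is below; the constants and the rounding of credits consumed are illustrative assumptions, not the cifs API.

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE	65536U
#define CREDIT_RESERVE		8U

static unsigned int mtu_bytes(unsigned int scredits, unsigned int size,
			      unsigned int *credits_used)
{
	unsigned int num;

	if (scredits <= CREDIT_RESERVE) {
		/* too few credits: fall back to a single-credit sized request */
		*credits_used = 0;
		return SMB2_MAX_BUFFER_SIZE;
	}
	scredits -= CREDIT_RESERVE;		/* leave credits for other ops */
	num = size < scredits * SMB2_MAX_BUFFER_SIZE ?
		size : scredits * SMB2_MAX_BUFFER_SIZE;
	*credits_used = (num + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
	return num;
}

int main(void)
{
	unsigned int used;

	printf("%u bytes, %u credits\n",
	       mtu_bytes(100, 8 * 1024 * 1024, &used), used);
	printf("%u bytes, %u credits\n",
	       mtu_bytes(5, 8 * 1024 * 1024, &used), used);
	return 0;
}
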
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 50811a7dc0e0..2ff209ec4fab 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -2816,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2816 | int resp_buftype = CIFS_NO_BUFFER; | 2816 | int resp_buftype = CIFS_NO_BUFFER; |
| 2817 | struct cifs_ses *ses = tcon->ses; | 2817 | struct cifs_ses *ses = tcon->ses; |
| 2818 | int flags = 0; | 2818 | int flags = 0; |
| 2819 | bool allocated = false; | ||
| 2819 | 2820 | ||
| 2820 | cifs_dbg(FYI, "Query Info\n"); | 2821 | cifs_dbg(FYI, "Query Info\n"); |
| 2821 | 2822 | ||
| @@ -2855,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2855 | "Error %d allocating memory for acl\n", | 2856 | "Error %d allocating memory for acl\n", |
| 2856 | rc); | 2857 | rc); |
| 2857 | *dlen = 0; | 2858 | *dlen = 0; |
| 2859 | rc = -ENOMEM; | ||
| 2858 | goto qinf_exit; | 2860 | goto qinf_exit; |
| 2859 | } | 2861 | } |
| 2862 | allocated = true; | ||
| 2860 | } | 2863 | } |
| 2861 | } | 2864 | } |
| 2862 | 2865 | ||
| 2863 | rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), | 2866 | rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), |
| 2864 | le32_to_cpu(rsp->OutputBufferLength), | 2867 | le32_to_cpu(rsp->OutputBufferLength), |
| 2865 | &rsp_iov, min_len, *data); | 2868 | &rsp_iov, min_len, *data); |
| 2869 | if (rc && allocated) { | ||
| 2870 | kfree(*data); | ||
| 2871 | *data = NULL; | ||
| 2872 | *dlen = 0; | ||
| 2873 | } | ||
| 2866 | 2874 | ||
| 2867 | qinf_exit: | 2875 | qinf_exit: |
| 2868 | SMB2_query_info_free(&rqst); | 2876 | SMB2_query_info_free(&rqst); |
| @@ -2916,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid) | |||
| 2916 | { | 2924 | { |
| 2917 | struct TCP_Server_Info *server = mid->callback_data; | 2925 | struct TCP_Server_Info *server = mid->callback_data; |
| 2918 | struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; | 2926 | struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; |
| 2919 | unsigned int credits_received = 1; | 2927 | unsigned int credits_received = 0; |
| 2920 | 2928 | ||
| 2921 | if (mid->mid_state == MID_RESPONSE_RECEIVED) | 2929 | if (mid->mid_state == MID_RESPONSE_RECEIVED |
| 2930 | || mid->mid_state == MID_RESPONSE_MALFORMED) | ||
| 2922 | credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); | 2931 | credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); |
| 2923 | 2932 | ||
| 2924 | DeleteMidQEntry(mid); | 2933 | DeleteMidQEntry(mid); |
| @@ -3175,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
| 3175 | struct TCP_Server_Info *server = tcon->ses->server; | 3184 | struct TCP_Server_Info *server = tcon->ses->server; |
| 3176 | struct smb2_sync_hdr *shdr = | 3185 | struct smb2_sync_hdr *shdr = |
| 3177 | (struct smb2_sync_hdr *)rdata->iov[0].iov_base; | 3186 | (struct smb2_sync_hdr *)rdata->iov[0].iov_base; |
| 3178 | unsigned int credits_received = 1; | 3187 | unsigned int credits_received = 0; |
| 3179 | struct smb_rqst rqst = { .rq_iov = rdata->iov, | 3188 | struct smb_rqst rqst = { .rq_iov = rdata->iov, |
| 3180 | .rq_nvec = 2, | 3189 | .rq_nvec = 2, |
| 3181 | .rq_pages = rdata->pages, | 3190 | .rq_pages = rdata->pages, |
| @@ -3214,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
| 3214 | task_io_account_read(rdata->got_bytes); | 3223 | task_io_account_read(rdata->got_bytes); |
| 3215 | cifs_stats_bytes_read(tcon, rdata->got_bytes); | 3224 | cifs_stats_bytes_read(tcon, rdata->got_bytes); |
| 3216 | break; | 3225 | break; |
| 3226 | case MID_RESPONSE_MALFORMED: | ||
| 3227 | credits_received = le16_to_cpu(shdr->CreditRequest); | ||
| 3228 | /* fall through */ | ||
| 3217 | default: | 3229 | default: |
| 3218 | if (rdata->result != -ENODATA) | 3230 | if (rdata->result != -ENODATA) |
| 3219 | rdata->result = -EIO; | 3231 | rdata->result = -EIO; |
| @@ -3399,7 +3411,7 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
| 3399 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 3411 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
| 3400 | unsigned int written; | 3412 | unsigned int written; |
| 3401 | struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; | 3413 | struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; |
| 3402 | unsigned int credits_received = 1; | 3414 | unsigned int credits_received = 0; |
| 3403 | 3415 | ||
| 3404 | switch (mid->mid_state) { | 3416 | switch (mid->mid_state) { |
| 3405 | case MID_RESPONSE_RECEIVED: | 3417 | case MID_RESPONSE_RECEIVED: |
| @@ -3427,6 +3439,9 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
| 3427 | case MID_RETRY_NEEDED: | 3439 | case MID_RETRY_NEEDED: |
| 3428 | wdata->result = -EAGAIN; | 3440 | wdata->result = -EAGAIN; |
| 3429 | break; | 3441 | break; |
| 3442 | case MID_RESPONSE_MALFORMED: | ||
| 3443 | credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest); | ||
| 3444 | /* fall through */ | ||
| 3430 | default: | 3445 | default: |
| 3431 | wdata->result = -EIO; | 3446 | wdata->result = -EIO; |
| 3432 | break; | 3447 | break; |
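The callback changes above stop assuming one credit per response: credits now default to zero and are harvested from the CreditRequest field whenever a header was actually read, including the MID_RESPONSE_MALFORMED state, which falls through to the error handling after recovering its credits. A compact sketch of that fall-through pattern follows; the enum values and field names are illustrative.

#include <stdint.h>
#include <stdio.h>

enum mid_state {
	MID_RESPONSE_RECEIVED,
	MID_RESPONSE_MALFORMED,
	MID_RETRY_NEEDED,
};

struct response {
	enum mid_state state;
	uint16_t credit_request;	/* credits granted in the (possibly bad) header */
};

/* Returns credits to hand back to the connection; sets *err for the caller. */
static unsigned int harvest_credits(const struct response *r, int *err)
{
	unsigned int credits = 0;

	*err = 0;
	switch (r->state) {
	case MID_RESPONSE_RECEIVED:
		credits = r->credit_request;
		break;
	case MID_RESPONSE_MALFORMED:
		credits = r->credit_request;	/* header was read: keep its credits */
		/* fall through */
	default:
		*err = -5;			/* -EIO */
		break;
	}
	return credits;
}

int main(void)
{
	struct response bad = { MID_RESPONSE_MALFORMED, 10 };
	int err;

	printf("credits=%u err=%d\n", harvest_credits(&bad, &err), err);
	return 0;
}
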
diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c index bd4a546feec1..465483787193 100644 --- a/fs/cifs/trace.c +++ b/fs/cifs/trace.c | |||
| @@ -3,16 +3,6 @@ | |||
| 3 | * Copyright (C) 2018, Microsoft Corporation. | 3 | * Copyright (C) 2018, Microsoft Corporation. |
| 4 | * | 4 | * |
| 5 | * Author(s): Steve French <stfrench@microsoft.com> | 5 | * Author(s): Steve French <stfrench@microsoft.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
| 15 | * the GNU General Public License for more details. | ||
| 16 | */ | 6 | */ |
| 17 | #define CREATE_TRACE_POINTS | 7 | #define CREATE_TRACE_POINTS |
| 18 | #include "trace.h" | 8 | #include "trace.h" |
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h index fb049809555f..59be48206932 100644 --- a/fs/cifs/trace.h +++ b/fs/cifs/trace.h | |||
| @@ -3,16 +3,6 @@ | |||
| 3 | * Copyright (C) 2018, Microsoft Corporation. | 3 | * Copyright (C) 2018, Microsoft Corporation. |
| 4 | * | 4 | * |
| 5 | * Author(s): Steve French <stfrench@microsoft.com> | 5 | * Author(s): Steve French <stfrench@microsoft.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
| 15 | * the GNU General Public License for more details. | ||
| 16 | */ | 6 | */ |
| 17 | #undef TRACE_SYSTEM | 7 | #undef TRACE_SYSTEM |
| 18 | #define TRACE_SYSTEM cifs | 8 | #define TRACE_SYSTEM cifs |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 202e0e84efdd..53532bd3f50d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
| @@ -786,17 +786,8 @@ static void | |||
| 786 | cifs_compound_callback(struct mid_q_entry *mid) | 786 | cifs_compound_callback(struct mid_q_entry *mid) |
| 787 | { | 787 | { |
| 788 | struct TCP_Server_Info *server = mid->server; | 788 | struct TCP_Server_Info *server = mid->server; |
| 789 | unsigned int optype = mid->optype; | ||
| 790 | unsigned int credits_received = 0; | ||
| 791 | 789 | ||
| 792 | if (mid->mid_state == MID_RESPONSE_RECEIVED) { | 790 | add_credits(server, server->ops->get_credits(mid), mid->optype); |
| 793 | if (mid->resp_buf) | ||
| 794 | credits_received = server->ops->get_credits(mid); | ||
| 795 | else | ||
| 796 | cifs_dbg(FYI, "Bad state for cancelled MID\n"); | ||
| 797 | } | ||
| 798 | |||
| 799 | add_credits(server, credits_received, optype); | ||
| 800 | } | 791 | } |
| 801 | 792 | ||
| 802 | static void | 793 | static void |
diff --git a/fs/direct-io.c b/fs/direct-io.c index dbc1a1f080ce..ec2fb6fe6d37 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, | |||
| 679 | unsigned long fs_count; /* Number of filesystem-sized blocks */ | 679 | unsigned long fs_count; /* Number of filesystem-sized blocks */ |
| 680 | int create; | 680 | int create; |
| 681 | unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; | 681 | unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; |
| 682 | loff_t i_size; | ||
| 682 | 683 | ||
| 683 | /* | 684 | /* |
| 684 | * If there was a memory error and we've overwritten all the | 685 | * If there was a memory error and we've overwritten all the |
| @@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, | |||
| 708 | */ | 709 | */ |
| 709 | create = dio->op == REQ_OP_WRITE; | 710 | create = dio->op == REQ_OP_WRITE; |
| 710 | if (dio->flags & DIO_SKIP_HOLES) { | 711 | if (dio->flags & DIO_SKIP_HOLES) { |
| 711 | if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> | 712 | i_size = i_size_read(dio->inode); |
| 712 | i_blkbits)) | 713 | if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) |
| 713 | create = 0; | 714 | create = 0; |
| 714 | } | 715 | } |
| 715 | 716 | ||
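
The get_more_blocks() change above fixes hole detection for empty inodes under DIO_SKIP_HOLES: with i_size == 0, the old (i_size_read(inode) - 1) >> i_blkbits expression wraps around, the comparison against fs_startblk is effectively done as unsigned and is always true, so create is cleared for every direct write. Reading i_size once and skipping the check when it is zero restores block allocation for writes to empty files. A small stand-alone illustration of the arithmetic (editorial; the explicit cast stands in for the implicit sector_t/loff_t conversion in the kernel comparison):

#include <stdio.h>

int main(void)
{
        long long i_size = 0;                   /* empty file               */
        unsigned int i_blkbits = 12;            /* 4096-byte fs blocks      */
        unsigned long long fs_startblk = 0;     /* first block of the write */

        /* old check: the unsigned comparison makes the right side enormous */
        int old_create = !(fs_startblk <=
                           ((unsigned long long)(i_size - 1) >> i_blkbits));

        /* new check: an empty file has no allocated range to protect */
        int new_create = !(i_size && fs_startblk <= (i_size - 1) >> i_blkbits);

        /* prints "old create=0, new create=1" */
        printf("old create=%d, new create=%d\n", old_create, new_create);
        return 0;
}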
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index b40168fcc94a..36855c1f8daf 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -331,11 +331,22 @@ struct inode_switch_wbs_context { | |||
| 331 | struct work_struct work; | 331 | struct work_struct work; |
| 332 | }; | 332 | }; |
| 333 | 333 | ||
| 334 | static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) | ||
| 335 | { | ||
| 336 | down_write(&bdi->wb_switch_rwsem); | ||
| 337 | } | ||
| 338 | |||
| 339 | static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) | ||
| 340 | { | ||
| 341 | up_write(&bdi->wb_switch_rwsem); | ||
| 342 | } | ||
| 343 | |||
| 334 | static void inode_switch_wbs_work_fn(struct work_struct *work) | 344 | static void inode_switch_wbs_work_fn(struct work_struct *work) |
| 335 | { | 345 | { |
| 336 | struct inode_switch_wbs_context *isw = | 346 | struct inode_switch_wbs_context *isw = |
| 337 | container_of(work, struct inode_switch_wbs_context, work); | 347 | container_of(work, struct inode_switch_wbs_context, work); |
| 338 | struct inode *inode = isw->inode; | 348 | struct inode *inode = isw->inode; |
| 349 | struct backing_dev_info *bdi = inode_to_bdi(inode); | ||
| 339 | struct address_space *mapping = inode->i_mapping; | 350 | struct address_space *mapping = inode->i_mapping; |
| 340 | struct bdi_writeback *old_wb = inode->i_wb; | 351 | struct bdi_writeback *old_wb = inode->i_wb; |
| 341 | struct bdi_writeback *new_wb = isw->new_wb; | 352 | struct bdi_writeback *new_wb = isw->new_wb; |
| @@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) | |||
| 344 | bool switched = false; | 355 | bool switched = false; |
| 345 | 356 | ||
| 346 | /* | 357 | /* |
| 358 | * If @inode switches cgwb membership while sync_inodes_sb() is | ||
| 359 | * being issued, sync_inodes_sb() might miss it. Synchronize. | ||
| 360 | */ | ||
| 361 | down_read(&bdi->wb_switch_rwsem); | ||
| 362 | |||
| 363 | /* | ||
| 347 | * By the time control reaches here, RCU grace period has passed | 364 | * By the time control reaches here, RCU grace period has passed |
| 348 | * since I_WB_SWITCH assertion and all wb stat update transactions | 365 | * since I_WB_SWITCH assertion and all wb stat update transactions |
| 349 | * between unlocked_inode_to_wb_begin/end() are guaranteed to be | 366 | * between unlocked_inode_to_wb_begin/end() are guaranteed to be |
| @@ -428,6 +445,8 @@ skip_switch: | |||
| 428 | spin_unlock(&new_wb->list_lock); | 445 | spin_unlock(&new_wb->list_lock); |
| 429 | spin_unlock(&old_wb->list_lock); | 446 | spin_unlock(&old_wb->list_lock); |
| 430 | 447 | ||
| 448 | up_read(&bdi->wb_switch_rwsem); | ||
| 449 | |||
| 431 | if (switched) { | 450 | if (switched) { |
| 432 | wb_wakeup(new_wb); | 451 | wb_wakeup(new_wb); |
| 433 | wb_put(old_wb); | 452 | wb_put(old_wb); |
| @@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) | |||
| 468 | if (inode->i_state & I_WB_SWITCH) | 487 | if (inode->i_state & I_WB_SWITCH) |
| 469 | return; | 488 | return; |
| 470 | 489 | ||
| 490 | /* | ||
| 491 | * Avoid starting new switches while sync_inodes_sb() is in | ||
| 492 | * progress. Otherwise, if the down_write protected issue path | ||
| 493 | * blocks heavily, we might end up starting a large number of | ||
| 494 | * switches which will block on the rwsem. | ||
| 495 | */ | ||
| 496 | if (!down_read_trylock(&bdi->wb_switch_rwsem)) | ||
| 497 | return; | ||
| 498 | |||
| 471 | isw = kzalloc(sizeof(*isw), GFP_ATOMIC); | 499 | isw = kzalloc(sizeof(*isw), GFP_ATOMIC); |
| 472 | if (!isw) | 500 | if (!isw) |
| 473 | return; | 501 | goto out_unlock; |
| 474 | 502 | ||
| 475 | /* find and pin the new wb */ | 503 | /* find and pin the new wb */ |
| 476 | rcu_read_lock(); | 504 | rcu_read_lock(); |
| @@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) | |||
| 504 | * Let's continue after I_WB_SWITCH is guaranteed to be visible. | 532 | * Let's continue after I_WB_SWITCH is guaranteed to be visible. |
| 505 | */ | 533 | */ |
| 506 | call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); | 534 | call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); |
| 507 | return; | 535 | goto out_unlock; |
| 508 | 536 | ||
| 509 | out_free: | 537 | out_free: |
| 510 | if (isw->new_wb) | 538 | if (isw->new_wb) |
| 511 | wb_put(isw->new_wb); | 539 | wb_put(isw->new_wb); |
| 512 | kfree(isw); | 540 | kfree(isw); |
| 541 | out_unlock: | ||
| 542 | up_read(&bdi->wb_switch_rwsem); | ||
| 513 | } | 543 | } |
| 514 | 544 | ||
| 515 | /** | 545 | /** |
| @@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init); | |||
| 887 | 917 | ||
| 888 | #else /* CONFIG_CGROUP_WRITEBACK */ | 918 | #else /* CONFIG_CGROUP_WRITEBACK */ |
| 889 | 919 | ||
| 920 | static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } | ||
| 921 | static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { } | ||
| 922 | |||
| 890 | static struct bdi_writeback * | 923 | static struct bdi_writeback * |
| 891 | locked_inode_to_wb_and_lock_list(struct inode *inode) | 924 | locked_inode_to_wb_and_lock_list(struct inode *inode) |
| 892 | __releases(&inode->i_lock) | 925 | __releases(&inode->i_lock) |
| @@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb) | |||
| 2413 | return; | 2446 | return; |
| 2414 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | 2447 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
| 2415 | 2448 | ||
| 2449 | /* protect against inode wb switch, see inode_switch_wbs_work_fn() */ | ||
| 2450 | bdi_down_write_wb_switch_rwsem(bdi); | ||
| 2416 | bdi_split_work_to_wbs(bdi, &work, false); | 2451 | bdi_split_work_to_wbs(bdi, &work, false); |
| 2417 | wb_wait_for_completion(bdi, &done); | 2452 | wb_wait_for_completion(bdi, &done); |
| 2453 | bdi_up_write_wb_switch_rwsem(bdi); | ||
| 2418 | 2454 | ||
| 2419 | wait_sb_inodes(sb); | 2455 | wait_sb_inodes(sb); |
| 2420 | } | 2456 | } |
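
Taken together, the fs-writeback hunks above add bdi->wb_switch_rwsem to keep cgroup-writeback inode switching from racing with sync_inodes_sb(): the sync path holds the semaphore for writing while it issues work, the switch worker holds it for reading across the actual switch, and inode_switch_wbs() only starts a new switch if a read-trylock succeeds, so a heavily blocked sync cannot pile up queued switch work items. The empty stubs keep the !CONFIG_CGROUP_WRITEBACK build unchanged. Condensed view of the three parties (editorial excerpt of the hunks above):

/* sync_inodes_sb(): exclude switches while writeback work is issued */
bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(bdi, &done);
bdi_up_write_wb_switch_rwsem(bdi);

/* inode_switch_wbs(): refuse to start a new switch while a sync runs */
if (!down_read_trylock(&bdi->wb_switch_rwsem))
        return;
/* ... allocate isw, set I_WB_SWITCH, queue via call_rcu ... */
up_read(&bdi->wb_switch_rwsem);

/* inode_switch_wbs_work_fn(): hold the read side across the switch */
down_read(&bdi->wb_switch_rwsem);
/* ... move the inode from old_wb to new_wb ... */
up_read(&bdi->wb_switch_rwsem);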
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 46d691ba04bc..45b2322e092d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c | |||
| @@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, | |||
| 133 | struct file *file_out, loff_t pos_out, | 133 | struct file *file_out, loff_t pos_out, |
| 134 | size_t count, unsigned int flags) | 134 | size_t count, unsigned int flags) |
| 135 | { | 135 | { |
| 136 | ssize_t ret; | ||
| 137 | |||
| 138 | if (file_inode(file_in) == file_inode(file_out)) | 136 | if (file_inode(file_in) == file_inode(file_out)) |
| 139 | return -EINVAL; | 137 | return -EINVAL; |
| 140 | retry: | 138 | return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); |
| 141 | ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); | ||
| 142 | if (ret == -EAGAIN) | ||
| 143 | goto retry; | ||
| 144 | return ret; | ||
| 145 | } | 139 | } |
| 146 | 140 | ||
| 147 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) | 141 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) |
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 105576daca4a..798f1253141a 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c | |||
| @@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, | |||
| 724 | return -EBADF; | 724 | return -EBADF; |
| 725 | 725 | ||
| 726 | /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ | 726 | /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ |
| 727 | if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) | 727 | if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) { |
| 728 | return -EINVAL; | 728 | ret = -EINVAL; |
| 729 | goto fput_and_out; | ||
| 730 | } | ||
| 729 | 731 | ||
| 730 | /* verify that this is indeed an inotify instance */ | 732 | /* verify that this is indeed an inotify instance */ |
| 731 | if (unlikely(f.file->f_op != &inotify_fops)) { | 733 | if (unlikely(f.file->f_op != &inotify_fops)) { |
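
The inotify_add_watch() hunk above fixes a file-reference leak: the IN_MASK_ADD/IN_MASK_CREATE rejection used to return straight out of the syscall after fdget() had already taken a reference, so the error path now goes through fput_and_out like every other failure. The shape of the fix (editorial sketch; demo_add_watch is a placeholder, the flag names are the real ones):

static int demo_add_watch(int fd, u32 mask)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;          /* no reference taken yet */

        if ((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)) {
                ret = -EINVAL;
                goto fput_and_out;      /* a bare return here leaked f */
        }

        ret = 0;                        /* ... the real work goes here ... */
fput_and_out:
        fdput(f);
        return ret;
}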
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 96f7d32cd184..898c8321b343 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c | |||
| @@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, | |||
| 128 | struct pstore_record *record) | 128 | struct pstore_record *record) |
| 129 | { | 129 | { |
| 130 | struct persistent_ram_zone *prz; | 130 | struct persistent_ram_zone *prz; |
| 131 | bool update = (record->type == PSTORE_TYPE_DMESG); | ||
| 132 | 131 | ||
| 133 | /* Give up if we never existed or have hit the end. */ | 132 | /* Give up if we never existed or have hit the end. */ |
| 134 | if (!przs) | 133 | if (!przs) |
| @@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id, | |||
| 139 | return NULL; | 138 | return NULL; |
| 140 | 139 | ||
| 141 | /* Update old/shadowed buffer. */ | 140 | /* Update old/shadowed buffer. */ |
| 142 | if (update) | 141 | if (prz->type == PSTORE_TYPE_DMESG) |
| 143 | persistent_ram_save_old(prz); | 142 | persistent_ram_save_old(prz); |
| 144 | 143 | ||
| 145 | if (!persistent_ram_old_size(prz)) | 144 | if (!persistent_ram_old_size(prz)) |
| @@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev) | |||
| 711 | { | 710 | { |
| 712 | struct device *dev = &pdev->dev; | 711 | struct device *dev = &pdev->dev; |
| 713 | struct ramoops_platform_data *pdata = dev->platform_data; | 712 | struct ramoops_platform_data *pdata = dev->platform_data; |
| 713 | struct ramoops_platform_data pdata_local; | ||
| 714 | struct ramoops_context *cxt = &oops_cxt; | 714 | struct ramoops_context *cxt = &oops_cxt; |
| 715 | size_t dump_mem_sz; | 715 | size_t dump_mem_sz; |
| 716 | phys_addr_t paddr; | 716 | phys_addr_t paddr; |
| 717 | int err = -EINVAL; | 717 | int err = -EINVAL; |
| 718 | 718 | ||
| 719 | if (dev_of_node(dev) && !pdata) { | 719 | if (dev_of_node(dev) && !pdata) { |
| 720 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | 720 | pdata = &pdata_local; |
| 721 | if (!pdata) { | 721 | memset(pdata, 0, sizeof(*pdata)); |
| 722 | pr_err("cannot allocate platform data buffer\n"); | ||
| 723 | err = -ENOMEM; | ||
| 724 | goto fail_out; | ||
| 725 | } | ||
| 726 | 722 | ||
| 727 | err = ramoops_parse_dt(pdev, pdata); | 723 | err = ramoops_parse_dt(pdev, pdata); |
| 728 | if (err < 0) | 724 | if (err < 0) |
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h index ad6f55dabd6d..0f2e0fe45ca4 100644 --- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h | |||
| @@ -1,12 +1,11 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | * | ||
| 3 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
| 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2017 Amlogic, inc. | 6 | * Copyright (c) 2017 Amlogic, inc. |
| 7 | * Author: Yixun Lan <yixun.lan@amlogic.com> | 7 | * Author: Yixun Lan <yixun.lan@amlogic.com> |
| 8 | * | 8 | * |
| 9 | * SPDX-License-Identifier: (GPL-2.0+ OR BSD) | ||
| 10 | */ | 9 | */ |
| 11 | 10 | ||
| 12 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H | 11 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H |
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c31157135598..07e02d6df5ad 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -190,6 +190,7 @@ struct backing_dev_info { | |||
| 190 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | 190 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
| 191 | struct rb_root cgwb_congested_tree; /* their congested states */ | 191 | struct rb_root cgwb_congested_tree; /* their congested states */ |
| 192 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ | 192 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ |
| 193 | struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ | ||
| 193 | #else | 194 | #else |
| 194 | struct bdi_writeback_congested *wb_congested; | 195 | struct bdi_writeback_congested *wb_congested; |
| 195 | #endif | 196 | #endif |
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | struct bcma_soc { | 7 | struct bcma_soc { |
| 8 | struct bcma_bus bus; | 8 | struct bcma_bus bus; |
| 9 | struct device *dev; | ||
| 9 | }; | 10 | }; |
| 10 | 11 | ||
| 11 | int __init bcma_host_soc_register(struct bcma_soc *soc); | 12 | int __init bcma_host_soc_register(struct bcma_soc *soc); |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5c7e7f859a24..d66bf5f32610 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -287,7 +287,7 @@ enum req_opf { | |||
| 287 | REQ_OP_DISCARD = 3, | 287 | REQ_OP_DISCARD = 3, |
| 288 | /* securely erase sectors */ | 288 | /* securely erase sectors */ |
| 289 | REQ_OP_SECURE_ERASE = 5, | 289 | REQ_OP_SECURE_ERASE = 5, |
| 290 | /* seset a zone write pointer */ | 290 | /* reset a zone write pointer */ |
| 291 | REQ_OP_ZONE_RESET = 6, | 291 | REQ_OP_ZONE_RESET = 6, |
| 292 | /* write the same sector many times */ | 292 | /* write the same sector many times */ |
| 293 | REQ_OP_WRITE_SAME = 7, | 293 | REQ_OP_WRITE_SAME = 7, |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 27b74947cd2b..573cca00a0e6 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -172,6 +172,7 @@ struct bpf_verifier_state_list { | |||
| 172 | #define BPF_ALU_SANITIZE_SRC 1U | 172 | #define BPF_ALU_SANITIZE_SRC 1U |
| 173 | #define BPF_ALU_SANITIZE_DST 2U | 173 | #define BPF_ALU_SANITIZE_DST 2U |
| 174 | #define BPF_ALU_NEG_VALUE (1U << 2) | 174 | #define BPF_ALU_NEG_VALUE (1U << 2) |
| 175 | #define BPF_ALU_NON_POINTER (1U << 3) | ||
| 175 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ | 176 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ |
| 176 | BPF_ALU_SANITIZE_DST) | 177 | BPF_ALU_SANITIZE_DST) |
| 177 | 178 | ||
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index f02cee0225d4..d815622cd31e 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h | |||
| @@ -3,13 +3,22 @@ | |||
| 3 | #define _LINUX_BPFILTER_H | 3 | #define _LINUX_BPFILTER_H |
| 4 | 4 | ||
| 5 | #include <uapi/linux/bpfilter.h> | 5 | #include <uapi/linux/bpfilter.h> |
| 6 | #include <linux/umh.h> | ||
| 6 | 7 | ||
| 7 | struct sock; | 8 | struct sock; |
| 8 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, | 9 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, |
| 9 | unsigned int optlen); | 10 | unsigned int optlen); |
| 10 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, | 11 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, |
| 11 | int __user *optlen); | 12 | int __user *optlen); |
| 12 | extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, | 13 | struct bpfilter_umh_ops { |
| 13 | char __user *optval, | 14 | struct umh_info info; |
| 14 | unsigned int optlen, bool is_set); | 15 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ |
| 16 | struct mutex lock; | ||
| 17 | int (*sockopt)(struct sock *sk, int optname, | ||
| 18 | char __user *optval, | ||
| 19 | unsigned int optlen, bool is_set); | ||
| 20 | int (*start)(void); | ||
| 21 | bool stop; | ||
| 22 | }; | ||
| 23 | extern struct bpfilter_umh_ops bpfilter_ops; | ||
| 15 | #endif | 24 | #endif |
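
The new struct bpfilter_umh_ops bundles the usermode-helper state that used to be a bare function pointer: the umh_info, a mutex serializing concurrent {get,set}sockopt callers, the sockopt handler, a start hook and a stop flag. A hedged sketch of how a caller is expected to use it (the restart-on-demand logic lives in net/bpfilter and is not shown; the default error code is illustrative):

static int demo_bpfilter_sockopt(struct sock *sk, int optname,
                                 char __user *optval, unsigned int optlen,
                                 bool is_set)
{
        int err = -ENOPROTOOPT;         /* illustrative default */

        mutex_lock(&bpfilter_ops.lock);
        if (bpfilter_ops.sockopt)
                err = bpfilter_ops.sockopt(sk, optname, optval, optlen,
                                           is_set);
        mutex_unlock(&bpfilter_ops.lock);

        return err;
}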
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
| @@ -3,9 +3,8 @@ | |||
| 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." |
| 4 | #endif | 4 | #endif |
| 5 | 5 | ||
| 6 | /* Some compiler specific definitions are overwritten here | 6 | /* Compiler specific definitions for Clang compiler */ |
| 7 | * for Clang compiler | 7 | |
| 8 | */ | ||
| 9 | #define uninitialized_var(x) x = *(&(x)) | 8 | #define uninitialized_var(x) x = *(&(x)) |
| 10 | 9 | ||
| 11 | /* same as gcc, this was present in clang-2.6 so we can assume it works | 10 | /* same as gcc, this was present in clang-2.6 so we can assume it works |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dd8268f5f5f0..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -58,10 +58,6 @@ | |||
| 58 | (typeof(ptr)) (__ptr + (off)); \ | 58 | (typeof(ptr)) (__ptr + (off)); \ |
| 59 | }) | 59 | }) |
| 60 | 60 | ||
| 61 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ | ||
| 62 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 63 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 64 | |||
| 65 | /* | 61 | /* |
| 66 | * A trick to suppress uninitialized variable warning without generating any | 62 | * A trick to suppress uninitialized variable warning without generating any |
| 67 | * code | 63 | * code |
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
| @@ -5,9 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #ifdef __ECC | 6 | #ifdef __ECC |
| 7 | 7 | ||
| 8 | /* Some compiler specific definitions are overwritten here | 8 | /* Compiler specific definitions for Intel ECC compiler */ |
| 9 | * for Intel ECC compiler | ||
| 10 | */ | ||
| 11 | 9 | ||
| 12 | #include <asm/intrinsics.h> | 10 | #include <asm/intrinsics.h> |
| 13 | 11 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
| 161 | #endif | 161 | #endif |
| 162 | 162 | ||
| 163 | #ifndef OPTIMIZER_HIDE_VAR | 163 | #ifndef OPTIMIZER_HIDE_VAR |
| 164 | #define OPTIMIZER_HIDE_VAR(var) barrier() | 164 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
| 165 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 166 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 165 | #endif | 167 | #endif |
| 166 | 168 | ||
| 167 | /* Not-quite-unique ID. */ | 169 | /* Not-quite-unique ID. */ |
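
The OPTIMIZER_HIDE_VAR() hunks move the gcc inline-asm definition into compiler.h as the shared fallback, so compilers other than gcc get the stronger definition instead of a plain barrier(): the empty asm ties the value to a register and stops the compiler from constant-folding or value-tracking it. That property is what constant-time code such as crypto_memneq() relies on; a user-space illustration of the same pattern (editorial, using the macro body now shared in compiler.h):

#include <stddef.h>

#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

/* Constant-time comparison in the style of crypto_memneq(). */
static unsigned long demo_memneq(const unsigned char *a,
                                 const unsigned char *b, size_t n)
{
        unsigned long neq = 0;

        while (n--) {
                neq |= *a++ ^ *b++;
                /* keep the compiler from rewriting this as an early-exit memcmp */
                OPTIMIZER_HIDE_VAR(neq);
        }
        return neq;     /* 0 iff the buffers were equal */
}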
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7cdd31a69719..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); | |||
| 653 | 653 | ||
| 654 | extern struct fb_info *registered_fb[FB_MAX]; | 654 | extern struct fb_info *registered_fb[FB_MAX]; |
| 655 | extern int num_registered_fb; | 655 | extern int num_registered_fb; |
| 656 | extern bool fb_center_logo; | ||
| 656 | extern struct class *fb_class; | 657 | extern struct class *fb_class; |
| 657 | 658 | ||
| 658 | #define for_each_registered_fb(i) \ | 659 | #define for_each_registered_fb(i) \ |
diff --git a/include/linux/hid.h b/include/linux/hid.h index d99287327ef2..f9707d1dcb58 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -430,7 +430,7 @@ struct hid_local { | |||
| 430 | */ | 430 | */ |
| 431 | 431 | ||
| 432 | struct hid_collection { | 432 | struct hid_collection { |
| 433 | struct hid_collection *parent; | 433 | int parent_idx; /* device->collection */ |
| 434 | unsigned type; | 434 | unsigned type; |
| 435 | unsigned usage; | 435 | unsigned usage; |
| 436 | unsigned level; | 436 | unsigned level; |
| @@ -658,7 +658,6 @@ struct hid_parser { | |||
| 658 | unsigned int *collection_stack; | 658 | unsigned int *collection_stack; |
| 659 | unsigned int collection_stack_ptr; | 659 | unsigned int collection_stack_ptr; |
| 660 | unsigned int collection_stack_size; | 660 | unsigned int collection_stack_size; |
| 661 | struct hid_collection *active_collection; | ||
| 662 | struct hid_device *device; | 661 | struct hid_device *device; |
| 663 | unsigned int scan_flags; | 662 | unsigned int scan_flags; |
| 664 | }; | 663 | }; |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f0885cc01db6..dcb6977afce9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info { | |||
| 1159 | u32 bytes_avail_towrite; | 1159 | u32 bytes_avail_towrite; |
| 1160 | }; | 1160 | }; |
| 1161 | 1161 | ||
| 1162 | void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | 1162 | |
| 1163 | struct hv_ring_buffer_debug_info *debug_info); | 1163 | int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, |
| 1164 | struct hv_ring_buffer_debug_info *debug_info); | ||
| 1164 | 1165 | ||
| 1165 | /* Vmbus interface */ | 1166 | /* Vmbus interface */ |
| 1166 | #define vmbus_driver_register(driver) \ | 1167 | #define vmbus_driver_register(driver) \ |
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 6756fea18b69..e44746de95cd 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h | |||
| @@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) | |||
| 54 | case ARPHRD_IPGRE: | 54 | case ARPHRD_IPGRE: |
| 55 | case ARPHRD_VOID: | 55 | case ARPHRD_VOID: |
| 56 | case ARPHRD_NONE: | 56 | case ARPHRD_NONE: |
| 57 | case ARPHRD_RAWIP: | ||
| 57 | return false; | 58 | return false; |
| 58 | default: | 59 | default: |
| 59 | return true; | 60 | return true; |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c672f34235e7..4a728dba02e2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -260,6 +260,7 @@ struct irq_affinity { | |||
| 260 | /** | 260 | /** |
| 261 | * struct irq_affinity_desc - Interrupt affinity descriptor | 261 | * struct irq_affinity_desc - Interrupt affinity descriptor |
| 262 | * @mask: cpumask to hold the affinity assignment | 262 | * @mask: cpumask to hold the affinity assignment |
| 263 | * @is_managed: 1 if the interrupt is managed internally | ||
| 263 | */ | 264 | */ |
| 264 | struct irq_affinity_desc { | 265 | struct irq_affinity_desc { |
| 265 | struct cpumask mask; | 266 | struct cpumask mask; |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 7da406ae3a2b..43348303cb4b 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
| @@ -162,6 +162,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( | |||
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | enum nvdimm_security_state { | 164 | enum nvdimm_security_state { |
| 165 | NVDIMM_SECURITY_ERROR = -1, | ||
| 165 | NVDIMM_SECURITY_DISABLED, | 166 | NVDIMM_SECURITY_DISABLED, |
| 166 | NVDIMM_SECURITY_UNLOCKED, | 167 | NVDIMM_SECURITY_UNLOCKED, |
| 167 | NVDIMM_SECURITY_LOCKED, | 168 | NVDIMM_SECURITY_LOCKED, |
| @@ -236,7 +237,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, | |||
| 236 | cmd_mask, num_flush, flush_wpq, NULL, NULL); | 237 | cmd_mask, num_flush, flush_wpq, NULL, NULL); |
| 237 | } | 238 | } |
| 238 | 239 | ||
| 239 | int nvdimm_security_setup_events(struct nvdimm *nvdimm); | ||
| 240 | const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); | 240 | const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); |
| 241 | const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); | 241 | const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); |
| 242 | u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, | 242 | u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, |
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 9a9631f0559e..fc91082d4c35 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h | |||
| @@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param { | |||
| 2791 | } __packed; | 2791 | } __packed; |
| 2792 | 2792 | ||
| 2793 | /*****************************************************************************/ | 2793 | /*****************************************************************************/ |
| 2794 | /* Commands for I2S recording on audio codec. */ | ||
| 2795 | |||
| 2796 | #define EC_CMD_CODEC_I2S 0x00BC | ||
| 2797 | |||
| 2798 | enum ec_codec_i2s_subcmd { | ||
| 2799 | EC_CODEC_SET_SAMPLE_DEPTH = 0x0, | ||
| 2800 | EC_CODEC_SET_GAIN = 0x1, | ||
| 2801 | EC_CODEC_GET_GAIN = 0x2, | ||
| 2802 | EC_CODEC_I2S_ENABLE = 0x3, | ||
| 2803 | EC_CODEC_I2S_SET_CONFIG = 0x4, | ||
| 2804 | EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, | ||
| 2805 | EC_CODEC_I2S_SET_BCLK = 0x6, | ||
| 2806 | }; | ||
| 2807 | |||
| 2808 | enum ec_sample_depth_value { | ||
| 2809 | EC_CODEC_SAMPLE_DEPTH_16 = 0, | ||
| 2810 | EC_CODEC_SAMPLE_DEPTH_24 = 1, | ||
| 2811 | }; | ||
| 2812 | |||
| 2813 | enum ec_i2s_config { | ||
| 2814 | EC_DAI_FMT_I2S = 0, | ||
| 2815 | EC_DAI_FMT_RIGHT_J = 1, | ||
| 2816 | EC_DAI_FMT_LEFT_J = 2, | ||
| 2817 | EC_DAI_FMT_PCM_A = 3, | ||
| 2818 | EC_DAI_FMT_PCM_B = 4, | ||
| 2819 | EC_DAI_FMT_PCM_TDM = 5, | ||
| 2820 | }; | ||
| 2821 | |||
| 2822 | struct ec_param_codec_i2s { | ||
| 2823 | /* | ||
| 2824 | * enum ec_codec_i2s_subcmd | ||
| 2825 | */ | ||
| 2826 | uint8_t cmd; | ||
| 2827 | union { | ||
| 2828 | /* | ||
| 2829 | * EC_CODEC_SET_SAMPLE_DEPTH | ||
| 2830 | * Value should be one of ec_sample_depth_value. | ||
| 2831 | */ | ||
| 2832 | uint8_t depth; | ||
| 2833 | |||
| 2834 | /* | ||
| 2835 | * EC_CODEC_SET_GAIN | ||
| 2836 | * Value should be 0~43 for both channels. | ||
| 2837 | */ | ||
| 2838 | struct ec_param_codec_i2s_set_gain { | ||
| 2839 | uint8_t left; | ||
| 2840 | uint8_t right; | ||
| 2841 | } __packed gain; | ||
| 2842 | |||
| 2843 | /* | ||
| 2844 | * EC_CODEC_I2S_ENABLE | ||
| 2845 | * 1 to enable, 0 to disable. | ||
| 2846 | */ | ||
| 2847 | uint8_t i2s_enable; | ||
| 2848 | |||
| 2849 | /* | ||
| 2850 | * EC_CODEC_I2S_SET_CONFIG | ||
| 2851 | * Value should be one of ec_i2s_config. | ||
| 2852 | */ | ||
| 2853 | uint8_t i2s_config; | ||
| 2854 | |||
| 2855 | /* | ||
| 2856 | * EC_CODEC_I2S_SET_TDM_CONFIG | ||
| 2857 | * Value should be one of ec_i2s_config. | ||
| 2858 | */ | ||
| 2859 | struct ec_param_codec_i2s_tdm { | ||
| 2860 | /* | ||
| 2861 | * 0 to 496 | ||
| 2862 | */ | ||
| 2863 | int16_t ch0_delay; | ||
| 2864 | /* | ||
| 2865 | * -1 to 496 | ||
| 2866 | */ | ||
| 2867 | int16_t ch1_delay; | ||
| 2868 | uint8_t adjacent_to_ch0; | ||
| 2869 | uint8_t adjacent_to_ch1; | ||
| 2870 | } __packed tdm_param; | ||
| 2871 | |||
| 2872 | /* | ||
| 2873 | * EC_CODEC_I2S_SET_BCLK | ||
| 2874 | */ | ||
| 2875 | uint32_t bclk; | ||
| 2876 | }; | ||
| 2877 | } __packed; | ||
| 2878 | |||
| 2879 | /* | ||
| 2880 | * For subcommand EC_CODEC_GET_GAIN. | ||
| 2881 | */ | ||
| 2882 | struct ec_response_codec_gain { | ||
| 2883 | uint8_t left; | ||
| 2884 | uint8_t right; | ||
| 2885 | } __packed; | ||
| 2886 | |||
| 2887 | /*****************************************************************************/ | ||
| 2794 | /* System commands */ | 2888 | /* System commands */ |
| 2795 | 2889 | ||
| 2796 | /* | 2890 | /* |
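
EC_CMD_CODEC_I2S carries exactly one sub-command per host command: the caller sets .cmd to an ec_codec_i2s_subcmd value and fills the matching union member. A hedged sketch of how a codec driver might issue EC_CODEC_SET_GAIN (the cros_ec_cmd_xfer_status() transport call is one plausible way to send it and is not part of the hunk above):

static int demo_set_codec_gain(struct cros_ec_device *ec_dev,
                               u8 left, u8 right)
{
        struct cros_ec_command *msg;
        struct ec_param_codec_i2s *p;
        int ret;

        msg = kzalloc(sizeof(*msg) + sizeof(*p), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->command = EC_CMD_CODEC_I2S;
        msg->outsize = sizeof(*p);

        p = (struct ec_param_codec_i2s *)msg->data;
        p->cmd = EC_CODEC_SET_GAIN;
        p->gain.left = left;            /* 0..43 per the comment above */
        p->gain.right = right;

        ret = cros_ec_cmd_xfer_status(ec_dev, msg);
        kfree(msg);
        return ret;
}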
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h index ab16ad283def..2083fa20821d 100644 --- a/include/linux/mfd/ingenic-tcu.h +++ b/include/linux/mfd/ingenic-tcu.h | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | #define TCU_TCSR_PRESCALE_LSB 3 | 41 | #define TCU_TCSR_PRESCALE_LSB 3 |
| 42 | #define TCU_TCSR_PRESCALE_MASK 0x38 | 42 | #define TCU_TCSR_PRESCALE_MASK 0x38 |
| 43 | 43 | ||
| 44 | #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ | 44 | #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */ |
| 45 | #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ | 45 | #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ |
| 46 | #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ | 46 | #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ |
| 47 | 47 | ||
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index fe69c0f4398f..4d5d51a9c8a6 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/gpio/consumer.h> | 15 | #include <linux/gpio/consumer.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/mfd/madera/pdata.h> | 17 | #include <linux/mfd/madera/pdata.h> |
| 18 | #include <linux/mutex.h> | ||
| 18 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
| 19 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
| 20 | #include <linux/regulator/consumer.h> | 21 | #include <linux/regulator/consumer.h> |
| @@ -37,6 +38,8 @@ enum madera_type { | |||
| 37 | 38 | ||
| 38 | #define MADERA_MAX_MICBIAS 4 | 39 | #define MADERA_MAX_MICBIAS 4 |
| 39 | 40 | ||
| 41 | #define MADERA_MAX_HP_OUTPUT 3 | ||
| 42 | |||
| 40 | /* Notifier events */ | 43 | /* Notifier events */ |
| 41 | #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 | 44 | #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 |
| 42 | #define MADERA_NOTIFY_HPDET 0x2 | 45 | #define MADERA_NOTIFY_HPDET 0x2 |
| @@ -183,6 +186,10 @@ struct madera { | |||
| 183 | unsigned int num_childbias[MADERA_MAX_MICBIAS]; | 186 | unsigned int num_childbias[MADERA_MAX_MICBIAS]; |
| 184 | 187 | ||
| 185 | struct snd_soc_dapm_context *dapm; | 188 | struct snd_soc_dapm_context *dapm; |
| 189 | struct mutex dapm_ptr_lock; | ||
| 190 | unsigned int hp_ena; | ||
| 191 | bool out_clamp[MADERA_MAX_HP_OUTPUT]; | ||
| 192 | bool out_shorted[MADERA_MAX_HP_OUTPUT]; | ||
| 186 | 193 | ||
| 187 | struct blocking_notifier_head notifier; | 194 | struct blocking_notifier_head notifier; |
| 188 | }; | 195 | }; |
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index b9a53e013bff..483168403ae5 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h | |||
| @@ -78,6 +78,8 @@ | |||
| 78 | #define STEPCONFIG_YNN BIT(8) | 78 | #define STEPCONFIG_YNN BIT(8) |
| 79 | #define STEPCONFIG_XNP BIT(9) | 79 | #define STEPCONFIG_XNP BIT(9) |
| 80 | #define STEPCONFIG_YPN BIT(10) | 80 | #define STEPCONFIG_YPN BIT(10) |
| 81 | #define STEPCONFIG_RFP(val) ((val) << 12) | ||
| 82 | #define STEPCONFIG_RFP_VREFP (0x3 << 12) | ||
| 81 | #define STEPCONFIG_INM_MASK (0xF << 15) | 83 | #define STEPCONFIG_INM_MASK (0xF << 15) |
| 82 | #define STEPCONFIG_INM(val) ((val) << 15) | 84 | #define STEPCONFIG_INM(val) ((val) << 15) |
| 83 | #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) | 85 | #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) |
| @@ -86,6 +88,8 @@ | |||
| 86 | #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) | 88 | #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) |
| 87 | #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) | 89 | #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) |
| 88 | #define STEPCONFIG_FIFO1 BIT(26) | 90 | #define STEPCONFIG_FIFO1 BIT(26) |
| 91 | #define STEPCONFIG_RFM(val) ((val) << 23) | ||
| 92 | #define STEPCONFIG_RFM_VREFN (0x3 << 23) | ||
| 89 | 93 | ||
| 90 | /* Delay register */ | 94 | /* Delay register */ |
| 91 | #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) | 95 | #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) |
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index e2687a30e5a1..739b7bf37eaa 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
| @@ -79,7 +79,7 @@ | |||
| 79 | /* Some controllers have a CBSY bit */ | 79 | /* Some controllers have a CBSY bit */ |
| 80 | #define TMIO_MMC_HAVE_CBSY BIT(11) | 80 | #define TMIO_MMC_HAVE_CBSY BIT(11) |
| 81 | 81 | ||
| 82 | /* Some controllers that support HS400 use use 4 taps while others use 8. */ | 82 | /* Some controllers that support HS400 use 4 taps while others use 8. */ |
| 83 | #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) | 83 | #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) |
| 84 | 84 | ||
| 85 | int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); | 85 | int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); |
diff --git a/include/linux/of.h b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -50,7 +50,6 @@ struct of_irq_controller; | |||
| 50 | 50 | ||
| 51 | struct device_node { | 51 | struct device_node { |
| 52 | const char *name; | 52 | const char *name; |
| 53 | const char *type; | ||
| 54 | phandle phandle; | 53 | phandle phandle; |
| 55 | const char *full_name; | 54 | const char *full_name; |
| 56 | struct fwnode_handle fwnode; | 55 | struct fwnode_handle fwnode; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b051f761450..ef20aeea10cc 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; | |||
| 48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; | 48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; |
| 49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; | 49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; |
| 50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; | 50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
| 51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; | ||
| 51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; | 52 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
| 52 | 53 | ||
| 53 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) | 54 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) |
| @@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini | |||
| 56 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) | 57 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) |
| 57 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) | 58 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) |
| 58 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) | 59 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) |
| 60 | #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) | ||
| 59 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) | 61 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) |
| 60 | 62 | ||
| 61 | extern const int phy_10_100_features_array[4]; | 63 | extern const int phy_10_100_features_array[4]; |
| @@ -467,8 +469,8 @@ struct phy_device { | |||
| 467 | * only works for PHYs with IDs which match this field | 469 | * only works for PHYs with IDs which match this field |
| 468 | * name: The friendly name of this PHY type | 470 | * name: The friendly name of this PHY type |
| 469 | * phy_id_mask: Defines the important bits of the phy_id | 471 | * phy_id_mask: Defines the important bits of the phy_id |
| 470 | * features: A list of features (speed, duplex, etc) supported | 472 | * features: A mandatory list of features (speed, duplex, etc) |
| 471 | * by this PHY | 473 | * supported by this PHY |
| 472 | * flags: A bitfield defining certain other features this PHY | 474 | * flags: A bitfield defining certain other features this PHY |
| 473 | * supports (like interrupts) | 475 | * supports (like interrupts) |
| 474 | * | 476 | * |
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 59ddf9af909e..2dd0a9ed5b36 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
| @@ -663,6 +663,37 @@ out: | |||
| 663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, | 663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, |
| 664 | u32 prod_idx, void *p_prod_elem) | 664 | u32 prod_idx, void *p_prod_elem) |
| 665 | { | 665 | { |
| 666 | if (p_chain->mode == QED_CHAIN_MODE_PBL) { | ||
| 667 | u32 cur_prod, page_mask, page_cnt, page_diff; | ||
| 668 | |||
| 669 | cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx : | ||
| 670 | p_chain->u.chain32.prod_idx; | ||
| 671 | |||
| 672 | /* Assume that number of elements in a page is power of 2 */ | ||
| 673 | page_mask = ~p_chain->elem_per_page_mask; | ||
| 674 | |||
| 675 | /* Use "cur_prod - 1" and "prod_idx - 1" since producer index | ||
| 676 | * reaches the first element of next page before the page index | ||
| 677 | * is incremented. See qed_chain_produce(). | ||
| 678 | * Index wrap around is not a problem because the difference | ||
| 679 | * between current and given producer indices is always | ||
| 680 | * positive and lower than the chain's capacity. | ||
| 681 | */ | ||
| 682 | page_diff = (((cur_prod - 1) & page_mask) - | ||
| 683 | ((prod_idx - 1) & page_mask)) / | ||
| 684 | p_chain->elem_per_page; | ||
| 685 | |||
| 686 | page_cnt = qed_chain_get_page_cnt(p_chain); | ||
| 687 | if (is_chain_u16(p_chain)) | ||
| 688 | p_chain->pbl.c.u16.prod_page_idx = | ||
| 689 | (p_chain->pbl.c.u16.prod_page_idx - | ||
| 690 | page_diff + page_cnt) % page_cnt; | ||
| 691 | else | ||
| 692 | p_chain->pbl.c.u32.prod_page_idx = | ||
| 693 | (p_chain->pbl.c.u32.prod_page_idx - | ||
| 694 | page_diff + page_cnt) % page_cnt; | ||
| 695 | } | ||
| 696 | |||
| 666 | if (is_chain_u16(p_chain)) | 697 | if (is_chain_u16(p_chain)) |
| 667 | p_chain->u.chain16.prod_idx = (u16) prod_idx; | 698 | p_chain->u.chain16.prod_idx = (u16) prod_idx; |
| 668 | else | 699 | else |
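
qed_chain_set_prod() may rewind the producer index, and for PBL chains the producer page index has to be moved back by the same number of pages, hence the page_diff computation above. A stand-alone walk-through with example numbers (editorial; 8 elements per page, 4 pages in the chain):

#include <stdio.h>

int main(void)
{
        unsigned int elem_per_page = 8;                 /* power of two       */
        unsigned int elem_per_page_mask = elem_per_page - 1;
        unsigned int page_cnt = 4;                      /* pages in the PBL   */

        unsigned int cur_prod = 17;     /* current producer: element in page 2 */
        unsigned int prod_idx = 3;      /* producer being set back into page 0 */
        unsigned int prod_page_idx = 2; /* PBL page index before the rewind    */

        unsigned int page_mask = ~elem_per_page_mask;
        unsigned int page_diff = (((cur_prod - 1) & page_mask) -
                                  ((prod_idx - 1) & page_mask)) / elem_per_page;

        prod_page_idx = (prod_page_idx - page_diff + page_cnt) % page_cnt;

        /* prints "page_diff=2, prod_page_idx=0" */
        printf("page_diff=%u, prod_page_idx=%u\n", page_diff, prod_page_idx);
        return 0;
}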
diff --git a/include/linux/sched.h b/include/linux/sched.h index 224666226e87..d2f90fa92468 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1406,6 +1406,7 @@ extern struct pid *cad_pid; | |||
| 1406 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ | 1406 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
| 1407 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1407 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
| 1408 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ | 1408 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
| 1409 | #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ | ||
| 1409 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1410 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
| 1410 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1411 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
| 1411 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1412 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
| @@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t) | |||
| 1904 | 1905 | ||
| 1905 | #endif | 1906 | #endif |
| 1906 | 1907 | ||
| 1908 | void __exit_umh(struct task_struct *tsk); | ||
| 1909 | |||
| 1910 | static inline void exit_umh(struct task_struct *tsk) | ||
| 1911 | { | ||
| 1912 | if (unlikely(tsk->flags & PF_UMH)) | ||
| 1913 | __exit_umh(tsk); | ||
| 1914 | } | ||
| 1915 | |||
| 1907 | #ifdef CONFIG_DEBUG_RSEQ | 1916 | #ifdef CONFIG_DEBUG_RSEQ |
| 1908 | 1917 | ||
| 1909 | void rseq_syscall(struct pt_regs *regs); | 1918 | void rseq_syscall(struct pt_regs *regs); |
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 10b19a192b2d..545f37138057 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h | |||
| @@ -24,9 +24,13 @@ | |||
| 24 | * called near the end of a function. Otherwise, the list can be | 24 | * called near the end of a function. Otherwise, the list can be |
| 25 | * re-initialized for later re-use by wake_q_init(). | 25 | * re-initialized for later re-use by wake_q_init(). |
| 26 | * | 26 | * |
| 27 | * Note that this can cause spurious wakeups. schedule() callers | 27 | * NOTE that this can cause spurious wakeups. schedule() callers |
| 28 | * must ensure the call is done inside a loop, confirming that the | 28 | * must ensure the call is done inside a loop, confirming that the |
| 29 | * wakeup condition has in fact occurred. | 29 | * wakeup condition has in fact occurred. |
| 30 | * | ||
| 31 | * NOTE that there is no guarantee the wakeup will happen any later than the | ||
| 32 | * wake_q_add() location. Therefore task must be ready to be woken at the | ||
| 33 | * location of the wake_q_add(). | ||
| 30 | */ | 34 | */ |
| 31 | 35 | ||
| 32 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..95d25b010a25 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); | |||
| 3218 | * | 3218 | * |
| 3219 | * This is exactly the same as pskb_trim except that it ensures the | 3219 | * This is exactly the same as pskb_trim except that it ensures the |
| 3220 | * checksum of received packets are still valid after the operation. | 3220 | * checksum of received packets are still valid after the operation. |
| 3221 | * It can change skb pointers. | ||
| 3221 | */ | 3222 | */ |
| 3222 | 3223 | ||
| 3223 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | 3224 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) |
diff --git a/include/linux/umh.h b/include/linux/umh.h index 235f51b62c71..0c08de356d0d 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h | |||
| @@ -47,6 +47,8 @@ struct umh_info { | |||
| 47 | const char *cmdline; | 47 | const char *cmdline; |
| 48 | struct file *pipe_to_umh; | 48 | struct file *pipe_to_umh; |
| 49 | struct file *pipe_from_umh; | 49 | struct file *pipe_from_umh; |
| 50 | struct list_head list; | ||
| 51 | void (*cleanup)(struct umh_info *info); | ||
| 50 | pid_t pid; | 52 | pid_t pid; |
| 51 | }; | 53 | }; |
| 52 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); | 54 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); |
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..987b6491b946 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -12,6 +12,11 @@ struct irq_affinity; | |||
| 12 | 12 | ||
| 13 | /** | 13 | /** |
| 14 | * virtio_config_ops - operations for configuring a virtio device | 14 | * virtio_config_ops - operations for configuring a virtio device |
| 15 | * Note: Do not assume that a transport implements all of the operations | ||
| 16 | * getting/setting a value as a simple read/write! Generally speaking, | ||
| 17 | * any of @get/@set, @get_status/@set_status, or @get_features/ | ||
| 18 | * @finalize_features are NOT safe to be called from an atomic | ||
| 19 | * context. | ||
| 15 | * @get: read the value of a configuration field | 20 | * @get: read the value of a configuration field |
| 16 | * vdev: the virtio_device | 21 | * vdev: the virtio_device |
| 17 | * offset: the offset of the configuration field | 22 | * offset: the offset of the configuration field |
| @@ -22,7 +27,7 @@ struct irq_affinity; | |||
| 22 | * offset: the offset of the configuration field | 27 | * offset: the offset of the configuration field |
| 23 | * buf: the buffer to read the field value from. | 28 | * buf: the buffer to read the field value from. |
| 24 | * len: the length of the buffer | 29 | * len: the length of the buffer |
| 25 | * @generation: config generation counter | 30 | * @generation: config generation counter (optional) |
| 26 | * vdev: the virtio_device | 31 | * vdev: the virtio_device |
| 27 | * Returns the config generation counter | 32 | * Returns the config generation counter |
| 28 | * @get_status: read the status byte | 33 | * @get_status: read the status byte |
| @@ -48,17 +53,17 @@ struct irq_affinity; | |||
| 48 | * @del_vqs: free virtqueues found by find_vqs(). | 53 | * @del_vqs: free virtqueues found by find_vqs(). |
| 49 | * @get_features: get the array of feature bits for this device. | 54 | * @get_features: get the array of feature bits for this device. |
| 50 | * vdev: the virtio_device | 55 | * vdev: the virtio_device |
| 51 | * Returns the first 32 feature bits (all we currently need). | 56 | * Returns the first 64 feature bits (all we currently need). |
| 52 | * @finalize_features: confirm what device features we'll be using. | 57 | * @finalize_features: confirm what device features we'll be using. |
| 53 | * vdev: the virtio_device | 58 | * vdev: the virtio_device |
| 54 | * This gives the final feature bits for the device: it can change | 59 | * This gives the final feature bits for the device: it can change |
| 55 | * the dev->feature bits if it wants. | 60 | * the dev->feature bits if it wants. |
| 56 | * Returns 0 on success or error status | 61 | * Returns 0 on success or error status |
| 57 | * @bus_name: return the bus name associated with the device | 62 | * @bus_name: return the bus name associated with the device (optional) |
| 58 | * vdev: the virtio_device | 63 | * vdev: the virtio_device |
| 59 | * This returns a pointer to the bus name a la pci_name from which | 64 | * This returns a pointer to the bus name a la pci_name from which |
| 60 | * the caller can then copy. | 65 | * the caller can then copy. |
| 61 | * @set_vq_affinity: set the affinity for a virtqueue. | 66 | * @set_vq_affinity: set the affinity for a virtqueue (optional). |
| 62 | * @get_vq_affinity: get the affinity for a virtqueue (optional). | 67 | * @get_vq_affinity: get the affinity for a virtqueue (optional). |
| 63 | */ | 68 | */ |
| 64 | typedef void vq_callback_t(struct virtqueue *); | 69 | typedef void vq_callback_t(struct virtqueue *); |
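
The virtio_config_ops comment changes spell out which callbacks a transport may leave NULL (@generation, @bus_name, @set_vq_affinity) and that the get/set style operations are not safe from atomic context. Core code therefore probes optional callbacks before calling them; a minimal sketch of that pattern (editorial, mirroring what the config-space read helpers do):

static u32 demo_config_generation(struct virtio_device *vdev)
{
        if (vdev->config->generation)
                return vdev->config->generation(vdev);
        return 0;       /* transports without a counter report "unchanged" */
}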
diff --git a/include/linux/xarray.h b/include/linux/xarray.h index f492e21c4aa2..5d9d318bcf7a 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h | |||
| @@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry) | |||
| 176 | */ | 176 | */ |
| 177 | static inline bool xa_is_err(const void *entry) | 177 | static inline bool xa_is_err(const void *entry) |
| 178 | { | 178 | { |
| 179 | return unlikely(xa_is_internal(entry)); | 179 | return unlikely(xa_is_internal(entry) && |
| 180 | entry >= xa_mk_internal(-MAX_ERRNO)); | ||
| 180 | } | 181 | } |
| 181 | 182 | ||
| 182 | /** | 183 | /** |
| @@ -286,7 +287,6 @@ struct xarray { | |||
| 286 | */ | 287 | */ |
| 287 | #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) | 288 | #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) |
| 288 | 289 | ||
| 289 | void xa_init_flags(struct xarray *, gfp_t flags); | ||
| 290 | void *xa_load(struct xarray *, unsigned long index); | 290 | void *xa_load(struct xarray *, unsigned long index); |
| 291 | void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); | 291 | void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); |
| 292 | void *xa_erase(struct xarray *, unsigned long index); | 292 | void *xa_erase(struct xarray *, unsigned long index); |
| @@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, | |||
| 304 | void xa_destroy(struct xarray *); | 304 | void xa_destroy(struct xarray *); |
| 305 | 305 | ||
| 306 | /** | 306 | /** |
| 307 | * xa_init_flags() - Initialise an empty XArray with flags. | ||
| 308 | * @xa: XArray. | ||
| 309 | * @flags: XA_FLAG values. | ||
| 310 | * | ||
| 311 | * If you need to initialise an XArray with special flags (eg you need | ||
| 312 | * to take the lock from interrupt context), use this function instead | ||
| 313 | * of xa_init(). | ||
| 314 | * | ||
| 315 | * Context: Any context. | ||
| 316 | */ | ||
| 317 | static inline void xa_init_flags(struct xarray *xa, gfp_t flags) | ||
| 318 | { | ||
| 319 | spin_lock_init(&xa->xa_lock); | ||
| 320 | xa->xa_flags = flags; | ||
| 321 | xa->xa_head = NULL; | ||
| 322 | } | ||
| 323 | |||
| 324 | /** | ||
| 307 | * xa_init() - Initialise an empty XArray. | 325 | * xa_init() - Initialise an empty XArray. |
| 308 | * @xa: XArray. | 326 | * @xa: XArray. |
| 309 | * | 327 | * |
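
xa_init_flags() becomes a static inline in the header rather than an exported function; the extern declaration removed further up in this diff is the other half of that move. Typical use, per the new kernel-doc, is initialising an XArray whose lock is also taken from interrupt context (editorial example; demo_pending is a placeholder):

static struct xarray demo_pending;      /* placeholder: also touched from IRQ context */

static void demo_setup(void)
{
        xa_init_flags(&demo_pending, XA_FLAGS_LOCK_IRQ);
}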
| @@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) | |||
| 342 | } | 360 | } |
| 343 | 361 | ||
| 344 | /** | 362 | /** |
| 345 | * xa_for_each() - Iterate over a portion of an XArray. | 363 | * xa_for_each_start() - Iterate over a portion of an XArray. |
| 346 | * @xa: XArray. | 364 | * @xa: XArray. |
| 365 | * @index: Index of @entry. | ||
| 347 | * @entry: Entry retrieved from array. | 366 | * @entry: Entry retrieved from array. |
| 367 | * @start: First index to retrieve from array. | ||
| 368 | * | ||
| 369 | * During the iteration, @entry will have the value of the entry stored | ||
| 370 | * in @xa at @index. You may modify @index during the iteration if you | ||
| 371 | * want to skip or reprocess indices. It is safe to modify the array | ||
| 372 | * during the iteration. At the end of the iteration, @entry will be set | ||
| 373 | * to NULL and @index will have a value less than or equal to max. | ||
| 374 | * | ||
| 375 | * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have | ||
| 376 | * to handle your own locking with xas_for_each(), and if you have to unlock | ||
| 377 | * after each iteration, it will also end up being O(n.log(n)). | ||
| 378 | * xa_for_each_start() will spin if it hits a retry entry; if you intend to | ||
| 379 | * see retry entries, you should use the xas_for_each() iterator instead. | ||
| 380 | * The xas_for_each() iterator will expand into more inline code than | ||
| 381 | * xa_for_each_start(). | ||
| 382 | * | ||
| 383 | * Context: Any context. Takes and releases the RCU lock. | ||
| 384 | */ | ||
| 385 | #define xa_for_each_start(xa, index, entry, start) \ | ||
| 386 | for (index = start, \ | ||
| 387 | entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \ | ||
| 388 | entry; \ | ||
| 389 | entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) | ||
| 390 | |||
| 391 | /** | ||
| 392 | * xa_for_each() - Iterate over present entries in an XArray. | ||
| 393 | * @xa: XArray. | ||
| 348 | * @index: Index of @entry. | 394 | * @index: Index of @entry. |
| 349 | * @max: Maximum index to retrieve from array. | 395 | * @entry: Entry retrieved from array. |
| 350 | * @filter: Selection criterion. | ||
| 351 | * | 396 | * |
| 352 | * Initialise @index to the lowest index you want to retrieve from the | 397 | * During the iteration, @entry will have the value of the entry stored |
| 353 | * array. During the iteration, @entry will have the value of the entry | 398 | * in @xa at @index. You may modify @index during the iteration if you want |
| 354 | * stored in @xa at @index. The iteration will skip all entries in the | 399 | * to skip or reprocess indices. It is safe to modify the array during the |
| 355 | * array which do not match @filter. You may modify @index during the | 400 | * iteration. At the end of the iteration, @entry will be set to NULL and |
| 356 | * iteration if you want to skip or reprocess indices. It is safe to modify | 401 | * @index will have a value less than or equal to max. |
| 357 | * the array during the iteration. At the end of the iteration, @entry will | ||
| 358 | * be set to NULL and @index will have a value less than or equal to max. | ||
| 359 | * | 402 | * |
| 360 | * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have | 403 | * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have |
| 361 | * to handle your own locking with xas_for_each(), and if you have to unlock | 404 | * to handle your own locking with xas_for_each(), and if you have to unlock |
| @@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) | |||
| 366 | * | 409 | * |
| 367 | * Context: Any context. Takes and releases the RCU lock. | 410 | * Context: Any context. Takes and releases the RCU lock. |
| 368 | */ | 411 | */ |
| 369 | #define xa_for_each(xa, entry, index, max, filter) \ | 412 | #define xa_for_each(xa, index, entry) \ |
| 370 | for (entry = xa_find(xa, &index, max, filter); entry; \ | 413 | xa_for_each_start(xa, index, entry, 0) |
| 371 | entry = xa_find_after(xa, &index, max, filter)) | 414 | |
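Since the calling convention changes here (the old form took the entry, index, max and a filter), a hypothetical caller would now read roughly as below; everything except the iterator itself continues the illustrative setup above:

	unsigned long index;
	void *entry;

	/* Old form: xa_for_each(&sessions, entry, index, ULONG_MAX, XA_PRESENT) */
	xa_for_each(&sessions, index, entry)
		pr_info("present at %lu\n", index);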
| 415 | /** | ||
| 416 | * xa_for_each_marked() - Iterate over marked entries in an XArray. | ||
| 417 | * @xa: XArray. | ||
| 418 | * @index: Index of @entry. | ||
| 419 | * @entry: Entry retrieved from array. | ||
| 420 | * @filter: Selection criterion. | ||
| 421 | * | ||
| 422 | * During the iteration, @entry will have the value of the entry stored | ||
| 423 | * in @xa at @index. The iteration will skip all entries in the array | ||
| 424 | * which do not match @filter. You may modify @index during the iteration | ||
| 425 | * if you want to skip or reprocess indices. It is safe to modify the array | ||
| 426 | * during the iteration. At the end of the iteration, @entry will be set to | ||
| 427 | * NULL and @index will have a value less than or equal to max. | ||
| 428 | * | ||
| 429 | * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n). | ||
| 430 | * You have to handle your own locking with xas_for_each(), and if you have | ||
| 431 | * to unlock after each iteration, it will also end up being O(n.log(n)). | ||
| 432 | * xa_for_each_marked() will spin if it hits a retry entry; if you intend to | ||
| 433 | * see retry entries, you should use the xas_for_each_marked() iterator | ||
| 434 | * instead. The xas_for_each_marked() iterator will expand into more inline | ||
| 435 | * code than xa_for_each_marked(). | ||
| 436 | * | ||
| 437 | * Context: Any context. Takes and releases the RCU lock. | ||
| 438 | */ | ||
| 439 | #define xa_for_each_marked(xa, index, entry, filter) \ | ||
| 440 | for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \ | ||
| 441 | entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter)) | ||
| 372 | 442 | ||
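Filtering on a mark now has its own iterator; continuing the same illustrative setup, XA_MARK_0 stands in for whichever mark a real user would define:

	unsigned long index;
	void *entry;

	/* Visits only the entries that carry XA_MARK_0. */
	xa_for_each_marked(&sessions, index, entry, XA_MARK_0)
		pr_info("marked entry at %lu\n", index);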
| 373 | #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) | 443 | #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) |
| 374 | #define xa_lock(xa) spin_lock(&(xa)->xa_lock) | 444 | #define xa_lock(xa) spin_lock(&(xa)->xa_lock) |
| @@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index); | |||
| 393 | void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); | 463 | void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); |
| 394 | void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, | 464 | void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, |
| 395 | void *entry, gfp_t); | 465 | void *entry, gfp_t); |
| 466 | int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); | ||
| 396 | int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); | 467 | int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); |
| 397 | int __xa_reserve(struct xarray *, unsigned long index, gfp_t); | 468 | int __xa_reserve(struct xarray *, unsigned long index, gfp_t); |
| 398 | void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); | 469 | void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); |
| 399 | void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); | 470 | void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); |
| 400 | 471 | ||
| 401 | /** | 472 | /** |
| 402 | * __xa_insert() - Store this entry in the XArray unless another entry is | ||
| 403 | * already present. | ||
| 404 | * @xa: XArray. | ||
| 405 | * @index: Index into array. | ||
| 406 | * @entry: New entry. | ||
| 407 | * @gfp: Memory allocation flags. | ||
| 408 | * | ||
| 409 | * If you would rather see the existing entry in the array, use __xa_cmpxchg(). | ||
| 410 | * This function is for users who don't care what the entry is, only that | ||
| 411 | * one is present. | ||
| 412 | * | ||
| 413 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
| 414 | * release and reacquire xa_lock if the @gfp flags permit. | ||
| 415 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | ||
| 416 | * -ENOMEM if memory could not be allocated. | ||
| 417 | */ | ||
| 418 | static inline int __xa_insert(struct xarray *xa, unsigned long index, | ||
| 419 | void *entry, gfp_t gfp) | ||
| 420 | { | ||
| 421 | void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); | ||
| 422 | if (!curr) | ||
| 423 | return 0; | ||
| 424 | if (xa_is_err(curr)) | ||
| 425 | return xa_err(curr); | ||
| 426 | return -EEXIST; | ||
| 427 | } | ||
| 428 | |||
| 429 | /** | ||
| 430 | * xa_store_bh() - Store this entry in the XArray. | 473 | * xa_store_bh() - Store this entry in the XArray. |
| 431 | * @xa: XArray. | 474 | * @xa: XArray. |
| 432 | * @index: Index into array. | 475 | * @index: Index into array. |
| @@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, | |||
| 453 | } | 496 | } |
| 454 | 497 | ||
| 455 | /** | 498 | /** |
| 456 | * xa_store_irq() - Erase this entry from the XArray. | 499 | * xa_store_irq() - Store this entry in the XArray. |
| 457 | * @xa: XArray. | 500 | * @xa: XArray. |
| 458 | * @index: Index into array. | 501 | * @index: Index into array. |
| 459 | * @entry: New entry. | 502 | * @entry: New entry. |
| @@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, | |||
| 615 | * @entry: New entry. | 658 | * @entry: New entry. |
| 616 | * @gfp: Memory allocation flags. | 659 | * @gfp: Memory allocation flags. |
| 617 | * | 660 | * |
| 618 | * If you would rather see the existing entry in the array, use xa_cmpxchg(). | 661 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) |
| 619 | * This function is for users who don't care what the entry is, only that | 662 | * if no entry is present. Inserting will fail if a reserved entry is |
| 620 | * one is present. | 663 | * present, even though loading from this index will return NULL. |
| 621 | * | 664 | * |
| 622 | * Context: Process context. Takes and releases the xa_lock. | 665 | * Context: Any context. Takes and releases the xa_lock. May sleep if |
| 623 | * May sleep if the @gfp flags permit. | 666 | * the @gfp flags permit. |
| 624 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | 667 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. |
| 625 | * -ENOMEM if memory could not be allocated. | 668 | * -ENOMEM if memory could not be allocated. |
| 626 | */ | 669 | */ |
| 627 | static inline int xa_insert(struct xarray *xa, unsigned long index, | 670 | static inline int xa_insert(struct xarray *xa, unsigned long index, |
| 628 | void *entry, gfp_t gfp) | 671 | void *entry, gfp_t gfp) |
| 629 | { | 672 | { |
| 630 | void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); | 673 | int err; |
| 631 | if (!curr) | 674 | |
| 632 | return 0; | 675 | xa_lock(xa); |
| 633 | if (xa_is_err(curr)) | 676 | err = __xa_insert(xa, index, entry, gfp); |
| 634 | return xa_err(curr); | 677 | xa_unlock(xa); |
| 635 | return -EEXIST; | 678 | |
| 679 | return err; | ||
| 680 | } | ||
| 681 | |||
| 682 | /** | ||
| 683 | * xa_insert_bh() - Store this entry in the XArray unless another entry is | ||
| 684 | * already present. | ||
| 685 | * @xa: XArray. | ||
| 686 | * @index: Index into array. | ||
| 687 | * @entry: New entry. | ||
| 688 | * @gfp: Memory allocation flags. | ||
| 689 | * | ||
| 690 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | ||
| 691 | * if no entry is present. Inserting will fail if a reserved entry is | ||
| 692 | * present, even though loading from this index will return NULL. | ||
| 693 | * | ||
| 694 | * Context: Any context. Takes and releases the xa_lock while | ||
| 695 | * disabling softirqs. May sleep if the @gfp flags permit. | ||
| 696 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | ||
| 697 | * -ENOMEM if memory could not be allocated. | ||
| 698 | */ | ||
| 699 | static inline int xa_insert_bh(struct xarray *xa, unsigned long index, | ||
| 700 | void *entry, gfp_t gfp) | ||
| 701 | { | ||
| 702 | int err; | ||
| 703 | |||
| 704 | xa_lock_bh(xa); | ||
| 705 | err = __xa_insert(xa, index, entry, gfp); | ||
| 706 | xa_unlock_bh(xa); | ||
| 707 | |||
| 708 | return err; | ||
| 709 | } | ||
| 710 | |||
| 711 | /** | ||
| 712 | * xa_insert_irq() - Store this entry in the XArray unless another entry is | ||
| 713 | * already present. | ||
| 714 | * @xa: XArray. | ||
| 715 | * @index: Index into array. | ||
| 716 | * @entry: New entry. | ||
| 717 | * @gfp: Memory allocation flags. | ||
| 718 | * | ||
| 719 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | ||
| 720 | * if no entry is present. Inserting will fail if a reserved entry is | ||
| 721 | * present, even though loading from this index will return NULL. | ||
| 722 | * | ||
| 723 | * Context: Process context. Takes and releases the xa_lock while | ||
| 724 | * disabling interrupts. May sleep if the @gfp flags permit. | ||
| 725 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | ||
| 726 | * -ENOMEM if memory could not be allocated. | ||
| 727 | */ | ||
| 728 | static inline int xa_insert_irq(struct xarray *xa, unsigned long index, | ||
| 729 | void *entry, gfp_t gfp) | ||
| 730 | { | ||
| 731 | int err; | ||
| 732 | |||
| 733 | xa_lock_irq(xa); | ||
| 734 | err = __xa_insert(xa, index, entry, gfp); | ||
| 735 | xa_unlock_irq(xa); | ||
| 736 | |||
| 737 | return err; | ||
| 636 | } | 738 | } |
| 637 | 739 | ||
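A rough sketch of picking the right variant: __xa_insert() is for callers that already hold xa_lock, while xa_insert(), xa_insert_bh() and xa_insert_irq() take the lock themselves. The request-tracking code below is invented for illustration and assumes softirq context, hence the _bh variant and GFP_ATOMIC:

#include <linux/xarray.h>

static DEFINE_XARRAY(pending);		/* illustrative: keyed by request id */

static int track_request(unsigned long id, void *req)
{
	int err;

	err = xa_insert_bh(&pending, id, req, GFP_ATOMIC);
	if (err == -EEXIST)
		pr_warn("request %lu already tracked\n", id);
	return err;
}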
| 638 | /** | 740 | /** |
| @@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry) | |||
| 970 | (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); | 1072 | (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); |
| 971 | } | 1073 | } |
| 972 | 1074 | ||
| 973 | #define XA_ZERO_ENTRY xa_mk_internal(256) | 1075 | #define XA_RETRY_ENTRY xa_mk_internal(256) |
| 974 | #define XA_RETRY_ENTRY xa_mk_internal(257) | 1076 | #define XA_ZERO_ENTRY xa_mk_internal(257) |
| 975 | 1077 | ||
| 976 | /** | 1078 | /** |
| 977 | * xa_is_zero() - Is the entry a zero entry? | 1079 | * xa_is_zero() - Is the entry a zero entry? |
| @@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry) | |||
| 996 | } | 1098 | } |
| 997 | 1099 | ||
| 998 | /** | 1100 | /** |
| 1101 | * xa_is_advanced() - Is the entry only permitted for the advanced API? | ||
| 1102 | * @entry: Entry to be stored in the XArray. | ||
| 1103 | * | ||
| 1104 | * Return: %true if the entry cannot be stored by the normal API. | ||
| 1105 | */ | ||
| 1106 | static inline bool xa_is_advanced(const void *entry) | ||
| 1107 | { | ||
| 1108 | return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY); | ||
| 1109 | } | ||
| 1110 | |||
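Reordering the two internal constants above is what lets this helper reject everything at or below the retry entry with a single comparison. A caller-side guard in a normal-API store path might look like this (purely illustrative):

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;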
| 1111 | /** | ||
| 999 | * typedef xa_update_node_t - A callback function from the XArray. | 1112 | * typedef xa_update_node_t - A callback function from the XArray. |
| 1000 | * @node: The node which is being processed | 1113 | * @node: The node which is being processed |
| 1001 | * | 1114 | * |
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1adefe42c0a6..2bfb87eb98ce 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h | |||
| @@ -21,18 +21,6 @@ struct socket; | |||
| 21 | struct rxrpc_call; | 21 | struct rxrpc_call; |
| 22 | 22 | ||
| 23 | /* | 23 | /* |
| 24 | * Call completion condition (state == RXRPC_CALL_COMPLETE). | ||
| 25 | */ | ||
| 26 | enum rxrpc_call_completion { | ||
| 27 | RXRPC_CALL_SUCCEEDED, /* - Normal termination */ | ||
| 28 | RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ | ||
| 29 | RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ | ||
| 30 | RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ | ||
| 31 | RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ | ||
| 32 | NR__RXRPC_CALL_COMPLETIONS | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* | ||
| 36 | * Debug ID counter for tracing. | 24 | * Debug ID counter for tracing. |
| 37 | */ | 25 | */ |
| 38 | extern atomic_t rxrpc_debug_id; | 26 | extern atomic_t rxrpc_debug_id; |
| @@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, | |||
| 73 | rxrpc_user_attach_call_t, unsigned long, gfp_t, | 61 | rxrpc_user_attach_call_t, unsigned long, gfp_t, |
| 74 | unsigned int); | 62 | unsigned int); |
| 75 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); | 63 | void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); |
| 76 | int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, | ||
| 77 | struct sockaddr_rxrpc *, struct key *); | ||
| 78 | int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, | ||
| 79 | enum rxrpc_call_completion *, u32 *); | ||
| 80 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); | 64 | u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); |
| 81 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); | 65 | void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); |
| 82 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); | 66 | u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); |
diff --git a/include/net/ax25.h b/include/net/ax25.h index 3f9aea8087e3..8b7eb46ad72d 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h | |||
| @@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) | |||
| 201 | 201 | ||
| 202 | void __ax25_put_route(ax25_route *ax25_rt); | 202 | void __ax25_put_route(ax25_route *ax25_rt); |
| 203 | 203 | ||
| 204 | extern rwlock_t ax25_route_lock; | ||
| 205 | |||
| 206 | static inline void ax25_route_lock_use(void) | ||
| 207 | { | ||
| 208 | read_lock(&ax25_route_lock); | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline void ax25_route_lock_unuse(void) | ||
| 212 | { | ||
| 213 | read_unlock(&ax25_route_lock); | ||
| 214 | } | ||
| 215 | |||
| 204 | static inline void ax25_put_route(ax25_route *ax25_rt) | 216 | static inline void ax25_put_route(ax25_route *ax25_rt) |
| 205 | { | 217 | { |
| 206 | if (refcount_dec_and_test(&ax25_rt->refcount)) | 218 | if (refcount_dec_and_test(&ax25_rt->refcount)) |
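A sketch of how a transmit path might use the new helpers to keep the route table stable over a short, non-sleeping region; the lookup call and variable names are illustrative, and the exact caller contract belongs to the ax25 code rather than this note:

	ax25_route_lock_use();
	ax25_rt = ax25_get_route(addr, dev);	/* illustrative lookup */
	if (ax25_rt) {
		/* use the route; do not sleep while the read lock is held */
	}
	ax25_route_lock_unuse();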
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c5969762a8f4..9c8214d2116d 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
| @@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, | |||
| 241 | struct netlink_ext_ack *extack); | 241 | struct netlink_ext_ack *extack); |
| 242 | int fib_table_dump(struct fib_table *table, struct sk_buff *skb, | 242 | int fib_table_dump(struct fib_table *table, struct sk_buff *skb, |
| 243 | struct netlink_callback *cb, struct fib_dump_filter *filter); | 243 | struct netlink_callback *cb, struct fib_dump_filter *filter); |
| 244 | int fib_table_flush(struct net *net, struct fib_table *table); | 244 | int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); |
| 245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); | 245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); |
| 246 | void fib_table_flush_external(struct fib_table *table); | 246 | void fib_table_flush_external(struct fib_table *table); |
| 247 | void fib_free_table(struct fib_table *tb); | 247 | void fib_free_table(struct fib_table *tb); |
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 7d5cda7ce32a..3e370cb36263 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h | |||
| @@ -84,7 +84,6 @@ struct flow_offload { | |||
| 84 | struct nf_flow_route { | 84 | struct nf_flow_route { |
| 85 | struct { | 85 | struct { |
| 86 | struct dst_entry *dst; | 86 | struct dst_entry *dst; |
| 87 | int ifindex; | ||
| 88 | } tuple[FLOW_OFFLOAD_DIR_MAX]; | 87 | } tuple[FLOW_OFFLOAD_DIR_MAX]; |
| 89 | }; | 88 | }; |
| 90 | 89 | ||
diff --git a/include/sound/soc.h b/include/sound/soc.h index 8ec1de856ee7..e665f111b0d2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
| @@ -985,6 +985,12 @@ struct snd_soc_dai_link { | |||
| 985 | /* Do not create a PCM for this DAI link (Backend link) */ | 985 | /* Do not create a PCM for this DAI link (Backend link) */ |
| 986 | unsigned int ignore:1; | 986 | unsigned int ignore:1; |
| 987 | 987 | ||
| 988 | /* | ||
| 989 | * This driver uses legacy platform naming. Set by the core, machine | ||
| 990 | * drivers should not modify this value. | ||
| 991 | */ | ||
| 992 | unsigned int legacy_platform:1; | ||
| 993 | |||
| 988 | struct list_head list; /* DAI link list of the soc card */ | 994 | struct list_head list; /* DAI link list of the soc card */ |
| 989 | struct snd_soc_dobj dobj; /* For topology */ | 995 | struct snd_soc_dobj dobj; /* For topology */ |
| 990 | }; | 996 | }; |
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 33d291888ba9..e3f005eae1f7 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | enum afs_call_trace { | 25 | enum afs_call_trace { |
| 26 | afs_call_trace_alloc, | 26 | afs_call_trace_alloc, |
| 27 | afs_call_trace_free, | 27 | afs_call_trace_free, |
| 28 | afs_call_trace_get, | ||
| 28 | afs_call_trace_put, | 29 | afs_call_trace_put, |
| 29 | afs_call_trace_wake, | 30 | afs_call_trace_wake, |
| 30 | afs_call_trace_work, | 31 | afs_call_trace_work, |
| @@ -159,6 +160,7 @@ enum afs_file_error { | |||
| 159 | #define afs_call_traces \ | 160 | #define afs_call_traces \ |
| 160 | EM(afs_call_trace_alloc, "ALLOC") \ | 161 | EM(afs_call_trace_alloc, "ALLOC") \ |
| 161 | EM(afs_call_trace_free, "FREE ") \ | 162 | EM(afs_call_trace_free, "FREE ") \ |
| 163 | EM(afs_call_trace_get, "GET ") \ | ||
| 162 | EM(afs_call_trace_put, "PUT ") \ | 164 | EM(afs_call_trace_put, "PUT ") \ |
| 163 | EM(afs_call_trace_wake, "WAKE ") \ | 165 | EM(afs_call_trace_wake, "WAKE ") \ |
| 164 | E_(afs_call_trace_work, "WORK ") | 166 | E_(afs_call_trace_work, "WORK ") |
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binderfs.h index 65b2efd1a0a5..87410477aea9 100644 --- a/include/uapi/linux/android/binder_ctl.h +++ b/include/uapi/linux/android/binderfs.h | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | * | 4 | * |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #ifndef _UAPI_LINUX_BINDER_CTL_H | 7 | #ifndef _UAPI_LINUX_BINDERFS_H |
| 8 | #define _UAPI_LINUX_BINDER_CTL_H | 8 | #define _UAPI_LINUX_BINDERFS_H |
| 9 | 9 | ||
| 10 | #include <linux/android/binder.h> | 10 | #include <linux/android/binder.h> |
| 11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| @@ -22,8 +22,8 @@ | |||
| 22 | */ | 22 | */ |
| 23 | struct binderfs_device { | 23 | struct binderfs_device { |
| 24 | char name[BINDERFS_MAX_NAME + 1]; | 24 | char name[BINDERFS_MAX_NAME + 1]; |
| 25 | __u8 major; | 25 | __u32 major; |
| 26 | __u8 minor; | 26 | __u32 minor; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | /** | 29 | /** |
| @@ -31,5 +31,5 @@ struct binderfs_device { | |||
| 31 | */ | 31 | */ |
| 32 | #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) | 32 | #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) |
| 33 | 33 | ||
| 34 | #endif /* _UAPI_LINUX_BINDER_CTL_H */ | 34 | #endif /* _UAPI_LINUX_BINDERFS_H */ |
| 35 | 35 | ||
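The wider major/minor fields matter to userspace that reads them back after BINDER_CTL_ADD; a minimal sketch, assuming binderfs is already mounted at /dev/binderfs (the mount point and device name are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device device;
	int fd, ret;

	memset(&device, 0, sizeof(device));
	strcpy(device.name, "my-binder");	/* illustrative device name */

	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return 1;

	ret = ioctl(fd, BINDER_CTL_ADD, &device);
	if (ret == 0)
		printf("allocated %u:%u\n", device.major, device.minor);
	close(fd);
	return ret ? 1 : 0;
}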
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 6fa38d001d84..498eec813494 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h | |||
| @@ -138,6 +138,7 @@ struct blk_zone_range { | |||
| 138 | * @BLKRESETZONE: Reset the write pointer of the zones in the specified | 138 | * @BLKRESETZONE: Reset the write pointer of the zones in the specified |
| 139 | * sector range. The sector range must be zone aligned. | 139 | * sector range. The sector range must be zone aligned. |
| 140 | * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. | 140 | * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. |
| 141 | * @BLKGETNRZONES: Get the total number of zones of the device. | ||
| 141 | */ | 142 | */ |
| 142 | #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) | 143 | #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) |
| 143 | #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) | 144 | #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) |
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h | |||
| @@ -268,7 +268,7 @@ struct sockaddr_in { | |||
| 268 | #define IN_MULTICAST(a) IN_CLASSD(a) | 268 | #define IN_MULTICAST(a) IN_CLASSD(a) |
| 269 | #define IN_MULTICAST_NET 0xe0000000 | 269 | #define IN_MULTICAST_NET 0xe0000000 |
| 270 | 270 | ||
| 271 | #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) | 271 | #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) |
| 272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) | 272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) |
| 273 | 273 | ||
| 274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) | 274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) |
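The old definition had an unbalanced opening parenthesis, so any program expanding IN_BADCLASS() or IN_EXPERIMENTAL() from this header failed to compile; the replacement also casts the constant so both sides compare as long. A minimal userspace check, assuming the sanitized header is installed:

#include <stdio.h>
#include <linux/in.h>

int main(void)
{
	unsigned int limited_broadcast = 0xffffffff;	/* 255.255.255.255, host order */

	/* Compiles again, and evaluates to true for the all-ones address. */
	printf("IN_BADCLASS: %d\n", IN_BADCLASS(limited_broadcast) ? 1 : 0);
	return 0;
}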
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index fb78f6f500f3..f056b2a00d5c 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
| @@ -26,13 +26,17 @@ | |||
| 26 | */ | 26 | */ |
| 27 | 27 | ||
| 28 | struct input_event { | 28 | struct input_event { |
| 29 | #if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) | 29 | #if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__) |
| 30 | struct timeval time; | 30 | struct timeval time; |
| 31 | #define input_event_sec time.tv_sec | 31 | #define input_event_sec time.tv_sec |
| 32 | #define input_event_usec time.tv_usec | 32 | #define input_event_usec time.tv_usec |
| 33 | #else | 33 | #else |
| 34 | __kernel_ulong_t __sec; | 34 | __kernel_ulong_t __sec; |
| 35 | #if defined(__sparc__) && defined(__arch64__) | ||
| 36 | unsigned int __usec; | ||
| 37 | #else | ||
| 35 | __kernel_ulong_t __usec; | 38 | __kernel_ulong_t __usec; |
| 39 | #endif | ||
| 36 | #define input_event_sec __sec | 40 | #define input_event_sec __sec |
| 37 | #define input_event_usec __usec | 41 | #define input_event_usec __usec |
| 38 | #endif | 42 | #endif |
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index d73d83950265..1bc794ad957a 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h | |||
| @@ -147,7 +147,7 @@ struct ptp_pin_desc { | |||
| 147 | #define PTP_SYS_OFFSET_PRECISE \ | 147 | #define PTP_SYS_OFFSET_PRECISE \ |
| 148 | _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) | 148 | _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) |
| 149 | #define PTP_SYS_OFFSET_EXTENDED \ | 149 | #define PTP_SYS_OFFSET_EXTENDED \ |
| 150 | _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) | 150 | _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) |
| 151 | 151 | ||
| 152 | struct ptp_extts_event { | 152 | struct ptp_extts_event { |
| 153 | struct ptp_clock_time t; /* Time event occured. */ | 153 | struct ptp_clock_time t; /* Time event occured. */ |
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index d13fd490b66d..6e73f0274e41 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h | |||
| @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { | |||
| 78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, | 78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
| 79 | PVRDMA_WR_BIND_MW, | 79 | PVRDMA_WR_BIND_MW, |
| 80 | PVRDMA_WR_REG_SIG_MR, | 80 | PVRDMA_WR_REG_SIG_MR, |
| 81 | PVRDMA_WR_ERROR, | ||
| 81 | }; | 82 | }; |
| 82 | 83 | ||
| 83 | enum pvrdma_wc_status { | 84 | enum pvrdma_wc_status { |
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h index 59a260712a56..2ca9164a79bf 100644 --- a/include/xen/arm/page-coherent.h +++ b/include/xen/arm/page-coherent.h | |||
| @@ -1,17 +1,6 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H | 2 | #ifndef _XEN_ARM_PAGE_COHERENT_H |
| 3 | #define _ASM_ARM_XEN_PAGE_COHERENT_H | 3 | #define _XEN_ARM_PAGE_COHERENT_H |
| 4 | |||
| 5 | #include <asm/page.h> | ||
| 6 | #include <asm/dma-mapping.h> | ||
| 7 | #include <linux/dma-mapping.h> | ||
| 8 | |||
| 9 | static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev) | ||
| 10 | { | ||
| 11 | if (dev && dev->archdata.dev_dma_ops) | ||
| 12 | return dev->archdata.dev_dma_ops; | ||
| 13 | return get_arch_dma_ops(NULL); | ||
| 14 | } | ||
| 15 | 4 | ||
| 16 | void __xen_dma_map_page(struct device *hwdev, struct page *page, | 5 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
| 17 | dma_addr_t dev_addr, unsigned long offset, size_t size, | 6 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
| @@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | |||
| 21 | unsigned long attrs); | 10 | unsigned long attrs); |
| 22 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, | 11 | void __xen_dma_sync_single_for_cpu(struct device *hwdev, |
| 23 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 12 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
| 24 | |||
| 25 | void __xen_dma_sync_single_for_device(struct device *hwdev, | 13 | void __xen_dma_sync_single_for_device(struct device *hwdev, |
| 26 | dma_addr_t handle, size_t size, enum dma_data_direction dir); | 14 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
| 27 | 15 | ||
| 28 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | 16 | #endif /* _XEN_ARM_PAGE_COHERENT_H */ |
| 29 | dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) | ||
| 30 | { | ||
| 31 | return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | ||
| 35 | void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) | ||
| 36 | { | ||
| 37 | xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | ||
| 41 | dma_addr_t dev_addr, unsigned long offset, size_t size, | ||
| 42 | enum dma_data_direction dir, unsigned long attrs) | ||
| 43 | { | ||
| 44 | unsigned long page_pfn = page_to_xen_pfn(page); | ||
| 45 | unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); | ||
| 46 | unsigned long compound_pages = | ||
| 47 | (1<<compound_order(page)) * XEN_PFN_PER_PAGE; | ||
| 48 | bool local = (page_pfn <= dev_pfn) && | ||
| 49 | (dev_pfn - page_pfn < compound_pages); | ||
| 50 | |||
| 51 | /* | ||
| 52 | * Dom0 is mapped 1:1, while the Linux page can span across | ||
| 53 | * multiple Xen pages, it's not possible for it to contain a | ||
| 54 | * mix of local and foreign Xen pages. So if the first xen_pfn | ||
| 55 | * == mfn the page is local otherwise it's a foreign page | ||
| 56 | * grant-mapped in dom0. If the page is local we can safely | ||
| 57 | * call the native dma_ops function, otherwise we call the xen | ||
| 58 | * specific function. | ||
| 59 | */ | ||
| 60 | if (local) | ||
| 61 | xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | ||
| 62 | else | ||
| 63 | __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
| 67 | size_t size, enum dma_data_direction dir, unsigned long attrs) | ||
| 68 | { | ||
| 69 | unsigned long pfn = PFN_DOWN(handle); | ||
| 70 | /* | ||
| 71 | * Dom0 is mapped 1:1, while the Linux page can be spanned accross | ||
| 72 | * multiple Xen page, it's not possible to have a mix of local and | ||
| 73 | * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a | ||
| 74 | * foreign mfn will always return false. If the page is local we can | ||
| 75 | * safely call the native dma_ops function, otherwise we call the xen | ||
| 76 | * specific function. | ||
| 77 | */ | ||
| 78 | if (pfn_valid(pfn)) { | ||
| 79 | if (xen_get_dma_ops(hwdev)->unmap_page) | ||
| 80 | xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
| 81 | } else | ||
| 82 | __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
| 86 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 87 | { | ||
| 88 | unsigned long pfn = PFN_DOWN(handle); | ||
| 89 | if (pfn_valid(pfn)) { | ||
| 90 | if (xen_get_dma_ops(hwdev)->sync_single_for_cpu) | ||
| 91 | xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 92 | } else | ||
| 93 | __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); | ||
| 94 | } | ||
| 95 | |||
| 96 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
| 97 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
| 98 | { | ||
| 99 | unsigned long pfn = PFN_DOWN(handle); | ||
| 100 | if (pfn_valid(pfn)) { | ||
| 101 | if (xen_get_dma_ops(hwdev)->sync_single_for_device) | ||
| 102 | xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
| 103 | } else | ||
| 104 | __xen_dma_sync_single_for_device(hwdev, handle, size, dir); | ||
| 105 | } | ||
| 106 | |||
| 107 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | ||
diff --git a/init/Kconfig b/init/Kconfig index d47cb77a220e..513fa544a134 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION | |||
| 1124 | bool "Dead code and data elimination (EXPERIMENTAL)" | 1124 | bool "Dead code and data elimination (EXPERIMENTAL)" |
| 1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION | 1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION |
| 1126 | depends on EXPERT | 1126 | depends on EXPERT |
| 1127 | depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) | ||
| 1127 | depends on $(cc-option,-ffunction-sections -fdata-sections) | 1128 | depends on $(cc-option,-ffunction-sections -fdata-sections) |
| 1128 | depends on $(ld-option,--gc-sections) | 1129 | depends on $(ld-option,--gc-sections) |
| 1129 | help | 1130 | help |
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 715f9fcf4712..befe570be5ba 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
| @@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) | |||
| 467 | return kind_ops[BTF_INFO_KIND(t->info)]; | 467 | return kind_ops[BTF_INFO_KIND(t->info)]; |
| 468 | } | 468 | } |
| 469 | 469 | ||
| 470 | bool btf_name_offset_valid(const struct btf *btf, u32 offset) | 470 | static bool btf_name_offset_valid(const struct btf *btf, u32 offset) |
| 471 | { | 471 | { |
| 472 | return BTF_STR_OFFSET_VALID(offset) && | 472 | return BTF_STR_OFFSET_VALID(offset) && |
| 473 | offset < btf->hdr.str_len; | 473 | offset < btf->hdr.str_len; |
| @@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset, | |||
| 1219 | u8 nr_copy_bits; | 1219 | u8 nr_copy_bits; |
| 1220 | u64 print_num; | 1220 | u64 print_num; |
| 1221 | 1221 | ||
| 1222 | data += BITS_ROUNDDOWN_BYTES(bits_offset); | ||
| 1223 | bits_offset = BITS_PER_BYTE_MASKED(bits_offset); | ||
| 1224 | nr_copy_bits = nr_bits + bits_offset; | 1222 | nr_copy_bits = nr_bits + bits_offset; |
| 1225 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); | 1223 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
| 1226 | 1224 | ||
| @@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf, | |||
| 1255 | * BTF_INT_OFFSET() cannot exceed 64 bits. | 1253 | * BTF_INT_OFFSET() cannot exceed 64 bits. |
| 1256 | */ | 1254 | */ |
| 1257 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); | 1255 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
| 1258 | btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); | 1256 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
| 1257 | bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); | ||
| 1258 | btf_bitfield_seq_show(data, bits_offset, nr_bits, m); | ||
| 1259 | } | 1259 | } |
| 1260 | 1260 | ||
| 1261 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, | 1261 | static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, |
| @@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, | |||
| 2001 | 2001 | ||
| 2002 | member_offset = btf_member_bit_offset(t, member); | 2002 | member_offset = btf_member_bit_offset(t, member); |
| 2003 | bitfield_size = btf_member_bitfield_size(t, member); | 2003 | bitfield_size = btf_member_bitfield_size(t, member); |
| 2004 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); | ||
| 2005 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); | ||
| 2004 | if (bitfield_size) { | 2006 | if (bitfield_size) { |
| 2005 | btf_bitfield_seq_show(data, member_offset, | 2007 | btf_bitfield_seq_show(data + bytes_offset, bits8_offset, |
| 2006 | bitfield_size, m); | 2008 | bitfield_size, m); |
| 2007 | } else { | 2009 | } else { |
| 2008 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); | ||
| 2009 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); | ||
| 2010 | ops = btf_type_ops(member_type); | 2010 | ops = btf_type_ops(member_type); |
| 2011 | ops->seq_show(btf, member_type, member->type, | 2011 | ops->seq_show(btf, member_type, member->type, |
| 2012 | data + bytes_offset, bits8_offset, m); | 2012 | data + bytes_offset, bits8_offset, m); |
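The callers now split the total bit offset themselves before calling btf_bitfield_seq_show(); numerically the two helper macros behave like a shift and a mask, for example (values invented):

	u32 total_bits_offset = 70;			/* made-up example value */
	u32 bytes_offset = total_bits_offset >> 3;	/* BITS_ROUNDDOWN_BYTES() -> 8 */
	u8  bits8_offset = total_bits_offset & 7;	/* BITS_PER_BYTE_MASKED()  -> 6 */

	/* data + bytes_offset then points at the byte that holds the field,
	 * and bits8_offset is what remains inside that byte.
	 */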
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 9425c2fb872f..ab612fe9862f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
| @@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |||
| 718 | case BPF_FUNC_trace_printk: | 718 | case BPF_FUNC_trace_printk: |
| 719 | if (capable(CAP_SYS_ADMIN)) | 719 | if (capable(CAP_SYS_ADMIN)) |
| 720 | return bpf_get_trace_printk_proto(); | 720 | return bpf_get_trace_printk_proto(); |
| 721 | /* fall through */ | ||
| 721 | default: | 722 | default: |
| 722 | return NULL; | 723 | return NULL; |
| 723 | } | 724 | } |
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 99d243e1ad6e..52378d3e34b3 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | 12 | struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) |
| 13 | { | 13 | { |
| 14 | struct bpf_map *inner_map, *inner_map_meta; | 14 | struct bpf_map *inner_map, *inner_map_meta; |
| 15 | u32 inner_map_meta_size; | ||
| 15 | struct fd f; | 16 | struct fd f; |
| 16 | 17 | ||
| 17 | f = fdget(inner_map_ufd); | 18 | f = fdget(inner_map_ufd); |
| @@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | |||
| 36 | return ERR_PTR(-EINVAL); | 37 | return ERR_PTR(-EINVAL); |
| 37 | } | 38 | } |
| 38 | 39 | ||
| 39 | inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); | 40 | inner_map_meta_size = sizeof(*inner_map_meta); |
| 41 | /* In some cases verifier needs to access beyond just base map. */ | ||
| 42 | if (inner_map->ops == &array_map_ops) | ||
| 43 | inner_map_meta_size = sizeof(struct bpf_array); | ||
| 44 | |||
| 45 | inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); | ||
| 40 | if (!inner_map_meta) { | 46 | if (!inner_map_meta) { |
| 41 | fdput(f); | 47 | fdput(f); |
| 42 | return ERR_PTR(-ENOMEM); | 48 | return ERR_PTR(-ENOMEM); |
| @@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | |||
| 46 | inner_map_meta->key_size = inner_map->key_size; | 52 | inner_map_meta->key_size = inner_map->key_size; |
| 47 | inner_map_meta->value_size = inner_map->value_size; | 53 | inner_map_meta->value_size = inner_map->value_size; |
| 48 | inner_map_meta->map_flags = inner_map->map_flags; | 54 | inner_map_meta->map_flags = inner_map->map_flags; |
| 49 | inner_map_meta->ops = inner_map->ops; | ||
| 50 | inner_map_meta->max_entries = inner_map->max_entries; | 55 | inner_map_meta->max_entries = inner_map->max_entries; |
| 51 | 56 | ||
| 57 | /* Misc members not needed in bpf_map_meta_equal() check. */ | ||
| 58 | inner_map_meta->ops = inner_map->ops; | ||
| 59 | if (inner_map->ops == &array_map_ops) { | ||
| 60 | inner_map_meta->unpriv_array = inner_map->unpriv_array; | ||
| 61 | container_of(inner_map_meta, struct bpf_array, map)->index_mask = | ||
| 62 | container_of(inner_map, struct bpf_array, map)->index_mask; | ||
| 63 | } | ||
| 64 | |||
| 52 | fdput(f); | 65 | fdput(f); |
| 53 | return inner_map_meta; | 66 | return inner_map_meta; |
| 54 | } | 67 | } |
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 90daf285de03..d43b14535827 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
| @@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr, | |||
| 180 | 180 | ||
| 181 | if (nhdr->n_type == BPF_BUILD_ID && | 181 | if (nhdr->n_type == BPF_BUILD_ID && |
| 182 | nhdr->n_namesz == sizeof("GNU") && | 182 | nhdr->n_namesz == sizeof("GNU") && |
| 183 | nhdr->n_descsz == BPF_BUILD_ID_SIZE) { | 183 | nhdr->n_descsz > 0 && |
| 184 | nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { | ||
| 184 | memcpy(build_id, | 185 | memcpy(build_id, |
| 185 | note_start + note_offs + | 186 | note_start + note_offs + |
| 186 | ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), | 187 | ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), |
| 187 | BPF_BUILD_ID_SIZE); | 188 | nhdr->n_descsz); |
| 189 | memset(build_id + nhdr->n_descsz, 0, | ||
| 190 | BPF_BUILD_ID_SIZE - nhdr->n_descsz); | ||
| 188 | return 0; | 191 | return 0; |
| 189 | } | 192 | } |
| 190 | new_offs = note_offs + sizeof(Elf32_Nhdr) + | 193 | new_offs = note_offs + sizeof(Elf32_Nhdr) + |
| @@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, | |||
| 260 | return -EFAULT; /* page not mapped */ | 263 | return -EFAULT; /* page not mapped */ |
| 261 | 264 | ||
| 262 | ret = -EINVAL; | 265 | ret = -EINVAL; |
| 263 | page_addr = page_address(page); | 266 | page_addr = kmap_atomic(page); |
| 264 | ehdr = (Elf32_Ehdr *)page_addr; | 267 | ehdr = (Elf32_Ehdr *)page_addr; |
| 265 | 268 | ||
| 266 | /* compare magic x7f "ELF" */ | 269 | /* compare magic x7f "ELF" */ |
| @@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma, | |||
| 276 | else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) | 279 | else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) |
| 277 | ret = stack_map_get_build_id_64(page_addr, build_id); | 280 | ret = stack_map_get_build_id_64(page_addr, build_id); |
| 278 | out: | 281 | out: |
| 282 | kunmap_atomic(page_addr); | ||
| 279 | put_page(page); | 283 | put_page(page); |
| 280 | return ret; | 284 | return ret; |
| 281 | } | 285 | } |
| @@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
| 310 | for (i = 0; i < trace_nr; i++) { | 314 | for (i = 0; i < trace_nr; i++) { |
| 311 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; | 315 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; |
| 312 | id_offs[i].ip = ips[i]; | 316 | id_offs[i].ip = ips[i]; |
| 317 | memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); | ||
| 313 | } | 318 | } |
| 314 | return; | 319 | return; |
| 315 | } | 320 | } |
| @@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
| 320 | /* per entry fall back to ips */ | 325 | /* per entry fall back to ips */ |
| 321 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; | 326 | id_offs[i].status = BPF_STACK_BUILD_ID_IP; |
| 322 | id_offs[i].ip = ips[i]; | 327 | id_offs[i].ip = ips[i]; |
| 328 | memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); | ||
| 323 | continue; | 329 | continue; |
| 324 | } | 330 | } |
| 325 | id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] | 331 | id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f6bc62a9ee8e..56674a7c3778 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, | |||
| 3103 | } | 3103 | } |
| 3104 | } | 3104 | } |
| 3105 | 3105 | ||
| 3106 | static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, | ||
| 3107 | const struct bpf_insn *insn) | ||
| 3108 | { | ||
| 3109 | return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; | ||
| 3110 | } | ||
| 3111 | |||
| 3112 | static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, | ||
| 3113 | u32 alu_state, u32 alu_limit) | ||
| 3114 | { | ||
| 3115 | /* If we arrived here from different branches with different | ||
| 3116 | * state or limits to sanitize, then this won't work. | ||
| 3117 | */ | ||
| 3118 | if (aux->alu_state && | ||
| 3119 | (aux->alu_state != alu_state || | ||
| 3120 | aux->alu_limit != alu_limit)) | ||
| 3121 | return -EACCES; | ||
| 3122 | |||
| 3123 | /* Corresponding fixup done in fixup_bpf_calls(). */ | ||
| 3124 | aux->alu_state = alu_state; | ||
| 3125 | aux->alu_limit = alu_limit; | ||
| 3126 | return 0; | ||
| 3127 | } | ||
| 3128 | |||
| 3129 | static int sanitize_val_alu(struct bpf_verifier_env *env, | ||
| 3130 | struct bpf_insn *insn) | ||
| 3131 | { | ||
| 3132 | struct bpf_insn_aux_data *aux = cur_aux(env); | ||
| 3133 | |||
| 3134 | if (can_skip_alu_sanitation(env, insn)) | ||
| 3135 | return 0; | ||
| 3136 | |||
| 3137 | return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); | ||
| 3138 | } | ||
| 3139 | |||
| 3106 | static int sanitize_ptr_alu(struct bpf_verifier_env *env, | 3140 | static int sanitize_ptr_alu(struct bpf_verifier_env *env, |
| 3107 | struct bpf_insn *insn, | 3141 | struct bpf_insn *insn, |
| 3108 | const struct bpf_reg_state *ptr_reg, | 3142 | const struct bpf_reg_state *ptr_reg, |
| @@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, | |||
| 3117 | struct bpf_reg_state tmp; | 3151 | struct bpf_reg_state tmp; |
| 3118 | bool ret; | 3152 | bool ret; |
| 3119 | 3153 | ||
| 3120 | if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) | 3154 | if (can_skip_alu_sanitation(env, insn)) |
| 3121 | return 0; | 3155 | return 0; |
| 3122 | 3156 | ||
| 3123 | /* We already marked aux for masking from non-speculative | 3157 | /* We already marked aux for masking from non-speculative |
| @@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, | |||
| 3133 | 3167 | ||
| 3134 | if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) | 3168 | if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) |
| 3135 | return 0; | 3169 | return 0; |
| 3136 | 3170 | if (update_alu_sanitation_state(aux, alu_state, alu_limit)) | |
| 3137 | /* If we arrived here from different branches with different | ||
| 3138 | * limits to sanitize, then this won't work. | ||
| 3139 | */ | ||
| 3140 | if (aux->alu_state && | ||
| 3141 | (aux->alu_state != alu_state || | ||
| 3142 | aux->alu_limit != alu_limit)) | ||
| 3143 | return -EACCES; | 3171 | return -EACCES; |
| 3144 | |||
| 3145 | /* Corresponding fixup done in fixup_bpf_calls(). */ | ||
| 3146 | aux->alu_state = alu_state; | ||
| 3147 | aux->alu_limit = alu_limit; | ||
| 3148 | |||
| 3149 | do_sim: | 3172 | do_sim: |
| 3150 | /* Simulate and find potential out-of-bounds access under | 3173 | /* Simulate and find potential out-of-bounds access under |
| 3151 | * speculative execution from truncation as a result of | 3174 | * speculative execution from truncation as a result of |
| @@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
| 3418 | s64 smin_val, smax_val; | 3441 | s64 smin_val, smax_val; |
| 3419 | u64 umin_val, umax_val; | 3442 | u64 umin_val, umax_val; |
| 3420 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; | 3443 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; |
| 3444 | u32 dst = insn->dst_reg; | ||
| 3445 | int ret; | ||
| 3421 | 3446 | ||
| 3422 | if (insn_bitness == 32) { | 3447 | if (insn_bitness == 32) { |
| 3423 | /* Relevant for 32-bit RSH: Information can propagate towards | 3448 | /* Relevant for 32-bit RSH: Information can propagate towards |
| @@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
| 3452 | 3477 | ||
| 3453 | switch (opcode) { | 3478 | switch (opcode) { |
| 3454 | case BPF_ADD: | 3479 | case BPF_ADD: |
| 3480 | ret = sanitize_val_alu(env, insn); | ||
| 3481 | if (ret < 0) { | ||
| 3482 | verbose(env, "R%d tried to add from different pointers or scalars\n", dst); | ||
| 3483 | return ret; | ||
| 3484 | } | ||
| 3455 | if (signed_add_overflows(dst_reg->smin_value, smin_val) || | 3485 | if (signed_add_overflows(dst_reg->smin_value, smin_val) || |
| 3456 | signed_add_overflows(dst_reg->smax_value, smax_val)) { | 3486 | signed_add_overflows(dst_reg->smax_value, smax_val)) { |
| 3457 | dst_reg->smin_value = S64_MIN; | 3487 | dst_reg->smin_value = S64_MIN; |
| @@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | |||
| 3471 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); | 3501 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); |
| 3472 | break; | 3502 | break; |
| 3473 | case BPF_SUB: | 3503 | case BPF_SUB: |
| 3504 | ret = sanitize_val_alu(env, insn); | ||
| 3505 | if (ret < 0) { | ||
| 3506 | verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); | ||
| 3507 | return ret; | ||
| 3508 | } | ||
| 3474 | if (signed_sub_overflows(dst_reg->smin_value, smax_val) || | 3509 | if (signed_sub_overflows(dst_reg->smin_value, smax_val) || |
| 3475 | signed_sub_overflows(dst_reg->smax_value, smin_val)) { | 3510 | signed_sub_overflows(dst_reg->smax_value, smin_val)) { |
| 3476 | /* Overflow possible, we know nothing */ | 3511 | /* Overflow possible, we know nothing */ |
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d6361776dc5c..1fb6fd68b9c7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c | |||
| @@ -378,6 +378,8 @@ void __init swiotlb_exit(void) | |||
| 378 | memblock_free_late(io_tlb_start, | 378 | memblock_free_late(io_tlb_start, |
| 379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 380 | } | 380 | } |
| 381 | io_tlb_start = 0; | ||
| 382 | io_tlb_end = 0; | ||
| 381 | io_tlb_nslabs = 0; | 383 | io_tlb_nslabs = 0; |
| 382 | max_segment = 0; | 384 | max_segment = 0; |
| 383 | } | 385 | } |
diff --git a/kernel/exit.c b/kernel/exit.c index 2d14979577ee..3fb7be001964 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w) | |||
| 307 | * MB (A) MB (B) | 307 | * MB (A) MB (B) |
| 308 | * [L] cond [L] tsk | 308 | * [L] cond [L] tsk |
| 309 | */ | 309 | */ |
| 310 | smp_rmb(); /* (B) */ | 310 | smp_mb(); /* (B) */ |
| 311 | 311 | ||
| 312 | /* | 312 | /* |
| 313 | * Avoid using task_rcu_dereference() magic as long as we are careful, | 313 | * Avoid using task_rcu_dereference() magic as long as we are careful, |
| @@ -866,6 +866,7 @@ void __noreturn do_exit(long code) | |||
| 866 | exit_task_namespaces(tsk); | 866 | exit_task_namespaces(tsk); |
| 867 | exit_task_work(tsk); | 867 | exit_task_work(tsk); |
| 868 | exit_thread(tsk); | 868 | exit_thread(tsk); |
| 869 | exit_umh(tsk); | ||
| 869 | 870 | ||
| 870 | /* | 871 | /* |
| 871 | * Flush inherited counters to the parent - before the parent | 872 | * Flush inherited counters to the parent - before the parent |
diff --git a/kernel/futex.c b/kernel/futex.c index be3bff2315ff..fdd312da0992 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -1452,11 +1452,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) | |||
| 1452 | if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) | 1452 | if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) |
| 1453 | return; | 1453 | return; |
| 1454 | 1454 | ||
| 1455 | /* | 1455 | get_task_struct(p); |
| 1456 | * Queue the task for later wakeup for after we've released | ||
| 1457 | * the hb->lock. wake_q_add() grabs reference to p. | ||
| 1458 | */ | ||
| 1459 | wake_q_add(wake_q, p); | ||
| 1460 | __unqueue_futex(q); | 1456 | __unqueue_futex(q); |
| 1461 | /* | 1457 | /* |
| 1462 | * The waiting task can free the futex_q as soon as q->lock_ptr = NULL | 1458 | * The waiting task can free the futex_q as soon as q->lock_ptr = NULL |
| @@ -1466,6 +1462,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) | |||
| 1466 | * plist_del in __unqueue_futex(). | 1462 | * plist_del in __unqueue_futex(). |
| 1467 | */ | 1463 | */ |
| 1468 | smp_store_release(&q->lock_ptr, NULL); | 1464 | smp_store_release(&q->lock_ptr, NULL); |
| 1465 | |||
| 1466 | /* | ||
| 1467 | * Queue the task for later wakeup for after we've released | ||
| 1468 | * the hb->lock. wake_q_add() grabs reference to p. | ||
| 1469 | */ | ||
| 1470 | wake_q_add(wake_q, p); | ||
| 1471 | put_task_struct(p); | ||
| 1469 | } | 1472 | } |
| 1470 | 1473 | ||
| 1471 | /* | 1474 | /* |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index ee062b7939d3..ef8ad36cadcf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -457,7 +457,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node, | |||
| 457 | 457 | ||
| 458 | /* Validate affinity mask(s) */ | 458 | /* Validate affinity mask(s) */ |
| 459 | if (affinity) { | 459 | if (affinity) { |
| 460 | for (i = 0; i < cnt; i++, i++) { | 460 | for (i = 0; i < cnt; i++) { |
| 461 | if (cpumask_empty(&affinity[i].mask)) | 461 | if (cpumask_empty(&affinity[i].mask)) |
| 462 | return -EINVAL; | 462 | return -EINVAL; |
| 463 | } | 463 | } |
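The comma operator in the old loop header advanced i twice per pass, so only every other affinity mask was validated; a tiny standalone illustration of the same pattern:

#include <stdio.h>

int main(void)
{
	int i, cnt = 4;

	for (i = 0; i < cnt; i++, i++)	/* steps by two, like the old loop */
		printf("validated index %d\n", i);
	return 0;	/* prints index 0 and 2 only */
}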
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index a4888ce4667a..84b54a17b95d 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc) | |||
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | cpumask_and(&mask, cpu_online_mask, set); | 395 | cpumask_and(&mask, cpu_online_mask, set); |
| 396 | if (cpumask_empty(&mask)) | ||
| 397 | cpumask_copy(&mask, cpu_online_mask); | ||
| 398 | |||
| 396 | if (node != NUMA_NO_NODE) { | 399 | if (node != NUMA_NO_NODE) { |
| 397 | const struct cpumask *nodemask = cpumask_of_node(node); | 400 | const struct cpumask *nodemask = cpumask_of_node(node); |
| 398 | 401 | ||
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 09b180063ee1..50d9af615dc4 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
| @@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, | |||
| 198 | woken++; | 198 | woken++; |
| 199 | tsk = waiter->task; | 199 | tsk = waiter->task; |
| 200 | 200 | ||
| 201 | wake_q_add(wake_q, tsk); | 201 | get_task_struct(tsk); |
| 202 | list_del(&waiter->list); | 202 | list_del(&waiter->list); |
| 203 | /* | 203 | /* |
| 204 | * Ensure that the last operation is setting the reader | 204 | * Ensure calling get_task_struct() before setting the reader |
| 205 | * waiter to nil such that rwsem_down_read_failed() cannot | 205 | * waiter to nil such that rwsem_down_read_failed() cannot |
| 206 | * race with do_exit() by always holding a reference count | 206 | * race with do_exit() by always holding a reference count |
| 207 | * to the task to wakeup. | 207 | * to the task to wakeup. |
| 208 | */ | 208 | */ |
| 209 | smp_store_release(&waiter->task, NULL); | 209 | smp_store_release(&waiter->task, NULL); |
| 210 | /* | ||
| 211 | * Ensure issuing the wakeup (either by us or someone else) | ||
| 212 | * after setting the reader waiter to nil. | ||
| 213 | */ | ||
| 214 | wake_q_add(wake_q, tsk); | ||
| 215 | /* wake_q_add() already take the task ref */ | ||
| 216 | put_task_struct(tsk); | ||
| 210 | } | 217 | } |
| 211 | 218 | ||
| 212 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; | 219 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a674c7db2f29..d8d76a65cfdd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -396,6 +396,18 @@ static bool set_nr_if_polling(struct task_struct *p) | |||
| 396 | #endif | 396 | #endif |
| 397 | #endif | 397 | #endif |
| 398 | 398 | ||
| 399 | /** | ||
| 400 | * wake_q_add() - queue a wakeup for 'later' waking. | ||
| 401 | * @head: the wake_q_head to add @task to | ||
| 402 | * @task: the task to queue for 'later' wakeup | ||
| 403 | * | ||
| 404 | * Queue a task for later wakeup, most likely by the wake_up_q() call in the | ||
| 405 | * same context, _HOWEVER_ this is not guaranteed, the wakeup can come | ||
| 406 | * instantly. | ||
| 407 | * | ||
| 408 | * This function must be used as-if it were wake_up_process(); IOW the task | ||
| 409 | * must be ready to be woken at this location. | ||
| 410 | */ | ||
| 399 | void wake_q_add(struct wake_q_head *head, struct task_struct *task) | 411 | void wake_q_add(struct wake_q_head *head, struct task_struct *task) |
| 400 | { | 412 | { |
| 401 | struct wake_q_node *node = &task->wake_q; | 413 | struct wake_q_node *node = &task->wake_q; |
| @@ -405,10 +417,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) | |||
| 405 | * its already queued (either by us or someone else) and will get the | 417 | * its already queued (either by us or someone else) and will get the |
| 406 | * wakeup due to that. | 418 | * wakeup due to that. |
| 407 | * | 419 | * |
| 408 | * This cmpxchg() executes a full barrier, which pairs with the full | 420 | * In order to ensure that a pending wakeup will observe our pending |
| 409 | * barrier executed by the wakeup in wake_up_q(). | 421 | * state, even in the failed case, an explicit smp_mb() must be used. |
| 410 | */ | 422 | */ |
| 411 | if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) | 423 | smp_mb__before_atomic(); |
| 424 | if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)) | ||
| 412 | return; | 425 | return; |
| 413 | 426 | ||
| 414 | get_task_struct(task); | 427 | get_task_struct(task); |
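The new kernel-doc spells out the contract the callers above were adjusted to respect: by the time wake_q_add() runs, the task must already be in a state where an immediate wake_up_process() would be correct. A sketch of the intended pattern, with the lock, condition and task names invented:

	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock(&lock);
	/* Publish the wakeup condition before queueing the wakeup. */
	smp_store_release(&cond, 1);
	wake_q_add(&wake_q, task);	/* task may be woken from here on */
	raw_spin_unlock(&lock);

	wake_up_q(&wake_q);		/* issue deferred wakeups outside the lock */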
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d7f538847b84..e815781ed751 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) | |||
| 976 | struct seccomp_filter *filter = file->private_data; | 976 | struct seccomp_filter *filter = file->private_data; |
| 977 | struct seccomp_knotif *knotif; | 977 | struct seccomp_knotif *knotif; |
| 978 | 978 | ||
| 979 | if (!filter) | ||
| 980 | return 0; | ||
| 981 | |||
| 979 | mutex_lock(&filter->notify_lock); | 982 | mutex_lock(&filter->notify_lock); |
| 980 | 983 | ||
| 981 | /* | 984 | /* |
| @@ -1300,6 +1303,7 @@ out: | |||
| 1300 | out_put_fd: | 1303 | out_put_fd: |
| 1301 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { | 1304 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { |
| 1302 | if (ret < 0) { | 1305 | if (ret < 0) { |
| 1306 | listener_f->private_data = NULL; | ||
| 1303 | fput(listener_f); | 1307 | fput(listener_f); |
| 1304 | put_unused_fd(listener); | 1308 | put_unused_fd(listener); |
| 1305 | } else { | 1309 | } else { |
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 8f0644af40be..80f955210861 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
| @@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, | |||
| 685 | * set up the signal and overrun bookkeeping. | 685 | * set up the signal and overrun bookkeeping. |
| 686 | */ | 686 | */ |
| 687 | timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); | 687 | timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); |
| 688 | timer->it_interval = ns_to_ktime(timer->it.cpu.incr); | ||
| 688 | 689 | ||
| 689 | /* | 690 | /* |
| 690 | * This acts as a modification timestamp for the timer, | 691 | * This acts as a modification timestamp for the timer, |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 5c19b8c41c7e..d5fb09ebba8b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -607,11 +607,17 @@ static int trace_kprobe_create(int argc, const char *argv[]) | |||
| 607 | char buf[MAX_EVENT_NAME_LEN]; | 607 | char buf[MAX_EVENT_NAME_LEN]; |
| 608 | unsigned int flags = TPARG_FL_KERNEL; | 608 | unsigned int flags = TPARG_FL_KERNEL; |
| 609 | 609 | ||
| 610 | /* argc must be >= 1 */ | 610 | switch (argv[0][0]) { |
| 611 | if (argv[0][0] == 'r') { | 611 | case 'r': |
| 612 | is_return = true; | 612 | is_return = true; |
| 613 | flags |= TPARG_FL_RETURN; | 613 | flags |= TPARG_FL_RETURN; |
| 614 | } else if (argv[0][0] != 'p' || argc < 2) | 614 | break; |
| 615 | case 'p': | ||
| 616 | break; | ||
| 617 | default: | ||
| 618 | return -ECANCELED; | ||
| 619 | } | ||
| 620 | if (argc < 2) | ||
| 615 | return -ECANCELED; | 621 | return -ECANCELED; |
| 616 | 622 | ||
| 617 | event = strchr(&argv[0][1], ':'); | 623 | event = strchr(&argv[0][1], ':'); |
diff --git a/kernel/umh.c b/kernel/umh.c index 0baa672e023c..d937cbad903a 100644 --- a/kernel/umh.c +++ b/kernel/umh.c | |||
| @@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; | |||
| 37 | static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; | 37 | static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; |
| 38 | static DEFINE_SPINLOCK(umh_sysctl_lock); | 38 | static DEFINE_SPINLOCK(umh_sysctl_lock); |
| 39 | static DECLARE_RWSEM(umhelper_sem); | 39 | static DECLARE_RWSEM(umhelper_sem); |
| 40 | static LIST_HEAD(umh_list); | ||
| 41 | static DEFINE_MUTEX(umh_list_lock); | ||
| 40 | 42 | ||
| 41 | static void call_usermodehelper_freeinfo(struct subprocess_info *info) | 43 | static void call_usermodehelper_freeinfo(struct subprocess_info *info) |
| 42 | { | 44 | { |
| @@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data) | |||
| 100 | commit_creds(new); | 102 | commit_creds(new); |
| 101 | 103 | ||
| 102 | sub_info->pid = task_pid_nr(current); | 104 | sub_info->pid = task_pid_nr(current); |
| 103 | if (sub_info->file) | 105 | if (sub_info->file) { |
| 104 | retval = do_execve_file(sub_info->file, | 106 | retval = do_execve_file(sub_info->file, |
| 105 | sub_info->argv, sub_info->envp); | 107 | sub_info->argv, sub_info->envp); |
| 106 | else | 108 | if (!retval) |
| 109 | current->flags |= PF_UMH; | ||
| 110 | } else | ||
| 107 | retval = do_execve(getname_kernel(sub_info->path), | 111 | retval = do_execve(getname_kernel(sub_info->path), |
| 108 | (const char __user *const __user *)sub_info->argv, | 112 | (const char __user *const __user *)sub_info->argv, |
| 109 | (const char __user *const __user *)sub_info->envp); | 113 | (const char __user *const __user *)sub_info->envp); |
| @@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) | |||
| 517 | goto out; | 521 | goto out; |
| 518 | 522 | ||
| 519 | err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); | 523 | err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); |
| 524 | if (!err) { | ||
| 525 | mutex_lock(&umh_list_lock); | ||
| 526 | list_add(&info->list, &umh_list); | ||
| 527 | mutex_unlock(&umh_list_lock); | ||
| 528 | } | ||
| 520 | out: | 529 | out: |
| 521 | fput(file); | 530 | fput(file); |
| 522 | return err; | 531 | return err; |
| @@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write, | |||
| 679 | return 0; | 688 | return 0; |
| 680 | } | 689 | } |
| 681 | 690 | ||
| 691 | void __exit_umh(struct task_struct *tsk) | ||
| 692 | { | ||
| 693 | struct umh_info *info; | ||
| 694 | pid_t pid = tsk->pid; | ||
| 695 | |||
| 696 | mutex_lock(&umh_list_lock); | ||
| 697 | list_for_each_entry(info, &umh_list, list) { | ||
| 698 | if (info->pid == pid) { | ||
| 699 | list_del(&info->list); | ||
| 700 | mutex_unlock(&umh_list_lock); | ||
| 701 | goto out; | ||
| 702 | } | ||
| 703 | } | ||
| 704 | mutex_unlock(&umh_list_lock); | ||
| 705 | return; | ||
| 706 | out: | ||
| 707 | if (info->cleanup) | ||
| 708 | info->cleanup(info); | ||
| 709 | } | ||
| 710 | |||
| 682 | struct ctl_table usermodehelper_table[] = { | 711 | struct ctl_table usermodehelper_table[] = { |
| 683 | { | 712 | { |
| 684 | .procname = "bset", | 713 | .procname = "bset", |
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c | |||
| @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) | |||
| 52 | if (x <= ULONG_MAX) | 52 | if (x <= ULONG_MAX) |
| 53 | return int_sqrt((unsigned long) x); | 53 | return int_sqrt((unsigned long) x); |
| 54 | 54 | ||
| 55 | m = 1ULL << (fls64(x) & ~1ULL); | 55 | m = 1ULL << ((fls64(x) - 1) & ~1ULL); |
| 56 | while (m != 0) { | 56 | while (m != 0) { |
| 57 | b = y + m; | 57 | b = y + m; |
| 58 | y >>= 1; | 58 | y >>= 1; |
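The corrected shift picks the highest even bit position at or below the top set bit of x, so the initial m never exceeds x and never needs a shift count of 64. A standalone illustration of the arithmetic (userspace C, using __builtin_clzll in place of the kernel's fls64(), which for nonzero input is 64 minus the leading-zero count):

	#include <stdio.h>
	#include <stdint.h>

	static unsigned int fls64_equiv(uint64_t x)	/* 1-based index of the MSB */
	{
		return 64 - __builtin_clzll(x);
	}

	int main(void)
	{
		uint64_t x = 1ULL << 63;		/* example input above 32 bits */
		unsigned int n = fls64_equiv(x);	/* 64 */

		/* old:  n & ~1       == 64 -> 1ULL << 64 is undefined            */
		/* new: (n - 1) & ~1  == 62 -> m = 2^62, the largest even-bit     */
		/*                              power of two not exceeding x      */
		uint64_t m = 1ULL << ((n - 1) & ~1u);

		printf("x = %#llx  initial m = %#llx\n",
		       (unsigned long long)x, (unsigned long long)m);
		return 0;
	}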
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 65c2d06250a6..5b382c1244ed 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c | |||
| @@ -26,14 +26,10 @@ | |||
| 26 | static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) | 26 | static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) |
| 27 | { | 27 | { |
| 28 | unsigned long mask, val; | 28 | unsigned long mask, val; |
| 29 | unsigned long __maybe_unused flags; | ||
| 30 | bool ret = false; | 29 | bool ret = false; |
| 30 | unsigned long flags; | ||
| 31 | 31 | ||
| 32 | /* Silence bogus lockdep warning */ | 32 | spin_lock_irqsave(&sb->map[index].swap_lock, flags); |
| 33 | #if defined(CONFIG_LOCKDEP) | ||
| 34 | local_irq_save(flags); | ||
| 35 | #endif | ||
| 36 | spin_lock(&sb->map[index].swap_lock); | ||
| 37 | 33 | ||
| 38 | if (!sb->map[index].cleared) | 34 | if (!sb->map[index].cleared) |
| 39 | goto out_unlock; | 35 | goto out_unlock; |
| @@ -54,10 +50,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) | |||
| 54 | 50 | ||
| 55 | ret = true; | 51 | ret = true; |
| 56 | out_unlock: | 52 | out_unlock: |
| 57 | spin_unlock(&sb->map[index].swap_lock); | 53 | spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); |
| 58 | #if defined(CONFIG_LOCKDEP) | ||
| 59 | local_irq_restore(flags); | ||
| 60 | #endif | ||
| 61 | return ret; | 54 | return ret; |
| 62 | } | 55 | } |
| 63 | 56 | ||
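The replacement relies on spin_lock_irqsave() doing both jobs at once, which is why the CONFIG_LOCKDEP-only local_irq_save() workaround can go away. The result is the standard pattern for a lock that may also be taken from hard-IRQ context (generic sketch, lock and flags as placeholders):

	unsigned long flags;

	spin_lock_irqsave(&lock, flags);	/* disables local IRQs and takes the lock */
	/* critical section */
	spin_unlock_irqrestore(&lock, flags);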
diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 4676c0a1eeca..c596a957f764 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c | |||
| @@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) | |||
| 199 | XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); | 199 | XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); |
| 200 | xa_set_mark(xa, index + 1, XA_MARK_0); | 200 | xa_set_mark(xa, index + 1, XA_MARK_0); |
| 201 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); | 201 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); |
| 202 | xa_set_mark(xa, index + 2, XA_MARK_1); | 202 | xa_set_mark(xa, index + 2, XA_MARK_2); |
| 203 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); | 203 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); |
| 204 | xa_store_order(xa, index, order, xa_mk_index(index), | 204 | xa_store_order(xa, index, order, xa_mk_index(index), |
| 205 | GFP_KERNEL); | 205 | GFP_KERNEL); |
| @@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) | |||
| 209 | void *entry; | 209 | void *entry; |
| 210 | 210 | ||
| 211 | XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); | 211 | XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); |
| 212 | XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1)); | 212 | XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); |
| 213 | XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); | 213 | XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); |
| 214 | 214 | ||
| 215 | /* We should see two elements in the array */ | 215 | /* We should see two elements in the array */ |
| 216 | rcu_read_lock(); | 216 | rcu_read_lock(); |
| @@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa) | |||
| 357 | static noinline void check_reserve(struct xarray *xa) | 357 | static noinline void check_reserve(struct xarray *xa) |
| 358 | { | 358 | { |
| 359 | void *entry; | 359 | void *entry; |
| 360 | unsigned long index = 0; | 360 | unsigned long index; |
| 361 | 361 | ||
| 362 | /* An array with a reserved entry is not empty */ | 362 | /* An array with a reserved entry is not empty */ |
| 363 | XA_BUG_ON(xa, !xa_empty(xa)); | 363 | XA_BUG_ON(xa, !xa_empty(xa)); |
| @@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa) | |||
| 382 | xa_erase_index(xa, 12345678); | 382 | xa_erase_index(xa, 12345678); |
| 383 | XA_BUG_ON(xa, !xa_empty(xa)); | 383 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 384 | 384 | ||
| 385 | /* And so does xa_insert */ | 385 | /* But xa_insert does not */ |
| 386 | xa_reserve(xa, 12345678, GFP_KERNEL); | 386 | xa_reserve(xa, 12345678, GFP_KERNEL); |
| 387 | XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); | 387 | XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != |
| 388 | xa_erase_index(xa, 12345678); | 388 | -EEXIST); |
| 389 | XA_BUG_ON(xa, xa_empty(xa)); | ||
| 390 | XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); | ||
| 389 | XA_BUG_ON(xa, !xa_empty(xa)); | 391 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 390 | 392 | ||
| 391 | /* Can iterate through a reserved entry */ | 393 | /* Can iterate through a reserved entry */ |
| @@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa) | |||
| 393 | xa_reserve(xa, 6, GFP_KERNEL); | 395 | xa_reserve(xa, 6, GFP_KERNEL); |
| 394 | xa_store_index(xa, 7, GFP_KERNEL); | 396 | xa_store_index(xa, 7, GFP_KERNEL); |
| 395 | 397 | ||
| 396 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { | 398 | xa_for_each(xa, index, entry) { |
| 397 | XA_BUG_ON(xa, index != 5 && index != 7); | 399 | XA_BUG_ON(xa, index != 5 && index != 7); |
| 398 | } | 400 | } |
| 399 | xa_destroy(xa); | 401 | xa_destroy(xa); |
| @@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa) | |||
| 812 | static noinline void check_find_2(struct xarray *xa) | 814 | static noinline void check_find_2(struct xarray *xa) |
| 813 | { | 815 | { |
| 814 | void *entry; | 816 | void *entry; |
| 815 | unsigned long i, j, index = 0; | 817 | unsigned long i, j, index; |
| 816 | 818 | ||
| 817 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { | 819 | xa_for_each(xa, index, entry) { |
| 818 | XA_BUG_ON(xa, true); | 820 | XA_BUG_ON(xa, true); |
| 819 | } | 821 | } |
| 820 | 822 | ||
| 821 | for (i = 0; i < 1024; i++) { | 823 | for (i = 0; i < 1024; i++) { |
| 822 | xa_store_index(xa, index, GFP_KERNEL); | 824 | xa_store_index(xa, index, GFP_KERNEL); |
| 823 | j = 0; | 825 | j = 0; |
| 824 | index = 0; | 826 | xa_for_each(xa, index, entry) { |
| 825 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { | ||
| 826 | XA_BUG_ON(xa, xa_mk_index(index) != entry); | 827 | XA_BUG_ON(xa, xa_mk_index(index) != entry); |
| 827 | XA_BUG_ON(xa, index != j++); | 828 | XA_BUG_ON(xa, index != j++); |
| 828 | } | 829 | } |
| @@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa) | |||
| 839 | 840 | ||
| 840 | for (i = 0; i < 100; i++) { | 841 | for (i = 0; i < 100; i++) { |
| 841 | for (j = 0; j < 100; j++) { | 842 | for (j = 0; j < 100; j++) { |
| 843 | rcu_read_lock(); | ||
| 842 | for (k = 0; k < 100; k++) { | 844 | for (k = 0; k < 100; k++) { |
| 843 | xas_set(&xas, j); | 845 | xas_set(&xas, j); |
| 844 | xas_for_each_marked(&xas, entry, k, XA_MARK_0) | 846 | xas_for_each_marked(&xas, entry, k, XA_MARK_0) |
| @@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa) | |||
| 847 | XA_BUG_ON(xa, | 849 | XA_BUG_ON(xa, |
| 848 | xas.xa_node != XAS_RESTART); | 850 | xas.xa_node != XAS_RESTART); |
| 849 | } | 851 | } |
| 852 | rcu_read_unlock(); | ||
| 850 | } | 853 | } |
| 851 | xa_store_index(xa, i, GFP_KERNEL); | 854 | xa_store_index(xa, i, GFP_KERNEL); |
| 852 | xa_set_mark(xa, i, XA_MARK_0); | 855 | xa_set_mark(xa, i, XA_MARK_0); |
| @@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa) | |||
| 1183 | } | 1186 | } |
| 1184 | } | 1187 | } |
| 1185 | 1188 | ||
| 1189 | static void check_align_1(struct xarray *xa, char *name) | ||
| 1190 | { | ||
| 1191 | int i; | ||
| 1192 | unsigned int id; | ||
| 1193 | unsigned long index; | ||
| 1194 | void *entry; | ||
| 1195 | |||
| 1196 | for (i = 0; i < 8; i++) { | ||
| 1197 | id = 0; | ||
| 1198 | XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) | ||
| 1199 | != 0); | ||
| 1200 | XA_BUG_ON(xa, id != i); | ||
| 1201 | } | ||
| 1202 | xa_for_each(xa, index, entry) | ||
| 1203 | XA_BUG_ON(xa, xa_is_err(entry)); | ||
| 1204 | xa_destroy(xa); | ||
| 1205 | } | ||
| 1206 | |||
| 1207 | static noinline void check_align(struct xarray *xa) | ||
| 1208 | { | ||
| 1209 | char name[] = "Motorola 68000"; | ||
| 1210 | |||
| 1211 | check_align_1(xa, name); | ||
| 1212 | check_align_1(xa, name + 1); | ||
| 1213 | check_align_1(xa, name + 2); | ||
| 1214 | check_align_1(xa, name + 3); | ||
| 1215 | // check_align_2(xa, name); | ||
| 1216 | } | ||
| 1217 | |||
| 1186 | static LIST_HEAD(shadow_nodes); | 1218 | static LIST_HEAD(shadow_nodes); |
| 1187 | 1219 | ||
| 1188 | static void test_update_node(struct xa_node *node) | 1220 | static void test_update_node(struct xa_node *node) |
| @@ -1332,6 +1364,7 @@ static int xarray_checks(void) | |||
| 1332 | check_create_range(&array); | 1364 | check_create_range(&array); |
| 1333 | check_store_range(&array); | 1365 | check_store_range(&array); |
| 1334 | check_store_iter(&array); | 1366 | check_store_iter(&array); |
| 1367 | check_align(&xa0); | ||
| 1335 | 1368 | ||
| 1336 | check_workingset(&array, 0); | 1369 | check_workingset(&array, 0); |
| 1337 | check_workingset(&array, 64); | 1370 | check_workingset(&array, 64); |
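The test updates above follow a narrower xa_for_each() calling convention that takes only the array, the index cursor and the entry pointer, visiting every present entry from index 0 upward. A minimal usage sketch under that assumption:

	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry) {
		/* entry is each non-NULL element; index is its position */
	}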
diff --git a/lib/xarray.c b/lib/xarray.c index 5f3f9311de89..81c3171ddde9 100644 --- a/lib/xarray.c +++ b/lib/xarray.c | |||
| @@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas) | |||
| 232 | if (xas->xa_shift > node->shift) | 232 | if (xas->xa_shift > node->shift) |
| 233 | break; | 233 | break; |
| 234 | entry = xas_descend(xas, node); | 234 | entry = xas_descend(xas, node); |
| 235 | if (node->shift == 0) | ||
| 236 | break; | ||
| 235 | } | 237 | } |
| 236 | return entry; | 238 | return entry; |
| 237 | } | 239 | } |
| @@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) | |||
| 506 | for (;;) { | 508 | for (;;) { |
| 507 | void *entry = xa_entry_locked(xas->xa, node, offset); | 509 | void *entry = xa_entry_locked(xas->xa, node, offset); |
| 508 | 510 | ||
| 509 | if (xa_is_node(entry)) { | 511 | if (node->shift && xa_is_node(entry)) { |
| 510 | node = xa_to_node(entry); | 512 | node = xa_to_node(entry); |
| 511 | offset = 0; | 513 | offset = 0; |
| 512 | continue; | 514 | continue; |
| @@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head) | |||
| 604 | /* | 606 | /* |
| 605 | * xas_create() - Create a slot to store an entry in. | 607 | * xas_create() - Create a slot to store an entry in. |
| 606 | * @xas: XArray operation state. | 608 | * @xas: XArray operation state. |
| 609 | * @allow_root: %true if we can store the entry in the root directly | ||
| 607 | * | 610 | * |
| 608 | * Most users will not need to call this function directly, as it is called | 611 | * Most users will not need to call this function directly, as it is called |
| 609 | * by xas_store(). It is useful for doing conditional store operations | 612 | * by xas_store(). It is useful for doing conditional store operations |
| @@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head) | |||
| 613 | * If the slot was newly created, returns %NULL. If it failed to create the | 616 | * If the slot was newly created, returns %NULL. If it failed to create the |
| 614 | * slot, returns %NULL and indicates the error in @xas. | 617 | * slot, returns %NULL and indicates the error in @xas. |
| 615 | */ | 618 | */ |
| 616 | static void *xas_create(struct xa_state *xas) | 619 | static void *xas_create(struct xa_state *xas, bool allow_root) |
| 617 | { | 620 | { |
| 618 | struct xarray *xa = xas->xa; | 621 | struct xarray *xa = xas->xa; |
| 619 | void *entry; | 622 | void *entry; |
| @@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas) | |||
| 628 | shift = xas_expand(xas, entry); | 631 | shift = xas_expand(xas, entry); |
| 629 | if (shift < 0) | 632 | if (shift < 0) |
| 630 | return NULL; | 633 | return NULL; |
| 634 | if (!shift && !allow_root) | ||
| 635 | shift = XA_CHUNK_SHIFT; | ||
| 631 | entry = xa_head_locked(xa); | 636 | entry = xa_head_locked(xa); |
| 632 | slot = &xa->xa_head; | 637 | slot = &xa->xa_head; |
| 633 | } else if (xas_error(xas)) { | 638 | } else if (xas_error(xas)) { |
| @@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas) | |||
| 687 | xas->xa_sibs = 0; | 692 | xas->xa_sibs = 0; |
| 688 | 693 | ||
| 689 | for (;;) { | 694 | for (;;) { |
| 690 | xas_create(xas); | 695 | xas_create(xas, true); |
| 691 | if (xas_error(xas)) | 696 | if (xas_error(xas)) |
| 692 | goto restore; | 697 | goto restore; |
| 693 | if (xas->xa_index <= (index | XA_CHUNK_MASK)) | 698 | if (xas->xa_index <= (index | XA_CHUNK_MASK)) |
| @@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry) | |||
| 754 | bool value = xa_is_value(entry); | 759 | bool value = xa_is_value(entry); |
| 755 | 760 | ||
| 756 | if (entry) | 761 | if (entry) |
| 757 | first = xas_create(xas); | 762 | first = xas_create(xas, !xa_is_node(entry)); |
| 758 | else | 763 | else |
| 759 | first = xas_load(xas); | 764 | first = xas_load(xas); |
| 760 | 765 | ||
| @@ -1251,35 +1256,6 @@ void *xas_find_conflict(struct xa_state *xas) | |||
| 1251 | EXPORT_SYMBOL_GPL(xas_find_conflict); | 1256 | EXPORT_SYMBOL_GPL(xas_find_conflict); |
| 1252 | 1257 | ||
| 1253 | /** | 1258 | /** |
| 1254 | * xa_init_flags() - Initialise an empty XArray with flags. | ||
| 1255 | * @xa: XArray. | ||
| 1256 | * @flags: XA_FLAG values. | ||
| 1257 | * | ||
| 1258 | * If you need to initialise an XArray with special flags (eg you need | ||
| 1259 | * to take the lock from interrupt context), use this function instead | ||
| 1260 | * of xa_init(). | ||
| 1261 | * | ||
| 1262 | * Context: Any context. | ||
| 1263 | */ | ||
| 1264 | void xa_init_flags(struct xarray *xa, gfp_t flags) | ||
| 1265 | { | ||
| 1266 | unsigned int lock_type; | ||
| 1267 | static struct lock_class_key xa_lock_irq; | ||
| 1268 | static struct lock_class_key xa_lock_bh; | ||
| 1269 | |||
| 1270 | spin_lock_init(&xa->xa_lock); | ||
| 1271 | xa->xa_flags = flags; | ||
| 1272 | xa->xa_head = NULL; | ||
| 1273 | |||
| 1274 | lock_type = xa_lock_type(xa); | ||
| 1275 | if (lock_type == XA_LOCK_IRQ) | ||
| 1276 | lockdep_set_class(&xa->xa_lock, &xa_lock_irq); | ||
| 1277 | else if (lock_type == XA_LOCK_BH) | ||
| 1278 | lockdep_set_class(&xa->xa_lock, &xa_lock_bh); | ||
| 1279 | } | ||
| 1280 | EXPORT_SYMBOL(xa_init_flags); | ||
| 1281 | |||
| 1282 | /** | ||
| 1283 | * xa_load() - Load an entry from an XArray. | 1259 | * xa_load() - Load an entry from an XArray. |
| 1284 | * @xa: XArray. | 1260 | * @xa: XArray. |
| 1285 | * @index: index into array. | 1261 | * @index: index into array. |
| @@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr) | |||
| 1308 | { | 1284 | { |
| 1309 | if (xa_is_zero(curr)) | 1285 | if (xa_is_zero(curr)) |
| 1310 | return NULL; | 1286 | return NULL; |
| 1311 | XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr)); | ||
| 1312 | if (xas_error(xas)) | 1287 | if (xas_error(xas)) |
| 1313 | curr = xas->xa_node; | 1288 | curr = xas->xa_node; |
| 1314 | return curr; | 1289 | return curr; |
| @@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) | |||
| 1378 | XA_STATE(xas, xa, index); | 1353 | XA_STATE(xas, xa, index); |
| 1379 | void *curr; | 1354 | void *curr; |
| 1380 | 1355 | ||
| 1381 | if (WARN_ON_ONCE(xa_is_internal(entry))) | 1356 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
| 1382 | return XA_ERROR(-EINVAL); | 1357 | return XA_ERROR(-EINVAL); |
| 1383 | if (xa_track_free(xa) && !entry) | 1358 | if (xa_track_free(xa) && !entry) |
| 1384 | entry = XA_ZERO_ENTRY; | 1359 | entry = XA_ZERO_ENTRY; |
| @@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, | |||
| 1444 | XA_STATE(xas, xa, index); | 1419 | XA_STATE(xas, xa, index); |
| 1445 | void *curr; | 1420 | void *curr; |
| 1446 | 1421 | ||
| 1447 | if (WARN_ON_ONCE(xa_is_internal(entry))) | 1422 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
| 1448 | return XA_ERROR(-EINVAL); | 1423 | return XA_ERROR(-EINVAL); |
| 1449 | if (xa_track_free(xa) && !entry) | 1424 | if (xa_track_free(xa) && !entry) |
| 1450 | entry = XA_ZERO_ENTRY; | 1425 | entry = XA_ZERO_ENTRY; |
| @@ -1465,6 +1440,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, | |||
| 1465 | EXPORT_SYMBOL(__xa_cmpxchg); | 1440 | EXPORT_SYMBOL(__xa_cmpxchg); |
| 1466 | 1441 | ||
| 1467 | /** | 1442 | /** |
| 1443 | * __xa_insert() - Store this entry in the XArray if no entry is present. | ||
| 1444 | * @xa: XArray. | ||
| 1445 | * @index: Index into array. | ||
| 1446 | * @entry: New entry. | ||
| 1447 | * @gfp: Memory allocation flags. | ||
| 1448 | * | ||
| 1449 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | ||
| 1450 | * if no entry is present. Inserting will fail if a reserved entry is | ||
| 1451 | * present, even though loading from this index will return NULL. | ||
| 1452 | * | ||
| 1453 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
| 1454 | * release and reacquire xa_lock if @gfp flags permit. | ||
| 1455 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | ||
| 1456 | * -ENOMEM if memory could not be allocated. | ||
| 1457 | */ | ||
| 1458 | int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) | ||
| 1459 | { | ||
| 1460 | XA_STATE(xas, xa, index); | ||
| 1461 | void *curr; | ||
| 1462 | |||
| 1463 | if (WARN_ON_ONCE(xa_is_advanced(entry))) | ||
| 1464 | return -EINVAL; | ||
| 1465 | if (!entry) | ||
| 1466 | entry = XA_ZERO_ENTRY; | ||
| 1467 | |||
| 1468 | do { | ||
| 1469 | curr = xas_load(&xas); | ||
| 1470 | if (!curr) { | ||
| 1471 | xas_store(&xas, entry); | ||
| 1472 | if (xa_track_free(xa)) | ||
| 1473 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
| 1474 | } else { | ||
| 1475 | xas_set_err(&xas, -EEXIST); | ||
| 1476 | } | ||
| 1477 | } while (__xas_nomem(&xas, gfp)); | ||
| 1478 | |||
| 1479 | return xas_error(&xas); | ||
| 1480 | } | ||
| 1481 | EXPORT_SYMBOL(__xa_insert); | ||
| 1482 | |||
| 1483 | /** | ||
| 1468 | * __xa_reserve() - Reserve this index in the XArray. | 1484 | * __xa_reserve() - Reserve this index in the XArray. |
| 1469 | * @xa: XArray. | 1485 | * @xa: XArray. |
| 1470 | * @index: Index into array. | 1486 | * @index: Index into array. |
| @@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first, | |||
| 1567 | if (last + 1) | 1583 | if (last + 1) |
| 1568 | order = __ffs(last + 1); | 1584 | order = __ffs(last + 1); |
| 1569 | xas_set_order(&xas, last, order); | 1585 | xas_set_order(&xas, last, order); |
| 1570 | xas_create(&xas); | 1586 | xas_create(&xas, true); |
| 1571 | if (xas_error(&xas)) | 1587 | if (xas_error(&xas)) |
| 1572 | goto unlock; | 1588 | goto unlock; |
| 1573 | } | 1589 | } |
| @@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) | |||
| 1609 | XA_STATE(xas, xa, 0); | 1625 | XA_STATE(xas, xa, 0); |
| 1610 | int err; | 1626 | int err; |
| 1611 | 1627 | ||
| 1612 | if (WARN_ON_ONCE(xa_is_internal(entry))) | 1628 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
| 1613 | return -EINVAL; | 1629 | return -EINVAL; |
| 1614 | if (WARN_ON_ONCE(!xa_track_free(xa))) | 1630 | if (WARN_ON_ONCE(!xa_track_free(xa))) |
| 1615 | return -EINVAL; | 1631 | return -EINVAL; |
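The new __xa_insert() treats a reserved slot as occupied, matching the check_reserve() change in the test suite: inserting at a reserved index fails with -EEXIST even though a load from that index returns NULL. A sketch of that behaviour using the locking wrappers, with xa, index, v and err assumed to be set up by the caller:

	xa_reserve(xa, index, GFP_KERNEL);			/* slot exists, loads as NULL */
	err = xa_insert(xa, index, xa_mk_value(v), GFP_KERNEL);	/* err == -EEXIST             */
	xa_erase(xa, index);					/* drops the reservation      */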
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 8a8bb8796c6c..72e6d0c55cfa 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) | |||
| 689 | INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); | 689 | INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); |
| 690 | bdi->cgwb_congested_tree = RB_ROOT; | 690 | bdi->cgwb_congested_tree = RB_ROOT; |
| 691 | mutex_init(&bdi->cgwb_release_mutex); | 691 | mutex_init(&bdi->cgwb_release_mutex); |
| 692 | init_rwsem(&bdi->wb_switch_rwsem); | ||
| 692 | 693 | ||
| 693 | ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); | 694 | ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); |
| 694 | if (!ret) { | 695 | if (!ret) { |
diff --git a/mm/mincore.c b/mm/mincore.c index f0f91461a9f4..218099b5ed31 100644 --- a/mm/mincore.c +++ b/mm/mincore.c | |||
| @@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, | |||
| 42 | return 0; | 42 | return 0; |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | static int mincore_unmapped_range(unsigned long addr, unsigned long end, | 45 | /* |
| 46 | struct mm_walk *walk) | 46 | * Later we can get more picky about what "in core" means precisely. |
| 47 | * For now, simply check to see if the page is in the page cache, | ||
| 48 | * and is up to date; i.e. that no page-in operation would be required | ||
| 49 | * at this time if an application were to map and access this page. | ||
| 50 | */ | ||
| 51 | static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) | ||
| 52 | { | ||
| 53 | unsigned char present = 0; | ||
| 54 | struct page *page; | ||
| 55 | |||
| 56 | /* | ||
| 57 | * When tmpfs swaps out a page from a file, any process mapping that | ||
| 58 | * file will not get a swp_entry_t in its pte, but rather it is like | ||
| 59 | * any other file mapping (ie. marked !present and faulted in with | ||
| 60 | * tmpfs's .fault). So swapped out tmpfs mappings are tested here. | ||
| 61 | */ | ||
| 62 | #ifdef CONFIG_SWAP | ||
| 63 | if (shmem_mapping(mapping)) { | ||
| 64 | page = find_get_entry(mapping, pgoff); | ||
| 65 | /* | ||
| 66 | * shmem/tmpfs may return swap: account for swapcache | ||
| 67 | * page too. | ||
| 68 | */ | ||
| 69 | if (xa_is_value(page)) { | ||
| 70 | swp_entry_t swp = radix_to_swp_entry(page); | ||
| 71 | page = find_get_page(swap_address_space(swp), | ||
| 72 | swp_offset(swp)); | ||
| 73 | } | ||
| 74 | } else | ||
| 75 | page = find_get_page(mapping, pgoff); | ||
| 76 | #else | ||
| 77 | page = find_get_page(mapping, pgoff); | ||
| 78 | #endif | ||
| 79 | if (page) { | ||
| 80 | present = PageUptodate(page); | ||
| 81 | put_page(page); | ||
| 82 | } | ||
| 83 | |||
| 84 | return present; | ||
| 85 | } | ||
| 86 | |||
| 87 | static int __mincore_unmapped_range(unsigned long addr, unsigned long end, | ||
| 88 | struct vm_area_struct *vma, unsigned char *vec) | ||
| 47 | { | 89 | { |
| 48 | unsigned char *vec = walk->private; | ||
| 49 | unsigned long nr = (end - addr) >> PAGE_SHIFT; | 90 | unsigned long nr = (end - addr) >> PAGE_SHIFT; |
| 91 | int i; | ||
| 50 | 92 | ||
| 51 | memset(vec, 0, nr); | 93 | if (vma->vm_file) { |
| 52 | walk->private += nr; | 94 | pgoff_t pgoff; |
| 95 | |||
| 96 | pgoff = linear_page_index(vma, addr); | ||
| 97 | for (i = 0; i < nr; i++, pgoff++) | ||
| 98 | vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); | ||
| 99 | } else { | ||
| 100 | for (i = 0; i < nr; i++) | ||
| 101 | vec[i] = 0; | ||
| 102 | } | ||
| 103 | return nr; | ||
| 104 | } | ||
| 105 | |||
| 106 | static int mincore_unmapped_range(unsigned long addr, unsigned long end, | ||
| 107 | struct mm_walk *walk) | ||
| 108 | { | ||
| 109 | walk->private += __mincore_unmapped_range(addr, end, | ||
| 110 | walk->vma, walk->private); | ||
| 53 | return 0; | 111 | return 0; |
| 54 | } | 112 | } |
| 55 | 113 | ||
| @@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
| 69 | goto out; | 127 | goto out; |
| 70 | } | 128 | } |
| 71 | 129 | ||
| 72 | /* We'll consider a THP page under construction to be there */ | ||
| 73 | if (pmd_trans_unstable(pmd)) { | 130 | if (pmd_trans_unstable(pmd)) { |
| 74 | memset(vec, 1, nr); | 131 | __mincore_unmapped_range(addr, end, vma, vec); |
| 75 | goto out; | 132 | goto out; |
| 76 | } | 133 | } |
| 77 | 134 | ||
| @@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
| 80 | pte_t pte = *ptep; | 137 | pte_t pte = *ptep; |
| 81 | 138 | ||
| 82 | if (pte_none(pte)) | 139 | if (pte_none(pte)) |
| 83 | *vec = 0; | 140 | __mincore_unmapped_range(addr, addr + PAGE_SIZE, |
| 141 | vma, vec); | ||
| 84 | else if (pte_present(pte)) | 142 | else if (pte_present(pte)) |
| 85 | *vec = 1; | 143 | *vec = 1; |
| 86 | else { /* pte is a swap entry */ | 144 | else { /* pte is a swap entry */ |
| 87 | swp_entry_t entry = pte_to_swp_entry(pte); | 145 | swp_entry_t entry = pte_to_swp_entry(pte); |
| 88 | 146 | ||
| 89 | /* | 147 | if (non_swap_entry(entry)) { |
| 90 | * migration or hwpoison entries are always | 148 | /* |
| 91 | * uptodate | 149 | * migration or hwpoison entries are always |
| 92 | */ | 150 | * uptodate |
| 93 | *vec = !!non_swap_entry(entry); | 151 | */ |
| 152 | *vec = 1; | ||
| 153 | } else { | ||
| 154 | #ifdef CONFIG_SWAP | ||
| 155 | *vec = mincore_page(swap_address_space(entry), | ||
| 156 | swp_offset(entry)); | ||
| 157 | #else | ||
| 158 | WARN_ON(1); | ||
| 159 | *vec = 1; | ||
| 160 | #endif | ||
| 161 | } | ||
| 94 | } | 162 | } |
| 95 | vec++; | 163 | vec++; |
| 96 | } | 164 | } |
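With this change the unmapped and pte_none cases report page-cache residency of the backing file rather than a flat answer, so the user-visible mincore(2) vector distinguishes cached from uncached file pages. A small userspace probe of those semantics (the path and single-page length are arbitrary):

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(int argc, char **argv)
	{
		const char *path = argc > 1 ? argv[1] : "/etc/hostname";
		long psz = sysconf(_SC_PAGESIZE);
		int fd = open(path, O_RDONLY);
		unsigned char vec[1];
		void *p;

		if (fd < 0)
			return 1;
		p = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		if (mincore(p, psz, vec) == 0)
			printf("first page of %s: %s\n", path,
			       (vec[0] & 1) ? "in core" : "not in core");
		munmap(p, psz);
		close(fd);
		return 0;
	}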
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 70417e9b932d..314bbc8010fb 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c | |||
| @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) | |||
| 114 | dst = (ax25_address *)(bp + 1); | 114 | dst = (ax25_address *)(bp + 1); |
| 115 | src = (ax25_address *)(bp + 8); | 115 | src = (ax25_address *)(bp + 8); |
| 116 | 116 | ||
| 117 | ax25_route_lock_use(); | ||
| 117 | route = ax25_get_route(dst, NULL); | 118 | route = ax25_get_route(dst, NULL); |
| 118 | if (route) { | 119 | if (route) { |
| 119 | digipeat = route->digipeat; | 120 | digipeat = route->digipeat; |
| @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) | |||
| 206 | ax25_queue_xmit(skb, dev); | 207 | ax25_queue_xmit(skb, dev); |
| 207 | 208 | ||
| 208 | put: | 209 | put: |
| 209 | if (route) | ||
| 210 | ax25_put_route(route); | ||
| 211 | 210 | ||
| 211 | ax25_route_lock_unuse(); | ||
| 212 | return NETDEV_TX_OK; | 212 | return NETDEV_TX_OK; |
| 213 | } | 213 | } |
| 214 | 214 | ||
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index a0eff323af12..66f74c85cf6b 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include <linux/export.h> | 40 | #include <linux/export.h> |
| 41 | 41 | ||
| 42 | static ax25_route *ax25_route_list; | 42 | static ax25_route *ax25_route_list; |
| 43 | static DEFINE_RWLOCK(ax25_route_lock); | 43 | DEFINE_RWLOCK(ax25_route_lock); |
| 44 | 44 | ||
| 45 | void ax25_rt_device_down(struct net_device *dev) | 45 | void ax25_rt_device_down(struct net_device *dev) |
| 46 | { | 46 | { |
| @@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = { | |||
| 335 | * Find AX.25 route | 335 | * Find AX.25 route |
| 336 | * | 336 | * |
| 337 | * Only routes with a reference count of zero can be destroyed. | 337 | * Only routes with a reference count of zero can be destroyed. |
| 338 | * Must be called with ax25_route_lock read locked. | ||
| 338 | */ | 339 | */ |
| 339 | ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | 340 | ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
| 340 | { | 341 | { |
| @@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | |||
| 342 | ax25_route *ax25_def_rt = NULL; | 343 | ax25_route *ax25_def_rt = NULL; |
| 343 | ax25_route *ax25_rt; | 344 | ax25_route *ax25_rt; |
| 344 | 345 | ||
| 345 | read_lock(&ax25_route_lock); | ||
| 346 | /* | 346 | /* |
| 347 | * Bind to the physical interface we heard them on, or the default | 347 | * Bind to the physical interface we heard them on, or the default |
| 348 | * route if none is found; | 348 | * route if none is found; |
| @@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | |||
| 365 | if (ax25_spe_rt != NULL) | 365 | if (ax25_spe_rt != NULL) |
| 366 | ax25_rt = ax25_spe_rt; | 366 | ax25_rt = ax25_spe_rt; |
| 367 | 367 | ||
| 368 | if (ax25_rt != NULL) | ||
| 369 | ax25_hold_route(ax25_rt); | ||
| 370 | |||
| 371 | read_unlock(&ax25_route_lock); | ||
| 372 | |||
| 373 | return ax25_rt; | 368 | return ax25_rt; |
| 374 | } | 369 | } |
| 375 | 370 | ||
| @@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) | |||
| 400 | ax25_route *ax25_rt; | 395 | ax25_route *ax25_rt; |
| 401 | int err = 0; | 396 | int err = 0; |
| 402 | 397 | ||
| 403 | if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) | 398 | ax25_route_lock_use(); |
| 399 | ax25_rt = ax25_get_route(addr, NULL); | ||
| 400 | if (!ax25_rt) { | ||
| 401 | ax25_route_lock_unuse(); | ||
| 404 | return -EHOSTUNREACH; | 402 | return -EHOSTUNREACH; |
| 405 | 403 | } | |
| 406 | if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { | 404 | if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { |
| 407 | err = -EHOSTUNREACH; | 405 | err = -EHOSTUNREACH; |
| 408 | goto put; | 406 | goto put; |
| @@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) | |||
| 437 | } | 435 | } |
| 438 | 436 | ||
| 439 | put: | 437 | put: |
| 440 | ax25_put_route(ax25_rt); | 438 | ax25_route_lock_unuse(); |
| 441 | |||
| 442 | return err; | 439 | return err; |
| 443 | } | 440 | } |
| 444 | 441 | ||
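Both call sites now follow the same contract: ax25_get_route() itself takes no reference, so the returned route is only stable while the caller holds the route lock via the ax25_route_lock_use()/ax25_route_lock_unuse() helpers this series introduces. Caller-side sketch:

	ax25_route_lock_use();
	ax25_rt = ax25_get_route(addr, NULL);
	if (ax25_rt) {
		/* dereference ax25_rt->dev, ax25_rt->digipeat, ... */
	}
	ax25_route_lock_unuse();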
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 7acfc83087d5..7ee4fea93637 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c | |||
| @@ -13,39 +13,24 @@ | |||
| 13 | extern char bpfilter_umh_start; | 13 | extern char bpfilter_umh_start; |
| 14 | extern char bpfilter_umh_end; | 14 | extern char bpfilter_umh_end; |
| 15 | 15 | ||
| 16 | static struct umh_info info; | 16 | static void shutdown_umh(void) |
| 17 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ | ||
| 18 | static DEFINE_MUTEX(bpfilter_lock); | ||
| 19 | |||
| 20 | static void shutdown_umh(struct umh_info *info) | ||
| 21 | { | 17 | { |
| 22 | struct task_struct *tsk; | 18 | struct task_struct *tsk; |
| 23 | 19 | ||
| 24 | if (!info->pid) | 20 | if (bpfilter_ops.stop) |
| 25 | return; | 21 | return; |
| 26 | tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); | 22 | |
| 23 | tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID); | ||
| 27 | if (tsk) { | 24 | if (tsk) { |
| 28 | force_sig(SIGKILL, tsk); | 25 | force_sig(SIGKILL, tsk); |
| 29 | put_task_struct(tsk); | 26 | put_task_struct(tsk); |
| 30 | } | 27 | } |
| 31 | fput(info->pipe_to_umh); | ||
| 32 | fput(info->pipe_from_umh); | ||
| 33 | info->pid = 0; | ||
| 34 | } | 28 | } |
| 35 | 29 | ||
| 36 | static void __stop_umh(void) | 30 | static void __stop_umh(void) |
| 37 | { | 31 | { |
| 38 | if (IS_ENABLED(CONFIG_INET)) { | 32 | if (IS_ENABLED(CONFIG_INET)) |
| 39 | bpfilter_process_sockopt = NULL; | 33 | shutdown_umh(); |
| 40 | shutdown_umh(&info); | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | static void stop_umh(void) | ||
| 45 | { | ||
| 46 | mutex_lock(&bpfilter_lock); | ||
| 47 | __stop_umh(); | ||
| 48 | mutex_unlock(&bpfilter_lock); | ||
| 49 | } | 34 | } |
| 50 | 35 | ||
| 51 | static int __bpfilter_process_sockopt(struct sock *sk, int optname, | 36 | static int __bpfilter_process_sockopt(struct sock *sk, int optname, |
| @@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
| 63 | req.cmd = optname; | 48 | req.cmd = optname; |
| 64 | req.addr = (long __force __user)optval; | 49 | req.addr = (long __force __user)optval; |
| 65 | req.len = optlen; | 50 | req.len = optlen; |
| 66 | mutex_lock(&bpfilter_lock); | 51 | if (!bpfilter_ops.info.pid) |
| 67 | if (!info.pid) | ||
| 68 | goto out; | 52 | goto out; |
| 69 | n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); | 53 | n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), |
| 54 | &pos); | ||
| 70 | if (n != sizeof(req)) { | 55 | if (n != sizeof(req)) { |
| 71 | pr_err("write fail %zd\n", n); | 56 | pr_err("write fail %zd\n", n); |
| 72 | __stop_umh(); | 57 | __stop_umh(); |
| @@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
| 74 | goto out; | 59 | goto out; |
| 75 | } | 60 | } |
| 76 | pos = 0; | 61 | pos = 0; |
| 77 | n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); | 62 | n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), |
| 63 | &pos); | ||
| 78 | if (n != sizeof(reply)) { | 64 | if (n != sizeof(reply)) { |
| 79 | pr_err("read fail %zd\n", n); | 65 | pr_err("read fail %zd\n", n); |
| 80 | __stop_umh(); | 66 | __stop_umh(); |
| @@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, | |||
| 83 | } | 69 | } |
| 84 | ret = reply.status; | 70 | ret = reply.status; |
| 85 | out: | 71 | out: |
| 86 | mutex_unlock(&bpfilter_lock); | ||
| 87 | return ret; | 72 | return ret; |
| 88 | } | 73 | } |
| 89 | 74 | ||
| 90 | static int __init load_umh(void) | 75 | static int start_umh(void) |
| 91 | { | 76 | { |
| 92 | int err; | 77 | int err; |
| 93 | 78 | ||
| 94 | /* fork usermode process */ | 79 | /* fork usermode process */ |
| 95 | info.cmdline = "bpfilter_umh"; | ||
| 96 | err = fork_usermode_blob(&bpfilter_umh_start, | 80 | err = fork_usermode_blob(&bpfilter_umh_start, |
| 97 | &bpfilter_umh_end - &bpfilter_umh_start, | 81 | &bpfilter_umh_end - &bpfilter_umh_start, |
| 98 | &info); | 82 | &bpfilter_ops.info); |
| 99 | if (err) | 83 | if (err) |
| 100 | return err; | 84 | return err; |
| 101 | pr_info("Loaded bpfilter_umh pid %d\n", info.pid); | 85 | bpfilter_ops.stop = false; |
| 86 | pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid); | ||
| 102 | 87 | ||
| 103 | /* health check that usermode process started correctly */ | 88 | /* health check that usermode process started correctly */ |
| 104 | if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { | 89 | if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { |
| 105 | stop_umh(); | 90 | shutdown_umh(); |
| 106 | return -EFAULT; | 91 | return -EFAULT; |
| 107 | } | 92 | } |
| 108 | if (IS_ENABLED(CONFIG_INET)) | ||
| 109 | bpfilter_process_sockopt = &__bpfilter_process_sockopt; | ||
| 110 | 93 | ||
| 111 | return 0; | 94 | return 0; |
| 112 | } | 95 | } |
| 113 | 96 | ||
| 97 | static int __init load_umh(void) | ||
| 98 | { | ||
| 99 | int err; | ||
| 100 | |||
| 101 | mutex_lock(&bpfilter_ops.lock); | ||
| 102 | if (!bpfilter_ops.stop) { | ||
| 103 | err = -EFAULT; | ||
| 104 | goto out; | ||
| 105 | } | ||
| 106 | err = start_umh(); | ||
| 107 | if (!err && IS_ENABLED(CONFIG_INET)) { | ||
| 108 | bpfilter_ops.sockopt = &__bpfilter_process_sockopt; | ||
| 109 | bpfilter_ops.start = &start_umh; | ||
| 110 | } | ||
| 111 | out: | ||
| 112 | mutex_unlock(&bpfilter_ops.lock); | ||
| 113 | return err; | ||
| 114 | } | ||
| 115 | |||
| 114 | static void __exit fini_umh(void) | 116 | static void __exit fini_umh(void) |
| 115 | { | 117 | { |
| 116 | stop_umh(); | 118 | mutex_lock(&bpfilter_ops.lock); |
| 119 | if (IS_ENABLED(CONFIG_INET)) { | ||
| 120 | shutdown_umh(); | ||
| 121 | bpfilter_ops.start = NULL; | ||
| 122 | bpfilter_ops.sockopt = NULL; | ||
| 123 | } | ||
| 124 | mutex_unlock(&bpfilter_ops.lock); | ||
| 117 | } | 125 | } |
| 118 | module_init(load_umh); | 126 | module_init(load_umh); |
| 119 | module_exit(fini_umh); | 127 | module_exit(fini_umh); |
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S index 40311d10d2f2..9ea6100dca87 100644 --- a/net/bpfilter/bpfilter_umh_blob.S +++ b/net/bpfilter/bpfilter_umh_blob.S | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | .section .init.rodata, "a" | 2 | .section .rodata, "a" |
| 3 | .global bpfilter_umh_start | 3 | .global bpfilter_umh_start |
| 4 | bpfilter_umh_start: | 4 | bpfilter_umh_start: |
| 5 | .incbin "net/bpfilter/bpfilter_umh" | 5 | .incbin "net/bpfilter/bpfilter_umh" |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index fe3c758791ca..9e14767500ea 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -1128,6 +1128,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, | |||
| 1128 | err = -ENOMEM; | 1128 | err = -ENOMEM; |
| 1129 | goto err_unlock; | 1129 | goto err_unlock; |
| 1130 | } | 1130 | } |
| 1131 | if (swdev_notify) | ||
| 1132 | fdb->added_by_user = 1; | ||
| 1131 | fdb->added_by_external_learn = 1; | 1133 | fdb->added_by_external_learn = 1; |
| 1132 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); | 1134 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); |
| 1133 | } else { | 1135 | } else { |
| @@ -1147,6 +1149,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, | |||
| 1147 | modified = true; | 1149 | modified = true; |
| 1148 | } | 1150 | } |
| 1149 | 1151 | ||
| 1152 | if (swdev_notify) | ||
| 1153 | fdb->added_by_user = 1; | ||
| 1154 | |||
| 1150 | if (modified) | 1155 | if (modified) |
| 1151 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); | 1156 | fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); |
| 1152 | } | 1157 | } |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 5372e2042adf..48ddc60b4fbd 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
| @@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p, | |||
| 36 | 36 | ||
| 37 | int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) | 37 | int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) |
| 38 | { | 38 | { |
| 39 | skb_push(skb, ETH_HLEN); | ||
| 39 | if (!is_skb_forwardable(skb->dev, skb)) | 40 | if (!is_skb_forwardable(skb->dev, skb)) |
| 40 | goto drop; | 41 | goto drop; |
| 41 | 42 | ||
| 42 | skb_push(skb, ETH_HLEN); | ||
| 43 | br_drop_fake_rtable(skb); | 43 | br_drop_fake_rtable(skb); |
| 44 | 44 | ||
| 45 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 45 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
| @@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); | |||
| 65 | 65 | ||
| 66 | int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 66 | int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
| 67 | { | 67 | { |
| 68 | skb->tstamp = 0; | ||
| 68 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, | 69 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, |
| 69 | net, sk, skb, NULL, skb->dev, | 70 | net, sk, skb, NULL, skb->dev, |
| 70 | br_dev_queue_push_xmit); | 71 | br_dev_queue_push_xmit); |
| @@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to, | |||
| 97 | net = dev_net(indev); | 98 | net = dev_net(indev); |
| 98 | } else { | 99 | } else { |
| 99 | if (unlikely(netpoll_tx_running(to->br->dev))) { | 100 | if (unlikely(netpoll_tx_running(to->br->dev))) { |
| 100 | if (!is_skb_forwardable(skb->dev, skb)) { | 101 | skb_push(skb, ETH_HLEN); |
| 102 | if (!is_skb_forwardable(skb->dev, skb)) | ||
| 101 | kfree_skb(skb); | 103 | kfree_skb(skb); |
| 102 | } else { | 104 | else |
| 103 | skb_push(skb, ETH_HLEN); | ||
| 104 | br_netpoll_send_skb(to, skb); | 105 | br_netpoll_send_skb(to, skb); |
| 105 | } | ||
| 106 | return; | 106 | return; |
| 107 | } | 107 | } |
| 108 | br_hook = NF_BR_LOCAL_OUT; | 108 | br_hook = NF_BR_LOCAL_OUT; |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index d21a23698410..c93c35bb73dd 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
| @@ -265,7 +265,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ | |||
| 265 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); | 265 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
| 266 | int ret; | 266 | int ret; |
| 267 | 267 | ||
| 268 | if (neigh->hh.hh_len) { | 268 | if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) { |
| 269 | neigh_hh_bridge(&neigh->hh, skb); | 269 | neigh_hh_bridge(&neigh->hh, skb); |
| 270 | skb->dev = nf_bridge->physindev; | 270 | skb->dev = nf_bridge->physindev; |
| 271 | ret = br_handle_frame_finish(net, sk, skb); | 271 | ret = br_handle_frame_finish(net, sk, skb); |
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index 94039f588f1d..564710f88f93 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c | |||
| @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) | |||
| 131 | IPSTATS_MIB_INDISCARDS); | 131 | IPSTATS_MIB_INDISCARDS); |
| 132 | goto drop; | 132 | goto drop; |
| 133 | } | 133 | } |
| 134 | hdr = ipv6_hdr(skb); | ||
| 134 | } | 135 | } |
| 135 | if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) | 136 | if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) |
| 136 | goto drop; | 137 | goto drop; |
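The re-read of hdr matters because pskb_trim_rcsum() may reallocate the skb head (for instance when the skb is cloned), leaving previously computed header pointers dangling; the nft_reject_bridge.c hunk below applies the same fix. The general pattern, sketched with len as whatever length the caller trims to:

	if (pskb_trim_rcsum(skb, len))		/* may move skb->data              */
		goto drop;
	hdr = ipv6_hdr(skb);			/* so re-derive any cached header  */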
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d240b3e7919f..eabf8bf28a3f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
| @@ -107,6 +107,7 @@ struct br_tunnel_info { | |||
| 107 | /* private vlan flags */ | 107 | /* private vlan flags */ |
| 108 | enum { | 108 | enum { |
| 109 | BR_VLFLAG_PER_PORT_STATS = BIT(0), | 109 | BR_VLFLAG_PER_PORT_STATS = BIT(0), |
| 110 | BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1), | ||
| 110 | }; | 111 | }; |
| 111 | 112 | ||
| 112 | /** | 113 | /** |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 4a2f31157ef5..96abf8feb9dc 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
| @@ -80,16 +80,18 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags) | |||
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, | 82 | static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, |
| 83 | u16 vid, u16 flags, struct netlink_ext_ack *extack) | 83 | struct net_bridge_vlan *v, u16 flags, |
| 84 | struct netlink_ext_ack *extack) | ||
| 84 | { | 85 | { |
| 85 | int err; | 86 | int err; |
| 86 | 87 | ||
| 87 | /* Try switchdev op first. In case it is not supported, fallback to | 88 | /* Try switchdev op first. In case it is not supported, fallback to |
| 88 | * 8021q add. | 89 | * 8021q add. |
| 89 | */ | 90 | */ |
| 90 | err = br_switchdev_port_vlan_add(dev, vid, flags, extack); | 91 | err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack); |
| 91 | if (err == -EOPNOTSUPP) | 92 | if (err == -EOPNOTSUPP) |
| 92 | return vlan_vid_add(dev, br->vlan_proto, vid); | 93 | return vlan_vid_add(dev, br->vlan_proto, v->vid); |
| 94 | v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV; | ||
| 93 | return err; | 95 | return err; |
| 94 | } | 96 | } |
| 95 | 97 | ||
| @@ -121,19 +123,17 @@ static void __vlan_del_list(struct net_bridge_vlan *v) | |||
| 121 | } | 123 | } |
| 122 | 124 | ||
| 123 | static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, | 125 | static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, |
| 124 | u16 vid) | 126 | const struct net_bridge_vlan *v) |
| 125 | { | 127 | { |
| 126 | int err; | 128 | int err; |
| 127 | 129 | ||
| 128 | /* Try switchdev op first. In case it is not supported, fallback to | 130 | /* Try switchdev op first. In case it is not supported, fallback to |
| 129 | * 8021q del. | 131 | * 8021q del. |
| 130 | */ | 132 | */ |
| 131 | err = br_switchdev_port_vlan_del(dev, vid); | 133 | err = br_switchdev_port_vlan_del(dev, v->vid); |
| 132 | if (err == -EOPNOTSUPP) { | 134 | if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)) |
| 133 | vlan_vid_del(dev, br->vlan_proto, vid); | 135 | vlan_vid_del(dev, br->vlan_proto, v->vid); |
| 134 | return 0; | 136 | return err == -EOPNOTSUPP ? 0 : err; |
| 135 | } | ||
| 136 | return err; | ||
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | /* Returns a master vlan, if it didn't exist it gets created. In all cases a | 139 | /* Returns a master vlan, if it didn't exist it gets created. In all cases a |
| @@ -242,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, | |||
| 242 | * This ensures tagged traffic enters the bridge when | 242 | * This ensures tagged traffic enters the bridge when |
| 243 | * promiscuous mode is disabled by br_manage_promisc(). | 243 | * promiscuous mode is disabled by br_manage_promisc(). |
| 244 | */ | 244 | */ |
| 245 | err = __vlan_vid_add(dev, br, v->vid, flags, extack); | 245 | err = __vlan_vid_add(dev, br, v, flags, extack); |
| 246 | if (err) | 246 | if (err) |
| 247 | goto out; | 247 | goto out; |
| 248 | 248 | ||
| @@ -305,7 +305,7 @@ out_fdb_insert: | |||
| 305 | 305 | ||
| 306 | out_filt: | 306 | out_filt: |
| 307 | if (p) { | 307 | if (p) { |
| 308 | __vlan_vid_del(dev, br, v->vid); | 308 | __vlan_vid_del(dev, br, v); |
| 309 | if (masterv) { | 309 | if (masterv) { |
| 310 | if (v->stats && masterv->stats != v->stats) | 310 | if (v->stats && masterv->stats != v->stats) |
| 311 | free_percpu(v->stats); | 311 | free_percpu(v->stats); |
| @@ -338,7 +338,7 @@ static int __vlan_del(struct net_bridge_vlan *v) | |||
| 338 | 338 | ||
| 339 | __vlan_delete_pvid(vg, v->vid); | 339 | __vlan_delete_pvid(vg, v->vid); |
| 340 | if (p) { | 340 | if (p) { |
| 341 | err = __vlan_vid_del(p->dev, p->br, v->vid); | 341 | err = __vlan_vid_del(p->dev, p->br, v); |
| 342 | if (err) | 342 | if (err) |
| 343 | goto out; | 343 | goto out; |
| 344 | } else { | 344 | } else { |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 491828713e0b..5e55cef0cec3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
| @@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user, | |||
| 1137 | tmp.name[sizeof(tmp.name) - 1] = 0; | 1137 | tmp.name[sizeof(tmp.name) - 1] = 0; |
| 1138 | 1138 | ||
| 1139 | countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; | 1139 | countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; |
| 1140 | newinfo = vmalloc(sizeof(*newinfo) + countersize); | 1140 | newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, |
| 1141 | PAGE_KERNEL); | ||
| 1141 | if (!newinfo) | 1142 | if (!newinfo) |
| 1142 | return -ENOMEM; | 1143 | return -ENOMEM; |
| 1143 | 1144 | ||
| 1144 | if (countersize) | 1145 | if (countersize) |
| 1145 | memset(newinfo->counters, 0, countersize); | 1146 | memset(newinfo->counters, 0, countersize); |
| 1146 | 1147 | ||
| 1147 | newinfo->entries = vmalloc(tmp.entries_size); | 1148 | newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, |
| 1149 | PAGE_KERNEL); | ||
| 1148 | if (!newinfo->entries) { | 1150 | if (!newinfo->entries) { |
| 1149 | ret = -ENOMEM; | 1151 | ret = -ENOMEM; |
| 1150 | goto free_newinfo; | 1152 | goto free_newinfo; |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 08cbed7d940e..419e8edf23ba 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c | |||
| @@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) | |||
| 229 | pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) | 229 | pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) |
| 230 | return false; | 230 | return false; |
| 231 | 231 | ||
| 232 | ip6h = ipv6_hdr(skb); | ||
| 232 | thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); | 233 | thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); |
| 233 | if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) | 234 | if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) |
| 234 | return false; | 235 | return false; |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 0af8f0db892a..79bb8afa9c0c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
| @@ -67,6 +67,9 @@ | |||
| 67 | */ | 67 | */ |
| 68 | #define MAX_NFRAMES 256 | 68 | #define MAX_NFRAMES 256 |
| 69 | 69 | ||
| 70 | /* limit timers to 400 days for sending/timeouts */ | ||
| 71 | #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) | ||
| 72 | |||
| 70 | /* use of last_frames[index].flags */ | 73 | /* use of last_frames[index].flags */ |
| 71 | #define RX_RECV 0x40 /* received data for this element */ | 74 | #define RX_RECV 0x40 /* received data for this element */ |
| 72 | #define RX_THR 0x80 /* element not been sent due to throttle feature */ | 75 | #define RX_THR 0x80 /* element not been sent due to throttle feature */ |
| @@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) | |||
| 140 | return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); | 143 | return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); |
| 141 | } | 144 | } |
| 142 | 145 | ||
| 146 | /* check limitations for timeval provided by user */ | ||
| 147 | static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) | ||
| 148 | { | ||
| 149 | if ((msg_head->ival1.tv_sec < 0) || | ||
| 150 | (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || | ||
| 151 | (msg_head->ival1.tv_usec < 0) || | ||
| 152 | (msg_head->ival1.tv_usec >= USEC_PER_SEC) || | ||
| 153 | (msg_head->ival2.tv_sec < 0) || | ||
| 154 | (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || | ||
| 155 | (msg_head->ival2.tv_usec < 0) || | ||
| 156 | (msg_head->ival2.tv_usec >= USEC_PER_SEC)) | ||
| 157 | return true; | ||
| 158 | |||
| 159 | return false; | ||
| 160 | } | ||
| 161 | |||
| 143 | #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) | 162 | #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) |
| 144 | #define OPSIZ sizeof(struct bcm_op) | 163 | #define OPSIZ sizeof(struct bcm_op) |
| 145 | #define MHSIZ sizeof(struct bcm_msg_head) | 164 | #define MHSIZ sizeof(struct bcm_msg_head) |
| @@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
| 873 | if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) | 892 | if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) |
| 874 | return -EINVAL; | 893 | return -EINVAL; |
| 875 | 894 | ||
| 895 | /* check timeval limitations */ | ||
| 896 | if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) | ||
| 897 | return -EINVAL; | ||
| 898 | |||
| 876 | /* check the given can_id */ | 899 | /* check the given can_id */ |
| 877 | op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); | 900 | op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); |
| 878 | if (op) { | 901 | if (op) { |
| @@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
| 1053 | (!(msg_head->can_id & CAN_RTR_FLAG)))) | 1076 | (!(msg_head->can_id & CAN_RTR_FLAG)))) |
| 1054 | return -EINVAL; | 1077 | return -EINVAL; |
| 1055 | 1078 | ||
| 1079 | /* check timeval limitations */ | ||
| 1080 | if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) | ||
| 1081 | return -EINVAL; | ||
| 1082 | |||
| 1056 | /* check the given can_id */ | 1083 | /* check the given can_id */ |
| 1057 | op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); | 1084 | op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); |
| 1058 | if (op) { | 1085 | if (op) { |
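The 400-day cap keeps the converted interval far inside what a ktime_t (signed 64-bit nanoseconds) can represent, so bcm_timeval_to_ktime() cannot overflow for any accepted value. The arithmetic, as a standalone check:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const int64_t max_sec = 400LL * 24 * 60 * 60;	/* BCM_TIMER_SEC_MAX = 34560000 */
		const int64_t ns = max_sec * 1000000000LL;	/* 3.456e16 ns                  */

		printf("limit %lld s = %lld ns, ktime_t max %lld ns\n",
		       (long long)max_sec, (long long)ns, (long long)INT64_MAX);
		return 0;
	}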
diff --git a/net/can/gw.c b/net/can/gw.c index faa3da88a127..53859346dc9a 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
| @@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 416 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) | 416 | while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) |
| 417 | (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); | 417 | (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); |
| 418 | 418 | ||
| 419 | /* check for checksum updates when the CAN frame has been modified */ | 419 | /* Has the CAN frame been modified? */ |
| 420 | if (modidx) { | 420 | if (modidx) { |
| 421 | if (gwj->mod.csumfunc.crc8) | 421 | /* get available space for the processed CAN frame type */ |
| 422 | int max_len = nskb->len - offsetof(struct can_frame, data); | ||
| 423 | |||
| 424 | /* dlc may have changed, make sure it fits to the CAN frame */ | ||
| 425 | if (cf->can_dlc > max_len) | ||
| 426 | goto out_delete; | ||
| 427 | |||
| 428 | /* check for checksum updates in classic CAN length only */ | ||
| 429 | if (gwj->mod.csumfunc.crc8) { | ||
| 430 | if (cf->can_dlc > 8) | ||
| 431 | goto out_delete; | ||
| 432 | |||
| 422 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); | 433 | (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); |
| 434 | } | ||
| 435 | |||
| 436 | if (gwj->mod.csumfunc.xor) { | ||
| 437 | if (cf->can_dlc > 8) | ||
| 438 | goto out_delete; | ||
| 423 | 439 | ||
| 424 | if (gwj->mod.csumfunc.xor) | ||
| 425 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); | 440 | (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); |
| 441 | } | ||
| 426 | } | 442 | } |
| 427 | 443 | ||
| 428 | /* clear the skb timestamp if not configured the other way */ | 444 | /* clear the skb timestamp if not configured the other way */ |
| @@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) | |||
| 434 | gwj->dropped_frames++; | 450 | gwj->dropped_frames++; |
| 435 | else | 451 | else |
| 436 | gwj->handled_frames++; | 452 | gwj->handled_frames++; |
| 453 | |||
| 454 | return; | ||
| 455 | |||
| 456 | out_delete: | ||
| 457 | /* delete frame due to misconfiguration */ | ||
| 458 | gwj->deleted_frames++; | ||
| 459 | kfree_skb(nskb); | ||
| 460 | return; | ||
| 437 | } | 461 | } |
| 438 | 462 | ||
| 439 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) | 463 | static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) |
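The gw.c hunk bounds the possibly-rewritten can_dlc against the payload space actually present in the forwarded skb and allows crc8/xor checksum updates only for classic CAN lengths (dlc <= 8). A standalone sketch of that bound check follows; the frame struct and buffer sizes are illustrative stand-ins, not struct can_frame.

    /* Illustrative only: bound a modified length against the available payload. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct sketch_frame {
            unsigned int  id;
            unsigned char len;       /* may have been rewritten by a modifier */
            unsigned char data[64];  /* CAN FD sized payload */
    };

    /* Returns 0 if the frame may be forwarded, -1 if it must be dropped. */
    static int sketch_check_frame(const struct sketch_frame *f, size_t buf_len,
                                  int wants_checksum_update)
    {
            size_t max_len = buf_len - offsetof(struct sketch_frame, data);

            if (f->len > max_len)                     /* len no longer fits the buffer */
                    return -1;
            if (wants_checksum_update && f->len > 8)  /* checksum mods are classic-CAN only */
                    return -1;
            return 0;
    }

    int main(void)
    {
            struct sketch_frame f;

            memset(&f, 0, sizeof(f));
            f.len = 12;
            printf("plain forward: %d\n", sketch_check_frame(&f, sizeof(f), 0)); /* 0 */
            printf("with checksum: %d\n", sketch_check_frame(&f, sizeof(f), 1)); /* -1 */
            return 0;
    }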
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d5718284db57..3661cdd927f1 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -3206,9 +3206,10 @@ void ceph_con_keepalive(struct ceph_connection *con) | |||
| 3206 | dout("con_keepalive %p\n", con); | 3206 | dout("con_keepalive %p\n", con); |
| 3207 | mutex_lock(&con->mutex); | 3207 | mutex_lock(&con->mutex); |
| 3208 | clear_standby(con); | 3208 | clear_standby(con); |
| 3209 | con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING); | ||
| 3209 | mutex_unlock(&con->mutex); | 3210 | mutex_unlock(&con->mutex); |
| 3210 | if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && | 3211 | |
| 3211 | con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) | 3212 | if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) |
| 3212 | queue_con(con); | 3213 | queue_con(con); |
| 3213 | } | 3214 | } |
| 3214 | EXPORT_SYMBOL(ceph_con_keepalive); | 3215 | EXPORT_SYMBOL(ceph_con_keepalive); |
diff --git a/net/core/filter.c b/net/core/filter.c index 447dd1bad31f..7559d6835ecb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 2020 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, | 2020 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
| 2021 | u32 flags) | 2021 | u32 flags) |
| 2022 | { | 2022 | { |
| 2023 | /* skb->mac_len is not set on normal egress */ | 2023 | unsigned int mlen = skb_network_offset(skb); |
| 2024 | unsigned int mlen = skb->network_header - skb->mac_header; | ||
| 2025 | 2024 | ||
| 2026 | __skb_pull(skb, mlen); | 2025 | if (mlen) { |
| 2026 | __skb_pull(skb, mlen); | ||
| 2027 | 2027 | ||
| 2028 | /* At ingress, the mac header has already been pulled once. | 2028 | /* At ingress, the mac header has already been pulled once. |
| 2029 | * At egress, skb_postpull_rcsum has to be done in case that | 2029 | * the skb is originated from ingress (i.e. a forwarded skb) |
| 2030 | * the skb is originated from ingress (i.e. a forwarded skb) | 2030 | * the skb is originated from ingress (i.e. a forwarded skb) |
| 2031 | * to ensure that rcsum starts at net header. | 2031 | * to ensure that rcsum starts at net header. |
| 2032 | */ | 2032 | */ |
| 2033 | if (!skb_at_tc_ingress(skb)) | 2033 | if (!skb_at_tc_ingress(skb)) |
| 2034 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | 2034 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
| 2035 | } | ||
| 2035 | skb_pop_mac_header(skb); | 2036 | skb_pop_mac_header(skb); |
| 2036 | skb_reset_mac_len(skb); | 2037 | skb_reset_mac_len(skb); |
| 2037 | return flags & BPF_F_INGRESS ? | 2038 | return flags & BPF_F_INGRESS ? |
| @@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
| 4119 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | 4120 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
| 4120 | break; | 4121 | break; |
| 4121 | case SO_MAX_PACING_RATE: /* 32bit version */ | 4122 | case SO_MAX_PACING_RATE: /* 32bit version */ |
| 4123 | if (val != ~0U) | ||
| 4124 | cmpxchg(&sk->sk_pacing_status, | ||
| 4125 | SK_PACING_NONE, | ||
| 4126 | SK_PACING_NEEDED); | ||
| 4122 | sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; | 4127 | sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; |
| 4123 | sk->sk_pacing_rate = min(sk->sk_pacing_rate, | 4128 | sk->sk_pacing_rate = min(sk->sk_pacing_rate, |
| 4124 | sk->sk_max_pacing_rate); | 4129 | sk->sk_max_pacing_rate); |
| @@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
| 4132 | sk->sk_rcvlowat = val ? : 1; | 4137 | sk->sk_rcvlowat = val ? : 1; |
| 4133 | break; | 4138 | break; |
| 4134 | case SO_MARK: | 4139 | case SO_MARK: |
| 4135 | sk->sk_mark = val; | 4140 | if (sk->sk_mark != val) { |
| 4141 | sk->sk_mark = val; | ||
| 4142 | sk_dst_reset(sk); | ||
| 4143 | } | ||
| 4136 | break; | 4144 | break; |
| 4137 | default: | 4145 | default: |
| 4138 | ret = -EINVAL; | 4146 | ret = -EINVAL; |
| @@ -4203,7 +4211,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
| 4203 | /* Only some options are supported */ | 4211 | /* Only some options are supported */ |
| 4204 | switch (optname) { | 4212 | switch (optname) { |
| 4205 | case TCP_BPF_IW: | 4213 | case TCP_BPF_IW: |
| 4206 | if (val <= 0 || tp->data_segs_out > 0) | 4214 | if (val <= 0 || tp->data_segs_out > tp->syn_data) |
| 4207 | ret = -EINVAL; | 4215 | ret = -EINVAL; |
| 4208 | else | 4216 | else |
| 4209 | tp->snd_cwnd = val; | 4217 | tp->snd_cwnd = val; |
| @@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id) | |||
| 5309 | case BPF_FUNC_trace_printk: | 5317 | case BPF_FUNC_trace_printk: |
| 5310 | if (capable(CAP_SYS_ADMIN)) | 5318 | if (capable(CAP_SYS_ADMIN)) |
| 5311 | return bpf_get_trace_printk_proto(); | 5319 | return bpf_get_trace_printk_proto(); |
| 5312 | /* else: fall through */ | 5320 | /* else, fall through */ |
| 5313 | default: | 5321 | default: |
| 5314 | return NULL; | 5322 | return NULL; |
| 5315 | } | 5323 | } |
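In the filter.c hunk, __bpf_redirect_no_mac() now takes the MAC length from skb_network_offset() and skips the pull and checksum adjustment entirely when it is zero, so frames that never carried a MAC header are left untouched. Here is a standalone sketch of that guard over a plain buffer; the packet struct and offsets are invented for the example and do not model struct sk_buff.

    /* Illustrative only: strip an optional link-layer header before L3 processing. */
    #include <stdio.h>
    #include <string.h>

    struct sketch_pkt {
            unsigned char buf[128];
            size_t len;
            size_t mac_off;   /* offset of the (possibly absent) MAC header */
            size_t net_off;   /* offset of the network header */
    };

    static void sketch_strip_mac(struct sketch_pkt *p)
    {
            size_t mlen = p->net_off - p->mac_off;   /* 0 when no MAC header was pushed */

            if (mlen) {
                    /* drop the MAC header so the packet starts at the network header */
                    memmove(p->buf, p->buf + mlen, p->len - mlen);
                    p->len -= mlen;
            }
            p->net_off = 0;
            p->mac_off = 0;
    }

    int main(void)
    {
            struct sketch_pkt with_mac = { .len = 60, .mac_off = 0, .net_off = 14 };
            struct sketch_pkt no_mac   = { .len = 46, .mac_off = 0, .net_off = 0  };

            sketch_strip_mac(&with_mac);
            sketch_strip_mac(&no_mac);
            printf("with_mac: %zu bytes, no_mac: %zu bytes\n", with_mac.len, no_mac.len);
            return 0;
    }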
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 3e85437f7106..a648568c5e8f 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c | |||
| @@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, | |||
| 63 | lwt->name ? : "<unknown>"); | 63 | lwt->name ? : "<unknown>"); |
| 64 | ret = BPF_OK; | 64 | ret = BPF_OK; |
| 65 | } else { | 65 | } else { |
| 66 | skb_reset_mac_header(skb); | ||
| 66 | ret = skb_do_redirect(skb); | 67 | ret = skb_do_redirect(skb); |
| 67 | if (ret == 0) | 68 | if (ret == 0) |
| 68 | ret = BPF_REDIRECT; | 69 | ret = BPF_REDIRECT; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 763a7b08df67..4230400b9a30 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 19 | 19 | ||
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/kmemleak.h> | ||
| 21 | #include <linux/types.h> | 22 | #include <linux/types.h> |
| 22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
| 23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| @@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) | |||
| 443 | ret = kmalloc(sizeof(*ret), GFP_ATOMIC); | 444 | ret = kmalloc(sizeof(*ret), GFP_ATOMIC); |
| 444 | if (!ret) | 445 | if (!ret) |
| 445 | return NULL; | 446 | return NULL; |
| 446 | if (size <= PAGE_SIZE) | 447 | if (size <= PAGE_SIZE) { |
| 447 | buckets = kzalloc(size, GFP_ATOMIC); | 448 | buckets = kzalloc(size, GFP_ATOMIC); |
| 448 | else | 449 | } else { |
| 449 | buckets = (struct neighbour __rcu **) | 450 | buckets = (struct neighbour __rcu **) |
| 450 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, | 451 | __get_free_pages(GFP_ATOMIC | __GFP_ZERO, |
| 451 | get_order(size)); | 452 | get_order(size)); |
| 453 | kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); | ||
| 454 | } | ||
| 452 | if (!buckets) { | 455 | if (!buckets) { |
| 453 | kfree(ret); | 456 | kfree(ret); |
| 454 | return NULL; | 457 | return NULL; |
| @@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head) | |||
| 468 | size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); | 471 | size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); |
| 469 | struct neighbour __rcu **buckets = nht->hash_buckets; | 472 | struct neighbour __rcu **buckets = nht->hash_buckets; |
| 470 | 473 | ||
| 471 | if (size <= PAGE_SIZE) | 474 | if (size <= PAGE_SIZE) { |
| 472 | kfree(buckets); | 475 | kfree(buckets); |
| 473 | else | 476 | } else { |
| 477 | kmemleak_free(buckets); | ||
| 474 | free_pages((unsigned long)buckets, get_order(size)); | 478 | free_pages((unsigned long)buckets, get_order(size)); |
| 479 | } | ||
| 475 | kfree(nht); | 480 | kfree(nht); |
| 476 | } | 481 | } |
| 477 | 482 | ||
| @@ -1002,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh) | |||
| 1002 | if (neigh->ops->solicit) | 1007 | if (neigh->ops->solicit) |
| 1003 | neigh->ops->solicit(neigh, skb); | 1008 | neigh->ops->solicit(neigh, skb); |
| 1004 | atomic_inc(&neigh->probes); | 1009 | atomic_inc(&neigh->probes); |
| 1005 | kfree_skb(skb); | 1010 | consume_skb(skb); |
| 1006 | } | 1011 | } |
| 1007 | 1012 | ||
| 1008 | /* Called when a timer expires for a neighbour entry. */ | 1013 | /* Called when a timer expires for a neighbour entry. */ |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 37317ffec146..26d848484912 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -5270,7 +5270,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
| 5270 | unsigned long chunk; | 5270 | unsigned long chunk; |
| 5271 | struct sk_buff *skb; | 5271 | struct sk_buff *skb; |
| 5272 | struct page *page; | 5272 | struct page *page; |
| 5273 | gfp_t gfp_head; | ||
| 5274 | int i; | 5273 | int i; |
| 5275 | 5274 | ||
| 5276 | *errcode = -EMSGSIZE; | 5275 | *errcode = -EMSGSIZE; |
| @@ -5280,12 +5279,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
| 5280 | if (npages > MAX_SKB_FRAGS) | 5279 | if (npages > MAX_SKB_FRAGS) |
| 5281 | return NULL; | 5280 | return NULL; |
| 5282 | 5281 | ||
| 5283 | gfp_head = gfp_mask; | ||
| 5284 | if (gfp_head & __GFP_DIRECT_RECLAIM) | ||
| 5285 | gfp_head |= __GFP_RETRY_MAYFAIL; | ||
| 5286 | |||
| 5287 | *errcode = -ENOBUFS; | 5282 | *errcode = -ENOBUFS; |
| 5288 | skb = alloc_skb(header_len, gfp_head); | 5283 | skb = alloc_skb(header_len, gfp_mask); |
| 5289 | if (!skb) | 5284 | if (!skb) |
| 5290 | return NULL; | 5285 | return NULL; |
| 5291 | 5286 | ||
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 5e04ed25bc0e..1e976bb93d99 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c | |||
| @@ -1,28 +1,54 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | #include <linux/init.h> | ||
| 3 | #include <linux/module.h> | ||
| 2 | #include <linux/uaccess.h> | 4 | #include <linux/uaccess.h> |
| 3 | #include <linux/bpfilter.h> | 5 | #include <linux/bpfilter.h> |
| 4 | #include <uapi/linux/bpf.h> | 6 | #include <uapi/linux/bpf.h> |
| 5 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
| 6 | #include <linux/kmod.h> | 8 | #include <linux/kmod.h> |
| 9 | #include <linux/fs.h> | ||
| 10 | #include <linux/file.h> | ||
| 7 | 11 | ||
| 8 | int (*bpfilter_process_sockopt)(struct sock *sk, int optname, | 12 | struct bpfilter_umh_ops bpfilter_ops; |
| 9 | char __user *optval, | 13 | EXPORT_SYMBOL_GPL(bpfilter_ops); |
| 10 | unsigned int optlen, bool is_set); | 14 | |
| 11 | EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); | 15 | static void bpfilter_umh_cleanup(struct umh_info *info) |
| 16 | { | ||
| 17 | mutex_lock(&bpfilter_ops.lock); | ||
| 18 | bpfilter_ops.stop = true; | ||
| 19 | fput(info->pipe_to_umh); | ||
| 20 | fput(info->pipe_from_umh); | ||
| 21 | info->pid = 0; | ||
| 22 | mutex_unlock(&bpfilter_ops.lock); | ||
| 23 | } | ||
| 12 | 24 | ||
| 13 | static int bpfilter_mbox_request(struct sock *sk, int optname, | 25 | static int bpfilter_mbox_request(struct sock *sk, int optname, |
| 14 | char __user *optval, | 26 | char __user *optval, |
| 15 | unsigned int optlen, bool is_set) | 27 | unsigned int optlen, bool is_set) |
| 16 | { | 28 | { |
| 17 | if (!bpfilter_process_sockopt) { | 29 | int err; |
| 18 | int err = request_module("bpfilter"); | 30 | mutex_lock(&bpfilter_ops.lock); |
| 31 | if (!bpfilter_ops.sockopt) { | ||
| 32 | mutex_unlock(&bpfilter_ops.lock); | ||
| 33 | err = request_module("bpfilter"); | ||
| 34 | mutex_lock(&bpfilter_ops.lock); | ||
| 19 | 35 | ||
| 20 | if (err) | 36 | if (err) |
| 21 | return err; | 37 | goto out; |
| 22 | if (!bpfilter_process_sockopt) | 38 | if (!bpfilter_ops.sockopt) { |
| 23 | return -ECHILD; | 39 | err = -ECHILD; |
| 40 | goto out; | ||
| 41 | } | ||
| 42 | } | ||
| 43 | if (bpfilter_ops.stop) { | ||
| 44 | err = bpfilter_ops.start(); | ||
| 45 | if (err) | ||
| 46 | goto out; | ||
| 24 | } | 47 | } |
| 25 | return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); | 48 | err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set); |
| 49 | out: | ||
| 50 | mutex_unlock(&bpfilter_ops.lock); | ||
| 51 | return err; | ||
| 26 | } | 52 | } |
| 27 | 53 | ||
| 28 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, | 54 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, |
| @@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, | |||
| 41 | 67 | ||
| 42 | return bpfilter_mbox_request(sk, optname, optval, len, false); | 68 | return bpfilter_mbox_request(sk, optname, optval, len, false); |
| 43 | } | 69 | } |
| 70 | |||
| 71 | static int __init bpfilter_sockopt_init(void) | ||
| 72 | { | ||
| 73 | mutex_init(&bpfilter_ops.lock); | ||
| 74 | bpfilter_ops.stop = true; | ||
| 75 | bpfilter_ops.info.cmdline = "bpfilter_umh"; | ||
| 76 | bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | module_init(bpfilter_sockopt_init); | ||
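The reworked bpfilter_mbox_request() keeps all state behind bpfilter_ops.lock, drops the lock only while request_module() runs, then re-checks the handler after reacquiring it. A small pthread sketch of that drop-lock/re-check pattern is below; the handler registry and the load_module() stand-in are assumptions for the demo, not the kernel API.

    /* Illustrative only: check a handler under a lock, load it with the lock
     * dropped, then re-check before use. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
    static int (*handler)(int arg);            /* set once the "module" is loaded */

    static int real_handler(int arg) { return arg * 2; }

    static void load_module(void)              /* stands in for request_module() */
    {
            pthread_mutex_lock(&ops_lock);
            handler = real_handler;
            pthread_mutex_unlock(&ops_lock);
    }

    static int mbox_request(int arg)
    {
            int err;

            pthread_mutex_lock(&ops_lock);
            if (!handler) {
                    pthread_mutex_unlock(&ops_lock); /* don't hold the lock while loading */
                    load_module();
                    pthread_mutex_lock(&ops_lock);
                    if (!handler) {                  /* re-check after reacquiring */
                            err = -1;
                            goto out;
                    }
            }
            err = handler(arg);
    out:
            pthread_mutex_unlock(&ops_lock);
            return err;
    }

    int main(void)
    {
            printf("result: %d\n", mbox_request(21));
            return 0;
    }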
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 04ba321ae5ce..e258a00b4a3d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -1826,7 +1826,7 @@ put_tgt_net: | |||
| 1826 | if (fillargs.netnsid >= 0) | 1826 | if (fillargs.netnsid >= 0) |
| 1827 | put_net(tgt_net); | 1827 | put_net(tgt_net); |
| 1828 | 1828 | ||
| 1829 | return err < 0 ? err : skb->len; | 1829 | return skb->len ? : err; |
| 1830 | } | 1830 | } |
| 1831 | 1831 | ||
| 1832 | static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, | 1832 | static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6df95be96311..fe4f6a624238 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -203,7 +203,7 @@ static void fib_flush(struct net *net) | |||
| 203 | struct fib_table *tb; | 203 | struct fib_table *tb; |
| 204 | 204 | ||
| 205 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) | 205 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) |
| 206 | flushed += fib_table_flush(net, tb); | 206 | flushed += fib_table_flush(net, tb, false); |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | if (flushed) | 209 | if (flushed) |
| @@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net) | |||
| 1463 | 1463 | ||
| 1464 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { | 1464 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { |
| 1465 | hlist_del(&tb->tb_hlist); | 1465 | hlist_del(&tb->tb_hlist); |
| 1466 | fib_table_flush(net, tb); | 1466 | fib_table_flush(net, tb, true); |
| 1467 | fib_free_table(tb); | 1467 | fib_free_table(tb); |
| 1468 | } | 1468 | } |
| 1469 | } | 1469 | } |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 237c9f72b265..a573e37e0615 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb) | |||
| 1856 | } | 1856 | } |
| 1857 | 1857 | ||
| 1858 | /* Caller must hold RTNL. */ | 1858 | /* Caller must hold RTNL. */ |
| 1859 | int fib_table_flush(struct net *net, struct fib_table *tb) | 1859 | int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) |
| 1860 | { | 1860 | { |
| 1861 | struct trie *t = (struct trie *)tb->tb_data; | 1861 | struct trie *t = (struct trie *)tb->tb_data; |
| 1862 | struct key_vector *pn = t->kv; | 1862 | struct key_vector *pn = t->kv; |
| @@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb) | |||
| 1904 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { | 1904 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { |
| 1905 | struct fib_info *fi = fa->fa_info; | 1905 | struct fib_info *fi = fa->fa_info; |
| 1906 | 1906 | ||
| 1907 | if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || | 1907 | if (!fi || tb->tb_id != fa->tb_id || |
| 1908 | tb->tb_id != fa->tb_id) { | 1908 | (!(fi->fib_flags & RTNH_F_DEAD) && |
| 1909 | !fib_props[fa->fa_type].error)) { | ||
| 1910 | slen = fa->fa_slen; | ||
| 1911 | continue; | ||
| 1912 | } | ||
| 1913 | |||
| 1914 | /* Do not flush error routes if network namespace is | ||
| 1915 | * not being dismantled | ||
| 1916 | */ | ||
| 1917 | if (!flush_all && fib_props[fa->fa_type].error) { | ||
| 1909 | slen = fa->fa_slen; | 1918 | slen = fa->fa_slen; |
| 1910 | continue; | 1919 | continue; |
| 1911 | } | 1920 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 0c9f171fb085..437070d1ffb1 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
| @@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
| 1020 | { | 1020 | { |
| 1021 | int transport_offset = skb_transport_offset(skb); | 1021 | int transport_offset = skb_transport_offset(skb); |
| 1022 | struct guehdr *guehdr; | 1022 | struct guehdr *guehdr; |
| 1023 | size_t optlen; | 1023 | size_t len, optlen; |
| 1024 | int ret; | 1024 | int ret; |
| 1025 | 1025 | ||
| 1026 | if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) | 1026 | len = sizeof(struct udphdr) + sizeof(struct guehdr); |
| 1027 | if (!pskb_may_pull(skb, len)) | ||
| 1027 | return -EINVAL; | 1028 | return -EINVAL; |
| 1028 | 1029 | ||
| 1029 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | 1030 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; |
| @@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
| 1058 | 1059 | ||
| 1059 | optlen = guehdr->hlen << 2; | 1060 | optlen = guehdr->hlen << 2; |
| 1060 | 1061 | ||
| 1062 | if (!pskb_may_pull(skb, len + optlen)) | ||
| 1063 | return -EINVAL; | ||
| 1064 | |||
| 1065 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | ||
| 1061 | if (validate_gue_flags(guehdr, optlen)) | 1066 | if (validate_gue_flags(guehdr, optlen)) |
| 1062 | return -EINVAL; | 1067 | return -EINVAL; |
| 1063 | 1068 | ||
| @@ -1065,7 +1070,8 @@ static int gue_err(struct sk_buff *skb, u32 info) | |||
| 1065 | * recursion. Besides, this kind of encapsulation can't even be | 1070 | * recursion. Besides, this kind of encapsulation can't even be |
| 1066 | * configured currently. Discard this. | 1071 | * configured currently. Discard this. |
| 1067 | */ | 1072 | */ |
| 1068 | if (guehdr->proto_ctype == IPPROTO_UDP) | 1073 | if (guehdr->proto_ctype == IPPROTO_UDP || |
| 1074 | guehdr->proto_ctype == IPPROTO_UDPLITE) | ||
| 1069 | return -EOPNOTSUPP; | 1075 | return -EOPNOTSUPP; |
| 1070 | 1076 | ||
| 1071 | skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); | 1077 | skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); |
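gue_err() now calls pskb_may_pull() twice, first for the fixed UDP+GUE header and again once optlen is known, re-reading guehdr after each pull because pulling may relocate the data. Below is a standalone sketch of that two-stage bounds check over a flat buffer; the header layout and field names are illustrative, not struct guehdr.

    /* Illustrative only: validate a fixed header, then its variable-length
     * options, before touching either. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sketch_hdr {
            uint8_t hlen;       /* option length in 4-byte words */
            uint8_t proto;
            uint8_t flags[2];
    };

    static int sketch_parse(const uint8_t *pkt, size_t pkt_len)
    {
            struct sketch_hdr hdr;
            size_t len = sizeof(hdr);
            size_t optlen;

            if (pkt_len < len)                 /* stage 1: fixed header present? */
                    return -1;
            memcpy(&hdr, pkt, sizeof(hdr));

            optlen = (size_t)hdr.hlen << 2;
            if (pkt_len < len + optlen)        /* stage 2: options fully present? */
                    return -1;

            printf("proto %u with %zu option bytes\n", (unsigned)hdr.proto, optlen);
            return 0;
    }

    int main(void)
    {
            uint8_t pkt[8] = { 1 /* hlen: 4 option bytes */, 17, 0, 0,
                               0xde, 0xad, 0xbe, 0xef };

            printf("full packet:      %d\n", sketch_parse(pkt, sizeof(pkt))); /* 0 */
            printf("truncated packet: %d\n", sketch_parse(pkt, 6));           /* -1 */
            return 0;
    }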
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index a4bf22ee3aed..7c4a41dc04bb 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 26 | #include <net/protocol.h> | 26 | #include <net/protocol.h> |
| 27 | #include <net/gre.h> | 27 | #include <net/gre.h> |
| 28 | #include <net/erspan.h> | ||
| 28 | 29 | ||
| 29 | #include <net/icmp.h> | 30 | #include <net/icmp.h> |
| 30 | #include <net/route.h> | 31 | #include <net/route.h> |
| @@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 119 | hdr_len += 4; | 120 | hdr_len += 4; |
| 120 | } | 121 | } |
| 121 | tpi->hdr_len = hdr_len; | 122 | tpi->hdr_len = hdr_len; |
| 123 | |||
| 124 | /* ERSPAN ver 1 and 2 protocol sets GRE key field | ||
| 125 | * to 0 and sets the configured key in the | ||
| 126 | * inner erspan header field | ||
| 127 | */ | ||
| 128 | if (greh->protocol == htons(ETH_P_ERSPAN) || | ||
| 129 | greh->protocol == htons(ETH_P_ERSPAN2)) { | ||
| 130 | struct erspan_base_hdr *ershdr; | ||
| 131 | |||
| 132 | if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) | ||
| 133 | return -EINVAL; | ||
| 134 | |||
| 135 | ershdr = (struct erspan_base_hdr *)options; | ||
| 136 | tpi->key = cpu_to_be32(get_session_id(ershdr)); | ||
| 137 | } | ||
| 138 | |||
| 122 | return hdr_len; | 139 | return hdr_len; |
| 123 | } | 140 | } |
| 124 | EXPORT_SYMBOL(gre_parse_header); | 141 | EXPORT_SYMBOL(gre_parse_header); |
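Because ERSPAN v1/v2 leave the GRE key field zero, gre_parse_header() now derives tpi->key from the session ID carried in the ERSPAN base header. The sketch below extracts a 10-bit session ID from a big-endian halfword; the struct layout and the 10-bit width follow the common ERSPAN encoding and are an assumption here, not a copy of struct erspan_base_hdr.

    /* Illustrative only: pull a 10-bit session id out of a network-order field. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sketch_erspan_base {
            uint16_t ver_vlan;     /* version + VLAN bits */
            uint16_t session_id;   /* low 10 bits carry the session id */
    };

    static uint32_t sketch_session_id(const struct sketch_erspan_base *h)
    {
            return ntohs(h->session_id) & 0x3ff;
    }

    int main(void)
    {
            struct sketch_erspan_base h = { .ver_vlan   = htons(0x1000),
                                            .session_id = htons(0x0123) };

            /* the extracted id then stands in for the (absent) GRE key */
            printf("session id / tunnel key: %u\n", sketch_session_id(&h));
            return 0;
    }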
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d1d09f3e5f9e..20a64fe6254b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 268 | int len; | 268 | int len; |
| 269 | 269 | ||
| 270 | itn = net_generic(net, erspan_net_id); | 270 | itn = net_generic(net, erspan_net_id); |
| 271 | len = gre_hdr_len + sizeof(*ershdr); | ||
| 272 | |||
| 273 | /* Check based hdr len */ | ||
| 274 | if (unlikely(!pskb_may_pull(skb, len))) | ||
| 275 | return PACKET_REJECT; | ||
| 276 | 271 | ||
| 277 | iph = ip_hdr(skb); | 272 | iph = ip_hdr(skb); |
| 278 | ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); | 273 | ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); |
| 279 | ver = ershdr->ver; | 274 | ver = ershdr->ver; |
| 280 | 275 | ||
| 281 | /* The original GRE header does not have key field, | ||
| 282 | * Use ERSPAN 10-bit session ID as key. | ||
| 283 | */ | ||
| 284 | tpi->key = cpu_to_be32(get_session_id(ershdr)); | ||
| 285 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, | 276 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, |
| 286 | tpi->flags | TUNNEL_KEY, | 277 | tpi->flags | TUNNEL_KEY, |
| 287 | iph->saddr, iph->daddr, tpi->key); | 278 | iph->saddr, iph->daddr, tpi->key); |
| @@ -569,8 +560,7 @@ err_free_skb: | |||
| 569 | dev->stats.tx_dropped++; | 560 | dev->stats.tx_dropped++; |
| 570 | } | 561 | } |
| 571 | 562 | ||
| 572 | static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | 563 | static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) |
| 573 | __be16 proto) | ||
| 574 | { | 564 | { |
| 575 | struct ip_tunnel *tunnel = netdev_priv(dev); | 565 | struct ip_tunnel *tunnel = netdev_priv(dev); |
| 576 | struct ip_tunnel_info *tun_info; | 566 | struct ip_tunnel_info *tun_info; |
| @@ -578,10 +568,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 578 | struct erspan_metadata *md; | 568 | struct erspan_metadata *md; |
| 579 | struct rtable *rt = NULL; | 569 | struct rtable *rt = NULL; |
| 580 | bool truncate = false; | 570 | bool truncate = false; |
| 571 | __be16 df, proto; | ||
| 581 | struct flowi4 fl; | 572 | struct flowi4 fl; |
| 582 | int tunnel_hlen; | 573 | int tunnel_hlen; |
| 583 | int version; | 574 | int version; |
| 584 | __be16 df; | ||
| 585 | int nhoff; | 575 | int nhoff; |
| 586 | int thoff; | 576 | int thoff; |
| 587 | 577 | ||
| @@ -626,18 +616,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 626 | if (version == 1) { | 616 | if (version == 1) { |
| 627 | erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), | 617 | erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), |
| 628 | ntohl(md->u.index), truncate, true); | 618 | ntohl(md->u.index), truncate, true); |
| 619 | proto = htons(ETH_P_ERSPAN); | ||
| 629 | } else if (version == 2) { | 620 | } else if (version == 2) { |
| 630 | erspan_build_header_v2(skb, | 621 | erspan_build_header_v2(skb, |
| 631 | ntohl(tunnel_id_to_key32(key->tun_id)), | 622 | ntohl(tunnel_id_to_key32(key->tun_id)), |
| 632 | md->u.md2.dir, | 623 | md->u.md2.dir, |
| 633 | get_hwid(&md->u.md2), | 624 | get_hwid(&md->u.md2), |
| 634 | truncate, true); | 625 | truncate, true); |
| 626 | proto = htons(ETH_P_ERSPAN2); | ||
| 635 | } else { | 627 | } else { |
| 636 | goto err_free_rt; | 628 | goto err_free_rt; |
| 637 | } | 629 | } |
| 638 | 630 | ||
| 639 | gre_build_header(skb, 8, TUNNEL_SEQ, | 631 | gre_build_header(skb, 8, TUNNEL_SEQ, |
| 640 | htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); | 632 | proto, 0, htonl(tunnel->o_seqno++)); |
| 641 | 633 | ||
| 642 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; | 634 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
| 643 | 635 | ||
| @@ -721,12 +713,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
| 721 | { | 713 | { |
| 722 | struct ip_tunnel *tunnel = netdev_priv(dev); | 714 | struct ip_tunnel *tunnel = netdev_priv(dev); |
| 723 | bool truncate = false; | 715 | bool truncate = false; |
| 716 | __be16 proto; | ||
| 724 | 717 | ||
| 725 | if (!pskb_inet_may_pull(skb)) | 718 | if (!pskb_inet_may_pull(skb)) |
| 726 | goto free_skb; | 719 | goto free_skb; |
| 727 | 720 | ||
| 728 | if (tunnel->collect_md) { | 721 | if (tunnel->collect_md) { |
| 729 | erspan_fb_xmit(skb, dev, skb->protocol); | 722 | erspan_fb_xmit(skb, dev); |
| 730 | return NETDEV_TX_OK; | 723 | return NETDEV_TX_OK; |
| 731 | } | 724 | } |
| 732 | 725 | ||
| @@ -742,19 +735,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
| 742 | } | 735 | } |
| 743 | 736 | ||
| 744 | /* Push ERSPAN header */ | 737 | /* Push ERSPAN header */ |
| 745 | if (tunnel->erspan_ver == 1) | 738 | if (tunnel->erspan_ver == 1) { |
| 746 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), | 739 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), |
| 747 | tunnel->index, | 740 | tunnel->index, |
| 748 | truncate, true); | 741 | truncate, true); |
| 749 | else if (tunnel->erspan_ver == 2) | 742 | proto = htons(ETH_P_ERSPAN); |
| 743 | } else if (tunnel->erspan_ver == 2) { | ||
| 750 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), | 744 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), |
| 751 | tunnel->dir, tunnel->hwid, | 745 | tunnel->dir, tunnel->hwid, |
| 752 | truncate, true); | 746 | truncate, true); |
| 753 | else | 747 | proto = htons(ETH_P_ERSPAN2); |
| 748 | } else { | ||
| 754 | goto free_skb; | 749 | goto free_skb; |
| 750 | } | ||
| 755 | 751 | ||
| 756 | tunnel->parms.o_flags &= ~TUNNEL_KEY; | 752 | tunnel->parms.o_flags &= ~TUNNEL_KEY; |
| 757 | __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); | 753 | __gre_xmit(skb, dev, &tunnel->parms.iph, proto); |
| 758 | return NETDEV_TX_OK; | 754 | return NETDEV_TX_OK; |
| 759 | 755 | ||
| 760 | free_skb: | 756 | free_skb: |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 26921f6b3b92..51d8efba6de2 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) | |||
| 488 | goto drop; | 488 | goto drop; |
| 489 | } | 489 | } |
| 490 | 490 | ||
| 491 | iph = ip_hdr(skb); | ||
| 491 | skb->transport_header = skb->network_header + iph->ihl*4; | 492 | skb->transport_header = skb->network_header + iph->ihl*4; |
| 492 | 493 | ||
| 493 | /* Remove any debris in the socket control block */ | 494 | /* Remove any debris in the socket control block */ |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fffcc130900e..82f341e84fae 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
| @@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) | |||
| 148 | 148 | ||
| 149 | static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) | 149 | static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) |
| 150 | { | 150 | { |
| 151 | __be16 _ports[2], *ports; | ||
| 151 | struct sockaddr_in sin; | 152 | struct sockaddr_in sin; |
| 152 | __be16 *ports; | ||
| 153 | int end; | ||
| 154 | |||
| 155 | end = skb_transport_offset(skb) + 4; | ||
| 156 | if (end > 0 && !pskb_may_pull(skb, end)) | ||
| 157 | return; | ||
| 158 | 153 | ||
| 159 | /* All current transport protocols have the port numbers in the | 154 | /* All current transport protocols have the port numbers in the |
| 160 | * first four bytes of the transport header and this function is | 155 | * first four bytes of the transport header and this function is |
| 161 | * written with this assumption in mind. | 156 | * written with this assumption in mind. |
| 162 | */ | 157 | */ |
| 163 | ports = (__be16 *)skb_transport_header(skb); | 158 | ports = skb_header_pointer(skb, skb_transport_offset(skb), |
| 159 | sizeof(_ports), &_ports); | ||
| 160 | if (!ports) | ||
| 161 | return; | ||
| 164 | 162 | ||
| 165 | sin.sin_family = AF_INET; | 163 | sin.sin_family = AF_INET; |
| 166 | sin.sin_addr.s_addr = ip_hdr(skb)->daddr; | 164 | sin.sin_addr.s_addr = ip_hdr(skb)->daddr; |
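ip_cmsg_recv_dstaddr() now reads the port words through skb_header_pointer(), which returns a direct pointer when the bytes are linear and otherwise copies them into a caller-supplied buffer, instead of assuming an earlier pskb_may_pull() succeeded. A standalone sketch of that copy-or-point idiom over a two-segment packet follows; the segmented packet type is invented for the example.

    /* Illustrative only: return a pointer to header bytes, copying them into a
     * caller-supplied buffer when they are not contiguous. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sketch_seg_pkt {
            const uint8_t *head;  size_t head_len;   /* linear part */
            const uint8_t *frag;  size_t frag_len;   /* non-linear part */
    };

    static const void *sketch_header_pointer(const struct sketch_seg_pkt *p,
                                             size_t off, size_t len, void *buf)
    {
            size_t first;

            if (off + len <= p->head_len)
                    return p->head + off;                 /* already contiguous */
            if (off + len > p->head_len + p->frag_len)
                    return NULL;                          /* out of bounds */

            /* straddles the boundary: assemble a private copy */
            first = off < p->head_len ? p->head_len - off : 0;
            if (first)
                    memcpy(buf, p->head + off, first);
            memcpy((uint8_t *)buf + first,
                   p->frag + (off + first - p->head_len), len - first);
            return buf;
    }

    int main(void)
    {
            uint8_t head[4] = { 0x00, 0x35, 0xc0, 0x00 };
            uint8_t frag[2] = { 0x01, 0x02 };
            struct sketch_seg_pkt pkt = { head, sizeof(head), frag, sizeof(frag) };
            uint8_t tmp[4];
            const uint8_t *ports = sketch_header_pointer(&pkt, 2, 4, tmp);

            if (ports)
                    printf("ports bytes: %02x %02x %02x %02x\n",
                           ports[0], ports[1], ports[2], ports[3]);
            return 0;
    }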
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..054d01c16dc6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 644 | dst = tnl_params->daddr; | 644 | dst = tnl_params->daddr; |
| 645 | if (dst == 0) { | 645 | if (dst == 0) { |
| 646 | /* NBMA tunnel */ | 646 | /* NBMA tunnel */ |
| 647 | struct ip_tunnel_info *tun_info; | ||
| 647 | 648 | ||
| 648 | if (!skb_dst(skb)) { | 649 | if (!skb_dst(skb)) { |
| 649 | dev->stats.tx_fifo_errors++; | 650 | dev->stats.tx_fifo_errors++; |
| 650 | goto tx_error; | 651 | goto tx_error; |
| 651 | } | 652 | } |
| 652 | 653 | ||
| 653 | if (skb->protocol == htons(ETH_P_IP)) { | 654 | tun_info = skb_tunnel_info(skb); |
| 655 | if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) && | ||
| 656 | ip_tunnel_info_af(tun_info) == AF_INET && | ||
| 657 | tun_info->key.u.ipv4.dst) | ||
| 658 | dst = tun_info->key.u.ipv4.dst; | ||
| 659 | else if (skb->protocol == htons(ETH_P_IP)) { | ||
| 654 | rt = skb_rtable(skb); | 660 | rt = skb_rtable(skb); |
| 655 | dst = rt_nexthop(rt, inner_iph->daddr); | 661 | dst = rt_nexthop(rt, inner_iph->daddr); |
| 656 | } | 662 | } |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 27e2f6837062..2079145a3b7c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) | |||
| 1186 | flags = msg->msg_flags; | 1186 | flags = msg->msg_flags; |
| 1187 | 1187 | ||
| 1188 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { | 1188 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { |
| 1189 | if (sk->sk_state != TCP_ESTABLISHED) { | 1189 | if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { |
| 1190 | err = -EINVAL; | 1190 | err = -EINVAL; |
| 1191 | goto out_err; | 1191 | goto out_err; |
| 1192 | } | 1192 | } |
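The zerocopy check in tcp_sendmsg_locked() now accepts both ESTABLISHED and CLOSE_WAIT by testing a state bitmask rather than a single state, using the usual (1 << state) & ~(mask) idiom. A tiny sketch of that membership test is below; the state numbering is made up for the demo and does not match the kernel's TCP state values.

    /* Illustrative only: test set membership with a shifted bit against a mask. */
    #include <stdio.h>

    enum sketch_state { ST_SYN_SENT = 1, ST_ESTABLISHED, ST_CLOSE_WAIT, ST_CLOSED };

    #define STF(s)      (1u << (s))
    #define STF_SEND_OK (STF(ST_ESTABLISHED) | STF(ST_CLOSE_WAIT))

    static int may_send(enum sketch_state s)
    {
            /* reject any state outside the allowed set */
            return !(STF(s) & ~STF_SEND_OK);
    }

    int main(void)
    {
            printf("ESTABLISHED: %d\n", may_send(ST_ESTABLISHED)); /* 1 */
            printf("CLOSE_WAIT:  %d\n", may_send(ST_CLOSE_WAIT));  /* 1 */
            printf("SYN_SENT:    %d\n", may_send(ST_SYN_SENT));    /* 0 */
            return 0;
    }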
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index f87dbc78b6bc..71a29e9c0620 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
| 226 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 226 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
| 227 | if (icsk->icsk_retransmits) { | 227 | if (icsk->icsk_retransmits) { |
| 228 | dst_negative_advice(sk); | 228 | dst_negative_advice(sk); |
| 229 | } else if (!tp->syn_data && !tp->syn_fastopen) { | 229 | } else { |
| 230 | sk_rethink_txhash(sk); | 230 | sk_rethink_txhash(sk); |
| 231 | } | 231 | } |
| 232 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; | 232 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3fb0ed5e4789..5c3cd5d84a6f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, | |||
| 847 | const int hlen = skb_network_header_len(skb) + | 847 | const int hlen = skb_network_header_len(skb) + |
| 848 | sizeof(struct udphdr); | 848 | sizeof(struct udphdr); |
| 849 | 849 | ||
| 850 | if (hlen + cork->gso_size > cork->fragsize) | 850 | if (hlen + cork->gso_size > cork->fragsize) { |
| 851 | kfree_skb(skb); | ||
| 851 | return -EINVAL; | 852 | return -EINVAL; |
| 852 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | 853 | } |
| 854 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
| 855 | kfree_skb(skb); | ||
| 853 | return -EINVAL; | 856 | return -EINVAL; |
| 854 | if (sk->sk_no_check_tx) | 857 | } |
| 858 | if (sk->sk_no_check_tx) { | ||
| 859 | kfree_skb(skb); | ||
| 855 | return -EINVAL; | 860 | return -EINVAL; |
| 861 | } | ||
| 856 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | 862 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || |
| 857 | dst_xfrm(skb_dst(skb))) | 863 | dst_xfrm(skb_dst(skb))) { |
| 864 | kfree_skb(skb); | ||
| 858 | return -EIO; | 865 | return -EIO; |
| 866 | } | ||
| 859 | 867 | ||
| 860 | skb_shinfo(skb)->gso_size = cork->gso_size; | 868 | skb_shinfo(skb)->gso_size = cork->gso_size; |
| 861 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | 869 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; |
| @@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) | |||
| 1918 | } | 1926 | } |
| 1919 | EXPORT_SYMBOL(udp_lib_rehash); | 1927 | EXPORT_SYMBOL(udp_lib_rehash); |
| 1920 | 1928 | ||
| 1921 | static void udp_v4_rehash(struct sock *sk) | 1929 | void udp_v4_rehash(struct sock *sk) |
| 1922 | { | 1930 | { |
| 1923 | u16 new_hash = ipv4_portaddr_hash(sock_net(sk), | 1931 | u16 new_hash = ipv4_portaddr_hash(sock_net(sk), |
| 1924 | inet_sk(sk)->inet_rcv_saddr, | 1932 | inet_sk(sk)->inet_rcv_saddr, |
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 322672655419..6b2fa77eeb1c 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
| @@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int); | |||
| 10 | int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); | 10 | int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); |
| 11 | 11 | ||
| 12 | int udp_v4_get_port(struct sock *sk, unsigned short snum); | 12 | int udp_v4_get_port(struct sock *sk, unsigned short snum); |
| 13 | void udp_v4_rehash(struct sock *sk); | ||
| 13 | 14 | ||
| 14 | int udp_setsockopt(struct sock *sk, int level, int optname, | 15 | int udp_setsockopt(struct sock *sk, int level, int optname, |
| 15 | char __user *optval, unsigned int optlen); | 16 | char __user *optval, unsigned int optlen); |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 39c7f17d916f..3c94b8f0ff27 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
| @@ -53,6 +53,7 @@ struct proto udplite_prot = { | |||
| 53 | .sendpage = udp_sendpage, | 53 | .sendpage = udp_sendpage, |
| 54 | .hash = udp_lib_hash, | 54 | .hash = udp_lib_hash, |
| 55 | .unhash = udp_lib_unhash, | 55 | .unhash = udp_lib_unhash, |
| 56 | .rehash = udp_v4_rehash, | ||
| 56 | .get_port = udp_v4_get_port, | 57 | .get_port = udp_v4_get_port, |
| 57 | .memory_allocated = &udp_memory_allocated, | 58 | .memory_allocated = &udp_memory_allocated, |
| 58 | .sysctl_mem = sysctl_udp_mem, | 59 | .sysctl_mem = sysctl_udp_mem, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8eeec6eb2bd3..84c358804355 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -3495,8 +3495,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3495 | 3495 | ||
| 3496 | if (!addrconf_link_ready(dev)) { | 3496 | if (!addrconf_link_ready(dev)) { |
| 3497 | /* device is not ready yet. */ | 3497 | /* device is not ready yet. */ |
| 3498 | pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", | 3498 | pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n", |
| 3499 | dev->name); | 3499 | dev->name); |
| 3500 | break; | 3500 | break; |
| 3501 | } | 3501 | } |
| 3502 | 3502 | ||
| @@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 5120 | if (idev) { | 5120 | if (idev) { |
| 5121 | err = in6_dump_addrs(idev, skb, cb, s_ip_idx, | 5121 | err = in6_dump_addrs(idev, skb, cb, s_ip_idx, |
| 5122 | &fillargs); | 5122 | &fillargs); |
| 5123 | if (err > 0) | ||
| 5124 | err = 0; | ||
| 5123 | } | 5125 | } |
| 5124 | goto put_tgt_net; | 5126 | goto put_tgt_net; |
| 5125 | } | 5127 | } |
| @@ -5154,7 +5156,7 @@ put_tgt_net: | |||
| 5154 | if (fillargs.netnsid >= 0) | 5156 | if (fillargs.netnsid >= 0) |
| 5155 | put_net(tgt_net); | 5157 | put_net(tgt_net); |
| 5156 | 5158 | ||
| 5157 | return err < 0 ? err : skb->len; | 5159 | return skb->len ? : err; |
| 5158 | } | 5160 | } |
| 5159 | 5161 | ||
| 5160 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | 5162 | static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0bfb6cc0a30a..d99753b5e39b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
| @@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, | |||
| 310 | 310 | ||
| 311 | /* Check if the address belongs to the host. */ | 311 | /* Check if the address belongs to the host. */ |
| 312 | if (addr_type == IPV6_ADDR_MAPPED) { | 312 | if (addr_type == IPV6_ADDR_MAPPED) { |
| 313 | struct net_device *dev = NULL; | ||
| 313 | int chk_addr_ret; | 314 | int chk_addr_ret; |
| 314 | 315 | ||
| 315 | /* Binding to v4-mapped address on a v6-only socket | 316 | /* Binding to v4-mapped address on a v6-only socket |
| @@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, | |||
| 320 | goto out; | 321 | goto out; |
| 321 | } | 322 | } |
| 322 | 323 | ||
| 324 | rcu_read_lock(); | ||
| 325 | if (sk->sk_bound_dev_if) { | ||
| 326 | dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); | ||
| 327 | if (!dev) { | ||
| 328 | err = -ENODEV; | ||
| 329 | goto out_unlock; | ||
| 330 | } | ||
| 331 | } | ||
| 332 | |||
| 323 | /* Reproduce AF_INET checks to make the bindings consistent */ | 333 | /* Reproduce AF_INET checks to make the bindings consistent */ |
| 324 | v4addr = addr->sin6_addr.s6_addr32[3]; | 334 | v4addr = addr->sin6_addr.s6_addr32[3]; |
| 325 | chk_addr_ret = inet_addr_type(net, v4addr); | 335 | chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr); |
| 336 | rcu_read_unlock(); | ||
| 337 | |||
| 326 | if (!inet_can_nonlocal_bind(net, inet) && | 338 | if (!inet_can_nonlocal_bind(net, inet) && |
| 327 | v4addr != htonl(INADDR_ANY) && | 339 | v4addr != htonl(INADDR_ANY) && |
| 328 | chk_addr_ret != RTN_LOCAL && | 340 | chk_addr_ret != RTN_LOCAL && |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index bde08aa549f3..ee4a4e54d016 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
| @@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) | |||
| 341 | skb_reset_network_header(skb); | 341 | skb_reset_network_header(skb); |
| 342 | iph = ipv6_hdr(skb); | 342 | iph = ipv6_hdr(skb); |
| 343 | iph->daddr = fl6->daddr; | 343 | iph->daddr = fl6->daddr; |
| 344 | ip6_flow_hdr(iph, 0, 0); | ||
| 344 | 345 | ||
| 345 | serr = SKB_EXT_ERR(skb); | 346 | serr = SKB_EXT_ERR(skb); |
| 346 | serr->ee.ee_errno = err; | 347 | serr->ee.ee_errno = err; |
| @@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, | |||
| 700 | } | 701 | } |
| 701 | if (np->rxopt.bits.rxorigdstaddr) { | 702 | if (np->rxopt.bits.rxorigdstaddr) { |
| 702 | struct sockaddr_in6 sin6; | 703 | struct sockaddr_in6 sin6; |
| 703 | __be16 *ports; | 704 | __be16 _ports[2], *ports; |
| 704 | int end; | ||
| 705 | 705 | ||
| 706 | end = skb_transport_offset(skb) + 4; | 706 | ports = skb_header_pointer(skb, skb_transport_offset(skb), |
| 707 | if (end <= 0 || pskb_may_pull(skb, end)) { | 707 | sizeof(_ports), &_ports); |
| 708 | if (ports) { | ||
| 708 | /* All current transport protocols have the port numbers in the | 709 | /* All current transport protocols have the port numbers in the |
| 709 | * first four bytes of the transport header and this function is | 710 | * first four bytes of the transport header and this function is |
| 710 | * written with this assumption in mind. | 711 | * written with this assumption in mind. |
| 711 | */ | 712 | */ |
| 712 | ports = (__be16 *)skb_transport_header(skb); | ||
| 713 | |||
| 714 | sin6.sin6_family = AF_INET6; | 713 | sin6.sin6_family = AF_INET6; |
| 715 | sin6.sin6_addr = ipv6_hdr(skb)->daddr; | 714 | sin6.sin6_addr = ipv6_hdr(skb)->daddr; |
| 716 | sin6.sin6_port = ports[1]; | 715 | sin6.sin6_port = ports[1]; |
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index bd675c61deb1..b858bd5280bf 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
| @@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 90 | { | 90 | { |
| 91 | int transport_offset = skb_transport_offset(skb); | 91 | int transport_offset = skb_transport_offset(skb); |
| 92 | struct guehdr *guehdr; | 92 | struct guehdr *guehdr; |
| 93 | size_t optlen; | 93 | size_t len, optlen; |
| 94 | int ret; | 94 | int ret; |
| 95 | 95 | ||
| 96 | if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) | 96 | len = sizeof(struct udphdr) + sizeof(struct guehdr); |
| 97 | if (!pskb_may_pull(skb, len)) | ||
| 97 | return -EINVAL; | 98 | return -EINVAL; |
| 98 | 99 | ||
| 99 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | 100 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; |
| @@ -128,9 +129,21 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 128 | 129 | ||
| 129 | optlen = guehdr->hlen << 2; | 130 | optlen = guehdr->hlen << 2; |
| 130 | 131 | ||
| 132 | if (!pskb_may_pull(skb, len + optlen)) | ||
| 133 | return -EINVAL; | ||
| 134 | |||
| 135 | guehdr = (struct guehdr *)&udp_hdr(skb)[1]; | ||
| 131 | if (validate_gue_flags(guehdr, optlen)) | 136 | if (validate_gue_flags(guehdr, optlen)) |
| 132 | return -EINVAL; | 137 | return -EINVAL; |
| 133 | 138 | ||
| 139 | /* Handling exceptions for direct UDP encapsulation in GUE would lead to | ||
| 140 | * recursion. Besides, this kind of encapsulation can't even be | ||
| 141 | * configured currently. Discard this. | ||
| 142 | */ | ||
| 143 | if (guehdr->proto_ctype == IPPROTO_UDP || | ||
| 144 | guehdr->proto_ctype == IPPROTO_UDPLITE) | ||
| 145 | return -EOPNOTSUPP; | ||
| 146 | |||
| 134 | skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); | 147 | skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); |
| 135 | ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, | 148 | ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, |
| 136 | opt, type, code, offset, info); | 149 | opt, type, code, offset, info); |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5d7aa2c2770c..bbcdfd299692 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
| @@ -423,10 +423,10 @@ static int icmp6_iif(const struct sk_buff *skb) | |||
| 423 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | 423 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, |
| 424 | const struct in6_addr *force_saddr) | 424 | const struct in6_addr *force_saddr) |
| 425 | { | 425 | { |
| 426 | struct net *net = dev_net(skb->dev); | ||
| 427 | struct inet6_dev *idev = NULL; | 426 | struct inet6_dev *idev = NULL; |
| 428 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 427 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
| 429 | struct sock *sk; | 428 | struct sock *sk; |
| 429 | struct net *net; | ||
| 430 | struct ipv6_pinfo *np; | 430 | struct ipv6_pinfo *np; |
| 431 | const struct in6_addr *saddr = NULL; | 431 | const struct in6_addr *saddr = NULL; |
| 432 | struct dst_entry *dst; | 432 | struct dst_entry *dst; |
| @@ -437,12 +437,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
| 437 | int iif = 0; | 437 | int iif = 0; |
| 438 | int addr_type = 0; | 438 | int addr_type = 0; |
| 439 | int len; | 439 | int len; |
| 440 | u32 mark = IP6_REPLY_MARK(net, skb->mark); | 440 | u32 mark; |
| 441 | 441 | ||
| 442 | if ((u8 *)hdr < skb->head || | 442 | if ((u8 *)hdr < skb->head || |
| 443 | (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) | 443 | (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) |
| 444 | return; | 444 | return; |
| 445 | 445 | ||
| 446 | if (!skb->dev) | ||
| 447 | return; | ||
| 448 | net = dev_net(skb->dev); | ||
| 449 | mark = IP6_REPLY_MARK(net, skb->mark); | ||
| 446 | /* | 450 | /* |
| 447 | * Make sure we respect the rules | 451 | * Make sure we respect the rules |
| 448 | * i.e. RFC 1885 2.4(e) | 452 | * i.e. RFC 1885 2.4(e) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 09d0826742f8..4416368dbd49 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, | |||
| 534 | struct ip6_tnl *tunnel; | 534 | struct ip6_tnl *tunnel; |
| 535 | u8 ver; | 535 | u8 ver; |
| 536 | 536 | ||
| 537 | if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr)))) | ||
| 538 | return PACKET_REJECT; | ||
| 539 | |||
| 540 | ipv6h = ipv6_hdr(skb); | 537 | ipv6h = ipv6_hdr(skb); |
| 541 | ershdr = (struct erspan_base_hdr *)skb->data; | 538 | ershdr = (struct erspan_base_hdr *)skb->data; |
| 542 | ver = ershdr->ver; | 539 | ver = ershdr->ver; |
| 543 | tpi->key = cpu_to_be32(get_session_id(ershdr)); | ||
| 544 | 540 | ||
| 545 | tunnel = ip6gre_tunnel_lookup(skb->dev, | 541 | tunnel = ip6gre_tunnel_lookup(skb->dev, |
| 546 | &ipv6h->saddr, &ipv6h->daddr, tpi->key, | 542 | &ipv6h->saddr, &ipv6h->daddr, tpi->key, |
| @@ -922,6 +918,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
| 922 | __u8 dsfield = false; | 918 | __u8 dsfield = false; |
| 923 | struct flowi6 fl6; | 919 | struct flowi6 fl6; |
| 924 | int err = -EINVAL; | 920 | int err = -EINVAL; |
| 921 | __be16 proto; | ||
| 925 | __u32 mtu; | 922 | __u32 mtu; |
| 926 | int nhoff; | 923 | int nhoff; |
| 927 | int thoff; | 924 | int thoff; |
| @@ -1035,8 +1032,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
| 1035 | } | 1032 | } |
| 1036 | 1033 | ||
| 1037 | /* Push GRE header. */ | 1034 | /* Push GRE header. */ |
| 1038 | gre_build_header(skb, 8, TUNNEL_SEQ, | 1035 | proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN) |
| 1039 | htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); | 1036 | : htons(ETH_P_ERSPAN2); |
| 1037 | gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); | ||
| 1040 | 1038 | ||
| 1041 | /* TooBig packet may have updated dst->dev's mtu */ | 1039 | /* TooBig packet may have updated dst->dev's mtu */ |
| 1042 | if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) | 1040 | if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) |
| @@ -1169,6 +1167,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, | |||
| 1169 | t->parms.i_flags = p->i_flags; | 1167 | t->parms.i_flags = p->i_flags; |
| 1170 | t->parms.o_flags = p->o_flags; | 1168 | t->parms.o_flags = p->o_flags; |
| 1171 | t->parms.fwmark = p->fwmark; | 1169 | t->parms.fwmark = p->fwmark; |
| 1170 | t->parms.erspan_ver = p->erspan_ver; | ||
| 1171 | t->parms.index = p->index; | ||
| 1172 | t->parms.dir = p->dir; | ||
| 1173 | t->parms.hwid = p->hwid; | ||
| 1172 | dst_cache_reset(&t->dst_cache); | 1174 | dst_cache_reset(&t->dst_cache); |
| 1173 | } | 1175 | } |
| 1174 | 1176 | ||
| @@ -2025,9 +2027,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | |||
| 2025 | struct nlattr *data[], | 2027 | struct nlattr *data[], |
| 2026 | struct netlink_ext_ack *extack) | 2028 | struct netlink_ext_ack *extack) |
| 2027 | { | 2029 | { |
| 2028 | struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); | 2030 | struct ip6_tnl *t = netdev_priv(dev); |
| 2031 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | ||
| 2029 | struct __ip6_tnl_parm p; | 2032 | struct __ip6_tnl_parm p; |
| 2030 | struct ip6_tnl *t; | ||
| 2031 | 2033 | ||
| 2032 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); | 2034 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); |
| 2033 | if (IS_ERR(t)) | 2035 | if (IS_ERR(t)) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 40b225f87d5e..964491cf3672 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -4251,17 +4251,6 @@ struct rt6_nh { | |||
| 4251 | struct list_head next; | 4251 | struct list_head next; |
| 4252 | }; | 4252 | }; |
| 4253 | 4253 | ||
| 4254 | static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) | ||
| 4255 | { | ||
| 4256 | struct rt6_nh *nh; | ||
| 4257 | |||
| 4258 | list_for_each_entry(nh, rt6_nh_list, next) { | ||
| 4259 | pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", | ||
| 4260 | &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, | ||
| 4261 | nh->r_cfg.fc_ifindex); | ||
| 4262 | } | ||
| 4263 | } | ||
| 4264 | |||
| 4265 | static int ip6_route_info_append(struct net *net, | 4254 | static int ip6_route_info_append(struct net *net, |
| 4266 | struct list_head *rt6_nh_list, | 4255 | struct list_head *rt6_nh_list, |
| 4267 | struct fib6_info *rt, | 4256 | struct fib6_info *rt, |
| @@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, | |||
| 4407 | nh->fib6_info = NULL; | 4396 | nh->fib6_info = NULL; |
| 4408 | if (err) { | 4397 | if (err) { |
| 4409 | if (replace && nhn) | 4398 | if (replace && nhn) |
| 4410 | ip6_print_replace_route_err(&rt6_nh_list); | 4399 | NL_SET_ERR_MSG_MOD(extack, |
| 4400 | "multipath route replace failed (check consistency of installed routes)"); | ||
| 4411 | err_nh = nh; | 4401 | err_nh = nh; |
| 4412 | goto add_errout; | 4402 | goto add_errout; |
| 4413 | } | 4403 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9cbf363172bd..2596ffdeebea 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
| 102 | return udp_lib_get_port(sk, snum, hash2_nulladdr); | 102 | return udp_lib_get_port(sk, snum, hash2_nulladdr); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | static void udp_v6_rehash(struct sock *sk) | 105 | void udp_v6_rehash(struct sock *sk) |
| 106 | { | 106 | { |
| 107 | u16 new_hash = ipv6_portaddr_hash(sock_net(sk), | 107 | u16 new_hash = ipv6_portaddr_hash(sock_net(sk), |
| 108 | &sk->sk_v6_rcv_saddr, | 108 | &sk->sk_v6_rcv_saddr, |
| @@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, | |||
| 1132 | const int hlen = skb_network_header_len(skb) + | 1132 | const int hlen = skb_network_header_len(skb) + |
| 1133 | sizeof(struct udphdr); | 1133 | sizeof(struct udphdr); |
| 1134 | 1134 | ||
| 1135 | if (hlen + cork->gso_size > cork->fragsize) | 1135 | if (hlen + cork->gso_size > cork->fragsize) { |
| 1136 | kfree_skb(skb); | ||
| 1136 | return -EINVAL; | 1137 | return -EINVAL; |
| 1137 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | 1138 | } |
| 1139 | if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
| 1140 | kfree_skb(skb); | ||
| 1138 | return -EINVAL; | 1141 | return -EINVAL; |
| 1139 | if (udp_sk(sk)->no_check6_tx) | 1142 | } |
| 1143 | if (udp_sk(sk)->no_check6_tx) { | ||
| 1144 | kfree_skb(skb); | ||
| 1140 | return -EINVAL; | 1145 | return -EINVAL; |
| 1146 | } | ||
| 1141 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | 1147 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || |
| 1142 | dst_xfrm(skb_dst(skb))) | 1148 | dst_xfrm(skb_dst(skb))) { |
| 1149 | kfree_skb(skb); | ||
| 1143 | return -EIO; | 1150 | return -EIO; |
| 1151 | } | ||
| 1144 | 1152 | ||
| 1145 | skb_shinfo(skb)->gso_size = cork->gso_size; | 1153 | skb_shinfo(skb)->gso_size = cork->gso_size; |
| 1146 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | 1154 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; |
| @@ -1390,10 +1398,7 @@ do_udp_sendmsg: | |||
| 1390 | ipc6.opt = opt; | 1398 | ipc6.opt = opt; |
| 1391 | 1399 | ||
| 1392 | fl6.flowi6_proto = sk->sk_protocol; | 1400 | fl6.flowi6_proto = sk->sk_protocol; |
| 1393 | if (!ipv6_addr_any(daddr)) | 1401 | fl6.daddr = *daddr; |
| 1394 | fl6.daddr = *daddr; | ||
| 1395 | else | ||
| 1396 | fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | ||
| 1397 | if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) | 1402 | if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) |
| 1398 | fl6.saddr = np->saddr; | 1403 | fl6.saddr = np->saddr; |
| 1399 | fl6.fl6_sport = inet->inet_sport; | 1404 | fl6.fl6_sport = inet->inet_sport; |
| @@ -1421,6 +1426,9 @@ do_udp_sendmsg: | |||
| 1421 | } | 1426 | } |
| 1422 | } | 1427 | } |
| 1423 | 1428 | ||
| 1429 | if (ipv6_addr_any(&fl6.daddr)) | ||
| 1430 | fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ | ||
| 1431 | |||
| 1424 | final_p = fl6_update_dst(&fl6, opt, &final); | 1432 | final_p = fl6_update_dst(&fl6, opt, &final); |
| 1425 | if (final_p) | 1433 | if (final_p) |
| 1426 | connected = false; | 1434 | connected = false; |
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 5730e6503cb4..20e324b6f358 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
| @@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, | |||
| 13 | __be32, struct udp_table *); | 13 | __be32, struct udp_table *); |
| 14 | 14 | ||
| 15 | int udp_v6_get_port(struct sock *sk, unsigned short snum); | 15 | int udp_v6_get_port(struct sock *sk, unsigned short snum); |
| 16 | void udp_v6_rehash(struct sock *sk); | ||
| 16 | 17 | ||
| 17 | int udpv6_getsockopt(struct sock *sk, int level, int optname, | 18 | int udpv6_getsockopt(struct sock *sk, int level, int optname, |
| 18 | char __user *optval, int __user *optlen); | 19 | char __user *optval, int __user *optlen); |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index a125aebc29e5..f35907836444 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
| @@ -49,6 +49,7 @@ struct proto udplitev6_prot = { | |||
| 49 | .recvmsg = udpv6_recvmsg, | 49 | .recvmsg = udpv6_recvmsg, |
| 50 | .hash = udp_lib_hash, | 50 | .hash = udp_lib_hash, |
| 51 | .unhash = udp_lib_unhash, | 51 | .unhash = udp_lib_unhash, |
| 52 | .rehash = udp_v6_rehash, | ||
| 52 | .get_port = udp_v6_get_port, | 53 | .get_port = udp_v6_get_port, |
| 53 | .memory_allocated = &udp_memory_allocated, | 54 | .memory_allocated = &udp_memory_allocated, |
| 54 | .sysctl_mem = sysctl_udp_mem, | 55 | .sysctl_mem = sysctl_udp_mem, |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index de65fe3ed9cc..2493c74c2d37 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -1490,6 +1490,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
| 1490 | if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) | 1490 | if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) |
| 1491 | sta->sta.tdls = true; | 1491 | sta->sta.tdls = true; |
| 1492 | 1492 | ||
| 1493 | if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION && | ||
| 1494 | !sdata->u.mgd.associated) | ||
| 1495 | return -EINVAL; | ||
| 1496 | |||
| 1493 | err = sta_apply_parameters(local, sta, params); | 1497 | err = sta_apply_parameters(local, sta, params); |
| 1494 | if (err) { | 1498 | if (err) { |
| 1495 | sta_info_free(local, sta); | 1499 | sta_info_free(local, sta); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 45aad3d3108c..bb4d71efb6fb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -231,7 +231,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, | |||
| 231 | struct ieee80211_hdr_3addr hdr; | 231 | struct ieee80211_hdr_3addr hdr; |
| 232 | u8 category; | 232 | u8 category; |
| 233 | u8 action_code; | 233 | u8 action_code; |
| 234 | } __packed action; | 234 | } __packed __aligned(2) action; |
| 235 | 235 | ||
| 236 | if (!sdata) | 236 | if (!sdata) |
| 237 | return; | 237 | return; |
| @@ -2723,7 +2723,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
| 2723 | skb_set_queue_mapping(skb, q); | 2723 | skb_set_queue_mapping(skb, q); |
| 2724 | 2724 | ||
| 2725 | if (!--mesh_hdr->ttl) { | 2725 | if (!--mesh_hdr->ttl) { |
| 2726 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); | 2726 | if (!is_multicast_ether_addr(hdr->addr1)) |
| 2727 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, | ||
| 2728 | dropped_frames_ttl); | ||
| 2727 | goto out; | 2729 | goto out; |
| 2728 | } | 2730 | } |
| 2729 | 2731 | ||
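In the mac80211 rx.c hunks above, the on-stack action-frame copy gains __aligned(2) on top of __packed, and the mesh forwarding path stops counting multicast frames whose TTL expires as drops. A small stand-alone C illustration of the attribute change, assuming GCC/Clang attribute semantics (the real ieee80211_hdr_3addr layout is not reproduced here):

    struct hdr_packed {
            unsigned short frame_control;
            unsigned char addr[6];
    } __attribute__((packed));

    struct hdr_packed_aligned {
            unsigned short frame_control;
            unsigned char addr[6];
    } __attribute__((packed, aligned(2)));

    /* __packed alone drops the type's alignment to 1; adding aligned(2)
     * keeps the layout byte-exact while guaranteeing 2-byte alignment,
     * which matters for code copying 16-bit fields out of the header. */
    _Static_assert(_Alignof(struct hdr_packed) == 1, "packed => align 1");
    _Static_assert(_Alignof(struct hdr_packed_aligned) == 2, "aligned(2) restored");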
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index fa0844e2a68d..c0c72ae9df42 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c | |||
| @@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, | |||
| 28 | { | 28 | { |
| 29 | struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; | 29 | struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; |
| 30 | struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; | 30 | struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; |
| 31 | struct dst_entry *other_dst = route->tuple[!dir].dst; | ||
| 31 | struct dst_entry *dst = route->tuple[dir].dst; | 32 | struct dst_entry *dst = route->tuple[dir].dst; |
| 32 | 33 | ||
| 33 | ft->dir = dir; | 34 | ft->dir = dir; |
| @@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, | |||
| 50 | ft->src_port = ctt->src.u.tcp.port; | 51 | ft->src_port = ctt->src.u.tcp.port; |
| 51 | ft->dst_port = ctt->dst.u.tcp.port; | 52 | ft->dst_port = ctt->dst.u.tcp.port; |
| 52 | 53 | ||
| 53 | ft->iifidx = route->tuple[dir].ifindex; | 54 | ft->iifidx = other_dst->dev->ifindex; |
| 54 | ft->oifidx = route->tuple[!dir].ifindex; | 55 | ft->oifidx = dst->dev->ifindex; |
| 55 | ft->dst_cache = dst; | 56 | ft->dst_cache = dst; |
| 56 | } | 57 | } |
| 57 | 58 | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2b0a93300dd7..fb07f6cfc719 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, | |||
| 2304 | struct net *net = sock_net(skb->sk); | 2304 | struct net *net = sock_net(skb->sk); |
| 2305 | unsigned int s_idx = cb->args[0]; | 2305 | unsigned int s_idx = cb->args[0]; |
| 2306 | const struct nft_rule *rule; | 2306 | const struct nft_rule *rule; |
| 2307 | int rc = 1; | ||
| 2308 | 2307 | ||
| 2309 | list_for_each_entry_rcu(rule, &chain->rules, list) { | 2308 | list_for_each_entry_rcu(rule, &chain->rules, list) { |
| 2310 | if (!nft_is_active(net, rule)) | 2309 | if (!nft_is_active(net, rule)) |
| @@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb, | |||
| 2321 | NLM_F_MULTI | NLM_F_APPEND, | 2320 | NLM_F_MULTI | NLM_F_APPEND, |
| 2322 | table->family, | 2321 | table->family, |
| 2323 | table, chain, rule) < 0) | 2322 | table, chain, rule) < 0) |
| 2324 | goto out_unfinished; | 2323 | return 1; |
| 2325 | 2324 | ||
| 2326 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); | 2325 | nl_dump_check_consistent(cb, nlmsg_hdr(skb)); |
| 2327 | cont: | 2326 | cont: |
| 2328 | (*idx)++; | 2327 | (*idx)++; |
| 2329 | } | 2328 | } |
| 2330 | rc = 0; | 2329 | return 0; |
| 2331 | out_unfinished: | ||
| 2332 | cb->args[0] = *idx; | ||
| 2333 | return rc; | ||
| 2334 | } | 2330 | } |
| 2335 | 2331 | ||
| 2336 | static int nf_tables_dump_rules(struct sk_buff *skb, | 2332 | static int nf_tables_dump_rules(struct sk_buff *skb, |
| @@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, | |||
| 2354 | if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) | 2350 | if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) |
| 2355 | continue; | 2351 | continue; |
| 2356 | 2352 | ||
| 2357 | if (ctx && ctx->chain) { | 2353 | if (ctx && ctx->table && ctx->chain) { |
| 2358 | struct rhlist_head *list, *tmp; | 2354 | struct rhlist_head *list, *tmp; |
| 2359 | 2355 | ||
| 2360 | list = rhltable_lookup(&table->chains_ht, ctx->chain, | 2356 | list = rhltable_lookup(&table->chains_ht, ctx->chain, |
| @@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb, | |||
| 2382 | } | 2378 | } |
| 2383 | done: | 2379 | done: |
| 2384 | rcu_read_unlock(); | 2380 | rcu_read_unlock(); |
| 2381 | |||
| 2382 | cb->args[0] = idx; | ||
| 2385 | return skb->len; | 2383 | return skb->len; |
| 2386 | } | 2384 | } |
| 2387 | 2385 | ||
| @@ -4508,6 +4506,8 @@ err6: | |||
| 4508 | err5: | 4506 | err5: |
| 4509 | kfree(trans); | 4507 | kfree(trans); |
| 4510 | err4: | 4508 | err4: |
| 4509 | if (obj) | ||
| 4510 | obj->use--; | ||
| 4511 | kfree(elem.priv); | 4511 | kfree(elem.priv); |
| 4512 | err3: | 4512 | err3: |
| 4513 | if (nla[NFTA_SET_ELEM_DATA] != NULL) | 4513 | if (nla[NFTA_SET_ELEM_DATA] != NULL) |
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 974525eb92df..6e6b9adf7d38 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <net/netfilter/nf_conntrack_core.h> | 12 | #include <net/netfilter/nf_conntrack_core.h> |
| 13 | #include <linux/netfilter/nf_conntrack_common.h> | 13 | #include <linux/netfilter/nf_conntrack_common.h> |
| 14 | #include <net/netfilter/nf_flow_table.h> | 14 | #include <net/netfilter/nf_flow_table.h> |
| 15 | #include <net/netfilter/nf_conntrack_helper.h> | ||
| 15 | 16 | ||
| 16 | struct nft_flow_offload { | 17 | struct nft_flow_offload { |
| 17 | struct nft_flowtable *flowtable; | 18 | struct nft_flowtable *flowtable; |
| @@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, | |||
| 29 | memset(&fl, 0, sizeof(fl)); | 30 | memset(&fl, 0, sizeof(fl)); |
| 30 | switch (nft_pf(pkt)) { | 31 | switch (nft_pf(pkt)) { |
| 31 | case NFPROTO_IPV4: | 32 | case NFPROTO_IPV4: |
| 32 | fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; | 33 | fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip; |
| 34 | fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex; | ||
| 33 | break; | 35 | break; |
| 34 | case NFPROTO_IPV6: | 36 | case NFPROTO_IPV6: |
| 35 | fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; | 37 | fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6; |
| 38 | fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex; | ||
| 36 | break; | 39 | break; |
| 37 | } | 40 | } |
| 38 | 41 | ||
| @@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, | |||
| 41 | return -ENOENT; | 44 | return -ENOENT; |
| 42 | 45 | ||
| 43 | route->tuple[dir].dst = this_dst; | 46 | route->tuple[dir].dst = this_dst; |
| 44 | route->tuple[dir].ifindex = nft_in(pkt)->ifindex; | ||
| 45 | route->tuple[!dir].dst = other_dst; | 47 | route->tuple[!dir].dst = other_dst; |
| 46 | route->tuple[!dir].ifindex = nft_out(pkt)->ifindex; | ||
| 47 | 48 | ||
| 48 | return 0; | 49 | return 0; |
| 49 | } | 50 | } |
| @@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, | |||
| 66 | { | 67 | { |
| 67 | struct nft_flow_offload *priv = nft_expr_priv(expr); | 68 | struct nft_flow_offload *priv = nft_expr_priv(expr); |
| 68 | struct nf_flowtable *flowtable = &priv->flowtable->data; | 69 | struct nf_flowtable *flowtable = &priv->flowtable->data; |
| 70 | const struct nf_conn_help *help; | ||
| 69 | enum ip_conntrack_info ctinfo; | 71 | enum ip_conntrack_info ctinfo; |
| 70 | struct nf_flow_route route; | 72 | struct nf_flow_route route; |
| 71 | struct flow_offload *flow; | 73 | struct flow_offload *flow; |
| @@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, | |||
| 88 | goto out; | 90 | goto out; |
| 89 | } | 91 | } |
| 90 | 92 | ||
| 91 | if (test_bit(IPS_HELPER_BIT, &ct->status)) | 93 | help = nfct_help(ct); |
| 94 | if (help) | ||
| 92 | goto out; | 95 | goto out; |
| 93 | 96 | ||
| 94 | if (ctinfo == IP_CT_NEW || | 97 | if (ctinfo == IP_CT_NEW || |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 57e07768c9d1..f54cf17ef7a8 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) | |||
| 276 | 276 | ||
| 277 | nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); | 277 | nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); |
| 278 | if (flags & IP6_FH_F_FRAG) { | 278 | if (flags & IP6_FH_F_FRAG) { |
| 279 | if (frag_off) | 279 | if (frag_off) { |
| 280 | key->ip.frag = OVS_FRAG_TYPE_LATER; | 280 | key->ip.frag = OVS_FRAG_TYPE_LATER; |
| 281 | else | 281 | key->ip.proto = nexthdr; |
| 282 | key->ip.frag = OVS_FRAG_TYPE_FIRST; | 282 | return 0; |
| 283 | } | ||
| 284 | key->ip.frag = OVS_FRAG_TYPE_FIRST; | ||
| 283 | } else { | 285 | } else { |
| 284 | key->ip.frag = OVS_FRAG_TYPE_NONE; | 286 | key->ip.frag = OVS_FRAG_TYPE_NONE; |
| 285 | } | 287 | } |
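The openvswitch flow.c hunk above makes parse_ipv6hdr() record nexthdr as the flow key's protocol for a later fragment and return early, since a non-first fragment carries no transport header to parse; first fragments keep falling through to the normal parsing. A minimal illustration of the fragment-offset convention assumed here (names are illustrative, not the OVS structures):

    enum frag_type { FRAG_NONE, FRAG_FIRST, FRAG_LATER };

    /* frag_off comes from the IPv6 fragment header; a zero offset with the
     * fragment header present marks the first fragment of the packet. */
    static enum frag_type classify(int have_frag_header, unsigned int frag_off)
    {
            if (!have_frag_header)
                    return FRAG_NONE;
            return frag_off ? FRAG_LATER : FRAG_FIRST;
    }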
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 435a4bdf8f89..691da853bef5 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, | |||
| 500 | return -EINVAL; | 500 | return -EINVAL; |
| 501 | } | 501 | } |
| 502 | 502 | ||
| 503 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | 503 | if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { |
| 504 | attrs |= 1 << type; | 504 | attrs |= 1 << type; |
| 505 | a[type] = nla; | 505 | a[type] = nla; |
| 506 | } | 506 | } |
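The flow_netlink.c hunk switches the all-zero test from the expected attribute length to the attribute's actual length (nla_len()), so the check covers exactly the bytes that were supplied. For reference, an is_all_zero()-style helper over an arbitrary buffer looks like this (plain C sketch, not the kernel's definition):

    #include <stdbool.h>
    #include <stddef.h>

    static bool is_all_zero(const unsigned char *p, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if (p[i])
                            return false;
            return true;
    }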
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index eedacdebcd4c..3b1a78906bc0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
| 2628 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; | 2628 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
| 2629 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); | 2629 | dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
| 2630 | if (addr && dev && saddr->sll_halen < dev->addr_len) | 2630 | if (addr && dev && saddr->sll_halen < dev->addr_len) |
| 2631 | goto out; | 2631 | goto out_put; |
| 2632 | } | 2632 | } |
| 2633 | 2633 | ||
| 2634 | err = -ENXIO; | 2634 | err = -ENXIO; |
| @@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2828 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; | 2828 | addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
| 2829 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); | 2829 | dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
| 2830 | if (addr && dev && saddr->sll_halen < dev->addr_len) | 2830 | if (addr && dev && saddr->sll_halen < dev->addr_len) |
| 2831 | goto out; | 2831 | goto out_unlock; |
| 2832 | } | 2832 | } |
| 2833 | 2833 | ||
| 2834 | err = -ENXIO; | 2834 | err = -ENXIO; |
| @@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2887 | goto out_free; | 2887 | goto out_free; |
| 2888 | } else if (reserve) { | 2888 | } else if (reserve) { |
| 2889 | skb_reserve(skb, -reserve); | 2889 | skb_reserve(skb, -reserve); |
| 2890 | if (len < reserve) | 2890 | if (len < reserve + sizeof(struct ipv6hdr) && |
| 2891 | dev->min_header_len != dev->hard_header_len) | ||
| 2891 | skb_reset_network_header(skb); | 2892 | skb_reset_network_header(skb); |
| 2892 | } | 2893 | } |
| 2893 | 2894 | ||
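The first two af_packet.c hunks retarget the error exit of the sll_halen sanity check so it unwinds what was acquired just before it, judging by the label names: the device reference in tpacket_snd() (out_put) and the socket lock in packet_snd() (out_unlock), rather than jumping past that cleanup. The snippet below is a generic, self-contained sketch of that staged-cleanup idiom, not the af_packet code itself:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(const char *path)
    {
            int err = -1;
            char *buf = NULL;
            FILE *f = fopen(path, "r");

            if (!f)
                    goto out;               /* nothing acquired yet */

            buf = malloc(4096);
            if (!buf)
                    goto out_close;         /* must still release the file */

            /* ... work ... */
            err = 0;

            free(buf);
    out_close:
            fclose(f);
    out:
            return err;
    }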
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 2dcb555e6350..4e0c36acf866 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
| @@ -522,7 +522,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
| 522 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 522 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
| 523 | i = 1; | 523 | i = 1; |
| 524 | else | 524 | else |
| 525 | i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); | 525 | i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); |
| 526 | 526 | ||
| 527 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); | 527 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); |
| 528 | if (work_alloc == 0) { | 528 | if (work_alloc == 0) { |
| @@ -879,7 +879,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
| 879 | * Instead of knowing how to return a partial rdma read/write we insist that there | 879 | * Instead of knowing how to return a partial rdma read/write we insist that there |
| 880 | * be enough work requests to send the entire message. | 880 | * be enough work requests to send the entire message. |
| 881 | */ | 881 | */ |
| 882 | i = ceil(op->op_count, max_sge); | 882 | i = DIV_ROUND_UP(op->op_count, max_sge); |
| 883 | 883 | ||
| 884 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); | 884 | work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); |
| 885 | if (work_alloc != i) { | 885 | if (work_alloc != i) { |
diff --git a/net/rds/message.c b/net/rds/message.c index f139420ba1f6..50f13f1d4ae0 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
| @@ -341,7 +341,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
| 341 | { | 341 | { |
| 342 | struct rds_message *rm; | 342 | struct rds_message *rm; |
| 343 | unsigned int i; | 343 | unsigned int i; |
| 344 | int num_sgs = ceil(total_len, PAGE_SIZE); | 344 | int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE); |
| 345 | int extra_bytes = num_sgs * sizeof(struct scatterlist); | 345 | int extra_bytes = num_sgs * sizeof(struct scatterlist); |
| 346 | int ret; | 346 | int ret; |
| 347 | 347 | ||
| @@ -351,7 +351,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in | |||
| 351 | 351 | ||
| 352 | set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); | 352 | set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); |
| 353 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); | 353 | rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); |
| 354 | rm->data.op_nents = ceil(total_len, PAGE_SIZE); | 354 | rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE); |
| 355 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); | 355 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); |
| 356 | if (!rm->data.op_sg) { | 356 | if (!rm->data.op_sg) { |
| 357 | rds_message_put(rm); | 357 | rds_message_put(rm); |
diff --git a/net/rds/rds.h b/net/rds/rds.h index 02ec4a3b2799..4ffe100ff5e6 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
| @@ -48,10 +48,6 @@ void rdsdebug(char *fmt, ...) | |||
| 48 | } | 48 | } |
| 49 | #endif | 49 | #endif |
| 50 | 50 | ||
| 51 | /* XXX is there one of these somewhere? */ | ||
| 52 | #define ceil(x, y) \ | ||
| 53 | ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; }) | ||
| 54 | |||
| 55 | #define RDS_FRAG_SHIFT 12 | 51 | #define RDS_FRAG_SHIFT 12 |
| 56 | #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) | 52 | #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) |
| 57 | 53 | ||
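The RDS hunks above drop the file-local ceil() macro in favour of the kernel's generic round-up-division helper, which computes the same value for the unsigned arguments used here. A stand-alone equivalent with a couple of hypothetical values for illustration:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* e.g. 8193 bytes in 4096-byte pages -> 3 pages */
            printf("%u\n", DIV_ROUND_UP(8193u, 4096u));
            /* exact multiple: 8192 bytes -> 2 pages */
            printf("%u\n", DIV_ROUND_UP(8192u, 4096u));
            return 0;
    }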
diff --git a/net/rds/send.c b/net/rds/send.c index 3d822bad7de9..fd8b687d5c05 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
| @@ -1107,7 +1107,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) | |||
| 1107 | size_t total_payload_len = payload_len, rdma_payload_len = 0; | 1107 | size_t total_payload_len = payload_len, rdma_payload_len = 0; |
| 1108 | bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && | 1108 | bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && |
| 1109 | sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); | 1109 | sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); |
| 1110 | int num_sgs = ceil(payload_len, PAGE_SIZE); | 1110 | int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE); |
| 1111 | int namelen; | 1111 | int namelen; |
| 1112 | struct rds_iov_vector_arr vct; | 1112 | struct rds_iov_vector_arr vct; |
| 1113 | int ind; | 1113 | int ind; |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index a2522f9d71e2..96f2952bbdfd 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
| @@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call) | |||
| 419 | EXPORT_SYMBOL(rxrpc_kernel_get_epoch); | 419 | EXPORT_SYMBOL(rxrpc_kernel_get_epoch); |
| 420 | 420 | ||
| 421 | /** | 421 | /** |
| 422 | * rxrpc_kernel_check_call - Check a call's state | ||
| 423 | * @sock: The socket the call is on | ||
| 424 | * @call: The call to check | ||
| 425 | * @_compl: Where to store the completion state | ||
| 426 | * @_abort_code: Where to store any abort code | ||
| 427 | * | ||
| 428 | * Allow a kernel service to query the state of a call and find out the manner | ||
| 429 | * of its termination if it has completed. Returns -EINPROGRESS if the call is | ||
| 430 | * still going, 0 if the call finished successfully, -ECONNABORTED if the call | ||
| 431 | * was aborted and an appropriate error if the call failed in some other way. | ||
| 432 | */ | ||
| 433 | int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call, | ||
| 434 | enum rxrpc_call_completion *_compl, u32 *_abort_code) | ||
| 435 | { | ||
| 436 | if (call->state != RXRPC_CALL_COMPLETE) | ||
| 437 | return -EINPROGRESS; | ||
| 438 | smp_rmb(); | ||
| 439 | *_compl = call->completion; | ||
| 440 | *_abort_code = call->abort_code; | ||
| 441 | return call->error; | ||
| 442 | } | ||
| 443 | EXPORT_SYMBOL(rxrpc_kernel_check_call); | ||
| 444 | |||
| 445 | /** | ||
| 446 | * rxrpc_kernel_retry_call - Allow a kernel service to retry a call | ||
| 447 | * @sock: The socket the call is on | ||
| 448 | * @call: The call to retry | ||
| 449 | * @srx: The address of the peer to contact | ||
| 450 | * @key: The security context to use (defaults to socket setting) | ||
| 451 | * | ||
| 452 | * Allow a kernel service to try resending a client call that failed due to a | ||
| 453 | * network error to a new address. The Tx queue is maintained intact, thereby | ||
| 454 | * relieving the need to re-encrypt any request data that has already been | ||
| 455 | * buffered. | ||
| 456 | */ | ||
| 457 | int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call, | ||
| 458 | struct sockaddr_rxrpc *srx, struct key *key) | ||
| 459 | { | ||
| 460 | struct rxrpc_conn_parameters cp; | ||
| 461 | struct rxrpc_sock *rx = rxrpc_sk(sock->sk); | ||
| 462 | int ret; | ||
| 463 | |||
| 464 | _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); | ||
| 465 | |||
| 466 | if (!key) | ||
| 467 | key = rx->key; | ||
| 468 | if (key && !key->payload.data[0]) | ||
| 469 | key = NULL; /* a no-security key */ | ||
| 470 | |||
| 471 | memset(&cp, 0, sizeof(cp)); | ||
| 472 | cp.local = rx->local; | ||
| 473 | cp.key = key; | ||
| 474 | cp.security_level = 0; | ||
| 475 | cp.exclusive = false; | ||
| 476 | cp.service_id = srx->srx_service; | ||
| 477 | |||
| 478 | mutex_lock(&call->user_mutex); | ||
| 479 | |||
| 480 | ret = rxrpc_prepare_call_for_retry(rx, call); | ||
| 481 | if (ret == 0) | ||
| 482 | ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL); | ||
| 483 | |||
| 484 | mutex_unlock(&call->user_mutex); | ||
| 485 | rxrpc_put_peer(cp.peer); | ||
| 486 | _leave(" = %d", ret); | ||
| 487 | return ret; | ||
| 488 | } | ||
| 489 | EXPORT_SYMBOL(rxrpc_kernel_retry_call); | ||
| 490 | |||
| 491 | /** | ||
| 492 | * rxrpc_kernel_new_call_notification - Get notifications of new calls | 422 | * rxrpc_kernel_new_call_notification - Get notifications of new calls |
| 493 | * @sock: The socket to intercept received messages on | 423 | * @sock: The socket to intercept received messages on |
| 494 | * @notify_new_call: Function to be called when new calls appear | 424 | * @notify_new_call: Function to be called when new calls appear |
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index bc628acf4f4f..4b1a534d290a 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
| @@ -476,7 +476,6 @@ enum rxrpc_call_flag { | |||
| 476 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ | 476 | RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ |
| 477 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ | 477 | RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ |
| 478 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ | 478 | RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ |
| 479 | RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */ | ||
| 480 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ | 479 | RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ |
| 481 | RXRPC_CALL_PINGING, /* Ping in process */ | 480 | RXRPC_CALL_PINGING, /* Ping in process */ |
| 482 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ | 481 | RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ |
| @@ -518,6 +517,18 @@ enum rxrpc_call_state { | |||
| 518 | }; | 517 | }; |
| 519 | 518 | ||
| 520 | /* | 519 | /* |
| 520 | * Call completion condition (state == RXRPC_CALL_COMPLETE). | ||
| 521 | */ | ||
| 522 | enum rxrpc_call_completion { | ||
| 523 | RXRPC_CALL_SUCCEEDED, /* - Normal termination */ | ||
| 524 | RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ | ||
| 525 | RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ | ||
| 526 | RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ | ||
| 527 | RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ | ||
| 528 | NR__RXRPC_CALL_COMPLETIONS | ||
| 529 | }; | ||
| 530 | |||
| 531 | /* | ||
| 521 | * Call Tx congestion management modes. | 532 | * Call Tx congestion management modes. |
| 522 | */ | 533 | */ |
| 523 | enum rxrpc_congest_mode { | 534 | enum rxrpc_congest_mode { |
| @@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, | |||
| 761 | struct sockaddr_rxrpc *, | 772 | struct sockaddr_rxrpc *, |
| 762 | struct rxrpc_call_params *, gfp_t, | 773 | struct rxrpc_call_params *, gfp_t, |
| 763 | unsigned int); | 774 | unsigned int); |
| 764 | int rxrpc_retry_client_call(struct rxrpc_sock *, | ||
| 765 | struct rxrpc_call *, | ||
| 766 | struct rxrpc_conn_parameters *, | ||
| 767 | struct sockaddr_rxrpc *, | ||
| 768 | gfp_t); | ||
| 769 | void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, | 775 | void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, |
| 770 | struct sk_buff *); | 776 | struct sk_buff *); |
| 771 | void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); | 777 | void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); |
| 772 | int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *); | ||
| 773 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *); | 778 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *); |
| 774 | bool __rxrpc_queue_call(struct rxrpc_call *); | 779 | bool __rxrpc_queue_call(struct rxrpc_call *); |
| 775 | bool rxrpc_queue_call(struct rxrpc_call *); | 780 | bool rxrpc_queue_call(struct rxrpc_call *); |
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8f1a8f85b1f9..8aa2937b069f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
| @@ -325,48 +325,6 @@ error: | |||
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | /* | 327 | /* |
| 328 | * Retry a call to a new address. It is expected that the Tx queue of the call | ||
| 329 | * will contain data previously packaged for an old call. | ||
| 330 | */ | ||
| 331 | int rxrpc_retry_client_call(struct rxrpc_sock *rx, | ||
| 332 | struct rxrpc_call *call, | ||
| 333 | struct rxrpc_conn_parameters *cp, | ||
| 334 | struct sockaddr_rxrpc *srx, | ||
| 335 | gfp_t gfp) | ||
| 336 | { | ||
| 337 | const void *here = __builtin_return_address(0); | ||
| 338 | int ret; | ||
| 339 | |||
| 340 | /* Set up or get a connection record and set the protocol parameters, | ||
| 341 | * including channel number and call ID. | ||
| 342 | */ | ||
| 343 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); | ||
| 344 | if (ret < 0) | ||
| 345 | goto error; | ||
| 346 | |||
| 347 | trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), | ||
| 348 | here, NULL); | ||
| 349 | |||
| 350 | rxrpc_start_call_timer(call); | ||
| 351 | |||
| 352 | _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); | ||
| 353 | |||
| 354 | if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) | ||
| 355 | rxrpc_queue_call(call); | ||
| 356 | |||
| 357 | _leave(" = 0"); | ||
| 358 | return 0; | ||
| 359 | |||
| 360 | error: | ||
| 361 | rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, | ||
| 362 | RX_CALL_DEAD, ret); | ||
| 363 | trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), | ||
| 364 | here, ERR_PTR(ret)); | ||
| 365 | _leave(" = %d", ret); | ||
| 366 | return ret; | ||
| 367 | } | ||
| 368 | |||
| 369 | /* | ||
| 370 | * Set up an incoming call. call->conn points to the connection. | 328 | * Set up an incoming call. call->conn points to the connection. |
| 371 | * This is called in BH context and isn't allowed to fail. | 329 | * This is called in BH context and isn't allowed to fail. |
| 372 | */ | 330 | */ |
| @@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) | |||
| 534 | } | 492 | } |
| 535 | 493 | ||
| 536 | /* | 494 | /* |
| 537 | * Prepare a kernel service call for retry. | ||
| 538 | */ | ||
| 539 | int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call) | ||
| 540 | { | ||
| 541 | const void *here = __builtin_return_address(0); | ||
| 542 | int i; | ||
| 543 | u8 last = 0; | ||
| 544 | |||
| 545 | _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); | ||
| 546 | |||
| 547 | trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage), | ||
| 548 | here, (const void *)call->flags); | ||
| 549 | |||
| 550 | ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); | ||
| 551 | ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED); | ||
| 552 | ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED); | ||
| 553 | ASSERT(list_empty(&call->recvmsg_link)); | ||
| 554 | |||
| 555 | del_timer_sync(&call->timer); | ||
| 556 | |||
| 557 | _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn); | ||
| 558 | |||
| 559 | if (call->conn) | ||
| 560 | rxrpc_disconnect_call(call); | ||
| 561 | |||
| 562 | if (rxrpc_is_service_call(call) || | ||
| 563 | !call->tx_phase || | ||
| 564 | call->tx_hard_ack != 0 || | ||
| 565 | call->rx_hard_ack != 0 || | ||
| 566 | call->rx_top != 0) | ||
| 567 | return -EINVAL; | ||
| 568 | |||
| 569 | call->state = RXRPC_CALL_UNINITIALISED; | ||
| 570 | call->completion = RXRPC_CALL_SUCCEEDED; | ||
| 571 | call->call_id = 0; | ||
| 572 | call->cid = 0; | ||
| 573 | call->cong_cwnd = 0; | ||
| 574 | call->cong_extra = 0; | ||
| 575 | call->cong_ssthresh = 0; | ||
| 576 | call->cong_mode = 0; | ||
| 577 | call->cong_dup_acks = 0; | ||
| 578 | call->cong_cumul_acks = 0; | ||
| 579 | call->acks_lowest_nak = 0; | ||
| 580 | |||
| 581 | for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { | ||
| 582 | last |= call->rxtx_annotations[i]; | ||
| 583 | call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST; | ||
| 584 | call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS; | ||
| 585 | } | ||
| 586 | |||
| 587 | _leave(" = 0"); | ||
| 588 | return 0; | ||
| 589 | } | ||
| 590 | |||
| 591 | /* | ||
| 592 | * release all the calls associated with a socket | 495 | * release all the calls associated with a socket |
| 593 | */ | 496 | */ |
| 594 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) | 497 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) |
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 521189f4b666..b2adfa825363 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c | |||
| @@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, | |||
| 562 | clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); | 562 | clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); |
| 563 | 563 | ||
| 564 | write_lock_bh(&call->state_lock); | 564 | write_lock_bh(&call->state_lock); |
| 565 | if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) | 565 | call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; |
| 566 | call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; | ||
| 567 | else | ||
| 568 | call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; | ||
| 569 | write_unlock_bh(&call->state_lock); | 566 | write_unlock_bh(&call->state_lock); |
| 570 | 567 | ||
| 571 | rxrpc_see_call(call); | 568 | rxrpc_see_call(call); |
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index be01f9c5d963..46c9312085b1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
| @@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
| 169 | 169 | ||
| 170 | ASSERTCMP(seq, ==, call->tx_top + 1); | 170 | ASSERTCMP(seq, ==, call->tx_top + 1); |
| 171 | 171 | ||
| 172 | if (last) { | 172 | if (last) |
| 173 | annotation |= RXRPC_TX_ANNO_LAST; | 173 | annotation |= RXRPC_TX_ANNO_LAST; |
| 174 | set_bit(RXRPC_CALL_TX_LASTQ, &call->flags); | ||
| 175 | } | ||
| 176 | 174 | ||
| 177 | /* We have to set the timestamp before queueing as the retransmit | 175 | /* We have to set the timestamp before queueing as the retransmit |
| 178 | * algorithm can see the packet as soon as we queue it. | 176 | * algorithm can see the packet as soon as we queue it. |
| @@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
| 386 | call->tx_total_len -= copy; | 384 | call->tx_total_len -= copy; |
| 387 | } | 385 | } |
| 388 | 386 | ||
| 387 | /* check for the far side aborting the call or a network error | ||
| 388 | * occurring */ | ||
| 389 | if (call->state == RXRPC_CALL_COMPLETE) | ||
| 390 | goto call_terminated; | ||
| 391 | |||
| 389 | /* add the packet to the send queue if it's now full */ | 392 | /* add the packet to the send queue if it's now full */ |
| 390 | if (sp->remain <= 0 || | 393 | if (sp->remain <= 0 || |
| 391 | (msg_data_left(msg) == 0 && !more)) { | 394 | (msg_data_left(msg) == 0 && !more)) { |
| @@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
| 425 | notify_end_tx); | 428 | notify_end_tx); |
| 426 | skb = NULL; | 429 | skb = NULL; |
| 427 | } | 430 | } |
| 428 | |||
| 429 | /* Check for the far side aborting the call or a network error | ||
| 430 | * occurring. If this happens, save any packet that was under | ||
| 431 | * construction so that in the case of a network error, the | ||
| 432 | * call can be retried or redirected. | ||
| 433 | */ | ||
| 434 | if (call->state == RXRPC_CALL_COMPLETE) { | ||
| 435 | ret = call->error; | ||
| 436 | goto out; | ||
| 437 | } | ||
| 438 | } while (msg_data_left(msg) > 0); | 431 | } while (msg_data_left(msg) > 0); |
| 439 | 432 | ||
| 440 | success: | 433 | success: |
| @@ -444,6 +437,11 @@ out: | |||
| 444 | _leave(" = %d", ret); | 437 | _leave(" = %d", ret); |
| 445 | return ret; | 438 | return ret; |
| 446 | 439 | ||
| 440 | call_terminated: | ||
| 441 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | ||
| 442 | _leave(" = %d", call->error); | ||
| 443 | return call->error; | ||
| 444 | |||
| 447 | maybe_error: | 445 | maybe_error: |
| 448 | if (copied) | 446 | if (copied) |
| 449 | goto success; | 447 | goto success; |
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index c3b90fadaff6..8b43fe0130f7 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c | |||
| @@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { | |||
| 197 | [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, | 197 | [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, |
| 198 | }; | 198 | }; |
| 199 | 199 | ||
| 200 | static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) | ||
| 201 | { | ||
| 202 | if (!p) | ||
| 203 | return; | ||
| 204 | if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
| 205 | dst_release(&p->tcft_enc_metadata->dst); | ||
| 206 | kfree_rcu(p, rcu); | ||
| 207 | } | ||
| 208 | |||
| 200 | static int tunnel_key_init(struct net *net, struct nlattr *nla, | 209 | static int tunnel_key_init(struct net *net, struct nlattr *nla, |
| 201 | struct nlattr *est, struct tc_action **a, | 210 | struct nlattr *est, struct tc_action **a, |
| 202 | int ovr, int bind, bool rtnl_held, | 211 | int ovr, int bind, bool rtnl_held, |
| @@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | |||
| 360 | rcu_swap_protected(t->params, params_new, | 369 | rcu_swap_protected(t->params, params_new, |
| 361 | lockdep_is_held(&t->tcf_lock)); | 370 | lockdep_is_held(&t->tcf_lock)); |
| 362 | spin_unlock_bh(&t->tcf_lock); | 371 | spin_unlock_bh(&t->tcf_lock); |
| 363 | if (params_new) | 372 | tunnel_key_release_params(params_new); |
| 364 | kfree_rcu(params_new, rcu); | ||
| 365 | 373 | ||
| 366 | if (ret == ACT_P_CREATED) | 374 | if (ret == ACT_P_CREATED) |
| 367 | tcf_idr_insert(tn, *a); | 375 | tcf_idr_insert(tn, *a); |
| @@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a) | |||
| 385 | struct tcf_tunnel_key_params *params; | 393 | struct tcf_tunnel_key_params *params; |
| 386 | 394 | ||
| 387 | params = rcu_dereference_protected(t->params, 1); | 395 | params = rcu_dereference_protected(t->params, 1); |
| 388 | if (params) { | 396 | tunnel_key_release_params(params); |
| 389 | if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
| 390 | dst_release(¶ms->tcft_enc_metadata->dst); | ||
| 391 | |||
| 392 | kfree_rcu(params, rcu); | ||
| 393 | } | ||
| 394 | } | 397 | } |
| 395 | 398 | ||
| 396 | static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, | 399 | static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8ce2a0507970..e2b5cb2eb34e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister); | |||
| 1277 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 1277 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
| 1278 | struct tcf_result *res, bool compat_mode) | 1278 | struct tcf_result *res, bool compat_mode) |
| 1279 | { | 1279 | { |
| 1280 | __be16 protocol = tc_skb_protocol(skb); | ||
| 1281 | #ifdef CONFIG_NET_CLS_ACT | 1280 | #ifdef CONFIG_NET_CLS_ACT |
| 1282 | const int max_reclassify_loop = 4; | 1281 | const int max_reclassify_loop = 4; |
| 1283 | const struct tcf_proto *orig_tp = tp; | 1282 | const struct tcf_proto *orig_tp = tp; |
| @@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
| 1287 | reclassify: | 1286 | reclassify: |
| 1288 | #endif | 1287 | #endif |
| 1289 | for (; tp; tp = rcu_dereference_bh(tp->next)) { | 1288 | for (; tp; tp = rcu_dereference_bh(tp->next)) { |
| 1289 | __be16 protocol = tc_skb_protocol(skb); | ||
| 1290 | int err; | 1290 | int err; |
| 1291 | 1291 | ||
| 1292 | if (tp->protocol != protocol && | 1292 | if (tp->protocol != protocol && |
| @@ -1319,7 +1319,6 @@ reset: | |||
| 1319 | } | 1319 | } |
| 1320 | 1320 | ||
| 1321 | tp = first_tp; | 1321 | tp = first_tp; |
| 1322 | protocol = tc_skb_protocol(skb); | ||
| 1323 | goto reclassify; | 1322 | goto reclassify; |
| 1324 | #endif | 1323 | #endif |
| 1325 | } | 1324 | } |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index dad04e710493..f6aa57fbbbaf 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 1290 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 1290 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
| 1291 | struct cls_fl_filter *fold = *arg; | 1291 | struct cls_fl_filter *fold = *arg; |
| 1292 | struct cls_fl_filter *fnew; | 1292 | struct cls_fl_filter *fnew; |
| 1293 | struct fl_flow_mask *mask; | ||
| 1293 | struct nlattr **tb; | 1294 | struct nlattr **tb; |
| 1294 | struct fl_flow_mask mask = {}; | ||
| 1295 | int err; | 1295 | int err; |
| 1296 | 1296 | ||
| 1297 | if (!tca[TCA_OPTIONS]) | 1297 | if (!tca[TCA_OPTIONS]) |
| 1298 | return -EINVAL; | 1298 | return -EINVAL; |
| 1299 | 1299 | ||
| 1300 | tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | 1300 | mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); |
| 1301 | if (!tb) | 1301 | if (!mask) |
| 1302 | return -ENOBUFS; | 1302 | return -ENOBUFS; |
| 1303 | 1303 | ||
| 1304 | tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | ||
| 1305 | if (!tb) { | ||
| 1306 | err = -ENOBUFS; | ||
| 1307 | goto errout_mask_alloc; | ||
| 1308 | } | ||
| 1309 | |||
| 1304 | err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], | 1310 | err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], |
| 1305 | fl_policy, NULL); | 1311 | fl_policy, NULL); |
| 1306 | if (err < 0) | 1312 | if (err < 0) |
| @@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 1343 | } | 1349 | } |
| 1344 | } | 1350 | } |
| 1345 | 1351 | ||
| 1346 | err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, | 1352 | err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, |
| 1347 | tp->chain->tmplt_priv, extack); | 1353 | tp->chain->tmplt_priv, extack); |
| 1348 | if (err) | 1354 | if (err) |
| 1349 | goto errout_idr; | 1355 | goto errout_idr; |
| 1350 | 1356 | ||
| 1351 | err = fl_check_assign_mask(head, fnew, fold, &mask); | 1357 | err = fl_check_assign_mask(head, fnew, fold, mask); |
| 1352 | if (err) | 1358 | if (err) |
| 1353 | goto errout_idr; | 1359 | goto errout_idr; |
| 1354 | 1360 | ||
| @@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 1392 | } | 1398 | } |
| 1393 | 1399 | ||
| 1394 | kfree(tb); | 1400 | kfree(tb); |
| 1401 | kfree(mask); | ||
| 1395 | return 0; | 1402 | return 0; |
| 1396 | 1403 | ||
| 1397 | errout_mask: | 1404 | errout_mask: |
| @@ -1405,6 +1412,8 @@ errout: | |||
| 1405 | kfree(fnew); | 1412 | kfree(fnew); |
| 1406 | errout_tb: | 1413 | errout_tb: |
| 1407 | kfree(tb); | 1414 | kfree(tb); |
| 1415 | errout_mask_alloc: | ||
| 1416 | kfree(mask); | ||
| 1408 | return err; | 1417 | return err; |
| 1409 | } | 1418 | } |
| 1410 | 1419 | ||
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index b910cd5c56f7..73940293700d 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
| @@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 1667 | if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { | 1667 | if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { |
| 1668 | struct sk_buff *segs, *nskb; | 1668 | struct sk_buff *segs, *nskb; |
| 1669 | netdev_features_t features = netif_skb_features(skb); | 1669 | netdev_features_t features = netif_skb_features(skb); |
| 1670 | unsigned int slen = 0; | 1670 | unsigned int slen = 0, numsegs = 0; |
| 1671 | 1671 | ||
| 1672 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | 1672 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); |
| 1673 | if (IS_ERR_OR_NULL(segs)) | 1673 | if (IS_ERR_OR_NULL(segs)) |
| @@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 1683 | flow_queue_add(flow, segs); | 1683 | flow_queue_add(flow, segs); |
| 1684 | 1684 | ||
| 1685 | sch->q.qlen++; | 1685 | sch->q.qlen++; |
| 1686 | numsegs++; | ||
| 1686 | slen += segs->len; | 1687 | slen += segs->len; |
| 1687 | q->buffer_used += segs->truesize; | 1688 | q->buffer_used += segs->truesize; |
| 1688 | b->packets++; | 1689 | b->packets++; |
| @@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 1696 | sch->qstats.backlog += slen; | 1697 | sch->qstats.backlog += slen; |
| 1697 | q->avg_window_bytes += slen; | 1698 | q->avg_window_bytes += slen; |
| 1698 | 1699 | ||
| 1699 | qdisc_tree_reduce_backlog(sch, 1, len); | 1700 | qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); |
| 1700 | consume_skb(skb); | 1701 | consume_skb(skb); |
| 1701 | } else { | 1702 | } else { |
| 1702 | /* not splitting */ | 1703 | /* not splitting */ |
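In the sch_cake hunk, qdisc_tree_reduce_backlog() is now told about the real change in queue length after GSO splitting: the original skb (1 packet, len bytes) has been replaced by numsegs segments totalling slen bytes, so the deltas conveyed are 1 - numsegs packets and len - slen bytes, where a negative value means the recorded backlog grows. A worked example with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 64240;       /* hypothetical GSO skb length */
            unsigned int numsegs = 44;      /* hypothetical segment count */
            unsigned int slen = 64900;      /* hypothetical sum of segment lengths */

            /* The same arithmetic the hunk feeds to qdisc_tree_reduce_backlog():
             * negative deltas mean the parent's counts must go up. */
            printf("packet delta: %d\n", 1 - (int)numsegs);      /* prints -43 */
            printf("byte delta:   %d\n", (int)len - (int)slen);  /* prints -660 */
            return 0;
    }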
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index e689e11b6d0f..c6a502933fe7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c | |||
| @@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 88 | struct Qdisc *child, | 88 | struct Qdisc *child, |
| 89 | struct sk_buff **to_free) | 89 | struct sk_buff **to_free) |
| 90 | { | 90 | { |
| 91 | unsigned int len = qdisc_pkt_len(skb); | ||
| 91 | int err; | 92 | int err; |
| 92 | 93 | ||
| 93 | err = child->ops->enqueue(skb, child, to_free); | 94 | err = child->ops->enqueue(skb, child, to_free); |
| 94 | if (err != NET_XMIT_SUCCESS) | 95 | if (err != NET_XMIT_SUCCESS) |
| 95 | return err; | 96 | return err; |
| 96 | 97 | ||
| 97 | qdisc_qstats_backlog_inc(sch, skb); | 98 | sch->qstats.backlog += len; |
| 98 | sch->q.qlen++; | 99 | sch->q.qlen++; |
| 99 | 100 | ||
| 100 | return NET_XMIT_SUCCESS; | 101 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index cdebaed0f8cf..09b800991065 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
| 350 | static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 350 | static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
| 351 | struct sk_buff **to_free) | 351 | struct sk_buff **to_free) |
| 352 | { | 352 | { |
| 353 | unsigned int len = qdisc_pkt_len(skb); | ||
| 353 | struct drr_sched *q = qdisc_priv(sch); | 354 | struct drr_sched *q = qdisc_priv(sch); |
| 354 | struct drr_class *cl; | 355 | struct drr_class *cl; |
| 355 | int err = 0; | 356 | int err = 0; |
| 357 | bool first; | ||
| 356 | 358 | ||
| 357 | cl = drr_classify(skb, sch, &err); | 359 | cl = drr_classify(skb, sch, &err); |
| 358 | if (cl == NULL) { | 360 | if (cl == NULL) { |
| @@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 362 | return err; | 364 | return err; |
| 363 | } | 365 | } |
| 364 | 366 | ||
| 367 | first = !cl->qdisc->q.qlen; | ||
| 365 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 368 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
| 366 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 369 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
| 367 | if (net_xmit_drop_count(err)) { | 370 | if (net_xmit_drop_count(err)) { |
| @@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 371 | return err; | 374 | return err; |
| 372 | } | 375 | } |
| 373 | 376 | ||
| 374 | if (cl->qdisc->q.qlen == 1) { | 377 | if (first) { |
| 375 | list_add_tail(&cl->alist, &q->active); | 378 | list_add_tail(&cl->alist, &q->active); |
| 376 | cl->deficit = cl->quantum; | 379 | cl->deficit = cl->quantum; |
| 377 | } | 380 | } |
| 378 | 381 | ||
| 379 | qdisc_qstats_backlog_inc(sch, skb); | 382 | sch->qstats.backlog += len; |
| 380 | sch->q.qlen++; | 383 | sch->q.qlen++; |
| 381 | return err; | 384 | return err; |
| 382 | } | 385 | } |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index f6f480784bc6..42471464ded3 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
| @@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, | |||
| 199 | static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 199 | static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
| 200 | struct sk_buff **to_free) | 200 | struct sk_buff **to_free) |
| 201 | { | 201 | { |
| 202 | unsigned int len = qdisc_pkt_len(skb); | ||
| 202 | struct dsmark_qdisc_data *p = qdisc_priv(sch); | 203 | struct dsmark_qdisc_data *p = qdisc_priv(sch); |
| 203 | int err; | 204 | int err; |
| 204 | 205 | ||
| @@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 271 | return err; | 272 | return err; |
| 272 | } | 273 | } |
| 273 | 274 | ||
| 274 | qdisc_qstats_backlog_inc(sch, skb); | 275 | sch->qstats.backlog += len; |
| 275 | sch->q.qlen++; | 276 | sch->q.qlen++; |
| 276 | 277 | ||
| 277 | return NET_XMIT_SUCCESS; | 278 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index b18ec1f6de60..24cc220a3218 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
| 1539 | static int | 1539 | static int |
| 1540 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | 1540 | hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) |
| 1541 | { | 1541 | { |
| 1542 | unsigned int len = qdisc_pkt_len(skb); | ||
| 1542 | struct hfsc_class *cl; | 1543 | struct hfsc_class *cl; |
| 1543 | int uninitialized_var(err); | 1544 | int uninitialized_var(err); |
| 1545 | bool first; | ||
| 1544 | 1546 | ||
| 1545 | cl = hfsc_classify(skb, sch, &err); | 1547 | cl = hfsc_classify(skb, sch, &err); |
| 1546 | if (cl == NULL) { | 1548 | if (cl == NULL) { |
| @@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
| 1550 | return err; | 1552 | return err; |
| 1551 | } | 1553 | } |
| 1552 | 1554 | ||
| 1555 | first = !cl->qdisc->q.qlen; | ||
| 1553 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 1556 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
| 1554 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 1557 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
| 1555 | if (net_xmit_drop_count(err)) { | 1558 | if (net_xmit_drop_count(err)) { |
| @@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
| 1559 | return err; | 1562 | return err; |
| 1560 | } | 1563 | } |
| 1561 | 1564 | ||
| 1562 | if (cl->qdisc->q.qlen == 1) { | 1565 | if (first) { |
| 1563 | unsigned int len = qdisc_pkt_len(skb); | ||
| 1564 | |||
| 1565 | if (cl->cl_flags & HFSC_RSC) | 1566 | if (cl->cl_flags & HFSC_RSC) |
| 1566 | init_ed(cl, len); | 1567 | init_ed(cl, len); |
| 1567 | if (cl->cl_flags & HFSC_FSC) | 1568 | if (cl->cl_flags & HFSC_FSC) |
| @@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
| 1576 | 1577 | ||
| 1577 | } | 1578 | } |
| 1578 | 1579 | ||
| 1579 | qdisc_qstats_backlog_inc(sch, skb); | 1580 | sch->qstats.backlog += len; |
| 1580 | sch->q.qlen++; | 1581 | sch->q.qlen++; |
| 1581 | 1582 | ||
| 1582 | return NET_XMIT_SUCCESS; | 1583 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 58b449490757..30f9da7e1076 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 581 | struct sk_buff **to_free) | 581 | struct sk_buff **to_free) |
| 582 | { | 582 | { |
| 583 | int uninitialized_var(ret); | 583 | int uninitialized_var(ret); |
| 584 | unsigned int len = qdisc_pkt_len(skb); | ||
| 584 | struct htb_sched *q = qdisc_priv(sch); | 585 | struct htb_sched *q = qdisc_priv(sch); |
| 585 | struct htb_class *cl = htb_classify(skb, sch, &ret); | 586 | struct htb_class *cl = htb_classify(skb, sch, &ret); |
| 586 | 587 | ||
| @@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 610 | htb_activate(q, cl); | 611 | htb_activate(q, cl); |
| 611 | } | 612 | } |
| 612 | 613 | ||
| 613 | qdisc_qstats_backlog_inc(sch, skb); | 614 | sch->qstats.backlog += len; |
| 614 | sch->q.qlen++; | 615 | sch->q.qlen++; |
| 615 | return NET_XMIT_SUCCESS; | 616 | return NET_XMIT_SUCCESS; |
| 616 | } | 617 | } |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index cdf68706e40f..847141cd900f 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
| 72 | static int | 72 | static int |
| 73 | prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | 73 | prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) |
| 74 | { | 74 | { |
| 75 | unsigned int len = qdisc_pkt_len(skb); | ||
| 75 | struct Qdisc *qdisc; | 76 | struct Qdisc *qdisc; |
| 76 | int ret; | 77 | int ret; |
| 77 | 78 | ||
| @@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) | |||
| 88 | 89 | ||
| 89 | ret = qdisc_enqueue(skb, qdisc, to_free); | 90 | ret = qdisc_enqueue(skb, qdisc, to_free); |
| 90 | if (ret == NET_XMIT_SUCCESS) { | 91 | if (ret == NET_XMIT_SUCCESS) { |
| 91 | qdisc_qstats_backlog_inc(sch, skb); | 92 | sch->qstats.backlog += len; |
| 92 | sch->q.qlen++; | 93 | sch->q.qlen++; |
| 93 | return NET_XMIT_SUCCESS; | 94 | return NET_XMIT_SUCCESS; |
| 94 | } | 95 | } |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index dc37c4ead439..29f5c4a24688 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) | |||
| 1210 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 1210 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
| 1211 | struct sk_buff **to_free) | 1211 | struct sk_buff **to_free) |
| 1212 | { | 1212 | { |
| 1213 | unsigned int len = qdisc_pkt_len(skb), gso_segs; | ||
| 1213 | struct qfq_sched *q = qdisc_priv(sch); | 1214 | struct qfq_sched *q = qdisc_priv(sch); |
| 1214 | struct qfq_class *cl; | 1215 | struct qfq_class *cl; |
| 1215 | struct qfq_aggregate *agg; | 1216 | struct qfq_aggregate *agg; |
| 1216 | int err = 0; | 1217 | int err = 0; |
| 1218 | bool first; | ||
| 1217 | 1219 | ||
| 1218 | cl = qfq_classify(skb, sch, &err); | 1220 | cl = qfq_classify(skb, sch, &err); |
| 1219 | if (cl == NULL) { | 1221 | if (cl == NULL) { |
| @@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 1224 | } | 1226 | } |
| 1225 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); | 1227 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); |
| 1226 | 1228 | ||
| 1227 | if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { | 1229 | if (unlikely(cl->agg->lmax < len)) { |
| 1228 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", | 1230 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", |
| 1229 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); | 1231 | cl->agg->lmax, len, cl->common.classid); |
| 1230 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, | 1232 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); |
| 1231 | qdisc_pkt_len(skb)); | ||
| 1232 | if (err) { | 1233 | if (err) { |
| 1233 | cl->qstats.drops++; | 1234 | cl->qstats.drops++; |
| 1234 | return qdisc_drop(skb, sch, to_free); | 1235 | return qdisc_drop(skb, sch, to_free); |
| 1235 | } | 1236 | } |
| 1236 | } | 1237 | } |
| 1237 | 1238 | ||
| 1239 | gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; | ||
| 1240 | first = !cl->qdisc->q.qlen; | ||
| 1238 | err = qdisc_enqueue(skb, cl->qdisc, to_free); | 1241 | err = qdisc_enqueue(skb, cl->qdisc, to_free); |
| 1239 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 1242 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
| 1240 | pr_debug("qfq_enqueue: enqueue failed %d\n", err); | 1243 | pr_debug("qfq_enqueue: enqueue failed %d\n", err); |
| @@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 1245 | return err; | 1248 | return err; |
| 1246 | } | 1249 | } |
| 1247 | 1250 | ||
| 1248 | bstats_update(&cl->bstats, skb); | 1251 | cl->bstats.bytes += len; |
| 1249 | qdisc_qstats_backlog_inc(sch, skb); | 1252 | cl->bstats.packets += gso_segs; |
| 1253 | sch->qstats.backlog += len; | ||
| 1250 | ++sch->q.qlen; | 1254 | ++sch->q.qlen; |
| 1251 | 1255 | ||
| 1252 | agg = cl->agg; | 1256 | agg = cl->agg; |
| 1253 | /* if the queue was not empty, then done here */ | 1257 | /* if the queue was not empty, then done here */ |
| 1254 | if (cl->qdisc->q.qlen != 1) { | 1258 | if (!first) { |
| 1255 | if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && | 1259 | if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && |
| 1256 | list_first_entry(&agg->active, struct qfq_class, alist) | 1260 | list_first_entry(&agg->active, struct qfq_class, alist) |
| 1257 | == cl && cl->deficit < qdisc_pkt_len(skb)) | 1261 | == cl && cl->deficit < len) |
| 1258 | list_move_tail(&cl->alist, &agg->active); | 1262 | list_move_tail(&cl->alist, &agg->active); |
| 1259 | 1263 | ||
| 1260 | return err; | 1264 | return err; |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 942dcca09cf2..7f272a9070c5 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 185 | struct sk_buff **to_free) | 185 | struct sk_buff **to_free) |
| 186 | { | 186 | { |
| 187 | struct tbf_sched_data *q = qdisc_priv(sch); | 187 | struct tbf_sched_data *q = qdisc_priv(sch); |
| 188 | unsigned int len = qdisc_pkt_len(skb); | ||
| 188 | int ret; | 189 | int ret; |
| 189 | 190 | ||
| 190 | if (qdisc_pkt_len(skb) > q->max_size) { | 191 | if (qdisc_pkt_len(skb) > q->max_size) { |
| @@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 200 | return ret; | 201 | return ret; |
| 201 | } | 202 | } |
| 202 | 203 | ||
| 203 | qdisc_qstats_backlog_inc(sch, skb); | 204 | sch->qstats.backlog += len; |
| 204 | sch->q.qlen++; | 205 | sch->q.qlen++; |
| 205 | return NET_XMIT_SUCCESS; | 206 | return NET_XMIT_SUCCESS; |
| 206 | } | 207 | } |
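The sch_cbs, sch_drr, sch_dsmark, sch_hfsc, sch_htb, sch_prio, sch_qfq and sch_tbf hunks all apply the same pattern: read qdisc_pkt_len(skb) (and, where the class-activation logic needs it, whether the child queue was empty) before handing the skb to the child enqueue, then update the parent's backlog and qlen from the cached values. The apparent reason, consistent with every hunk, is that the child may free or segment the skb, so it must not be dereferenced after the call. A condensed sketch of the shape of the fix, built only from identifiers visible in the hunks; it is not compilable on its own and activate_class() is a placeholder for the per-qdisc logic:

    /* cl is the class returned by the qdisc's classify step (omitted here). */
    static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
    {
            unsigned int len = qdisc_pkt_len(skb);  /* cache before enqueue */
            bool first = !cl->qdisc->q.qlen;        /* cache before enqueue */
            int err;

            err = qdisc_enqueue(skb, cl->qdisc, to_free);
            if (err != NET_XMIT_SUCCESS)
                    return err;                     /* skb may already be gone */

            if (first)
                    activate_class(cl);

            sch->qstats.backlog += len;             /* no skb dereference here */
            sch->q.qlen++;
            return NET_XMIT_SUCCESS;
    }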
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index b9ed271b7ef7..6200cd2b4b99 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, | |||
| 97 | 97 | ||
| 98 | switch (ev) { | 98 | switch (ev) { |
| 99 | case NETDEV_UP: | 99 | case NETDEV_UP: |
| 100 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 100 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
| 101 | if (addr) { | 101 | if (addr) { |
| 102 | addr->a.v6.sin6_family = AF_INET6; | 102 | addr->a.v6.sin6_family = AF_INET6; |
| 103 | addr->a.v6.sin6_port = 0; | ||
| 104 | addr->a.v6.sin6_flowinfo = 0; | ||
| 105 | addr->a.v6.sin6_addr = ifa->addr; | 103 | addr->a.v6.sin6_addr = ifa->addr; |
| 106 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; | 104 | addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; |
| 107 | addr->valid = 1; | 105 | addr->valid = 1; |
| @@ -282,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
| 282 | 280 | ||
| 283 | if (saddr) { | 281 | if (saddr) { |
| 284 | fl6->saddr = saddr->v6.sin6_addr; | 282 | fl6->saddr = saddr->v6.sin6_addr; |
| 285 | fl6->fl6_sport = saddr->v6.sin6_port; | 283 | if (!fl6->fl6_sport) |
| 284 | fl6->fl6_sport = saddr->v6.sin6_port; | ||
| 286 | 285 | ||
| 287 | pr_debug("src=%pI6 - ", &fl6->saddr); | 286 | pr_debug("src=%pI6 - ", &fl6->saddr); |
| 288 | } | 287 | } |
| @@ -434,7 +433,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, | |||
| 434 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); | 433 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
| 435 | if (addr) { | 434 | if (addr) { |
| 436 | addr->a.v6.sin6_family = AF_INET6; | 435 | addr->a.v6.sin6_family = AF_INET6; |
| 437 | addr->a.v6.sin6_port = 0; | ||
| 438 | addr->a.v6.sin6_addr = ifp->addr; | 436 | addr->a.v6.sin6_addr = ifp->addr; |
| 439 | addr->a.v6.sin6_scope_id = dev->ifindex; | 437 | addr->a.v6.sin6_scope_id = dev->ifindex; |
| 440 | addr->valid = 1; | 438 | addr->valid = 1; |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d5878ae55840..6abc8b274270 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, | |||
| 101 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); | 101 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
| 102 | if (addr) { | 102 | if (addr) { |
| 103 | addr->a.v4.sin_family = AF_INET; | 103 | addr->a.v4.sin_family = AF_INET; |
| 104 | addr->a.v4.sin_port = 0; | ||
| 105 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 104 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
| 106 | addr->valid = 1; | 105 | addr->valid = 1; |
| 107 | INIT_LIST_HEAD(&addr->list); | 106 | INIT_LIST_HEAD(&addr->list); |
| @@ -441,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
| 441 | } | 440 | } |
| 442 | if (saddr) { | 441 | if (saddr) { |
| 443 | fl4->saddr = saddr->v4.sin_addr.s_addr; | 442 | fl4->saddr = saddr->v4.sin_addr.s_addr; |
| 444 | fl4->fl4_sport = saddr->v4.sin_port; | 443 | if (!fl4->fl4_sport) |
| 444 | fl4->fl4_sport = saddr->v4.sin_port; | ||
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, | 447 | pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, |
| @@ -776,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | |||
| 776 | 776 | ||
| 777 | switch (ev) { | 777 | switch (ev) { |
| 778 | case NETDEV_UP: | 778 | case NETDEV_UP: |
| 779 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 779 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
| 780 | if (addr) { | 780 | if (addr) { |
| 781 | addr->a.v4.sin_family = AF_INET; | 781 | addr->a.v4.sin_family = AF_INET; |
| 782 | addr->a.v4.sin_port = 0; | ||
| 783 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 782 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
| 784 | addr->valid = 1; | 783 | addr->valid = 1; |
| 785 | spin_lock_bh(&net->sctp.local_addr_lock); | 784 | spin_lock_bh(&net->sctp.local_addr_lock); |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f4ac6c592e13..d05c57664e36 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
| 495 | * | 495 | * |
| 496 | * [INIT ACK back to where the INIT came from.] | 496 | * [INIT ACK back to where the INIT came from.] |
| 497 | */ | 497 | */ |
| 498 | retval->transport = chunk->transport; | 498 | if (chunk->transport) |
| 499 | retval->transport = | ||
| 500 | sctp_assoc_lookup_paddr(asoc, | ||
| 501 | &chunk->transport->ipaddr); | ||
| 499 | 502 | ||
| 500 | retval->subh.init_hdr = | 503 | retval->subh.init_hdr = |
| 501 | sctp_addto_chunk(retval, sizeof(initack), &initack); | 504 | sctp_addto_chunk(retval, sizeof(initack), &initack); |
| @@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, | |||
| 642 | * | 645 | * |
| 643 | * [COOKIE ACK back to where the COOKIE ECHO came from.] | 646 | * [COOKIE ACK back to where the COOKIE ECHO came from.] |
| 644 | */ | 647 | */ |
| 645 | if (retval && chunk) | 648 | if (retval && chunk && chunk->transport) |
| 646 | retval->transport = chunk->transport; | 649 | retval->transport = |
| 650 | sctp_assoc_lookup_paddr(asoc, | ||
| 651 | &chunk->transport->ipaddr); | ||
| 647 | 652 | ||
| 648 | return retval; | 653 | return retval; |
| 649 | } | 654 | } |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 3892e7630f3a..80e0ae5534ec 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
| @@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq( | |||
| 585 | struct sctp_strreset_outreq *outreq = param.v; | 585 | struct sctp_strreset_outreq *outreq = param.v; |
| 586 | struct sctp_stream *stream = &asoc->stream; | 586 | struct sctp_stream *stream = &asoc->stream; |
| 587 | __u32 result = SCTP_STRRESET_DENIED; | 587 | __u32 result = SCTP_STRRESET_DENIED; |
| 588 | __u16 i, nums, flags = 0; | ||
| 589 | __be16 *str_p = NULL; | 588 | __be16 *str_p = NULL; |
| 590 | __u32 request_seq; | 589 | __u32 request_seq; |
| 590 | __u16 i, nums; | ||
| 591 | 591 | ||
| 592 | request_seq = ntohl(outreq->request_seq); | 592 | request_seq = ntohl(outreq->request_seq); |
| 593 | 593 | ||
| @@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq( | |||
| 615 | if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) | 615 | if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) |
| 616 | goto out; | 616 | goto out; |
| 617 | 617 | ||
| 618 | nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); | ||
| 619 | str_p = outreq->list_of_streams; | ||
| 620 | for (i = 0; i < nums; i++) { | ||
| 621 | if (ntohs(str_p[i]) >= stream->incnt) { | ||
| 622 | result = SCTP_STRRESET_ERR_WRONG_SSN; | ||
| 623 | goto out; | ||
| 624 | } | ||
| 625 | } | ||
| 626 | |||
| 618 | if (asoc->strreset_chunk) { | 627 | if (asoc->strreset_chunk) { |
| 619 | if (!sctp_chunk_lookup_strreset_param( | 628 | if (!sctp_chunk_lookup_strreset_param( |
| 620 | asoc, outreq->response_seq, | 629 | asoc, outreq->response_seq, |
| @@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq( | |||
| 637 | sctp_chunk_put(asoc->strreset_chunk); | 646 | sctp_chunk_put(asoc->strreset_chunk); |
| 638 | asoc->strreset_chunk = NULL; | 647 | asoc->strreset_chunk = NULL; |
| 639 | } | 648 | } |
| 640 | |||
| 641 | flags = SCTP_STREAM_RESET_INCOMING_SSN; | ||
| 642 | } | 649 | } |
| 643 | 650 | ||
| 644 | nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); | 651 | if (nums) |
| 645 | if (nums) { | ||
| 646 | str_p = outreq->list_of_streams; | ||
| 647 | for (i = 0; i < nums; i++) { | ||
| 648 | if (ntohs(str_p[i]) >= stream->incnt) { | ||
| 649 | result = SCTP_STRRESET_ERR_WRONG_SSN; | ||
| 650 | goto out; | ||
| 651 | } | ||
| 652 | } | ||
| 653 | |||
| 654 | for (i = 0; i < nums; i++) | 652 | for (i = 0; i < nums; i++) |
| 655 | SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; | 653 | SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; |
| 656 | } else { | 654 | else |
| 657 | for (i = 0; i < stream->incnt; i++) | 655 | for (i = 0; i < stream->incnt; i++) |
| 658 | SCTP_SI(stream, i)->mid = 0; | 656 | SCTP_SI(stream, i)->mid = 0; |
| 659 | } | ||
| 660 | 657 | ||
| 661 | result = SCTP_STRRESET_PERFORMED; | 658 | result = SCTP_STRRESET_PERFORMED; |
| 662 | 659 | ||
| 663 | *evp = sctp_ulpevent_make_stream_reset_event(asoc, | 660 | *evp = sctp_ulpevent_make_stream_reset_event(asoc, |
| 664 | flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, | 661 | SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); |
| 665 | GFP_ATOMIC); | ||
| 666 | 662 | ||
| 667 | out: | 663 | out: |
| 668 | sctp_update_strreset_result(asoc, result); | 664 | sctp_update_strreset_result(asoc, result); |
| @@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq( | |||
| 738 | 734 | ||
| 739 | result = SCTP_STRRESET_PERFORMED; | 735 | result = SCTP_STRRESET_PERFORMED; |
| 740 | 736 | ||
| 741 | *evp = sctp_ulpevent_make_stream_reset_event(asoc, | ||
| 742 | SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); | ||
| 743 | |||
| 744 | out: | 737 | out: |
| 745 | sctp_update_strreset_result(asoc, result); | 738 | sctp_update_strreset_result(asoc, result); |
| 746 | err: | 739 | err: |
| @@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( | |||
| 873 | if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) | 866 | if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) |
| 874 | goto out; | 867 | goto out; |
| 875 | 868 | ||
| 869 | in = ntohs(addstrm->number_of_streams); | ||
| 870 | incnt = stream->incnt + in; | ||
| 871 | if (!in || incnt > SCTP_MAX_STREAM) | ||
| 872 | goto out; | ||
| 873 | |||
| 874 | if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) | ||
| 875 | goto out; | ||
| 876 | |||
| 876 | if (asoc->strreset_chunk) { | 877 | if (asoc->strreset_chunk) { |
| 877 | if (!sctp_chunk_lookup_strreset_param( | 878 | if (!sctp_chunk_lookup_strreset_param( |
| 878 | asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { | 879 | asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { |
| @@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out( | |||
| 896 | } | 897 | } |
| 897 | } | 898 | } |
| 898 | 899 | ||
| 899 | in = ntohs(addstrm->number_of_streams); | ||
| 900 | incnt = stream->incnt + in; | ||
| 901 | if (!in || incnt > SCTP_MAX_STREAM) | ||
| 902 | goto out; | ||
| 903 | |||
| 904 | if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC)) | ||
| 905 | goto out; | ||
| 906 | |||
| 907 | stream->incnt = incnt; | 900 | stream->incnt = incnt; |
| 908 | 901 | ||
| 909 | result = SCTP_STRRESET_PERFORMED; | 902 | result = SCTP_STRRESET_PERFORMED; |
| @@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in( | |||
| 973 | 966 | ||
| 974 | result = SCTP_STRRESET_PERFORMED; | 967 | result = SCTP_STRRESET_PERFORMED; |
| 975 | 968 | ||
| 976 | *evp = sctp_ulpevent_make_stream_change_event(asoc, | ||
| 977 | 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC); | ||
| 978 | |||
| 979 | out: | 969 | out: |
| 980 | sctp_update_strreset_result(asoc, result); | 970 | sctp_update_strreset_result(asoc, result); |
| 981 | err: | 971 | err: |
| @@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp( | |||
| 1036 | sout->mid_uo = 0; | 1026 | sout->mid_uo = 0; |
| 1037 | } | 1027 | } |
| 1038 | } | 1028 | } |
| 1039 | |||
| 1040 | flags = SCTP_STREAM_RESET_OUTGOING_SSN; | ||
| 1041 | } | 1029 | } |
| 1042 | 1030 | ||
| 1031 | flags |= SCTP_STREAM_RESET_OUTGOING_SSN; | ||
| 1032 | |||
| 1043 | for (i = 0; i < stream->outcnt; i++) | 1033 | for (i = 0; i < stream->outcnt; i++) |
| 1044 | SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; | 1034 | SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; |
| 1045 | 1035 | ||
| @@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp( | |||
| 1058 | nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / | 1048 | nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / |
| 1059 | sizeof(__u16); | 1049 | sizeof(__u16); |
| 1060 | 1050 | ||
| 1051 | flags |= SCTP_STREAM_RESET_INCOMING_SSN; | ||
| 1052 | |||
| 1061 | *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, | 1053 | *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, |
| 1062 | nums, str_p, GFP_ATOMIC); | 1054 | nums, str_p, GFP_ATOMIC); |
| 1063 | } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { | 1055 | } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4da4a78d369..c4e56602e0c6 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -146,6 +146,9 @@ static int smc_release(struct socket *sock) | |||
| 146 | sock_set_flag(sk, SOCK_DEAD); | 146 | sock_set_flag(sk, SOCK_DEAD); |
| 147 | sk->sk_shutdown |= SHUTDOWN_MASK; | 147 | sk->sk_shutdown |= SHUTDOWN_MASK; |
| 148 | } | 148 | } |
| 149 | |||
| 150 | sk->sk_prot->unhash(sk); | ||
| 151 | |||
| 149 | if (smc->clcsock) { | 152 | if (smc->clcsock) { |
| 150 | if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { | 153 | if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { |
| 151 | /* wake up clcsock accept */ | 154 | /* wake up clcsock accept */ |
| @@ -170,7 +173,6 @@ static int smc_release(struct socket *sock) | |||
| 170 | smc_conn_free(&smc->conn); | 173 | smc_conn_free(&smc->conn); |
| 171 | release_sock(sk); | 174 | release_sock(sk); |
| 172 | 175 | ||
| 173 | sk->sk_prot->unhash(sk); | ||
| 174 | sock_put(sk); /* final sock_put */ | 176 | sock_put(sk); /* final sock_put */ |
| 175 | out: | 177 | out: |
| 176 | return rc; | 178 | return rc; |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 1ff9768f5456..f3023bbc0b7f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -41,6 +41,9 @@ static unsigned long number_cred_unused; | |||
| 41 | 41 | ||
| 42 | static struct cred machine_cred = { | 42 | static struct cred machine_cred = { |
| 43 | .usage = ATOMIC_INIT(1), | 43 | .usage = ATOMIC_INIT(1), |
| 44 | #ifdef CONFIG_DEBUG_CREDENTIALS | ||
| 45 | .magic = CRED_MAGIC, | ||
| 46 | #endif | ||
| 44 | }; | 47 | }; |
| 45 | 48 | ||
| 46 | /* | 49 | /* |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc86713b32b6..1531b0219344 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 1549 | cred_len = p++; | 1549 | cred_len = p++; |
| 1550 | 1550 | ||
| 1551 | spin_lock(&ctx->gc_seq_lock); | 1551 | spin_lock(&ctx->gc_seq_lock); |
| 1552 | req->rq_seqno = ctx->gc_seq++; | 1552 | req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; |
| 1553 | spin_unlock(&ctx->gc_seq_lock); | 1553 | spin_unlock(&ctx->gc_seq_lock); |
| 1554 | if (req->rq_seqno == MAXSEQ) | ||
| 1555 | goto out_expired; | ||
| 1554 | 1556 | ||
| 1555 | *p++ = htonl((u32) RPC_GSS_VERSION); | 1557 | *p++ = htonl((u32) RPC_GSS_VERSION); |
| 1556 | *p++ = htonl((u32) ctx->gc_proc); | 1558 | *p++ = htonl((u32) ctx->gc_proc); |
| @@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 1572 | mic.data = (u8 *)(p + 1); | 1574 | mic.data = (u8 *)(p + 1); |
| 1573 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 1575 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
| 1574 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 1576 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
| 1575 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | 1577 | goto out_expired; |
| 1576 | } else if (maj_stat != 0) { | 1578 | } else if (maj_stat != 0) { |
| 1577 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); | 1579 | pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); |
| 1580 | task->tk_status = -EIO; | ||
| 1578 | goto out_put_ctx; | 1581 | goto out_put_ctx; |
| 1579 | } | 1582 | } |
| 1580 | p = xdr_encode_opaque(p, NULL, mic.len); | 1583 | p = xdr_encode_opaque(p, NULL, mic.len); |
| 1581 | gss_put_ctx(ctx); | 1584 | gss_put_ctx(ctx); |
| 1582 | return p; | 1585 | return p; |
| 1586 | out_expired: | ||
| 1587 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | ||
| 1588 | task->tk_status = -EKEYEXPIRED; | ||
| 1583 | out_put_ctx: | 1589 | out_put_ctx: |
| 1584 | gss_put_ctx(ctx); | 1590 | gss_put_ctx(ctx); |
| 1585 | return NULL; | 1591 | return NULL; |
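The gss_marshal() change stops the GSS sequence number from silently wrapping: MAXSEQ (the 0x80000000 ceiling RFC 2203 sets for RPCSEC_GSS sequence numbers) is treated as "window exhausted", and the request fails with -EKEYEXPIRED so the generic RPC state machine can refresh the credential instead (see the call_encode() hunk in net/sunrpc/clnt.c below). A rough sketch of the resulting logic, condensed from the hunk above:

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
	spin_unlock(&ctx->gc_seq_lock);

	if (req->rq_seqno == MAXSEQ) {			/* sequence window exhausted */
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
		task->tk_status = -EKEYEXPIRED;		/* later routed to call_refresh */
		return NULL;				/* request is re-driven after refresh */
	}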
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 71d9599b5816..d7ec6132c046 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task) | |||
| 1739 | xdr_buf_init(&req->rq_rcv_buf, | 1739 | xdr_buf_init(&req->rq_rcv_buf, |
| 1740 | req->rq_rbuffer, | 1740 | req->rq_rbuffer, |
| 1741 | req->rq_rcvsize); | 1741 | req->rq_rcvsize); |
| 1742 | req->rq_bytes_sent = 0; | ||
| 1743 | 1742 | ||
| 1744 | p = rpc_encode_header(task); | 1743 | p = rpc_encode_header(task); |
| 1745 | if (p == NULL) { | 1744 | if (p == NULL) |
| 1746 | printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); | ||
| 1747 | rpc_exit(task, -EIO); | ||
| 1748 | return; | 1745 | return; |
| 1749 | } | ||
| 1750 | 1746 | ||
| 1751 | encode = task->tk_msg.rpc_proc->p_encode; | 1747 | encode = task->tk_msg.rpc_proc->p_encode; |
| 1752 | if (encode == NULL) | 1748 | if (encode == NULL) |
| @@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task) | |||
| 1771 | /* Did the encode result in an error condition? */ | 1767 | /* Did the encode result in an error condition? */ |
| 1772 | if (task->tk_status != 0) { | 1768 | if (task->tk_status != 0) { |
| 1773 | /* Was the error nonfatal? */ | 1769 | /* Was the error nonfatal? */ |
| 1774 | if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) | 1770 | switch (task->tk_status) { |
| 1771 | case -EAGAIN: | ||
| 1772 | case -ENOMEM: | ||
| 1775 | rpc_delay(task, HZ >> 4); | 1773 | rpc_delay(task, HZ >> 4); |
| 1776 | else | 1774 | break; |
| 1775 | case -EKEYEXPIRED: | ||
| 1776 | task->tk_action = call_refresh; | ||
| 1777 | break; | ||
| 1778 | default: | ||
| 1777 | rpc_exit(task, task->tk_status); | 1779 | rpc_exit(task, task->tk_status); |
| 1780 | } | ||
| 1778 | return; | 1781 | return; |
| 1779 | } | 1782 | } |
| 1780 | 1783 | ||
| @@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task) | |||
| 2336 | *p++ = htonl(clnt->cl_vers); /* program version */ | 2339 | *p++ = htonl(clnt->cl_vers); /* program version */ |
| 2337 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ | 2340 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ |
| 2338 | p = rpcauth_marshcred(task, p); | 2341 | p = rpcauth_marshcred(task, p); |
| 2339 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | 2342 | if (p) |
| 2343 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | ||
| 2340 | return p; | 2344 | return p; |
| 2341 | } | 2345 | } |
| 2342 | 2346 | ||
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 73547d17d3c6..f1ec2110efeb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
| 1151 | struct rpc_xprt *xprt = req->rq_xprt; | 1151 | struct rpc_xprt *xprt = req->rq_xprt; |
| 1152 | 1152 | ||
| 1153 | if (xprt_request_need_enqueue_transmit(task, req)) { | 1153 | if (xprt_request_need_enqueue_transmit(task, req)) { |
| 1154 | req->rq_bytes_sent = 0; | ||
| 1154 | spin_lock(&xprt->queue_lock); | 1155 | spin_lock(&xprt->queue_lock); |
| 1155 | /* | 1156 | /* |
| 1156 | * Requests that carry congestion control credits are added | 1157 | * Requests that carry congestion control credits are added |
| @@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
| 1177 | INIT_LIST_HEAD(&req->rq_xmit2); | 1178 | INIT_LIST_HEAD(&req->rq_xmit2); |
| 1178 | goto out; | 1179 | goto out; |
| 1179 | } | 1180 | } |
| 1180 | } else { | 1181 | } else if (!req->rq_seqno) { |
| 1181 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { | 1182 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { |
| 1182 | if (pos->rq_task->tk_owner != task->tk_owner) | 1183 | if (pos->rq_task->tk_owner != task->tk_owner) |
| 1183 | continue; | 1184 | continue; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7749a2bf6887..4994e75945b8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) | |||
| 845 | for (i = 0; i <= buf->rb_sc_last; i++) { | 845 | for (i = 0; i <= buf->rb_sc_last; i++) { |
| 846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); | 846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
| 847 | if (!sc) | 847 | if (!sc) |
| 848 | goto out_destroy; | 848 | return -ENOMEM; |
| 849 | 849 | ||
| 850 | sc->sc_xprt = r_xprt; | 850 | sc->sc_xprt = r_xprt; |
| 851 | buf->rb_sc_ctxs[i] = sc; | 851 | buf->rb_sc_ctxs[i] = sc; |
| 852 | } | 852 | } |
| 853 | 853 | ||
| 854 | return 0; | 854 | return 0; |
| 855 | |||
| 856 | out_destroy: | ||
| 857 | rpcrdma_sendctxs_destroy(buf); | ||
| 858 | return -ENOMEM; | ||
| 859 | } | 855 | } |
| 860 | 856 | ||
| 861 | /* The sendctx queue is not guaranteed to have a size that is a | 857 | /* The sendctx queue is not guaranteed to have a size that is a |
| @@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) | |||
| 1113 | WQ_MEM_RECLAIM | WQ_HIGHPRI, | 1109 | WQ_MEM_RECLAIM | WQ_HIGHPRI, |
| 1114 | 0, | 1110 | 0, |
| 1115 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); | 1111 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); |
| 1116 | if (!buf->rb_completion_wq) | 1112 | if (!buf->rb_completion_wq) { |
| 1113 | rc = -ENOMEM; | ||
| 1117 | goto out; | 1114 | goto out; |
| 1115 | } | ||
| 1118 | 1116 | ||
| 1119 | return 0; | 1117 | return 0; |
| 1120 | out: | 1118 | out: |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 13559e6a460b..7754aa3e434f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #include <net/udp.h> | 48 | #include <net/udp.h> |
| 49 | #include <net/tcp.h> | 49 | #include <net/tcp.h> |
| 50 | #include <linux/bvec.h> | 50 | #include <linux/bvec.h> |
| 51 | #include <linux/highmem.h> | ||
| 51 | #include <linux/uio.h> | 52 | #include <linux/uio.h> |
| 52 | 53 | ||
| 53 | #include <trace/events/sunrpc.h> | 54 | #include <trace/events/sunrpc.h> |
| @@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, | |||
| 376 | return sock_recvmsg(sock, msg, flags); | 377 | return sock_recvmsg(sock, msg, flags); |
| 377 | } | 378 | } |
| 378 | 379 | ||
| 380 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | ||
| 381 | static void | ||
| 382 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
| 383 | { | ||
| 384 | struct bvec_iter bi = { | ||
| 385 | .bi_size = count, | ||
| 386 | }; | ||
| 387 | struct bio_vec bv; | ||
| 388 | |||
| 389 | bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); | ||
| 390 | for_each_bvec(bv, bvec, bi, bi) | ||
| 391 | flush_dcache_page(bv.bv_page); | ||
| 392 | } | ||
| 393 | #else | ||
| 394 | static inline void | ||
| 395 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
| 396 | { | ||
| 397 | } | ||
| 398 | #endif | ||
| 399 | |||
| 379 | static ssize_t | 400 | static ssize_t |
| 380 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | 401 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, |
| 381 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) | 402 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) |
| @@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
| 409 | seek + buf->page_base); | 430 | seek + buf->page_base); |
| 410 | if (ret <= 0) | 431 | if (ret <= 0) |
| 411 | goto sock_err; | 432 | goto sock_err; |
| 433 | xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); | ||
| 412 | offset += ret - buf->page_base; | 434 | offset += ret - buf->page_base; |
| 413 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 435 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 414 | goto out; | 436 | goto out; |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 40f5cae623a7..4ad3586da8f0 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb) | |||
| 87 | return limit; | 87 | return limit; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv) | ||
| 91 | { | ||
| 92 | return TLV_GET_LEN(tlv) - TLV_SPACE(0); | ||
| 93 | } | ||
| 94 | |||
| 90 | static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) | 95 | static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) |
| 91 | { | 96 | { |
| 92 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); | 97 | struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); |
| @@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str) | |||
| 166 | return buf; | 171 | return buf; |
| 167 | } | 172 | } |
| 168 | 173 | ||
| 174 | static inline bool string_is_valid(char *s, int len) | ||
| 175 | { | ||
| 176 | return memchr(s, '\0', len) ? true : false; | ||
| 177 | } | ||
| 178 | |||
| 169 | static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | 179 | static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, |
| 170 | struct tipc_nl_compat_msg *msg, | 180 | struct tipc_nl_compat_msg *msg, |
| 171 | struct sk_buff *arg) | 181 | struct sk_buff *arg) |
| @@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 379 | struct nlattr *prop; | 389 | struct nlattr *prop; |
| 380 | struct nlattr *bearer; | 390 | struct nlattr *bearer; |
| 381 | struct tipc_bearer_config *b; | 391 | struct tipc_bearer_config *b; |
| 392 | int len; | ||
| 382 | 393 | ||
| 383 | b = (struct tipc_bearer_config *)TLV_DATA(msg->req); | 394 | b = (struct tipc_bearer_config *)TLV_DATA(msg->req); |
| 384 | 395 | ||
| @@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 386 | if (!bearer) | 397 | if (!bearer) |
| 387 | return -EMSGSIZE; | 398 | return -EMSGSIZE; |
| 388 | 399 | ||
| 400 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | ||
| 401 | if (!string_is_valid(b->name, len)) | ||
| 402 | return -EINVAL; | ||
| 403 | |||
| 389 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) | 404 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) |
| 390 | return -EMSGSIZE; | 405 | return -EMSGSIZE; |
| 391 | 406 | ||
| @@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 411 | { | 426 | { |
| 412 | char *name; | 427 | char *name; |
| 413 | struct nlattr *bearer; | 428 | struct nlattr *bearer; |
| 429 | int len; | ||
| 414 | 430 | ||
| 415 | name = (char *)TLV_DATA(msg->req); | 431 | name = (char *)TLV_DATA(msg->req); |
| 416 | 432 | ||
| @@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 418 | if (!bearer) | 434 | if (!bearer) |
| 419 | return -EMSGSIZE; | 435 | return -EMSGSIZE; |
| 420 | 436 | ||
| 437 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | ||
| 438 | if (!string_is_valid(name, len)) | ||
| 439 | return -EINVAL; | ||
| 440 | |||
| 421 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) | 441 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) |
| 422 | return -EMSGSIZE; | 442 | return -EMSGSIZE; |
| 423 | 443 | ||
| @@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
| 478 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; | 498 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; |
| 479 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; | 499 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; |
| 480 | int err; | 500 | int err; |
| 501 | int len; | ||
| 481 | 502 | ||
| 482 | if (!attrs[TIPC_NLA_LINK]) | 503 | if (!attrs[TIPC_NLA_LINK]) |
| 483 | return -EINVAL; | 504 | return -EINVAL; |
| @@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
| 504 | return err; | 525 | return err; |
| 505 | 526 | ||
| 506 | name = (char *)TLV_DATA(msg->req); | 527 | name = (char *)TLV_DATA(msg->req); |
| 528 | |||
| 529 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
| 530 | if (!string_is_valid(name, len)) | ||
| 531 | return -EINVAL; | ||
| 532 | |||
| 507 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) | 533 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) |
| 508 | return 0; | 534 | return 0; |
| 509 | 535 | ||
| @@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, | |||
| 644 | struct nlattr *prop; | 670 | struct nlattr *prop; |
| 645 | struct nlattr *media; | 671 | struct nlattr *media; |
| 646 | struct tipc_link_config *lc; | 672 | struct tipc_link_config *lc; |
| 673 | int len; | ||
| 647 | 674 | ||
| 648 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 675 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
| 649 | 676 | ||
| @@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb, | |||
| 651 | if (!media) | 678 | if (!media) |
| 652 | return -EMSGSIZE; | 679 | return -EMSGSIZE; |
| 653 | 680 | ||
| 681 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); | ||
| 682 | if (!string_is_valid(lc->name, len)) | ||
| 683 | return -EINVAL; | ||
| 684 | |||
| 654 | if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) | 685 | if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) |
| 655 | return -EMSGSIZE; | 686 | return -EMSGSIZE; |
| 656 | 687 | ||
| @@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, | |||
| 671 | struct nlattr *prop; | 702 | struct nlattr *prop; |
| 672 | struct nlattr *bearer; | 703 | struct nlattr *bearer; |
| 673 | struct tipc_link_config *lc; | 704 | struct tipc_link_config *lc; |
| 705 | int len; | ||
| 674 | 706 | ||
| 675 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 707 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
| 676 | 708 | ||
| @@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb, | |||
| 678 | if (!bearer) | 710 | if (!bearer) |
| 679 | return -EMSGSIZE; | 711 | return -EMSGSIZE; |
| 680 | 712 | ||
| 713 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME); | ||
| 714 | if (!string_is_valid(lc->name, len)) | ||
| 715 | return -EINVAL; | ||
| 716 | |||
| 681 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) | 717 | if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) |
| 682 | return -EMSGSIZE; | 718 | return -EMSGSIZE; |
| 683 | 719 | ||
| @@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 726 | struct tipc_link_config *lc; | 762 | struct tipc_link_config *lc; |
| 727 | struct tipc_bearer *bearer; | 763 | struct tipc_bearer *bearer; |
| 728 | struct tipc_media *media; | 764 | struct tipc_media *media; |
| 765 | int len; | ||
| 729 | 766 | ||
| 730 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | 767 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); |
| 731 | 768 | ||
| 769 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
| 770 | if (!string_is_valid(lc->name, len)) | ||
| 771 | return -EINVAL; | ||
| 772 | |||
| 732 | media = tipc_media_find(lc->name); | 773 | media = tipc_media_find(lc->name); |
| 733 | if (media) { | 774 | if (media) { |
| 734 | cmd->doit = &__tipc_nl_media_set; | 775 | cmd->doit = &__tipc_nl_media_set; |
| @@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 750 | { | 791 | { |
| 751 | char *name; | 792 | char *name; |
| 752 | struct nlattr *link; | 793 | struct nlattr *link; |
| 794 | int len; | ||
| 753 | 795 | ||
| 754 | name = (char *)TLV_DATA(msg->req); | 796 | name = (char *)TLV_DATA(msg->req); |
| 755 | 797 | ||
| @@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, | |||
| 757 | if (!link) | 799 | if (!link) |
| 758 | return -EMSGSIZE; | 800 | return -EMSGSIZE; |
| 759 | 801 | ||
| 802 | len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
| 803 | if (!string_is_valid(name, len)) | ||
| 804 | return -EINVAL; | ||
| 805 | |||
| 760 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) | 806 | if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) |
| 761 | return -EMSGSIZE; | 807 | return -EMSGSIZE; |
| 762 | 808 | ||
| @@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) | |||
| 778 | }; | 824 | }; |
| 779 | 825 | ||
| 780 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | 826 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); |
| 827 | if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query)) | ||
| 828 | return -EINVAL; | ||
| 781 | 829 | ||
| 782 | depth = ntohl(ntq->depth); | 830 | depth = ntohl(ntq->depth); |
| 783 | 831 | ||
| @@ -904,8 +952,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock) | |||
| 904 | 952 | ||
| 905 | hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, | 953 | hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, |
| 906 | TIPC_NL_PUBL_GET); | 954 | TIPC_NL_PUBL_GET); |
| 907 | if (!hdr) | 955 | if (!hdr) { |
| 956 | kfree_skb(args); | ||
| 908 | return -EMSGSIZE; | 957 | return -EMSGSIZE; |
| 958 | } | ||
| 909 | 959 | ||
| 910 | nest = nla_nest_start(args, TIPC_NLA_SOCK); | 960 | nest = nla_nest_start(args, TIPC_NLA_SOCK); |
| 911 | if (!nest) { | 961 | if (!nest) { |
| @@ -1206,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) | |||
| 1206 | } | 1256 | } |
| 1207 | 1257 | ||
| 1208 | len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); | 1258 | len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); |
| 1209 | if (len && !TLV_OK(msg.req, len)) { | 1259 | if (!len || !TLV_OK(msg.req, len)) { |
| 1210 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); | 1260 | msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); |
| 1211 | err = -EOPNOTSUPP; | 1261 | err = -EOPNOTSUPP; |
| 1212 | goto send; | 1262 | goto send; |
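A pattern repeated throughout the netlink_compat.c hunks: compute the TLV payload length with TLV_GET_DATA_LEN(), clamp it to the maximum size the name field may occupy, and reject the request unless a NUL terminator is found within that bound, so an unterminated user-supplied string can never reach nla_put_string() or strcmp(). A minimal stand-alone illustration of the check itself (plain libc, user-space sketch; only the helper's name mirrors the kernel one):

	#include <stdbool.h>
	#include <string.h>

	/* true only if a NUL byte exists within the first len bytes,
	 * i.e. the buffer may safely be treated as a C string */
	static bool string_is_valid(const char *s, int len)
	{
		return memchr(s, '\0', len) != NULL;
	}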
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index efb16f69bd2c..a457c0fbbef1 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c | |||
| @@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) | |||
| 398 | ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); | 398 | ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); |
| 399 | if (ret == -EWOULDBLOCK) | 399 | if (ret == -EWOULDBLOCK) |
| 400 | return -EWOULDBLOCK; | 400 | return -EWOULDBLOCK; |
| 401 | if (ret > 0) { | 401 | if (ret == sizeof(s)) { |
| 402 | read_lock_bh(&sk->sk_callback_lock); | 402 | read_lock_bh(&sk->sk_callback_lock); |
| 403 | ret = tipc_conn_rcv_sub(srv, con, &s); | 403 | ret = tipc_conn_rcv_sub(srv, con, &s); |
| 404 | read_unlock_bh(&sk->sk_callback_lock); | 404 | read_unlock_bh(&sk->sk_callback_lock); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5e49492d5911..74150ad95823 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -555,7 +555,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { | |||
| 555 | }, | 555 | }, |
| 556 | [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), | 556 | [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), |
| 557 | [NL80211_ATTR_PEER_MEASUREMENTS] = | 557 | [NL80211_ATTR_PEER_MEASUREMENTS] = |
| 558 | NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX, | 558 | NLA_POLICY_NESTED(NL80211_PMSR_ATTR_MAX, |
| 559 | nl80211_pmsr_attr_policy), | 559 | nl80211_pmsr_attr_policy), |
| 560 | }; | 560 | }; |
| 561 | 561 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ecfb1a06dbb2..dd58b9909ac9 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -1024,8 +1024,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context) | |||
| 1024 | } | 1024 | } |
| 1025 | 1025 | ||
| 1026 | rtnl_lock(); | 1026 | rtnl_lock(); |
| 1027 | if (WARN_ON(regdb && !IS_ERR(regdb))) { | 1027 | if (regdb && !IS_ERR(regdb)) { |
| 1028 | /* just restore and free new db */ | 1028 | /* negative case - a bug |
| 1029 | * positive case - can happen due to race in case of multiple cb's in | ||
| 1030 | * queue, due to usage of asynchronous callback | ||
| 1031 | * | ||
| 1032 | * Either case, just restore and free new db. | ||
| 1033 | */ | ||
| 1029 | } else if (set_error) { | 1034 | } else if (set_error) { |
| 1030 | regdb = ERR_PTR(set_error); | 1035 | regdb = ERR_PTR(set_error); |
| 1031 | } else if (fw) { | 1036 | } else if (fw) { |
| @@ -1255,7 +1260,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd) | |||
| 1255 | * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), | 1260 | * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), |
| 1256 | * however it is safe for now to assume that a frequency rule should not be | 1261 | * however it is safe for now to assume that a frequency rule should not be |
| 1257 | * part of a frequency's band if the start freq or end freq are off by more | 1262 | * part of a frequency's band if the start freq or end freq are off by more |
| 1258 | * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the | 1263 | * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the |
| 1259 | * 60 GHz band. | 1264 | * 60 GHz band. |
| 1260 | * This resolution can be lowered and should be considered as we add | 1265 | * This resolution can be lowered and should be considered as we add |
| 1261 | * regulatory rule support for other "bands". | 1266 | * regulatory rule support for other "bands". |
| @@ -1270,7 +1275,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, | |||
| 1270 | * with the Channel starting frequency above 45 GHz. | 1275 | * with the Channel starting frequency above 45 GHz. |
| 1271 | */ | 1276 | */ |
| 1272 | u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? | 1277 | u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? |
| 1273 | 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; | 1278 | 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; |
| 1274 | if (abs(freq_khz - freq_range->start_freq_khz) <= limit) | 1279 | if (abs(freq_khz - freq_range->start_freq_khz) <= limit) |
| 1275 | return true; | 1280 | return true; |
| 1276 | if (abs(freq_khz - freq_range->end_freq_khz) <= limit) | 1281 | if (abs(freq_khz - freq_range->end_freq_khz) <= limit) |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index a264cf2accd0..d4de871e7d4d 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
| @@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) | |||
| 41 | * not know if the device has more tx queues than rx, or the opposite. | 41 | * not know if the device has more tx queues than rx, or the opposite. |
| 42 | * This might also change during run time. | 42 | * This might also change during run time. |
| 43 | */ | 43 | */ |
| 44 | static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, | 44 | static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, |
| 45 | u16 queue_id) | 45 | u16 queue_id) |
| 46 | { | 46 | { |
| 47 | if (queue_id >= max_t(unsigned int, | ||
| 48 | dev->real_num_rx_queues, | ||
| 49 | dev->real_num_tx_queues)) | ||
| 50 | return -EINVAL; | ||
| 51 | |||
| 47 | if (queue_id < dev->real_num_rx_queues) | 52 | if (queue_id < dev->real_num_rx_queues) |
| 48 | dev->_rx[queue_id].umem = umem; | 53 | dev->_rx[queue_id].umem = umem; |
| 49 | if (queue_id < dev->real_num_tx_queues) | 54 | if (queue_id < dev->real_num_tx_queues) |
| 50 | dev->_tx[queue_id].umem = umem; | 55 | dev->_tx[queue_id].umem = umem; |
| 56 | |||
| 57 | return 0; | ||
| 51 | } | 58 | } |
| 52 | 59 | ||
| 53 | struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, | 60 | struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, |
| @@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
| 88 | goto out_rtnl_unlock; | 95 | goto out_rtnl_unlock; |
| 89 | } | 96 | } |
| 90 | 97 | ||
| 91 | xdp_reg_umem_at_qid(dev, umem, queue_id); | 98 | err = xdp_reg_umem_at_qid(dev, umem, queue_id); |
| 99 | if (err) | ||
| 100 | goto out_rtnl_unlock; | ||
| 101 | |||
| 92 | umem->dev = dev; | 102 | umem->dev = dev; |
| 93 | umem->queue_id = queue_id; | 103 | umem->queue_id = queue_id; |
| 94 | if (force_copy) | 104 | if (force_copy) |
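Because xdp_reg_umem_at_qid() now returns an error for out-of-range queue ids, binding an AF_XDP socket to a queue the device does not expose fails at bind() time instead of registering nothing and leaving a socket that never sees traffic. A hypothetical user-space view (ifindex, xsk_fd and the queue count are assumptions for illustration; struct sockaddr_xdp comes from <linux/if_xdp.h>):

	struct sockaddr_xdp sxdp = {
		.sxdp_family   = AF_XDP,
		.sxdp_ifindex  = ifindex,
		.sxdp_queue_id = 10,		/* device only exposes 4 rx/tx queues */
	};

	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		perror("bind");			/* the error now surfaces here */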
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 66ae15f27c70..db1a91dfa702 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
| @@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c | |||
| 279 | -Wno-gnu-variable-sized-type-not-at-end \ | 279 | -Wno-gnu-variable-sized-type-not-at-end \ |
| 280 | -Wno-address-of-packed-member -Wno-tautological-compare \ | 280 | -Wno-address-of-packed-member -Wno-tautological-compare \ |
| 281 | -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ | 281 | -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ |
| 282 | -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ | ||
| 282 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ | 283 | -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ |
| 283 | ifeq ($(DWARF2BTF),y) | 284 | ifeq ($(DWARF2BTF),y) |
| 284 | $(BTF_PAHOLE) -J $@ | 285 | $(BTF_PAHOLE) -J $@ |
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h new file mode 100644 index 000000000000..5cd7c1d1a5d5 --- /dev/null +++ b/samples/bpf/asm_goto_workaround.h | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* Copyright (c) 2019 Facebook */ | ||
| 3 | #ifndef __ASM_GOTO_WORKAROUND_H | ||
| 4 | #define __ASM_GOTO_WORKAROUND_H | ||
| 5 | |||
| 6 | /* this will bring in asm_volatile_goto macro definition | ||
| 7 | * if enabled by compiler and config options. | ||
| 8 | */ | ||
| 9 | #include <linux/types.h> | ||
| 10 | |||
| 11 | #ifdef asm_volatile_goto | ||
| 12 | #undef asm_volatile_goto | ||
| 13 | #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") | ||
| 14 | #endif | ||
| 15 | |||
| 16 | #endif | ||
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c index d7b68ef5ba79..0bb6507256b7 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/samples/bpf/test_cgrp2_attach2.c | |||
| @@ -77,7 +77,7 @@ static int test_foo_bar(void) | |||
| 77 | 77 | ||
| 78 | /* Create cgroup /foo, get fd, and join it */ | 78 | /* Create cgroup /foo, get fd, and join it */ |
| 79 | foo = create_and_get_cgroup(FOO); | 79 | foo = create_and_get_cgroup(FOO); |
| 80 | if (!foo) | 80 | if (foo < 0) |
| 81 | goto err; | 81 | goto err; |
| 82 | 82 | ||
| 83 | if (join_cgroup(FOO)) | 83 | if (join_cgroup(FOO)) |
| @@ -94,7 +94,7 @@ static int test_foo_bar(void) | |||
| 94 | 94 | ||
| 95 | /* Create cgroup /foo/bar, get fd, and join it */ | 95 | /* Create cgroup /foo/bar, get fd, and join it */ |
| 96 | bar = create_and_get_cgroup(BAR); | 96 | bar = create_and_get_cgroup(BAR); |
| 97 | if (!bar) | 97 | if (bar < 0) |
| 98 | goto err; | 98 | goto err; |
| 99 | 99 | ||
| 100 | if (join_cgroup(BAR)) | 100 | if (join_cgroup(BAR)) |
| @@ -298,19 +298,19 @@ static int test_multiprog(void) | |||
| 298 | goto err; | 298 | goto err; |
| 299 | 299 | ||
| 300 | cg1 = create_and_get_cgroup("/cg1"); | 300 | cg1 = create_and_get_cgroup("/cg1"); |
| 301 | if (!cg1) | 301 | if (cg1 < 0) |
| 302 | goto err; | 302 | goto err; |
| 303 | cg2 = create_and_get_cgroup("/cg1/cg2"); | 303 | cg2 = create_and_get_cgroup("/cg1/cg2"); |
| 304 | if (!cg2) | 304 | if (cg2 < 0) |
| 305 | goto err; | 305 | goto err; |
| 306 | cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); | 306 | cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); |
| 307 | if (!cg3) | 307 | if (cg3 < 0) |
| 308 | goto err; | 308 | goto err; |
| 309 | cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); | 309 | cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); |
| 310 | if (!cg4) | 310 | if (cg4 < 0) |
| 311 | goto err; | 311 | goto err; |
| 312 | cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); | 312 | cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); |
| 313 | if (!cg5) | 313 | if (cg5 < 0) |
| 314 | goto err; | 314 | goto err; |
| 315 | 315 | ||
| 316 | if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) | 316 | if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) |
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c index 2259f997a26c..f082d6ac59f0 100644 --- a/samples/bpf/test_current_task_under_cgroup_user.c +++ b/samples/bpf/test_current_task_under_cgroup_user.c | |||
| @@ -32,7 +32,7 @@ int main(int argc, char **argv) | |||
| 32 | 32 | ||
| 33 | cg2 = create_and_get_cgroup(CGROUP_PATH); | 33 | cg2 = create_and_get_cgroup(CGROUP_PATH); |
| 34 | 34 | ||
| 35 | if (!cg2) | 35 | if (cg2 < 0) |
| 36 | goto err; | 36 | goto err; |
| 37 | 37 | ||
| 38 | if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { | 38 | if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { |
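The sample fixes here and in test_cgrp2_attach2.c correct the same mistake: judging from the hunks, create_and_get_cgroup() signals failure with a negative value precisely because 0 is a legitimate file descriptor, so a boolean test on the return value can both miss errors and discard a valid descriptor. The same rule applies to any fd-returning call; a small illustrative sketch (the path is made up):

	#include <fcntl.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = open("/mnt/cgroup-test/foo", O_RDONLY | O_DIRECTORY);

		if (fd < 0)		/* only negative values mean failure */
			perror("open");
		/* "if (!fd)" would wrongly treat a valid descriptor 0 as an error */
		return 0;
	}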
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 0a197f86ac43..8bfda95c77ad 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c | |||
| @@ -103,7 +103,7 @@ int main(int argc, char **argv) | |||
| 103 | return 1; | 103 | return 1; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | ifindex = if_nametoindex(argv[1]); | 106 | ifindex = if_nametoindex(argv[optind]); |
| 107 | if (!ifindex) { | 107 | if (!ifindex) { |
| 108 | perror("if_nametoindex"); | 108 | perror("if_nametoindex"); |
| 109 | return 1; | 109 | return 1; |
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 525bff667a52..30816037036e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
| @@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d) | |||
| 24 | basetarget = $(basename $(notdir $@)) | 24 | basetarget = $(basename $(notdir $@)) |
| 25 | 25 | ||
| 26 | ### | 26 | ### |
| 27 | # filename of first prerequisite with directory and extension stripped | ||
| 28 | baseprereq = $(basename $(notdir $<)) | ||
| 29 | |||
| 30 | ### | ||
| 31 | # Escape single quote for use in echo statements | 27 | # Escape single quote for use in echo statements |
| 32 | escsq = $(subst $(squote),'\$(squote)',$1) | 28 | escsq = $(subst $(squote),'\$(squote)',$1) |
| 33 | 29 | ||
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c index de70b8470971..89c47f57d1ce 100644 --- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c +++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c | |||
| @@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
| 13 | for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { | 13 | for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { |
| 14 | const char *sym; | 14 | const char *sym; |
| 15 | rtx body; | 15 | rtx body; |
| 16 | rtx masked_sp; | 16 | rtx mask, masked_sp; |
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard | 19 | * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard |
| @@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
| 33 | * produces the address of the copy of the stack canary value | 33 | * produces the address of the copy of the stack canary value |
| 34 | * stored in struct thread_info | 34 | * stored in struct thread_info |
| 35 | */ | 35 | */ |
| 36 | mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode))); | ||
| 36 | masked_sp = gen_reg_rtx(Pmode); | 37 | masked_sp = gen_reg_rtx(Pmode); |
| 37 | 38 | ||
| 38 | emit_insn_before(gen_rtx_SET(masked_sp, | 39 | emit_insn_before(gen_rtx_SET(masked_sp, |
| 39 | gen_rtx_AND(Pmode, | 40 | gen_rtx_AND(Pmode, |
| 40 | stack_pointer_rtx, | 41 | stack_pointer_rtx, |
| 41 | GEN_INT(sp_mask))), | 42 | mask)), |
| 42 | insn); | 43 | insn); |
| 43 | 44 | ||
| 44 | SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, | 45 | SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, |
| @@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void) | |||
| 52 | #define NO_GATE | 53 | #define NO_GATE |
| 53 | #include "gcc-generate-rtl-pass.h" | 54 | #include "gcc-generate-rtl-pass.h" |
| 54 | 55 | ||
| 56 | #if BUILDING_GCC_VERSION >= 9000 | ||
| 57 | static bool no(void) | ||
| 58 | { | ||
| 59 | return false; | ||
| 60 | } | ||
| 61 | |||
| 62 | static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) | ||
| 63 | { | ||
| 64 | targetm.have_stack_protect_combined_set = no; | ||
| 65 | targetm.have_stack_protect_combined_test = no; | ||
| 66 | } | ||
| 67 | #endif | ||
| 68 | |||
| 55 | __visible int plugin_init(struct plugin_name_args *plugin_info, | 69 | __visible int plugin_init(struct plugin_name_args *plugin_info, |
| 56 | struct plugin_gcc_version *version) | 70 | struct plugin_gcc_version *version) |
| 57 | { | 71 | { |
| @@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, | |||
| 99 | register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, | 113 | register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, |
| 100 | NULL, &arm_pertask_ssp_rtl_pass_info); | 114 | NULL, &arm_pertask_ssp_rtl_pass_info); |
| 101 | 115 | ||
| 116 | #if BUILDING_GCC_VERSION >= 9000 | ||
| 117 | register_callback(plugin_info->base_name, PLUGIN_START_UNIT, | ||
| 118 | arm_pertask_ssp_start_unit, NULL); | ||
| 119 | #endif | ||
| 120 | |||
| 102 | return 0; | 121 | return 0; |
| 103 | } | 122 | } |
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index c05ab001b54c..181973509a05 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
| @@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $< | |||
| 206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE | 206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE |
| 207 | $(call filechk,conf_cfg) | 207 | $(call filechk,conf_cfg) |
| 208 | 208 | ||
| 209 | clean-files += conf-cfg | 209 | clean-files += *conf-cfg |
diff --git a/security/security.c b/security/security.c index f1b8d2587639..55bc49027ba9 100644 --- a/security/security.c +++ b/security/security.c | |||
| @@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) | |||
| 1027 | 1027 | ||
| 1028 | void security_cred_free(struct cred *cred) | 1028 | void security_cred_free(struct cred *cred) |
| 1029 | { | 1029 | { |
| 1030 | /* | ||
| 1031 | * There is a failure case in prepare_creds() that | ||
| 1032 | * may result in a call here with ->security being NULL. | ||
| 1033 | */ | ||
| 1034 | if (unlikely(cred->security == NULL)) | ||
| 1035 | return; | ||
| 1036 | |||
| 1030 | call_void_hook(cred_free, cred); | 1037 | call_void_hook(cred_free, cred); |
| 1031 | } | 1038 | } |
| 1032 | 1039 | ||
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index a50d625e7946..c1c31e33657a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
| @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p) | |||
| 732 | kfree(key); | 732 | kfree(key); |
| 733 | if (datum) { | 733 | if (datum) { |
| 734 | levdatum = datum; | 734 | levdatum = datum; |
| 735 | ebitmap_destroy(&levdatum->level->cat); | 735 | if (levdatum->level) |
| 736 | ebitmap_destroy(&levdatum->level->cat); | ||
| 736 | kfree(levdatum->level); | 737 | kfree(levdatum->level); |
| 737 | } | 738 | } |
| 738 | kfree(datum); | 739 | kfree(datum); |
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index ffda91a4a1aa..02514fe558b4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
| @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
| 368 | break; | 368 | break; |
| 369 | case YAMA_SCOPE_RELATIONAL: | 369 | case YAMA_SCOPE_RELATIONAL: |
| 370 | rcu_read_lock(); | 370 | rcu_read_lock(); |
| 371 | if (!task_is_descendant(current, child) && | 371 | if (!pid_alive(child)) |
| 372 | rc = -EPERM; | ||
| 373 | if (!rc && !task_is_descendant(current, child) && | ||
| 372 | !ptracer_exception_found(current, child) && | 374 | !ptracer_exception_found(current, child) && |
| 373 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) | 375 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) |
| 374 | rc = -EPERM; | 376 | rc = -EPERM; |
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a5b09e75e787..f7d2b373da0a 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
| @@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params) | |||
| 541 | { | 541 | { |
| 542 | /* first let's check the buffer parameter's */ | 542 | /* first let's check the buffer parameter's */ |
| 543 | if (params->buffer.fragment_size == 0 || | 543 | if (params->buffer.fragment_size == 0 || |
| 544 | params->buffer.fragments > INT_MAX / params->buffer.fragment_size) | 544 | params->buffer.fragments > INT_MAX / params->buffer.fragment_size || |
| 545 | params->buffer.fragments == 0) | ||
| 545 | return -EINVAL; | 546 | return -EINVAL; |
| 546 | 547 | ||
| 547 | /* now codec parameters */ | 548 | /* now codec parameters */ |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 51cc6589443f..152f54137082 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
| 931 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), | 931 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), |
| 932 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), | 932 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), |
| 933 | SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), | 933 | SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), |
| 934 | SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), | ||
| 934 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 935 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
| 935 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 936 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
| 936 | SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), | 937 | SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0b3e7a18ca78..b4f472157ebd 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -6926,7 +6926,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
| 6926 | {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, | 6926 | {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, |
| 6927 | {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, | 6927 | {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, |
| 6928 | {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, | 6928 | {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, |
| 6929 | {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, | 6929 | {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, |
| 6930 | {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, | 6930 | {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, |
| 6931 | {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, | 6931 | {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, |
| 6932 | {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, | 6932 | {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, |
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c index 022a8912c8a2..3d58338fa3cf 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c | |||
| @@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev) | |||
| 611 | } | 611 | } |
| 612 | irqflags = *((unsigned int *)(pdev->dev.platform_data)); | 612 | irqflags = *((unsigned int *)(pdev->dev.platform_data)); |
| 613 | 613 | ||
| 614 | adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data), | ||
| 615 | GFP_KERNEL); | ||
| 616 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 614 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 617 | if (!res) { | 615 | if (!res) { |
| 618 | dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n"); | 616 | dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n"); |
| 619 | return -ENODEV; | 617 | return -ENODEV; |
| 620 | } | 618 | } |
| 621 | 619 | ||
| 620 | adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL); | ||
| 621 | if (!adata) | ||
| 622 | return -ENOMEM; | ||
| 623 | |||
| 622 | adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, | 624 | adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, |
| 623 | resource_size(res)); | 625 | resource_size(res)); |
| 624 | 626 | ||
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 3ab2949c1dfa..b19d7a3e7a2c 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c | |||
| @@ -1890,51 +1890,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component) | |||
| 1890 | pm_runtime_disable(&hdev->dev); | 1890 | pm_runtime_disable(&hdev->dev); |
| 1891 | } | 1891 | } |
| 1892 | 1892 | ||
| 1893 | #ifdef CONFIG_PM | 1893 | #ifdef CONFIG_PM_SLEEP |
| 1894 | static int hdmi_codec_prepare(struct device *dev) | 1894 | static int hdmi_codec_resume(struct device *dev) |
| 1895 | { | ||
| 1896 | struct hdac_device *hdev = dev_to_hdac_dev(dev); | ||
| 1897 | |||
| 1898 | pm_runtime_get_sync(&hdev->dev); | ||
| 1899 | |||
| 1900 | /* | ||
| 1901 | * Power down afg. | ||
| 1902 | * codec_read is preferred over codec_write to set the power state. | ||
| 1903 | * This way verb is send to set the power state and response | ||
| 1904 | * is received. So setting power state is ensured without using loop | ||
| 1905 | * to read the state. | ||
| 1906 | */ | ||
| 1907 | snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, | ||
| 1908 | AC_PWRST_D3); | ||
| 1909 | |||
| 1910 | return 0; | ||
| 1911 | } | ||
| 1912 | |||
| 1913 | static void hdmi_codec_complete(struct device *dev) | ||
| 1914 | { | 1895 | { |
| 1915 | struct hdac_device *hdev = dev_to_hdac_dev(dev); | 1896 | struct hdac_device *hdev = dev_to_hdac_dev(dev); |
| 1916 | struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); | 1897 | struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); |
| 1898 | int ret; | ||
| 1917 | 1899 | ||
| 1918 | /* Power up afg */ | 1900 | ret = pm_runtime_force_resume(dev); |
| 1919 | snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, | 1901 | if (ret < 0) |
| 1920 | AC_PWRST_D0); | 1902 | return ret; |
| 1921 | |||
| 1922 | hdac_hdmi_skl_enable_all_pins(hdev); | ||
| 1923 | hdac_hdmi_skl_enable_dp12(hdev); | ||
| 1924 | |||
| 1925 | /* | 1903 | /* |
| 1926 | * As the ELD notify callback request is not entertained while the | 1904 | * As the ELD notify callback request is not entertained while the |
| 1927 | * device is in suspend state, we need to manually check detection of | 1905 | * device is in suspend state, we need to manually check detection of |
| 1928 | * all pins here. Pin capability change is not supported, so use the | 1906 | * all pins here. Pin capability change is not supported, so use the |
| 1929 | * already set pin caps. | 1907 | * already set pin caps. |
| 1908 | * | ||
| 1909 | * NOTE: this is safe to call even if the codec doesn't actually resume. | ||
| 1910 | * The pin check involves only the DRM audio component hooks, so it | ||
| 1911 | * works even if the HD-audio side is still dreaming peacefully. | ||
| 1930 | */ | 1912 | */ |
| 1931 | hdac_hdmi_present_sense_all_pins(hdev, hdmi, false); | 1913 | hdac_hdmi_present_sense_all_pins(hdev, hdmi, false); |
| 1932 | 1914 | return 0; | |
| 1933 | pm_runtime_put_sync(&hdev->dev); | ||
| 1934 | } | 1915 | } |
| 1935 | #else | 1916 | #else |
| 1936 | #define hdmi_codec_prepare NULL | 1917 | #define hdmi_codec_resume NULL |
| 1937 | #define hdmi_codec_complete NULL | ||
| 1938 | #endif | 1918 | #endif |
| 1939 | 1919 | ||
| 1940 | static const struct snd_soc_component_driver hdmi_hda_codec = { | 1920 | static const struct snd_soc_component_driver hdmi_hda_codec = { |
| @@ -2135,75 +2115,6 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev) | |||
| 2135 | } | 2115 | } |
| 2136 | 2116 | ||
| 2137 | #ifdef CONFIG_PM | 2117 | #ifdef CONFIG_PM |
| 2138 | /* | ||
| 2139 | * Power management sequences | ||
| 2140 | * ========================== | ||
| 2141 | * | ||
| 2142 | * The following explains the PM handling of HDAC HDMI with its parent | ||
| 2143 | * device SKL and display power usage | ||
| 2144 | * | ||
| 2145 | * Probe | ||
| 2146 | * ----- | ||
| 2147 | * In SKL probe, | ||
| 2148 | * 1. skl_probe_work() powers up the display (refcount++ -> 1) | ||
| 2149 | * 2. enumerates the codecs on the link | ||
| 2150 | * 3. powers down the display (refcount-- -> 0) | ||
| 2151 | * | ||
| 2152 | * In HDAC HDMI probe, | ||
| 2153 | * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1) | ||
| 2154 | * 2. probe the codec | ||
| 2155 | * 3. put the HDAC HDMI device to runtime suspend | ||
| 2156 | * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) | ||
| 2157 | * | ||
| 2158 | * Once children are runtime suspended, SKL device also goes to runtime | ||
| 2159 | * suspend | ||
| 2160 | * | ||
| 2161 | * HDMI Playback | ||
| 2162 | * ------------- | ||
| 2163 | * Open HDMI device, | ||
| 2164 | * 1. skl_runtime_resume() invoked | ||
| 2165 | * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1) | ||
| 2166 | * | ||
| 2167 | * Close HDMI device, | ||
| 2168 | * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) | ||
| 2169 | * 2. skl_runtime_suspend() invoked | ||
| 2170 | * | ||
| 2171 | * S0/S3 Cycle with playback in progress | ||
| 2172 | * ------------------------------------- | ||
| 2173 | * When the device is opened for playback, the device is runtime active | ||
| 2174 | * already and the display refcount is 1 as explained above. | ||
| 2175 | * | ||
| 2176 | * Entering to S3, | ||
| 2177 | * 1. hdmi_codec_prepare() invoke the runtime resume of codec which just | ||
| 2178 | * increments the PM runtime usage count of the codec since the device | ||
| 2179 | * is in use already | ||
| 2180 | * 2. skl_suspend() powers down the display (refcount-- -> 0) | ||
| 2181 | * | ||
| 2182 | * Wakeup from S3, | ||
| 2183 | * 1. skl_resume() powers up the display (refcount++ -> 1) | ||
| 2184 | * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just | ||
| 2185 | * decrements the PM runtime usage count of the codec since the device | ||
| 2186 | * is in use already | ||
| 2187 | * | ||
| 2188 | * Once playback is stopped, the display refcount is set to 0 as explained | ||
| 2189 | * above in the HDMI playback sequence. The PM handlings are designed in | ||
| 2190 | * such way that to balance the refcount of display power when the codec | ||
| 2191 | * device put to S3 while playback is going on. | ||
| 2192 | * | ||
| 2193 | * S0/S3 Cycle without playback in progress | ||
| 2194 | * ---------------------------------------- | ||
| 2195 | * Entering to S3, | ||
| 2196 | * 1. hdmi_codec_prepare() invoke the runtime resume of codec | ||
| 2197 | * 2. skl_runtime_resume() invoked | ||
| 2198 | * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1) | ||
| 2199 | * 4. skl_suspend() powers down the display (refcount-- -> 0) | ||
| 2200 | * | ||
| 2201 | * Wakeup from S3, | ||
| 2202 | * 1. skl_resume() powers up the display (refcount++ -> 1) | ||
| 2203 | * 2. hdmi_codec_complete() invokes the runtime suspend of codec | ||
| 2204 | * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0) | ||
| 2205 | * 4. skl_runtime_suspend() invoked | ||
| 2206 | */ | ||
| 2207 | static int hdac_hdmi_runtime_suspend(struct device *dev) | 2118 | static int hdac_hdmi_runtime_suspend(struct device *dev) |
| 2208 | { | 2119 | { |
| 2209 | struct hdac_device *hdev = dev_to_hdac_dev(dev); | 2120 | struct hdac_device *hdev = dev_to_hdac_dev(dev); |
| @@ -2277,8 +2188,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev) | |||
| 2277 | 2188 | ||
| 2278 | static const struct dev_pm_ops hdac_hdmi_pm = { | 2189 | static const struct dev_pm_ops hdac_hdmi_pm = { |
| 2279 | SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) | 2190 | SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) |
| 2280 | .prepare = hdmi_codec_prepare, | 2191 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume) |
| 2281 | .complete = hdmi_codec_complete, | ||
| 2282 | }; | 2192 | }; |
| 2283 | 2193 | ||
| 2284 | static const struct hda_device_id hdmi_list[] = { | 2194 | static const struct hda_device_id hdmi_list[] = { |
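
The hdac_hdmi change above drops the hand-rolled prepare/complete callbacks and routes system sleep through the runtime-PM path instead, using pm_runtime_force_suspend() and pm_runtime_force_resume(). A schematic sketch of that dev_pm_ops wiring follows; the foo_* callbacks are placeholders, not the driver's real functions:

        #include <linux/pm.h>
        #include <linux/pm_runtime.h>

        static int foo_runtime_suspend(struct device *dev)
        {
                /* power the device down, save context, drop clocks, ... */
                return 0;
        }

        static int foo_runtime_resume(struct device *dev)
        {
                /* power the device back up and restore context */
                return 0;
        }

        static int foo_system_resume(struct device *dev)
        {
                int ret;

                /* reuse the runtime-resume path for system resume ... */
                ret = pm_runtime_force_resume(dev);
                if (ret < 0)
                        return ret;

                /* ... then do whatever only system resume needs (e.g. re-check jacks) */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
                SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, foo_system_resume)
        };
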
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 6cb1653be804..4cc24a5d5c31 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c | |||
| @@ -1400,24 +1400,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute) | |||
| 1400 | if (ret != 0) { | 1400 | if (ret != 0) { |
| 1401 | dev_err(component->dev, | 1401 | dev_err(component->dev, |
| 1402 | "Failed to set digital mute: %d\n", ret); | 1402 | "Failed to set digital mute: %d\n", ret); |
| 1403 | mutex_unlock(&pcm512x->mutex); | 1403 | goto unlock; |
| 1404 | return ret; | ||
| 1405 | } | 1404 | } |
| 1406 | 1405 | ||
| 1407 | regmap_read_poll_timeout(pcm512x->regmap, | 1406 | regmap_read_poll_timeout(pcm512x->regmap, |
| 1408 | PCM512x_ANALOG_MUTE_DET, | 1407 | PCM512x_ANALOG_MUTE_DET, |
| 1409 | mute_det, (mute_det & 0x3) == 0, | 1408 | mute_det, (mute_det & 0x3) == 0, |
| 1410 | 200, 10000); | 1409 | 200, 10000); |
| 1411 | |||
| 1412 | mutex_unlock(&pcm512x->mutex); | ||
| 1413 | } else { | 1410 | } else { |
| 1414 | pcm512x->mute &= ~0x1; | 1411 | pcm512x->mute &= ~0x1; |
| 1415 | ret = pcm512x_update_mute(pcm512x); | 1412 | ret = pcm512x_update_mute(pcm512x); |
| 1416 | if (ret != 0) { | 1413 | if (ret != 0) { |
| 1417 | dev_err(component->dev, | 1414 | dev_err(component->dev, |
| 1418 | "Failed to update digital mute: %d\n", ret); | 1415 | "Failed to update digital mute: %d\n", ret); |
| 1419 | mutex_unlock(&pcm512x->mutex); | 1416 | goto unlock; |
| 1420 | return ret; | ||
| 1421 | } | 1417 | } |
| 1422 | 1418 | ||
| 1423 | regmap_read_poll_timeout(pcm512x->regmap, | 1419 | regmap_read_poll_timeout(pcm512x->regmap, |
| @@ -1428,9 +1424,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute) | |||
| 1428 | 200, 10000); | 1424 | 200, 10000); |
| 1429 | } | 1425 | } |
| 1430 | 1426 | ||
| 1427 | unlock: | ||
| 1431 | mutex_unlock(&pcm512x->mutex); | 1428 | mutex_unlock(&pcm512x->mutex); |
| 1432 | 1429 | ||
| 1433 | return 0; | 1430 | return ret; |
| 1434 | } | 1431 | } |
| 1435 | 1432 | ||
| 1436 | static const struct snd_soc_dai_ops pcm512x_dai_ops = { | 1433 | static const struct snd_soc_dai_ops pcm512x_dai_ops = { |
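
The pcm512x change above replaces the scattered mutex_unlock()/return pairs with a single unlock label, and at the same time makes the function return the real status instead of an unconditional 0. The general shape of the single-exit pattern, sketched with a stand-in mutex and dummy work:

        #include <linux/mutex.h>
        #include <linux/errno.h>

        static DEFINE_MUTEX(foo_lock);          /* stand-in for the driver's mutex */

        static int foo_do_work(int arg)
        {
                int ret = 0;

                mutex_lock(&foo_lock);

                ret = (arg < 0) ? -EINVAL : 0;  /* placeholder for the real work */
                if (ret)
                        goto unlock;            /* one exit path keeps lock/unlock balanced */

                /* more work done under the lock ... */

        unlock:
                mutex_unlock(&foo_lock);
                return ret;                     /* propagate the real status, not 0 */
        }
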
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c index 0ef966d56bac..e2855ab9a2c6 100644 --- a/sound/soc/codecs/rt274.c +++ b/sound/soc/codecs/rt274.c | |||
| @@ -1128,8 +1128,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c, | |||
| 1128 | return ret; | 1128 | return ret; |
| 1129 | } | 1129 | } |
| 1130 | 1130 | ||
| 1131 | regmap_read(rt274->regmap, | 1131 | ret = regmap_read(rt274->regmap, |
| 1132 | RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); | 1132 | RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); |
| 1133 | if (ret) | ||
| 1134 | return ret; | ||
| 1135 | |||
| 1133 | if (val != RT274_VENDOR_ID) { | 1136 | if (val != RT274_VENDOR_ID) { |
| 1134 | dev_err(&i2c->dev, | 1137 | dev_err(&i2c->dev, |
| 1135 | "Device with ID register %#x is not rt274\n", val); | 1138 | "Device with ID register %#x is not rt274\n", val); |
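
The rt274 fix checks the return value of regmap_read() before looking at the value it filled in; if the I2C transaction failed, the ID comparison would otherwise run on garbage. A condensed version of that check, with invented names:

        #include <linux/regmap.h>
        #include <linux/errno.h>

        static int foo_verify_id(struct regmap *map, unsigned int id_reg,
                                 unsigned int expected)
        {
                unsigned int val;
                int ret;

                ret = regmap_read(map, id_reg, &val);
                if (ret)
                        return ret;             /* bus error: val is not valid */

                if (val != expected)
                        return -ENODEV;         /* wrong or absent device */

                return 0;
        }
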
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c index 4d46f4567c3a..bec2eefa8b0f 100644 --- a/sound/soc/codecs/rt5514-spi.c +++ b/sound/soc/codecs/rt5514-spi.c | |||
| @@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component) | |||
| 280 | 280 | ||
| 281 | rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), | 281 | rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), |
| 282 | GFP_KERNEL); | 282 | GFP_KERNEL); |
| 283 | if (!rt5514_dsp) | ||
| 284 | return -ENOMEM; | ||
| 283 | 285 | ||
| 284 | rt5514_dsp->dev = &rt5514_spi->dev; | 286 | rt5514_dsp->dev = &rt5514_spi->dev; |
| 285 | mutex_init(&rt5514_dsp->dma_lock); | 287 | mutex_init(&rt5514_dsp->dma_lock); |
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 34cfaf8f6f34..89c43b26c379 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
| @@ -2512,6 +2512,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682) | |||
| 2512 | regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000); | 2512 | regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000); |
| 2513 | regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000); | 2513 | regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000); |
| 2514 | regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005); | 2514 | regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005); |
| 2515 | regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4); | ||
| 2515 | 2516 | ||
| 2516 | mutex_unlock(&rt5682->calibrate_mutex); | 2517 | mutex_unlock(&rt5682->calibrate_mutex); |
| 2517 | 2518 | ||
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h index d82a8301fd74..96944cff0ed7 100644 --- a/sound/soc/codecs/rt5682.h +++ b/sound/soc/codecs/rt5682.h | |||
| @@ -849,18 +849,18 @@ | |||
| 849 | #define RT5682_SCLK_SRC_PLL2 (0x2 << 13) | 849 | #define RT5682_SCLK_SRC_PLL2 (0x2 << 13) |
| 850 | #define RT5682_SCLK_SRC_SDW (0x3 << 13) | 850 | #define RT5682_SCLK_SRC_SDW (0x3 << 13) |
| 851 | #define RT5682_SCLK_SRC_RCCLK (0x4 << 13) | 851 | #define RT5682_SCLK_SRC_RCCLK (0x4 << 13) |
| 852 | #define RT5682_PLL1_SRC_MASK (0x3 << 10) | 852 | #define RT5682_PLL2_SRC_MASK (0x3 << 10) |
| 853 | #define RT5682_PLL1_SRC_SFT 10 | 853 | #define RT5682_PLL2_SRC_SFT 10 |
| 854 | #define RT5682_PLL1_SRC_MCLK (0x0 << 10) | 854 | #define RT5682_PLL2_SRC_MCLK (0x0 << 10) |
| 855 | #define RT5682_PLL1_SRC_BCLK1 (0x1 << 10) | 855 | #define RT5682_PLL2_SRC_BCLK1 (0x1 << 10) |
| 856 | #define RT5682_PLL1_SRC_SDW (0x2 << 10) | 856 | #define RT5682_PLL2_SRC_SDW (0x2 << 10) |
| 857 | #define RT5682_PLL1_SRC_RC (0x3 << 10) | 857 | #define RT5682_PLL2_SRC_RC (0x3 << 10) |
| 858 | #define RT5682_PLL2_SRC_MASK (0x3 << 8) | 858 | #define RT5682_PLL1_SRC_MASK (0x3 << 8) |
| 859 | #define RT5682_PLL2_SRC_SFT 8 | 859 | #define RT5682_PLL1_SRC_SFT 8 |
| 860 | #define RT5682_PLL2_SRC_MCLK (0x0 << 8) | 860 | #define RT5682_PLL1_SRC_MCLK (0x0 << 8) |
| 861 | #define RT5682_PLL2_SRC_BCLK1 (0x1 << 8) | 861 | #define RT5682_PLL1_SRC_BCLK1 (0x1 << 8) |
| 862 | #define RT5682_PLL2_SRC_SDW (0x2 << 8) | 862 | #define RT5682_PLL1_SRC_SDW (0x2 << 8) |
| 863 | #define RT5682_PLL2_SRC_RC (0x3 << 8) | 863 | #define RT5682_PLL1_SRC_RC (0x3 << 8) |
| 864 | 864 | ||
| 865 | 865 | ||
| 866 | 866 | ||
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index e2b5a11b16d1..f03195d2ab2e 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c | |||
| @@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component, | |||
| 822 | case SND_SOC_BIAS_PREPARE: | 822 | case SND_SOC_BIAS_PREPARE: |
| 823 | break; | 823 | break; |
| 824 | case SND_SOC_BIAS_STANDBY: | 824 | case SND_SOC_BIAS_STANDBY: |
| 825 | /* Initial cold start */ | ||
| 826 | if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) | ||
| 827 | break; | ||
| 828 | |||
| 825 | /* Switch off BCLK_N Divider */ | 829 | /* Switch off BCLK_N Divider */ |
| 826 | snd_soc_component_update_bits(component, AIC32X4_BCLKN, | 830 | snd_soc_component_update_bits(component, AIC32X4_BCLKN, |
| 827 | AIC32X4_BCLKEN, 0); | 831 | AIC32X4_BCLKEN, 0); |
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index 392d5eef356d..99e07b01a2ce 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c | |||
| @@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf, | |||
| 86 | if (!buf) | 86 | if (!buf) |
| 87 | return -ENOMEM; | 87 | return -ENOMEM; |
| 88 | 88 | ||
| 89 | ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", | 89 | ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", |
| 90 | pdcr, ptcr); | 90 | pdcr, ptcr); |
| 91 | 91 | ||
| 92 | if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) | 92 | if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) |
| 93 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 93 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 94 | "TxFS output from %s, ", | 94 | "TxFS output from %s, ", |
| 95 | audmux_port_string((ptcr >> 27) & 0x7)); | 95 | audmux_port_string((ptcr >> 27) & 0x7)); |
| 96 | else | 96 | else |
| 97 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 97 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 98 | "TxFS input, "); | 98 | "TxFS input, "); |
| 99 | 99 | ||
| 100 | if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) | 100 | if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) |
| 101 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 101 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 102 | "TxClk output from %s", | 102 | "TxClk output from %s", |
| 103 | audmux_port_string((ptcr >> 22) & 0x7)); | 103 | audmux_port_string((ptcr >> 22) & 0x7)); |
| 104 | else | 104 | else |
| 105 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 105 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 106 | "TxClk input"); | 106 | "TxClk input"); |
| 107 | 107 | ||
| 108 | ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); | 108 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
| 109 | 109 | ||
| 110 | if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { | 110 | if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { |
| 111 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 111 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 112 | "Port is symmetric"); | 112 | "Port is symmetric"); |
| 113 | } else { | 113 | } else { |
| 114 | if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) | 114 | if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) |
| 115 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 115 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 116 | "RxFS output from %s, ", | 116 | "RxFS output from %s, ", |
| 117 | audmux_port_string((ptcr >> 17) & 0x7)); | 117 | audmux_port_string((ptcr >> 17) & 0x7)); |
| 118 | else | 118 | else |
| 119 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 119 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 120 | "RxFS input, "); | 120 | "RxFS input, "); |
| 121 | 121 | ||
| 122 | if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) | 122 | if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) |
| 123 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 123 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 124 | "RxClk output from %s", | 124 | "RxClk output from %s", |
| 125 | audmux_port_string((ptcr >> 12) & 0x7)); | 125 | audmux_port_string((ptcr >> 12) & 0x7)); |
| 126 | else | 126 | else |
| 127 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 127 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 128 | "RxClk input"); | 128 | "RxClk input"); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 131 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 132 | "\nData received from %s\n", | 132 | "\nData received from %s\n", |
| 133 | audmux_port_string((pdcr >> 13) & 0x7)); | 133 | audmux_port_string((pdcr >> 13) & 0x7)); |
| 134 | 134 | ||
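
The imx-audmux hunk above (and the later soc-dapm one) swaps snprintf() for scnprintf() when several pieces are appended into one fixed buffer. snprintf() returns the length the output would have had, so once the buffer fills up, buf + ret can point past its end; scnprintf() returns the number of bytes actually stored (excluding the terminating NUL), which keeps the running offset in bounds. A condensed sketch of the accumulation idiom:

        #include <linux/kernel.h>

        /* Append several fields into buf (len bytes) without running past its end. */
        static int foo_format_status(char *buf, size_t len, u32 ptcr, u32 pdcr)
        {
                int ret;

                ret = scnprintf(buf, len, "PTCR: %08x\n", ptcr);
                /* ret is at most len - 1, so buf + ret and len - ret stay in range */
                ret += scnprintf(buf + ret, len - ret, "PDCR: %08x\n", pdcr);

                return ret;                     /* bytes actually placed in buf */
        }
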
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 99a62ba409df..bd9fd2035c55 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig | |||
| @@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI | |||
| 91 | config SND_SST_ATOM_HIFI2_PLATFORM_ACPI | 91 | config SND_SST_ATOM_HIFI2_PLATFORM_ACPI |
| 92 | tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" | 92 | tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" |
| 93 | default ACPI | 93 | default ACPI |
| 94 | depends on X86 && ACPI | 94 | depends on X86 && ACPI && PCI |
| 95 | select SND_SST_IPC_ACPI | 95 | select SND_SST_IPC_ACPI |
| 96 | select SND_SST_ATOM_HIFI2_PLATFORM | 96 | select SND_SST_ATOM_HIFI2_PLATFORM |
| 97 | select SND_SOC_ACPI_INTEL_MATCH | 97 | select SND_SOC_ACPI_INTEL_MATCH |
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index afc559866095..91a2436ce952 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c | |||
| @@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream, | |||
| 399 | struct snd_pcm_hw_params *params, | 399 | struct snd_pcm_hw_params *params, |
| 400 | struct snd_soc_dai *dai) | 400 | struct snd_soc_dai *dai) |
| 401 | { | 401 | { |
| 402 | snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); | 402 | int ret; |
| 403 | |||
| 404 | ret = | ||
| 405 | snd_pcm_lib_malloc_pages(substream, | ||
| 406 | params_buffer_bytes(params)); | ||
| 407 | if (ret) | ||
| 408 | return ret; | ||
| 403 | memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); | 409 | memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); |
| 404 | return 0; | 410 | return 0; |
| 405 | } | 411 | } |
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c index 68e6543e6cb0..99f2a0156ae8 100644 --- a/sound/soc/intel/boards/broadwell.c +++ b/sound/soc/intel/boards/broadwell.c | |||
| @@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = { | |||
| 192 | .stream_name = "Loopback", | 192 | .stream_name = "Loopback", |
| 193 | .cpu_dai_name = "Loopback Pin", | 193 | .cpu_dai_name = "Loopback Pin", |
| 194 | .platform_name = "haswell-pcm-audio", | 194 | .platform_name = "haswell-pcm-audio", |
| 195 | .dynamic = 0, | 195 | .dynamic = 1, |
| 196 | .codec_name = "snd-soc-dummy", | 196 | .codec_name = "snd-soc-dummy", |
| 197 | .codec_dai_name = "snd-soc-dummy-dai", | 197 | .codec_dai_name = "snd-soc-dummy-dai", |
| 198 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, | 198 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, |
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c index c74c4f17316f..8f83b182c4f9 100644 --- a/sound/soc/intel/boards/glk_rt5682_max98357a.c +++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c | |||
| @@ -55,39 +55,6 @@ enum { | |||
| 55 | GLK_DPCM_AUDIO_HDMI3_PB, | 55 | GLK_DPCM_AUDIO_HDMI3_PB, |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | static int platform_clock_control(struct snd_soc_dapm_widget *w, | ||
| 59 | struct snd_kcontrol *k, int event) | ||
| 60 | { | ||
| 61 | struct snd_soc_dapm_context *dapm = w->dapm; | ||
| 62 | struct snd_soc_card *card = dapm->card; | ||
| 63 | struct snd_soc_dai *codec_dai; | ||
| 64 | int ret = 0; | ||
| 65 | |||
| 66 | codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI); | ||
| 67 | if (!codec_dai) { | ||
| 68 | dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n"); | ||
| 69 | return -EIO; | ||
| 70 | } | ||
| 71 | |||
| 72 | if (SND_SOC_DAPM_EVENT_OFF(event)) { | ||
| 73 | ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); | ||
| 74 | if (ret) | ||
| 75 | dev_err(card->dev, "failed to stop sysclk: %d\n", ret); | ||
| 76 | } else if (SND_SOC_DAPM_EVENT_ON(event)) { | ||
| 77 | ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK, | ||
| 78 | GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ); | ||
| 79 | if (ret < 0) { | ||
| 80 | dev_err(card->dev, "can't set codec pll: %d\n", ret); | ||
| 81 | return ret; | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | if (ret) | ||
| 86 | dev_err(card->dev, "failed to start internal clk: %d\n", ret); | ||
| 87 | |||
| 88 | return ret; | ||
| 89 | } | ||
| 90 | |||
| 91 | static const struct snd_kcontrol_new geminilake_controls[] = { | 58 | static const struct snd_kcontrol_new geminilake_controls[] = { |
| 92 | SOC_DAPM_PIN_SWITCH("Headphone Jack"), | 59 | SOC_DAPM_PIN_SWITCH("Headphone Jack"), |
| 93 | SOC_DAPM_PIN_SWITCH("Headset Mic"), | 60 | SOC_DAPM_PIN_SWITCH("Headset Mic"), |
| @@ -102,14 +69,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = { | |||
| 102 | SND_SOC_DAPM_SPK("HDMI1", NULL), | 69 | SND_SOC_DAPM_SPK("HDMI1", NULL), |
| 103 | SND_SOC_DAPM_SPK("HDMI2", NULL), | 70 | SND_SOC_DAPM_SPK("HDMI2", NULL), |
| 104 | SND_SOC_DAPM_SPK("HDMI3", NULL), | 71 | SND_SOC_DAPM_SPK("HDMI3", NULL), |
| 105 | SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, | ||
| 106 | platform_clock_control, SND_SOC_DAPM_PRE_PMU | | ||
| 107 | SND_SOC_DAPM_POST_PMD), | ||
| 108 | }; | 72 | }; |
| 109 | 73 | ||
| 110 | static const struct snd_soc_dapm_route geminilake_map[] = { | 74 | static const struct snd_soc_dapm_route geminilake_map[] = { |
| 111 | /* HP jack connectors - unknown if we have jack detection */ | 75 | /* HP jack connectors - unknown if we have jack detection */ |
| 112 | { "Headphone Jack", NULL, "Platform Clock" }, | ||
| 113 | { "Headphone Jack", NULL, "HPOL" }, | 76 | { "Headphone Jack", NULL, "HPOL" }, |
| 114 | { "Headphone Jack", NULL, "HPOR" }, | 77 | { "Headphone Jack", NULL, "HPOR" }, |
| 115 | 78 | ||
| @@ -117,7 +80,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = { | |||
| 117 | { "Spk", NULL, "Speaker" }, | 80 | { "Spk", NULL, "Speaker" }, |
| 118 | 81 | ||
| 119 | /* other jacks */ | 82 | /* other jacks */ |
| 120 | { "Headset Mic", NULL, "Platform Clock" }, | ||
| 121 | { "IN1P", NULL, "Headset Mic" }, | 83 | { "IN1P", NULL, "Headset Mic" }, |
| 122 | 84 | ||
| 123 | /* digital mics */ | 85 | /* digital mics */ |
| @@ -177,6 +139,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd) | |||
| 177 | struct snd_soc_jack *jack; | 139 | struct snd_soc_jack *jack; |
| 178 | int ret; | 140 | int ret; |
| 179 | 141 | ||
| 142 | ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK, | ||
| 143 | GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ); | ||
| 144 | if (ret < 0) { | ||
| 145 | dev_err(rtd->dev, "can't set codec pll: %d\n", ret); | ||
| 146 | return ret; | ||
| 147 | } | ||
| 148 | |||
| 180 | /* Configure sysclk for codec */ | 149 | /* Configure sysclk for codec */ |
| 181 | ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, | 150 | ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, |
| 182 | RT5682_PLL_FREQ, SND_SOC_CLOCK_IN); | 151 | RT5682_PLL_FREQ, SND_SOC_CLOCK_IN); |
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c index eab1f439dd3f..a4022983a7ce 100644 --- a/sound/soc/intel/boards/haswell.c +++ b/sound/soc/intel/boards/haswell.c | |||
| @@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = { | |||
| 146 | .stream_name = "Loopback", | 146 | .stream_name = "Loopback", |
| 147 | .cpu_dai_name = "Loopback Pin", | 147 | .cpu_dai_name = "Loopback Pin", |
| 148 | .platform_name = "haswell-pcm-audio", | 148 | .platform_name = "haswell-pcm-audio", |
| 149 | .dynamic = 0, | 149 | .dynamic = 1, |
| 150 | .codec_name = "snd-soc-dummy", | 150 | .codec_name = "snd-soc-dummy", |
| 151 | .codec_dai_name = "snd-soc-dummy-dai", | 151 | .codec_dai_name = "snd-soc-dummy-dai", |
| 152 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, | 152 | .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, |
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 60c94836bf5b..4ed5b7e17d44 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c | |||
| @@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev) | |||
| 336 | skl->skl_sst->fw_loaded = false; | 336 | skl->skl_sst->fw_loaded = false; |
| 337 | } | 337 | } |
| 338 | 338 | ||
| 339 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | ||
| 340 | snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); | ||
| 341 | |||
| 342 | return 0; | 339 | return 0; |
| 343 | } | 340 | } |
| 344 | 341 | ||
| @@ -350,10 +347,6 @@ static int skl_resume(struct device *dev) | |||
| 350 | struct hdac_ext_link *hlink = NULL; | 347 | struct hdac_ext_link *hlink = NULL; |
| 351 | int ret; | 348 | int ret; |
| 352 | 349 | ||
| 353 | /* Turned OFF in HDMI codec driver after codec reconfiguration */ | ||
| 354 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | ||
| 355 | snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); | ||
| 356 | |||
| 357 | /* | 350 | /* |
| 358 | * resume only when we are not in suspend active, otherwise need to | 351 | * resume only when we are not in suspend active, otherwise need to |
| 359 | * restore the device | 352 | * restore the device |
| @@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus) | |||
| 446 | snd_hdac_ext_bus_exit(bus); | 439 | snd_hdac_ext_bus_exit(bus); |
| 447 | 440 | ||
| 448 | cancel_work_sync(&skl->probe_work); | 441 | cancel_work_sync(&skl->probe_work); |
| 449 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | 442 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { |
| 443 | snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); | ||
| 450 | snd_hdac_i915_exit(bus); | 444 | snd_hdac_i915_exit(bus); |
| 445 | } | ||
| 451 | 446 | ||
| 452 | return 0; | 447 | return 0; |
| 453 | } | 448 | } |
| @@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work) | |||
| 814 | err = skl_platform_register(bus->dev); | 809 | err = skl_platform_register(bus->dev); |
| 815 | if (err < 0) { | 810 | if (err < 0) { |
| 816 | dev_err(bus->dev, "platform register failed: %d\n", err); | 811 | dev_err(bus->dev, "platform register failed: %d\n", err); |
| 817 | return; | 812 | goto out_err; |
| 818 | } | 813 | } |
| 819 | 814 | ||
| 820 | err = skl_machine_device_register(skl); | 815 | err = skl_machine_device_register(skl); |
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 5b986b74dd36..548eb4fa2da6 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c | |||
| @@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) | |||
| 570 | prtd->audio_client = q6asm_audio_client_alloc(dev, | 570 | prtd->audio_client = q6asm_audio_client_alloc(dev, |
| 571 | (q6asm_cb)compress_event_handler, | 571 | (q6asm_cb)compress_event_handler, |
| 572 | prtd, stream_id, LEGACY_PCM_MODE); | 572 | prtd, stream_id, LEGACY_PCM_MODE); |
| 573 | if (!prtd->audio_client) { | 573 | if (IS_ERR(prtd->audio_client)) { |
| 574 | dev_err(dev, "Could not allocate memory\n"); | 574 | dev_err(dev, "Could not allocate memory\n"); |
| 575 | kfree(prtd); | 575 | ret = PTR_ERR(prtd->audio_client); |
| 576 | return -ENOMEM; | 576 | goto free_prtd; |
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * | 579 | size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * |
| @@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) | |||
| 582 | &prtd->dma_buffer); | 582 | &prtd->dma_buffer); |
| 583 | if (ret) { | 583 | if (ret) { |
| 584 | dev_err(dev, "Cannot allocate buffer(s)\n"); | 584 | dev_err(dev, "Cannot allocate buffer(s)\n"); |
| 585 | return ret; | 585 | goto free_client; |
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | if (pdata->sid < 0) | 588 | if (pdata->sid < 0) |
| @@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream) | |||
| 595 | runtime->private_data = prtd; | 595 | runtime->private_data = prtd; |
| 596 | 596 | ||
| 597 | return 0; | 597 | return 0; |
| 598 | |||
| 599 | free_client: | ||
| 600 | q6asm_audio_client_free(prtd->audio_client); | ||
| 601 | free_prtd: | ||
| 602 | kfree(prtd); | ||
| 603 | |||
| 604 | return ret; | ||
| 598 | } | 605 | } |
| 599 | 606 | ||
| 600 | static int q6asm_dai_compr_free(struct snd_compr_stream *stream) | 607 | static int q6asm_dai_compr_free(struct snd_compr_stream *stream) |
| @@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev, | |||
| 874 | 881 | ||
| 875 | for_each_child_of_node(dev->of_node, node) { | 882 | for_each_child_of_node(dev->of_node, node) { |
| 876 | ret = of_property_read_u32(node, "reg", &id); | 883 | ret = of_property_read_u32(node, "reg", &id); |
| 877 | if (ret || id > MAX_SESSIONS || id < 0) { | 884 | if (ret || id >= MAX_SESSIONS || id < 0) { |
| 878 | dev_err(dev, "valid dai id not found:%d\n", ret); | 885 | dev_err(dev, "valid dai id not found:%d\n", ret); |
| 879 | continue; | 886 | continue; |
| 880 | } | 887 | } |
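
Two fixes are combined in the q6asm open path above: q6asm_audio_client_alloc() reports failure with an ERR_PTR(), so the caller has to test it with IS_ERR() and recover the errno with PTR_ERR() (a NULL check never fires), and later failures now unwind through labelled cleanup instead of leaking the client and the prtd allocation. A compact sketch of both patterns; every foo_* name below is invented and the helpers are only declared, not implemented:

        #include <linux/err.h>
        #include <linux/slab.h>

        struct foo_client;
        struct foo_stream {
                struct foo_client *client;
        };

        struct foo_client *foo_client_alloc(void);      /* assumed to return ERR_PTR() on error */
        void foo_client_free(struct foo_client *client);
        int foo_buffer_alloc(struct foo_stream *s);

        static int foo_open(struct foo_stream **out)
        {
                struct foo_stream *s;
                int ret;

                s = kzalloc(sizeof(*s), GFP_KERNEL);
                if (!s)
                        return -ENOMEM;

                s->client = foo_client_alloc();
                if (IS_ERR(s->client)) {                /* ERR_PTR, not NULL */
                        ret = PTR_ERR(s->client);
                        goto free_stream;
                }

                ret = foo_buffer_alloc(s);
                if (ret)
                        goto free_client;

                *out = s;
                return 0;

        free_client:
                foo_client_free(s->client);
        free_stream:
                kfree(s);
                return ret;
        }
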
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 1db8ef668223..6f66a58e23ca 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c | |||
| @@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream, | |||
| 158 | return ret; | 158 | return ret; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static void sdm845_jack_free(struct snd_jack *jack) | ||
| 162 | { | ||
| 163 | struct snd_soc_component *component = jack->private_data; | ||
| 164 | |||
| 165 | snd_soc_component_set_jack(component, NULL, NULL); | ||
| 166 | } | ||
| 167 | |||
| 161 | static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) | 168 | static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) |
| 162 | { | 169 | { |
| 163 | struct snd_soc_component *component; | 170 | struct snd_soc_component *component; |
| 164 | struct snd_soc_dai_link *dai_link = rtd->dai_link; | ||
| 165 | struct snd_soc_card *card = rtd->card; | 171 | struct snd_soc_card *card = rtd->card; |
| 172 | struct snd_soc_dai *codec_dai = rtd->codec_dai; | ||
| 173 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; | ||
| 166 | struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card); | 174 | struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card); |
| 167 | int i, rval; | 175 | struct snd_jack *jack; |
| 176 | int rval; | ||
| 168 | 177 | ||
| 169 | if (!pdata->jack_setup) { | 178 | if (!pdata->jack_setup) { |
| 170 | struct snd_jack *jack; | ||
| 171 | |||
| 172 | rval = snd_soc_card_jack_new(card, "Headset Jack", | 179 | rval = snd_soc_card_jack_new(card, "Headset Jack", |
| 173 | SND_JACK_HEADSET | | 180 | SND_JACK_HEADSET | |
| 174 | SND_JACK_HEADPHONE | | 181 | SND_JACK_HEADPHONE | |
| @@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) | |||
| 190 | pdata->jack_setup = true; | 197 | pdata->jack_setup = true; |
| 191 | } | 198 | } |
| 192 | 199 | ||
| 193 | for (i = 0 ; i < dai_link->num_codecs; i++) { | 200 | switch (cpu_dai->id) { |
| 194 | struct snd_soc_dai *dai = rtd->codec_dais[i]; | 201 | case PRIMARY_MI2S_RX: |
| 202 | jack = pdata->jack.jack; | ||
| 203 | component = codec_dai->component; | ||
| 195 | 204 | ||
| 196 | component = dai->component; | 205 | jack->private_data = component; |
| 197 | rval = snd_soc_component_set_jack( | 206 | jack->private_free = sdm845_jack_free; |
| 198 | component, &pdata->jack, NULL); | 207 | rval = snd_soc_component_set_jack(component, |
| 208 | &pdata->jack, NULL); | ||
| 199 | if (rval != 0 && rval != -ENOTSUPP) { | 209 | if (rval != 0 && rval != -ENOTSUPP) { |
| 200 | dev_warn(card->dev, "Failed to set jack: %d\n", rval); | 210 | dev_warn(card->dev, "Failed to set jack: %d\n", rval); |
| 201 | return rval; | 211 | return rval; |
| 202 | } | 212 | } |
| 213 | break; | ||
| 214 | default: | ||
| 215 | break; | ||
| 203 | } | 216 | } |
| 204 | 217 | ||
| 205 | return 0; | 218 | return 0; |
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 922fb6aa3ed1..5aee11c94f2a 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c | |||
| @@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream) | |||
| 202 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 202 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
| 203 | struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id]; | 203 | struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id]; |
| 204 | 204 | ||
| 205 | pr_debug("PCM data: addr 0x%08ulx len %d\n", | 205 | pr_debug("PCM data: addr 0x%08lx len %d\n", |
| 206 | (u32)runtime->dma_addr, runtime->dma_bytes); | 206 | (u32)runtime->dma_addr, runtime->dma_bytes); |
| 207 | 207 | ||
| 208 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 208 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 0462b3ec977a..aae450ba4f08 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
| @@ -742,7 +742,7 @@ static struct snd_soc_component *soc_find_component( | |||
| 742 | if (of_node) { | 742 | if (of_node) { |
| 743 | if (component->dev->of_node == of_node) | 743 | if (component->dev->of_node == of_node) |
| 744 | return component; | 744 | return component; |
| 745 | } else if (strcmp(component->name, name) == 0) { | 745 | } else if (name && strcmp(component->name, name) == 0) { |
| 746 | return component; | 746 | return component; |
| 747 | } | 747 | } |
| 748 | } | 748 | } |
| @@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card, | |||
| 1034 | * this function should be removed in the future | 1034 | * this function should be removed in the future |
| 1035 | */ | 1035 | */ |
| 1036 | /* convert Legacy platform link */ | 1036 | /* convert Legacy platform link */ |
| 1037 | if (!platform) { | 1037 | if (!platform || dai_link->legacy_platform) { |
| 1038 | platform = devm_kzalloc(card->dev, | 1038 | platform = devm_kzalloc(card->dev, |
| 1039 | sizeof(struct snd_soc_dai_link_component), | 1039 | sizeof(struct snd_soc_dai_link_component), |
| 1040 | GFP_KERNEL); | 1040 | GFP_KERNEL); |
| 1041 | if (!platform) | 1041 | if (!platform) |
| 1042 | return -ENOMEM; | 1042 | return -ENOMEM; |
| 1043 | 1043 | ||
| 1044 | dai_link->platform = platform; | 1044 | dai_link->platform = platform; |
| 1045 | platform->name = dai_link->platform_name; | 1045 | dai_link->legacy_platform = 1; |
| 1046 | platform->of_node = dai_link->platform_of_node; | 1046 | platform->name = dai_link->platform_name; |
| 1047 | platform->dai_name = NULL; | 1047 | platform->of_node = dai_link->platform_of_node; |
| 1048 | platform->dai_name = NULL; | ||
| 1048 | } | 1049 | } |
| 1049 | 1050 | ||
| 1050 | /* if there's no platform we match on the empty platform */ | 1051 | /* if there's no platform we match on the empty platform */ |
| @@ -1129,6 +1130,15 @@ static int soc_init_dai_link(struct snd_soc_card *card, | |||
| 1129 | link->name); | 1130 | link->name); |
| 1130 | return -EINVAL; | 1131 | return -EINVAL; |
| 1131 | } | 1132 | } |
| 1133 | |||
| 1134 | /* | ||
| 1135 | * Defer card registration if platform dai component is not added to | ||
| 1136 | * component list. | ||
| 1137 | */ | ||
| 1138 | if ((link->platform->of_node || link->platform->name) && | ||
| 1139 | !soc_find_component(link->platform->of_node, link->platform->name)) | ||
| 1140 | return -EPROBE_DEFER; | ||
| 1141 | |||
| 1132 | /* | 1142 | /* |
| 1133 | * CPU device may be specified by either name or OF node, but | 1143 | * CPU device may be specified by either name or OF node, but |
| 1134 | * can be left unspecified, and will be matched based on DAI | 1144 | * can be left unspecified, and will be matched based on DAI |
| @@ -1140,6 +1150,15 @@ static int soc_init_dai_link(struct snd_soc_card *card, | |||
| 1140 | link->name); | 1150 | link->name); |
| 1141 | return -EINVAL; | 1151 | return -EINVAL; |
| 1142 | } | 1152 | } |
| 1153 | |||
| 1154 | /* | ||
| 1155 | * Defer card registration if cpu dai component is not added to | ||
| 1156 | * component list. | ||
| 1157 | */ | ||
| 1158 | if ((link->cpu_of_node || link->cpu_name) && | ||
| 1159 | !soc_find_component(link->cpu_of_node, link->cpu_name)) | ||
| 1160 | return -EPROBE_DEFER; | ||
| 1161 | |||
| 1143 | /* | 1162 | /* |
| 1144 | * At least one of CPU DAI name or CPU device name/node must be | 1163 | * At least one of CPU DAI name or CPU device name/node must be |
| 1145 | * specified | 1164 | * specified |
| @@ -2739,15 +2758,18 @@ int snd_soc_register_card(struct snd_soc_card *card) | |||
| 2739 | if (!card->name || !card->dev) | 2758 | if (!card->name || !card->dev) |
| 2740 | return -EINVAL; | 2759 | return -EINVAL; |
| 2741 | 2760 | ||
| 2761 | mutex_lock(&client_mutex); | ||
| 2742 | for_each_card_prelinks(card, i, link) { | 2762 | for_each_card_prelinks(card, i, link) { |
| 2743 | 2763 | ||
| 2744 | ret = soc_init_dai_link(card, link); | 2764 | ret = soc_init_dai_link(card, link); |
| 2745 | if (ret) { | 2765 | if (ret) { |
| 2746 | dev_err(card->dev, "ASoC: failed to init link %s\n", | 2766 | dev_err(card->dev, "ASoC: failed to init link %s\n", |
| 2747 | link->name); | 2767 | link->name); |
| 2768 | mutex_unlock(&client_mutex); | ||
| 2748 | return ret; | 2769 | return ret; |
| 2749 | } | 2770 | } |
| 2750 | } | 2771 | } |
| 2772 | mutex_unlock(&client_mutex); | ||
| 2751 | 2773 | ||
| 2752 | dev_set_drvdata(card->dev, card); | 2774 | dev_set_drvdata(card->dev, card); |
| 2753 | 2775 | ||
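
The soc-core change above has two parts: soc_init_dai_link() now returns -EPROBE_DEFER when a platform or CPU DAI component named by the link has not registered yet, and the prelink scan in snd_soc_register_card() runs under client_mutex so the component list cannot change underneath it. Returning -EPROBE_DEFER is not an error; it asks the driver core to retry the probe after more devices have appeared. A minimal illustration of the idiom, with an assumed lookup helper:

        #include <linux/errno.h>

        struct foo_component;

        /* assumed helper: NULL means the component has not registered yet */
        struct foo_component *foo_find_component(const char *name);

        static int foo_link_init(const char *component_name)
        {
                if (component_name && !foo_find_component(component_name))
                        return -EPROBE_DEFER;   /* not a failure: probe is retried later */

                /* component is present, keep wiring up the link */
                return 0;
        }
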
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index a5178845065b..2c4c13419539 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
| @@ -2019,19 +2019,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file, | |||
| 2019 | out = is_connected_output_ep(w, NULL, NULL); | 2019 | out = is_connected_output_ep(w, NULL, NULL); |
| 2020 | } | 2020 | } |
| 2021 | 2021 | ||
| 2022 | ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", | 2022 | ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", |
| 2023 | w->name, w->power ? "On" : "Off", | 2023 | w->name, w->power ? "On" : "Off", |
| 2024 | w->force ? " (forced)" : "", in, out); | 2024 | w->force ? " (forced)" : "", in, out); |
| 2025 | 2025 | ||
| 2026 | if (w->reg >= 0) | 2026 | if (w->reg >= 0) |
| 2027 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 2027 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 2028 | " - R%d(0x%x) mask 0x%x", | 2028 | " - R%d(0x%x) mask 0x%x", |
| 2029 | w->reg, w->reg, w->mask << w->shift); | 2029 | w->reg, w->reg, w->mask << w->shift); |
| 2030 | 2030 | ||
| 2031 | ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); | 2031 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); |
| 2032 | 2032 | ||
| 2033 | if (w->sname) | 2033 | if (w->sname) |
| 2034 | ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", | 2034 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", |
| 2035 | w->sname, | 2035 | w->sname, |
| 2036 | w->active ? "active" : "inactive"); | 2036 | w->active ? "active" : "inactive"); |
| 2037 | 2037 | ||
| @@ -2044,7 +2044,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, | |||
| 2044 | if (!p->connect) | 2044 | if (!p->connect) |
| 2045 | continue; | 2045 | continue; |
| 2046 | 2046 | ||
| 2047 | ret += snprintf(buf + ret, PAGE_SIZE - ret, | 2047 | ret += scnprintf(buf + ret, PAGE_SIZE - ret, |
| 2048 | " %s \"%s\" \"%s\"\n", | 2048 | " %s \"%s\" \"%s\"\n", |
| 2049 | (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", | 2049 | (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", |
| 2050 | p->name ? p->name : "static", | 2050 | p->name ? p->name : "static", |
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index eeda6d5565bc..a10fcb5963c6 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c | |||
| @@ -108,7 +108,7 @@ struct davinci_mcasp { | |||
| 108 | /* Used for constraint setting on the second stream | 108 | /* Used for constraint setting on the second stream |
| 109 | u32 channels; | 109 | u32 channels; |
| 110 | 110 | ||
| 111 | #ifdef CONFIG_PM_SLEEP | 111 | #ifdef CONFIG_PM |
| 112 | struct davinci_mcasp_context context; | 112 | struct davinci_mcasp_context context; |
| 113 | #endif | 113 | #endif |
| 114 | 114 | ||
| @@ -1486,74 +1486,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai) | |||
| 1486 | return 0; | 1486 | return 0; |
| 1487 | } | 1487 | } |
| 1488 | 1488 | ||
| 1489 | #ifdef CONFIG_PM_SLEEP | ||
| 1490 | static int davinci_mcasp_suspend(struct snd_soc_dai *dai) | ||
| 1491 | { | ||
| 1492 | struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai); | ||
| 1493 | struct davinci_mcasp_context *context = &mcasp->context; | ||
| 1494 | u32 reg; | ||
| 1495 | int i; | ||
| 1496 | |||
| 1497 | context->pm_state = pm_runtime_active(mcasp->dev); | ||
| 1498 | if (!context->pm_state) | ||
| 1499 | pm_runtime_get_sync(mcasp->dev); | ||
| 1500 | |||
| 1501 | for (i = 0; i < ARRAY_SIZE(context_regs); i++) | ||
| 1502 | context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]); | ||
| 1503 | |||
| 1504 | if (mcasp->txnumevt) { | ||
| 1505 | reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; | ||
| 1506 | context->afifo_regs[0] = mcasp_get_reg(mcasp, reg); | ||
| 1507 | } | ||
| 1508 | if (mcasp->rxnumevt) { | ||
| 1509 | reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; | ||
| 1510 | context->afifo_regs[1] = mcasp_get_reg(mcasp, reg); | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | for (i = 0; i < mcasp->num_serializer; i++) | ||
| 1514 | context->xrsr_regs[i] = mcasp_get_reg(mcasp, | ||
| 1515 | DAVINCI_MCASP_XRSRCTL_REG(i)); | ||
| 1516 | |||
| 1517 | pm_runtime_put_sync(mcasp->dev); | ||
| 1518 | |||
| 1519 | return 0; | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | static int davinci_mcasp_resume(struct snd_soc_dai *dai) | ||
| 1523 | { | ||
| 1524 | struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai); | ||
| 1525 | struct davinci_mcasp_context *context = &mcasp->context; | ||
| 1526 | u32 reg; | ||
| 1527 | int i; | ||
| 1528 | |||
| 1529 | pm_runtime_get_sync(mcasp->dev); | ||
| 1530 | |||
| 1531 | for (i = 0; i < ARRAY_SIZE(context_regs); i++) | ||
| 1532 | mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]); | ||
| 1533 | |||
| 1534 | if (mcasp->txnumevt) { | ||
| 1535 | reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; | ||
| 1536 | mcasp_set_reg(mcasp, reg, context->afifo_regs[0]); | ||
| 1537 | } | ||
| 1538 | if (mcasp->rxnumevt) { | ||
| 1539 | reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; | ||
| 1540 | mcasp_set_reg(mcasp, reg, context->afifo_regs[1]); | ||
| 1541 | } | ||
| 1542 | |||
| 1543 | for (i = 0; i < mcasp->num_serializer; i++) | ||
| 1544 | mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i), | ||
| 1545 | context->xrsr_regs[i]); | ||
| 1546 | |||
| 1547 | if (!context->pm_state) | ||
| 1548 | pm_runtime_put_sync(mcasp->dev); | ||
| 1549 | |||
| 1550 | return 0; | ||
| 1551 | } | ||
| 1552 | #else | ||
| 1553 | #define davinci_mcasp_suspend NULL | ||
| 1554 | #define davinci_mcasp_resume NULL | ||
| 1555 | #endif | ||
| 1556 | |||
| 1557 | #define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000 | 1489 | #define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000 |
| 1558 | 1490 | ||
| 1559 | #define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \ | 1491 | #define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \ |
| @@ -1571,8 +1503,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = { | |||
| 1571 | { | 1503 | { |
| 1572 | .name = "davinci-mcasp.0", | 1504 | .name = "davinci-mcasp.0", |
| 1573 | .probe = davinci_mcasp_dai_probe, | 1505 | .probe = davinci_mcasp_dai_probe, |
| 1574 | .suspend = davinci_mcasp_suspend, | ||
| 1575 | .resume = davinci_mcasp_resume, | ||
| 1576 | .playback = { | 1506 | .playback = { |
| 1577 | .channels_min = 1, | 1507 | .channels_min = 1, |
| 1578 | .channels_max = 32 * 16, | 1508 | .channels_max = 32 * 16, |
| @@ -1976,7 +1906,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev) | |||
| 1976 | } | 1906 | } |
| 1977 | 1907 | ||
| 1978 | mcasp->num_serializer = pdata->num_serializer; | 1908 | mcasp->num_serializer = pdata->num_serializer; |
| 1979 | #ifdef CONFIG_PM_SLEEP | 1909 | #ifdef CONFIG_PM |
| 1980 | mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev, | 1910 | mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev, |
| 1981 | mcasp->num_serializer, sizeof(u32), | 1911 | mcasp->num_serializer, sizeof(u32), |
| 1982 | GFP_KERNEL); | 1912 | GFP_KERNEL); |
| @@ -2196,11 +2126,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev) | |||
| 2196 | return 0; | 2126 | return 0; |
| 2197 | } | 2127 | } |
| 2198 | 2128 | ||
| 2129 | #ifdef CONFIG_PM | ||
| 2130 | static int davinci_mcasp_runtime_suspend(struct device *dev) | ||
| 2131 | { | ||
| 2132 | struct davinci_mcasp *mcasp = dev_get_drvdata(dev); | ||
| 2133 | struct davinci_mcasp_context *context = &mcasp->context; | ||
| 2134 | u32 reg; | ||
| 2135 | int i; | ||
| 2136 | |||
| 2137 | for (i = 0; i < ARRAY_SIZE(context_regs); i++) | ||
| 2138 | context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]); | ||
| 2139 | |||
| 2140 | if (mcasp->txnumevt) { | ||
| 2141 | reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; | ||
| 2142 | context->afifo_regs[0] = mcasp_get_reg(mcasp, reg); | ||
| 2143 | } | ||
| 2144 | if (mcasp->rxnumevt) { | ||
| 2145 | reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; | ||
| 2146 | context->afifo_regs[1] = mcasp_get_reg(mcasp, reg); | ||
| 2147 | } | ||
| 2148 | |||
| 2149 | for (i = 0; i < mcasp->num_serializer; i++) | ||
| 2150 | context->xrsr_regs[i] = mcasp_get_reg(mcasp, | ||
| 2151 | DAVINCI_MCASP_XRSRCTL_REG(i)); | ||
| 2152 | |||
| 2153 | return 0; | ||
| 2154 | } | ||
| 2155 | |||
| 2156 | static int davinci_mcasp_runtime_resume(struct device *dev) | ||
| 2157 | { | ||
| 2158 | struct davinci_mcasp *mcasp = dev_get_drvdata(dev); | ||
| 2159 | struct davinci_mcasp_context *context = &mcasp->context; | ||
| 2160 | u32 reg; | ||
| 2161 | int i; | ||
| 2162 | |||
| 2163 | for (i = 0; i < ARRAY_SIZE(context_regs); i++) | ||
| 2164 | mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]); | ||
| 2165 | |||
| 2166 | if (mcasp->txnumevt) { | ||
| 2167 | reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET; | ||
| 2168 | mcasp_set_reg(mcasp, reg, context->afifo_regs[0]); | ||
| 2169 | } | ||
| 2170 | if (mcasp->rxnumevt) { | ||
| 2171 | reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET; | ||
| 2172 | mcasp_set_reg(mcasp, reg, context->afifo_regs[1]); | ||
| 2173 | } | ||
| 2174 | |||
| 2175 | for (i = 0; i < mcasp->num_serializer; i++) | ||
| 2176 | mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i), | ||
| 2177 | context->xrsr_regs[i]); | ||
| 2178 | |||
| 2179 | return 0; | ||
| 2180 | } | ||
| 2181 | |||
| 2182 | #endif | ||
| 2183 | |||
| 2184 | static const struct dev_pm_ops davinci_mcasp_pm_ops = { | ||
| 2185 | SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend, | ||
| 2186 | davinci_mcasp_runtime_resume, | ||
| 2187 | NULL) | ||
| 2188 | }; | ||
| 2189 | |||
| 2199 | static struct platform_driver davinci_mcasp_driver = { | 2190 | static struct platform_driver davinci_mcasp_driver = { |
| 2200 | .probe = davinci_mcasp_probe, | 2191 | .probe = davinci_mcasp_probe, |
| 2201 | .remove = davinci_mcasp_remove, | 2192 | .remove = davinci_mcasp_remove, |
| 2202 | .driver = { | 2193 | .driver = { |
| 2203 | .name = "davinci-mcasp", | 2194 | .name = "davinci-mcasp", |
| 2195 | .pm = &davinci_mcasp_pm_ops, | ||
| 2204 | .of_match_table = mcasp_dt_ids, | 2196 | .of_match_table = mcasp_dt_ids, |
| 2205 | }, | 2197 | }, |
| 2206 | }; | 2198 | }; |
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig index 25e287feb58c..723a583a8d57 100644 --- a/sound/soc/xilinx/Kconfig +++ b/sound/soc/xilinx/Kconfig | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | config SND_SOC_XILINX_I2S | 1 | config SND_SOC_XILINX_I2S |
| 2 | tristate "Audio support for the the Xilinx I2S" | 2 | tristate "Audio support for the Xilinx I2S" |
| 3 | help | 3 | help |
| 4 | Select this option to enable Xilinx I2S Audio. This enables | 4 | Select this option to enable Xilinx I2S Audio. This enables |
| 5 | I2S playback and capture using xilinx soft IP. In transmitter | 5 | I2S playback and capture using xilinx soft IP. In transmitter |
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c index d4ae9eff41ce..8b353166ad44 100644 --- a/sound/soc/xilinx/xlnx_i2s.c +++ b/sound/soc/xilinx/xlnx_i2s.c | |||
| @@ -1,12 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | // |
| 3 | * Xilinx ASoC I2S audio support | 3 | // Xilinx ASoC I2S audio support |
| 4 | * | 4 | // |
| 5 | * Copyright (C) 2018 Xilinx, Inc. | 5 | // Copyright (C) 2018 Xilinx, Inc. |
| 6 | * | 6 | // |
| 7 | * Author: Praveen Vuppala <praveenv@xilinx.com> | 7 | // Author: Praveen Vuppala <praveenv@xilinx.com> |
| 8 | * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> | 8 | // Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> |
| 9 | */ | ||
| 10 | 9 | ||
| 11 | #include <linux/io.h> | 10 | #include <linux/io.h> |
| 12 | #include <linux/module.h> | 11 | #include <linux/module.h> |
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h | |||
| @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
| 47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
| 48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
| 49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
| 50 | PERF_REG_POWERPC_MMCRA, | ||
| 50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
| 51 | }; | 52 | }; |
| 52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 492f0f24e2d3..4ad1f0894d53 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile | |||
| @@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c | |||
| 93 | SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) | 93 | SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) |
| 94 | 94 | ||
| 95 | ifeq ($(feature-libbfd),1) | 95 | ifeq ($(feature-libbfd),1) |
| 96 | LIBS += -lbfd -ldl -lopcodes | ||
| 97 | else ifeq ($(feature-libbfd-liberty),1) | ||
| 98 | LIBS += -lbfd -ldl -lopcodes -liberty | ||
| 99 | else ifeq ($(feature-libbfd-liberty-z),1) | ||
| 100 | LIBS += -lbfd -ldl -lopcodes -liberty -lz | ||
| 101 | endif | ||
| 102 | |||
| 103 | ifneq ($(filter -lbfd,$(LIBS)),) | ||
| 96 | CFLAGS += -DHAVE_LIBBFD_SUPPORT | 104 | CFLAGS += -DHAVE_LIBBFD_SUPPORT |
| 97 | SRCS += $(BFD_SRCS) | 105 | SRCS += $(BFD_SRCS) |
| 98 | LIBS += -lbfd -lopcodes | ||
| 99 | endif | 106 | endif |
| 100 | 107 | ||
| 101 | OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o | 108 | OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o |
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 3f0629edbca5..6ba5f567a9d8 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c | |||
| @@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset, | |||
| 82 | int bits_to_copy; | 82 | int bits_to_copy; |
| 83 | __u64 print_num; | 83 | __u64 print_num; |
| 84 | 84 | ||
| 85 | data += BITS_ROUNDDOWN_BYTES(bit_offset); | ||
| 86 | bit_offset = BITS_PER_BYTE_MASKED(bit_offset); | ||
| 87 | bits_to_copy = bit_offset + nr_bits; | 85 | bits_to_copy = bit_offset + nr_bits; |
| 88 | bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); | 86 | bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); |
| 89 | 87 | ||
| @@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, | |||
| 118 | * BTF_INT_OFFSET() cannot exceed 64 bits. | 116 | * BTF_INT_OFFSET() cannot exceed 64 bits. |
| 119 | */ | 117 | */ |
| 120 | total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); | 118 | total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); |
| 121 | btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, | 119 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
| 120 | bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset); | ||
| 121 | btf_dumper_bitfield(nr_bits, bit_offset, data, jw, | ||
| 122 | is_plain_text); | 122 | is_plain_text); |
| 123 | } | 123 | } |
| 124 | 124 | ||
| @@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id, | |||
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); | 218 | jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); |
| 219 | data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); | ||
| 219 | if (bitfield_size) { | 220 | if (bitfield_size) { |
| 220 | btf_dumper_bitfield(bitfield_size, bit_offset, | 221 | btf_dumper_bitfield(bitfield_size, |
| 221 | data, d->jw, d->is_plain_text); | 222 | BITS_PER_BYTE_MASKED(bit_offset), |
| 223 | data_off, d->jw, d->is_plain_text); | ||
| 222 | } else { | 224 | } else { |
| 223 | data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset); | ||
| 224 | ret = btf_dumper_do_type(d, m[i].type, | 225 | ret = btf_dumper_do_type(d, m[i].type, |
| 225 | BITS_PER_BYTE_MASKED(bit_offset), | 226 | BITS_PER_BYTE_MASKED(bit_offset), |
| 226 | data_off); | 227 | data_off); |
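
The btf_dumper change above moves the byte/bit splitting of a bitfield's offset out of btf_dumper_bitfield() and into its callers, so the data pointer is advanced to the containing byte and only the remaining in-byte offset is passed down. The split itself is just a divide and modulo by 8, as this small user-space example (same macros, invented data) shows:

        #include <stdio.h>
        #include <stdint.h>

        #define BITS_PER_BYTE_MASKED(bits)      ((bits) & 0x7)
        #define BITS_ROUNDDOWN_BYTES(bits)      ((bits) >> 3)

        int main(void)
        {
                const uint8_t blob[8] = { 0 };
                uint32_t bit_offset = 19;       /* e.g. a member 19 bits into a struct */

                /* advance to the containing byte, keep only the remainder as bits */
                const uint8_t *p = blob + BITS_ROUNDDOWN_BYTES(bit_offset);
                unsigned int in_byte = BITS_PER_BYTE_MASKED(bit_offset);

                printf("byte %td, bit %u\n", p - blob, in_byte);        /* byte 2, bit 3 */
                return 0;
        }
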
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c index bff7ee026680..6046dcab51cc 100644 --- a/tools/bpf/bpftool/json_writer.c +++ b/tools/bpf/bpftool/json_writer.c | |||
| @@ -1,15 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) | 1 | // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) |
| 2 | /* | 2 | /* |
| 3 | * Simple streaming JSON writer | 3 | * Simple streaming JSON writer |
| 4 | * | 4 | * |
| 5 | * This takes care of the annoying bits of JSON syntax like the commas | 5 | * This takes care of the annoying bits of JSON syntax like the commas |
| 6 | * after elements | 6 | * after elements |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * as published by the Free Software Foundation; either version | ||
| 11 | * 2 of the License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * Authors: Stephen Hemminger <stephen@networkplumber.org> | 8 | * Authors: Stephen Hemminger <stephen@networkplumber.org> |
| 14 | */ | 9 | */ |
| 15 | 10 | ||
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h index c1ab51aed99c..cb9a1993681c 100644 --- a/tools/bpf/bpftool/json_writer.h +++ b/tools/bpf/bpftool/json_writer.h | |||
| @@ -5,11 +5,6 @@ | |||
| 5 | * This takes care of the annoying bits of JSON syntax like the commas | 5 | * This takes care of the annoying bits of JSON syntax like the commas |
| 6 | * after elements | 6 | * after elements |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * as published by the Free Software Foundation; either version | ||
| 11 | * 2 of the License, or (at your option) any later version. | ||
| 12 | * | ||
| 13 | * Authors: Stephen Hemminger <stephen@networkplumber.org> | 8 | * Authors: Stephen Hemminger <stephen@networkplumber.org> |
| 14 | */ | 9 | */ |
| 15 | 10 | ||
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h new file mode 100644 index 000000000000..0d18b1d1fbbc --- /dev/null +++ b/tools/include/uapi/linux/pkt_sched.h | |||
| @@ -0,0 +1,1163 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
| 2 | #ifndef __LINUX_PKT_SCHED_H | ||
| 3 | #define __LINUX_PKT_SCHED_H | ||
| 4 | |||
| 5 | #include <linux/types.h> | ||
| 6 | |||
| 7 | /* Logical priority bands not depending on specific packet scheduler. | ||
| 8 | Every scheduler will map them to real traffic classes, if it has | ||
| 9 | no more precise mechanism to classify packets. | ||
| 10 | |||
| 11 | These numbers have no special meaning, though their coincidence | ||
| 12 | with obsolete IPv6 values is not accidental :-). New IPv6 drafts | ||

| 13 | preferred full anarchy inspired by diffserv group. | ||
| 14 | |||
| 15 | Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy | ||
| 16 | class; actually, as a rule it will be handled with more care than | ||
| 17 | filler or even bulk. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #define TC_PRIO_BESTEFFORT 0 | ||
| 21 | #define TC_PRIO_FILLER 1 | ||
| 22 | #define TC_PRIO_BULK 2 | ||
| 23 | #define TC_PRIO_INTERACTIVE_BULK 4 | ||
| 24 | #define TC_PRIO_INTERACTIVE 6 | ||
| 25 | #define TC_PRIO_CONTROL 7 | ||
| 26 | |||
| 27 | #define TC_PRIO_MAX 15 | ||
| 28 | |||
| 29 | /* Generic queue statistics, available for all the elements. | ||
| 30 | Particular schedulers may have also their private records. | ||
| 31 | */ | ||
| 32 | |||
| 33 | struct tc_stats { | ||
| 34 | __u64 bytes; /* Number of enqueued bytes */ | ||
| 35 | __u32 packets; /* Number of enqueued packets */ | ||
| 36 | __u32 drops; /* Packets dropped because of lack of resources */ | ||
| 37 | __u32 overlimits; /* Number of throttle events when this | ||
| 38 | * flow goes out of allocated bandwidth */ | ||
| 39 | __u32 bps; /* Current flow byte rate */ | ||
| 40 | __u32 pps; /* Current flow packet rate */ | ||
| 41 | __u32 qlen; | ||
| 42 | __u32 backlog; | ||
| 43 | }; | ||
| 44 | |||
| 45 | struct tc_estimator { | ||
| 46 | signed char interval; | ||
| 47 | unsigned char ewma_log; | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* "Handles" | ||
| 51 | --------- | ||
| 52 | |||
| 53 | All the traffic control objects have 32bit identifiers, or "handles". | ||
| 54 | |||
| 55 | They can be considered as opaque numbers from user API viewpoint, | ||
| 56 | but actually they always consist of two fields: major and | ||
| 57 | minor numbers, which are interpreted by kernel specially, | ||
| 58 | that may be used by applications, though not recommended. | ||
| 59 | |||
| 60 | F.e. qdisc handles always have minor number equal to zero, | ||
| 61 | classes (or flows) have major equal to parent qdisc major, and | ||
| 62 | minor uniquely identifying class inside qdisc. | ||
| 63 | |||
| 64 | Macros to manipulate handles: | ||
| 65 | */ | ||
| 66 | |||
| 67 | #define TC_H_MAJ_MASK (0xFFFF0000U) | ||
| 68 | #define TC_H_MIN_MASK (0x0000FFFFU) | ||
| 69 | #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK) | ||
| 70 | #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK) | ||
| 71 | #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK)) | ||
| 72 | |||
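As an aside, the handle macros above pack a 16-bit major and a 16-bit minor into one 32-bit identifier. A short illustrative sketch (not part of the header; the values are hypothetical, and tc prints handles in hex, so this is the class handle "1:10"):

#include <linux/pkt_sched.h>    /* the header being added above */
#include <stdio.h>

int main(void)
{
        unsigned int h = TC_H_MAKE(0x1U << 16, 0x10U);  /* parent qdisc 1:, class :10 */

        printf("%x:%x\n", TC_H_MAJ(h) >> 16, TC_H_MIN(h));     /* prints "1:10" */
        return 0;
}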
| 73 | #define TC_H_UNSPEC (0U) | ||
| 74 | #define TC_H_ROOT (0xFFFFFFFFU) | ||
| 75 | #define TC_H_INGRESS (0xFFFFFFF1U) | ||
| 76 | #define TC_H_CLSACT TC_H_INGRESS | ||
| 77 | |||
| 78 | #define TC_H_MIN_PRIORITY 0xFFE0U | ||
| 79 | #define TC_H_MIN_INGRESS 0xFFF2U | ||
| 80 | #define TC_H_MIN_EGRESS 0xFFF3U | ||
| 81 | |||
| 82 | /* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */ | ||
| 83 | enum tc_link_layer { | ||
| 84 | TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ | ||
| 85 | TC_LINKLAYER_ETHERNET, | ||
| 86 | TC_LINKLAYER_ATM, | ||
| 87 | }; | ||
| 88 | #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ | ||
| 89 | |||
| 90 | struct tc_ratespec { | ||
| 91 | unsigned char cell_log; | ||
| 92 | __u8 linklayer; /* lower 4 bits */ | ||
| 93 | unsigned short overhead; | ||
| 94 | short cell_align; | ||
| 95 | unsigned short mpu; | ||
| 96 | __u32 rate; | ||
| 97 | }; | ||
| 98 | |||
| 99 | #define TC_RTAB_SIZE 1024 | ||
| 100 | |||
| 101 | struct tc_sizespec { | ||
| 102 | unsigned char cell_log; | ||
| 103 | unsigned char size_log; | ||
| 104 | short cell_align; | ||
| 105 | int overhead; | ||
| 106 | unsigned int linklayer; | ||
| 107 | unsigned int mpu; | ||
| 108 | unsigned int mtu; | ||
| 109 | unsigned int tsize; | ||
| 110 | }; | ||
| 111 | |||
| 112 | enum { | ||
| 113 | TCA_STAB_UNSPEC, | ||
| 114 | TCA_STAB_BASE, | ||
| 115 | TCA_STAB_DATA, | ||
| 116 | __TCA_STAB_MAX | ||
| 117 | }; | ||
| 118 | |||
| 119 | #define TCA_STAB_MAX (__TCA_STAB_MAX - 1) | ||
| 120 | |||
| 121 | /* FIFO section */ | ||
| 122 | |||
| 123 | struct tc_fifo_qopt { | ||
| 124 | __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ | ||
| 125 | }; | ||
| 126 | |||
| 127 | /* SKBPRIO section */ | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1). | ||
| 131 | * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able | ||
| 132 | * to map one to one the DS field of IPV4 and IPV6 headers. | ||
| 133 | * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY. | ||
| 134 | */ | ||
| 135 | |||
| 136 | #define SKBPRIO_MAX_PRIORITY 64 | ||
| 137 | |||
| 138 | struct tc_skbprio_qopt { | ||
| 139 | __u32 limit; /* Queue length in packets. */ | ||
| 140 | }; | ||
| 141 | |||
| 142 | /* PRIO section */ | ||
| 143 | |||
| 144 | #define TCQ_PRIO_BANDS 16 | ||
| 145 | #define TCQ_MIN_PRIO_BANDS 2 | ||
| 146 | |||
| 147 | struct tc_prio_qopt { | ||
| 148 | int bands; /* Number of bands */ | ||
| 149 | __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ | ||
| 150 | }; | ||
| 151 | |||
| 152 | /* MULTIQ section */ | ||
| 153 | |||
| 154 | struct tc_multiq_qopt { | ||
| 155 | __u16 bands; /* Number of bands */ | ||
| 156 | __u16 max_bands; /* Maximum number of queues */ | ||
| 157 | }; | ||
| 158 | |||
| 159 | /* PLUG section */ | ||
| 160 | |||
| 161 | #define TCQ_PLUG_BUFFER 0 | ||
| 162 | #define TCQ_PLUG_RELEASE_ONE 1 | ||
| 163 | #define TCQ_PLUG_RELEASE_INDEFINITE 2 | ||
| 164 | #define TCQ_PLUG_LIMIT 3 | ||
| 165 | |||
| 166 | struct tc_plug_qopt { | ||
| 167 | /* TCQ_PLUG_BUFFER: Insert a plug into the queue and | ||
| 168 | * buffer any incoming packets | ||
| 169 | * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head | ||
| 170 | * to beginning of the next plug. | ||
| 171 | * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. | ||
| 172 | * Stop buffering packets until the next TCQ_PLUG_BUFFER | ||
| 173 | * command is received (just act as a pass-thru queue). | ||
| 174 | * TCQ_PLUG_LIMIT: Increase/decrease queue size | ||
| 175 | */ | ||
| 176 | int action; | ||
| 177 | __u32 limit; | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* TBF section */ | ||
| 181 | |||
| 182 | struct tc_tbf_qopt { | ||
| 183 | struct tc_ratespec rate; | ||
| 184 | struct tc_ratespec peakrate; | ||
| 185 | __u32 limit; | ||
| 186 | __u32 buffer; | ||
| 187 | __u32 mtu; | ||
| 188 | }; | ||
| 189 | |||
| 190 | enum { | ||
| 191 | TCA_TBF_UNSPEC, | ||
| 192 | TCA_TBF_PARMS, | ||
| 193 | TCA_TBF_RTAB, | ||
| 194 | TCA_TBF_PTAB, | ||
| 195 | TCA_TBF_RATE64, | ||
| 196 | TCA_TBF_PRATE64, | ||
| 197 | TCA_TBF_BURST, | ||
| 198 | TCA_TBF_PBURST, | ||
| 199 | TCA_TBF_PAD, | ||
| 200 | __TCA_TBF_MAX, | ||
| 201 | }; | ||
| 202 | |||
| 203 | #define TCA_TBF_MAX (__TCA_TBF_MAX - 1) | ||
| 204 | |||
| 205 | |||
| 206 | /* TEQL section */ | ||
| 207 | |||
| 208 | /* TEQL does not require any parameters */ | ||
| 209 | |||
| 210 | /* SFQ section */ | ||
| 211 | |||
| 212 | struct tc_sfq_qopt { | ||
| 213 | unsigned quantum; /* Bytes per round allocated to flow */ | ||
| 214 | int perturb_period; /* Period of hash perturbation */ | ||
| 215 | __u32 limit; /* Maximal packets in queue */ | ||
| 216 | unsigned divisor; /* Hash divisor */ | ||
| 217 | unsigned flows; /* Maximal number of flows */ | ||
| 218 | }; | ||
| 219 | |||
| 220 | struct tc_sfqred_stats { | ||
| 221 | __u32 prob_drop; /* Early drops, below max threshold */ | ||
| 222 | __u32 forced_drop; /* Early drops, after max threshold */ | ||
| 223 | __u32 prob_mark; /* Marked packets, below max threshold */ | ||
| 224 | __u32 forced_mark; /* Marked packets, after max threshold */ | ||
| 225 | __u32 prob_mark_head; /* Marked packets, below max threshold */ | ||
| 226 | __u32 forced_mark_head;/* Marked packets, after max threshold */ | ||
| 227 | }; | ||
| 228 | |||
| 229 | struct tc_sfq_qopt_v1 { | ||
| 230 | struct tc_sfq_qopt v0; | ||
| 231 | unsigned int depth; /* max number of packets per flow */ | ||
| 232 | unsigned int headdrop; | ||
| 233 | /* SFQRED parameters */ | ||
| 234 | __u32 limit; /* HARD maximal flow queue length (bytes) */ | ||
| 235 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 236 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 237 | unsigned char Wlog; /* log(W) */ | ||
| 238 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 239 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 240 | unsigned char flags; | ||
| 241 | __u32 max_P; /* probability, high resolution */ | ||
| 242 | /* SFQRED stats */ | ||
| 243 | struct tc_sfqred_stats stats; | ||
| 244 | }; | ||
| 245 | |||
| 246 | |||
| 247 | struct tc_sfq_xstats { | ||
| 248 | __s32 allot; | ||
| 249 | }; | ||
| 250 | |||
| 251 | /* RED section */ | ||
| 252 | |||
| 253 | enum { | ||
| 254 | TCA_RED_UNSPEC, | ||
| 255 | TCA_RED_PARMS, | ||
| 256 | TCA_RED_STAB, | ||
| 257 | TCA_RED_MAX_P, | ||
| 258 | __TCA_RED_MAX, | ||
| 259 | }; | ||
| 260 | |||
| 261 | #define TCA_RED_MAX (__TCA_RED_MAX - 1) | ||
| 262 | |||
| 263 | struct tc_red_qopt { | ||
| 264 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
| 265 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 266 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 267 | unsigned char Wlog; /* log(W) */ | ||
| 268 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 269 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 270 | unsigned char flags; | ||
| 271 | #define TC_RED_ECN 1 | ||
| 272 | #define TC_RED_HARDDROP 2 | ||
| 273 | #define TC_RED_ADAPTATIVE 4 | ||
| 274 | }; | ||
| 275 | |||
| 276 | struct tc_red_xstats { | ||
| 277 | __u32 early; /* Early drops */ | ||
| 278 | __u32 pdrop; /* Drops due to queue limits */ | ||
| 279 | __u32 other; /* Drops due to drop() calls */ | ||
| 280 | __u32 marked; /* Marked packets */ | ||
| 281 | }; | ||
| 282 | |||
| 283 | /* GRED section */ | ||
| 284 | |||
| 285 | #define MAX_DPs 16 | ||
| 286 | |||
| 287 | enum { | ||
| 288 | TCA_GRED_UNSPEC, | ||
| 289 | TCA_GRED_PARMS, | ||
| 290 | TCA_GRED_STAB, | ||
| 291 | TCA_GRED_DPS, | ||
| 292 | TCA_GRED_MAX_P, | ||
| 293 | TCA_GRED_LIMIT, | ||
| 294 | TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ | ||
| 295 | __TCA_GRED_MAX, | ||
| 296 | }; | ||
| 297 | |||
| 298 | #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) | ||
| 299 | |||
| 300 | enum { | ||
| 301 | TCA_GRED_VQ_ENTRY_UNSPEC, | ||
| 302 | TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ | ||
| 303 | __TCA_GRED_VQ_ENTRY_MAX, | ||
| 304 | }; | ||
| 305 | #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) | ||
| 306 | |||
| 307 | enum { | ||
| 308 | TCA_GRED_VQ_UNSPEC, | ||
| 309 | TCA_GRED_VQ_PAD, | ||
| 310 | TCA_GRED_VQ_DP, /* u32 */ | ||
| 311 | TCA_GRED_VQ_STAT_BYTES, /* u64 */ | ||
| 312 | TCA_GRED_VQ_STAT_PACKETS, /* u32 */ | ||
| 313 | TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ | ||
| 314 | TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ | ||
| 315 | TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ | ||
| 316 | TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ | ||
| 317 | TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ | ||
| 318 | TCA_GRED_VQ_STAT_PDROP, /* u32 */ | ||
| 319 | TCA_GRED_VQ_STAT_OTHER, /* u32 */ | ||
| 320 | TCA_GRED_VQ_FLAGS, /* u32 */ | ||
| 321 | __TCA_GRED_VQ_MAX | ||
| 322 | }; | ||
| 323 | |||
| 324 | #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) | ||
| 325 | |||
| 326 | struct tc_gred_qopt { | ||
| 327 | __u32 limit; /* HARD maximal queue length (bytes) */ | ||
| 328 | __u32 qth_min; /* Min average length threshold (bytes) */ | ||
| 329 | __u32 qth_max; /* Max average length threshold (bytes) */ | ||
| 330 | __u32 DP; /* up to 2^32 DPs */ | ||
| 331 | __u32 backlog; | ||
| 332 | __u32 qave; | ||
| 333 | __u32 forced; | ||
| 334 | __u32 early; | ||
| 335 | __u32 other; | ||
| 336 | __u32 pdrop; | ||
| 337 | __u8 Wlog; /* log(W) */ | ||
| 338 | __u8 Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 339 | __u8 Scell_log; /* cell size for idle damping */ | ||
| 340 | __u8 prio; /* prio of this VQ */ | ||
| 341 | __u32 packets; | ||
| 342 | __u32 bytesin; | ||
| 343 | }; | ||
| 344 | |||
| 345 | /* gred setup */ | ||
| 346 | struct tc_gred_sopt { | ||
| 347 | __u32 DPs; | ||
| 348 | __u32 def_DP; | ||
| 349 | __u8 grio; | ||
| 350 | __u8 flags; | ||
| 351 | __u16 pad1; | ||
| 352 | }; | ||
| 353 | |||
| 354 | /* CHOKe section */ | ||
| 355 | |||
| 356 | enum { | ||
| 357 | TCA_CHOKE_UNSPEC, | ||
| 358 | TCA_CHOKE_PARMS, | ||
| 359 | TCA_CHOKE_STAB, | ||
| 360 | TCA_CHOKE_MAX_P, | ||
| 361 | __TCA_CHOKE_MAX, | ||
| 362 | }; | ||
| 363 | |||
| 364 | #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1) | ||
| 365 | |||
| 366 | struct tc_choke_qopt { | ||
| 367 | __u32 limit; /* Hard queue length (packets) */ | ||
| 368 | __u32 qth_min; /* Min average threshold (packets) */ | ||
| 369 | __u32 qth_max; /* Max average threshold (packets) */ | ||
| 370 | unsigned char Wlog; /* log(W) */ | ||
| 371 | unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ | ||
| 372 | unsigned char Scell_log; /* cell size for idle damping */ | ||
| 373 | unsigned char flags; /* see RED flags */ | ||
| 374 | }; | ||
| 375 | |||
| 376 | struct tc_choke_xstats { | ||
| 377 | __u32 early; /* Early drops */ | ||
| 378 | __u32 pdrop; /* Drops due to queue limits */ | ||
| 379 | __u32 other; /* Drops due to drop() calls */ | ||
| 380 | __u32 marked; /* Marked packets */ | ||
| 381 | __u32 matched; /* Drops due to flow match */ | ||
| 382 | }; | ||
| 383 | |||
| 384 | /* HTB section */ | ||
| 385 | #define TC_HTB_NUMPRIO 8 | ||
| 386 | #define TC_HTB_MAXDEPTH 8 | ||
| 387 | #define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */ | ||
| 388 | |||
| 389 | struct tc_htb_opt { | ||
| 390 | struct tc_ratespec rate; | ||
| 391 | struct tc_ratespec ceil; | ||
| 392 | __u32 buffer; | ||
| 393 | __u32 cbuffer; | ||
| 394 | __u32 quantum; | ||
| 395 | __u32 level; /* out only */ | ||
| 396 | __u32 prio; | ||
| 397 | }; | ||
| 398 | struct tc_htb_glob { | ||
| 399 | __u32 version; /* to match HTB/TC */ | ||
| 400 | __u32 rate2quantum; /* bps->quantum divisor */ | ||
| 401 | __u32 defcls; /* default class number */ | ||
| 402 | __u32 debug; /* debug flags */ | ||
| 403 | |||
| 404 | /* stats */ | ||
| 405 | __u32 direct_pkts; /* count of non shaped packets */ | ||
| 406 | }; | ||
| 407 | enum { | ||
| 408 | TCA_HTB_UNSPEC, | ||
| 409 | TCA_HTB_PARMS, | ||
| 410 | TCA_HTB_INIT, | ||
| 411 | TCA_HTB_CTAB, | ||
| 412 | TCA_HTB_RTAB, | ||
| 413 | TCA_HTB_DIRECT_QLEN, | ||
| 414 | TCA_HTB_RATE64, | ||
| 415 | TCA_HTB_CEIL64, | ||
| 416 | TCA_HTB_PAD, | ||
| 417 | __TCA_HTB_MAX, | ||
| 418 | }; | ||
| 419 | |||
| 420 | #define TCA_HTB_MAX (__TCA_HTB_MAX - 1) | ||
| 421 | |||
| 422 | struct tc_htb_xstats { | ||
| 423 | __u32 lends; | ||
| 424 | __u32 borrows; | ||
| 425 | __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */ | ||
| 426 | __s32 tokens; | ||
| 427 | __s32 ctokens; | ||
| 428 | }; | ||
| 429 | |||
| 430 | /* HFSC section */ | ||
| 431 | |||
| 432 | struct tc_hfsc_qopt { | ||
| 433 | __u16 defcls; /* default class */ | ||
| 434 | }; | ||
| 435 | |||
| 436 | struct tc_service_curve { | ||
| 437 | __u32 m1; /* slope of the first segment in bps */ | ||
| 438 | __u32 d; /* x-projection of the first segment in us */ | ||
| 439 | __u32 m2; /* slope of the second segment in bps */ | ||
| 440 | }; | ||
| 441 | |||
| 442 | struct tc_hfsc_stats { | ||
| 443 | __u64 work; /* total work done */ | ||
| 444 | __u64 rtwork; /* work done by real-time criteria */ | ||
| 445 | __u32 period; /* current period */ | ||
| 446 | __u32 level; /* class level in hierarchy */ | ||
| 447 | }; | ||
| 448 | |||
| 449 | enum { | ||
| 450 | TCA_HFSC_UNSPEC, | ||
| 451 | TCA_HFSC_RSC, | ||
| 452 | TCA_HFSC_FSC, | ||
| 453 | TCA_HFSC_USC, | ||
| 454 | __TCA_HFSC_MAX, | ||
| 455 | }; | ||
| 456 | |||
| 457 | #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1) | ||
| 458 | |||
| 459 | |||
| 460 | /* CBQ section */ | ||
| 461 | |||
| 462 | #define TC_CBQ_MAXPRIO 8 | ||
| 463 | #define TC_CBQ_MAXLEVEL 8 | ||
| 464 | #define TC_CBQ_DEF_EWMA 5 | ||
| 465 | |||
| 466 | struct tc_cbq_lssopt { | ||
| 467 | unsigned char change; | ||
| 468 | unsigned char flags; | ||
| 469 | #define TCF_CBQ_LSS_BOUNDED 1 | ||
| 470 | #define TCF_CBQ_LSS_ISOLATED 2 | ||
| 471 | unsigned char ewma_log; | ||
| 472 | unsigned char level; | ||
| 473 | #define TCF_CBQ_LSS_FLAGS 1 | ||
| 474 | #define TCF_CBQ_LSS_EWMA 2 | ||
| 475 | #define TCF_CBQ_LSS_MAXIDLE 4 | ||
| 476 | #define TCF_CBQ_LSS_MINIDLE 8 | ||
| 477 | #define TCF_CBQ_LSS_OFFTIME 0x10 | ||
| 478 | #define TCF_CBQ_LSS_AVPKT 0x20 | ||
| 479 | __u32 maxidle; | ||
| 480 | __u32 minidle; | ||
| 481 | __u32 offtime; | ||
| 482 | __u32 avpkt; | ||
| 483 | }; | ||
| 484 | |||
| 485 | struct tc_cbq_wrropt { | ||
| 486 | unsigned char flags; | ||
| 487 | unsigned char priority; | ||
| 488 | unsigned char cpriority; | ||
| 489 | unsigned char __reserved; | ||
| 490 | __u32 allot; | ||
| 491 | __u32 weight; | ||
| 492 | }; | ||
| 493 | |||
| 494 | struct tc_cbq_ovl { | ||
| 495 | unsigned char strategy; | ||
| 496 | #define TC_CBQ_OVL_CLASSIC 0 | ||
| 497 | #define TC_CBQ_OVL_DELAY 1 | ||
| 498 | #define TC_CBQ_OVL_LOWPRIO 2 | ||
| 499 | #define TC_CBQ_OVL_DROP 3 | ||
| 500 | #define TC_CBQ_OVL_RCLASSIC 4 | ||
| 501 | unsigned char priority2; | ||
| 502 | __u16 pad; | ||
| 503 | __u32 penalty; | ||
| 504 | }; | ||
| 505 | |||
| 506 | struct tc_cbq_police { | ||
| 507 | unsigned char police; | ||
| 508 | unsigned char __res1; | ||
| 509 | unsigned short __res2; | ||
| 510 | }; | ||
| 511 | |||
| 512 | struct tc_cbq_fopt { | ||
| 513 | __u32 split; | ||
| 514 | __u32 defmap; | ||
| 515 | __u32 defchange; | ||
| 516 | }; | ||
| 517 | |||
| 518 | struct tc_cbq_xstats { | ||
| 519 | __u32 borrows; | ||
| 520 | __u32 overactions; | ||
| 521 | __s32 avgidle; | ||
| 522 | __s32 undertime; | ||
| 523 | }; | ||
| 524 | |||
| 525 | enum { | ||
| 526 | TCA_CBQ_UNSPEC, | ||
| 527 | TCA_CBQ_LSSOPT, | ||
| 528 | TCA_CBQ_WRROPT, | ||
| 529 | TCA_CBQ_FOPT, | ||
| 530 | TCA_CBQ_OVL_STRATEGY, | ||
| 531 | TCA_CBQ_RATE, | ||
| 532 | TCA_CBQ_RTAB, | ||
| 533 | TCA_CBQ_POLICE, | ||
| 534 | __TCA_CBQ_MAX, | ||
| 535 | }; | ||
| 536 | |||
| 537 | #define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1) | ||
| 538 | |||
| 539 | /* dsmark section */ | ||
| 540 | |||
| 541 | enum { | ||
| 542 | TCA_DSMARK_UNSPEC, | ||
| 543 | TCA_DSMARK_INDICES, | ||
| 544 | TCA_DSMARK_DEFAULT_INDEX, | ||
| 545 | TCA_DSMARK_SET_TC_INDEX, | ||
| 546 | TCA_DSMARK_MASK, | ||
| 547 | TCA_DSMARK_VALUE, | ||
| 548 | __TCA_DSMARK_MAX, | ||
| 549 | }; | ||
| 550 | |||
| 551 | #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1) | ||
| 552 | |||
| 553 | /* ATM section */ | ||
| 554 | |||
| 555 | enum { | ||
| 556 | TCA_ATM_UNSPEC, | ||
| 557 | TCA_ATM_FD, /* file/socket descriptor */ | ||
| 558 | TCA_ATM_PTR, /* pointer to descriptor - later */ | ||
| 559 | TCA_ATM_HDR, /* LL header */ | ||
| 560 | TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ | ||
| 561 | TCA_ATM_ADDR, /* PVC address (for output only) */ | ||
| 562 | TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */ | ||
| 563 | __TCA_ATM_MAX, | ||
| 564 | }; | ||
| 565 | |||
| 566 | #define TCA_ATM_MAX (__TCA_ATM_MAX - 1) | ||
| 567 | |||
| 568 | /* Network emulator */ | ||
| 569 | |||
| 570 | enum { | ||
| 571 | TCA_NETEM_UNSPEC, | ||
| 572 | TCA_NETEM_CORR, | ||
| 573 | TCA_NETEM_DELAY_DIST, | ||
| 574 | TCA_NETEM_REORDER, | ||
| 575 | TCA_NETEM_CORRUPT, | ||
| 576 | TCA_NETEM_LOSS, | ||
| 577 | TCA_NETEM_RATE, | ||
| 578 | TCA_NETEM_ECN, | ||
| 579 | TCA_NETEM_RATE64, | ||
| 580 | TCA_NETEM_PAD, | ||
| 581 | TCA_NETEM_LATENCY64, | ||
| 582 | TCA_NETEM_JITTER64, | ||
| 583 | TCA_NETEM_SLOT, | ||
| 584 | TCA_NETEM_SLOT_DIST, | ||
| 585 | __TCA_NETEM_MAX, | ||
| 586 | }; | ||
| 587 | |||
| 588 | #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1) | ||
| 589 | |||
| 590 | struct tc_netem_qopt { | ||
| 591 | __u32 latency; /* added delay (us) */ | ||
| 592 | __u32 limit; /* fifo limit (packets) */ | ||
| 593 | __u32 loss; /* random packet loss (0=none ~0=100%) */ | ||
| 594 | __u32 gap; /* re-ordering gap (0 for none) */ | ||
| 595 | __u32 duplicate; /* random packet dup (0=none ~0=100%) */ | ||
| 596 | __u32 jitter; /* random jitter in latency (us) */ | ||
| 597 | }; | ||
| 598 | |||
| 599 | struct tc_netem_corr { | ||
| 600 | __u32 delay_corr; /* delay correlation */ | ||
| 601 | __u32 loss_corr; /* packet loss correlation */ | ||
| 602 | __u32 dup_corr; /* duplicate correlation */ | ||
| 603 | }; | ||
| 604 | |||
| 605 | struct tc_netem_reorder { | ||
| 606 | __u32 probability; | ||
| 607 | __u32 correlation; | ||
| 608 | }; | ||
| 609 | |||
| 610 | struct tc_netem_corrupt { | ||
| 611 | __u32 probability; | ||
| 612 | __u32 correlation; | ||
| 613 | }; | ||
| 614 | |||
| 615 | struct tc_netem_rate { | ||
| 616 | __u32 rate; /* byte/s */ | ||
| 617 | __s32 packet_overhead; | ||
| 618 | __u32 cell_size; | ||
| 619 | __s32 cell_overhead; | ||
| 620 | }; | ||
| 621 | |||
| 622 | struct tc_netem_slot { | ||
| 623 | __s64 min_delay; /* nsec */ | ||
| 624 | __s64 max_delay; | ||
| 625 | __s32 max_packets; | ||
| 626 | __s32 max_bytes; | ||
| 627 | __s64 dist_delay; /* nsec */ | ||
| 628 | __s64 dist_jitter; /* nsec */ | ||
| 629 | }; | ||
| 630 | |||
| 631 | enum { | ||
| 632 | NETEM_LOSS_UNSPEC, | ||
| 633 | NETEM_LOSS_GI, /* General Intuitive - 4 state model */ | ||
| 634 | NETEM_LOSS_GE, /* Gilbert Elliot models */ | ||
| 635 | __NETEM_LOSS_MAX | ||
| 636 | }; | ||
| 637 | #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) | ||
| 638 | |||
| 639 | /* State transition probabilities for 4 state model */ | ||
| 640 | struct tc_netem_gimodel { | ||
| 641 | __u32 p13; | ||
| 642 | __u32 p31; | ||
| 643 | __u32 p32; | ||
| 644 | __u32 p14; | ||
| 645 | __u32 p23; | ||
| 646 | }; | ||
| 647 | |||
| 648 | /* Gilbert-Elliot models */ | ||
| 649 | struct tc_netem_gemodel { | ||
| 650 | __u32 p; | ||
| 651 | __u32 r; | ||
| 652 | __u32 h; | ||
| 653 | __u32 k1; | ||
| 654 | }; | ||
| 655 | |||
| 656 | #define NETEM_DIST_SCALE 8192 | ||
| 657 | #define NETEM_DIST_MAX 16384 | ||
| 658 | |||
| 659 | /* DRR */ | ||
| 660 | |||
| 661 | enum { | ||
| 662 | TCA_DRR_UNSPEC, | ||
| 663 | TCA_DRR_QUANTUM, | ||
| 664 | __TCA_DRR_MAX | ||
| 665 | }; | ||
| 666 | |||
| 667 | #define TCA_DRR_MAX (__TCA_DRR_MAX - 1) | ||
| 668 | |||
| 669 | struct tc_drr_stats { | ||
| 670 | __u32 deficit; | ||
| 671 | }; | ||
| 672 | |||
| 673 | /* MQPRIO */ | ||
| 674 | #define TC_QOPT_BITMASK 15 | ||
| 675 | #define TC_QOPT_MAX_QUEUE 16 | ||
| 676 | |||
| 677 | enum { | ||
| 678 | TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */ | ||
| 679 | TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */ | ||
| 680 | __TC_MQPRIO_HW_OFFLOAD_MAX | ||
| 681 | }; | ||
| 682 | |||
| 683 | #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1) | ||
| 684 | |||
| 685 | enum { | ||
| 686 | TC_MQPRIO_MODE_DCB, | ||
| 687 | TC_MQPRIO_MODE_CHANNEL, | ||
| 688 | __TC_MQPRIO_MODE_MAX | ||
| 689 | }; | ||
| 690 | |||
| 691 | #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1) | ||
| 692 | |||
| 693 | enum { | ||
| 694 | TC_MQPRIO_SHAPER_DCB, | ||
| 695 | TC_MQPRIO_SHAPER_BW_RATE, /* Add new shapers below */ | ||
| 696 | __TC_MQPRIO_SHAPER_MAX | ||
| 697 | }; | ||
| 698 | |||
| 699 | #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1) | ||
| 700 | |||
| 701 | struct tc_mqprio_qopt { | ||
| 702 | __u8 num_tc; | ||
| 703 | __u8 prio_tc_map[TC_QOPT_BITMASK + 1]; | ||
| 704 | __u8 hw; | ||
| 705 | __u16 count[TC_QOPT_MAX_QUEUE]; | ||
| 706 | __u16 offset[TC_QOPT_MAX_QUEUE]; | ||
| 707 | }; | ||
| 708 | |||
| 709 | #define TC_MQPRIO_F_MODE 0x1 | ||
| 710 | #define TC_MQPRIO_F_SHAPER 0x2 | ||
| 711 | #define TC_MQPRIO_F_MIN_RATE 0x4 | ||
| 712 | #define TC_MQPRIO_F_MAX_RATE 0x8 | ||
| 713 | |||
| 714 | enum { | ||
| 715 | TCA_MQPRIO_UNSPEC, | ||
| 716 | TCA_MQPRIO_MODE, | ||
| 717 | TCA_MQPRIO_SHAPER, | ||
| 718 | TCA_MQPRIO_MIN_RATE64, | ||
| 719 | TCA_MQPRIO_MAX_RATE64, | ||
| 720 | __TCA_MQPRIO_MAX, | ||
| 721 | }; | ||
| 722 | |||
| 723 | #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1) | ||
| 724 | |||
| 725 | /* SFB */ | ||
| 726 | |||
| 727 | enum { | ||
| 728 | TCA_SFB_UNSPEC, | ||
| 729 | TCA_SFB_PARMS, | ||
| 730 | __TCA_SFB_MAX, | ||
| 731 | }; | ||
| 732 | |||
| 733 | #define TCA_SFB_MAX (__TCA_SFB_MAX - 1) | ||
| 734 | |||
| 735 | /* | ||
| 736 | * Note: increment, decrement are Q0.16 fixed-point values. | ||
| 737 | */ | ||
| 738 | struct tc_sfb_qopt { | ||
| 739 | __u32 rehash_interval; /* delay between hash move, in ms */ | ||
| 740 | __u32 warmup_time; /* double buffering warmup time in ms (warmup_time < rehash_interval) */ | ||
| 741 | __u32 max; /* max len of qlen_min */ | ||
| 742 | __u32 bin_size; /* maximum queue length per bin */ | ||
| 743 | __u32 increment; /* probability increment, (d1 in Blue) */ | ||
| 744 | __u32 decrement; /* probability decrement, (d2 in Blue) */ | ||
| 745 | __u32 limit; /* max SFB queue length */ | ||
| 746 | __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */ | ||
| 747 | __u32 penalty_burst; | ||
| 748 | }; | ||
| 749 | |||
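Since increment and decrement above are Q0.16 fixed point (full scale is SFB_MAX_PROB, 0xFFFF, defined further down), a fractional drop probability is simply scaled by 65536. A tiny sketch with illustrative values, not mandated defaults:

/* Illustrative Q0.16 conversion for the increment/decrement fields above. */
#include <stdio.h>

static unsigned int prob_to_q0_16(double p)
{
        return (unsigned int)(p * 65536.0 + 0.5);       /* scale of SFB_MAX_PROB */
}

int main(void)
{
        printf("increment = %u\n", prob_to_q0_16(0.00050));     /* ~33 */
        printf("decrement = %u\n", prob_to_q0_16(0.00005));     /* ~3  */
        return 0;
}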
| 750 | struct tc_sfb_xstats { | ||
| 751 | __u32 earlydrop; | ||
| 752 | __u32 penaltydrop; | ||
| 753 | __u32 bucketdrop; | ||
| 754 | __u32 queuedrop; | ||
| 755 | __u32 childdrop; /* drops in child qdisc */ | ||
| 756 | __u32 marked; | ||
| 757 | __u32 maxqlen; | ||
| 758 | __u32 maxprob; | ||
| 759 | __u32 avgprob; | ||
| 760 | }; | ||
| 761 | |||
| 762 | #define SFB_MAX_PROB 0xFFFF | ||
| 763 | |||
| 764 | /* QFQ */ | ||
| 765 | enum { | ||
| 766 | TCA_QFQ_UNSPEC, | ||
| 767 | TCA_QFQ_WEIGHT, | ||
| 768 | TCA_QFQ_LMAX, | ||
| 769 | __TCA_QFQ_MAX | ||
| 770 | }; | ||
| 771 | |||
| 772 | #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1) | ||
| 773 | |||
| 774 | struct tc_qfq_stats { | ||
| 775 | __u32 weight; | ||
| 776 | __u32 lmax; | ||
| 777 | }; | ||
| 778 | |||
| 779 | /* CODEL */ | ||
| 780 | |||
| 781 | enum { | ||
| 782 | TCA_CODEL_UNSPEC, | ||
| 783 | TCA_CODEL_TARGET, | ||
| 784 | TCA_CODEL_LIMIT, | ||
| 785 | TCA_CODEL_INTERVAL, | ||
| 786 | TCA_CODEL_ECN, | ||
| 787 | TCA_CODEL_CE_THRESHOLD, | ||
| 788 | __TCA_CODEL_MAX | ||
| 789 | }; | ||
| 790 | |||
| 791 | #define TCA_CODEL_MAX (__TCA_CODEL_MAX - 1) | ||
| 792 | |||
| 793 | struct tc_codel_xstats { | ||
| 794 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
| 795 | __u32 count; /* how many drops we've done since the last time we | ||
| 796 | * entered dropping state | ||
| 797 | */ | ||
| 798 | __u32 lastcount; /* count at entry to dropping state */ | ||
| 799 | __u32 ldelay; /* in-queue delay seen by most recently dequeued packet */ | ||
| 800 | __s32 drop_next; /* time to drop next packet */ | ||
| 801 | __u32 drop_overlimit; /* number of times the max qdisc packet limit was hit */ | ||
| 802 | __u32 ecn_mark; /* number of packets we ECN marked instead of dropped */ | ||
| 803 | __u32 dropping; /* are we in dropping state ? */ | ||
| 804 | __u32 ce_mark; /* number of CE marked packets because of ce_threshold */ | ||
| 805 | }; | ||
| 806 | |||
| 807 | /* FQ_CODEL */ | ||
| 808 | |||
| 809 | enum { | ||
| 810 | TCA_FQ_CODEL_UNSPEC, | ||
| 811 | TCA_FQ_CODEL_TARGET, | ||
| 812 | TCA_FQ_CODEL_LIMIT, | ||
| 813 | TCA_FQ_CODEL_INTERVAL, | ||
| 814 | TCA_FQ_CODEL_ECN, | ||
| 815 | TCA_FQ_CODEL_FLOWS, | ||
| 816 | TCA_FQ_CODEL_QUANTUM, | ||
| 817 | TCA_FQ_CODEL_CE_THRESHOLD, | ||
| 818 | TCA_FQ_CODEL_DROP_BATCH_SIZE, | ||
| 819 | TCA_FQ_CODEL_MEMORY_LIMIT, | ||
| 820 | __TCA_FQ_CODEL_MAX | ||
| 821 | }; | ||
| 822 | |||
| 823 | #define TCA_FQ_CODEL_MAX (__TCA_FQ_CODEL_MAX - 1) | ||
| 824 | |||
| 825 | enum { | ||
| 826 | TCA_FQ_CODEL_XSTATS_QDISC, | ||
| 827 | TCA_FQ_CODEL_XSTATS_CLASS, | ||
| 828 | }; | ||
| 829 | |||
| 830 | struct tc_fq_codel_qd_stats { | ||
| 831 | __u32 maxpacket; /* largest packet we've seen so far */ | ||
| 832 | __u32 drop_overlimit; /* number of times max qdisc | ||
| 833 | * packet limit was hit | ||
| 834 | */ | ||
| 835 | __u32 ecn_mark; /* number of packets we ECN marked | ||
| 836 | * instead of being dropped | ||
| 837 | */ | ||
| 838 | __u32 new_flow_count; /* number of times packets | ||
| 839 | * created a 'new flow' | ||
| 840 | */ | ||
| 841 | __u32 new_flows_len; /* count of flows in new list */ | ||
| 842 | __u32 old_flows_len; /* count of flows in old list */ | ||
| 843 | __u32 ce_mark; /* packets above ce_threshold */ | ||
| 844 | __u32 memory_usage; /* in bytes */ | ||
| 845 | __u32 drop_overmemory; | ||
| 846 | }; | ||
| 847 | |||
| 848 | struct tc_fq_codel_cl_stats { | ||
| 849 | __s32 deficit; | ||
| 850 | __u32 ldelay; /* in-queue delay seen by most recently | ||
| 851 | * dequeued packet | ||
| 852 | */ | ||
| 853 | __u32 count; | ||
| 854 | __u32 lastcount; | ||
| 855 | __u32 dropping; | ||
| 856 | __s32 drop_next; | ||
| 857 | }; | ||
| 858 | |||
| 859 | struct tc_fq_codel_xstats { | ||
| 860 | __u32 type; | ||
| 861 | union { | ||
| 862 | struct tc_fq_codel_qd_stats qdisc_stats; | ||
| 863 | struct tc_fq_codel_cl_stats class_stats; | ||
| 864 | }; | ||
| 865 | }; | ||
| 866 | |||
| 867 | /* FQ */ | ||
| 868 | |||
| 869 | enum { | ||
| 870 | TCA_FQ_UNSPEC, | ||
| 871 | |||
| 872 | TCA_FQ_PLIMIT, /* limit of total number of packets in queue */ | ||
| 873 | |||
| 874 | TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */ | ||
| 875 | |||
| 876 | TCA_FQ_QUANTUM, /* RR quantum */ | ||
| 877 | |||
| 878 | TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */ | ||
| 879 | |||
| 880 | TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */ | ||
| 881 | |||
| 882 | TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */ | ||
| 883 | |||
| 884 | TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */ | ||
| 885 | |||
| 886 | TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */ | ||
| 887 | |||
| 888 | TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */ | ||
| 889 | |||
| 890 | TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ | ||
| 891 | |||
| 892 | TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ | ||
| 893 | |||
| 894 | TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ | ||
| 895 | |||
| 896 | __TCA_FQ_MAX | ||
| 897 | }; | ||
| 898 | |||
| 899 | #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) | ||
| 900 | |||
| 901 | struct tc_fq_qd_stats { | ||
| 902 | __u64 gc_flows; | ||
| 903 | __u64 highprio_packets; | ||
| 904 | __u64 tcp_retrans; | ||
| 905 | __u64 throttled; | ||
| 906 | __u64 flows_plimit; | ||
| 907 | __u64 pkts_too_long; | ||
| 908 | __u64 allocation_errors; | ||
| 909 | __s64 time_next_delayed_flow; | ||
| 910 | __u32 flows; | ||
| 911 | __u32 inactive_flows; | ||
| 912 | __u32 throttled_flows; | ||
| 913 | __u32 unthrottle_latency_ns; | ||
| 914 | __u64 ce_mark; /* packets above ce_threshold */ | ||
| 915 | }; | ||
| 916 | |||
| 917 | /* Heavy-Hitter Filter */ | ||
| 918 | |||
| 919 | enum { | ||
| 920 | TCA_HHF_UNSPEC, | ||
| 921 | TCA_HHF_BACKLOG_LIMIT, | ||
| 922 | TCA_HHF_QUANTUM, | ||
| 923 | TCA_HHF_HH_FLOWS_LIMIT, | ||
| 924 | TCA_HHF_RESET_TIMEOUT, | ||
| 925 | TCA_HHF_ADMIT_BYTES, | ||
| 926 | TCA_HHF_EVICT_TIMEOUT, | ||
| 927 | TCA_HHF_NON_HH_WEIGHT, | ||
| 928 | __TCA_HHF_MAX | ||
| 929 | }; | ||
| 930 | |||
| 931 | #define TCA_HHF_MAX (__TCA_HHF_MAX - 1) | ||
| 932 | |||
| 933 | struct tc_hhf_xstats { | ||
| 934 | __u32 drop_overlimit; /* number of times max qdisc packet limit | ||
| 935 | * was hit | ||
| 936 | */ | ||
| 937 | __u32 hh_overlimit; /* number of times max heavy-hitters was hit */ | ||
| 938 | __u32 hh_tot_count; /* number of captured heavy-hitters so far */ | ||
| 939 | __u32 hh_cur_count; /* number of current heavy-hitters */ | ||
| 940 | }; | ||
| 941 | |||
| 942 | /* PIE */ | ||
| 943 | enum { | ||
| 944 | TCA_PIE_UNSPEC, | ||
| 945 | TCA_PIE_TARGET, | ||
| 946 | TCA_PIE_LIMIT, | ||
| 947 | TCA_PIE_TUPDATE, | ||
| 948 | TCA_PIE_ALPHA, | ||
| 949 | TCA_PIE_BETA, | ||
| 950 | TCA_PIE_ECN, | ||
| 951 | TCA_PIE_BYTEMODE, | ||
| 952 | __TCA_PIE_MAX | ||
| 953 | }; | ||
| 954 | #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) | ||
| 955 | |||
| 956 | struct tc_pie_xstats { | ||
| 957 | __u32 prob; /* current probability */ | ||
| 958 | __u32 delay; /* current delay in ms */ | ||
| 959 | __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ | ||
| 960 | __u32 packets_in; /* total number of packets enqueued */ | ||
| 961 | __u32 dropped; /* packets dropped due to pie_action */ | ||
| 962 | __u32 overlimit; /* dropped due to lack of space in queue */ | ||
| 963 | __u32 maxq; /* maximum queue size */ | ||
| 964 | __u32 ecn_mark; /* packets marked with ecn*/ | ||
| 965 | }; | ||
| 966 | |||
| 967 | /* CBS */ | ||
| 968 | struct tc_cbs_qopt { | ||
| 969 | __u8 offload; | ||
| 970 | __u8 _pad[3]; | ||
| 971 | __s32 hicredit; | ||
| 972 | __s32 locredit; | ||
| 973 | __s32 idleslope; | ||
| 974 | __s32 sendslope; | ||
| 975 | }; | ||
| 976 | |||
| 977 | enum { | ||
| 978 | TCA_CBS_UNSPEC, | ||
| 979 | TCA_CBS_PARMS, | ||
| 980 | __TCA_CBS_MAX, | ||
| 981 | }; | ||
| 982 | |||
| 983 | #define TCA_CBS_MAX (__TCA_CBS_MAX - 1) | ||
| 984 | |||
| 985 | |||
| 986 | /* ETF */ | ||
| 987 | struct tc_etf_qopt { | ||
| 988 | __s32 delta; | ||
| 989 | __s32 clockid; | ||
| 990 | __u32 flags; | ||
| 991 | #define TC_ETF_DEADLINE_MODE_ON BIT(0) | ||
| 992 | #define TC_ETF_OFFLOAD_ON BIT(1) | ||
| 993 | }; | ||
| 994 | |||
| 995 | enum { | ||
| 996 | TCA_ETF_UNSPEC, | ||
| 997 | TCA_ETF_PARMS, | ||
| 998 | __TCA_ETF_MAX, | ||
| 999 | }; | ||
| 1000 | |||
| 1001 | #define TCA_ETF_MAX (__TCA_ETF_MAX - 1) | ||
| 1002 | |||
| 1003 | |||
| 1004 | /* CAKE */ | ||
| 1005 | enum { | ||
| 1006 | TCA_CAKE_UNSPEC, | ||
| 1007 | TCA_CAKE_PAD, | ||
| 1008 | TCA_CAKE_BASE_RATE64, | ||
| 1009 | TCA_CAKE_DIFFSERV_MODE, | ||
| 1010 | TCA_CAKE_ATM, | ||
| 1011 | TCA_CAKE_FLOW_MODE, | ||
| 1012 | TCA_CAKE_OVERHEAD, | ||
| 1013 | TCA_CAKE_RTT, | ||
| 1014 | TCA_CAKE_TARGET, | ||
| 1015 | TCA_CAKE_AUTORATE, | ||
| 1016 | TCA_CAKE_MEMORY, | ||
| 1017 | TCA_CAKE_NAT, | ||
| 1018 | TCA_CAKE_RAW, | ||
| 1019 | TCA_CAKE_WASH, | ||
| 1020 | TCA_CAKE_MPU, | ||
| 1021 | TCA_CAKE_INGRESS, | ||
| 1022 | TCA_CAKE_ACK_FILTER, | ||
| 1023 | TCA_CAKE_SPLIT_GSO, | ||
| 1024 | __TCA_CAKE_MAX | ||
| 1025 | }; | ||
| 1026 | #define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1) | ||
| 1027 | |||
| 1028 | enum { | ||
| 1029 | __TCA_CAKE_STATS_INVALID, | ||
| 1030 | TCA_CAKE_STATS_PAD, | ||
| 1031 | TCA_CAKE_STATS_CAPACITY_ESTIMATE64, | ||
| 1032 | TCA_CAKE_STATS_MEMORY_LIMIT, | ||
| 1033 | TCA_CAKE_STATS_MEMORY_USED, | ||
| 1034 | TCA_CAKE_STATS_AVG_NETOFF, | ||
| 1035 | TCA_CAKE_STATS_MIN_NETLEN, | ||
| 1036 | TCA_CAKE_STATS_MAX_NETLEN, | ||
| 1037 | TCA_CAKE_STATS_MIN_ADJLEN, | ||
| 1038 | TCA_CAKE_STATS_MAX_ADJLEN, | ||
| 1039 | TCA_CAKE_STATS_TIN_STATS, | ||
| 1040 | TCA_CAKE_STATS_DEFICIT, | ||
| 1041 | TCA_CAKE_STATS_COBALT_COUNT, | ||
| 1042 | TCA_CAKE_STATS_DROPPING, | ||
| 1043 | TCA_CAKE_STATS_DROP_NEXT_US, | ||
| 1044 | TCA_CAKE_STATS_P_DROP, | ||
| 1045 | TCA_CAKE_STATS_BLUE_TIMER_US, | ||
| 1046 | __TCA_CAKE_STATS_MAX | ||
| 1047 | }; | ||
| 1048 | #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1) | ||
| 1049 | |||
| 1050 | enum { | ||
| 1051 | __TCA_CAKE_TIN_STATS_INVALID, | ||
| 1052 | TCA_CAKE_TIN_STATS_PAD, | ||
| 1053 | TCA_CAKE_TIN_STATS_SENT_PACKETS, | ||
| 1054 | TCA_CAKE_TIN_STATS_SENT_BYTES64, | ||
| 1055 | TCA_CAKE_TIN_STATS_DROPPED_PACKETS, | ||
| 1056 | TCA_CAKE_TIN_STATS_DROPPED_BYTES64, | ||
| 1057 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, | ||
| 1058 | TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, | ||
| 1059 | TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, | ||
| 1060 | TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, | ||
| 1061 | TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, | ||
| 1062 | TCA_CAKE_TIN_STATS_BACKLOG_BYTES, | ||
| 1063 | TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, | ||
| 1064 | TCA_CAKE_TIN_STATS_TARGET_US, | ||
| 1065 | TCA_CAKE_TIN_STATS_INTERVAL_US, | ||
| 1066 | TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, | ||
| 1067 | TCA_CAKE_TIN_STATS_WAY_MISSES, | ||
| 1068 | TCA_CAKE_TIN_STATS_WAY_COLLISIONS, | ||
| 1069 | TCA_CAKE_TIN_STATS_PEAK_DELAY_US, | ||
| 1070 | TCA_CAKE_TIN_STATS_AVG_DELAY_US, | ||
| 1071 | TCA_CAKE_TIN_STATS_BASE_DELAY_US, | ||
| 1072 | TCA_CAKE_TIN_STATS_SPARSE_FLOWS, | ||
| 1073 | TCA_CAKE_TIN_STATS_BULK_FLOWS, | ||
| 1074 | TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, | ||
| 1075 | TCA_CAKE_TIN_STATS_MAX_SKBLEN, | ||
| 1076 | TCA_CAKE_TIN_STATS_FLOW_QUANTUM, | ||
| 1077 | __TCA_CAKE_TIN_STATS_MAX | ||
| 1078 | }; | ||
| 1079 | #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1) | ||
| 1080 | #define TC_CAKE_MAX_TINS (8) | ||
| 1081 | |||
| 1082 | enum { | ||
| 1083 | CAKE_FLOW_NONE = 0, | ||
| 1084 | CAKE_FLOW_SRC_IP, | ||
| 1085 | CAKE_FLOW_DST_IP, | ||
| 1086 | CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */ | ||
| 1087 | CAKE_FLOW_FLOWS, | ||
| 1088 | CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */ | ||
| 1089 | CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */ | ||
| 1090 | CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */ | ||
| 1091 | CAKE_FLOW_MAX, | ||
| 1092 | }; | ||
| 1093 | |||
| 1094 | enum { | ||
| 1095 | CAKE_DIFFSERV_DIFFSERV3 = 0, | ||
| 1096 | CAKE_DIFFSERV_DIFFSERV4, | ||
| 1097 | CAKE_DIFFSERV_DIFFSERV8, | ||
| 1098 | CAKE_DIFFSERV_BESTEFFORT, | ||
| 1099 | CAKE_DIFFSERV_PRECEDENCE, | ||
| 1100 | CAKE_DIFFSERV_MAX | ||
| 1101 | }; | ||
| 1102 | |||
| 1103 | enum { | ||
| 1104 | CAKE_ACK_NONE = 0, | ||
| 1105 | CAKE_ACK_FILTER, | ||
| 1106 | CAKE_ACK_AGGRESSIVE, | ||
| 1107 | CAKE_ACK_MAX | ||
| 1108 | }; | ||
| 1109 | |||
| 1110 | enum { | ||
| 1111 | CAKE_ATM_NONE = 0, | ||
| 1112 | CAKE_ATM_ATM, | ||
| 1113 | CAKE_ATM_PTM, | ||
| 1114 | CAKE_ATM_MAX | ||
| 1115 | }; | ||
| 1116 | |||
| 1117 | |||
| 1118 | /* TAPRIO */ | ||
| 1119 | enum { | ||
| 1120 | TC_TAPRIO_CMD_SET_GATES = 0x00, | ||
| 1121 | TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, | ||
| 1122 | TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, | ||
| 1123 | }; | ||
| 1124 | |||
| 1125 | enum { | ||
| 1126 | TCA_TAPRIO_SCHED_ENTRY_UNSPEC, | ||
| 1127 | TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ | ||
| 1128 | TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ | ||
| 1129 | TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ | ||
| 1130 | TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ | ||
| 1131 | __TCA_TAPRIO_SCHED_ENTRY_MAX, | ||
| 1132 | }; | ||
| 1133 | #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) | ||
| 1134 | |||
| 1135 | /* The format for schedule entry list is: | ||
| 1136 | * [TCA_TAPRIO_SCHED_ENTRY_LIST] | ||
| 1137 | * [TCA_TAPRIO_SCHED_ENTRY] | ||
| 1138 | * [TCA_TAPRIO_SCHED_ENTRY_CMD] | ||
| 1139 | * [TCA_TAPRIO_SCHED_ENTRY_GATES] | ||
| 1140 | * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] | ||
| 1141 | */ | ||
| 1142 | enum { | ||
| 1143 | TCA_TAPRIO_SCHED_UNSPEC, | ||
| 1144 | TCA_TAPRIO_SCHED_ENTRY, | ||
| 1145 | __TCA_TAPRIO_SCHED_MAX, | ||
| 1146 | }; | ||
| 1147 | |||
| 1148 | #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) | ||
| 1149 | |||
| 1150 | enum { | ||
| 1151 | TCA_TAPRIO_ATTR_UNSPEC, | ||
| 1152 | TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ | ||
| 1153 | TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ | ||
| 1154 | TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ | ||
| 1155 | TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ | ||
| 1156 | TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ | ||
| 1157 | TCA_TAPRIO_PAD, | ||
| 1158 | __TCA_TAPRIO_ATTR_MAX, | ||
| 1159 | }; | ||
| 1160 | |||
| 1161 | #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) | ||
| 1162 | |||
| 1163 | #endif | ||
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore index f81e549ddfdb..4db74758c674 100644 --- a/tools/lib/bpf/.gitignore +++ b/tools/lib/bpf/.gitignore | |||
| @@ -1,2 +1,3 @@ | |||
| 1 | libbpf_version.h | 1 | libbpf_version.h |
| 2 | FEATURE-DUMP.libbpf | 2 | FEATURE-DUMP.libbpf |
| 3 | test_libbpf | ||
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst index 056f38310722..607aae40f4ed 100644 --- a/tools/lib/bpf/README.rst +++ b/tools/lib/bpf/README.rst | |||
| @@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is: | |||
| 132 | Format of version script and ways to handle ABI changes, including | 132 | Format of version script and ways to handle ABI changes, including |
| 133 | incompatible ones, are described in detail in [1]. | 133 | incompatible ones, are described in detail in [1]. |
| 134 | 134 | ||
| 135 | Stand-alone build | ||
| 136 | ================= | ||
| 137 | |||
| 138 | Under https://github.com/libbpf/libbpf there is a (semi-)automated | ||
| 139 | mirror of the mainline's version of libbpf for a stand-alone build. | ||
| 140 | |||
| 141 | However, all changes to libbpf's code base must be upstreamed through | ||
| 142 | the mainline kernel tree. | ||
| 143 | |||
| 144 | License | ||
| 145 | ======= | ||
| 146 | |||
| 147 | libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause. | ||
| 148 | |||
| 135 | Links | 149 | Links |
| 136 | ===== | 150 | ===== |
| 137 | 151 | ||
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3caaa3428774..88cbd110ae58 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c | |||
| @@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, | |||
| 65 | return syscall(__NR_bpf, cmd, attr, size); | 65 | return syscall(__NR_bpf, cmd, attr, size); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) | ||
| 69 | { | ||
| 70 | int fd; | ||
| 71 | |||
| 72 | do { | ||
| 73 | fd = sys_bpf(BPF_PROG_LOAD, attr, size); | ||
| 74 | } while (fd < 0 && errno == EAGAIN); | ||
| 75 | |||
| 76 | return fd; | ||
| 77 | } | ||
| 78 | |||
| 68 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) | 79 | int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) |
| 69 | { | 80 | { |
| 70 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; | 81 | __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; |
| @@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 232 | memcpy(attr.prog_name, load_attr->name, | 243 | memcpy(attr.prog_name, load_attr->name, |
| 233 | min(name_len, BPF_OBJ_NAME_LEN - 1)); | 244 | min(name_len, BPF_OBJ_NAME_LEN - 1)); |
| 234 | 245 | ||
| 235 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 246 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 236 | if (fd >= 0) | 247 | if (fd >= 0) |
| 237 | return fd; | 248 | return fd; |
| 238 | 249 | ||
| @@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 269 | break; | 280 | break; |
| 270 | } | 281 | } |
| 271 | 282 | ||
| 272 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 283 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 273 | 284 | ||
| 274 | if (fd >= 0) | 285 | if (fd >= 0) |
| 275 | goto done; | 286 | goto done; |
| @@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, | |||
| 283 | attr.log_size = log_buf_sz; | 294 | attr.log_size = log_buf_sz; |
| 284 | attr.log_level = 1; | 295 | attr.log_level = 1; |
| 285 | log_buf[0] = 0; | 296 | log_buf[0] = 0; |
| 286 | fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 297 | fd = sys_bpf_prog_load(&attr, sizeof(attr)); |
| 287 | done: | 298 | done: |
| 288 | free(finfo); | 299 | free(finfo); |
| 289 | free(linfo); | 300 | free(linfo); |
| @@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, | |||
| 328 | attr.kern_version = kern_version; | 339 | attr.kern_version = kern_version; |
| 329 | attr.prog_flags = prog_flags; | 340 | attr.prog_flags = prog_flags; |
| 330 | 341 | ||
| 331 | return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); | 342 | return sys_bpf_prog_load(&attr, sizeof(attr)); |
| 332 | } | 343 | } |
| 333 | 344 | ||
| 334 | int bpf_map_update_elem(int fd, const void *key, const void *value, | 345 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
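With sys_bpf_prog_load() retrying while BPF_PROG_LOAD fails with EAGAIN, all three load attempts in bpf_load_program_xattr() share one retry loop, so callers no longer need their own. A minimal caller sketch, assuming the bpf_load_program() prototype from this tree's tools/lib/bpf/bpf.h:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>            /* or "bpf.h" when building in-tree */

int main(void)
{
        /* r0 = 0; exit; -- the smallest accepted socket filter */
        struct bpf_insn prog[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        char log_buf[4096] = "";
        int fd;

        /* any transient EAGAIN from the BPF_PROG_LOAD syscall is now retried inside libbpf */
        fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, 2, "GPL", 0,
                              log_buf, sizeof(log_buf));
        if (fd < 0)
                fprintf(stderr, "load failed: %s\n%s", strerror(errno), log_buf);
        return fd < 0;
}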
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h index 1076393e6f43..e18a3556f5e3 100644 --- a/tools/perf/arch/powerpc/include/perf_regs.h +++ b/tools/perf/arch/powerpc/include/perf_regs.h | |||
| @@ -63,7 +63,8 @@ static const char *reg_names[] = { | |||
| 63 | [PERF_REG_POWERPC_TRAP] = "trap", | 63 | [PERF_REG_POWERPC_TRAP] = "trap", |
| 64 | [PERF_REG_POWERPC_DAR] = "dar", | 64 | [PERF_REG_POWERPC_DAR] = "dar", |
| 65 | [PERF_REG_POWERPC_DSISR] = "dsisr", | 65 | [PERF_REG_POWERPC_DSISR] = "dsisr", |
| 66 | [PERF_REG_POWERPC_SIER] = "sier" | 66 | [PERF_REG_POWERPC_SIER] = "sier", |
| 67 | [PERF_REG_POWERPC_MMCRA] = "mmcra" | ||
| 67 | }; | 68 | }; |
| 68 | 69 | ||
| 69 | static inline const char *perf_reg_name(int id) | 70 | static inline const char *perf_reg_name(int id) |
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c index 07fcd977d93e..34d5134681d9 100644 --- a/tools/perf/arch/powerpc/util/perf_regs.c +++ b/tools/perf/arch/powerpc/util/perf_regs.c | |||
| @@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = { | |||
| 53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), | 53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), |
| 54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), | 54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), |
| 55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), | 55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), |
| 56 | SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), | ||
| 56 | SMPL_REG_END | 57 | SMPL_REG_END |
| 57 | }; | 58 | }; |
| 58 | 59 | ||
diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c index e75238404555..2d4baf57822f 100644 --- a/tools/testing/nvdimm/dimm_devs.c +++ b/tools/testing/nvdimm/dimm_devs.c | |||
| @@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev, | |||
| 18 | * For the test version we need to poll the "hardware" in order | 18 | * For the test version we need to poll the "hardware" in order |
| 19 | * to get the updated status for unlock testing. | 19 | * to get the updated status for unlock testing. |
| 20 | */ | 20 | */ |
| 21 | nvdimm->sec.state = nvdimm_security_state(nvdimm, false); | 21 | nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER); |
| 22 | nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true); | 22 | nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER); |
| 23 | 23 | ||
| 24 | switch (nvdimm->sec.state) { | 24 | switch (nvdimm->sec.state) { |
| 25 | case NVDIMM_SECURITY_DISABLED: | 25 | case NVDIMM_SECURITY_DISABLED: |
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 4a9785043a39..dd093bd91aa9 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore | |||
| @@ -28,3 +28,4 @@ flow_dissector_load | |||
| 28 | test_netcnt | 28 | test_netcnt |
| 29 | test_section_names | 29 | test_section_names |
| 30 | test_tcpnotify_user | 30 | test_tcpnotify_user |
| 31 | test_libbpf | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 73aa6d8f4a2f..41ab7a3668b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
| @@ -55,7 +55,10 @@ TEST_PROGS := test_kmod.sh \ | |||
| 55 | test_flow_dissector.sh \ | 55 | test_flow_dissector.sh \ |
| 56 | test_xdp_vlan.sh | 56 | test_xdp_vlan.sh |
| 57 | 57 | ||
| 58 | TEST_PROGS_EXTENDED := with_addr.sh | 58 | TEST_PROGS_EXTENDED := with_addr.sh \ |
| 59 | with_tunnels.sh \ | ||
| 60 | tcp_client.py \ | ||
| 61 | tcp_server.py | ||
| 59 | 62 | ||
| 60 | # Compile but not part of 'make run_tests' | 63 | # Compile but not part of 'make run_tests' |
| 61 | TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ | 64 | TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ |
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index cf16948aad4a..6692a40a6979 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c | |||
| @@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void) | |||
| 155 | * This function creates a cgroup under the top level workdir and returns the | 155 | * This function creates a cgroup under the top level workdir and returns the |
| 156 | * file descriptor. It is idempotent. | 156 | * file descriptor. It is idempotent. |
| 157 | * | 157 | * |
| 158 | * On success, it returns the file descriptor. On failure it returns 0. | 158 | * On success, it returns the file descriptor. On failure it returns -1. |
| 159 | * If there is a failure, it prints the error to stderr. | 159 | * If there is a failure, it prints the error to stderr. |
| 160 | */ | 160 | */ |
| 161 | int create_and_get_cgroup(const char *path) | 161 | int create_and_get_cgroup(const char *path) |
| @@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path) | |||
| 166 | format_cgroup_path(cgroup_path, path); | 166 | format_cgroup_path(cgroup_path, path); |
| 167 | if (mkdir(cgroup_path, 0777) && errno != EEXIST) { | 167 | if (mkdir(cgroup_path, 0777) && errno != EEXIST) { |
| 168 | log_err("mkdiring cgroup %s .. %s", path, cgroup_path); | 168 | log_err("mkdiring cgroup %s .. %s", path, cgroup_path); |
| 169 | return 0; | 169 | return -1; |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | fd = open(cgroup_path, O_RDONLY); | 172 | fd = open(cgroup_path, O_RDONLY); |
| 173 | if (fd < 0) { | 173 | if (fd < 0) { |
| 174 | log_err("Opening Cgroup"); | 174 | log_err("Opening Cgroup"); |
| 175 | return 0; | 175 | return -1; |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | return fd; | 178 | return fd; |
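Because 0 is a valid file descriptor, returning -1 lets callers use the conventional negative-value check. A short sketch of the caller side; the helper names are from the selftests' cgroup_helpers.h:

#include "cgroup_helpers.h"     /* selftests/bpf helper, assumed to be in the include path */

static int get_test_cgroup_fd(void)
{
        int cg_fd = create_and_get_cgroup("/test-cgroup");

        if (cg_fd < 0)          /* was "if (!cg_fd)" before this change */
                return -1;
        return cg_fd;
}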
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 8bcd38010582..a0bd04befe87 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
| @@ -3526,6 +3526,8 @@ struct pprint_mapv { | |||
| 3526 | ENUM_TWO, | 3526 | ENUM_TWO, |
| 3527 | ENUM_THREE, | 3527 | ENUM_THREE, |
| 3528 | } aenum; | 3528 | } aenum; |
| 3529 | uint32_t ui32b; | ||
| 3530 | uint32_t bits2c:2; | ||
| 3529 | }; | 3531 | }; |
| 3530 | 3532 | ||
| 3531 | static struct btf_raw_test pprint_test_template[] = { | 3533 | static struct btf_raw_test pprint_test_template[] = { |
| @@ -3568,7 +3570,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3568 | BTF_ENUM_ENC(NAME_TBD, 2), | 3570 | BTF_ENUM_ENC(NAME_TBD, 2), |
| 3569 | BTF_ENUM_ENC(NAME_TBD, 3), | 3571 | BTF_ENUM_ENC(NAME_TBD, 3), |
| 3570 | /* struct pprint_mapv */ /* [16] */ | 3572 | /* struct pprint_mapv */ /* [16] */ |
| 3571 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), | 3573 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40), |
| 3572 | BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ | 3574 | BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ |
| 3573 | BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ | 3575 | BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ |
| 3574 | BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ | 3576 | BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ |
| @@ -3577,9 +3579,11 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3577 | BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ | 3579 | BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ |
| 3578 | BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ | 3580 | BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ |
| 3579 | BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ | 3581 | BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ |
| 3582 | BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */ | ||
| 3583 | BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */ | ||
| 3580 | BTF_END_RAW, | 3584 | BTF_END_RAW, |
| 3581 | }, | 3585 | }, |
| 3582 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), | 3586 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), |
| 3583 | .key_size = sizeof(unsigned int), | 3587 | .key_size = sizeof(unsigned int), |
| 3584 | .value_size = sizeof(struct pprint_mapv), | 3588 | .value_size = sizeof(struct pprint_mapv), |
| 3585 | .key_type_id = 3, /* unsigned int */ | 3589 | .key_type_id = 3, /* unsigned int */ |
| @@ -3628,7 +3632,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3628 | BTF_ENUM_ENC(NAME_TBD, 2), | 3632 | BTF_ENUM_ENC(NAME_TBD, 2), |
| 3629 | BTF_ENUM_ENC(NAME_TBD, 3), | 3633 | BTF_ENUM_ENC(NAME_TBD, 3), |
| 3630 | /* struct pprint_mapv */ /* [16] */ | 3634 | /* struct pprint_mapv */ /* [16] */ |
| 3631 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), | 3635 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), |
| 3632 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ | 3636 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ |
| 3633 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ | 3637 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ |
| 3634 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ | 3638 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ |
| @@ -3637,9 +3641,11 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3637 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ | 3641 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ |
| 3638 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ | 3642 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ |
| 3639 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ | 3643 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ |
| 3644 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ | ||
| 3645 | BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ | ||
| 3640 | BTF_END_RAW, | 3646 | BTF_END_RAW, |
| 3641 | }, | 3647 | }, |
| 3642 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), | 3648 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"), |
| 3643 | .key_size = sizeof(unsigned int), | 3649 | .key_size = sizeof(unsigned int), |
| 3644 | .value_size = sizeof(struct pprint_mapv), | 3650 | .value_size = sizeof(struct pprint_mapv), |
| 3645 | .key_type_id = 3, /* unsigned int */ | 3651 | .key_type_id = 3, /* unsigned int */ |
| @@ -3690,7 +3696,7 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3690 | BTF_ENUM_ENC(NAME_TBD, 2), | 3696 | BTF_ENUM_ENC(NAME_TBD, 2), |
| 3691 | BTF_ENUM_ENC(NAME_TBD, 3), | 3697 | BTF_ENUM_ENC(NAME_TBD, 3), |
| 3692 | /* struct pprint_mapv */ /* [16] */ | 3698 | /* struct pprint_mapv */ /* [16] */ |
| 3693 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), | 3699 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40), |
| 3694 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ | 3700 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ |
| 3695 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ | 3701 | BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ |
| 3696 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ | 3702 | BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ |
| @@ -3699,13 +3705,15 @@ static struct btf_raw_test pprint_test_template[] = { | |||
| 3699 | BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ | 3705 | BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ |
| 3700 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ | 3706 | BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ |
| 3701 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ | 3707 | BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ |
| 3708 | BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ | ||
| 3709 | BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ | ||
| 3702 | /* typedef unsigned int ___int */ /* [17] */ | 3710 | /* typedef unsigned int ___int */ /* [17] */ |
| 3703 | BTF_TYPEDEF_ENC(NAME_TBD, 18), | 3711 | BTF_TYPEDEF_ENC(NAME_TBD, 18), |
| 3704 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ | 3712 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ |
| 3705 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ | 3713 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ |
| 3706 | BTF_END_RAW, | 3714 | BTF_END_RAW, |
| 3707 | }, | 3715 | }, |
| 3708 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), | 3716 | BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"), |
| 3709 | .key_size = sizeof(unsigned int), | 3717 | .key_size = sizeof(unsigned int), |
| 3710 | .value_size = sizeof(struct pprint_mapv), | 3718 | .value_size = sizeof(struct pprint_mapv), |
| 3711 | .key_type_id = 3, /* unsigned int */ | 3719 | .key_type_id = 3, /* unsigned int */ |
| @@ -3793,6 +3801,8 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i, | |||
| 3793 | v->unused_bits2b = 3; | 3801 | v->unused_bits2b = 3; |
| 3794 | v->ui64 = i; | 3802 | v->ui64 = i; |
| 3795 | v->aenum = i & 0x03; | 3803 | v->aenum = i & 0x03; |
| 3804 | v->ui32b = 4; | ||
| 3805 | v->bits2c = 1; | ||
| 3796 | v = (void *)v + rounded_value_size; | 3806 | v = (void *)v + rounded_value_size; |
| 3797 | } | 3807 | } |
| 3798 | } | 3808 | } |
| @@ -3955,7 +3965,8 @@ static int do_test_pprint(int test_num) | |||
| 3955 | 3965 | ||
| 3956 | nexpected_line = snprintf(expected_line, sizeof(expected_line), | 3966 | nexpected_line = snprintf(expected_line, sizeof(expected_line), |
| 3957 | "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," | 3967 | "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," |
| 3958 | "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", | 3968 | "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," |
| 3969 | "%u,0x%x}\n", | ||
| 3959 | percpu_map ? "\tcpu" : "", | 3970 | percpu_map ? "\tcpu" : "", |
| 3960 | percpu_map ? cpu : next_key, | 3971 | percpu_map ? cpu : next_key, |
| 3961 | cmapv->ui32, cmapv->si32, | 3972 | cmapv->ui32, cmapv->si32, |
| @@ -3967,7 +3978,9 @@ static int do_test_pprint(int test_num) | |||
| 3967 | cmapv->ui8a[2], cmapv->ui8a[3], | 3978 | cmapv->ui8a[2], cmapv->ui8a[3], |
| 3968 | cmapv->ui8a[4], cmapv->ui8a[5], | 3979 | cmapv->ui8a[4], cmapv->ui8a[5], |
| 3969 | cmapv->ui8a[6], cmapv->ui8a[7], | 3980 | cmapv->ui8a[6], cmapv->ui8a[7], |
| 3970 | pprint_enum_str[cmapv->aenum]); | 3981 | pprint_enum_str[cmapv->aenum], |
| 3982 | cmapv->ui32b, | ||
| 3983 | cmapv->bits2c); | ||
| 3971 | 3984 | ||
| 3972 | err = check_line(expected_line, nexpected_line, | 3985 | err = check_line(expected_line, nexpected_line, |
| 3973 | sizeof(expected_line), line); | 3986 | sizeof(expected_line), line); |
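The BTF hunks above grow struct pprint_mapv from 8 to 10 members and from 32 to 40 bytes by appending a plain uint32_t (ui32b) and a 2-bit bitfield (bits2c); the string section, set_pprint_mapv() and the expected output format (the trailing ",%u,0x%x") are updated to match. A rough C sketch of the resulting layout, with member names taken from the diff and the exact padding assumed rather than copied from test_btf.c:

#include <stdint.h>

/* Approximate shape of the pretty-print value struct after this change;
 * the authoritative definition lives in test_btf.c.  Bit offsets match
 * the BTF_MEMBER_OFFSET() values in the hunks: ui32b at bit 224, bits2c
 * at bit 256, giving vlen 10 and a 40-byte struct once padded to the
 * 8-byte alignment imposed by the union's uint64_t.
 */
struct pprint_mapv_sketch {
	uint32_t ui32;			/* bit   0 */
	uint16_t ui16;			/* bit  32, 2-byte hole follows */
	int32_t si32;			/* bit  64 */
	unsigned int unused_bits2a:2;	/* bit  96 */
	unsigned int bits28:28;		/* bit  98 */
	unsigned int unused_bits2b:2;	/* bit 126 */
	union {				/* bit 128 */
		uint64_t ui64;
		uint8_t ui8a[8];
	};
	int aenum;			/* bit 192, an enum in the real struct */
	uint32_t ui32b;			/* bit 224, new member */
	uint32_t bits2c:2;		/* bit 256, new 2-bit member */
};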
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index f44834155f25..2fc4625c1a15 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c | |||
| @@ -81,7 +81,7 @@ int main(int argc, char **argv) | |||
| 81 | 81 | ||
| 82 | /* Create a cgroup, get fd, and join it */ | 82 | /* Create a cgroup, get fd, and join it */ |
| 83 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 83 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
| 84 | if (!cgroup_fd) { | 84 | if (cgroup_fd < 0) { |
| 85 | printf("Failed to create test cgroup\n"); | 85 | printf("Failed to create test cgroup\n"); |
| 86 | goto err; | 86 | goto err; |
| 87 | } | 87 | } |
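This hunk, and the matching ones in the other cgroup-based selftests below, switches the error check from "!cgroup_fd" to "cgroup_fd < 0", consistent with a helper that reports failure with a negative value while keeping descriptor 0 usable. A minimal sketch of the calling pattern, with a hypothetical helper standing in for create_and_get_cgroup():

#include <fcntl.h>
#include <stdio.h>

/* Hypothetical stand-in: returns an open descriptor on success and -1
 * on failure.  Since 0 is a valid descriptor, "if (!fd)" would treat a
 * legitimate fd 0 as an error and silently accept -1; "if (fd < 0)"
 * catches exactly the failure case.
 */
static int get_dir_fd(const char *path)
{
	return open(path, O_RDONLY);
}

int main(void)
{
	int fd = get_dir_fd("/sys/fs/cgroup");

	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("got fd %d\n", fd);
	return 0;
}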
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 9c8b50bac7e0..76e4993b7c16 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c | |||
| @@ -43,7 +43,7 @@ int main(int argc, char **argv) | |||
| 43 | 43 | ||
| 44 | /* Create a cgroup, get fd, and join it */ | 44 | /* Create a cgroup, get fd, and join it */ |
| 45 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 45 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
| 46 | if (!cgroup_fd) { | 46 | if (cgroup_fd < 0) { |
| 47 | printf("Failed to create test cgroup\n"); | 47 | printf("Failed to create test cgroup\n"); |
| 48 | goto err; | 48 | goto err; |
| 49 | } | 49 | } |
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c index 44ed7f29f8ab..c1da5404454a 100644 --- a/tools/testing/selftests/bpf/test_netcnt.c +++ b/tools/testing/selftests/bpf/test_netcnt.c | |||
| @@ -65,7 +65,7 @@ int main(int argc, char **argv) | |||
| 65 | 65 | ||
| 66 | /* Create a cgroup, get fd, and join it */ | 66 | /* Create a cgroup, get fd, and join it */ |
| 67 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); | 67 | cgroup_fd = create_and_get_cgroup(TEST_CGROUP); |
| 68 | if (!cgroup_fd) { | 68 | if (cgroup_fd < 0) { |
| 69 | printf("Failed to create test cgroup\n"); | 69 | printf("Failed to create test cgroup\n"); |
| 70 | goto err; | 70 | goto err; |
| 71 | } | 71 | } |
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 126fc624290d..25f0083a9b2e 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c | |||
| @@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void) | |||
| 1188 | int i, j; | 1188 | int i, j; |
| 1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1189 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
| 1190 | int build_id_matches = 0; | 1190 | int build_id_matches = 0; |
| 1191 | int retry = 1; | ||
| 1191 | 1192 | ||
| 1193 | retry: | ||
| 1192 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); | 1194 | err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); |
| 1193 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1195 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
| 1194 | goto out; | 1196 | goto out; |
| @@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void) | |||
| 1301 | previous_key = key; | 1303 | previous_key = key; |
| 1302 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1304 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
| 1303 | 1305 | ||
| 1306 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
| 1307 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
| 1308 | * try it one more time. | ||
| 1309 | */ | ||
| 1310 | if (build_id_matches < 1 && retry--) { | ||
| 1311 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
| 1312 | close(pmu_fd); | ||
| 1313 | bpf_object__close(obj); | ||
| 1314 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
| 1315 | __func__); | ||
| 1316 | goto retry; | ||
| 1317 | } | ||
| 1318 | |||
| 1304 | if (CHECK(build_id_matches < 1, "build id match", | 1319 | if (CHECK(build_id_matches < 1, "build id match", |
| 1305 | "Didn't find expected build ID from the map\n")) | 1320 | "Didn't find expected build ID from the map\n")) |
| 1306 | goto disable_pmu; | 1321 | goto disable_pmu; |
| @@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void) | |||
| 1341 | int i, j; | 1356 | int i, j; |
| 1342 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; | 1357 | struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; |
| 1343 | int build_id_matches = 0; | 1358 | int build_id_matches = 0; |
| 1359 | int retry = 1; | ||
| 1344 | 1360 | ||
| 1361 | retry: | ||
| 1345 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); | 1362 | err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); |
| 1346 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) | 1363 | if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) |
| 1347 | return; | 1364 | return; |
| @@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void) | |||
| 1436 | previous_key = key; | 1453 | previous_key = key; |
| 1437 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); | 1454 | } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); |
| 1438 | 1455 | ||
| 1456 | /* stack_map_get_build_id_offset() is racy and sometimes can return | ||
| 1457 | * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; | ||
| 1458 | * try it one more time. | ||
| 1459 | */ | ||
| 1460 | if (build_id_matches < 1 && retry--) { | ||
| 1461 | ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); | ||
| 1462 | close(pmu_fd); | ||
| 1463 | bpf_object__close(obj); | ||
| 1464 | printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", | ||
| 1465 | __func__); | ||
| 1466 | goto retry; | ||
| 1467 | } | ||
| 1468 | |||
| 1439 | if (CHECK(build_id_matches < 1, "build id match", | 1469 | if (CHECK(build_id_matches < 1, "build id match", |
| 1440 | "Didn't find expected build ID from the map\n")) | 1470 | "Didn't find expected build ID from the map\n")) |
| 1441 | goto disable_pmu; | 1471 | goto disable_pmu; |
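Both stacktrace build-ID tests above gain a single bounded retry: if the racy stack_map_get_build_id_offset() path yields no match, the test tears down its state, warns, and runs once more before declaring failure. The shape of that pattern, reduced to a standalone sketch with a placeholder check:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder for the racy condition (the build-ID lookup in the real test). */
static bool check_once(void)
{
	return rand() % 2;	/* succeeds roughly half the time */
}

int main(void)
{
	int retry = 1;		/* exactly one extra attempt, as in the diff */
	bool ok;

retry:
	ok = check_once();
	if (!ok && retry--) {
		/* tear down and re-create any state here, then try again */
		fprintf(stderr, "WARN: check failed, retrying\n");
		goto retry;
	}
	printf("%s\n", ok ? "OK" : "FAILED after retry");
	return ok ? 0 : 1;
}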
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c index c121cc59f314..9220747c069d 100644 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c | |||
| @@ -164,7 +164,7 @@ int main(int argc, char **argv) | |||
| 164 | goto err; | 164 | goto err; |
| 165 | 165 | ||
| 166 | cgfd = create_and_get_cgroup(CGROUP_PATH); | 166 | cgfd = create_and_get_cgroup(CGROUP_PATH); |
| 167 | if (!cgfd) | 167 | if (cgfd < 0) |
| 168 | goto err; | 168 | goto err; |
| 169 | 169 | ||
| 170 | if (join_cgroup(CGROUP_PATH)) | 170 | if (join_cgroup(CGROUP_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index b8ebe2f58074..561ffb6d6433 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c | |||
| @@ -458,7 +458,7 @@ int main(int argc, char **argv) | |||
| 458 | goto err; | 458 | goto err; |
| 459 | 459 | ||
| 460 | cgfd = create_and_get_cgroup(CG_PATH); | 460 | cgfd = create_and_get_cgroup(CG_PATH); |
| 461 | if (!cgfd) | 461 | if (cgfd < 0) |
| 462 | goto err; | 462 | goto err; |
| 463 | 463 | ||
| 464 | if (join_cgroup(CG_PATH)) | 464 | if (join_cgroup(CG_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 73b7493d4120..3f110eaaf29c 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" | 44 | #define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" |
| 45 | #define SRC6_IP "::1" | 45 | #define SRC6_IP "::1" |
| 46 | #define SRC6_REWRITE_IP "::6" | 46 | #define SRC6_REWRITE_IP "::6" |
| 47 | #define WILDCARD6_IP "::" | ||
| 47 | #define SERV6_PORT 6060 | 48 | #define SERV6_PORT 6060 |
| 48 | #define SERV6_REWRITE_PORT 6666 | 49 | #define SERV6_REWRITE_PORT 6666 |
| 49 | 50 | ||
| @@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test); | |||
| 85 | static int bind6_prog_load(const struct sock_addr_test *test); | 86 | static int bind6_prog_load(const struct sock_addr_test *test); |
| 86 | static int connect4_prog_load(const struct sock_addr_test *test); | 87 | static int connect4_prog_load(const struct sock_addr_test *test); |
| 87 | static int connect6_prog_load(const struct sock_addr_test *test); | 88 | static int connect6_prog_load(const struct sock_addr_test *test); |
| 89 | static int sendmsg_allow_prog_load(const struct sock_addr_test *test); | ||
| 88 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test); | 90 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test); |
| 89 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); | 91 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); |
| 90 | static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); | 92 | static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); |
| 91 | static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); | 93 | static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); |
| 92 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); | 94 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); |
| 93 | static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); | 95 | static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); |
| 96 | static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test); | ||
| 94 | 97 | ||
| 95 | static struct sock_addr_test tests[] = { | 98 | static struct sock_addr_test tests[] = { |
| 96 | /* bind */ | 99 | /* bind */ |
| @@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = { | |||
| 463 | SYSCALL_ENOTSUPP, | 466 | SYSCALL_ENOTSUPP, |
| 464 | }, | 467 | }, |
| 465 | { | 468 | { |
| 469 | "sendmsg6: set dst IP = [::] (BSD'ism)", | ||
| 470 | sendmsg6_rw_wildcard_prog_load, | ||
| 471 | BPF_CGROUP_UDP6_SENDMSG, | ||
| 472 | BPF_CGROUP_UDP6_SENDMSG, | ||
| 473 | AF_INET6, | ||
| 474 | SOCK_DGRAM, | ||
| 475 | SERV6_IP, | ||
| 476 | SERV6_PORT, | ||
| 477 | SERV6_REWRITE_IP, | ||
| 478 | SERV6_REWRITE_PORT, | ||
| 479 | SRC6_REWRITE_IP, | ||
| 480 | SUCCESS, | ||
| 481 | }, | ||
| 482 | { | ||
| 483 | "sendmsg6: preserve dst IP = [::] (BSD'ism)", | ||
| 484 | sendmsg_allow_prog_load, | ||
| 485 | BPF_CGROUP_UDP6_SENDMSG, | ||
| 486 | BPF_CGROUP_UDP6_SENDMSG, | ||
| 487 | AF_INET6, | ||
| 488 | SOCK_DGRAM, | ||
| 489 | WILDCARD6_IP, | ||
| 490 | SERV6_PORT, | ||
| 491 | SERV6_REWRITE_IP, | ||
| 492 | SERV6_PORT, | ||
| 493 | SRC6_IP, | ||
| 494 | SUCCESS, | ||
| 495 | }, | ||
| 496 | { | ||
| 466 | "sendmsg6: deny call", | 497 | "sendmsg6: deny call", |
| 467 | sendmsg_deny_prog_load, | 498 | sendmsg_deny_prog_load, |
| 468 | BPF_CGROUP_UDP6_SENDMSG, | 499 | BPF_CGROUP_UDP6_SENDMSG, |
| @@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test) | |||
| 734 | return load_path(test, CONNECT6_PROG_PATH); | 765 | return load_path(test, CONNECT6_PROG_PATH); |
| 735 | } | 766 | } |
| 736 | 767 | ||
| 737 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test) | 768 | static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test, |
| 769 | int32_t rc) | ||
| 738 | { | 770 | { |
| 739 | struct bpf_insn insns[] = { | 771 | struct bpf_insn insns[] = { |
| 740 | /* return 0 */ | 772 | /* return rc */ |
| 741 | BPF_MOV64_IMM(BPF_REG_0, 0), | 773 | BPF_MOV64_IMM(BPF_REG_0, rc), |
| 742 | BPF_EXIT_INSN(), | 774 | BPF_EXIT_INSN(), |
| 743 | }; | 775 | }; |
| 744 | return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); | 776 | return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); |
| 745 | } | 777 | } |
| 746 | 778 | ||
| 779 | static int sendmsg_allow_prog_load(const struct sock_addr_test *test) | ||
| 780 | { | ||
| 781 | return sendmsg_ret_only_prog_load(test, /*rc*/ 1); | ||
| 782 | } | ||
| 783 | |||
| 784 | static int sendmsg_deny_prog_load(const struct sock_addr_test *test) | ||
| 785 | { | ||
| 786 | return sendmsg_ret_only_prog_load(test, /*rc*/ 0); | ||
| 787 | } | ||
| 788 | |||
| 747 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) | 789 | static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) |
| 748 | { | 790 | { |
| 749 | struct sockaddr_in dst4_rw_addr; | 791 | struct sockaddr_in dst4_rw_addr; |
| @@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test) | |||
| 864 | return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); | 906 | return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); |
| 865 | } | 907 | } |
| 866 | 908 | ||
| 909 | static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test) | ||
| 910 | { | ||
| 911 | return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP); | ||
| 912 | } | ||
| 913 | |||
| 867 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) | 914 | static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) |
| 868 | { | 915 | { |
| 869 | return load_path(test, SENDMSG6_PROG_PATH); | 916 | return load_path(test, SENDMSG6_PROG_PATH); |
| @@ -1395,7 +1442,7 @@ int main(int argc, char **argv) | |||
| 1395 | goto err; | 1442 | goto err; |
| 1396 | 1443 | ||
| 1397 | cgfd = create_and_get_cgroup(CG_PATH); | 1444 | cgfd = create_and_get_cgroup(CG_PATH); |
| 1398 | if (!cgfd) | 1445 | if (cgfd < 0) |
| 1399 | goto err; | 1446 | goto err; |
| 1400 | 1447 | ||
| 1401 | if (join_cgroup(CG_PATH)) | 1448 | if (join_cgroup(CG_PATH)) |
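The two new sendmsg6 cases rely on the BSD'ism that a UDP datagram sent to the unspecified address [::] is delivered locally, so a hook may either rewrite the wildcard destination or leave it untouched and still expect delivery. A rough standalone illustration of that behavior, with no BPF involved; it assumes the kernel maps the wildcard destination to loopback, which is what the tests exercise, and port/buffer values are illustrative:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 addr = {
		.sin6_family = AF_INET6,
		.sin6_port = htons(7777),
		.sin6_addr = IN6ADDR_ANY_INIT,	/* [::] */
	};
	struct timeval tv = { .tv_sec = 2 };	/* don't block forever */
	char buf[16];
	ssize_t n;
	int rx = socket(AF_INET6, SOCK_DGRAM, 0);
	int tx = socket(AF_INET6, SOCK_DGRAM, 0);

	if (rx < 0 || tx < 0)
		return 1;
	setsockopt(rx, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
	if (bind(rx, (struct sockaddr *)&addr, sizeof(addr)))
		return 1;
	/* destination deliberately left as [::] */
	if (sendto(tx, "ping", 4, 0, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");
	n = recv(rx, buf, sizeof(buf), 0);
	printf(n == 4 ? "delivered locally\n" : "not delivered\n");
	close(tx);
	close(rx);
	return 0;
}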
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index b6c2c605d8c0..fc7832ee566b 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c | |||
| @@ -202,7 +202,7 @@ int main(int argc, char **argv) | |||
| 202 | goto err; | 202 | goto err; |
| 203 | 203 | ||
| 204 | cgfd = create_and_get_cgroup(CG_PATH); | 204 | cgfd = create_and_get_cgroup(CG_PATH); |
| 205 | if (!cgfd) | 205 | if (cgfd < 0) |
| 206 | goto err; | 206 | goto err; |
| 207 | 207 | ||
| 208 | if (join_cgroup(CG_PATH)) | 208 | if (join_cgroup(CG_PATH)) |
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index e6eebda7d112..716b4e3be581 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c | |||
| @@ -103,7 +103,7 @@ int main(int argc, char **argv) | |||
| 103 | goto err; | 103 | goto err; |
| 104 | 104 | ||
| 105 | cg_fd = create_and_get_cgroup(cg_path); | 105 | cg_fd = create_and_get_cgroup(cg_path); |
| 106 | if (!cg_fd) | 106 | if (cg_fd < 0) |
| 107 | goto err; | 107 | goto err; |
| 108 | 108 | ||
| 109 | if (join_cgroup(cg_path)) | 109 | if (join_cgroup(cg_path)) |
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index ff3c4522aed6..4e4353711a86 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c | |||
| @@ -115,7 +115,7 @@ int main(int argc, char **argv) | |||
| 115 | goto err; | 115 | goto err; |
| 116 | 116 | ||
| 117 | cg_fd = create_and_get_cgroup(cg_path); | 117 | cg_fd = create_and_get_cgroup(cg_path); |
| 118 | if (!cg_fd) | 118 | if (cg_fd < 0) |
| 119 | goto err; | 119 | goto err; |
| 120 | 120 | ||
| 121 | if (join_cgroup(cg_path)) | 121 | if (join_cgroup(cg_path)) |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 10d44446e801..2fd90d456892 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
| @@ -6934,6 +6934,126 @@ static struct bpf_test tests[] = { | |||
| 6934 | .retval = 1, | 6934 | .retval = 1, |
| 6935 | }, | 6935 | }, |
| 6936 | { | 6936 | { |
| 6937 | "map access: mixing value pointer and scalar, 1", | ||
| 6938 | .insns = { | ||
| 6939 | // load map value pointer into r0 and r2 | ||
| 6940 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
| 6941 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
| 6942 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
| 6943 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
| 6944 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
| 6945 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
| 6946 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 6947 | BPF_EXIT_INSN(), | ||
| 6948 | // load some number from the map into r1 | ||
| 6949 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
| 6950 | // depending on r1, branch: | ||
| 6951 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), | ||
| 6952 | // branch A | ||
| 6953 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), | ||
| 6954 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
| 6955 | BPF_JMP_A(2), | ||
| 6956 | // branch B | ||
| 6957 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
| 6958 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
| 6959 | // common instruction | ||
| 6960 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
| 6961 | // depending on r1, branch: | ||
| 6962 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), | ||
| 6963 | // branch A | ||
| 6964 | BPF_JMP_A(4), | ||
| 6965 | // branch B | ||
| 6966 | BPF_MOV64_IMM(BPF_REG_0, 0x13371337), | ||
| 6967 | // verifier follows fall-through | ||
| 6968 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), | ||
| 6969 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 6970 | BPF_EXIT_INSN(), | ||
| 6971 | // fake-dead code; targeted from branch A to | ||
| 6972 | // prevent dead code sanitization | ||
| 6973 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), | ||
| 6974 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 6975 | BPF_EXIT_INSN(), | ||
| 6976 | }, | ||
| 6977 | .fixup_map_array_48b = { 1 }, | ||
| 6978 | .result = ACCEPT, | ||
| 6979 | .result_unpriv = REJECT, | ||
| 6980 | .errstr_unpriv = "R2 tried to add from different pointers or scalars", | ||
| 6981 | .retval = 0, | ||
| 6982 | }, | ||
| 6983 | { | ||
| 6984 | "map access: mixing value pointer and scalar, 2", | ||
| 6985 | .insns = { | ||
| 6986 | // load map value pointer into r0 and r2 | ||
| 6987 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
| 6988 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
| 6989 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
| 6990 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
| 6991 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
| 6992 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
| 6993 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 6994 | BPF_EXIT_INSN(), | ||
| 6995 | // load some number from the map into r1 | ||
| 6996 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
| 6997 | // depending on r1, branch: | ||
| 6998 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), | ||
| 6999 | // branch A | ||
| 7000 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
| 7001 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
| 7002 | BPF_JMP_A(2), | ||
| 7003 | // branch B | ||
| 7004 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), | ||
| 7005 | BPF_MOV64_IMM(BPF_REG_3, 0), | ||
| 7006 | // common instruction | ||
| 7007 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
| 7008 | // depending on r1, branch: | ||
| 7009 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), | ||
| 7010 | // branch A | ||
| 7011 | BPF_JMP_A(4), | ||
| 7012 | // branch B | ||
| 7013 | BPF_MOV64_IMM(BPF_REG_0, 0x13371337), | ||
| 7014 | // verifier follows fall-through | ||
| 7015 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), | ||
| 7016 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 7017 | BPF_EXIT_INSN(), | ||
| 7018 | // fake-dead code; targeted from branch A to | ||
| 7019 | // prevent dead code sanitization | ||
| 7020 | BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), | ||
| 7021 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
| 7022 | BPF_EXIT_INSN(), | ||
| 7023 | }, | ||
| 7024 | .fixup_map_array_48b = { 1 }, | ||
| 7025 | .result = ACCEPT, | ||
| 7026 | .result_unpriv = REJECT, | ||
| 7027 | .errstr_unpriv = "R2 tried to add from different maps or paths", | ||
| 7028 | .retval = 0, | ||
| 7029 | }, | ||
| 7030 | { | ||
| 7031 | "sanitation: alu with different scalars", | ||
| 7032 | .insns = { | ||
| 7033 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
| 7034 | BPF_LD_MAP_FD(BPF_REG_ARG1, 0), | ||
| 7035 | BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), | ||
| 7036 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), | ||
| 7037 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), | ||
| 7038 | BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), | ||
| 7039 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), | ||
| 7040 | BPF_EXIT_INSN(), | ||
| 7041 | BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), | ||
| 7042 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), | ||
| 7043 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
| 7044 | BPF_MOV64_IMM(BPF_REG_3, 0x100000), | ||
| 7045 | BPF_JMP_A(2), | ||
| 7046 | BPF_MOV64_IMM(BPF_REG_2, 42), | ||
| 7047 | BPF_MOV64_IMM(BPF_REG_3, 0x100001), | ||
| 7048 | BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), | ||
| 7049 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
| 7050 | BPF_EXIT_INSN(), | ||
| 7051 | }, | ||
| 7052 | .fixup_map_array_48b = { 1 }, | ||
| 7053 | .result = ACCEPT, | ||
| 7054 | .retval = 0x100000, | ||
| 7055 | }, | ||
| 7056 | { | ||
| 6937 | "map access: value_ptr += known scalar, upper oob arith, test 1", | 7057 | "map access: value_ptr += known scalar, upper oob arith, test 1", |
| 6938 | .insns = { | 7058 | .insns = { |
| 6939 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | 7059 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh index 94fdbf215c14..c4cf6e6d800e 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh | |||
| @@ -25,6 +25,7 @@ ALL_TESTS=" | |||
| 25 | lag_unlink_slaves_test | 25 | lag_unlink_slaves_test |
| 26 | lag_dev_deletion_test | 26 | lag_dev_deletion_test |
| 27 | vlan_interface_uppers_test | 27 | vlan_interface_uppers_test |
| 28 | bridge_extern_learn_test | ||
| 28 | devlink_reload_test | 29 | devlink_reload_test |
| 29 | " | 30 | " |
| 30 | NUM_NETIFS=2 | 31 | NUM_NETIFS=2 |
| @@ -541,6 +542,25 @@ vlan_interface_uppers_test() | |||
| 541 | ip link del dev br0 | 542 | ip link del dev br0 |
| 542 | } | 543 | } |
| 543 | 544 | ||
| 545 | bridge_extern_learn_test() | ||
| 546 | { | ||
| 547 | # Test that externally learned entries added from user space are | ||
| 548 | # marked as offloaded | ||
| 549 | RET=0 | ||
| 550 | |||
| 551 | ip link add name br0 type bridge | ||
| 552 | ip link set dev $swp1 master br0 | ||
| 553 | |||
| 554 | bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn | ||
| 555 | |||
| 556 | bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload | ||
| 557 | check_err $? "fdb entry not marked as offloaded when should" | ||
| 558 | |||
| 559 | log_test "externally learned fdb entry" | ||
| 560 | |||
| 561 | ip link del dev br0 | ||
| 562 | } | ||
| 563 | |||
| 544 | devlink_reload_test() | 564 | devlink_reload_test() |
| 545 | { | 565 | { |
| 546 | # Test that after executing all the above configuration tests, a | 566 | # Test that after executing all the above configuration tests, a |
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh index dcf9f4e913e0..ae6146ec5afd 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh | |||
| @@ -847,6 +847,24 @@ sanitization_vlan_aware_test() | |||
| 847 | 847 | ||
| 848 | log_test "vlan-aware - failed enslavement to vlan-aware bridge" | 848 | log_test "vlan-aware - failed enslavement to vlan-aware bridge" |
| 849 | 849 | ||
| 850 | bridge vlan del vid 10 dev vxlan20 | ||
| 851 | bridge vlan add vid 20 dev vxlan20 pvid untagged | ||
| 852 | |||
| 853 | # Test that offloading of an unsupported tunnel fails when it is | ||
| 854 | # triggered by addition of VLAN to a local port | ||
| 855 | RET=0 | ||
| 856 | |||
| 857 | # TOS must be set to inherit | ||
| 858 | ip link set dev vxlan10 type vxlan tos 42 | ||
| 859 | |||
| 860 | ip link set dev $swp1 master br0 | ||
| 861 | bridge vlan add vid 10 dev $swp1 &> /dev/null | ||
| 862 | check_fail $? | ||
| 863 | |||
| 864 | log_test "vlan-aware - failed vlan addition to a local port" | ||
| 865 | |||
| 866 | ip link set dev vxlan10 type vxlan tos inherit | ||
| 867 | |||
| 850 | ip link del dev vxlan20 | 868 | ip link del dev vxlan20 |
| 851 | ip link del dev vxlan10 | 869 | ip link del dev vxlan10 |
| 852 | ip link del dev br0 | 870 | ip link del dev br0 |
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c index f8d468f54e98..aaa1e9f083c3 100644 --- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c +++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c | |||
| @@ -37,7 +37,7 @@ static int get_debugfs(char **path) | |||
| 37 | struct libmnt_table *tb; | 37 | struct libmnt_table *tb; |
| 38 | struct libmnt_iter *itr = NULL; | 38 | struct libmnt_iter *itr = NULL; |
| 39 | struct libmnt_fs *fs; | 39 | struct libmnt_fs *fs; |
| 40 | int found = 0; | 40 | int found = 0, ret; |
| 41 | 41 | ||
| 42 | cxt = mnt_new_context(); | 42 | cxt = mnt_new_context(); |
| 43 | if (!cxt) | 43 | if (!cxt) |
| @@ -58,8 +58,11 @@ static int get_debugfs(char **path) | |||
| 58 | break; | 58 | break; |
| 59 | } | 59 | } |
| 60 | } | 60 | } |
| 61 | if (found) | 61 | if (found) { |
| 62 | asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); | 62 | ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); |
| 63 | if (ret < 0) | ||
| 64 | err(EXIT_FAILURE, "failed to format string"); | ||
| 65 | } | ||
| 63 | 66 | ||
| 64 | mnt_free_iter(itr); | 67 | mnt_free_iter(itr); |
| 65 | mnt_free_context(cxt); | 68 | mnt_free_context(cxt); |
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 23022e9d32eb..b52cfdefecbf 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
| @@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, | |||
| 571 | * already exist. | 571 | * already exist. |
| 572 | */ | 572 | */ |
| 573 | region = (struct userspace_mem_region *) userspace_mem_region_find( | 573 | region = (struct userspace_mem_region *) userspace_mem_region_find( |
| 574 | vm, guest_paddr, guest_paddr + npages * vm->page_size); | 574 | vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); |
| 575 | if (region != NULL) | 575 | if (region != NULL) |
| 576 | TEST_ASSERT(false, "overlapping userspace_mem_region already " | 576 | TEST_ASSERT(false, "overlapping userspace_mem_region already " |
| 577 | "exists\n" | 577 | "exists\n" |
| @@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, | |||
| 587 | region = region->next) { | 587 | region = region->next) { |
| 588 | if (region->region.slot == slot) | 588 | if (region->region.slot == slot) |
| 589 | break; | 589 | break; |
| 590 | if ((guest_paddr <= (region->region.guest_phys_addr | ||
| 591 | + region->region.memory_size)) | ||
| 592 | && ((guest_paddr + npages * vm->page_size) | ||
| 593 | >= region->region.guest_phys_addr)) | ||
| 594 | break; | ||
| 595 | } | 590 | } |
| 596 | if (region != NULL) | 591 | if (region != NULL) |
| 597 | TEST_ASSERT(false, "A mem region with the requested slot " | 592 | TEST_ASSERT(false, "A mem region with the requested slot " |
| 598 | "or overlapping physical memory range already exists.\n" | 593 | "already exists.\n" |
| 599 | " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" | 594 | " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" |
| 600 | " existing slot: %u paddr: 0x%lx size: 0x%lx", | 595 | " existing slot: %u paddr: 0x%lx size: 0x%lx", |
| 601 | slot, guest_paddr, npages, | 596 | slot, guest_paddr, npages, |
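The first kvm_util.c hunk above fixes an off-by-one: userspace_mem_region_find() takes an inclusive end address, so the overlap probe must pass start + size - 1, otherwise a region ending exactly where the new one begins looks overlapping. The second hunk then drops the now-redundant open-coded overlap test from the slot loop. A small sketch of the inclusive-end arithmetic, with the helper name and values illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Two ranges with inclusive end addresses overlap iff each one starts
 * no later than the other one ends.
 */
static bool ranges_overlap(unsigned long a_start, unsigned long a_end,
			   unsigned long b_start, unsigned long b_end)
{
	return a_start <= b_end && b_start <= a_end;
}

int main(void)
{
	unsigned long page = 4096;

	/* adjacent regions [0, 4095] and [4096, 8191]: no overlap */
	printf("inclusive end:  overlap=%d\n",
	       ranges_overlap(0, page - 1, page, 2 * page - 1));
	/* passing start + size instead of start + size - 1 falsely overlaps */
	printf("off-by-one end: overlap=%d\n",
	       ranges_overlap(0, page, page, 2 * page));
	return 0;
}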
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index ea3c73e8f4f6..c49c2a28b0eb 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c | |||
| @@ -103,6 +103,12 @@ int main(int argc, char *argv[]) | |||
| 103 | 103 | ||
| 104 | vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); | 104 | vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); |
| 105 | 105 | ||
| 106 | /* KVM should return supported EVMCS version range */ | ||
| 107 | TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && | ||
| 108 | (evmcs_ver & 0xff) > 0, | ||
| 109 | "Incorrect EVMCS version range: %x:%x\n", | ||
| 110 | evmcs_ver & 0xff, evmcs_ver >> 8); | ||
| 111 | |||
| 106 | run = vcpu_state(vm, VCPU_ID); | 112 | run = vcpu_state(vm, VCPU_ID); |
| 107 | 113 | ||
| 108 | vcpu_regs_get(vm, VCPU_ID, ®s1); | 114 | vcpu_regs_get(vm, VCPU_ID, ®s1); |
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh index d8313d0438b7..b90dff8d3a94 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #!/bin/bash | 1 | #!/bin/bash |
| 2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
| 3 | 3 | ||
| 4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding" | 4 | ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn" |
| 5 | NUM_NETIFS=4 | 5 | NUM_NETIFS=4 |
| 6 | CHECK_TC="yes" | 6 | CHECK_TC="yes" |
| 7 | source lib.sh | 7 | source lib.sh |
| @@ -96,6 +96,51 @@ flooding() | |||
| 96 | flood_test $swp2 $h1 $h2 | 96 | flood_test $swp2 $h1 $h2 |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | vlan_deletion() | ||
| 100 | { | ||
| 101 | # Test that the deletion of a VLAN on a bridge port does not affect | ||
| 102 | # the PVID VLAN | ||
| 103 | log_info "Add and delete a VLAN on bridge port $swp1" | ||
| 104 | |||
| 105 | bridge vlan add vid 10 dev $swp1 | ||
| 106 | bridge vlan del vid 10 dev $swp1 | ||
| 107 | |||
| 108 | ping_ipv4 | ||
| 109 | ping_ipv6 | ||
| 110 | } | ||
| 111 | |||
| 112 | extern_learn() | ||
| 113 | { | ||
| 114 | local mac=de:ad:be:ef:13:37 | ||
| 115 | local ageing_time | ||
| 116 | |||
| 117 | # Test that externally learned FDB entries can roam, but not age out | ||
| 118 | RET=0 | ||
| 119 | |||
| 120 | bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1 | ||
| 121 | |||
| 122 | bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 | ||
| 123 | check_err $? "Did not find FDB entry when should" | ||
| 124 | |||
| 125 | # Wait for 10 seconds after the ageing time to make sure the FDB entry | ||
| 126 | # was not aged out | ||
| 127 | ageing_time=$(bridge_ageing_time_get br0) | ||
| 128 | sleep $((ageing_time + 10)) | ||
| 129 | |||
| 130 | bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37 | ||
| 131 | check_err $? "FDB entry was aged out when should not" | ||
| 132 | |||
| 133 | $MZ $h2 -c 1 -p 64 -a $mac -t ip -q | ||
| 134 | |||
| 135 | bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37 | ||
| 136 | check_err $? "FDB entry did not roam when should" | ||
| 137 | |||
| 138 | log_test "Externally learned FDB entry - ageing & roaming" | ||
| 139 | |||
| 140 | bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null | ||
| 141 | bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null | ||
| 142 | } | ||
| 143 | |||
| 99 | trap cleanup EXIT | 144 | trap cleanup EXIT |
| 100 | 145 | ||
| 101 | setup_prepare | 146 | setup_prepare |
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index 56cef3b1c194..bb10e33690b2 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh | |||
| @@ -629,7 +629,7 @@ __test_ecn_decap() | |||
| 629 | RET=0 | 629 | RET=0 |
| 630 | 630 | ||
| 631 | tc filter add dev $h1 ingress pref 77 prot ip \ | 631 | tc filter add dev $h1 ingress pref 77 prot ip \ |
| 632 | flower ip_tos $decapped_tos action pass | 632 | flower ip_tos $decapped_tos action drop |
| 633 | sleep 1 | 633 | sleep 1 |
| 634 | vxlan_encapped_ping_test v2 v1 192.0.2.17 \ | 634 | vxlan_encapped_ping_test v2 v1 192.0.2.17 \ |
| 635 | $orig_inner_tos $orig_outer_tos \ | 635 | $orig_inner_tos $orig_outer_tos \ |
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c index 61ae2782388e..5d56cc0838f6 100644 --- a/tools/testing/selftests/net/ip_defrag.c +++ b/tools/testing/selftests/net/ip_defrag.c | |||
| @@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
| 203 | { | 203 | { |
| 204 | struct ip *iphdr = (struct ip *)ip_frame; | 204 | struct ip *iphdr = (struct ip *)ip_frame; |
| 205 | struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; | 205 | struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; |
| 206 | const bool ipv4 = !ipv6; | ||
| 206 | int res; | 207 | int res; |
| 207 | int offset; | 208 | int offset; |
| 208 | int frag_len; | 209 | int frag_len; |
| @@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
| 239 | iphdr->ip_sum = 0; | 240 | iphdr->ip_sum = 0; |
| 240 | } | 241 | } |
| 241 | 242 | ||
| 243 | /* Occasionally test in-order fragments. */ | ||
| 244 | if (!cfg_overlap && (rand() % 100 < 15)) { | ||
| 245 | offset = 0; | ||
| 246 | while (offset < (UDP_HLEN + payload_len)) { | ||
| 247 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 248 | offset += max_frag_len; | ||
| 249 | } | ||
| 250 | return; | ||
| 251 | } | ||
| 252 | |||
| 253 | /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */ | ||
| 254 | if (ipv4 && !cfg_overlap && (rand() % 100 < 20) && | ||
| 255 | (payload_len > 9 * max_frag_len)) { | ||
| 256 | offset = 6 * max_frag_len; | ||
| 257 | while (offset < (UDP_HLEN + payload_len)) { | ||
| 258 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 259 | offset += max_frag_len; | ||
| 260 | } | ||
| 261 | offset = 3 * max_frag_len; | ||
| 262 | while (offset < 6 * max_frag_len) { | ||
| 263 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 264 | offset += max_frag_len; | ||
| 265 | } | ||
| 266 | offset = 0; | ||
| 267 | while (offset < 3 * max_frag_len) { | ||
| 268 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 269 | offset += max_frag_len; | ||
| 270 | } | ||
| 271 | return; | ||
| 272 | } | ||
| 273 | |||
| 242 | /* Odd fragments. */ | 274 | /* Odd fragments. */ |
| 243 | offset = max_frag_len; | 275 | offset = max_frag_len; |
| 244 | while (offset < (UDP_HLEN + payload_len)) { | 276 | while (offset < (UDP_HLEN + payload_len)) { |
| 245 | send_fragment(fd_raw, addr, alen, offset, ipv6); | 277 | send_fragment(fd_raw, addr, alen, offset, ipv6); |
| 278 | /* IPv4 ignores duplicates, so randomly send a duplicate. */ | ||
| 279 | if (ipv4 && (1 == rand() % 100)) | ||
| 280 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 246 | offset += 2 * max_frag_len; | 281 | offset += 2 * max_frag_len; |
| 247 | } | 282 | } |
| 248 | 283 | ||
| 249 | if (cfg_overlap) { | 284 | if (cfg_overlap) { |
| 250 | /* Send an extra random fragment. */ | 285 | /* Send an extra random fragment. */ |
| 251 | offset = rand() % (UDP_HLEN + payload_len - 1); | ||
| 252 | /* sendto() returns EINVAL if offset + frag_len is too small. */ | ||
| 253 | if (ipv6) { | 286 | if (ipv6) { |
| 254 | struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); | 287 | struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); |
| 288 | /* sendto() returns EINVAL if offset + frag_len is too small. */ | ||
| 289 | offset = rand() % (UDP_HLEN + payload_len - 1); | ||
| 255 | frag_len = max_frag_len + rand() % 256; | 290 | frag_len = max_frag_len + rand() % 256; |
| 256 | /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ | 291 | /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ |
| 257 | frag_len &= ~0x7; | 292 | frag_len &= ~0x7; |
| @@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
| 259 | ip6hdr->ip6_plen = htons(frag_len); | 294 | ip6hdr->ip6_plen = htons(frag_len); |
| 260 | frag_len += IP6_HLEN; | 295 | frag_len += IP6_HLEN; |
| 261 | } else { | 296 | } else { |
| 262 | frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; | 297 | /* In IPv4, duplicates and some fragments completely inside |
| 298 | * previously sent fragments are dropped/ignored. So | ||
| 299 | * random offset and frag_len can result in a dropped | ||
| 300 | * fragment instead of a dropped queue/packet. So we | ||
| 301 | * hard-code offset and frag_len. | ||
| 302 | * | ||
| 303 | * See ade446403bfb ("net: ipv4: do not handle duplicate | ||
| 304 | * fragments as overlapping"). | ||
| 305 | */ | ||
| 306 | if (max_frag_len * 4 < payload_len || max_frag_len < 16) { | ||
| 307 | /* not enough payload to play with random offset and frag_len. */ | ||
| 308 | offset = 8; | ||
| 309 | frag_len = IP4_HLEN + UDP_HLEN + max_frag_len; | ||
| 310 | } else { | ||
| 311 | offset = rand() % (payload_len / 2); | ||
| 312 | frag_len = 2 * max_frag_len + 1 + rand() % 256; | ||
| 313 | } | ||
| 263 | iphdr->ip_off = htons(offset / 8 | IP4_MF); | 314 | iphdr->ip_off = htons(offset / 8 | IP4_MF); |
| 264 | iphdr->ip_len = htons(frag_len); | 315 | iphdr->ip_len = htons(frag_len); |
| 265 | } | 316 | } |
| 266 | res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); | 317 | res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); |
| 267 | if (res < 0) | 318 | if (res < 0) |
| 268 | error(1, errno, "sendto overlap"); | 319 | error(1, errno, "sendto overlap: %d", frag_len); |
| 269 | if (res != frag_len) | 320 | if (res != frag_len) |
| 270 | error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); | 321 | error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); |
| 271 | frag_counter++; | 322 | frag_counter++; |
| @@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
| 275 | offset = 0; | 326 | offset = 0; |
| 276 | while (offset < (UDP_HLEN + payload_len)) { | 327 | while (offset < (UDP_HLEN + payload_len)) { |
| 277 | send_fragment(fd_raw, addr, alen, offset, ipv6); | 328 | send_fragment(fd_raw, addr, alen, offset, ipv6); |
| 329 | /* IPv4 ignores duplicates, so randomly send a duplicate. */ | ||
| 330 | if (ipv4 && (1 == rand() % 100)) | ||
| 331 | send_fragment(fd_raw, addr, alen, offset, ipv6); | ||
| 278 | offset += 2 * max_frag_len; | 332 | offset += 2 * max_frag_len; |
| 279 | } | 333 | } |
| 280 | } | 334 | } |
| @@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr, | |||
| 282 | static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) | 336 | static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) |
| 283 | { | 337 | { |
| 284 | int fd_tx_raw, fd_rx_udp; | 338 | int fd_tx_raw, fd_rx_udp; |
| 285 | struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; | 339 | /* Frag queue timeout is set to one second in the calling script; |
| 340 | * socket timeout should be just a bit longer to avoid tests interfering | ||
| 341 | * with each other. | ||
| 342 | */ | ||
| 343 | struct timeval tv = { .tv_sec = 1, .tv_usec = 10 }; | ||
| 286 | int idx; | 344 | int idx; |
| 287 | int min_frag_len = ipv6 ? 1280 : 8; | 345 | int min_frag_len = ipv6 ? 1280 : 8; |
| 288 | 346 | ||
| @@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) | |||
| 308 | payload_len += (rand() % 4096)) { | 366 | payload_len += (rand() % 4096)) { |
| 309 | if (cfg_verbose) | 367 | if (cfg_verbose) |
| 310 | printf("payload_len: %d\n", payload_len); | 368 | printf("payload_len: %d\n", payload_len); |
| 311 | max_frag_len = min_frag_len; | 369 | |
| 312 | do { | 370 | if (cfg_overlap) { |
| 371 | /* With overlaps, one send/receive pair below takes | ||
| 372 | * at least one second (== timeout) to run, so there | ||
| 373 | * is not enough test time to run a nested loop: | ||
| 374 | * the full overlap test takes 20-30 seconds. | ||
| 375 | */ | ||
| 376 | max_frag_len = min_frag_len + | ||
| 377 | rand() % (1500 - FRAG_HLEN - min_frag_len); | ||
| 313 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); | 378 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); |
| 314 | recv_validate_udp(fd_rx_udp); | 379 | recv_validate_udp(fd_rx_udp); |
| 315 | max_frag_len += 8 * (rand() % 8); | 380 | } else { |
| 316 | } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); | 381 | /* Without overlaps, each packet reassembly (== one |
| 382 | * send/receive pair below) takes very little time to | ||
| 383 | * run, so we can easily afford more thorough testing | ||
| 384 | * with a nested loop: the full non-overlap test takes | ||
| 385 | * less than one second). | ||
| 386 | */ | ||
| 387 | max_frag_len = min_frag_len; | ||
| 388 | do { | ||
| 389 | send_udp_frags(fd_tx_raw, addr, alen, ipv6); | ||
| 390 | recv_validate_udp(fd_rx_udp); | ||
| 391 | max_frag_len += 8 * (rand() % 8); | ||
| 392 | } while (max_frag_len < (1500 - FRAG_HLEN) && | ||
| 393 | max_frag_len <= payload_len); | ||
| 394 | } | ||
| 317 | } | 395 | } |
| 318 | 396 | ||
| 319 | /* Cleanup. */ | 397 | /* Cleanup. */ |
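Besides the new duplicate and in-order cases, the test now occasionally sends IPv4 fragments as three back-to-front runs (the tail of the datagram first, then the middle, then the head), exercising the run-merging path referenced in net/ipv4/ip_fragment.c. The offset sequence reduces to this small sketch, with the sizes illustrative rather than taken from the test:

#include <stdio.h>

int main(void)
{
	int max_frag_len = 16, payload_len = 160, udp_hlen = 8;
	int end = udp_hlen + payload_len;
	/* last third first, then the middle third, then the first third */
	int starts[] = { 6 * max_frag_len, 3 * max_frag_len, 0 };
	int stops[]  = { end, 6 * max_frag_len, 3 * max_frag_len };

	for (int i = 0; i < 3; i++)
		for (int off = starts[i]; off < stops[i]; off += max_frag_len)
			printf("send fragment at offset %d\n", off);
	return 0;
}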
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index f34672796044..7dd79a9efb17 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh | |||
| @@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)" | |||
| 11 | setup() { | 11 | setup() { |
| 12 | ip netns add "${NETNS}" | 12 | ip netns add "${NETNS}" |
| 13 | ip -netns "${NETNS}" link set lo up | 13 | ip -netns "${NETNS}" link set lo up |
| 14 | |||
| 14 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 | 15 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 |
| 15 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 | 16 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 |
| 17 | ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1 | ||
| 18 | |||
| 16 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 | 19 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 |
| 17 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 | 20 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 |
| 21 | ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1 | ||
| 22 | |||
| 23 | # DST cache can get full with a lot of frags, with GC not keeping up with the test. | ||
| 24 | ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1 | ||
| 18 | } | 25 | } |
| 19 | 26 | ||
| 20 | cleanup() { | 27 | cleanup() { |
| @@ -27,7 +34,6 @@ setup | |||
| 27 | echo "ipv4 defrag" | 34 | echo "ipv4 defrag" |
| 28 | ip netns exec "${NETNS}" ./ip_defrag -4 | 35 | ip netns exec "${NETNS}" ./ip_defrag -4 |
| 29 | 36 | ||
| 30 | |||
| 31 | echo "ipv4 defrag with overlaps" | 37 | echo "ipv4 defrag with overlaps" |
| 32 | ip netns exec "${NETNS}" ./ip_defrag -4o | 38 | ip netns exec "${NETNS}" ./ip_defrag -4o |
| 33 | 39 | ||
| @@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6 | |||
| 37 | echo "ipv6 defrag with overlaps" | 43 | echo "ipv6 defrag with overlaps" |
| 38 | ip netns exec "${NETNS}" ./ip_defrag -6o | 44 | ip netns exec "${NETNS}" ./ip_defrag -6o |
| 39 | 45 | ||
| 46 | echo "all tests done" | ||
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c index 2e563d17cf0c..d1bbafb16f47 100644 --- a/tools/testing/selftests/networking/timestamping/txtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c | |||
| @@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) | |||
| 240 | cm->cmsg_type == IP_RECVERR) || | 240 | cm->cmsg_type == IP_RECVERR) || |
| 241 | (cm->cmsg_level == SOL_IPV6 && | 241 | (cm->cmsg_level == SOL_IPV6 && |
| 242 | cm->cmsg_type == IPV6_RECVERR) || | 242 | cm->cmsg_type == IPV6_RECVERR) || |
| 243 | (cm->cmsg_level = SOL_PACKET && | 243 | (cm->cmsg_level == SOL_PACKET && |
| 244 | cm->cmsg_type == PACKET_TX_TIMESTAMP)) { | 244 | cm->cmsg_type == PACKET_TX_TIMESTAMP)) { |
| 245 | serr = (void *) CMSG_DATA(cm); | 245 | serr = (void *) CMSG_DATA(cm); |
| 246 | if (serr->ee_errno != ENOMSG || | 246 | if (serr->ee_errno != ENOMSG || |
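The one-character fix above turns an assignment back into a comparison: with "=", the SOL_PACKET branch overwrote cmsg_level and evaluated as true for any non-zero constant, so the condition no longer tested the level at all. A tiny standalone reproduction of the pitfall, with illustrative constants:

#include <stdio.h>

int main(void)
{
	const int LEVEL_PACKET = 263;	/* illustrative stand-in for SOL_PACKET */
	int level = 1;			/* pretend this came from the cmsg */

	if ((level = LEVEL_PACKET))	/* bug: assigns, always true here */
		printf("buggy check matched; level clobbered to %d\n", level);

	level = 1;
	if (level == LEVEL_PACKET)	/* fix: compares without writing */
		printf("fixed check matched\n");
	else
		printf("fixed check correctly rejected level %d\n", level);
	return 0;
}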
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c index e20b017e7073..b2065536d407 100644 --- a/tools/testing/selftests/rtc/rtctest.c +++ b/tools/testing/selftests/rtc/rtctest.c | |||
| @@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) { | |||
| 145 | 145 | ||
| 146 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); | 146 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
| 147 | ASSERT_NE(-1, rc); | 147 | ASSERT_NE(-1, rc); |
| 148 | EXPECT_NE(0, rc); | 148 | ASSERT_NE(0, rc); |
| 149 | 149 | ||
| 150 | /* Disable alarm interrupts */ | 150 | /* Disable alarm interrupts */ |
| 151 | rc = ioctl(self->fd, RTC_AIE_OFF, 0); | 151 | rc = ioctl(self->fd, RTC_AIE_OFF, 0); |
| 152 | ASSERT_NE(-1, rc); | 152 | ASSERT_NE(-1, rc); |
| 153 | 153 | ||
| 154 | if (rc == 0) | ||
| 155 | return; | ||
| 156 | |||
| 157 | rc = read(self->fd, &data, sizeof(unsigned long)); | 154 | rc = read(self->fd, &data, sizeof(unsigned long)); |
| 158 | ASSERT_NE(-1, rc); | 155 | ASSERT_NE(-1, rc); |
| 159 | TH_LOG("data: %lx", data); | 156 | TH_LOG("data: %lx", data); |
| @@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) { | |||
| 202 | 199 | ||
| 203 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); | 200 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); |
| 204 | ASSERT_NE(-1, rc); | 201 | ASSERT_NE(-1, rc); |
| 205 | EXPECT_NE(0, rc); | 202 | ASSERT_NE(0, rc); |
| 203 | |||
| 204 | rc = read(self->fd, &data, sizeof(unsigned long)); | ||
| 205 | ASSERT_NE(-1, rc); | ||
| 206 | |||
| 207 | rc = ioctl(self->fd, RTC_RD_TIME, &tm); | ||
| 208 | ASSERT_NE(-1, rc); | ||
| 209 | |||
| 210 | new = timegm((struct tm *)&tm); | ||
| 211 | ASSERT_EQ(new, secs); | ||
| 212 | } | ||
| 213 | |||
| 214 | TEST_F(rtc, alarm_alm_set_minute) { | ||
| 215 | struct timeval tv = { .tv_sec = 62 }; | ||
| 216 | unsigned long data; | ||
| 217 | struct rtc_time tm; | ||
| 218 | fd_set readfds; | ||
| 219 | time_t secs, new; | ||
| 220 | int rc; | ||
| 221 | |||
| 222 | rc = ioctl(self->fd, RTC_RD_TIME, &tm); | ||
| 223 | ASSERT_NE(-1, rc); | ||
| 224 | |||
| 225 | secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec; | ||
| 226 | gmtime_r(&secs, (struct tm *)&tm); | ||
| 227 | |||
| 228 | rc = ioctl(self->fd, RTC_ALM_SET, &tm); | ||
| 229 | if (rc == -1) { | ||
| 230 | ASSERT_EQ(EINVAL, errno); | ||
| 231 | TH_LOG("skip alarms are not supported."); | ||
| 232 | return; | ||
| 233 | } | ||
| 234 | |||
| 235 | rc = ioctl(self->fd, RTC_ALM_READ, &tm); | ||
| 236 | ASSERT_NE(-1, rc); | ||
| 237 | |||
| 238 | TH_LOG("Alarm time now set to %02d:%02d:%02d.", | ||
| 239 | tm.tm_hour, tm.tm_min, tm.tm_sec); | ||
| 240 | |||
| 241 | /* Enable alarm interrupts */ | ||
| 242 | rc = ioctl(self->fd, RTC_AIE_ON, 0); | ||
| 243 | ASSERT_NE(-1, rc); | ||
| 244 | |||
| 245 | FD_ZERO(&readfds); | ||
| 246 | FD_SET(self->fd, &readfds); | ||
| 247 | |||
| 248 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); | ||
| 249 | ASSERT_NE(-1, rc); | ||
| 250 | ASSERT_NE(0, rc); | ||
| 251 | |||
| 252 | /* Disable alarm interrupts */ | ||
| 253 | rc = ioctl(self->fd, RTC_AIE_OFF, 0); | ||
| 254 | ASSERT_NE(-1, rc); | ||
| 255 | |||
| 256 | rc = read(self->fd, &data, sizeof(unsigned long)); | ||
| 257 | ASSERT_NE(-1, rc); | ||
| 258 | TH_LOG("data: %lx", data); | ||
| 259 | |||
| 260 | rc = ioctl(self->fd, RTC_RD_TIME, &tm); | ||
| 261 | ASSERT_NE(-1, rc); | ||
| 262 | |||
| 263 | new = timegm((struct tm *)&tm); | ||
| 264 | ASSERT_EQ(new, secs); | ||
| 265 | } | ||
| 266 | |||
| 267 | TEST_F(rtc, alarm_wkalm_set_minute) { | ||
| 268 | struct timeval tv = { .tv_sec = 62 }; | ||
| 269 | struct rtc_wkalrm alarm = { 0 }; | ||
| 270 | struct rtc_time tm; | ||
| 271 | unsigned long data; | ||
| 272 | fd_set readfds; | ||
| 273 | time_t secs, new; | ||
| 274 | int rc; | ||
| 275 | |||
| 276 | rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time); | ||
| 277 | ASSERT_NE(-1, rc); | ||
| 278 | |||
| 279 | secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec; | ||
| 280 | gmtime_r(&secs, (struct tm *)&alarm.time); | ||
| 281 | |||
| 282 | alarm.enabled = 1; | ||
| 283 | |||
| 284 | rc = ioctl(self->fd, RTC_WKALM_SET, &alarm); | ||
| 285 | if (rc == -1) { | ||
| 286 | ASSERT_EQ(EINVAL, errno); | ||
| 287 | TH_LOG("skip alarms are not supported."); | ||
| 288 | return; | ||
| 289 | } | ||
| 290 | |||
| 291 | rc = ioctl(self->fd, RTC_WKALM_RD, &alarm); | ||
| 292 | ASSERT_NE(-1, rc); | ||
| 293 | |||
| 294 | TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.", | ||
| 295 | alarm.time.tm_mday, alarm.time.tm_mon + 1, | ||
| 296 | alarm.time.tm_year + 1900, alarm.time.tm_hour, | ||
| 297 | alarm.time.tm_min, alarm.time.tm_sec); | ||
| 298 | |||
| 299 | FD_ZERO(&readfds); | ||
| 300 | FD_SET(self->fd, &readfds); | ||
| 301 | |||
| 302 | rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); | ||
| 303 | ASSERT_NE(-1, rc); | ||
| 304 | ASSERT_NE(0, rc); | ||
| 206 | 305 | ||
| 207 | rc = read(self->fd, &data, sizeof(unsigned long)); | 306 | rc = read(self->fd, &data, sizeof(unsigned long)); |
| 208 | ASSERT_NE(-1, rc); | 307 | ASSERT_NE(-1, rc); |
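The two new *_minute tests arm the alarm on the next minute boundary instead of a fixed number of seconds ahead, then wait up to 62 seconds in select() for the interrupt. The boundary computation they use reduces to the following sketch, run here against the host clock rather than an RTC:

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	time_t next_minute;
	struct tm tm;

	gmtime_r(&now, &tm);
	/* add whatever is left of the current minute, as the tests do */
	next_minute = timegm(&tm) + 60 - tm.tm_sec;

	gmtime_r(&next_minute, &tm);
	printf("alarm would be set for %02d:%02d:%02d UTC\n",
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}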
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index fce7f4ce0692..1760b3e39730 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile | |||
| @@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark | |||
| 9 | CFLAGS += -Wl,-no-as-needed -Wall | 9 | CFLAGS += -Wl,-no-as-needed -Wall |
| 10 | 10 | ||
| 11 | seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h | 11 | seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h |
| 12 | $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ | 12 | $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@ |
| 13 | 13 | ||
| 14 | TEST_PROGS += $(BINARIES) | 14 | TEST_PROGS += $(BINARIES) |
| 15 | EXTRA_CLEAN := $(BINARIES) | 15 | EXTRA_CLEAN := $(BINARIES) |
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 067cb4607d6c..496a9a8c773a 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
| @@ -3044,7 +3044,7 @@ TEST(user_notification_basic) | |||
| 3044 | /* Check that the basic notification machinery works */ | 3044 | /* Check that the basic notification machinery works */ |
| 3045 | listener = user_trap_syscall(__NR_getpid, | 3045 | listener = user_trap_syscall(__NR_getpid, |
| 3046 | SECCOMP_FILTER_FLAG_NEW_LISTENER); | 3046 | SECCOMP_FILTER_FLAG_NEW_LISTENER); |
| 3047 | EXPECT_GE(listener, 0); | 3047 | ASSERT_GE(listener, 0); |
| 3048 | 3048 | ||
| 3049 | /* Installing a second listener in the chain should EBUSY */ | 3049 | /* Installing a second listener in the chain should EBUSY */ |
| 3050 | EXPECT_EQ(user_trap_syscall(__NR_getpid, | 3050 | EXPECT_EQ(user_trap_syscall(__NR_getpid, |
| @@ -3103,7 +3103,7 @@ TEST(user_notification_kill_in_middle) | |||
| 3103 | 3103 | ||
| 3104 | listener = user_trap_syscall(__NR_getpid, | 3104 | listener = user_trap_syscall(__NR_getpid, |
| 3105 | SECCOMP_FILTER_FLAG_NEW_LISTENER); | 3105 | SECCOMP_FILTER_FLAG_NEW_LISTENER); |
| 3106 | EXPECT_GE(listener, 0); | 3106 | ASSERT_GE(listener, 0); |
| 3107 | 3107 | ||
| 3108 | /* | 3108 | /* |
| 3109 | * Check that nothing bad happens when we kill the task in the middle | 3109 | * Check that nothing bad happens when we kill the task in the middle |
| @@ -3152,7 +3152,7 @@ TEST(user_notification_signal) | |||
| 3152 | 3152 | ||
| 3153 | listener = user_trap_syscall(__NR_gettid, | 3153 | listener = user_trap_syscall(__NR_gettid, |
| 3154 | SECCOMP_FILTER_FLAG_NEW_LISTENER); | 3154 | SECCOMP_FILTER_FLAG_NEW_LISTENER); |
| 3155 | EXPECT_GE(listener, 0); | 3155 | ASSERT_GE(listener, 0); |
| 3156 | 3156 | ||
| 3157 | pid = fork(); | 3157 | pid = fork(); |
| 3158 | ASSERT_GE(pid, 0); | 3158 | ASSERT_GE(pid, 0); |
| @@ -3215,7 +3215,7 @@ TEST(user_notification_closed_listener) | |||
| 3215 | 3215 | ||
| 3216 | listener = user_trap_syscall(__NR_getpid, | 3216 | listener = user_trap_syscall(__NR_getpid, |
| 3217 | SECCOMP_FILTER_FLAG_NEW_LISTENER); | 3217 | SECCOMP_FILTER_FLAG_NEW_LISTENER); |
| 3218 | EXPECT_GE(listener, 0); | 3218 | ASSERT_GE(listener, 0); |
| 3219 | 3219 | ||
| 3220 | /* | 3220 | /* |
| 3221 | * Check that we get an ENOSYS when the listener is closed. | 3221 | * Check that we get an ENOSYS when the listener is closed. |
| @@ -3376,7 +3376,7 @@ TEST(seccomp_get_notif_sizes) | |||
| 3376 | { | 3376 | { |
| 3377 | struct seccomp_notif_sizes sizes; | 3377 | struct seccomp_notif_sizes sizes; |
| 3378 | 3378 | ||
| 3379 | EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); | 3379 | ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); |
| 3380 | EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); | 3380 | EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); |
| 3381 | EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); | 3381 | EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); |
| 3382 | } | 3382 | } |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json index 637ea0219617..0da3545cabdb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", | 17 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", |
| 18 | "expExitCode": "0", | 18 | "expExitCode": "0", |
| 19 | "verifyCmd": "$TC actions get action ife index 2", | 19 | "verifyCmd": "$TC actions get action ife index 2", |
| 20 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", | 20 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2", |
| 21 | "matchCount": "1", | 21 | "matchCount": "1", |
| 22 | "teardown": [ | 22 | "teardown": [ |
| 23 | "$TC actions flush action ife" | 23 | "$TC actions flush action ife" |
| @@ -41,7 +41,7 @@ | |||
| 41 | "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", | 41 | "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", |
| 42 | "expExitCode": "0", | 42 | "expExitCode": "0", |
| 43 | "verifyCmd": "$TC actions get action ife index 2", | 43 | "verifyCmd": "$TC actions get action ife index 2", |
| 44 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", | 44 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2", |
| 45 | "matchCount": "1", | 45 | "matchCount": "1", |
| 46 | "teardown": [ | 46 | "teardown": [ |
| 47 | "$TC actions flush action ife" | 47 | "$TC actions flush action ife" |
| @@ -65,7 +65,7 @@ | |||
| 65 | "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", | 65 | "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", |
| 66 | "expExitCode": "0", | 66 | "expExitCode": "0", |
| 67 | "verifyCmd": "$TC actions get action ife index 2", | 67 | "verifyCmd": "$TC actions get action ife index 2", |
| 68 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", | 68 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2", |
| 69 | "matchCount": "1", | 69 | "matchCount": "1", |
| 70 | "teardown": [ | 70 | "teardown": [ |
| 71 | "$TC actions flush action ife" | 71 | "$TC actions flush action ife" |
| @@ -89,7 +89,7 @@ | |||
| 89 | "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", | 89 | "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", |
| 90 | "expExitCode": "0", | 90 | "expExitCode": "0", |
| 91 | "verifyCmd": "$TC actions get action ife index 2", | 91 | "verifyCmd": "$TC actions get action ife index 2", |
| 92 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", | 92 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2", |
| 93 | "matchCount": "1", | 93 | "matchCount": "1", |
| 94 | "teardown": [ | 94 | "teardown": [ |
| 95 | "$TC actions flush action ife" | 95 | "$TC actions flush action ife" |
| @@ -113,7 +113,7 @@ | |||
| 113 | "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", | 113 | "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", |
| 114 | "expExitCode": "0", | 114 | "expExitCode": "0", |
| 115 | "verifyCmd": "$TC actions get action ife index 2", | 115 | "verifyCmd": "$TC actions get action ife index 2", |
| 116 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", | 116 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2", |
| 117 | "matchCount": "1", | 117 | "matchCount": "1", |
| 118 | "teardown": [ | 118 | "teardown": [ |
| 119 | "$TC actions flush action ife" | 119 | "$TC actions flush action ife" |
| @@ -137,7 +137,7 @@ | |||
| 137 | "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", | 137 | "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", |
| 138 | "expExitCode": "0", | 138 | "expExitCode": "0", |
| 139 | "verifyCmd": "$TC actions get action ife index 2", | 139 | "verifyCmd": "$TC actions get action ife index 2", |
| 140 | "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", | 140 | "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2", |
| 141 | "matchCount": "1", | 141 | "matchCount": "1", |
| 142 | "teardown": [ | 142 | "teardown": [ |
| 143 | "$TC actions flush action ife" | 143 | "$TC actions flush action ife" |
| @@ -161,7 +161,7 @@ | |||
| 161 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", | 161 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", |
| 162 | "expExitCode": "0", | 162 | "expExitCode": "0", |
| 163 | "verifyCmd": "$TC actions get action ife index 90", | 163 | "verifyCmd": "$TC actions get action ife index 90", |
| 164 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90", | 164 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90", |
| 165 | "matchCount": "1", | 165 | "matchCount": "1", |
| 166 | "teardown": [ | 166 | "teardown": [ |
| 167 | "$TC actions flush action ife" | 167 | "$TC actions flush action ife" |
| @@ -185,7 +185,7 @@ | |||
| 185 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", | 185 | "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", |
| 186 | "expExitCode": "255", | 186 | "expExitCode": "255", |
| 187 | "verifyCmd": "$TC actions get action ife index 90", | 187 | "verifyCmd": "$TC actions get action ife index 90", |
| 188 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", | 188 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90", |
| 189 | "matchCount": "0", | 189 | "matchCount": "0", |
| 190 | "teardown": [] | 190 | "teardown": [] |
| 191 | }, | 191 | }, |
| @@ -207,7 +207,7 @@ | |||
| 207 | "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", | 207 | "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", |
| 208 | "expExitCode": "0", | 208 | "expExitCode": "0", |
| 209 | "verifyCmd": "$TC actions get action ife index 9", | 209 | "verifyCmd": "$TC actions get action ife index 9", |
| 210 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", | 210 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9", |
| 211 | "matchCount": "1", | 211 | "matchCount": "1", |
| 212 | "teardown": [ | 212 | "teardown": [ |
| 213 | "$TC actions flush action ife" | 213 | "$TC actions flush action ife" |
| @@ -231,7 +231,7 @@ | |||
| 231 | "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", | 231 | "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", |
| 232 | "expExitCode": "0", | 232 | "expExitCode": "0", |
| 233 | "verifyCmd": "$TC actions get action ife index 9", | 233 | "verifyCmd": "$TC actions get action ife index 9", |
| 234 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", | 234 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9", |
| 235 | "matchCount": "1", | 235 | "matchCount": "1", |
| 236 | "teardown": [ | 236 | "teardown": [ |
| 237 | "$TC actions flush action ife" | 237 | "$TC actions flush action ife" |
| @@ -255,7 +255,7 @@ | |||
| 255 | "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", | 255 | "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", |
| 256 | "expExitCode": "0", | 256 | "expExitCode": "0", |
| 257 | "verifyCmd": "$TC actions get action ife index 9", | 257 | "verifyCmd": "$TC actions get action ife index 9", |
| 258 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", | 258 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9", |
| 259 | "matchCount": "1", | 259 | "matchCount": "1", |
| 260 | "teardown": [ | 260 | "teardown": [ |
| 261 | "$TC actions flush action ife" | 261 | "$TC actions flush action ife" |
| @@ -279,7 +279,7 @@ | |||
| 279 | "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", | 279 | "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", |
| 280 | "expExitCode": "0", | 280 | "expExitCode": "0", |
| 281 | "verifyCmd": "$TC actions get action ife index 9", | 281 | "verifyCmd": "$TC actions get action ife index 9", |
| 282 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", | 282 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9", |
| 283 | "matchCount": "1", | 283 | "matchCount": "1", |
| 284 | "teardown": [ | 284 | "teardown": [ |
| 285 | "$TC actions flush action ife" | 285 | "$TC actions flush action ife" |
| @@ -303,7 +303,7 @@ | |||
| 303 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", | 303 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", |
| 304 | "expExitCode": "0", | 304 | "expExitCode": "0", |
| 305 | "verifyCmd": "$TC actions get action ife index 9", | 305 | "verifyCmd": "$TC actions get action ife index 9", |
| 306 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", | 306 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9", |
| 307 | "matchCount": "1", | 307 | "matchCount": "1", |
| 308 | "teardown": [ | 308 | "teardown": [ |
| 309 | "$TC actions flush action ife" | 309 | "$TC actions flush action ife" |
| @@ -327,7 +327,7 @@ | |||
| 327 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", | 327 | "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", |
| 328 | "expExitCode": "0", | 328 | "expExitCode": "0", |
| 329 | "verifyCmd": "$TC actions get action ife index 9", | 329 | "verifyCmd": "$TC actions get action ife index 9", |
| 330 | "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", | 330 | "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9", |
| 331 | "matchCount": "1", | 331 | "matchCount": "1", |
| 332 | "teardown": [ | 332 | "teardown": [ |
| 333 | "$TC actions flush action ife" | 333 | "$TC actions flush action ife" |
| @@ -351,7 +351,7 @@ | |||
| 351 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", | 351 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", |
| 352 | "expExitCode": "0", | 352 | "expExitCode": "0", |
| 353 | "verifyCmd": "$TC actions get action ife index 99", | 353 | "verifyCmd": "$TC actions get action ife index 99", |
| 354 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", | 354 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99", |
| 355 | "matchCount": "1", | 355 | "matchCount": "1", |
| 356 | "teardown": [ | 356 | "teardown": [ |
| 357 | "$TC actions flush action ife" | 357 | "$TC actions flush action ife" |
| @@ -375,7 +375,7 @@ | |||
| 375 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", | 375 | "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", |
| 376 | "expExitCode": "255", | 376 | "expExitCode": "255", |
| 377 | "verifyCmd": "$TC actions get action ife index 99", | 377 | "verifyCmd": "$TC actions get action ife index 99", |
| 378 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", | 378 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99", |
| 379 | "matchCount": "0", | 379 | "matchCount": "0", |
| 380 | "teardown": [] | 380 | "teardown": [] |
| 381 | }, | 381 | }, |
| @@ -397,7 +397,7 @@ | |||
| 397 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", | 397 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", |
| 398 | "expExitCode": "0", | 398 | "expExitCode": "0", |
| 399 | "verifyCmd": "$TC actions get action ife index 1", | 399 | "verifyCmd": "$TC actions get action ife index 1", |
| 400 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", | 400 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1", |
| 401 | "matchCount": "1", | 401 | "matchCount": "1", |
| 402 | "teardown": [ | 402 | "teardown": [ |
| 403 | "$TC actions flush action ife" | 403 | "$TC actions flush action ife" |
| @@ -421,7 +421,7 @@ | |||
| 421 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", | 421 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", |
| 422 | "expExitCode": "0", | 422 | "expExitCode": "0", |
| 423 | "verifyCmd": "$TC actions get action ife index 1", | 423 | "verifyCmd": "$TC actions get action ife index 1", |
| 424 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", | 424 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1", |
| 425 | "matchCount": "1", | 425 | "matchCount": "1", |
| 426 | "teardown": [ | 426 | "teardown": [ |
| 427 | "$TC actions flush action ife" | 427 | "$TC actions flush action ife" |
| @@ -445,7 +445,7 @@ | |||
| 445 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", | 445 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", |
| 446 | "expExitCode": "0", | 446 | "expExitCode": "0", |
| 447 | "verifyCmd": "$TC actions get action ife index 1", | 447 | "verifyCmd": "$TC actions get action ife index 1", |
| 448 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", | 448 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", |
| 449 | "matchCount": "1", | 449 | "matchCount": "1", |
| 450 | "teardown": [ | 450 | "teardown": [ |
| 451 | "$TC actions flush action ife" | 451 | "$TC actions flush action ife" |
| @@ -469,7 +469,7 @@ | |||
| 469 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", | 469 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", |
| 470 | "expExitCode": "0", | 470 | "expExitCode": "0", |
| 471 | "verifyCmd": "$TC actions get action ife index 1", | 471 | "verifyCmd": "$TC actions get action ife index 1", |
| 472 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", | 472 | "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1", |
| 473 | "matchCount": "1", | 473 | "matchCount": "1", |
| 474 | "teardown": [ | 474 | "teardown": [ |
| 475 | "$TC actions flush action ife" | 475 | "$TC actions flush action ife" |
| @@ -493,7 +493,7 @@ | |||
| 493 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", | 493 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", |
| 494 | "expExitCode": "0", | 494 | "expExitCode": "0", |
| 495 | "verifyCmd": "$TC actions get action ife index 77", | 495 | "verifyCmd": "$TC actions get action ife index 77", |
| 496 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", | 496 | "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77", |
| 497 | "matchCount": "1", | 497 | "matchCount": "1", |
| 498 | "teardown": [ | 498 | "teardown": [ |
| 499 | "$TC actions flush action ife" | 499 | "$TC actions flush action ife" |
| @@ -517,7 +517,7 @@ | |||
| 517 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", | 517 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", |
| 518 | "expExitCode": "0", | 518 | "expExitCode": "0", |
| 519 | "verifyCmd": "$TC actions get action ife index 77", | 519 | "verifyCmd": "$TC actions get action ife index 77", |
| 520 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", | 520 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77", |
| 521 | "matchCount": "1", | 521 | "matchCount": "1", |
| 522 | "teardown": [ | 522 | "teardown": [ |
| 523 | "$TC actions flush action ife" | 523 | "$TC actions flush action ife" |
| @@ -541,7 +541,7 @@ | |||
| 541 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", | 541 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", |
| 542 | "expExitCode": "0", | 542 | "expExitCode": "0", |
| 543 | "verifyCmd": "$TC actions get action ife index 77", | 543 | "verifyCmd": "$TC actions get action ife index 77", |
| 544 | "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", | 544 | "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77", |
| 545 | "matchCount": "1", | 545 | "matchCount": "1", |
| 546 | "teardown": [ | 546 | "teardown": [ |
| 547 | "$TC actions flush action ife" | 547 | "$TC actions flush action ife" |
| @@ -565,7 +565,7 @@ | |||
| 565 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", | 565 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", |
| 566 | "expExitCode": "0", | 566 | "expExitCode": "0", |
| 567 | "verifyCmd": "$TC actions get action ife index 1", | 567 | "verifyCmd": "$TC actions get action ife index 1", |
| 568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", | 568 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1", |
| 569 | "matchCount": "1", | 569 | "matchCount": "1", |
| 570 | "teardown": [ | 570 | "teardown": [ |
| 571 | "$TC actions flush action ife" | 571 | "$TC actions flush action ife" |
| @@ -589,7 +589,7 @@ | |||
| 589 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", | 589 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", |
| 590 | "expExitCode": "255", | 590 | "expExitCode": "255", |
| 591 | "verifyCmd": "$TC actions get action ife index 1", | 591 | "verifyCmd": "$TC actions get action ife index 1", |
| 592 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", | 592 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1", |
| 593 | "matchCount": "0", | 593 | "matchCount": "0", |
| 594 | "teardown": [] | 594 | "teardown": [] |
| 595 | }, | 595 | }, |
| @@ -611,7 +611,7 @@ | |||
| 611 | "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", | 611 | "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", |
| 612 | "expExitCode": "0", | 612 | "expExitCode": "0", |
| 613 | "verifyCmd": "$TC actions get action ife index 1", | 613 | "verifyCmd": "$TC actions get action ife index 1", |
| 614 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", | 614 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1", |
| 615 | "matchCount": "1", | 615 | "matchCount": "1", |
| 616 | "teardown": [ | 616 | "teardown": [ |
| 617 | "$TC actions flush action ife" | 617 | "$TC actions flush action ife" |
| @@ -635,7 +635,7 @@ | |||
| 635 | "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", | 635 | "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", |
| 636 | "expExitCode": "0", | 636 | "expExitCode": "0", |
| 637 | "verifyCmd": "$TC actions get action ife index 1", | 637 | "verifyCmd": "$TC actions get action ife index 1", |
| 638 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", | 638 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", |
| 639 | "matchCount": "1", | 639 | "matchCount": "1", |
| 640 | "teardown": [ | 640 | "teardown": [ |
| 641 | "$TC actions flush action ife" | 641 | "$TC actions flush action ife" |
| @@ -659,7 +659,7 @@ | |||
| 659 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", | 659 | "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", |
| 660 | "expExitCode": "0", | 660 | "expExitCode": "0", |
| 661 | "verifyCmd": "$TC actions get action ife index 11", | 661 | "verifyCmd": "$TC actions get action ife index 11", |
| 662 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", | 662 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", |
| 663 | "matchCount": "1", | 663 | "matchCount": "1", |
| 664 | "teardown": [ | 664 | "teardown": [ |
| 665 | "$TC actions flush action ife" | 665 | "$TC actions flush action ife" |
| @@ -683,7 +683,7 @@ | |||
| 683 | "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", | 683 | "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", |
| 684 | "expExitCode": "0", | 684 | "expExitCode": "0", |
| 685 | "verifyCmd": "$TC actions get action ife index 1", | 685 | "verifyCmd": "$TC actions get action ife index 1", |
| 686 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", | 686 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1", |
| 687 | "matchCount": "1", | 687 | "matchCount": "1", |
| 688 | "teardown": [ | 688 | "teardown": [ |
| 689 | "$TC actions flush action ife" | 689 | "$TC actions flush action ife" |
| @@ -707,7 +707,7 @@ | |||
| 707 | "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", | 707 | "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", |
| 708 | "expExitCode": "0", | 708 | "expExitCode": "0", |
| 709 | "verifyCmd": "$TC actions get action ife index 21", | 709 | "verifyCmd": "$TC actions get action ife index 21", |
| 710 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21", | 710 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21", |
| 711 | "matchCount": "1", | 711 | "matchCount": "1", |
| 712 | "teardown": [ | 712 | "teardown": [ |
| 713 | "$TC actions flush action ife" | 713 | "$TC actions flush action ife" |
| @@ -731,7 +731,7 @@ | |||
| 731 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", | 731 | "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", |
| 732 | "expExitCode": "0", | 732 | "expExitCode": "0", |
| 733 | "verifyCmd": "$TC actions get action ife index 21", | 733 | "verifyCmd": "$TC actions get action ife index 21", |
| 734 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", | 734 | "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21", |
| 735 | "matchCount": "1", | 735 | "matchCount": "1", |
| 736 | "teardown": [ | 736 | "teardown": [ |
| 737 | "$TC actions flush action ife" | 737 | "$TC actions flush action ife" |
| @@ -739,7 +739,7 @@ | |||
| 739 | }, | 739 | }, |
| 740 | { | 740 | { |
| 741 | "id": "fac3", | 741 | "id": "fac3", |
| 742 | "name": "Create valid ife encode action with index at 32-bit maximnum", | 742 | "name": "Create valid ife encode action with index at 32-bit maximum", |
| 743 | "category": [ | 743 | "category": [ |
| 744 | "actions", | 744 | "actions", |
| 745 | "ife" | 745 | "ife" |
| @@ -755,7 +755,7 @@ | |||
| 755 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", | 755 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", |
| 756 | "expExitCode": "0", | 756 | "expExitCode": "0", |
| 757 | "verifyCmd": "$TC actions get action ife index 4294967295", | 757 | "verifyCmd": "$TC actions get action ife index 4294967295", |
| 758 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", | 758 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295", |
| 759 | "matchCount": "1", | 759 | "matchCount": "1", |
| 760 | "teardown": [ | 760 | "teardown": [ |
| 761 | "$TC actions flush action ife" | 761 | "$TC actions flush action ife" |
| @@ -779,7 +779,7 @@ | |||
| 779 | "cmdUnderTest": "$TC actions add action ife decode pass index 1", | 779 | "cmdUnderTest": "$TC actions add action ife decode pass index 1", |
| 780 | "expExitCode": "0", | 780 | "expExitCode": "0", |
| 781 | "verifyCmd": "$TC actions get action ife index 1", | 781 | "verifyCmd": "$TC actions get action ife index 1", |
| 782 | "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 782 | "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 783 | "matchCount": "1", | 783 | "matchCount": "1", |
| 784 | "teardown": [ | 784 | "teardown": [ |
| 785 | "$TC actions flush action ife" | 785 | "$TC actions flush action ife" |
| @@ -803,7 +803,7 @@ | |||
| 803 | "cmdUnderTest": "$TC actions add action ife decode pipe index 1", | 803 | "cmdUnderTest": "$TC actions add action ife decode pipe index 1", |
| 804 | "expExitCode": "0", | 804 | "expExitCode": "0", |
| 805 | "verifyCmd": "$TC actions get action ife index 1", | 805 | "verifyCmd": "$TC actions get action ife index 1", |
| 806 | "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 806 | "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 807 | "matchCount": "1", | 807 | "matchCount": "1", |
| 808 | "teardown": [ | 808 | "teardown": [ |
| 809 | "$TC actions flush action ife" | 809 | "$TC actions flush action ife" |
| @@ -827,7 +827,7 @@ | |||
| 827 | "cmdUnderTest": "$TC actions add action ife decode continue index 1", | 827 | "cmdUnderTest": "$TC actions add action ife decode continue index 1", |
| 828 | "expExitCode": "0", | 828 | "expExitCode": "0", |
| 829 | "verifyCmd": "$TC actions get action ife index 1", | 829 | "verifyCmd": "$TC actions get action ife index 1", |
| 830 | "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 830 | "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 831 | "matchCount": "1", | 831 | "matchCount": "1", |
| 832 | "teardown": [ | 832 | "teardown": [ |
| 833 | "$TC actions flush action ife" | 833 | "$TC actions flush action ife" |
| @@ -851,7 +851,7 @@ | |||
| 851 | "cmdUnderTest": "$TC actions add action ife decode drop index 1", | 851 | "cmdUnderTest": "$TC actions add action ife decode drop index 1", |
| 852 | "expExitCode": "0", | 852 | "expExitCode": "0", |
| 853 | "verifyCmd": "$TC actions get action ife index 1", | 853 | "verifyCmd": "$TC actions get action ife index 1", |
| 854 | "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 854 | "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 855 | "matchCount": "1", | 855 | "matchCount": "1", |
| 856 | "teardown": [ | 856 | "teardown": [ |
| 857 | "$TC actions flush action ife" | 857 | "$TC actions flush action ife" |
| @@ -875,7 +875,7 @@ | |||
| 875 | "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", | 875 | "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", |
| 876 | "expExitCode": "0", | 876 | "expExitCode": "0", |
| 877 | "verifyCmd": "$TC actions get action ife index 1", | 877 | "verifyCmd": "$TC actions get action ife index 1", |
| 878 | "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 878 | "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 879 | "matchCount": "1", | 879 | "matchCount": "1", |
| 880 | "teardown": [ | 880 | "teardown": [ |
| 881 | "$TC actions flush action ife" | 881 | "$TC actions flush action ife" |
| @@ -899,7 +899,7 @@ | |||
| 899 | "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", | 899 | "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", |
| 900 | "expExitCode": "0", | 900 | "expExitCode": "0", |
| 901 | "verifyCmd": "$TC actions get action ife index 1", | 901 | "verifyCmd": "$TC actions get action ife index 1", |
| 902 | "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", | 902 | "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1", |
| 903 | "matchCount": "1", | 903 | "matchCount": "1", |
| 904 | "teardown": [ | 904 | "teardown": [ |
| 905 | "$TC actions flush action ife" | 905 | "$TC actions flush action ife" |
| @@ -923,7 +923,7 @@ | |||
| 923 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", | 923 | "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", |
| 924 | "expExitCode": "255", | 924 | "expExitCode": "255", |
| 925 | "verifyCmd": "$TC actions get action ife index 4294967295999", | 925 | "verifyCmd": "$TC actions get action ife index 4294967295999", |
| 926 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", | 926 | "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999", |
| 927 | "matchCount": "0", | 927 | "matchCount": "0", |
| 928 | "teardown": [] | 928 | "teardown": [] |
| 929 | }, | 929 | }, |
| @@ -945,7 +945,7 @@ | |||
| 945 | "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", | 945 | "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", |
| 946 | "expExitCode": "255", | 946 | "expExitCode": "255", |
| 947 | "verifyCmd": "$TC actions get action ife index 4", | 947 | "verifyCmd": "$TC actions get action ife index 4", |
| 948 | "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", | 948 | "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4", |
| 949 | "matchCount": "0", | 949 | "matchCount": "0", |
| 950 | "teardown": [] | 950 | "teardown": [] |
| 951 | }, | 951 | }, |
| @@ -967,7 +967,7 @@ | |||
| 967 | "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", | 967 | "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", |
| 968 | "expExitCode": "0", | 968 | "expExitCode": "0", |
| 969 | "verifyCmd": "$TC actions get action ife index 4", | 969 | "verifyCmd": "$TC actions get action ife index 4", |
| 970 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", | 970 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", |
| 971 | "matchCount": "1", | 971 | "matchCount": "1", |
| 972 | "teardown": [ | 972 | "teardown": [ |
| 973 | "$TC actions flush action ife" | 973 | "$TC actions flush action ife" |
| @@ -991,7 +991,7 @@ | |||
| 991 | "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", | 991 | "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", |
| 992 | "expExitCode": "255", | 992 | "expExitCode": "255", |
| 993 | "verifyCmd": "$TC actions get action ife index 4", | 993 | "verifyCmd": "$TC actions get action ife index 4", |
| 994 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", | 994 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4", |
| 995 | "matchCount": "0", | 995 | "matchCount": "0", |
| 996 | "teardown": [] | 996 | "teardown": [] |
| 997 | }, | 997 | }, |
| @@ -1013,7 +1013,7 @@ | |||
| 1013 | "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", | 1013 | "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", |
| 1014 | "expExitCode": "255", | 1014 | "expExitCode": "255", |
| 1015 | "verifyCmd": "$TC actions get action ife index 4", | 1015 | "verifyCmd": "$TC actions get action ife index 4", |
| 1016 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", | 1016 | "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4", |
| 1017 | "matchCount": "0", | 1017 | "matchCount": "0", |
| 1018 | "teardown": [] | 1018 | "teardown": [] |
| 1019 | }, | 1019 | }, |
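The ife.json edits above all widen the match pattern from a literal `0x...` to `0[xX]...`, so the tests accept iproute2 builds that print the IFE type with either a lowercase or an uppercase hex prefix. A small POSIX-regex sketch of why the widened pattern covers both print styles; the sample strings are made up for illustration (the tdc framework itself matches these patterns in Python):

```c
/* Sketch: the pattern "type 0[xX]ED3E" matches both tc output variants. */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *pattern = "type 0[xX]ED3E";
	const char *samples[] = {
		"ife encode action pass type 0xED3E allow mark index 2",
		"ife encode action pass type 0XED3E allow mark index 2",
	};
	regex_t re;

	if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
		return 1;
	for (int i = 0; i < 2; i++)
		printf("%s -> %s\n", samples[i],
		       regexec(&re, samples[i], 0, NULL, 0) ? "no match" : "match");
	regfree(&re);
	return 0;
}
```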
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index 10b2d894e436..e7e15a7336b6 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json | |||
| @@ -82,35 +82,6 @@ | |||
| 82 | ] | 82 | ] |
| 83 | }, | 83 | }, |
| 84 | { | 84 | { |
| 85 | "id": "ba4e", | ||
| 86 | "name": "Add tunnel_key set action with missing mandatory id parameter", | ||
| 87 | "category": [ | ||
| 88 | "actions", | ||
| 89 | "tunnel_key" | ||
| 90 | ], | ||
| 91 | "setup": [ | ||
| 92 | [ | ||
| 93 | "$TC actions flush action tunnel_key", | ||
| 94 | 0, | ||
| 95 | 1, | ||
| 96 | 255 | ||
| 97 | ] | ||
| 98 | ], | ||
| 99 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2", | ||
| 100 | "expExitCode": "255", | ||
| 101 | "verifyCmd": "$TC actions list action tunnel_key", | ||
| 102 | "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2", | ||
| 103 | "matchCount": "0", | ||
| 104 | "teardown": [ | ||
| 105 | [ | ||
| 106 | "$TC actions flush action tunnel_key", | ||
| 107 | 0, | ||
| 108 | 1, | ||
| 109 | 255 | ||
| 110 | ] | ||
| 111 | ] | ||
| 112 | }, | ||
| 113 | { | ||
| 114 | "id": "a5e0", | 85 | "id": "a5e0", |
| 115 | "name": "Add tunnel_key set action with invalid src_ip parameter", | 86 | "name": "Add tunnel_key set action with invalid src_ip parameter", |
| 116 | "category": [ | 87 | "category": [ |
| @@ -634,7 +605,7 @@ | |||
| 634 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", | 605 | "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", |
| 635 | "expExitCode": "0", | 606 | "expExitCode": "0", |
| 636 | "verifyCmd": "$TC actions get action tunnel_key index 4", | 607 | "verifyCmd": "$TC actions get action tunnel_key index 4", |
| 637 | "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", | 608 | "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", |
| 638 | "matchCount": "1", | 609 | "matchCount": "1", |
| 639 | "teardown": [ | 610 | "teardown": [ |
| 640 | "$TC actions flush action tunnel_key" | 611 | "$TC actions flush action tunnel_key" |
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c index 880b96fc80d4..c0534e298b51 100644 --- a/tools/testing/selftests/vm/gup_benchmark.c +++ b/tools/testing/selftests/vm/gup_benchmark.c | |||
| @@ -25,6 +25,7 @@ struct gup_benchmark { | |||
| 25 | __u64 size; | 25 | __u64 size; |
| 26 | __u32 nr_pages_per_call; | 26 | __u32 nr_pages_per_call; |
| 27 | __u32 flags; | 27 | __u32 flags; |
| 28 | __u64 expansion[10]; /* For future use */ | ||
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 30 | int main(int argc, char **argv) | 31 | int main(int argc, char **argv) |
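The gup_benchmark hunk adds a reserved `expansion[10]` tail so the userspace copy of the structure matches the kernel's ioctl argument size and leaves room for future fields. A sketch of how a caller would keep that working is below; the `GUP_FAST_BENCHMARK` ioctl name and the debugfs path follow the selftest but should be treated as assumptions here.

```c
/* Sketch: keep the userspace struct in sync with the kernel and zero the
 * reserved tail so future kernels can assign meaning to expansion[] without
 * breaking old binaries. Ioctl name and path are assumptions. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>

struct gup_benchmark {
	__u64 get_delta_usec;
	__u64 put_delta_usec;
	__u64 addr;
	__u64 size;
	__u32 nr_pages_per_call;
	__u32 flags;
	__u64 expansion[10];	/* must stay zero until the kernel defines it */
};

#define GUP_FAST_BENCHMARK	_IOWR('g', 1, struct gup_benchmark)

int run_gup_benchmark(unsigned long addr, unsigned long size)
{
	struct gup_benchmark gup;
	int fd, ret;

	fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&gup, 0, sizeof(gup));	/* zeroes expansion[] as well */
	gup.addr = addr;
	gup.size = size;
	gup.nr_pages_per_call = 1;

	ret = ioctl(fd, GUP_FAST_BENCHMARK, &gup);
	close(fd);
	return ret;
}
```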
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index 50f7e9272481..bf1bb15b6fbe 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c | |||
| @@ -1503,7 +1503,7 @@ exit: | |||
| 1503 | exit(20); | 1503 | exit(20); |
| 1504 | } | 1504 | } |
| 1505 | if (successes != total_nr_tests) { | 1505 | if (successes != total_nr_tests) { |
| 1506 | eprintf("ERROR: succeded fewer than number of tries (%d != %d)\n", | 1506 | eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n", |
| 1507 | successes, total_nr_tests); | 1507 | successes, total_nr_tests); |
| 1508 | exit(21); | 1508 | exit(21); |
| 1509 | } | 1509 | } |
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 460b4bdf4c1e..5d546dcdbc80 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c | |||
| @@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) | |||
| 1133 | pkey_assert(err); | 1133 | pkey_assert(err); |
| 1134 | } | 1134 | } |
| 1135 | 1135 | ||
| 1136 | void become_child(void) | ||
| 1137 | { | ||
| 1138 | pid_t forkret; | ||
| 1139 | |||
| 1140 | forkret = fork(); | ||
| 1141 | pkey_assert(forkret >= 0); | ||
| 1142 | dprintf3("[%d] fork() ret: %d\n", getpid(), forkret); | ||
| 1143 | |||
| 1144 | if (!forkret) { | ||
| 1145 | /* in the child */ | ||
| 1146 | return; | ||
| 1147 | } | ||
| 1148 | exit(0); | ||
| 1149 | } | ||
| 1150 | |||
| 1136 | /* Assumes that all pkeys other than 'pkey' are unallocated */ | 1151 | /* Assumes that all pkeys other than 'pkey' are unallocated */ |
| 1137 | void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | 1152 | void test_pkey_alloc_exhaust(int *ptr, u16 pkey) |
| 1138 | { | 1153 | { |
| @@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | |||
| 1141 | int nr_allocated_pkeys = 0; | 1156 | int nr_allocated_pkeys = 0; |
| 1142 | int i; | 1157 | int i; |
| 1143 | 1158 | ||
| 1144 | for (i = 0; i < NR_PKEYS*2; i++) { | 1159 | for (i = 0; i < NR_PKEYS*3; i++) { |
| 1145 | int new_pkey; | 1160 | int new_pkey; |
| 1146 | dprintf1("%s() alloc loop: %d\n", __func__, i); | 1161 | dprintf1("%s() alloc loop: %d\n", __func__, i); |
| 1147 | new_pkey = alloc_pkey(); | 1162 | new_pkey = alloc_pkey(); |
| @@ -1152,21 +1167,27 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | |||
| 1152 | if ((new_pkey == -1) && (errno == ENOSPC)) { | 1167 | if ((new_pkey == -1) && (errno == ENOSPC)) { |
| 1153 | dprintf2("%s() failed to allocate pkey after %d tries\n", | 1168 | dprintf2("%s() failed to allocate pkey after %d tries\n", |
| 1154 | __func__, nr_allocated_pkeys); | 1169 | __func__, nr_allocated_pkeys); |
| 1155 | break; | 1170 | } else { |
| 1171 | /* | ||
| 1172 | * Ensure the number of successes never | ||
| 1173 | * exceeds the number of keys supported | ||
| 1174 | * in the hardware. | ||
| 1175 | */ | ||
| 1176 | pkey_assert(nr_allocated_pkeys < NR_PKEYS); | ||
| 1177 | allocated_pkeys[nr_allocated_pkeys++] = new_pkey; | ||
| 1156 | } | 1178 | } |
| 1157 | pkey_assert(nr_allocated_pkeys < NR_PKEYS); | 1179 | |
| 1158 | allocated_pkeys[nr_allocated_pkeys++] = new_pkey; | 1180 | /* |
| 1181 | * Make sure that allocation state is properly | ||
| 1182 | * preserved across fork(). | ||
| 1183 | */ | ||
| 1184 | if (i == NR_PKEYS*2) | ||
| 1185 | become_child(); | ||
| 1159 | } | 1186 | } |
| 1160 | 1187 | ||
| 1161 | dprintf3("%s()::%d\n", __func__, __LINE__); | 1188 | dprintf3("%s()::%d\n", __func__, __LINE__); |
| 1162 | 1189 | ||
| 1163 | /* | 1190 | /* |
| 1164 | * ensure it did not reach the end of the loop without | ||
| 1165 | * failure: | ||
| 1166 | */ | ||
| 1167 | pkey_assert(i < NR_PKEYS*2); | ||
| 1168 | |||
| 1169 | /* | ||
| 1170 | * There are 16 pkeys supported in hardware. Three are | 1191 | * There are 16 pkeys supported in hardware. Three are |
| 1171 | * allocated by the time we get here: | 1192 | * allocated by the time we get here: |
| 1172 | * 1. The default key (0) | 1193 | * 1. The default key (0) |
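The protection_keys rework above keeps allocating keys past the ENOSPC point and, two passes in, calls the new become_child() so the remaining iterations run in a forked child, verifying that pkey allocation state is preserved across fork(). A minimal sketch of that underlying property, using the raw syscall so it does not depend on a particular libc wrapper:

```c
/* Sketch: protection-key allocations belong to the mm and are inherited
 * across fork(), so an exhausted parent produces an exhausted child. */
#include <sys/syscall.h>
#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int pkey_alloc_raw(void)
{
	return syscall(SYS_pkey_alloc, 0, 0);
}

int main(void)
{
	int pkey, nr = 0;

	/* Exhaust the keys in the parent (typically 15 usable on x86). */
	while ((pkey = pkey_alloc_raw()) >= 0)
		nr++;
	printf("parent allocated %d pkeys, then errno=%d\n", nr, errno);

	if (fork() == 0) {
		/* Child inherits the exhausted allocation map. */
		pkey = pkey_alloc_raw();
		printf("child pkey_alloc -> %d (errno=%d, expect ENOSPC)\n",
		       pkey, errno);
		_exit(0);
	}
	wait(NULL);
	return 0;
}
```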
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c index 00a26a82fa98..97311333700e 100644 --- a/tools/testing/selftests/x86/unwind_vdso.c +++ b/tools/testing/selftests/x86/unwind_vdso.c | |||
| @@ -44,7 +44,6 @@ int main() | |||
| 44 | #include <stdbool.h> | 44 | #include <stdbool.h> |
| 45 | #include <sys/ptrace.h> | 45 | #include <sys/ptrace.h> |
| 46 | #include <sys/user.h> | 46 | #include <sys/user.h> |
| 47 | #include <sys/ucontext.h> | ||
| 48 | #include <link.h> | 47 | #include <link.h> |
| 49 | #include <sys/auxv.h> | 48 | #include <sys/auxv.h> |
| 50 | #include <dlfcn.h> | 49 | #include <dlfcn.h> |
