216 files changed, 1236 insertions, 831 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
| @@ -72,6 +72,10 @@ ForEachMacros: | |||
| 72 | - 'apei_estatus_for_each_section' | 72 | - 'apei_estatus_for_each_section' |
| 73 | - 'ata_for_each_dev' | 73 | - 'ata_for_each_dev' |
| 74 | - 'ata_for_each_link' | 74 | - 'ata_for_each_link' |
| 75 | - '__ata_qc_for_each' | ||
| 76 | - 'ata_qc_for_each' | ||
| 77 | - 'ata_qc_for_each_raw' | ||
| 78 | - 'ata_qc_for_each_with_internal' | ||
| 75 | - 'ax25_for_each' | 79 | - 'ax25_for_each' |
| 76 | - 'ax25_uid_for_each' | 80 | - 'ax25_uid_for_each' |
| 77 | - 'bio_for_each_integrity_vec' | 81 | - 'bio_for_each_integrity_vec' |
| @@ -85,6 +89,7 @@ ForEachMacros: | |||
| 85 | - 'blk_queue_for_each_rl' | 89 | - 'blk_queue_for_each_rl' |
| 86 | - 'bond_for_each_slave' | 90 | - 'bond_for_each_slave' |
| 87 | - 'bond_for_each_slave_rcu' | 91 | - 'bond_for_each_slave_rcu' |
| 92 | - 'bpf_for_each_spilled_reg' | ||
| 88 | - 'btree_for_each_safe128' | 93 | - 'btree_for_each_safe128' |
| 89 | - 'btree_for_each_safe32' | 94 | - 'btree_for_each_safe32' |
| 90 | - 'btree_for_each_safe64' | 95 | - 'btree_for_each_safe64' |
| @@ -103,6 +108,8 @@ ForEachMacros: | |||
| 103 | - 'drm_atomic_crtc_for_each_plane' | 108 | - 'drm_atomic_crtc_for_each_plane' |
| 104 | - 'drm_atomic_crtc_state_for_each_plane' | 109 | - 'drm_atomic_crtc_state_for_each_plane' |
| 105 | - 'drm_atomic_crtc_state_for_each_plane_state' | 110 | - 'drm_atomic_crtc_state_for_each_plane_state' |
| 111 | - 'drm_atomic_for_each_plane_damage' | ||
| 112 | - 'drm_connector_for_each_possible_encoder' | ||
| 106 | - 'drm_for_each_connector_iter' | 113 | - 'drm_for_each_connector_iter' |
| 107 | - 'drm_for_each_crtc' | 114 | - 'drm_for_each_crtc' |
| 108 | - 'drm_for_each_encoder' | 115 | - 'drm_for_each_encoder' |
| @@ -121,11 +128,21 @@ ForEachMacros: | |||
| 121 | - 'for_each_bio' | 128 | - 'for_each_bio' |
| 122 | - 'for_each_board_func_rsrc' | 129 | - 'for_each_board_func_rsrc' |
| 123 | - 'for_each_bvec' | 130 | - 'for_each_bvec' |
| 131 | - 'for_each_card_components' | ||
| 132 | - 'for_each_card_links' | ||
| 133 | - 'for_each_card_links_safe' | ||
| 134 | - 'for_each_card_prelinks' | ||
| 135 | - 'for_each_card_rtds' | ||
| 136 | - 'for_each_card_rtds_safe' | ||
| 137 | - 'for_each_cgroup_storage_type' | ||
| 124 | - 'for_each_child_of_node' | 138 | - 'for_each_child_of_node' |
| 125 | - 'for_each_clear_bit' | 139 | - 'for_each_clear_bit' |
| 126 | - 'for_each_clear_bit_from' | 140 | - 'for_each_clear_bit_from' |
| 127 | - 'for_each_cmsghdr' | 141 | - 'for_each_cmsghdr' |
| 128 | - 'for_each_compatible_node' | 142 | - 'for_each_compatible_node' |
| 143 | - 'for_each_component_dais' | ||
| 144 | - 'for_each_component_dais_safe' | ||
| 145 | - 'for_each_comp_order' | ||
| 129 | - 'for_each_console' | 146 | - 'for_each_console' |
| 130 | - 'for_each_cpu' | 147 | - 'for_each_cpu' |
| 131 | - 'for_each_cpu_and' | 148 | - 'for_each_cpu_and' |
| @@ -133,6 +150,10 @@ ForEachMacros: | |||
| 133 | - 'for_each_cpu_wrap' | 150 | - 'for_each_cpu_wrap' |
| 134 | - 'for_each_dev_addr' | 151 | - 'for_each_dev_addr' |
| 135 | - 'for_each_dma_cap_mask' | 152 | - 'for_each_dma_cap_mask' |
| 153 | - 'for_each_dpcm_be' | ||
| 154 | - 'for_each_dpcm_be_rollback' | ||
| 155 | - 'for_each_dpcm_be_safe' | ||
| 156 | - 'for_each_dpcm_fe' | ||
| 136 | - 'for_each_drhd_unit' | 157 | - 'for_each_drhd_unit' |
| 137 | - 'for_each_dss_dev' | 158 | - 'for_each_dss_dev' |
| 138 | - 'for_each_efi_memory_desc' | 159 | - 'for_each_efi_memory_desc' |
| @@ -149,6 +170,7 @@ ForEachMacros: | |||
| 149 | - 'for_each_iommu' | 170 | - 'for_each_iommu' |
| 150 | - 'for_each_ip_tunnel_rcu' | 171 | - 'for_each_ip_tunnel_rcu' |
| 151 | - 'for_each_irq_nr' | 172 | - 'for_each_irq_nr' |
| 173 | - 'for_each_link_codecs' | ||
| 152 | - 'for_each_lru' | 174 | - 'for_each_lru' |
| 153 | - 'for_each_matching_node' | 175 | - 'for_each_matching_node' |
| 154 | - 'for_each_matching_node_and_match' | 176 | - 'for_each_matching_node_and_match' |
| @@ -160,6 +182,7 @@ ForEachMacros: | |||
| 160 | - 'for_each_mem_range_rev' | 182 | - 'for_each_mem_range_rev' |
| 161 | - 'for_each_migratetype_order' | 183 | - 'for_each_migratetype_order' |
| 162 | - 'for_each_msi_entry' | 184 | - 'for_each_msi_entry' |
| 185 | - 'for_each_msi_entry_safe' | ||
| 163 | - 'for_each_net' | 186 | - 'for_each_net' |
| 164 | - 'for_each_netdev' | 187 | - 'for_each_netdev' |
| 165 | - 'for_each_netdev_continue' | 188 | - 'for_each_netdev_continue' |
| @@ -183,12 +206,14 @@ ForEachMacros: | |||
| 183 | - 'for_each_node_with_property' | 206 | - 'for_each_node_with_property' |
| 184 | - 'for_each_of_allnodes' | 207 | - 'for_each_of_allnodes' |
| 185 | - 'for_each_of_allnodes_from' | 208 | - 'for_each_of_allnodes_from' |
| 209 | - 'for_each_of_cpu_node' | ||
| 186 | - 'for_each_of_pci_range' | 210 | - 'for_each_of_pci_range' |
| 187 | - 'for_each_old_connector_in_state' | 211 | - 'for_each_old_connector_in_state' |
| 188 | - 'for_each_old_crtc_in_state' | 212 | - 'for_each_old_crtc_in_state' |
| 189 | - 'for_each_oldnew_connector_in_state' | 213 | - 'for_each_oldnew_connector_in_state' |
| 190 | - 'for_each_oldnew_crtc_in_state' | 214 | - 'for_each_oldnew_crtc_in_state' |
| 191 | - 'for_each_oldnew_plane_in_state' | 215 | - 'for_each_oldnew_plane_in_state' |
| 216 | - 'for_each_oldnew_plane_in_state_reverse' | ||
| 192 | - 'for_each_oldnew_private_obj_in_state' | 217 | - 'for_each_oldnew_private_obj_in_state' |
| 193 | - 'for_each_old_plane_in_state' | 218 | - 'for_each_old_plane_in_state' |
| 194 | - 'for_each_old_private_obj_in_state' | 219 | - 'for_each_old_private_obj_in_state' |
| @@ -206,14 +231,17 @@ ForEachMacros: | |||
| 206 | - 'for_each_process' | 231 | - 'for_each_process' |
| 207 | - 'for_each_process_thread' | 232 | - 'for_each_process_thread' |
| 208 | - 'for_each_property_of_node' | 233 | - 'for_each_property_of_node' |
| 234 | - 'for_each_registered_fb' | ||
| 209 | - 'for_each_reserved_mem_region' | 235 | - 'for_each_reserved_mem_region' |
| 210 | - 'for_each_resv_unavail_range' | 236 | - 'for_each_rtd_codec_dai' |
| 237 | - 'for_each_rtd_codec_dai_rollback' | ||
| 211 | - 'for_each_rtdcom' | 238 | - 'for_each_rtdcom' |
| 212 | - 'for_each_rtdcom_safe' | 239 | - 'for_each_rtdcom_safe' |
| 213 | - 'for_each_set_bit' | 240 | - 'for_each_set_bit' |
| 214 | - 'for_each_set_bit_from' | 241 | - 'for_each_set_bit_from' |
| 215 | - 'for_each_sg' | 242 | - 'for_each_sg' |
| 216 | - 'for_each_sg_page' | 243 | - 'for_each_sg_page' |
| 244 | - 'for_each_sibling_event' | ||
| 217 | - '__for_each_thread' | 245 | - '__for_each_thread' |
| 218 | - 'for_each_thread' | 246 | - 'for_each_thread' |
| 219 | - 'for_each_zone' | 247 | - 'for_each_zone' |
| @@ -251,6 +279,8 @@ ForEachMacros: | |||
| 251 | - 'hlist_nulls_for_each_entry_from' | 279 | - 'hlist_nulls_for_each_entry_from' |
| 252 | - 'hlist_nulls_for_each_entry_rcu' | 280 | - 'hlist_nulls_for_each_entry_rcu' |
| 253 | - 'hlist_nulls_for_each_entry_safe' | 281 | - 'hlist_nulls_for_each_entry_safe' |
| 282 | - 'i3c_bus_for_each_i2cdev' | ||
| 283 | - 'i3c_bus_for_each_i3cdev' | ||
| 254 | - 'ide_host_for_each_port' | 284 | - 'ide_host_for_each_port' |
| 255 | - 'ide_port_for_each_dev' | 285 | - 'ide_port_for_each_dev' |
| 256 | - 'ide_port_for_each_present_dev' | 286 | - 'ide_port_for_each_present_dev' |
| @@ -267,11 +297,14 @@ ForEachMacros: | |||
| 267 | - 'kvm_for_each_memslot' | 297 | - 'kvm_for_each_memslot' |
| 268 | - 'kvm_for_each_vcpu' | 298 | - 'kvm_for_each_vcpu' |
| 269 | - 'list_for_each' | 299 | - 'list_for_each' |
| 300 | - 'list_for_each_codec' | ||
| 301 | - 'list_for_each_codec_safe' | ||
| 270 | - 'list_for_each_entry' | 302 | - 'list_for_each_entry' |
| 271 | - 'list_for_each_entry_continue' | 303 | - 'list_for_each_entry_continue' |
| 272 | - 'list_for_each_entry_continue_rcu' | 304 | - 'list_for_each_entry_continue_rcu' |
| 273 | - 'list_for_each_entry_continue_reverse' | 305 | - 'list_for_each_entry_continue_reverse' |
| 274 | - 'list_for_each_entry_from' | 306 | - 'list_for_each_entry_from' |
| 307 | - 'list_for_each_entry_from_rcu' | ||
| 275 | - 'list_for_each_entry_from_reverse' | 308 | - 'list_for_each_entry_from_reverse' |
| 276 | - 'list_for_each_entry_lockless' | 309 | - 'list_for_each_entry_lockless' |
| 277 | - 'list_for_each_entry_rcu' | 310 | - 'list_for_each_entry_rcu' |
| @@ -291,6 +324,7 @@ ForEachMacros: | |||
| 291 | - 'media_device_for_each_intf' | 324 | - 'media_device_for_each_intf' |
| 292 | - 'media_device_for_each_link' | 325 | - 'media_device_for_each_link' |
| 293 | - 'media_device_for_each_pad' | 326 | - 'media_device_for_each_pad' |
| 327 | - 'nanddev_io_for_each_page' | ||
| 294 | - 'netdev_for_each_lower_dev' | 328 | - 'netdev_for_each_lower_dev' |
| 295 | - 'netdev_for_each_lower_private' | 329 | - 'netdev_for_each_lower_private' |
| 296 | - 'netdev_for_each_lower_private_rcu' | 330 | - 'netdev_for_each_lower_private_rcu' |
| @@ -357,12 +391,14 @@ ForEachMacros: | |||
| 357 | - 'sk_nulls_for_each' | 391 | - 'sk_nulls_for_each' |
| 358 | - 'sk_nulls_for_each_from' | 392 | - 'sk_nulls_for_each_from' |
| 359 | - 'sk_nulls_for_each_rcu' | 393 | - 'sk_nulls_for_each_rcu' |
| 394 | - 'snd_array_for_each' | ||
| 360 | - 'snd_pcm_group_for_each_entry' | 395 | - 'snd_pcm_group_for_each_entry' |
| 361 | - 'snd_soc_dapm_widget_for_each_path' | 396 | - 'snd_soc_dapm_widget_for_each_path' |
| 362 | - 'snd_soc_dapm_widget_for_each_path_safe' | 397 | - 'snd_soc_dapm_widget_for_each_path_safe' |
| 363 | - 'snd_soc_dapm_widget_for_each_sink_path' | 398 | - 'snd_soc_dapm_widget_for_each_sink_path' |
| 364 | - 'snd_soc_dapm_widget_for_each_source_path' | 399 | - 'snd_soc_dapm_widget_for_each_source_path' |
| 365 | - 'tb_property_for_each' | 400 | - 'tb_property_for_each' |
| 401 | - 'tcf_exts_for_each_action' | ||
| 366 | - 'udp_portaddr_for_each_entry' | 402 | - 'udp_portaddr_for_each_entry' |
| 367 | - 'udp_portaddr_for_each_entry_rcu' | 403 | - 'udp_portaddr_for_each_entry_rcu' |
| 368 | - 'usb_hub_for_each_child' | 404 | - 'usb_hub_for_each_child' |
| @@ -371,6 +407,11 @@ ForEachMacros: | |||
| 371 | - 'v4l2_m2m_for_each_dst_buf_safe' | 407 | - 'v4l2_m2m_for_each_dst_buf_safe' |
| 372 | - 'v4l2_m2m_for_each_src_buf' | 408 | - 'v4l2_m2m_for_each_src_buf' |
| 373 | - 'v4l2_m2m_for_each_src_buf_safe' | 409 | - 'v4l2_m2m_for_each_src_buf_safe' |
| 410 | - 'virtio_device_for_each_vq' | ||
| 411 | - 'xa_for_each' | ||
| 412 | - 'xas_for_each' | ||
| 413 | - 'xas_for_each_conflict' | ||
| 414 | - 'xas_for_each_marked' | ||
| 374 | - 'zorro_for_each_dev' | 415 | - 'zorro_for_each_dev' |
| 375 | 416 | ||
| 376 | #IncludeBlocks: Preserve # Unknown to clang-format-5.0 | 417 | #IncludeBlocks: Preserve # Unknown to clang-format-5.0 |
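The ForEachMacros list above is a clang-format option: any macro named in it is parsed as a foreach-style loop rather than as an ordinary function call, so the block that follows the macro gets loop-style brace placement and indentation. A minimal sketch of the effect for one of the listed macros, list_for_each_entry(); the struct, list head, member name and process_entry() helper are invented purely for illustration:

        /* Because 'list_for_each_entry' appears in ForEachMacros,
         * clang-format lays this out like a for loop instead of
         * treating the trailing { ... } block as part of a call
         * expression.
         */
        struct item *pos;

        list_for_each_entry(pos, &item_list, list) {
                process_entry(pos);
        }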
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
| @@ -235,4 +235,4 @@ cpus { | |||
| 235 | =========================================== | 235 | =========================================== |
| 236 | 236 | ||
| 237 | [1] ARM Linux Kernel documentation - CPUs bindings | 237 | [1] ARM Linux Kernel documentation - CPUs bindings |
| 238 | Documentation/devicetree/bindings/arm/cpus.txt | 238 | Documentation/devicetree/bindings/arm/cpus.yaml |
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
| @@ -684,7 +684,7 @@ cpus { | |||
| 684 | =========================================== | 684 | =========================================== |
| 685 | 685 | ||
| 686 | [1] ARM Linux Kernel documentation - CPUs bindings | 686 | [1] ARM Linux Kernel documentation - CPUs bindings |
| 687 | Documentation/devicetree/bindings/arm/cpus.txt | 687 | Documentation/devicetree/bindings/arm/cpus.yaml |
| 688 | 688 | ||
| 689 | [2] ARM Linux Kernel documentation - PSCI bindings | 689 | [2] ARM Linux Kernel documentation - PSCI bindings |
| 690 | Documentation/devicetree/bindings/arm/psci.txt | 690 | Documentation/devicetree/bindings/arm/psci.txt |
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
| @@ -4,7 +4,7 @@ SP810 System Controller | |||
| 4 | Required properties: | 4 | Required properties: |
| 5 | 5 | ||
| 6 | - compatible: standard compatible string for a Primecell peripheral, | 6 | - compatible: standard compatible string for a Primecell peripheral, |
| 7 | see Documentation/devicetree/bindings/arm/primecell.txt | 7 | see Documentation/devicetree/bindings/arm/primecell.yaml |
| 8 | for more details | 8 | for more details |
| 9 | should be: "arm,sp810", "arm,primecell" | 9 | should be: "arm,sp810", "arm,primecell" |
| 10 | 10 | ||
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
| @@ -472,4 +472,4 @@ cpus { | |||
| 472 | 472 | ||
| 473 | =============================================================================== | 473 | =============================================================================== |
| 474 | [1] ARM Linux kernel documentation | 474 | [1] ARM Linux kernel documentation |
| 475 | Documentation/devicetree/bindings/arm/cpus.txt | 475 | Documentation/devicetree/bindings/arm/cpus.yaml |
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
| @@ -18,4 +18,4 @@ Required Properties: | |||
| 18 | Each clock is assigned an identifier and client nodes use this identifier | 18 | Each clock is assigned an identifier and client nodes use this identifier |
| 19 | to specify the clock which they consume. | 19 | to specify the clock which they consume. |
| 20 | 20 | ||
| 21 | All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. | 21 | All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>. |
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
| @@ -1,6 +1,6 @@ | |||
| 1 | * ARM PrimeCell Color LCD Controller PL110/PL111 | 1 | * ARM PrimeCell Color LCD Controller PL110/PL111 |
| 2 | 2 | ||
| 3 | See also Documentation/devicetree/bindings/arm/primecell.txt | 3 | See also Documentation/devicetree/bindings/arm/primecell.yaml |
| 4 | 4 | ||
| 5 | Required properties: | 5 | Required properties: |
| 6 | 6 | ||
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
| @@ -14,8 +14,6 @@ Required properties: | |||
| 14 | 14 | ||
| 15 | "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K | 15 | "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K |
| 16 | SoCs (either from AP or CP), see | 16 | SoCs (either from AP or CP), see |
| 17 | Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt | ||
| 18 | and | ||
| 19 | Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt | 17 | Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt |
| 20 | for specific details about the offset property. | 18 | for specific details about the offset property. |
| 21 | 19 | ||
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
| @@ -78,7 +78,7 @@ Sub-nodes: | |||
| 78 | PPI affinity can be expressed as a single "ppi-partitions" node, | 78 | PPI affinity can be expressed as a single "ppi-partitions" node, |
| 79 | containing a set of sub-nodes, each with the following property: | 79 | containing a set of sub-nodes, each with the following property: |
| 80 | - affinity: Should be a list of phandles to CPU nodes (as described in | 80 | - affinity: Should be a list of phandles to CPU nodes (as described in |
| 81 | Documentation/devicetree/bindings/arm/cpus.txt). | 81 | Documentation/devicetree/bindings/arm/cpus.yaml). |
| 82 | 82 | ||
| 83 | GICv3 has one or more Interrupt Translation Services (ITS) that are | 83 | GICv3 has one or more Interrupt Translation Services (ITS) that are |
| 84 | used to route Message Signalled Interrupts (MSI) to the CPUs. | 84 | used to route Message Signalled Interrupts (MSI) to the CPUs. |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
| @@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function | |||
| 55 | = EXAMPLE | 55 | = EXAMPLE |
| 56 | The following example represents the GLINK RPM node on a MSM8996 device, with | 56 | The following example represents the GLINK RPM node on a MSM8996 device, with |
| 57 | the function for the "rpm_request" channel defined, which is used for | 57 | the function for the "rpm_request" channel defined, which is used for |
| 58 | regualtors and root clocks. | 58 | regulators and root clocks. |
| 59 | 59 | ||
| 60 | apcs_glb: mailbox@9820000 { | 60 | apcs_glb: mailbox@9820000 { |
| 61 | compatible = "qcom,msm8996-apcs-hmss-global"; | 61 | compatible = "qcom,msm8996-apcs-hmss-global"; |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
| @@ -41,12 +41,12 @@ processor ID) and a string identifier. | |||
| 41 | - qcom,local-pid: | 41 | - qcom,local-pid: |
| 42 | Usage: required | 42 | Usage: required |
| 43 | Value type: <u32> | 43 | Value type: <u32> |
| 44 | Definition: specifies the identfier of the local endpoint of this edge | 44 | Definition: specifies the identifier of the local endpoint of this edge |
| 45 | 45 | ||
| 46 | - qcom,remote-pid: | 46 | - qcom,remote-pid: |
| 47 | Usage: required | 47 | Usage: required |
| 48 | Value type: <u32> | 48 | Value type: <u32> |
| 49 | Definition: specifies the identfier of the remote endpoint of this edge | 49 | Definition: specifies the identifier of the remote endpoint of this edge |
| 50 | 50 | ||
| 51 | = SUBNODES | 51 | = SUBNODES |
| 52 | Each SMP2P pair contain a set of inbound and outbound entries, these are | 52 | Each SMP2P pair contain a set of inbound and outbound entries, these are |
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
| @@ -163,6 +163,14 @@ C. Boot options | |||
| 163 | be preserved until there actually is some text is output to the console. | 163 | be preserved until there actually is some text is output to the console. |
| 164 | This option causes fbcon to bind immediately to the fbdev device. | 164 | This option causes fbcon to bind immediately to the fbdev device. |
| 165 | 165 | ||
| 166 | 7. fbcon=logo-pos:<location> | ||
| 167 | |||
| 168 | The only possible 'location' is 'center' (without quotes), and when | ||
| 169 | given, the bootup logo is moved from the default top-left corner | ||
| 170 | location to the center of the framebuffer. If more than one logo is | ||
| 171 | displayed due to multiple CPUs, the collected line of logos is moved | ||
| 172 | as a whole. | ||
| 173 | |||
| 166 | C. Attaching, Detaching and Unloading | 174 | C. Attaching, Detaching and Unloading |
| 167 | 175 | ||
| 168 | Before going on to how to attach, detach and unload the framebuffer console, an | 176 | Before going on to how to attach, detach and unload the framebuffer console, an |
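As a quick usage note for the fbcon=logo-pos option documented in the hunk above: the only value currently accepted is 'center', so appending the parameter below to the kernel command line (the rest of the command line is left out here) moves the boot logo from the top-left corner to the center of the framebuffer:

        fbcon=logo-pos:center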
diff --git a/Makefile b/Makefile
| @@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION | |||
| 955 | endif | 955 | endif |
| 956 | endif | 956 | endif |
| 957 | 957 | ||
| 958 | PHONY += prepare0 | ||
| 958 | 959 | ||
| 959 | ifeq ($(KBUILD_EXTMOD),) | 960 | ifeq ($(KBUILD_EXTMOD),) |
| 960 | core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ | 961 | core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ |
| @@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc | |||
| 1061 | # archprepare is used in arch Makefiles and when processed asm symlink, | 1062 | # archprepare is used in arch Makefiles and when processed asm symlink, |
| 1062 | # version.h and scripts_basic is processed / created. | 1063 | # version.h and scripts_basic is processed / created. |
| 1063 | 1064 | ||
| 1064 | # Listed in dependency order | 1065 | PHONY += prepare archprepare prepare1 prepare2 prepare3 |
| 1065 | PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 | ||
| 1066 | 1066 | ||
| 1067 | # prepare3 is used to check if we are building in a separate output directory, | 1067 | # prepare3 is used to check if we are building in a separate output directory, |
| 1068 | # and if so do: | 1068 | # and if so do: |
| @@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS)) | |||
| 1360 | mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) | 1360 | mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) |
| 1361 | mrproper-dirs := $(addprefix _mrproper_,scripts) | 1361 | mrproper-dirs := $(addprefix _mrproper_,scripts) |
| 1362 | 1362 | ||
| 1363 | PHONY += $(mrproper-dirs) mrproper archmrproper | 1363 | PHONY += $(mrproper-dirs) mrproper |
| 1364 | $(mrproper-dirs): | 1364 | $(mrproper-dirs): |
| 1365 | $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) | 1365 | $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) |
| 1366 | 1366 | ||
| 1367 | mrproper: clean archmrproper $(mrproper-dirs) | 1367 | mrproper: clean $(mrproper-dirs) |
| 1368 | $(call cmd,rmdirs) | 1368 | $(call cmd,rmdirs) |
| 1369 | $(call cmd,rmfiles) | 1369 | $(call cmd,rmfiles) |
| 1370 | 1370 | ||
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index eb43e09c1980..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
| @@ -60,8 +60,6 @@ | |||
| 60 | 60 | ||
| 61 | #ifdef CONFIG_KASAN_SW_TAGS | 61 | #ifdef CONFIG_KASAN_SW_TAGS |
| 62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) | 62 | #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) |
| 63 | #else | ||
| 64 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 65 | #endif | 63 | #endif |
| 66 | 64 | ||
| 67 | #ifndef __ASSEMBLY__ | 65 | #ifndef __ASSEMBLY__ |
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index ac352accb3d9..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
| @@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void) | |||
| 60 | * later determine that kpti is required, then | 60 | * later determine that kpti is required, then |
| 61 | * kpti_install_ng_mappings() will make them non-global. | 61 | * kpti_install_ng_mappings() will make them non-global. |
| 62 | */ | 62 | */ |
| 63 | if (arm64_kernel_unmapped_at_el0()) | ||
| 64 | return true; | ||
| 65 | |||
| 63 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) | 66 | if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) |
| 64 | return arm64_kernel_unmapped_at_el0(); | 67 | return false; |
| 65 | 68 | ||
| 66 | /* | 69 | /* |
| 67 | * KASLR is enabled so we're going to be enabling kpti on non-broken | 70 | * KASLR is enabled so we're going to be enabling kpti on non-broken |
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..ba6b41790fcd 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
| 15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 16 | 16 | ||
| 17 | #include <asm/cacheflush.h> | ||
| 17 | #include <asm/fixmap.h> | 18 | #include <asm/fixmap.h> |
| 18 | #include <asm/kernel-pgtable.h> | 19 | #include <asm/kernel-pgtable.h> |
| 19 | #include <asm/memory.h> | 20 | #include <asm/memory.h> |
| @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt) | |||
| 43 | return ret; | 44 | return ret; |
| 44 | } | 45 | } |
| 45 | 46 | ||
| 46 | static __init const u8 *get_cmdline(void *fdt) | 47 | static __init const u8 *kaslr_get_cmdline(void *fdt) |
| 47 | { | 48 | { |
| 48 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; | 49 | static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; |
| 49 | 50 | ||
| @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
| 109 | * Check if 'nokaslr' appears on the command line, and | 110 | * Check if 'nokaslr' appears on the command line, and |
| 110 | * return 0 if that is the case. | 111 | * return 0 if that is the case. |
| 111 | */ | 112 | */ |
| 112 | cmdline = get_cmdline(fdt); | 113 | cmdline = kaslr_get_cmdline(fdt); |
| 113 | str = strstr(cmdline, "nokaslr"); | 114 | str = strstr(cmdline, "nokaslr"); |
| 114 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) | 115 | if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) |
| 115 | return 0; | 116 | return 0; |
| @@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
| 169 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; | 170 | module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; |
| 170 | module_alloc_base &= PAGE_MASK; | 171 | module_alloc_base &= PAGE_MASK; |
| 171 | 172 | ||
| 173 | __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); | ||
| 174 | __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); | ||
| 175 | |||
| 172 | return offset; | 176 | return offset; |
| 173 | } | 177 | } |
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
| @@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/ | |||
| 37 | 37 | ||
| 38 | boot := arch/h8300/boot | 38 | boot := arch/h8300/boot |
| 39 | 39 | ||
| 40 | archmrproper: | ||
| 41 | |||
| 42 | archclean: | 40 | archclean: |
| 43 | $(Q)$(MAKE) $(clean)=$(boot) | 41 | $(Q)$(MAKE) $(clean)=$(boot) |
| 44 | 42 | ||
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
| @@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig | |||
| 16 | NM := $(CROSS_COMPILE)nm -B | 16 | NM := $(CROSS_COMPILE)nm -B |
| 17 | READELF := $(CROSS_COMPILE)readelf | 17 | READELF := $(CROSS_COMPILE)readelf |
| 18 | 18 | ||
| 19 | export AWK | ||
| 20 | |||
| 21 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ | 19 | CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ |
| 22 | 20 | ||
| 23 | OBJCOPYFLAGS := --strip-all | 21 | OBJCOPYFLAGS := --strip-all |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..0d14f51d0002 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
| @@ -3155,6 +3155,7 @@ config MIPS32_O32 | |||
| 3155 | config MIPS32_N32 | 3155 | config MIPS32_N32 |
| 3156 | bool "Kernel support for n32 binaries" | 3156 | bool "Kernel support for n32 binaries" |
| 3157 | depends on 64BIT | 3157 | depends on 64BIT |
| 3158 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | ||
| 3158 | select COMPAT | 3159 | select COMPAT |
| 3159 | select MIPS32_COMPAT | 3160 | select MIPS32_COMPAT |
| 3160 | select SYSVIPC_COMPAT if SYSVIPC | 3161 | select SYSVIPC_COMPAT if SYSVIPC |
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
| @@ -173,6 +173,31 @@ void __init plat_mem_setup(void) | |||
| 173 | pm_power_off = bcm47xx_machine_halt; | 173 | pm_power_off = bcm47xx_machine_halt; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | #ifdef CONFIG_BCM47XX_BCMA | ||
| 177 | static struct device * __init bcm47xx_setup_device(void) | ||
| 178 | { | ||
| 179 | struct device *dev; | ||
| 180 | int err; | ||
| 181 | |||
| 182 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
| 183 | if (!dev) | ||
| 184 | return NULL; | ||
| 185 | |||
| 186 | err = dev_set_name(dev, "bcm47xx_soc"); | ||
| 187 | if (err) { | ||
| 188 | pr_err("Failed to set SoC device name: %d\n", err); | ||
| 189 | kfree(dev); | ||
| 190 | return NULL; | ||
| 191 | } | ||
| 192 | |||
| 193 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
| 194 | if (err) | ||
| 195 | pr_err("Failed to set SoC DMA mask: %d\n", err); | ||
| 196 | |||
| 197 | return dev; | ||
| 198 | } | ||
| 199 | #endif | ||
| 200 | |||
| 176 | /* | 201 | /* |
| 177 | * This finishes bus initialization doing things that were not possible without | 202 | * This finishes bus initialization doing things that were not possible without |
| 178 | * kmalloc. Make sure to call it late enough (after mm_init). | 203 | * kmalloc. Make sure to call it late enough (after mm_init). |
| @@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) | |||
| 183 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { | 208 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { |
| 184 | int err; | 209 | int err; |
| 185 | 210 | ||
| 211 | bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); | ||
| 212 | if (!bcm47xx_bus.bcma.dev) | ||
| 213 | panic("Failed to setup SoC device\n"); | ||
| 214 | |||
| 186 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); | 215 | err = bcma_host_soc_init(&bcm47xx_bus.bcma); |
| 187 | if (err) | 216 | if (err) |
| 188 | panic("Failed to initialize BCMA bus (err %d)", err); | 217 | panic("Failed to initialize BCMA bus (err %d)", err); |
| @@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) | |||
| 235 | #endif | 264 | #endif |
| 236 | #ifdef CONFIG_BCM47XX_BCMA | 265 | #ifdef CONFIG_BCM47XX_BCMA |
| 237 | case BCM47XX_BUS_TYPE_BCMA: | 266 | case BCM47XX_BUS_TYPE_BCMA: |
| 267 | if (device_register(bcm47xx_bus.bcma.dev)) | ||
| 268 | pr_err("Failed to register SoC device\n"); | ||
| 238 | bcma_bus_register(&bcm47xx_bus.bcma.bus); | 269 | bcma_bus_register(&bcm47xx_bus.bcma.bus); |
| 239 | break; | 270 | break; |
| 240 | #endif | 271 | #endif |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
| @@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored) | |||
| 98 | " sync \n" | 98 | " sync \n" |
| 99 | " synci ($0) \n"); | 99 | " synci ($0) \n"); |
| 100 | 100 | ||
| 101 | relocated_kexec_smp_wait(NULL); | 101 | kexec_reboot(); |
| 102 | } | 102 | } |
| 103 | #endif | 103 | #endif |
| 104 | 104 | ||
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
| @@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y | |||
| 66 | # CONFIG_SERIAL_8250_PCI is not set | 66 | # CONFIG_SERIAL_8250_PCI is not set |
| 67 | CONFIG_SERIAL_8250_NR_UARTS=1 | 67 | CONFIG_SERIAL_8250_NR_UARTS=1 |
| 68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 | 68 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 |
| 69 | CONFIG_SERIAL_OF_PLATFORM=y | ||
| 69 | CONFIG_SERIAL_AR933X=y | 70 | CONFIG_SERIAL_AR933X=y |
| 70 | CONFIG_SERIAL_AR933X_CONSOLE=y | 71 | CONFIG_SERIAL_AR933X_CONSOLE=y |
| 71 | # CONFIG_HW_RANDOM is not set | 72 | # CONFIG_HW_RANDOM is not set |
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
| @@ -18,8 +18,6 @@ | |||
| 18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) | 18 | #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) |
| 19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) | 19 | #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) |
| 20 | 20 | ||
| 21 | #define MIPS_CPU_TIMER_IRQ 7 | ||
| 22 | |||
| 23 | #define MAX_IM 5 | 21 | #define MAX_IM 5 |
| 24 | 22 | ||
| 25 | #endif /* _FALCON_IRQ__ */ | 23 | #endif /* _FALCON_IRQ__ */ |
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
| @@ -19,8 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) | 20 | #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) |
| 21 | 21 | ||
| 22 | #define MIPS_CPU_TIMER_IRQ 7 | ||
| 23 | |||
| 24 | #define MAX_IM 5 | 22 | #define MAX_IM 5 |
| 25 | 23 | ||
| 26 | #endif | 24 | #endif |
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
| @@ -74,14 +74,15 @@ static int __init vdma_init(void) | |||
| 74 | get_order(VDMA_PGTBL_SIZE)); | 74 | get_order(VDMA_PGTBL_SIZE)); |
| 75 | BUG_ON(!pgtbl); | 75 | BUG_ON(!pgtbl); |
| 76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); | 76 | dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); |
| 77 | pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); | 77 | pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); |
| 78 | 78 | ||
| 79 | /* | 79 | /* |
| 80 | * Clear the R4030 translation table | 80 | * Clear the R4030 translation table |
| 81 | */ | 81 | */ |
| 82 | vdma_pgtbl_init(); | 82 | vdma_pgtbl_init(); |
| 83 | 83 | ||
| 84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); | 84 | r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, |
| 85 | CPHYSADDR((unsigned long)pgtbl)); | ||
| 85 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); | 86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); |
| 86 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); | 87 | r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); |
| 87 | 88 | ||
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
| @@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { | |||
| 224 | .irq_set_type = ltq_eiu_settype, | 224 | .irq_set_type = ltq_eiu_settype, |
| 225 | }; | 225 | }; |
| 226 | 226 | ||
| 227 | static void ltq_hw_irqdispatch(int module) | 227 | static void ltq_hw_irq_handler(struct irq_desc *desc) |
| 228 | { | 228 | { |
| 229 | int module = irq_desc_get_irq(desc) - 2; | ||
| 229 | u32 irq; | 230 | u32 irq; |
| 231 | int hwirq; | ||
| 230 | 232 | ||
| 231 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); | 233 | irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); |
| 232 | if (irq == 0) | 234 | if (irq == 0) |
| @@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) | |||
| 237 | * other bits might be bogus | 239 | * other bits might be bogus |
| 238 | */ | 240 | */ |
| 239 | irq = __fls(irq); | 241 | irq = __fls(irq); |
| 240 | do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); | 242 | hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); |
| 243 | generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); | ||
| 241 | 244 | ||
| 242 | /* if this is a EBU irq, we need to ack it or get a deadlock */ | 245 | /* if this is a EBU irq, we need to ack it or get a deadlock */ |
| 243 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) | 246 | if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) |
| @@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) | |||
| 245 | LTQ_EBU_PCC_ISTAT); | 248 | LTQ_EBU_PCC_ISTAT); |
| 246 | } | 249 | } |
| 247 | 250 | ||
| 248 | #define DEFINE_HWx_IRQDISPATCH(x) \ | ||
| 249 | static void ltq_hw ## x ## _irqdispatch(void) \ | ||
| 250 | { \ | ||
| 251 | ltq_hw_irqdispatch(x); \ | ||
| 252 | } | ||
| 253 | DEFINE_HWx_IRQDISPATCH(0) | ||
| 254 | DEFINE_HWx_IRQDISPATCH(1) | ||
| 255 | DEFINE_HWx_IRQDISPATCH(2) | ||
| 256 | DEFINE_HWx_IRQDISPATCH(3) | ||
| 257 | DEFINE_HWx_IRQDISPATCH(4) | ||
| 258 | |||
| 259 | #if MIPS_CPU_TIMER_IRQ == 7 | ||
| 260 | static void ltq_hw5_irqdispatch(void) | ||
| 261 | { | ||
| 262 | do_IRQ(MIPS_CPU_TIMER_IRQ); | ||
| 263 | } | ||
| 264 | #else | ||
| 265 | DEFINE_HWx_IRQDISPATCH(5) | ||
| 266 | #endif | ||
| 267 | |||
| 268 | static void ltq_hw_irq_handler(struct irq_desc *desc) | ||
| 269 | { | ||
| 270 | ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); | ||
| 271 | } | ||
| 272 | |||
| 273 | asmlinkage void plat_irq_dispatch(void) | ||
| 274 | { | ||
| 275 | unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; | ||
| 276 | int irq; | ||
| 277 | |||
| 278 | if (!pending) { | ||
| 279 | spurious_interrupt(); | ||
| 280 | return; | ||
| 281 | } | ||
| 282 | |||
| 283 | pending >>= CAUSEB_IP; | ||
| 284 | while (pending) { | ||
| 285 | irq = fls(pending) - 1; | ||
| 286 | do_IRQ(MIPS_CPU_IRQ_BASE + irq); | ||
| 287 | pending &= ~BIT(irq); | ||
| 288 | } | ||
| 289 | } | ||
| 290 | |||
| 291 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) | 251 | static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) |
| 292 | { | 252 | { |
| 293 | struct irq_chip *chip = <q_irq_type; | 253 | struct irq_chip *chip = <q_irq_type; |
| @@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
| 343 | for (i = 0; i < MAX_IM; i++) | 303 | for (i = 0; i < MAX_IM; i++) |
| 344 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); | 304 | irq_set_chained_handler(i + 2, ltq_hw_irq_handler); |
| 345 | 305 | ||
| 346 | if (cpu_has_vint) { | ||
| 347 | pr_info("Setting up vectored interrupts\n"); | ||
| 348 | set_vi_handler(2, ltq_hw0_irqdispatch); | ||
| 349 | set_vi_handler(3, ltq_hw1_irqdispatch); | ||
| 350 | set_vi_handler(4, ltq_hw2_irqdispatch); | ||
| 351 | set_vi_handler(5, ltq_hw3_irqdispatch); | ||
| 352 | set_vi_handler(6, ltq_hw4_irqdispatch); | ||
| 353 | set_vi_handler(7, ltq_hw5_irqdispatch); | ||
| 354 | } | ||
| 355 | |||
| 356 | ltq_domain = irq_domain_add_linear(node, | 306 | ltq_domain = irq_domain_add_linear(node, |
| 357 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, | 307 | (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, |
| 358 | &irq_domain_ops, 0); | 308 | &irq_domain_ops, 0); |
| 359 | 309 | ||
| 360 | #ifndef CONFIG_MIPS_MT_SMP | ||
| 361 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||
| 362 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
| 363 | #else | ||
| 364 | set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | | ||
| 365 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||
| 366 | #endif | ||
| 367 | |||
| 368 | /* tell oprofile which irq to use */ | 310 | /* tell oprofile which irq to use */ |
| 369 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); | 311 | ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
| 370 | 312 | ||
| 371 | /* | ||
| 372 | * if the timer irq is not one of the mips irqs we need to | ||
| 373 | * create a mapping | ||
| 374 | */ | ||
| 375 | if (MIPS_CPU_TIMER_IRQ != 7) | ||
| 376 | irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); | ||
| 377 | |||
| 378 | /* the external interrupts are optional and xway only */ | 313 | /* the external interrupts are optional and xway only */ |
| 379 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); | 314 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); |
| 380 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { | 315 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { |
| @@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int); | |||
| 411 | 346 | ||
| 412 | unsigned int get_c0_compare_int(void) | 347 | unsigned int get_c0_compare_int(void) |
| 413 | { | 348 | { |
| 414 | return MIPS_CPU_TIMER_IRQ; | 349 | return CP0_LEGACY_COMPARE_IRQ; |
| 415 | } | 350 | } |
| 416 | 351 | ||
| 417 | static struct of_device_id __initdata of_irq_ids[] = { | 352 | static struct of_device_id __initdata of_irq_ids[] = { |
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
| @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) | |||
| 369 | int irq; | 369 | int irq; |
| 370 | struct irq_chip *msi; | 370 | struct irq_chip *msi; |
| 371 | 371 | ||
| 372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | 372 | if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { |
| 373 | return 0; | ||
| 374 | } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { | ||
| 373 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; | 375 | msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; |
| 374 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; | 376 | msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; |
| 375 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; | 377 | msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; |
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
| @@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S | |||
| 3 | 3 | ||
| 4 | KBUILD_DEFCONFIG := defconfig | 4 | KBUILD_DEFCONFIG := defconfig |
| 5 | 5 | ||
| 6 | comma = , | ||
| 7 | |||
| 8 | |||
| 9 | ifdef CONFIG_FUNCTION_TRACER | 6 | ifdef CONFIG_FUNCTION_TRACER |
| 10 | arch-y += -malways-save-lp -mno-relax | 7 | arch-y += -malways-save-lp -mno-relax |
| 11 | endif | 8 | endif |
| @@ -54,8 +51,6 @@ endif | |||
| 54 | boot := arch/nds32/boot | 51 | boot := arch/nds32/boot |
| 55 | core-y += $(boot)/dts/ | 52 | core-y += $(boot)/dts/ |
| 56 | 53 | ||
| 57 | .PHONY: FORCE | ||
| 58 | |||
| 59 | Image: vmlinux | 54 | Image: vmlinux |
| 60 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 55 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
| 61 | 56 | ||
| @@ -68,9 +63,6 @@ prepare: vdso_prepare | |||
| 68 | vdso_prepare: prepare0 | 63 | vdso_prepare: prepare0 |
| 69 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h | 64 | $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h |
| 70 | 65 | ||
| 71 | CLEAN_FILES += include/asm-nds32/constants.h* | ||
| 72 | |||
| 73 | # We use MRPROPER_FILES and CLEAN_FILES now | ||
| 74 | archclean: | 66 | archclean: |
| 75 | $(Q)$(MAKE) $(clean)=$(boot) | 67 | $(Q)$(MAKE) $(clean)=$(boot) |
| 76 | 68 | ||
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
| @@ -20,7 +20,6 @@ | |||
| 20 | KBUILD_DEFCONFIG := or1ksim_defconfig | 20 | KBUILD_DEFCONFIG := or1ksim_defconfig |
| 21 | 21 | ||
| 22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | 22 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S |
| 23 | LDFLAGS_vmlinux := | ||
| 24 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | 23 | LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) |
| 25 | 24 | ||
| 26 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ | 25 | KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ |
| @@ -50,5 +49,3 @@ else | |||
| 50 | BUILTIN_DTB := n | 49 | BUILTIN_DTB := n |
| 51 | endif | 50 | endif |
| 52 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ | 51 | core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ |
| 53 | |||
| 54 | all: vmlinux | ||
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
| @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
| 47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
| 48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
| 49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
| 50 | PERF_REG_POWERPC_MMCRA, | ||
| 50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
| 51 | }; | 52 | }; |
| 52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
| @@ -852,11 +852,12 @@ start_here: | |||
| 852 | 852 | ||
| 853 | /* set up the PTE pointers for the Abatron bdiGDB. | 853 | /* set up the PTE pointers for the Abatron bdiGDB. |
| 854 | */ | 854 | */ |
| 855 | tovirt(r6,r6) | ||
| 856 | lis r5, abatron_pteptrs@h | 855 | lis r5, abatron_pteptrs@h |
| 857 | ori r5, r5, abatron_pteptrs@l | 856 | ori r5, r5, abatron_pteptrs@l |
| 858 | stw r5, 0xf0(0) /* Must match your Abatron config file */ | 857 | stw r5, 0xf0(0) /* Must match your Abatron config file */ |
| 859 | tophys(r5,r5) | 858 | tophys(r5,r5) |
| 859 | lis r6, swapper_pg_dir@h | ||
| 860 | ori r6, r6, swapper_pg_dir@l | ||
| 860 | stw r6, 0(r5) | 861 | stw r6, 0(r5) |
| 861 | 862 | ||
| 862 | /* Now turn on the MMU for real! */ | 863 | /* Now turn on the MMU for real! */ |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
| @@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
| 755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, | 755 | if (restore_tm_sigcontexts(current, &uc->uc_mcontext, |
| 756 | &uc_transact->uc_mcontext)) | 756 | &uc_transact->uc_mcontext)) |
| 757 | goto badframe; | 757 | goto badframe; |
| 758 | } | 758 | } else |
| 759 | #endif | 759 | #endif |
| 760 | /* Fall through, for non-TM restore */ | 760 | { |
| 761 | if (!MSR_TM_ACTIVE(msr)) { | ||
| 762 | /* | 761 | /* |
| 762 | * Fall through, for non-TM restore | ||
| 763 | * | ||
| 763 | * Unset MSR[TS] on the thread regs since MSR from user | 764 | * Unset MSR[TS] on the thread regs since MSR from user |
| 764 | * context does not have MSR active, and recheckpoint was | 765 | * context does not have MSR active, and recheckpoint was |
| 765 | * not called since restore_tm_sigcontexts() was not called | 766 | * not called since restore_tm_sigcontexts() was not called |
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
| @@ -967,13 +967,6 @@ out: | |||
| 967 | } | 967 | } |
| 968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 968 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 969 | 969 | ||
| 970 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
| 971 | unsigned long __init arch_syscall_addr(int nr) | ||
| 972 | { | ||
| 973 | return sys_call_table[nr*2]; | ||
| 974 | } | ||
| 975 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
| 976 | |||
| 977 | #ifdef PPC64_ELF_ABI_v1 | 970 | #ifdef PPC64_ELF_ABI_v1 |
| 978 | char *arch_ftrace_match_adjust(char *str, const char *search) | 971 | char *arch_ftrace_match_adjust(char *str, const char *search) |
| 979 | { | 972 | { |
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
| @@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { | |||
| 70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), | 70 | PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), |
| 71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), | 71 | PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), |
| 72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), | 72 | PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), |
| 73 | PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 75 | u64 perf_reg_value(struct pt_regs *regs, int idx) | 76 | u64 perf_reg_value(struct pt_regs *regs, int idx) |
| @@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) | |||
| 83 | !is_sier_available())) | 84 | !is_sier_available())) |
| 84 | return 0; | 85 | return 0; |
| 85 | 86 | ||
| 87 | if (idx == PERF_REG_POWERPC_MMCRA && | ||
| 88 | (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || | ||
| 89 | IS_ENABLED(CONFIG_PPC32))) | ||
| 90 | return 0; | ||
| 91 | |||
| 86 | return regs_get_register(regs, pt_regs_offset[idx]); | 92 | return regs_get_register(regs, pt_regs_offset[idx]); |
| 87 | } | 93 | } |
| 88 | 94 | ||
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
| @@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
| 237 | continue; | 237 | continue; |
| 238 | 238 | ||
| 239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); | 239 | seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); |
| 240 | seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); | 240 | seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys)); |
| 241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); | 241 | seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); |
| 242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); | 242 | seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); |
| 243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); | 243 | seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); |
| 244 | 244 | ||
| 245 | seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); | 245 | seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys)); |
| 246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); | 246 | seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); |
| 247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); | 247 | seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); |
| 248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); | 248 | seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); |
| @@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v) | |||
| 252 | blk->size, blk->owner); | 252 | blk->size, blk->owner); |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); | 255 | seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys)); |
| 256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); | 256 | seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); |
| 257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); | 257 | seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); |
| 258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); | 258 | seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..9438fa0fc355 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
| @@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void) | |||
| 538 | /* see if there is a keyboard in the device tree | 538 | /* see if there is a keyboard in the device tree |
| 539 | with a parent of type "adb" */ | 539 | with a parent of type "adb" */ |
| 540 | for_each_node_by_name(kbd, "keyboard") | 540 | for_each_node_by_name(kbd, "keyboard") |
| 541 | if (kbd->parent && kbd->parent->type | 541 | if (of_node_is_type(kbd->parent, "adb")) |
| 542 | && strcmp(kbd->parent->type, "adb") == 0) | ||
| 543 | break; | 542 | break; |
| 544 | of_node_put(kbd); | 543 | of_node_put(kbd); |
| 545 | if (kbd) | 544 | if (kbd) |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
| @@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe) | |||
| 564 | } | 564 | } |
| 565 | } else { | 565 | } else { |
| 566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ | 566 | /* Create a group for 1 GPU and attached NPUs for POWER8 */ |
| 567 | pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); | 567 | pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL); |
| 568 | table_group = &pe->npucomp->table_group; | 568 | table_group = &pe->npucomp->table_group; |
| 569 | table_group->ops = &pnv_npu_peers_ops; | 569 | table_group->ops = &pnv_npu_peers_ops; |
| 570 | iommu_register_group(table_group, hose->global_number, | 570 | iommu_register_group(table_group, hose->global_number, |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..7db3119f8a5b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
| @@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void) | |||
| 2681 | list_for_each_entry(hose, &hose_list, list_node) { | 2681 | list_for_each_entry(hose, &hose_list, list_node) { |
| 2682 | phb = hose->private_data; | 2682 | phb = hose->private_data; |
| 2683 | 2683 | ||
| 2684 | if (phb->type == PNV_PHB_NPU_NVLINK) | 2684 | if (phb->type == PNV_PHB_NPU_NVLINK || |
| 2685 | phb->type == PNV_PHB_NPU_OCAPI) | ||
| 2685 | continue; | 2686 | continue; |
| 2686 | 2687 | ||
| 2687 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { | 2688 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
| @@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void) | |||
| 264 | if (!of_device_is_compatible(nvdn->parent, | 264 | if (!of_device_is_compatible(nvdn->parent, |
| 265 | "ibm,power9-npu")) | 265 | "ibm,power9-npu")) |
| 266 | continue; | 266 | continue; |
| 267 | #ifdef CONFIG_PPC_POWERNV | ||
| 267 | WARN_ON_ONCE(pnv_npu2_init(hose)); | 268 | WARN_ON_ONCE(pnv_npu2_init(hose)); |
| 269 | #endif | ||
| 268 | break; | 270 | break; |
| 269 | } | 271 | } |
| 270 | } | 272 | } |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15af091611e2..4b4a7f32b68e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
| @@ -617,7 +617,7 @@ config X86_INTEL_QUARK | |||
| 617 | 617 | ||
| 618 | config X86_INTEL_LPSS | 618 | config X86_INTEL_LPSS |
| 619 | bool "Intel Low Power Subsystem Support" | 619 | bool "Intel Low Power Subsystem Support" |
| 620 | depends on X86 && ACPI | 620 | depends on X86 && ACPI && PCI |
| 621 | select COMMON_CLK | 621 | select COMMON_CLK |
| 622 | select PINCTRL | 622 | select PINCTRL |
| 623 | select IOSF_MBI | 623 | select IOSF_MBI |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..780f2b42c8ef 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
| @@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t | |||
| 711 | { | 711 | { |
| 712 | if (unlikely(!access_ok(ptr,len))) | 712 | if (unlikely(!access_ok(ptr,len))) |
| 713 | return 0; | 713 | return 0; |
| 714 | __uaccess_begin(); | 714 | __uaccess_begin_nospec(); |
| 715 | return 1; | 715 | return 1; |
| 716 | } | 716 | } |
| 717 | #define user_access_begin(a,b) user_access_begin(a,b) | 717 | #define user_access_begin(a,b) user_access_begin(a,b) |
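The uaccess.h change switches user_access_begin() to __uaccess_begin_nospec(), which adds a speculation barrier after the access_ok() check so the check cannot be speculatively bypassed before the unsafe user accesses that follow. A hedged sketch of how a caller typically uses this API (the wrapper function itself is hypothetical):

static int read_u32_from_user(u32 __user *uptr, u32 *out)
{
        u32 val;

        if (!user_access_begin(uptr, sizeof(*uptr)))
                return -EFAULT;
        unsafe_get_user(val, uptr, efault);     /* no per-access checks */
        user_access_end();

        *out = val;
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}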
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 2f6787fc7106..c54a493e139a 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c | |||
| @@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) | |||
| 898 | val = native_read_msr_safe(msr, err); | 898 | val = native_read_msr_safe(msr, err); |
| 899 | switch (msr) { | 899 | switch (msr) { |
| 900 | case MSR_IA32_APICBASE: | 900 | case MSR_IA32_APICBASE: |
| 901 | #ifdef CONFIG_X86_X2APIC | 901 | val &= ~X2APIC_ENABLE; |
| 902 | if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) | ||
| 903 | #endif | ||
| 904 | val &= ~X2APIC_ENABLE; | ||
| 905 | break; | 902 | break; |
| 906 | } | 903 | } |
| 907 | return val; | 904 | return val; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 72bf446c3fee..6e29794573b7 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
| @@ -361,8 +361,6 @@ void xen_timer_resume(void) | |||
| 361 | { | 361 | { |
| 362 | int cpu; | 362 | int cpu; |
| 363 | 363 | ||
| 364 | pvclock_resume(); | ||
| 365 | |||
| 366 | if (xen_clockevent != &xen_vcpuop_clockevent) | 364 | if (xen_clockevent != &xen_vcpuop_clockevent) |
| 367 | return; | 365 | return; |
| 368 | 366 | ||
| @@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { | |||
| 379 | }; | 377 | }; |
| 380 | 378 | ||
| 381 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; | 379 | static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; |
| 380 | static u64 xen_clock_value_saved; | ||
| 382 | 381 | ||
| 383 | void xen_save_time_memory_area(void) | 382 | void xen_save_time_memory_area(void) |
| 384 | { | 383 | { |
| 385 | struct vcpu_register_time_memory_area t; | 384 | struct vcpu_register_time_memory_area t; |
| 386 | int ret; | 385 | int ret; |
| 387 | 386 | ||
| 387 | xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; | ||
| 388 | |||
| 388 | if (!xen_clock) | 389 | if (!xen_clock) |
| 389 | return; | 390 | return; |
| 390 | 391 | ||
| @@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) | |||
| 404 | int ret; | 405 | int ret; |
| 405 | 406 | ||
| 406 | if (!xen_clock) | 407 | if (!xen_clock) |
| 407 | return; | 408 | goto out; |
| 408 | 409 | ||
| 409 | t.addr.v = &xen_clock->pvti; | 410 | t.addr.v = &xen_clock->pvti; |
| 410 | 411 | ||
| @@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) | |||
| 421 | if (ret != 0) | 422 | if (ret != 0) |
| 422 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", | 423 | pr_notice("Cannot restore secondary vcpu_time_info (err %d)", |
| 423 | ret); | 424 | ret); |
| 425 | |||
| 426 | out: | ||
| 427 | /* Need pvclock_resume() before using xen_clocksource_read(). */ | ||
| 428 | pvclock_resume(); | ||
| 429 | xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; | ||
| 424 | } | 430 | } |
| 425 | 431 | ||
| 426 | static void xen_setup_vsyscall_time_info(void) | 432 | static void xen_setup_vsyscall_time_info(void) |
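The xen/time.c hunks save the clock value relative to the scheduler-clock offset at suspend and rebase the offset at resume, with pvclock_resume() now running before the clocksource is read again (as the added comment notes). The plain-C shape of that save/rebase pattern, with hypothetical names standing in for xen_sched_clock_offset and xen_clock_value_saved:

#include <stdint.h>

static uint64_t offset;     /* stands in for xen_sched_clock_offset */
static uint64_t saved;      /* stands in for xen_clock_value_saved  */

/* At suspend: remember the current reading minus the offset. */
static void save_clock(uint64_t now)    { saved  = now - offset; }

/* At resume: rebase the offset so the derived clock does not jump. */
static void restore_clock(uint64_t now) { offset = now - saved;  }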
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 63e0f12be7c9..72adbbe975d5 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c | |||
| @@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, | |||
| 1154 | } | 1154 | } |
| 1155 | 1155 | ||
| 1156 | /** | 1156 | /** |
| 1157 | * __bfq_deactivate_entity - deactivate an entity from its service tree. | 1157 | * __bfq_deactivate_entity - update sched_data and service trees for |
| 1158 | * @entity: the entity to deactivate. | 1158 | * entity, so as to represent entity as inactive |
| 1159 | * @entity: the entity being deactivated. | ||
| 1159 | * @ins_into_idle_tree: if false, the entity will not be put into the | 1160 | * @ins_into_idle_tree: if false, the entity will not be put into the |
| 1160 | * idle tree. | 1161 | * idle tree. |
| 1161 | * | 1162 | * |
| 1162 | * Deactivates an entity, independently of its previous state. Must | 1163 | * If necessary and allowed, puts entity into the idle tree. NOTE: |
| 1163 | * be invoked only if entity is on a service tree. Extracts the entity | 1164 | * entity may be on no tree if in service. |
| 1164 | * from that tree, and if necessary and allowed, puts it into the idle | ||
| 1165 | * tree. | ||
| 1166 | */ | 1165 | */ |
| 1167 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) | 1166 | bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) |
| 1168 | { | 1167 | { |
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c index fb2c82c351e4..038cb627c868 100644 --- a/block/blk-mq-debugfs-zoned.c +++ b/block/blk-mq-debugfs-zoned.c | |||
| @@ -1,8 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. | 3 | * Copyright (C) 2017 Western Digital Corporation or its affiliates. |
| 4 | * | ||
| 5 | * This file is released under the GPL. | ||
| 6 | */ | 4 | */ |
| 7 | 5 | ||
| 8 | #include <linux/blkdev.h> | 6 | #include <linux/blkdev.h> |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ba37b9e15e9..8f5b533764ca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1906 | { | 1906 | { |
| 1907 | const int is_sync = op_is_sync(bio->bi_opf); | 1907 | const int is_sync = op_is_sync(bio->bi_opf); |
| 1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); | 1908 | const int is_flush_fua = op_is_flush(bio->bi_opf); |
| 1909 | struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; | 1909 | struct blk_mq_alloc_data data = { .flags = 0}; |
| 1910 | struct request *rq; | 1910 | struct request *rq; |
| 1911 | struct blk_plug *plug; | 1911 | struct blk_plug *plug; |
| 1912 | struct request *same_queue_rq = NULL; | 1912 | struct request *same_queue_rq = NULL; |
| @@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1928 | 1928 | ||
| 1929 | rq_qos_throttle(q, bio); | 1929 | rq_qos_throttle(q, bio); |
| 1930 | 1930 | ||
| 1931 | data.cmd_flags = bio->bi_opf; | ||
| 1931 | rq = blk_mq_get_request(q, bio, &data); | 1932 | rq = blk_mq_get_request(q, bio, &data); |
| 1932 | if (unlikely(!rq)) { | 1933 | if (unlikely(!rq)) { |
| 1933 | rq_qos_cleanup(q, bio); | 1934 | rq_qos_cleanup(q, bio); |
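The blk-mq hunk leans on a C guarantee worth spelling out: a designated initializer zero-initializes every member that is not named, so cmd_flags can safely be left out of the initializer and assigned just before the request is allocated. A stand-alone illustration with a hypothetical struct:

struct alloc_data_like {
        unsigned int flags;
        unsigned int cmd_flags;
        void *ctx;
};

static void example(unsigned int opf)
{
        /* cmd_flags is 0 and ctx is NULL here, even though only .flags
         * is named in the initializer. */
        struct alloc_data_like data = { .flags = 0 };

        data.cmd_flags = opf;   /* filled in later, just before use */
}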
diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 6651e713c45d..5564e73266a6 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c | |||
| @@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 539 | ictx = skcipher_instance_ctx(inst); | 539 | ictx = skcipher_instance_ctx(inst); |
| 540 | 540 | ||
| 541 | /* Stream cipher, e.g. "xchacha12" */ | 541 | /* Stream cipher, e.g. "xchacha12" */ |
| 542 | crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, | ||
| 543 | skcipher_crypto_instance(inst)); | ||
| 542 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, | 544 | err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, |
| 543 | 0, crypto_requires_sync(algt->type, | 545 | 0, crypto_requires_sync(algt->type, |
| 544 | algt->mask)); | 546 | algt->mask)); |
| @@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 547 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); | 549 | streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); |
| 548 | 550 | ||
| 549 | /* Block cipher, e.g. "aes" */ | 551 | /* Block cipher, e.g. "aes" */ |
| 552 | crypto_set_spawn(&ictx->blockcipher_spawn, | ||
| 553 | skcipher_crypto_instance(inst)); | ||
| 550 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, | 554 | err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, |
| 551 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); | 555 | CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); |
| 552 | if (err) | 556 | if (err) |
diff --git a/crypto/authenc.c b/crypto/authenc.c index 37f54d1b2f66..4be293a4b5f0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, | |||
| 58 | return -EINVAL; | 58 | return -EINVAL; |
| 59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 59 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
| 60 | return -EINVAL; | 60 | return -EINVAL; |
| 61 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 61 | |
| 62 | /* | ||
| 63 | * RTA_OK() didn't align the rtattr's payload when validating that it | ||
| 64 | * fits in the buffer. Yet, the keys should start on the next 4-byte | ||
| 65 | * aligned boundary. To avoid confusion, require that the rtattr | ||
| 66 | * payload be exactly the param struct, which has a 4-byte aligned size. | ||
| 67 | */ | ||
| 68 | if (RTA_PAYLOAD(rta) != sizeof(*param)) | ||
| 62 | return -EINVAL; | 69 | return -EINVAL; |
| 70 | BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); | ||
| 63 | 71 | ||
| 64 | param = RTA_DATA(rta); | 72 | param = RTA_DATA(rta); |
| 65 | keys->enckeylen = be32_to_cpu(param->enckeylen); | 73 | keys->enckeylen = be32_to_cpu(param->enckeylen); |
| 66 | 74 | ||
| 67 | key += RTA_ALIGN(rta->rta_len); | 75 | key += rta->rta_len; |
| 68 | keylen -= RTA_ALIGN(rta->rta_len); | 76 | keylen -= rta->rta_len; |
| 69 | 77 | ||
| 70 | if (keylen < keys->enckeylen) | 78 | if (keylen < keys->enckeylen) |
| 71 | return -EINVAL; | 79 | return -EINVAL; |
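Several driver hunks later in this listing (crypto/bcm and ccree below) switch from open-coded rtattr parsing to the helper exported from this file. A hedged usage sketch; the wrapper function is hypothetical, but crypto_authenc_extractkeys() and struct crypto_authenc_keys come from <crypto/authenc.h>:

#include <crypto/authenc.h>

static int example_setkey(const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (err)
                return -EINVAL;

        /* keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen
         * point into the caller's key blob; validate the lengths and
         * copy the material out before the blob goes away. */
        return 0;
}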
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 80a25cc04aec..4741fe89ba2c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
| @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | |||
| 279 | struct aead_request *req = areq->data; | 279 | struct aead_request *req = areq->data; |
| 280 | 280 | ||
| 281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); | 281 | err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); |
| 282 | aead_request_complete(req, err); | 282 | authenc_esn_request_complete(req, err); |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) | 285 | static int crypto_authenc_esn_decrypt(struct aead_request *req) |
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 9a5c60f08aad..c0cf87ae7ef6 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c | |||
| @@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) | |||
| 100 | 100 | ||
| 101 | for (i = 0; i <= 63; i++) { | 101 | for (i = 0; i <= 63; i++) { |
| 102 | 102 | ||
| 103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); | 103 | ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); |
| 104 | 104 | ||
| 105 | ss2 = ss1 ^ rol32(a, 12); | 105 | ss2 = ss1 ^ rol32(a, 12); |
| 106 | 106 | ||
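The sm3 fix masks the rotate count because the round index i runs up to 63 while a 32-bit rotate only has 32 distinct counts; with a shift-based implementation, a count of 32 or more is undefined behaviour. A safe stand-alone rotate for reference (not necessarily the kernel's exact rol32() definition):

#include <stdint.h>

static inline uint32_t rol32_safe(uint32_t word, unsigned int shift)
{
        shift &= 31;    /* keep the count in [0, 31], as "i & 31" does above */
        return (word << shift) | (word >> ((32 - shift) & 31));
}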
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 7c6afc111d76..bb857421c2e8 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
| @@ -41,7 +41,8 @@ acpi-y += ec.o | |||
| 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
| 42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o | 42 | acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o |
| 43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o | 43 | obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o |
| 44 | acpi-y += acpi_lpss.o acpi_apd.o | 44 | acpi-$(CONFIG_PCI) += acpi_lpss.o |
| 45 | acpi-y += acpi_apd.o | ||
| 45 | acpi-y += acpi_platform.o | 46 | acpi-y += acpi_platform.o |
| 46 | acpi-y += acpi_pnp.o | 47 | acpi-y += acpi_pnp.o |
| 47 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o | 48 | acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 99d820a693a8..5c093ce01bcd 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1054,18 +1054,6 @@ void __init acpi_early_init(void) | |||
| 1054 | goto error0; | 1054 | goto error0; |
| 1055 | } | 1055 | } |
| 1056 | 1056 | ||
| 1057 | /* | ||
| 1058 | * ACPI 2.0 requires the EC driver to be loaded and work before | ||
| 1059 | * the EC device is found in the namespace (i.e. before | ||
| 1060 | * acpi_load_tables() is called). | ||
| 1061 | * | ||
| 1062 | * This is accomplished by looking for the ECDT table, and getting | ||
| 1063 | * the EC parameters out of that. | ||
| 1064 | * | ||
| 1065 | * Ignore the result. Not having an ECDT is not fatal. | ||
| 1066 | */ | ||
| 1067 | status = acpi_ec_ecdt_probe(); | ||
| 1068 | |||
| 1069 | #ifdef CONFIG_X86 | 1057 | #ifdef CONFIG_X86 |
| 1070 | if (!acpi_ioapic) { | 1058 | if (!acpi_ioapic) { |
| 1071 | /* compatible (0) means level (3) */ | 1059 | /* compatible (0) means level (3) */ |
| @@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void) | |||
| 1142 | goto error1; | 1130 | goto error1; |
| 1143 | } | 1131 | } |
| 1144 | 1132 | ||
| 1133 | /* | ||
| 1134 | * ACPI 2.0 requires the EC driver to be loaded and work before the EC | ||
| 1135 | * device is found in the namespace. | ||
| 1136 | * | ||
| 1137 | * This is accomplished by looking for the ECDT table and getting the EC | ||
| 1138 | * parameters out of that. | ||
| 1139 | * | ||
| 1140 | * Do that before calling acpi_initialize_objects() which may trigger EC | ||
| 1141 | * address space accesses. | ||
| 1142 | */ | ||
| 1143 | acpi_ec_ecdt_probe(); | ||
| 1144 | |||
| 1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); | 1145 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); |
| 1146 | if (ACPI_FAILURE(status)) { | 1146 | if (ACPI_FAILURE(status)) { |
| 1147 | printk(KERN_ERR PREFIX | 1147 | printk(KERN_ERR PREFIX |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 7e6952edb5b0..6a9e1fb8913a 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -81,7 +81,11 @@ void acpi_debugfs_init(void); | |||
| 81 | #else | 81 | #else |
| 82 | static inline void acpi_debugfs_init(void) { return; } | 82 | static inline void acpi_debugfs_init(void) { return; } |
| 83 | #endif | 83 | #endif |
| 84 | #ifdef CONFIG_PCI | ||
| 84 | void acpi_lpss_init(void); | 85 | void acpi_lpss_init(void); |
| 86 | #else | ||
| 87 | static inline void acpi_lpss_init(void) {} | ||
| 88 | #endif | ||
| 85 | 89 | ||
| 86 | void acpi_apd_init(void); | 90 | void acpi_apd_init(void); |
| 87 | 91 | ||
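The internal.h hunk uses the standard kernel idiom for compiling a facility out: the real prototype lives under the config option and everyone else gets an empty static inline stub, so call sites need no #ifdefs of their own. Generic shape, with CONFIG_FOO and foo_init() as placeholder names:

#ifdef CONFIG_FOO
void foo_init(void);
#else
static inline void foo_init(void) {}
#endif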
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 011d3db19c80..5143e11e3b0f 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <acpi/nfit.h> | 26 | #include <acpi/nfit.h> |
| 27 | #include "intel.h" | 27 | #include "intel.h" |
| 28 | #include "nfit.h" | 28 | #include "nfit.h" |
| 29 | #include "intel.h" | ||
| 30 | 29 | ||
| 31 | /* | 30 | /* |
| 32 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is | 31 | * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is |
| @@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) | |||
| 78 | } | 77 | } |
| 79 | EXPORT_SYMBOL(to_nfit_uuid); | 78 | EXPORT_SYMBOL(to_nfit_uuid); |
| 80 | 79 | ||
| 81 | static struct acpi_nfit_desc *to_acpi_nfit_desc( | ||
| 82 | struct nvdimm_bus_descriptor *nd_desc) | ||
| 83 | { | ||
| 84 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); | ||
| 85 | } | ||
| 86 | |||
| 87 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) | 80 | static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) |
| 88 | { | 81 | { |
| 89 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; | 82 | struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; |
| @@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) | |||
| 419 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | 412 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, |
| 420 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) | 413 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) |
| 421 | { | 414 | { |
| 422 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 415 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 423 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 416 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| 424 | union acpi_object in_obj, in_buf, *out_obj; | 417 | union acpi_object in_obj, in_buf, *out_obj; |
| 425 | const struct nd_cmd_desc *desc = NULL; | 418 | const struct nd_cmd_desc *desc = NULL; |
| @@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
| 721 | struct acpi_nfit_memory_map *memdev; | 714 | struct acpi_nfit_memory_map *memdev; |
| 722 | struct acpi_nfit_desc *acpi_desc; | 715 | struct acpi_nfit_desc *acpi_desc; |
| 723 | struct nfit_mem *nfit_mem; | 716 | struct nfit_mem *nfit_mem; |
| 717 | u16 physical_id; | ||
| 724 | 718 | ||
| 725 | mutex_lock(&acpi_desc_lock); | 719 | mutex_lock(&acpi_desc_lock); |
| 726 | list_for_each_entry(acpi_desc, &acpi_descs, list) { | 720 | list_for_each_entry(acpi_desc, &acpi_descs, list) { |
| @@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags) | |||
| 728 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { | 722 | list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { |
| 729 | memdev = __to_nfit_memdev(nfit_mem); | 723 | memdev = __to_nfit_memdev(nfit_mem); |
| 730 | if (memdev->device_handle == device_handle) { | 724 | if (memdev->device_handle == device_handle) { |
| 725 | *flags = memdev->flags; | ||
| 726 | physical_id = memdev->physical_id; | ||
| 731 | mutex_unlock(&acpi_desc->init_mutex); | 727 | mutex_unlock(&acpi_desc->init_mutex); |
| 732 | mutex_unlock(&acpi_desc_lock); | 728 | mutex_unlock(&acpi_desc_lock); |
| 733 | *flags = memdev->flags; | 729 | return physical_id; |
| 734 | return memdev->physical_id; | ||
| 735 | } | 730 | } |
| 736 | } | 731 | } |
| 737 | mutex_unlock(&acpi_desc->init_mutex); | 732 | mutex_unlock(&acpi_desc->init_mutex); |
| @@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, | |||
| 2231 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); | 2226 | nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); |
| 2232 | if (!nd_set) | 2227 | if (!nd_set) |
| 2233 | return -ENOMEM; | 2228 | return -ENOMEM; |
| 2234 | ndr_desc->nd_set = nd_set; | ||
| 2235 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); | 2229 | guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); |
| 2236 | 2230 | ||
| 2237 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); | 2231 | info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); |
| @@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init); | |||
| 3367 | 3361 | ||
| 3368 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | 3362 | static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) |
| 3369 | { | 3363 | { |
| 3370 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3364 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 3371 | struct device *dev = acpi_desc->dev; | 3365 | struct device *dev = acpi_desc->dev; |
| 3372 | 3366 | ||
| 3373 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ | 3367 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
| @@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
| 3384 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, | 3378 | static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, |
| 3385 | struct nvdimm *nvdimm, unsigned int cmd) | 3379 | struct nvdimm *nvdimm, unsigned int cmd) |
| 3386 | { | 3380 | { |
| 3387 | struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); | 3381 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
| 3388 | 3382 | ||
| 3389 | if (nvdimm) | 3383 | if (nvdimm) |
| 3390 | return 0; | 3384 | return 0; |
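The nfit_get_smbios_id() hunk fixes a touch-after-unlock: the memdev fields are now copied out while the locks are still held. The same rule in a generic sketch, with hypothetical struct, list, and lock names:

static int lookup_physical_id(u32 handle, u16 *flags)
{
        struct item *it;
        int physical_id;

        mutex_lock(&list_lock);
        list_for_each_entry(it, &item_list, node) {
                if (it->handle != handle)
                        continue;
                *flags = it->flags;             /* read under the lock */
                physical_id = it->physical_id;
                mutex_unlock(&list_lock);
                return physical_id;             /* no dereference after unlock */
        }
        mutex_unlock(&list_lock);
        return -ENODEV;
}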
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 850b2927b4e7..f70de71f79d6 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c | |||
| @@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm, | |||
| 146 | 146 | ||
| 147 | static void nvdimm_invalidate_cache(void); | 147 | static void nvdimm_invalidate_cache(void); |
| 148 | 148 | ||
| 149 | static int intel_security_unlock(struct nvdimm *nvdimm, | 149 | static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, |
| 150 | const struct nvdimm_key_data *key_data) | 150 | const struct nvdimm_key_data *key_data) |
| 151 | { | 151 | { |
| 152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 152 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| @@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm, | |||
| 227 | return 0; | 227 | return 0; |
| 228 | } | 228 | } |
| 229 | 229 | ||
| 230 | static int intel_security_erase(struct nvdimm *nvdimm, | 230 | static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, |
| 231 | const struct nvdimm_key_data *key, | 231 | const struct nvdimm_key_data *key, |
| 232 | enum nvdimm_passphrase_type ptype) | 232 | enum nvdimm_passphrase_type ptype) |
| 233 | { | 233 | { |
| @@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm, | |||
| 276 | return 0; | 276 | return 0; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | static int intel_security_query_overwrite(struct nvdimm *nvdimm) | 279 | static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) |
| 280 | { | 280 | { |
| 281 | int rc; | 281 | int rc; |
| 282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 282 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
| @@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm) | |||
| 313 | return 0; | 313 | return 0; |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static int intel_security_overwrite(struct nvdimm *nvdimm, | 316 | static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, |
| 317 | const struct nvdimm_key_data *nkey) | 317 | const struct nvdimm_key_data *nkey) |
| 318 | { | 318 | { |
| 319 | int rc; | 319 | int rc; |
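__maybe_unused expands to the compiler's unused attribute; it suppresses the defined-but-not-used warning when a function is only referenced from code that some configurations compile out, as with the keyring-dependent security ops above. Trivial example:

static int __maybe_unused helper_only_used_with_keys(void)
{
        return 0;       /* no warning even if no caller is built in */
}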
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 4ca7a6b4eaae..8218db17ebdb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
| @@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers" | |||
| 1091 | 1091 | ||
| 1092 | config PATA_ACPI | 1092 | config PATA_ACPI |
| 1093 | tristate "ACPI firmware driver for PATA" | 1093 | tristate "ACPI firmware driver for PATA" |
| 1094 | depends on ATA_ACPI && ATA_BMDMA | 1094 | depends on ATA_ACPI && ATA_BMDMA && PCI |
| 1095 | help | 1095 | help |
| 1096 | This option enables an ACPI method driver which drives | 1096 | This option enables an ACPI method driver which drives |
| 1097 | motherboard PATA controller interfaces through the ACPI | 1097 | motherboard PATA controller interfaces through the ACPI |
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 1bd1145ad8b5..330c1f7e9665 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
| @@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) | |||
| 108 | * suppress pointless writes. | 108 | * suppress pointless writes. |
| 109 | */ | 109 | */ |
| 110 | for (i = 0; i < d->chip->num_regs; i++) { | 110 | for (i = 0; i < d->chip->num_regs; i++) { |
| 111 | if (!d->chip->mask_base) | ||
| 112 | continue; | ||
| 113 | |||
| 111 | reg = d->chip->mask_base + | 114 | reg = d->chip->mask_base + |
| 112 | (i * map->reg_stride * d->irq_reg_stride); | 115 | (i * map->reg_stride * d->irq_reg_stride); |
| 113 | if (d->chip->mask_invert) { | 116 | if (d->chip->mask_invert) { |
| @@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) | |||
| 258 | const struct regmap_irq_type *t = &irq_data->type; | 261 | const struct regmap_irq_type *t = &irq_data->type; |
| 259 | 262 | ||
| 260 | if ((t->types_supported & type) != type) | 263 | if ((t->types_supported & type) != type) |
| 261 | return -ENOTSUPP; | 264 | return 0; |
| 262 | 265 | ||
| 263 | reg = t->type_reg_offset / map->reg_stride; | 266 | reg = t->type_reg_offset / map->reg_stride; |
| 264 | 267 | ||
| @@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
| 588 | /* Mask all the interrupts by default */ | 591 | /* Mask all the interrupts by default */ |
| 589 | for (i = 0; i < chip->num_regs; i++) { | 592 | for (i = 0; i < chip->num_regs; i++) { |
| 590 | d->mask_buf[i] = d->mask_buf_def[i]; | 593 | d->mask_buf[i] = d->mask_buf_def[i]; |
| 594 | if (!chip->mask_base) | ||
| 595 | continue; | ||
| 596 | |||
| 591 | reg = chip->mask_base + | 597 | reg = chip->mask_base + |
| 592 | (i * map->reg_stride * d->irq_reg_stride); | 598 | (i * map->reg_stride * d->irq_reg_stride); |
| 593 | if (chip->mask_invert) | 599 | if (chip->mask_invert) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 08696f5f00bb..7c9a949e876b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) | |||
| 288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); | 288 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
| 289 | set_capacity(nbd->disk, config->bytesize >> 9); | 289 | set_capacity(nbd->disk, config->bytesize >> 9); |
| 290 | if (bdev) { | 290 | if (bdev) { |
| 291 | if (bdev->bd_disk) | 291 | if (bdev->bd_disk) { |
| 292 | bd_set_size(bdev, config->bytesize); | 292 | bd_set_size(bdev, config->bytesize); |
| 293 | else | 293 | set_blocksize(bdev, config->blksize); |
| 294 | } else | ||
| 294 | bdev->bd_invalidated = 1; | 295 | bdev->bd_invalidated = 1; |
| 295 | bdput(bdev); | 296 | bdput(bdev); |
| 296 | } | 297 | } |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5a90075f719d..0be55fcc19ba 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU | |||
| 692 | depends on ARCH_BCM_IPROC | 692 | depends on ARCH_BCM_IPROC |
| 693 | depends on MAILBOX | 693 | depends on MAILBOX |
| 694 | default m | 694 | default m |
| 695 | select CRYPTO_AUTHENC | ||
| 695 | select CRYPTO_DES | 696 | select CRYPTO_DES |
| 696 | select CRYPTO_MD5 | 697 | select CRYPTO_MD5 |
| 697 | select CRYPTO_SHA1 | 698 | select CRYPTO_SHA1 |
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c9393ffb70ed..5567cbda2798 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
| @@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2845 | struct spu_hw *spu = &iproc_priv.spu; | 2845 | struct spu_hw *spu = &iproc_priv.spu; |
| 2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); | 2846 | struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); |
| 2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); | 2847 | struct crypto_tfm *tfm = crypto_aead_tfm(cipher); |
| 2848 | struct rtattr *rta = (void *)key; | 2848 | struct crypto_authenc_keys keys; |
| 2849 | struct crypto_authenc_key_param *param; | 2849 | int ret; |
| 2850 | const u8 *origkey = key; | ||
| 2851 | const unsigned int origkeylen = keylen; | ||
| 2852 | |||
| 2853 | int ret = 0; | ||
| 2854 | 2850 | ||
| 2855 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, | 2851 | flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, |
| 2856 | keylen); | 2852 | keylen); |
| 2857 | flow_dump(" key: ", key, keylen); | 2853 | flow_dump(" key: ", key, keylen); |
| 2858 | 2854 | ||
| 2859 | if (!RTA_OK(rta, keylen)) | 2855 | ret = crypto_authenc_extractkeys(&keys, key, keylen); |
| 2860 | goto badkey; | 2856 | if (ret) |
| 2861 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
| 2862 | goto badkey; | ||
| 2863 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 2864 | goto badkey; | 2857 | goto badkey; |
| 2865 | 2858 | ||
| 2866 | param = RTA_DATA(rta); | 2859 | if (keys.enckeylen > MAX_KEY_SIZE || |
| 2867 | ctx->enckeylen = be32_to_cpu(param->enckeylen); | 2860 | keys.authkeylen > MAX_KEY_SIZE) |
| 2868 | |||
| 2869 | key += RTA_ALIGN(rta->rta_len); | ||
| 2870 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 2871 | |||
| 2872 | if (keylen < ctx->enckeylen) | ||
| 2873 | goto badkey; | ||
| 2874 | if (ctx->enckeylen > MAX_KEY_SIZE) | ||
| 2875 | goto badkey; | 2861 | goto badkey; |
| 2876 | 2862 | ||
| 2877 | ctx->authkeylen = keylen - ctx->enckeylen; | 2863 | ctx->enckeylen = keys.enckeylen; |
| 2878 | 2864 | ctx->authkeylen = keys.authkeylen; | |
| 2879 | if (ctx->authkeylen > MAX_KEY_SIZE) | ||
| 2880 | goto badkey; | ||
| 2881 | 2865 | ||
| 2882 | memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); | 2866 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
| 2883 | /* May end up padding auth key. So make sure it's zeroed. */ | 2867 | /* May end up padding auth key. So make sure it's zeroed. */ |
| 2884 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); | 2868 | memset(ctx->authkey, 0, sizeof(ctx->authkey)); |
| 2885 | memcpy(ctx->authkey, key, ctx->authkeylen); | 2869 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
| 2886 | 2870 | ||
| 2887 | switch (ctx->alg->cipher_info.alg) { | 2871 | switch (ctx->alg->cipher_info.alg) { |
| 2888 | case CIPHER_ALG_DES: | 2872 | case CIPHER_ALG_DES: |
| @@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2890 | u32 tmp[DES_EXPKEY_WORDS]; | 2874 | u32 tmp[DES_EXPKEY_WORDS]; |
| 2891 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; | 2875 | u32 flags = CRYPTO_TFM_RES_WEAK_KEY; |
| 2892 | 2876 | ||
| 2893 | if (des_ekey(tmp, key) == 0) { | 2877 | if (des_ekey(tmp, keys.enckey) == 0) { |
| 2894 | if (crypto_aead_get_flags(cipher) & | 2878 | if (crypto_aead_get_flags(cipher) & |
| 2895 | CRYPTO_TFM_REQ_WEAK_KEY) { | 2879 | CRYPTO_TFM_REQ_WEAK_KEY) { |
| 2896 | crypto_aead_set_flags(cipher, flags); | 2880 | crypto_aead_set_flags(cipher, flags); |
| @@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2905 | break; | 2889 | break; |
| 2906 | case CIPHER_ALG_3DES: | 2890 | case CIPHER_ALG_3DES: |
| 2907 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { | 2891 | if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { |
| 2908 | const u32 *K = (const u32 *)key; | 2892 | const u32 *K = (const u32 *)keys.enckey; |
| 2909 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; | 2893 | u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; |
| 2910 | 2894 | ||
| 2911 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | 2895 | if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
| @@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, | |||
| 2956 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 2940 | ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
| 2957 | ctx->fallback_cipher->base.crt_flags |= | 2941 | ctx->fallback_cipher->base.crt_flags |= |
| 2958 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; | 2942 | tfm->crt_flags & CRYPTO_TFM_REQ_MASK; |
| 2959 | ret = | 2943 | ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); |
| 2960 | crypto_aead_setkey(ctx->fallback_cipher, origkey, | ||
| 2961 | origkeylen); | ||
| 2962 | if (ret) { | 2944 | if (ret) { |
| 2963 | flow_log(" fallback setkey() returned:%d\n", ret); | 2945 | flow_log(" fallback setkey() returned:%d\n", ret); |
| 2964 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 2946 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 92e593e2069a..80ae69f906fb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void) | |||
| 3476 | * Skip algorithms requiring message digests | 3476 | * Skip algorithms requiring message digests |
| 3477 | * if MD or MD size is not supported by device. | 3477 | * if MD or MD size is not supported by device. |
| 3478 | */ | 3478 | */ |
| 3479 | if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && | 3479 | if (is_mdha(c2_alg_sel) && |
| 3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) | 3480 | (!md_inst || t_alg->aead.maxauthsize > md_limit)) |
| 3481 | continue; | 3481 | continue; |
| 3482 | 3482 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 81712aa5d0f2..bb1a2cdf1951 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
| 1072 | 1072 | ||
| 1073 | desc = edesc->hw_desc; | 1073 | desc = edesc->hw_desc; |
| 1074 | 1074 | ||
| 1075 | state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); | 1075 | if (buflen) { |
| 1076 | if (dma_mapping_error(jrdev, state->buf_dma)) { | 1076 | state->buf_dma = dma_map_single(jrdev, buf, buflen, |
| 1077 | dev_err(jrdev, "unable to map src\n"); | 1077 | DMA_TO_DEVICE); |
| 1078 | goto unmap; | 1078 | if (dma_mapping_error(jrdev, state->buf_dma)) { |
| 1079 | } | 1079 | dev_err(jrdev, "unable to map src\n"); |
| 1080 | goto unmap; | ||
| 1081 | } | ||
| 1080 | 1082 | ||
| 1081 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); | 1083 | append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
| 1084 | } | ||
| 1082 | 1085 | ||
| 1083 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, | 1086 | edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
| 1084 | digestsize); | 1087 | digestsize); |
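The caamhash hunk skips both the DMA mapping and the matching descriptor entry when the buffer length is zero; mapping a zero-length buffer is at best pointless and may trigger DMA-API debug warnings. Generic shape of the guard, where add_in_ptr() is a hypothetical descriptor helper:

static int map_src_if_any(struct device *dev, void *buf, unsigned int buflen,
                          u32 *desc)
{
        dma_addr_t dma;

        if (!buflen)
                return 0;                       /* nothing to map or describe */

        dma = dma_map_single(dev, buf, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        add_in_ptr(desc, dma, buflen);          /* hypothetical helper */
        return 0;
}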
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index ec10230178c5..4b6854bf896a 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -1155,6 +1155,7 @@ | |||
| 1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) | 1155 | #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) |
| 1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) | 1156 | #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) |
| 1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) | 1157 | #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) |
| 1158 | #define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) | ||
| 1158 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) | 1159 | #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) |
| 1159 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) | 1160 | #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) |
| 1160 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) | 1161 | #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) |
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 67ea94079837..8c6b83e02a70 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
| @@ -7,6 +7,9 @@ | |||
| 7 | 7 | ||
| 8 | #ifndef CAAM_ERROR_H | 8 | #ifndef CAAM_ERROR_H |
| 9 | #define CAAM_ERROR_H | 9 | #define CAAM_ERROR_H |
| 10 | |||
| 11 | #include "desc.h" | ||
| 12 | |||
| 10 | #define CAAM_ERROR_STR_MAX 302 | 13 | #define CAAM_ERROR_STR_MAX 302 |
| 11 | 14 | ||
| 12 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | 15 | void caam_strstatus(struct device *dev, u32 status, bool qi_v2); |
| @@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | |||
| 17 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 20 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, |
| 18 | int rowsize, int groupsize, struct scatterlist *sg, | 21 | int rowsize, int groupsize, struct scatterlist *sg, |
| 19 | size_t tlen, bool ascii); | 22 | size_t tlen, bool ascii); |
| 23 | |||
| 24 | static inline bool is_mdha(u32 algtype) | ||
| 25 | { | ||
| 26 | return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == | ||
| 27 | OP_ALG_CHA_MDHA; | ||
| 28 | } | ||
| 20 | #endif /* CAAM_ERROR_H */ | 29 | #endif /* CAAM_ERROR_H */ |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index e34e4df8fd24..fe070d75c842 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
| @@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq) | |||
| 567 | 567 | ||
| 568 | /* ORH error code */ | 568 | /* ORH error code */ |
| 569 | err = READ_ONCE(*sr->resp.orh) & 0xff; | 569 | err = READ_ONCE(*sr->resp.orh) & 0xff; |
| 570 | softreq_destroy(sr); | ||
| 571 | 570 | ||
| 572 | if (sr->callback) | 571 | if (sr->callback) |
| 573 | sr->callback(sr->cb_arg, err); | 572 | sr->callback(sr->cb_arg, err); |
| 573 | softreq_destroy(sr); | ||
| 574 | 574 | ||
| 575 | req_completed++; | 575 | req_completed++; |
| 576 | } | 576 | } |
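The nitrox hunk is a use-after-free fix: the completion callback dereferences the request's fields, so the request must not be destroyed until after the callback has run. Generic shape, with a hypothetical request type and free helper:

static void complete_softreq(struct soft_req *sr)
{
        int err = READ_ONCE(*sr->resp_orh) & 0xff;  /* harvest results first */

        if (sr->callback)
                sr->callback(sr->cb_arg, err);      /* runs while sr is alive */

        softreq_free(sr);                           /* free strictly last */
}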
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index f2643cda45db..a3527c00b29a 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c | |||
| @@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 549 | unsigned int keylen) | 549 | unsigned int keylen) |
| 550 | { | 550 | { |
| 551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 551 | struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 552 | struct rtattr *rta = (struct rtattr *)key; | ||
| 553 | struct cc_crypto_req cc_req = {}; | 552 | struct cc_crypto_req cc_req = {}; |
| 554 | struct crypto_authenc_key_param *param; | ||
| 555 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; | 553 | struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; |
| 556 | int rc = -EINVAL; | ||
| 557 | unsigned int seq_len = 0; | 554 | unsigned int seq_len = 0; |
| 558 | struct device *dev = drvdata_to_dev(ctx->drvdata); | 555 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
| 556 | const u8 *enckey, *authkey; | ||
| 557 | int rc; | ||
| 559 | 558 | ||
| 560 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", | 559 | dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", |
| 561 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); | 560 | ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); |
| @@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 563 | /* STAT_PHASE_0: Init and sanity checks */ | 562 | /* STAT_PHASE_0: Init and sanity checks */ |
| 564 | 563 | ||
| 565 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ | 564 | if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ |
| 566 | if (!RTA_OK(rta, keylen)) | 565 | struct crypto_authenc_keys keys; |
| 567 | goto badkey; | 566 | |
| 568 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 567 | rc = crypto_authenc_extractkeys(&keys, key, keylen); |
| 569 | goto badkey; | 568 | if (rc) |
| 570 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 571 | goto badkey; | ||
| 572 | param = RTA_DATA(rta); | ||
| 573 | ctx->enc_keylen = be32_to_cpu(param->enckeylen); | ||
| 574 | key += RTA_ALIGN(rta->rta_len); | ||
| 575 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 576 | if (keylen < ctx->enc_keylen) | ||
| 577 | goto badkey; | 569 | goto badkey; |
| 578 | ctx->auth_keylen = keylen - ctx->enc_keylen; | 570 | enckey = keys.enckey; |
| 571 | authkey = keys.authkey; | ||
| 572 | ctx->enc_keylen = keys.enckeylen; | ||
| 573 | ctx->auth_keylen = keys.authkeylen; | ||
| 579 | 574 | ||
| 580 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { | 575 | if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
| 581 | /* the nonce is stored in bytes at end of key */ | 576 | /* the nonce is stored in bytes at end of key */ |
| 577 | rc = -EINVAL; | ||
| 582 | if (ctx->enc_keylen < | 578 | if (ctx->enc_keylen < |
| 583 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) | 579 | (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) |
| 584 | goto badkey; | 580 | goto badkey; |
| 585 | /* Copy nonce from last 4 bytes in CTR key to | 581 | /* Copy nonce from last 4 bytes in CTR key to |
| 586 | * first 4 bytes in CTR IV | 582 | * first 4 bytes in CTR IV |
| 587 | */ | 583 | */ |
| 588 | memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + | 584 | memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - |
| 589 | ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, | 585 | CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); |
| 590 | CTR_RFC3686_NONCE_SIZE); | ||
| 591 | /* Set CTR key size */ | 586 | /* Set CTR key size */ |
| 592 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; | 587 | ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; |
| 593 | } | 588 | } |
| 594 | } else { /* non-authenc - has just one key */ | 589 | } else { /* non-authenc - has just one key */ |
| 590 | enckey = key; | ||
| 591 | authkey = NULL; | ||
| 595 | ctx->enc_keylen = keylen; | 592 | ctx->enc_keylen = keylen; |
| 596 | ctx->auth_keylen = 0; | 593 | ctx->auth_keylen = 0; |
| 597 | } | 594 | } |
| @@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 603 | /* STAT_PHASE_1: Copy key to ctx */ | 600 | /* STAT_PHASE_1: Copy key to ctx */ |
| 604 | 601 | ||
| 605 | /* Get key material */ | 602 | /* Get key material */ |
| 606 | memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); | 603 | memcpy(ctx->enckey, enckey, ctx->enc_keylen); |
| 607 | if (ctx->enc_keylen == 24) | 604 | if (ctx->enc_keylen == 24) |
| 608 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); | 605 | memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); |
| 609 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { | 606 | if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { |
| 610 | memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); | 607 | memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, |
| 608 | ctx->auth_keylen); | ||
| 611 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ | 609 | } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ |
| 612 | rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); | 610 | rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); |
| 613 | if (rc) | 611 | if (rc) |
| 614 | goto badkey; | 612 | goto badkey; |
| 615 | } | 613 | } |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 45e20707cef8..f8e2c5c3f4eb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1361 | struct talitos_private *priv = dev_get_drvdata(dev); | 1361 | struct talitos_private *priv = dev_get_drvdata(dev); |
| 1362 | bool is_sec1 = has_ftr_sec1(priv); | 1362 | bool is_sec1 = has_ftr_sec1(priv); |
| 1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; | 1363 | int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; |
| 1364 | void *err; | ||
| 1365 | 1364 | ||
| 1366 | if (cryptlen + authsize > max_len) { | 1365 | if (cryptlen + authsize > max_len) { |
| 1367 | dev_err(dev, "length exceeds h/w max limit\n"); | 1366 | dev_err(dev, "length exceeds h/w max limit\n"); |
| 1368 | return ERR_PTR(-EINVAL); | 1367 | return ERR_PTR(-EINVAL); |
| 1369 | } | 1368 | } |
| 1370 | 1369 | ||
| 1371 | if (ivsize) | ||
| 1372 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
| 1373 | |||
| 1374 | if (!dst || dst == src) { | 1370 | if (!dst || dst == src) { |
| 1375 | src_len = assoclen + cryptlen + authsize; | 1371 | src_len = assoclen + cryptlen + authsize; |
| 1376 | src_nents = sg_nents_for_len(src, src_len); | 1372 | src_nents = sg_nents_for_len(src, src_len); |
| 1377 | if (src_nents < 0) { | 1373 | if (src_nents < 0) { |
| 1378 | dev_err(dev, "Invalid number of src SG.\n"); | 1374 | dev_err(dev, "Invalid number of src SG.\n"); |
| 1379 | err = ERR_PTR(-EINVAL); | 1375 | return ERR_PTR(-EINVAL); |
| 1380 | goto error_sg; | ||
| 1381 | } | 1376 | } |
| 1382 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1377 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1383 | dst_nents = dst ? src_nents : 0; | 1378 | dst_nents = dst ? src_nents : 0; |
| @@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1387 | src_nents = sg_nents_for_len(src, src_len); | 1382 | src_nents = sg_nents_for_len(src, src_len); |
| 1388 | if (src_nents < 0) { | 1383 | if (src_nents < 0) { |
| 1389 | dev_err(dev, "Invalid number of src SG.\n"); | 1384 | dev_err(dev, "Invalid number of src SG.\n"); |
| 1390 | err = ERR_PTR(-EINVAL); | 1385 | return ERR_PTR(-EINVAL); |
| 1391 | goto error_sg; | ||
| 1392 | } | 1386 | } |
| 1393 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1387 | src_nents = (src_nents == 1) ? 0 : src_nents; |
| 1394 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); | 1388 | dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); |
| 1395 | dst_nents = sg_nents_for_len(dst, dst_len); | 1389 | dst_nents = sg_nents_for_len(dst, dst_len); |
| 1396 | if (dst_nents < 0) { | 1390 | if (dst_nents < 0) { |
| 1397 | dev_err(dev, "Invalid number of dst SG.\n"); | 1391 | dev_err(dev, "Invalid number of dst SG.\n"); |
| 1398 | err = ERR_PTR(-EINVAL); | 1392 | return ERR_PTR(-EINVAL); |
| 1399 | goto error_sg; | ||
| 1400 | } | 1393 | } |
| 1401 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1394 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
| 1402 | } | 1395 | } |
| @@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1423 | /* if its a ahash, add space for a second desc next to the first one */ | 1416 | /* if its a ahash, add space for a second desc next to the first one */ |
| 1424 | if (is_sec1 && !dst) | 1417 | if (is_sec1 && !dst) |
| 1425 | alloc_len += sizeof(struct talitos_desc); | 1418 | alloc_len += sizeof(struct talitos_desc); |
| 1419 | alloc_len += ivsize; | ||
| 1426 | 1420 | ||
| 1427 | edesc = kmalloc(alloc_len, GFP_DMA | flags); | 1421 | edesc = kmalloc(alloc_len, GFP_DMA | flags); |
| 1428 | if (!edesc) { | 1422 | if (!edesc) |
| 1429 | err = ERR_PTR(-ENOMEM); | 1423 | return ERR_PTR(-ENOMEM); |
| 1430 | goto error_sg; | 1424 | if (ivsize) { |
| 1425 | iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); | ||
| 1426 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | ||
| 1431 | } | 1427 | } |
| 1432 | memset(&edesc->desc, 0, sizeof(edesc->desc)); | 1428 | memset(&edesc->desc, 0, sizeof(edesc->desc)); |
| 1433 | 1429 | ||
| @@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
| 1445 | DMA_BIDIRECTIONAL); | 1441 | DMA_BIDIRECTIONAL); |
| 1446 | } | 1442 | } |
| 1447 | return edesc; | 1443 | return edesc; |
| 1448 | error_sg: | ||
| 1449 | if (iv_dma) | ||
| 1450 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | ||
| 1451 | return err; | ||
| 1452 | } | 1444 | } |
| 1453 | 1445 | ||
| 1454 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | 1446 | static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dafc645b2e4e..b083b219b1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
| @@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
| 531 | struct drm_gem_object *obj; | 531 | struct drm_gem_object *obj; |
| 532 | struct amdgpu_framebuffer *amdgpu_fb; | 532 | struct amdgpu_framebuffer *amdgpu_fb; |
| 533 | int ret; | 533 | int ret; |
| 534 | int height; | ||
| 535 | struct amdgpu_device *adev = dev->dev_private; | ||
| 536 | int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0); | ||
| 537 | int pitch = mode_cmd->pitches[0] / cpp; | ||
| 538 | |||
| 539 | pitch = amdgpu_align_pitch(adev, pitch, cpp, false); | ||
| 540 | if (mode_cmd->pitches[0] != pitch) { | ||
| 541 | DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n", | ||
| 542 | pitch, mode_cmd->pitches[0]); | ||
| 543 | return ERR_PTR(-EINVAL); | ||
| 544 | } | ||
| 545 | 534 | ||
| 546 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); | 535 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); |
| 547 | if (obj == NULL) { | 536 | if (obj == NULL) { |
| @@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, | |||
| 556 | return ERR_PTR(-EINVAL); | 545 | return ERR_PTR(-EINVAL); |
| 557 | } | 546 | } |
| 558 | 547 | ||
| 559 | height = ALIGN(mode_cmd->height, 8); | ||
| 560 | if (obj->size < pitch * height) { | ||
| 561 | DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n", | ||
| 562 | pitch * height, obj->size); | ||
| 563 | return ERR_PTR(-EINVAL); | ||
| 564 | } | ||
| 565 | |||
| 566 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); | 548 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); |
| 567 | if (amdgpu_fb == NULL) { | 549 | if (amdgpu_fb == NULL) { |
| 568 | drm_gem_object_put_unlocked(obj); | 550 | drm_gem_object_put_unlocked(obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index fbf0ee5201c3..c3613604a4f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig | |||
| @@ -4,8 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | config HSA_AMD | 5 | config HSA_AMD |
| 6 | bool "HSA kernel driver for AMD GPU devices" | 6 | bool "HSA kernel driver for AMD GPU devices" |
| 7 | depends on DRM_AMDGPU && X86_64 | 7 | depends on DRM_AMDGPU && (X86_64 || ARM64) |
| 8 | imply AMD_IOMMU_V2 | 8 | imply AMD_IOMMU_V2 if X86_64 |
| 9 | select MMU_NOTIFIER | 9 | select MMU_NOTIFIER |
| 10 | help | 10 | help |
| 11 | Enable this if you want to use HSA features on AMD GPU devices. | 11 | Enable this if you want to use HSA features on AMD GPU devices. |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b7bc7d7d048f..5d85ff341385 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
| @@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
| 863 | return 0; | 863 | return 0; |
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | #ifdef CONFIG_X86_64 | ||
| 866 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | 867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, |
| 867 | uint32_t *num_entries, | 868 | uint32_t *num_entries, |
| 868 | struct crat_subtype_iolink *sub_type_hdr) | 869 | struct crat_subtype_iolink *sub_type_hdr) |
| @@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | |||
| 905 | 906 | ||
| 906 | return 0; | 907 | return 0; |
| 907 | } | 908 | } |
| 909 | #endif | ||
| 908 | 910 | ||
| 909 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU | 911 | /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU |
| 910 | * | 912 | * |
| @@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 920 | struct crat_subtype_generic *sub_type_hdr; | 922 | struct crat_subtype_generic *sub_type_hdr; |
| 921 | int avail_size = *size; | 923 | int avail_size = *size; |
| 922 | int numa_node_id; | 924 | int numa_node_id; |
| 925 | #ifdef CONFIG_X86_64 | ||
| 923 | uint32_t entries = 0; | 926 | uint32_t entries = 0; |
| 927 | #endif | ||
| 924 | int ret = 0; | 928 | int ret = 0; |
| 925 | 929 | ||
| 926 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) | 930 | if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) |
| @@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 982 | sub_type_hdr->length); | 986 | sub_type_hdr->length); |
| 983 | 987 | ||
| 984 | /* Fill in Subtype: IO Link */ | 988 | /* Fill in Subtype: IO Link */ |
| 989 | #ifdef CONFIG_X86_64 | ||
| 985 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, | 990 | ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, |
| 986 | &entries, | 991 | &entries, |
| 987 | (struct crat_subtype_iolink *)sub_type_hdr); | 992 | (struct crat_subtype_iolink *)sub_type_hdr); |
| @@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) | |||
| 992 | 997 | ||
| 993 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + | 998 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + |
| 994 | sub_type_hdr->length * entries); | 999 | sub_type_hdr->length * entries); |
| 1000 | #else | ||
| 1001 | pr_info("IO link not available for non x86 platforms\n"); | ||
| 1002 | #endif | ||
| 995 | 1003 | ||
| 996 | crat_table->num_domains++; | 1004 | crat_table->num_domains++; |
| 997 | } | 1005 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5f5b2acedbac..09da91644f9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) | |||
| 1093 | * the GPU device is not already present in the topology device | 1093 | * the GPU device is not already present in the topology device |
| 1094 | * list then return NULL. This means a new topology device has to | 1094 | * list then return NULL. This means a new topology device has to |
| 1095 | * be created for this GPU. | 1095 | * be created for this GPU. |
| 1096 | * TODO: Rather than assiging @gpu to first topology device withtout | ||
| 1097 | * gpu attached, it will better to have more stringent check. | ||
| 1098 | */ | 1096 | */ |
| 1099 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | 1097 | static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) |
| 1100 | { | 1098 | { |
| @@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) | |||
| 1102 | struct kfd_topology_device *out_dev = NULL; | 1100 | struct kfd_topology_device *out_dev = NULL; |
| 1103 | 1101 | ||
| 1104 | down_write(&topology_lock); | 1102 | down_write(&topology_lock); |
| 1105 | list_for_each_entry(dev, &topology_device_list, list) | 1103 | list_for_each_entry(dev, &topology_device_list, list) { |
| 1104 | /* Discrete GPUs need their own topology device list | ||
| 1105 | * entries. Don't assign them to CPU/APU nodes. | ||
| 1106 | */ | ||
| 1107 | if (!gpu->device_info->needs_iommu_device && | ||
| 1108 | dev->node_props.cpu_cores_count) | ||
| 1109 | continue; | ||
| 1110 | |||
| 1106 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { | 1111 | if (!dev->gpu && (dev->node_props.simd_count > 0)) { |
| 1107 | dev->gpu = gpu; | 1112 | dev->gpu = gpu; |
| 1108 | out_dev = dev; | 1113 | out_dev = dev; |
| 1109 | break; | 1114 | break; |
| 1110 | } | 1115 | } |
| 1116 | } | ||
| 1111 | up_write(&topology_lock); | 1117 | up_write(&topology_lock); |
| 1112 | return out_dev; | 1118 | return out_dev; |
| 1113 | } | 1119 | } |
| @@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) | |||
| 1392 | 1398 | ||
| 1393 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | 1399 | static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) |
| 1394 | { | 1400 | { |
| 1395 | const struct cpuinfo_x86 *cpuinfo; | ||
| 1396 | int first_cpu_of_numa_node; | 1401 | int first_cpu_of_numa_node; |
| 1397 | 1402 | ||
| 1398 | if (!cpumask || cpumask == cpu_none_mask) | 1403 | if (!cpumask || cpumask == cpu_none_mask) |
| @@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) | |||
| 1400 | first_cpu_of_numa_node = cpumask_first(cpumask); | 1405 | first_cpu_of_numa_node = cpumask_first(cpumask); |
| 1401 | if (first_cpu_of_numa_node >= nr_cpu_ids) | 1406 | if (first_cpu_of_numa_node >= nr_cpu_ids) |
| 1402 | return -1; | 1407 | return -1; |
| 1403 | cpuinfo = &cpu_data(first_cpu_of_numa_node); | 1408 | #ifdef CONFIG_X86_64 |
| 1404 | 1409 | return cpu_data(first_cpu_of_numa_node).apicid; | |
| 1405 | return cpuinfo->apicid; | 1410 | #else |
| 1411 | return first_cpu_of_numa_node; | ||
| 1412 | #endif | ||
| 1406 | } | 1413 | } |
| 1407 | 1414 | ||
| 1408 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor | 1415 | /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 34f35e9a3c46..f4fa40c387d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |||
| 1772 | + caps.min_input_signal * 0x101; | 1772 | + caps.min_input_signal * 0x101; |
| 1773 | 1773 | ||
| 1774 | if (dc_link_set_backlight_level(dm->backlight_link, | 1774 | if (dc_link_set_backlight_level(dm->backlight_link, |
| 1775 | brightness, 0, 0)) | 1775 | brightness, 0)) |
| 1776 | return 0; | 1776 | return 0; |
| 1777 | else | 1777 | else |
| 1778 | return 1; | 1778 | return 1; |
| @@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
| 5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5933 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && | 5934 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && |
| 5935 | !new_crtc_state->color_mgmt_changed && | 5935 | !new_crtc_state->color_mgmt_changed && |
| 5936 | !new_crtc_state->vrr_enabled) | 5936 | old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) |
| 5937 | continue; | 5937 | continue; |
| 5938 | 5938 | ||
| 5939 | if (!new_crtc_state->enable) | 5939 | if (!new_crtc_state->enable) |
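The amdgpu_dm_atomic_check() filter now skips a CRTC only when its variable-refresh setting is unchanged, instead of whenever VRR ends up disabled; checking just the new value missed the enable-to-disable transition. A tiny illustration with a hypothetical, trimmed-down state structure:

    #include <stdbool.h>

    struct crtc_state { bool vrr_enabled; };   /* hypothetical, trimmed down */

    /* The old filter skipped the CRTC whenever VRR ended up disabled, so an
     * enable -> disable transition was never reprogrammed.  Comparing old and
     * new state catches both directions of the change. */
    static bool vrr_needs_update(const struct crtc_state *old_s,
                                 const struct crtc_state *new_s)
    {
        return old_s->vrr_enabled != new_s->vrr_enabled;
    }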
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 52deacf39841..b0265dbebd4c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link) | |||
| 2190 | 2190 | ||
| 2191 | bool dc_link_set_backlight_level(const struct dc_link *link, | 2191 | bool dc_link_set_backlight_level(const struct dc_link *link, |
| 2192 | uint32_t backlight_pwm_u16_16, | 2192 | uint32_t backlight_pwm_u16_16, |
| 2193 | uint32_t frame_ramp, | 2193 | uint32_t frame_ramp) |
| 2194 | const struct dc_stream_state *stream) | ||
| 2195 | { | 2194 | { |
| 2196 | struct dc *core_dc = link->ctx->dc; | 2195 | struct dc *core_dc = link->ctx->dc; |
| 2197 | struct abm *abm = core_dc->res_pool->abm; | 2196 | struct abm *abm = core_dc->res_pool->abm; |
| @@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
| 2206 | (abm->funcs->set_backlight_level_pwm == NULL)) | 2205 | (abm->funcs->set_backlight_level_pwm == NULL)) |
| 2207 | return false; | 2206 | return false; |
| 2208 | 2207 | ||
| 2209 | if (stream) | ||
| 2210 | ((struct dc_stream_state *)stream)->bl_pwm_level = | ||
| 2211 | backlight_pwm_u16_16; | ||
| 2212 | |||
| 2213 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); | 2208 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); |
| 2214 | 2209 | ||
| 2215 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", | 2210 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
| @@ -2637,11 +2632,6 @@ void core_link_enable_stream( | |||
| 2637 | 2632 | ||
| 2638 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | 2633 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) |
| 2639 | enable_stream_features(pipe_ctx); | 2634 | enable_stream_features(pipe_ctx); |
| 2640 | |||
| 2641 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | ||
| 2642 | pipe_ctx->stream->bl_pwm_level, | ||
| 2643 | 0, | ||
| 2644 | pipe_ctx->stream); | ||
| 2645 | } | 2635 | } |
| 2646 | 2636 | ||
| 2647 | } | 2637 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
| @@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
| 146 | */ | 146 | */ |
| 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, |
| 148 | uint32_t backlight_pwm_u16_16, | 148 | uint32_t backlight_pwm_u16_16, |
| 149 | uint32_t frame_ramp, | 149 | uint32_t frame_ramp); |
| 150 | const struct dc_stream_state *stream); | ||
| 151 | 150 | ||
| 152 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 151 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
| 153 | 152 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
| @@ -91,7 +91,6 @@ struct dc_stream_state { | |||
| 91 | 91 | ||
| 92 | /* DMCU info */ | 92 | /* DMCU info */ |
| 93 | unsigned int abm_level; | 93 | unsigned int abm_level; |
| 94 | unsigned int bl_pwm_level; | ||
| 95 | 94 | ||
| 96 | /* from core_stream struct */ | 95 | /* from core_stream struct */ |
| 97 | struct dc_context *ctx; | 96 | struct dc_context *ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4bf24758217f..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
| @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | |||
| 1000 | 1000 | ||
| 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); | 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); |
| 1002 | 1002 | ||
| 1003 | if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | 1003 | if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) |
| 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ |
| 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); |
| 1006 | /* un-mute audio */ | 1006 | /* un-mute audio */ |
| @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( |
| 1018 | pipe_ctx->stream_res.stream_enc, true); | 1018 | pipe_ctx->stream_res.stream_enc, true); |
| 1019 | if (pipe_ctx->stream_res.audio) { | 1019 | if (pipe_ctx->stream_res.audio) { |
| 1020 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
| 1021 | |||
| 1020 | if (option != KEEP_ACQUIRED_RESOURCE || | 1022 | if (option != KEEP_ACQUIRED_RESOURCE || |
| 1021 | !dc->debug.az_endpoint_mute_only) { | 1023 | !dc->debug.az_endpoint_mute_only) { |
| 1022 | /*only disable az_endpoint if power down or free*/ | 1024 | /*only disable az_endpoint if power down or free*/ |
| @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1036 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); | 1038 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); |
| 1037 | pipe_ctx->stream_res.audio = NULL; | 1039 | pipe_ctx->stream_res.audio = NULL; |
| 1038 | } | 1040 | } |
| 1041 | if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | ||
| 1042 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | ||
| 1043 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | ||
| 1039 | 1044 | ||
| 1040 | /* TODO: notify audio driver if the audio modes list changed | 1045 | /* TODO: notify audio driver if the audio modes list changed |
| 1041 | * add audio mode list change flag */ | 1046 | * add audio mode list change flag */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | |||
| @@ -463,7 +463,7 @@ void dpp1_set_cursor_position( | |||
| 463 | if (src_y_offset >= (int)param->viewport.height) | 463 | if (src_y_offset >= (int)param->viewport.height) |
| 464 | cur_en = 0; /* not visible beyond bottom edge*/ | 464 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 465 | 465 | ||
| 466 | if (src_y_offset < 0) | 466 | if (src_y_offset + (int)height <= 0) |
| 467 | cur_en = 0; /* not visible beyond top edge*/ | 467 | cur_en = 0; /* not visible beyond top edge*/ |
| 468 | 468 | ||
| 469 | REG_UPDATE(CURSOR0_CONTROL, | 469 | REG_UPDATE(CURSOR0_CONTROL, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
| @@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( | |||
| 1140 | if (src_y_offset >= (int)param->viewport.height) | 1140 | if (src_y_offset >= (int)param->viewport.height) |
| 1141 | cur_en = 0; /* not visible beyond bottom edge*/ | 1141 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 1142 | 1142 | ||
| 1143 | if (src_y_offset < 0) //+ (int)hubp->curs_attr.height | 1143 | if (src_y_offset + (int)hubp->curs_attr.height <= 0) |
| 1144 | cur_en = 0; /* not visible beyond top edge*/ | 1144 | cur_en = 0; /* not visible beyond top edge*/ |
| 1145 | 1145 | ||
| 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) | 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) |
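The dpp1_set_cursor_position() and hubp1_cursor_set_position() hunks apply the same fix: a cursor whose origin is above the viewport is still partly visible as long as some of its height reaches row 0, so the off-screen test has to include the cursor height. A standalone sketch of the visibility test:

    #include <stdbool.h>

    /* True if a cursor of the given height placed at y_offset (possibly
     * negative) overlaps a viewport of viewport_h rows. */
    static bool cursor_visible(int y_offset, int height, int viewport_h)
    {
        if (y_offset >= viewport_h)
            return false;           /* fully below the bottom edge */
        if (y_offset + height <= 0)
            return false;           /* fully above the top edge */
        return true;                /* at least one row intersects */
    }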
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91e015e14355..58a12ddf12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
| @@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface( | |||
| 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) | 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) |
| 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2358 | 2358 | tg = pipe_ctx->stream_res.tg; | |
| 2359 | /* Skip inactive pipes and ones already updated */ | 2359 | /* Skip inactive pipes and ones already updated */ |
| 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream |
| 2361 | || !pipe_ctx->plane_state) | 2361 | || !pipe_ctx->plane_state |
| 2362 | || !tg->funcs->is_tg_enabled(tg)) | ||
| 2362 | continue; | 2363 | continue; |
| 2363 | 2364 | ||
| 2364 | pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); | 2365 | tg->funcs->lock(tg); |
| 2365 | 2366 | ||
| 2366 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( | 2367 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( |
| 2367 | pipe_ctx->plane_res.hubp, | 2368 | pipe_ctx->plane_res.hubp, |
| 2368 | &pipe_ctx->dlg_regs, | 2369 | &pipe_ctx->dlg_regs, |
| 2369 | &pipe_ctx->ttu_regs); | 2370 | &pipe_ctx->ttu_regs); |
| 2370 | } | ||
| 2371 | |||
| 2372 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 2373 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 2374 | 2371 | ||
| 2375 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2372 | tg->funcs->unlock(tg); |
| 2376 | || !pipe_ctx->plane_state) | 2373 | } |
| 2377 | continue; | ||
| 2378 | |||
| 2379 | dcn10_pipe_control_lock(dc, pipe_ctx, false); | ||
| 2380 | } | ||
| 2381 | 2374 | ||
| 2382 | if (num_planes == 0) | 2375 | if (num_planes == 0) |
| 2383 | false_optc_underflow_wa(dc, stream, tg); | 2376 | false_optc_underflow_wa(dc, stream, tg); |
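dcn10_apply_ctx_for_surface() now locks each timing generator, programs the interdependent HUBP registers, and unlocks again inside one loop, and it skips pipes whose timing generator is disabled, instead of locking everything in a first pass and unlocking in a second. A rough analogue of that per-item lock scope, using a pthread mutex as a stand-in for the tg lock:

    #include <pthread.h>
    #include <stdbool.h>

    struct pipe {
        bool active;                /* stands in for the stream/plane/tg checks */
        pthread_mutex_t lock;       /* stands in for tg->funcs->lock/unlock */
    };

    static void program_pipe(struct pipe *p)
    {
        (void)p;                    /* register programming would go here */
    }

    static void apply_ctx(struct pipe *pipes, int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            struct pipe *p = &pipes[i];

            if (!p->active)
                continue;                   /* filter before taking the lock */

            pthread_mutex_lock(&p->lock);   /* hold only while programming */
            program_pipe(p);
            pthread_mutex_unlock(&p->lock);
        }
    }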
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c | |||
| @@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le | |||
| 57 | #define NUM_POWER_FN_SEGS 8 | 57 | #define NUM_POWER_FN_SEGS 8 |
| 58 | #define NUM_BL_CURVE_SEGS 16 | 58 | #define NUM_BL_CURVE_SEGS 16 |
| 59 | 59 | ||
| 60 | #pragma pack(push, 1) | ||
| 60 | /* NOTE: iRAM is 256B in size */ | 61 | /* NOTE: iRAM is 256B in size */ |
| 61 | struct iram_table_v_2 { | 62 | struct iram_table_v_2 { |
| 62 | /* flags */ | 63 | /* flags */ |
| @@ -100,6 +101,7 @@ struct iram_table_v_2 { | |||
| 100 | uint8_t dummy8; /* 0xfe */ | 101 | uint8_t dummy8; /* 0xfe */ |
| 101 | uint8_t dummy9; /* 0xff */ | 102 | uint8_t dummy9; /* 0xff */ |
| 102 | }; | 103 | }; |
| 104 | #pragma pack(pop) | ||
| 103 | 105 | ||
| 104 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) | 106 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) |
| 105 | { | 107 | { |
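Wrapping iram_table_v_2 in #pragma pack(push, 1) / #pragma pack(pop) forces a byte-exact layout for the 256-byte iRAM image without changing packing for anything declared later in the file. A minimal standalone illustration with made-up fields, including a compile-time size check:

    #include <stdint.h>

    #pragma pack(push, 1)           /* no padding inside this region */
    struct fw_table {               /* made-up fields, not the real iRAM layout */
        uint8_t  flags;
        uint16_t threshold;         /* would normally be padded to offset 2 */
        uint8_t  levels[13];
    };
    #pragma pack(pop)               /* restore the compiler's default packing */

    /* Catch layout drift at compile time: the firmware expects 16 bytes here. */
    _Static_assert(sizeof(struct fw_table) == 16, "fw_table must be 16 bytes");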
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
| @@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 753 | return 0; | 753 | return 0; |
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) | ||
| 757 | { | ||
| 758 | uint32_t result; | ||
| 759 | |||
| 760 | PP_ASSERT_WITH_CODE( | ||
| 761 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, | ||
| 762 | "[Run_ACG_BTC] Attempt to run ACG BTC failed!", | ||
| 763 | return -EINVAL); | ||
| 764 | |||
| 765 | result = smum_get_argument(hwmgr); | ||
| 766 | PP_ASSERT_WITH_CODE(result == 1, | ||
| 767 | "Failed to run ACG BTC!", return -EINVAL); | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 756 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) | 772 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) |
| 757 | { | 773 | { |
| 758 | struct vega12_hwmgr *data = | 774 | struct vega12_hwmgr *data = |
| @@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
| 931 | "Failed to initialize SMC table!", | 947 | "Failed to initialize SMC table!", |
| 932 | result = tmp_result); | 948 | result = tmp_result); |
| 933 | 949 | ||
| 950 | tmp_result = vega12_run_acg_btc(hwmgr); | ||
| 951 | PP_ASSERT_WITH_CODE(!tmp_result, | ||
| 952 | "Failed to run ACG BTC!", | ||
| 953 | result = tmp_result); | ||
| 954 | |||
| 934 | result = vega12_enable_all_smu_features(hwmgr); | 955 | result = vega12_enable_all_smu_features(hwmgr); |
| 935 | PP_ASSERT_WITH_CODE(!result, | 956 | PP_ASSERT_WITH_CODE(!result, |
| 936 | "Failed to enable all smu features!", | 957 | "Failed to enable all smu features!", |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
| 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2802 | MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
| 2802 | return 0; | 2803 | return 0; |
| 2803 | } | 2804 | } |
| 2804 | 2805 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h | |||
| @@ -41,7 +41,7 @@ struct intel_gvt_mpt { | |||
| 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
| 42 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
| 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
| 44 | void (*detach_vgpu)(unsigned long handle); | 44 | void (*detach_vgpu)(void *vgpu); |
| 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); | 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); |
| 46 | unsigned long (*from_virt_to_mfn)(void *p); | 46 | unsigned long (*from_virt_to_mfn)(void *p); |
| 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); | 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 996 | { | 996 | { |
| 997 | unsigned int index; | 997 | unsigned int index; |
| 998 | u64 virtaddr; | 998 | u64 virtaddr; |
| 999 | unsigned long req_size, pgoff = 0; | 999 | unsigned long req_size, pgoff, req_start; |
| 1000 | pgprot_t pg_prot; | 1000 | pgprot_t pg_prot; |
| 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
| 1002 | 1002 | ||
| @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 1014 | pg_prot = vma->vm_page_prot; | 1014 | pg_prot = vma->vm_page_prot; |
| 1015 | virtaddr = vma->vm_start; | 1015 | virtaddr = vma->vm_start; |
| 1016 | req_size = vma->vm_end - vma->vm_start; | 1016 | req_size = vma->vm_end - vma->vm_start; |
| 1017 | pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; | 1017 | pgoff = vma->vm_pgoff & |
| 1018 | ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); | ||
| 1019 | req_start = pgoff << PAGE_SHIFT; | ||
| 1020 | |||
| 1021 | if (!intel_vgpu_in_aperture(vgpu, req_start)) | ||
| 1022 | return -EINVAL; | ||
| 1023 | if (req_start + req_size > | ||
| 1024 | vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) | ||
| 1025 | return -EINVAL; | ||
| 1026 | |||
| 1027 | pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; | ||
| 1018 | 1028 | ||
| 1019 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); | 1029 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); |
| 1020 | } | 1030 | } |
| @@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) | |||
| 1662 | return 0; | 1672 | return 0; |
| 1663 | } | 1673 | } |
| 1664 | 1674 | ||
| 1665 | static void kvmgt_detach_vgpu(unsigned long handle) | 1675 | static void kvmgt_detach_vgpu(void *p_vgpu) |
| 1666 | { | 1676 | { |
| 1667 | /* nothing to do here */ | 1677 | int i; |
| 1678 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; | ||
| 1679 | |||
| 1680 | if (!vgpu->vdev.region) | ||
| 1681 | return; | ||
| 1682 | |||
| 1683 | for (i = 0; i < vgpu->vdev.num_regions; i++) | ||
| 1684 | if (vgpu->vdev.region[i].ops->release) | ||
| 1685 | vgpu->vdev.region[i].ops->release(vgpu, | ||
| 1686 | &vgpu->vdev.region[i]); | ||
| 1687 | vgpu->vdev.num_regions = 0; | ||
| 1688 | kfree(vgpu->vdev.region); | ||
| 1689 | vgpu->vdev.region = NULL; | ||
| 1668 | } | 1690 | } |
| 1669 | 1691 | ||
| 1670 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | 1692 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) |
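intel_vgpu_mmap() now derives the BAR-relative page offset from vma->vm_pgoff and refuses the mapping unless the requested range lies inside the vGPU's slice of the aperture before calling remap_pfn_range(), instead of always mapping from the aperture base. The core of the check is interval containment; a standalone version, with an explicit wrap-around guard that the kernel hunk leaves to its callers' bounded sizes:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if [start, start + size) lies entirely inside
     * [region_base, region_base + region_size). */
    static bool range_inside(uint64_t start, uint64_t size,
                             uint64_t region_base, uint64_t region_size)
    {
        if (size == 0 || size > region_size)
            return false;
        if (start < region_base)
            return false;
        /* written as subtractions so start + size cannot wrap */
        return start - region_base <= region_size - size;
    }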
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h | |||
| @@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) | |||
| 101 | if (!intel_gvt_host.mpt->detach_vgpu) | 101 | if (!intel_gvt_host.mpt->detach_vgpu) |
| 102 | return; | 102 | return; |
| 103 | 103 | ||
| 104 | intel_gvt_host.mpt->detach_vgpu(vgpu->handle); | 104 | intel_gvt_host.mpt->detach_vgpu(vgpu); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) | 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) |
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
| @@ -46,7 +46,6 @@ struct meson_crtc { | |||
| 46 | struct drm_crtc base; | 46 | struct drm_crtc base; |
| 47 | struct drm_pending_vblank_event *event; | 47 | struct drm_pending_vblank_event *event; |
| 48 | struct meson_drm *priv; | 48 | struct meson_drm *priv; |
| 49 | bool enabled; | ||
| 50 | }; | 49 | }; |
| 51 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) | 50 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) |
| 52 | 51 | ||
| @@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { | |||
| 82 | 81 | ||
| 83 | }; | 82 | }; |
| 84 | 83 | ||
| 85 | static void meson_crtc_enable(struct drm_crtc *crtc) | 84 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, |
| 85 | struct drm_crtc_state *old_state) | ||
| 86 | { | 86 | { |
| 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 88 | struct drm_crtc_state *crtc_state = crtc->state; | 88 | struct drm_crtc_state *crtc_state = crtc->state; |
| @@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) | |||
| 108 | 108 | ||
| 109 | drm_crtc_vblank_on(crtc); | 109 | drm_crtc_vblank_on(crtc); |
| 110 | 110 | ||
| 111 | meson_crtc->enabled = true; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
| 115 | struct drm_crtc_state *old_state) | ||
| 116 | { | ||
| 117 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
| 118 | struct meson_drm *priv = meson_crtc->priv; | ||
| 119 | |||
| 120 | DRM_DEBUG_DRIVER("\n"); | ||
| 121 | |||
| 122 | if (!meson_crtc->enabled) | ||
| 123 | meson_crtc_enable(crtc); | ||
| 124 | |||
| 125 | priv->viu.osd1_enabled = true; | 111 | priv->viu.osd1_enabled = true; |
| 126 | } | 112 | } |
| 127 | 113 | ||
| @@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 153 | 139 | ||
| 154 | crtc->state->event = NULL; | 140 | crtc->state->event = NULL; |
| 155 | } | 141 | } |
| 156 | |||
| 157 | meson_crtc->enabled = false; | ||
| 158 | } | 142 | } |
| 159 | 143 | ||
| 160 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | 144 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, |
| @@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | |||
| 163 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 147 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 164 | unsigned long flags; | 148 | unsigned long flags; |
| 165 | 149 | ||
| 166 | if (crtc->state->enable && !meson_crtc->enabled) | ||
| 167 | meson_crtc_enable(crtc); | ||
| 168 | |||
| 169 | if (crtc->state->event) { | 150 | if (crtc->state->event) { |
| 170 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | 151 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); |
| 171 | 152 | ||
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
| @@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { | |||
| 75 | .fb_create = drm_gem_fb_create, | 75 | .fb_create = drm_gem_fb_create, |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { | ||
| 79 | .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, | ||
| 80 | }; | ||
| 81 | |||
| 78 | static irqreturn_t meson_irq(int irq, void *arg) | 82 | static irqreturn_t meson_irq(int irq, void *arg) |
| 79 | { | 83 | { |
| 80 | struct drm_device *dev = arg; | 84 | struct drm_device *dev = arg; |
| @@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | |||
| 266 | drm->mode_config.max_width = 3840; | 270 | drm->mode_config.max_width = 3840; |
| 267 | drm->mode_config.max_height = 2160; | 271 | drm->mode_config.max_height = 2160; |
| 268 | drm->mode_config.funcs = &meson_mode_config_funcs; | 272 | drm->mode_config.funcs = &meson_mode_config_funcs; |
| 273 | drm->mode_config.helper_private = &meson_mode_config_helpers; | ||
| 269 | 274 | ||
| 270 | /* Hardware Initialization */ | 275 | /* Hardware Initialization */ |
| 271 | 276 | ||
| @@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, | |||
| 388 | remote_node = of_graph_get_remote_port_parent(ep); | 393 | remote_node = of_graph_get_remote_port_parent(ep); |
| 389 | if (!remote_node || | 394 | if (!remote_node || |
| 390 | remote_node == parent || /* Ignore parent endpoint */ | 395 | remote_node == parent || /* Ignore parent endpoint */ |
| 391 | !of_device_is_available(remote_node)) | 396 | !of_device_is_available(remote_node)) { |
| 397 | of_node_put(remote_node); | ||
| 392 | continue; | 398 | continue; |
| 399 | } | ||
| 393 | 400 | ||
| 394 | count += meson_probe_remote(pdev, match, remote, remote_node); | 401 | count += meson_probe_remote(pdev, match, remote, remote_node); |
| 395 | 402 | ||
| @@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) | |||
| 408 | 415 | ||
| 409 | for_each_endpoint_of_node(np, ep) { | 416 | for_each_endpoint_of_node(np, ep) { |
| 410 | remote = of_graph_get_remote_port_parent(ep); | 417 | remote = of_graph_get_remote_port_parent(ep); |
| 411 | if (!remote || !of_device_is_available(remote)) | 418 | if (!remote || !of_device_is_available(remote)) { |
| 419 | of_node_put(remote); | ||
| 412 | continue; | 420 | continue; |
| 421 | } | ||
| 413 | 422 | ||
| 414 | count += meson_probe_remote(pdev, &match, np, remote); | 423 | count += meson_probe_remote(pdev, &match, np, remote); |
| 424 | of_node_put(remote); | ||
| 415 | } | 425 | } |
| 416 | 426 | ||
| 417 | if (count && !match) | 427 | if (count && !match) |
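Both meson loops leaked a device-tree node reference: of_graph_get_remote_port_parent() returns its node with the refcount raised, so the early continue paths must drop it just like the normal path. The same discipline applies to any get/put-style API; a small sketch with hypothetical, NULL-safe helpers:

    struct node { int usable; struct node *next; };

    /* Hypothetical helpers: get_remote() returns a node with a reference
     * held (or NULL); node_put() drops one reference and accepts NULL. */
    struct node *get_remote(struct node *ep);
    void node_put(struct node *n);
    int probe_one(struct node *n);

    static int probe_all(struct node *endpoints)
    {
        struct node *ep;
        int count = 0;

        for (ep = endpoints; ep; ep = ep->next) {
            struct node *remote = get_remote(ep);

            if (!remote || !remote->usable) {
                node_put(remote);       /* drop the ref on the skip path too */
                continue;
            }

            count += probe_one(remote);
            node_put(remote);           /* and on the normal path */
        }
        return count;
    }

The rockchip_rgb and sun4i_backend hunks further down fix the same class of leak on their own early-exit paths.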
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
| @@ -2435,6 +2435,38 @@ nv140_chipset = { | |||
| 2435 | }; | 2435 | }; |
| 2436 | 2436 | ||
| 2437 | static const struct nvkm_device_chip | 2437 | static const struct nvkm_device_chip |
| 2438 | nv162_chipset = { | ||
| 2439 | .name = "TU102", | ||
| 2440 | .bar = tu104_bar_new, | ||
| 2441 | .bios = nvkm_bios_new, | ||
| 2442 | .bus = gf100_bus_new, | ||
| 2443 | .devinit = tu104_devinit_new, | ||
| 2444 | .fault = tu104_fault_new, | ||
| 2445 | .fb = gv100_fb_new, | ||
| 2446 | .fuse = gm107_fuse_new, | ||
| 2447 | .gpio = gk104_gpio_new, | ||
| 2448 | .i2c = gm200_i2c_new, | ||
| 2449 | .ibus = gm200_ibus_new, | ||
| 2450 | .imem = nv50_instmem_new, | ||
| 2451 | .ltc = gp102_ltc_new, | ||
| 2452 | .mc = tu104_mc_new, | ||
| 2453 | .mmu = tu104_mmu_new, | ||
| 2454 | .pci = gp100_pci_new, | ||
| 2455 | .pmu = gp102_pmu_new, | ||
| 2456 | .therm = gp100_therm_new, | ||
| 2457 | .timer = gk20a_timer_new, | ||
| 2458 | .top = gk104_top_new, | ||
| 2459 | .ce[0] = tu104_ce_new, | ||
| 2460 | .ce[1] = tu104_ce_new, | ||
| 2461 | .ce[2] = tu104_ce_new, | ||
| 2462 | .ce[3] = tu104_ce_new, | ||
| 2463 | .ce[4] = tu104_ce_new, | ||
| 2464 | .disp = tu104_disp_new, | ||
| 2465 | .dma = gv100_dma_new, | ||
| 2466 | .fifo = tu104_fifo_new, | ||
| 2467 | }; | ||
| 2468 | |||
| 2469 | static const struct nvkm_device_chip | ||
| 2438 | nv164_chipset = { | 2470 | nv164_chipset = { |
| 2439 | .name = "TU104", | 2471 | .name = "TU104", |
| 2440 | .bar = tu104_bar_new, | 2472 | .bar = tu104_bar_new, |
| @@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, | |||
| 2950 | case 0x138: device->chip = &nv138_chipset; break; | 2982 | case 0x138: device->chip = &nv138_chipset; break; |
| 2951 | case 0x13b: device->chip = &nv13b_chipset; break; | 2983 | case 0x13b: device->chip = &nv13b_chipset; break; |
| 2952 | case 0x140: device->chip = &nv140_chipset; break; | 2984 | case 0x140: device->chip = &nv140_chipset; break; |
| 2985 | case 0x162: device->chip = &nv162_chipset; break; | ||
| 2953 | case 0x164: device->chip = &nv164_chipset; break; | 2986 | case 0x164: device->chip = &nv164_chipset; break; |
| 2954 | case 0x166: device->chip = &nv166_chipset; break; | 2987 | case 0x166: device->chip = &nv166_chipset; break; |
| 2955 | default: | 2988 | default: |
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
| @@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = { | |||
| 250 | #if defined(CONFIG_DEBUG_FS) | 250 | #if defined(CONFIG_DEBUG_FS) |
| 251 | .debugfs_init = qxl_debugfs_init, | 251 | .debugfs_init = qxl_debugfs_init, |
| 252 | #endif | 252 | #endif |
| 253 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 254 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 255 | .gem_prime_export = drm_gem_prime_export, | 253 | .gem_prime_export = drm_gem_prime_export, |
| 256 | .gem_prime_import = drm_gem_prime_import, | 254 | .gem_prime_import = drm_gem_prime_import, |
| 257 | .gem_prime_pin = qxl_gem_prime_pin, | 255 | .gem_prime_pin = qxl_gem_prime_pin, |
| 258 | .gem_prime_unpin = qxl_gem_prime_unpin, | 256 | .gem_prime_unpin = qxl_gem_prime_unpin, |
| 259 | .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, | ||
| 260 | .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, | ||
| 261 | .gem_prime_vmap = qxl_gem_prime_vmap, | 257 | .gem_prime_vmap = qxl_gem_prime_vmap, |
| 262 | .gem_prime_vunmap = qxl_gem_prime_vunmap, | 258 | .gem_prime_vunmap = qxl_gem_prime_vunmap, |
| 263 | .gem_prime_mmap = qxl_gem_prime_mmap, | 259 | .gem_prime_mmap = qxl_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c | |||
| @@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 38 | WARN_ONCE(1, "not implemented"); | 38 | WARN_ONCE(1, "not implemented"); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 42 | { | ||
| 43 | WARN_ONCE(1, "not implemented"); | ||
| 44 | return ERR_PTR(-ENOSYS); | ||
| 45 | } | ||
| 46 | |||
| 47 | struct drm_gem_object *qxl_gem_prime_import_sg_table( | ||
| 48 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 49 | struct sg_table *table) | ||
| 50 | { | ||
| 51 | WARN_ONCE(1, "not implemented"); | ||
| 52 | return ERR_PTR(-ENOSYS); | ||
| 53 | } | ||
| 54 | |||
| 55 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) | 41 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) |
| 56 | { | 42 | { |
| 57 | WARN_ONCE(1, "not implemented"); | 43 | WARN_ONCE(1, "not implemented"); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..37f93022a106 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
| @@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, | |||
| 113 | child_count++; | 113 | child_count++; |
| 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, | 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, |
| 115 | &panel, &bridge); | 115 | &panel, &bridge); |
| 116 | if (!ret) | 116 | if (!ret) { |
| 117 | of_node_put(endpoint); | ||
| 117 | break; | 118 | break; |
| 119 | } | ||
| 118 | } | 120 | } |
| 119 | 121 | ||
| 120 | of_node_put(port); | 122 | of_node_put(port); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
| @@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, | |||
| 786 | remote = of_graph_get_remote_port_parent(ep); | 786 | remote = of_graph_get_remote_port_parent(ep); |
| 787 | if (!remote) | 787 | if (!remote) |
| 788 | continue; | 788 | continue; |
| 789 | of_node_put(remote); | ||
| 789 | 790 | ||
| 790 | /* does this node match any registered engines? */ | 791 | /* does this node match any registered engines? */ |
| 791 | list_for_each_entry(frontend, &drv->frontend_list, list) { | 792 | list_for_each_entry(frontend, &drv->frontend_list, list) { |
| 792 | if (remote == frontend->node) { | 793 | if (remote == frontend->node) { |
| 793 | of_node_put(remote); | ||
| 794 | of_node_put(port); | 794 | of_node_put(port); |
| 795 | of_node_put(ep); | ||
| 795 | return frontend; | 796 | return frontend; |
| 796 | } | 797 | } |
| 797 | } | 798 | } |
| 798 | } | 799 | } |
| 799 | 800 | of_node_put(port); | |
| 800 | return ERR_PTR(-EINVAL); | 801 | return ERR_PTR(-EINVAL); |
| 801 | } | 802 | } |
| 802 | 803 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c | |||
| @@ -127,14 +127,10 @@ static struct drm_driver driver = { | |||
| 127 | #if defined(CONFIG_DEBUG_FS) | 127 | #if defined(CONFIG_DEBUG_FS) |
| 128 | .debugfs_init = virtio_gpu_debugfs_init, | 128 | .debugfs_init = virtio_gpu_debugfs_init, |
| 129 | #endif | 129 | #endif |
| 130 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 131 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 132 | .gem_prime_export = drm_gem_prime_export, | 130 | .gem_prime_export = drm_gem_prime_export, |
| 133 | .gem_prime_import = drm_gem_prime_import, | 131 | .gem_prime_import = drm_gem_prime_import, |
| 134 | .gem_prime_pin = virtgpu_gem_prime_pin, | 132 | .gem_prime_pin = virtgpu_gem_prime_pin, |
| 135 | .gem_prime_unpin = virtgpu_gem_prime_unpin, | 133 | .gem_prime_unpin = virtgpu_gem_prime_unpin, |
| 136 | .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, | ||
| 137 | .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, | ||
| 138 | .gem_prime_vmap = virtgpu_gem_prime_vmap, | 134 | .gem_prime_vmap = virtgpu_gem_prime_vmap, |
| 139 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, | 135 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, |
| 140 | .gem_prime_mmap = virtgpu_gem_prime_mmap, | 136 | .gem_prime_mmap = virtgpu_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
| @@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); | |||
| 372 | /* virtgpu_prime.c */ | 372 | /* virtgpu_prime.c */ |
| 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); | 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); |
| 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); | 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); |
| 375 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
| 376 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 377 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 378 | struct sg_table *sgt); | ||
| 379 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); | 375 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); |
| 380 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 376 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
| 381 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, | 377 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c | |||
| @@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 39 | WARN_ONCE(1, "not implemented"); | 39 | WARN_ONCE(1, "not implemented"); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 43 | { | ||
| 44 | WARN_ONCE(1, "not implemented"); | ||
| 45 | return ERR_PTR(-ENODEV); | ||
| 46 | } | ||
| 47 | |||
| 48 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 49 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 50 | struct sg_table *table) | ||
| 51 | { | ||
| 52 | WARN_ONCE(1, "not implemented"); | ||
| 53 | return ERR_PTR(-ENODEV); | ||
| 54 | } | ||
| 55 | |||
| 56 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) | 42 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) |
| 57 | { | 43 | { |
| 58 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); | 44 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); |
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
| @@ -21,6 +21,7 @@ config VGA_SWITCHEROO | |||
| 21 | bool "Laptop Hybrid Graphics - GPU switching support" | 21 | bool "Laptop Hybrid Graphics - GPU switching support" |
| 22 | depends on X86 | 22 | depends on X86 |
| 23 | depends on ACPI | 23 | depends on ACPI |
| 24 | depends on PCI | ||
| 24 | select VGA_ARB | 25 | select VGA_ARB |
| 25 | help | 26 | help |
| 26 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer | 27 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer |
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 0e30fa00204c..f9b8e3e23a8e 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c | |||
| @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, | |||
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | rv = lm80_read_value(client, LM80_REG_FANDIV); | 395 | rv = lm80_read_value(client, LM80_REG_FANDIV); |
| 396 | if (rv < 0) | 396 | if (rv < 0) { |
| 397 | mutex_unlock(&data->update_lock); | ||
| 397 | return rv; | 398 | return rv; |
| 399 | } | ||
| 398 | reg = (rv & ~(3 << (2 * (nr + 1)))) | 400 | reg = (rv & ~(3 << (2 * (nr + 1)))) |
| 399 | | (data->fan_div[nr] << (2 * (nr + 1))); | 401 | | (data->fan_div[nr] << (2 * (nr + 1))); |
| 400 | lm80_write_value(client, LM80_REG_FANDIV, reg); | 402 | lm80_write_value(client, LM80_REG_FANDIV, reg); |
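The lm80 fix is the classic lock leak on an error path: returning straight after a failed register read left update_lock held forever. Funnelling every exit through one unlock keeps that from recurring; a sketch of the goto-unlock idiom with hypothetical register helpers:

    #include <pthread.h>

    static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

    int read_reg(int reg);              /* hypothetical: < 0 on failure */
    void write_reg(int reg, int val);   /* hypothetical */

    /* Every exit path, including the read failure, goes through 'out'. */
    static int update_fan_div(int reg, int set_bits)
    {
        int rv;

        pthread_mutex_lock(&update_lock);

        rv = read_reg(reg);
        if (rv < 0)
            goto out;                   /* the bug was returning here directly */

        write_reg(reg, rv | set_bits);
        rv = 0;
    out:
        pthread_mutex_unlock(&update_lock);
        return rv;
    }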
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c3040079b1cb..4adec4ab7d06 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -44,8 +44,8 @@ | |||
| 44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 | 44 | * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 |
| 45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 | 45 | * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 |
| 46 | * (0xd451) | 46 | * (0xd451) |
| 47 | * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 | 47 | * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3 |
| 48 | * (0xd459) | 48 | * (0xd429) |
| 49 | * | 49 | * |
| 50 | * #temp lists the number of monitored temperature sources (first value) plus | 50 | * #temp lists the number of monitored temperature sources (first value) plus |
| 51 | * the number of directly connectable temperature sensors (second value). | 51 | * the number of directly connectable temperature sensors (second value). |
| @@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); | |||
| 138 | #define SIO_NCT6795_ID 0xd350 | 138 | #define SIO_NCT6795_ID 0xd350 |
| 139 | #define SIO_NCT6796_ID 0xd420 | 139 | #define SIO_NCT6796_ID 0xd420 |
| 140 | #define SIO_NCT6797_ID 0xd450 | 140 | #define SIO_NCT6797_ID 0xd450 |
| 141 | #define SIO_NCT6798_ID 0xd458 | 141 | #define SIO_NCT6798_ID 0xd428 |
| 142 | #define SIO_ID_MASK 0xFFF8 | 142 | #define SIO_ID_MASK 0xFFF8 |
| 143 | 143 | ||
| 144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; | 144 | enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; |
| @@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev) | |||
| 4508 | 4508 | ||
| 4509 | if (data->kind == nct6791 || data->kind == nct6792 || | 4509 | if (data->kind == nct6791 || data->kind == nct6792 || |
| 4510 | data->kind == nct6793 || data->kind == nct6795 || | 4510 | data->kind == nct6793 || data->kind == nct6795 || |
| 4511 | data->kind == nct6796) | 4511 | data->kind == nct6796 || data->kind == nct6797 || |
| 4512 | data->kind == nct6798) | ||
| 4512 | nct6791_enable_io_mapping(sioreg); | 4513 | nct6791_enable_io_mapping(sioreg); |
| 4513 | 4514 | ||
| 4514 | superio_exit(sioreg); | 4515 | superio_exit(sioreg); |
| @@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data) | |||
| 4644 | 4645 | ||
| 4645 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || | 4646 | if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || |
| 4646 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || | 4647 | sio_data->kind == nct6793 || sio_data->kind == nct6795 || |
| 4647 | sio_data->kind == nct6796) | 4648 | sio_data->kind == nct6796 || sio_data->kind == nct6797 || |
| 4649 | sio_data->kind == nct6798) | ||
| 4648 | nct6791_enable_io_mapping(sioaddr); | 4650 | nct6791_enable_io_mapping(sioaddr); |
| 4649 | 4651 | ||
| 4650 | superio_exit(sioaddr); | 4652 | superio_exit(sioaddr); |
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 423903f87955..391118c8aae8 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
| @@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev, | |||
| 380 | val *= 1000000ULL; | 380 | val *= 1000000ULL; |
| 381 | break; | 381 | break; |
| 382 | case 2: | 382 | case 2: |
| 383 | val = get_unaligned_be32(&power->update_tag) * | 383 | val = (u64)get_unaligned_be32(&power->update_tag) * |
| 384 | occ->powr_sample_time_us; | 384 | occ->powr_sample_time_us; |
| 385 | break; | 385 | break; |
| 386 | case 3: | 386 | case 3: |
| 387 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 387 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
| @@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev, | |||
| 425 | &power->update_tag); | 425 | &power->update_tag); |
| 426 | break; | 426 | break; |
| 427 | case 2: | 427 | case 2: |
| 428 | val = get_unaligned_be32(&power->update_tag) * | 428 | val = (u64)get_unaligned_be32(&power->update_tag) * |
| 429 | occ->powr_sample_time_us; | 429 | occ->powr_sample_time_us; |
| 430 | break; | 430 | break; |
| 431 | case 3: | 431 | case 3: |
| 432 | val = get_unaligned_be16(&power->value) * 1000000ULL; | 432 | val = get_unaligned_be16(&power->value) * 1000000ULL; |
| @@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 463 | &power->system.update_tag); | 463 | &power->system.update_tag); |
| 464 | break; | 464 | break; |
| 465 | case 2: | 465 | case 2: |
| 466 | val = get_unaligned_be32(&power->system.update_tag) * | 466 | val = (u64)get_unaligned_be32(&power->system.update_tag) * |
| 467 | occ->powr_sample_time_us; | 467 | occ->powr_sample_time_us; |
| 468 | break; | 468 | break; |
| 469 | case 3: | 469 | case 3: |
| 470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; | 470 | val = get_unaligned_be16(&power->system.value) * 1000000ULL; |
| @@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 477 | &power->proc.update_tag); | 477 | &power->proc.update_tag); |
| 478 | break; | 478 | break; |
| 479 | case 6: | 479 | case 6: |
| 480 | val = get_unaligned_be32(&power->proc.update_tag) * | 480 | val = (u64)get_unaligned_be32(&power->proc.update_tag) * |
| 481 | occ->powr_sample_time_us; | 481 | occ->powr_sample_time_us; |
| 482 | break; | 482 | break; |
| 483 | case 7: | 483 | case 7: |
| 484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; | 484 | val = get_unaligned_be16(&power->proc.value) * 1000000ULL; |
| @@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 491 | &power->vdd.update_tag); | 491 | &power->vdd.update_tag); |
| 492 | break; | 492 | break; |
| 493 | case 10: | 493 | case 10: |
| 494 | val = get_unaligned_be32(&power->vdd.update_tag) * | 494 | val = (u64)get_unaligned_be32(&power->vdd.update_tag) * |
| 495 | occ->powr_sample_time_us; | 495 | occ->powr_sample_time_us; |
| 496 | break; | 496 | break; |
| 497 | case 11: | 497 | case 11: |
| 498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; | 498 | val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; |
| @@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev, | |||
| 505 | &power->vdn.update_tag); | 505 | &power->vdn.update_tag); |
| 506 | break; | 506 | break; |
| 507 | case 14: | 507 | case 14: |
| 508 | val = get_unaligned_be32(&power->vdn.update_tag) * | 508 | val = (u64)get_unaligned_be32(&power->vdn.update_tag) * |
| 509 | occ->powr_sample_time_us; | 509 | occ->powr_sample_time_us; |
| 510 | break; | 510 | break; |
| 511 | case 15: | 511 | case 15: |
| 512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; | 512 | val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; |
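Every occ_show_power_* hunk adds a (u64) cast on the 32-bit update tag before multiplying by the sample time; without it the product is computed in 32 bits and silently truncated before being widened for the result. A standalone demonstration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t update_tag = 500000;       /* example counter value */
        uint32_t sample_time_us = 250000;   /* example sample period */

        /* Both operands are 32-bit, so the product wraps modulo 2^32
         * before the implicit widening to 64 bits. */
        uint64_t truncated = update_tag * sample_time_us;

        /* Casting one operand first forces a full 64-bit multiply. */
        uint64_t correct = (uint64_t)update_tag * sample_time_us;

        printf("truncated = %" PRIu64 "\ncorrect   = %" PRIu64 "\n",
               truncated, correct);
        return 0;
    }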
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 8844c9565d2a..7053be59ad2e 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c | |||
| @@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = { | |||
| 88 | .data = (void *)2 | 88 | .data = (void *)2 |
| 89 | }, | 89 | }, |
| 90 | { | 90 | { |
| 91 | .compatible = "ti,tmp422", | 91 | .compatible = "ti,tmp442", |
| 92 | .data = (void *)3 | 92 | .data = (void *)3 |
| 93 | }, | 93 | }, |
| 94 | { }, | 94 | { }, |
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b532e2c9cf5c..f8c00b94817f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c | |||
| @@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, | |||
| 901 | master->regs + | 901 | master->regs + |
| 902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 902 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
| 903 | 903 | ||
| 904 | if (!old_dyn_addr) | ||
| 905 | return 0; | ||
| 906 | |||
| 907 | master->addrs[data->index] = dev->info.dyn_addr; | 904 | master->addrs[data->index] = dev->info.dyn_addr; |
| 908 | 905 | ||
| 909 | return 0; | 906 | return 0; |
| @@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev) | |||
| 925 | return -ENOMEM; | 922 | return -ENOMEM; |
| 926 | 923 | ||
| 927 | data->index = pos; | 924 | data->index = pos; |
| 928 | master->addrs[pos] = dev->info.dyn_addr; | 925 | master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr; |
| 929 | master->free_pos &= ~BIT(pos); | 926 | master->free_pos &= ~BIT(pos); |
| 930 | i3c_dev_set_master_data(dev, data); | 927 | i3c_dev_set_master_data(dev, data); |
| 931 | 928 | ||
| 932 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), | 929 | writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]), |
| 933 | master->regs + | 930 | master->regs + |
| 934 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); | 931 | DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); |
| 935 | 932 | ||
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index bbd79b8b1a80..8889a4fdb454 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c | |||
| @@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) | |||
| 1556 | return PTR_ERR(master->pclk); | 1556 | return PTR_ERR(master->pclk); |
| 1557 | 1557 | ||
| 1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); | 1558 | master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); |
| 1559 | if (IS_ERR(master->pclk)) | 1559 | if (IS_ERR(master->sysclk)) |
| 1560 | return PTR_ERR(master->pclk); | 1560 | return PTR_ERR(master->sysclk); |
| 1561 | 1561 | ||
| 1562 | irq = platform_get_irq(pdev, 0); | 1562 | irq = platform_get_irq(pdev, 0); |
| 1563 | if (irq < 0) | 1563 | if (irq < 0) |
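The Cadence probe fix is a copy-paste slip: after requesting "sysclk" the error check still inspected master->pclk. Fetching clocks through one code path keeps the check mechanical; a hedged fragment in the same style as the hunk, with an illustrative name table:

    /* Fragment in the shape of the probe function; pdev and master come
     * from the surrounding context, clk_names is illustrative. */
    static const char * const clk_names[] = { "pclk", "sysclk" };
    struct clk *clks[ARRAY_SIZE(clk_names)];
    int i;

    for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
        clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
        if (IS_ERR(clks[i]))            /* always check the handle just fetched */
            return PTR_ERR(clks[i]);
    }
    master->pclk = clks[0];
    master->sysclk = clks[1];

The clk_bulk helpers (devm_clk_bulk_get() and friends) exist for the same reason when more clocks are involved.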
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 63a7cc00bae0..84f077b2b90a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, | |||
| 494 | id_priv->id.route.addr.dev_addr.transport = | 494 | id_priv->id.route.addr.dev_addr.transport = |
| 495 | rdma_node_get_transport(cma_dev->device->node_type); | 495 | rdma_node_get_transport(cma_dev->device->node_type); |
| 496 | list_add_tail(&id_priv->list, &cma_dev->id_list); | 496 | list_add_tail(&id_priv->list, &cma_dev->id_list); |
| 497 | rdma_restrack_kadd(&id_priv->res); | 497 | if (id_priv->res.kern_name) |
| 498 | rdma_restrack_kadd(&id_priv->res); | ||
| 499 | else | ||
| 500 | rdma_restrack_uadd(&id_priv->res); | ||
| 498 | } | 501 | } |
| 499 | 502 | ||
| 500 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, | 503 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e600fc23ae62..3c97a8b6bf1e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c | |||
| @@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, | |||
| 584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, | 584 | if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, |
| 585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) | 585 | atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) |
| 586 | goto err; | 586 | goto err; |
| 587 | if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && | ||
| 588 | nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, | ||
| 589 | pd->unsafe_global_rkey)) | ||
| 590 | goto err; | ||
| 591 | 587 | ||
| 592 | if (fill_res_name_pid(msg, res)) | 588 | if (fill_res_name_pid(msg, res)) |
| 593 | goto err; | 589 | goto err; |
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index be6b8e1257d0..69f8db66925e 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h | |||
| @@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj, | |||
| 106 | enum uverbs_obj_access access, | 106 | enum uverbs_obj_access access, |
| 107 | bool commit); | 107 | bool commit); |
| 108 | 108 | ||
| 109 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); | ||
| 110 | |||
| 109 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 111 | void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
| 110 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); | 112 | void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); |
| 111 | 113 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6b12cc5f97b2..3317300ab036 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, | |||
| 60 | { | 60 | { |
| 61 | int ret; | 61 | int ret; |
| 62 | 62 | ||
| 63 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
| 64 | return uverbs_copy_to_struct_or_zero( | ||
| 65 | attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); | ||
| 66 | |||
| 63 | if (copy_to_user(attrs->ucore.outbuf, resp, | 67 | if (copy_to_user(attrs->ucore.outbuf, resp, |
| 64 | min(attrs->ucore.outlen, resp_len))) | 68 | min(attrs->ucore.outlen, resp_len))) |
| 65 | return -EFAULT; | 69 | return -EFAULT; |
| @@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) | |||
| 1181 | goto out_put; | 1185 | goto out_put; |
| 1182 | } | 1186 | } |
| 1183 | 1187 | ||
| 1188 | if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) | ||
| 1189 | ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); | ||
| 1190 | |||
| 1184 | ret = 0; | 1191 | ret = 0; |
| 1185 | 1192 | ||
| 1186 | out_put: | 1193 | out_put: |
| @@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) | |||
| 2012 | return -ENOMEM; | 2019 | return -ENOMEM; |
| 2013 | 2020 | ||
| 2014 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); | 2021 | qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); |
| 2015 | if (!qp) | 2022 | if (!qp) { |
| 2023 | ret = -EINVAL; | ||
| 2016 | goto out; | 2024 | goto out; |
| 2025 | } | ||
| 2017 | 2026 | ||
| 2018 | is_ud = qp->qp_type == IB_QPT_UD; | 2027 | is_ud = qp->qp_type == IB_QPT_UD; |
| 2019 | sg_ind = 0; | 2028 | sg_ind = 0; |
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8c81ff698052..0ca04d224015 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c | |||
| @@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, | |||
| 144 | 0, uattr->len - len); | 144 | 0, uattr->len - len); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, | ||
| 148 | const struct uverbs_attr *attr) | ||
| 149 | { | ||
| 150 | struct bundle_priv *pbundle = | ||
| 151 | container_of(bundle, struct bundle_priv, bundle); | ||
| 152 | u16 flags; | ||
| 153 | |||
| 154 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | ||
| 155 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
| 156 | if (put_user(flags, | ||
| 157 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
| 158 | return -EFAULT; | ||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | |||
| 147 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, | 162 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, |
| 148 | const struct uverbs_api_attr *attr_uapi, | 163 | const struct uverbs_api_attr *attr_uapi, |
| 149 | struct uverbs_objs_arr_attr *attr, | 164 | struct uverbs_objs_arr_attr *attr, |
| @@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, | |||
| 456 | } | 471 | } |
| 457 | 472 | ||
| 458 | /* | 473 | /* |
| 474 | * Until the drivers are revised to use the bundle directly we have to | ||
| 475 | * assume that the driver wrote to its UHW_OUT and flag userspace | ||
| 476 | * appropriately. | ||
| 477 | */ | ||
| 478 | if (!ret && pbundle->method_elm->has_udata) { | ||
| 479 | const struct uverbs_attr *attr = | ||
| 480 | uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); | ||
| 481 | |||
| 482 | if (!IS_ERR(attr)) | ||
| 483 | ret = uverbs_set_output(&pbundle->bundle, attr); | ||
| 484 | } | ||
| 485 | |||
| 486 | /* | ||
| 459 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can | 487 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can |
| 460 | * not invoke the method because the request is not supported. No | 488 | * not invoke the method because the request is not supported. No |
| 461 | * other cases should return this code. | 489 | * other cases should return this code. |
| @@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, | |||
| 706 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | 734 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, |
| 707 | const void *from, size_t size) | 735 | const void *from, size_t size) |
| 708 | { | 736 | { |
| 709 | struct bundle_priv *pbundle = | ||
| 710 | container_of(bundle, struct bundle_priv, bundle); | ||
| 711 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 737 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
| 712 | u16 flags; | ||
| 713 | size_t min_size; | 738 | size_t min_size; |
| 714 | 739 | ||
| 715 | if (IS_ERR(attr)) | 740 | if (IS_ERR(attr)) |
| @@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, | |||
| 719 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) | 744 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) |
| 720 | return -EFAULT; | 745 | return -EFAULT; |
| 721 | 746 | ||
| 722 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | | 747 | return uverbs_set_output(bundle, attr); |
| 723 | UVERBS_ATTR_F_VALID_OUTPUT; | ||
| 724 | if (put_user(flags, | ||
| 725 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) | ||
| 726 | return -EFAULT; | ||
| 727 | |||
| 728 | return 0; | ||
| 729 | } | 748 | } |
| 730 | EXPORT_SYMBOL(uverbs_copy_to); | 749 | EXPORT_SYMBOL(uverbs_copy_to); |
| 731 | 750 | ||
| 751 | |||
| 752 | /* | ||
| 753 | * This is only used if the caller has directly used copy_to_user to write the | ||
| 754 | * data. It signals to user space that the buffer is filled in. | ||
| 755 | */ | ||
| 756 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) | ||
| 757 | { | ||
| 758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | ||
| 759 | |||
| 760 | if (IS_ERR(attr)) | ||
| 761 | return PTR_ERR(attr); | ||
| 762 | |||
| 763 | return uverbs_set_output(bundle, attr); | ||
| 764 | } | ||
| 765 | |||
| 732 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, | 766 | int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, |
| 733 | size_t idx, s64 lower_bound, u64 upper_bound, | 767 | size_t idx, s64 lower_bound, u64 upper_bound, |
| 734 | s64 *def_val) | 768 | s64 *def_val) |
| @@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, | |||
| 757 | { | 791 | { |
| 758 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); | 792 | const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); |
| 759 | 793 | ||
| 760 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), | 794 | if (size < attr->ptr_attr.len) { |
| 761 | attr->ptr_attr.len)) | 795 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, |
| 762 | return -EFAULT; | 796 | attr->ptr_attr.len - size)) |
| 797 | return -EFAULT; | ||
| 798 | } | ||
| 763 | return uverbs_copy_to(bundle, idx, from, size); | 799 | return uverbs_copy_to(bundle, idx, from, size); |
| 764 | } | 800 | } |
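
Taken together, the uverbs_ioctl.c hunks factor the "mark this attribute as valid output" logic into uverbs_set_output(), reuse it from uverbs_copy_to(), expose it to handlers that fill the buffer themselves via the new uverbs_output_written(), and make uverbs_copy_to_struct_or_zero() clear only the tail of the user buffer beyond size rather than the whole buffer before copying. A hedged sketch of how a method handler might use the resulting API; the handler name, response struct and attribute ID are illustrative, not taken from the kernel:

	struct example_resp {		/* hypothetical response layout */
		__u32 flags;
		__u32 reserved;
	};

	static int example_method_handler(struct uverbs_attr_bundle *attrs)
	{
		struct example_resp resp = {};
		int ret;

		/* ... fill in resp ... */

		/* copy resp and zero any extra space a newer userspace supplied */
		ret = uverbs_copy_to_struct_or_zero(attrs, EXAMPLE_ATTR_RESP,
						    &resp, sizeof(resp));
		if (ret)
			return ret;

		/*
		 * A handler that had already written the buffer with
		 * copy_to_user() would instead only flag it as filled in:
		 *
		 *	return uverbs_output_written(attrs, EXAMPLE_ATTR_RESP);
		 */
		return 0;
	}
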
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb0007aa0c27..2890a77339e1 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
| 690 | 690 | ||
| 691 | buf += sizeof(hdr); | 691 | buf += sizeof(hdr); |
| 692 | 692 | ||
| 693 | memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); | ||
| 693 | bundle.ufile = file; | 694 | bundle.ufile = file; |
| 694 | if (!method_elm->is_ex) { | 695 | if (!method_elm->is_ex) { |
| 695 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); | 696 | size_t in_len = hdr.in_words * 4 - sizeof(hdr); |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 82cb6b71ac7c..e3e9dd54caa2 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
| @@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
| 534 | { | 534 | { |
| 535 | struct mthca_ucontext *context; | 535 | struct mthca_ucontext *context; |
| 536 | 536 | ||
| 537 | qp = kmalloc(sizeof *qp, GFP_KERNEL); | 537 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
| 538 | if (!qp) | 538 | if (!qp) |
| 539 | return ERR_PTR(-ENOMEM); | 539 | return ERR_PTR(-ENOMEM); |
| 540 | 540 | ||
| @@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, | |||
| 600 | if (udata) | 600 | if (udata) |
| 601 | return ERR_PTR(-EINVAL); | 601 | return ERR_PTR(-EINVAL); |
| 602 | 602 | ||
| 603 | qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); | 603 | qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); |
| 604 | if (!qp) | 604 | if (!qp) |
| 605 | return ERR_PTR(-ENOMEM); | 605 | return ERR_PTR(-ENOMEM); |
| 606 | 606 | ||
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 42b8685c997e..3c633ab58052 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
| @@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) | |||
| 427 | 427 | ||
| 428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) | 428 | static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) |
| 429 | { | 429 | { |
| 430 | return (enum pvrdma_wr_opcode)op; | 430 | switch (op) { |
| 431 | case IB_WR_RDMA_WRITE: | ||
| 432 | return PVRDMA_WR_RDMA_WRITE; | ||
| 433 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
| 434 | return PVRDMA_WR_RDMA_WRITE_WITH_IMM; | ||
| 435 | case IB_WR_SEND: | ||
| 436 | return PVRDMA_WR_SEND; | ||
| 437 | case IB_WR_SEND_WITH_IMM: | ||
| 438 | return PVRDMA_WR_SEND_WITH_IMM; | ||
| 439 | case IB_WR_RDMA_READ: | ||
| 440 | return PVRDMA_WR_RDMA_READ; | ||
| 441 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
| 442 | return PVRDMA_WR_ATOMIC_CMP_AND_SWP; | ||
| 443 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
| 444 | return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; | ||
| 445 | case IB_WR_LSO: | ||
| 446 | return PVRDMA_WR_LSO; | ||
| 447 | case IB_WR_SEND_WITH_INV: | ||
| 448 | return PVRDMA_WR_SEND_WITH_INV; | ||
| 449 | case IB_WR_RDMA_READ_WITH_INV: | ||
| 450 | return PVRDMA_WR_RDMA_READ_WITH_INV; | ||
| 451 | case IB_WR_LOCAL_INV: | ||
| 452 | return PVRDMA_WR_LOCAL_INV; | ||
| 453 | case IB_WR_REG_MR: | ||
| 454 | return PVRDMA_WR_FAST_REG_MR; | ||
| 455 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | ||
| 456 | return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; | ||
| 457 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: | ||
| 458 | return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; | ||
| 459 | case IB_WR_REG_SIG_MR: | ||
| 460 | return PVRDMA_WR_REG_SIG_MR; | ||
| 461 | default: | ||
| 462 | return PVRDMA_WR_ERROR; | ||
| 463 | } | ||
| 431 | } | 464 | } |
| 432 | 465 | ||
| 433 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( | 466 | static inline enum ib_wc_status pvrdma_wc_status_to_ib( |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 3acf74cbe266..1ec3646087ba 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
| @@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, | |||
| 721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | 721 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) |
| 722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; | 722 | wqe_hdr->ex.imm_data = wr->ex.imm_data; |
| 723 | 723 | ||
| 724 | if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { | ||
| 725 | *bad_wr = wr; | ||
| 726 | ret = -EINVAL; | ||
| 727 | goto out; | ||
| 728 | } | ||
| 729 | |||
| 724 | switch (qp->ibqp.qp_type) { | 730 | switch (qp->ibqp.qp_type) { |
| 725 | case IB_QPT_GSI: | 731 | case IB_QPT_GSI: |
| 726 | case IB_QPT_UD: | 732 | case IB_QPT_UD: |
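
The two pvrdma hunks above work as a pair: ib_wr_opcode_to_pvrdma() no longer relies on the kernel and device enums happening to share values, and pvrdma_post_send() rejects anything that maps to the new PVRDMA_WR_ERROR sentinel instead of handing an unknown opcode to the device. As a generic sketch of the same idiom, mapping a kernel-internal enum onto a device ABI enum through an explicit switch with an error value; both enums here are made up for illustration:

	enum example_abi_op {		/* fixed device ABI values */
		EXAMPLE_ABI_WRITE = 0,
		EXAMPLE_ABI_READ  = 1,
		EXAMPLE_ABI_ERROR = 0xff,
	};

	enum example_kernel_op {	/* kernel-internal, free to grow or reorder */
		EXAMPLE_OP_WRITE,
		EXAMPLE_OP_READ,
		EXAMPLE_OP_NEW_THING,
	};

	static enum example_abi_op example_op_to_abi(enum example_kernel_op op)
	{
		switch (op) {
		case EXAMPLE_OP_WRITE:
			return EXAMPLE_ABI_WRITE;
		case EXAMPLE_OP_READ:
			return EXAMPLE_ABI_READ;
		default:
			return EXAMPLE_ABI_ERROR;	/* never cast blindly */
		}
	}

A caller then checks for the sentinel, reports the offending work request through *bad_wr and fails with -EINVAL, exactly as the pvrdma_post_send() hunk does.
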
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index a2e74feee2b2..fd64df5a57a5 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c | |||
| @@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) | |||
| 318 | 318 | ||
| 319 | /* Let the programs run for a couple of ms and check the engine status */ | 319 | /* Let the programs run for a couple of ms and check the engine status */ |
| 320 | usleep_range(3000, 6000); | 320 | usleep_range(3000, 6000); |
| 321 | lp55xx_read(chip, LP5523_REG_STATUS, &status); | 321 | ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); |
| 322 | if (ret) | ||
| 323 | return ret; | ||
| 322 | status &= LP5523_ENG_STATUS_MASK; | 324 | status &= LP5523_ENG_STATUS_MASK; |
| 323 | 325 | ||
| 324 | if (status != LP5523_ENG_STATUS_MASK) { | 326 | if (status != LP5523_ENG_STATUS_MASK) { |
diff --git a/drivers/md/md.c b/drivers/md/md.c index fd4af4de03b4..05ffffb8b769 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -207,15 +207,10 @@ static bool create_on_open = true; | |||
| 207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 207 | struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
| 208 | struct mddev *mddev) | 208 | struct mddev *mddev) |
| 209 | { | 209 | { |
| 210 | struct bio *b; | ||
| 211 | |||
| 212 | if (!mddev || !bioset_initialized(&mddev->bio_set)) | 210 | if (!mddev || !bioset_initialized(&mddev->bio_set)) |
| 213 | return bio_alloc(gfp_mask, nr_iovecs); | 211 | return bio_alloc(gfp_mask, nr_iovecs); |
| 214 | 212 | ||
| 215 | b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); | 213 | return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); |
| 216 | if (!b) | ||
| 217 | return NULL; | ||
| 218 | return b; | ||
| 219 | } | 214 | } |
| 220 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); | 215 | EXPORT_SYMBOL_GPL(bio_alloc_mddev); |
| 221 | 216 | ||
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index d01821a6906a..89d9c4c21037 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c | |||
| @@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) | |||
| 807 | struct vb2_v4l2_buffer *vbuf; | 807 | struct vb2_v4l2_buffer *vbuf; |
| 808 | unsigned long flags; | 808 | unsigned long flags; |
| 809 | 809 | ||
| 810 | cancel_delayed_work_sync(&dev->work_run); | 810 | if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) |
| 811 | cancel_delayed_work_sync(&dev->work_run); | ||
| 812 | |||
| 811 | for (;;) { | 813 | for (;;) { |
| 812 | if (V4L2_TYPE_IS_OUTPUT(q->type)) | 814 | if (V4L2_TYPE_IS_OUTPUT(q->type)) |
| 813 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); | 815 | vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); |
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 1441a73ce64c..90aad465f9ed 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c | |||
| @@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
| 287 | const struct v4l2_window *win; | 287 | const struct v4l2_window *win; |
| 288 | const struct v4l2_sdr_format *sdr; | 288 | const struct v4l2_sdr_format *sdr; |
| 289 | const struct v4l2_meta_format *meta; | 289 | const struct v4l2_meta_format *meta; |
| 290 | u32 planes; | ||
| 290 | unsigned i; | 291 | unsigned i; |
| 291 | 292 | ||
| 292 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); | 293 | pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); |
| @@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only) | |||
| 317 | prt_names(mp->field, v4l2_field_names), | 318 | prt_names(mp->field, v4l2_field_names), |
| 318 | mp->colorspace, mp->num_planes, mp->flags, | 319 | mp->colorspace, mp->num_planes, mp->flags, |
| 319 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); | 320 | mp->ycbcr_enc, mp->quantization, mp->xfer_func); |
| 320 | for (i = 0; i < mp->num_planes; i++) | 321 | planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); |
| 322 | for (i = 0; i < planes; i++) | ||
| 321 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, | 323 | printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, |
| 322 | mp->plane_fmt[i].bytesperline, | 324 | mp->plane_fmt[i].bytesperline, |
| 323 | mp->plane_fmt[i].sizeimage); | 325 | mp->plane_fmt[i].sizeimage); |
| @@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1551 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) | 1553 | if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) |
| 1552 | break; | 1554 | break; |
| 1553 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1556 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1557 | break; | ||
| 1554 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1558 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1555 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1559 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1560 | bytesperline); | ||
| 1556 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); | 1561 | return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); |
| 1557 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1562 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
| 1558 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) | 1563 | if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) |
| @@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1581 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) | 1586 | if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) |
| 1582 | break; | 1587 | break; |
| 1583 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1588 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1589 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1590 | break; | ||
| 1584 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1591 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1585 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1592 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1593 | bytesperline); | ||
| 1586 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); | 1594 | return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); |
| 1587 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1595 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
| 1588 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) | 1596 | if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) |
| @@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1648 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) | 1656 | if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) |
| 1649 | break; | 1657 | break; |
| 1650 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1658 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1659 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1660 | break; | ||
| 1651 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1661 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1652 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1662 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1663 | bytesperline); | ||
| 1653 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); | 1664 | return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); |
| 1654 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: | 1665 | case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
| 1655 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) | 1666 | if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) |
| @@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, | |||
| 1678 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) | 1689 | if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) |
| 1679 | break; | 1690 | break; |
| 1680 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); | 1691 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
| 1692 | if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) | ||
| 1693 | break; | ||
| 1681 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) | 1694 | for (i = 0; i < p->fmt.pix_mp.num_planes; i++) |
| 1682 | CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); | 1695 | CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], |
| 1696 | bytesperline); | ||
| 1683 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); | 1697 | return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); |
| 1684 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: | 1698 | case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
| 1685 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) | 1699 | if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) |
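
The v4l2-ioctl.c hunks all guard the same thing: pix_mp.num_planes comes from userspace, so the debug print clamps it with min_t() and the s_fmt/try_fmt paths bail out before indexing plane_fmt[], which only has VIDEO_MAX_PLANES entries; the per-plane CLEAR_AFTER_FIELD() is also rebased onto the plane element itself. A generic sketch of the bound-then-iterate pattern, with made-up names and a made-up limit:

	#define EXAMPLE_MAX_PLANES 8

	struct example_plane_fmt {
		u32 bytesperline;
		u32 sizeimage;
		u32 reserved;
	};

	static int example_set_planes(struct example_plane_fmt *fmt, u32 num_planes)
	{
		u32 i;

		if (num_planes > EXAMPLE_MAX_PLANES)	/* untrusted count from userspace */
			return -EINVAL;

		for (i = 0; i < num_planes; i++)	/* now provably in bounds */
			fmt[i].reserved = 0;

		return 0;
	}
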
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 6b212c8b78e7..2bfa3a903bf9 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c | |||
| @@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, | |||
| 394 | struct _vop_vdev *vdev = to_vopvdev(dev); | 394 | struct _vop_vdev *vdev = to_vopvdev(dev); |
| 395 | struct vop_device *vpdev = vdev->vpdev; | 395 | struct vop_device *vpdev = vdev->vpdev; |
| 396 | struct mic_device_ctrl __iomem *dc = vdev->dc; | 396 | struct mic_device_ctrl __iomem *dc = vdev->dc; |
| 397 | int i, err, retry; | 397 | int i, err, retry, queue_idx = 0; |
| 398 | 398 | ||
| 399 | /* We must have this many virtqueues. */ | 399 | /* We must have this many virtqueues. */ |
| 400 | if (nvqs > ioread8(&vdev->desc->num_vq)) | 400 | if (nvqs > ioread8(&vdev->desc->num_vq)) |
| 401 | return -ENOENT; | 401 | return -ENOENT; |
| 402 | 402 | ||
| 403 | for (i = 0; i < nvqs; ++i) { | 403 | for (i = 0; i < nvqs; ++i) { |
| 404 | if (!names[i]) { | ||
| 405 | vqs[i] = NULL; | ||
| 406 | continue; | ||
| 407 | } | ||
| 408 | |||
| 404 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", | 409 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", |
| 405 | __func__, i, names[i]); | 410 | __func__, i, names[i]); |
| 406 | vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], | 411 | vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], |
| 407 | ctx ? ctx[i] : false); | 412 | ctx ? ctx[i] : false); |
| 408 | if (IS_ERR(vqs[i])) { | 413 | if (IS_ERR(vqs[i])) { |
| 409 | err = PTR_ERR(vqs[i]); | 414 | err = PTR_ERR(vqs[i]); |
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index eebac35304c6..6e8edc9375dd 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
| @@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali) | |||
| 1322 | } | 1322 | } |
| 1323 | 1323 | ||
| 1324 | /* clk rate info is needed for setup_data_interface */ | 1324 | /* clk rate info is needed for setup_data_interface */ |
| 1325 | if (denali->clk_rate && denali->clk_x_rate) | 1325 | if (!denali->clk_rate || !denali->clk_x_rate) |
| 1326 | chip->options |= NAND_KEEP_TIMINGS; | 1326 | chip->options |= NAND_KEEP_TIMINGS; |
| 1327 | 1327 | ||
| 1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; | 1328 | chip->legacy.dummy_controller.ops = &denali_controller_ops; |
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 325b4414dccc..c9149a37f8f0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c | |||
| @@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf, | |||
| 593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); | 593 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | /* fsmc_select_chip - assert or deassert nCE */ | ||
| 597 | static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert) | ||
| 598 | { | ||
| 599 | u32 pc = readl(host->regs_va + FSMC_PC); | ||
| 600 | |||
| 601 | if (!assert) | ||
| 602 | writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
| 603 | else | ||
| 604 | writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC); | ||
| 605 | |||
| 606 | /* | ||
| 607 | * nCE line changes must be applied before returning from this | ||
| 608 | * function. | ||
| 609 | */ | ||
| 610 | mb(); | ||
| 611 | } | ||
| 612 | |||
| 613 | /* | 596 | /* |
| 614 | * fsmc_exec_op - hook called by the core to execute NAND operations | 597 | * fsmc_exec_op - hook called by the core to execute NAND operations |
| 615 | * | 598 | * |
| @@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
| 627 | 610 | ||
| 628 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); | 611 | pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); |
| 629 | 612 | ||
| 630 | fsmc_ce_ctrl(host, true); | ||
| 631 | |||
| 632 | for (op_id = 0; op_id < op->ninstrs; op_id++) { | 613 | for (op_id = 0; op_id < op->ninstrs; op_id++) { |
| 633 | instr = &op->instrs[op_id]; | 614 | instr = &op->instrs[op_id]; |
| 634 | 615 | ||
| @@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, | |||
| 686 | } | 667 | } |
| 687 | } | 668 | } |
| 688 | 669 | ||
| 689 | fsmc_ce_ctrl(host, false); | ||
| 690 | |||
| 691 | return ret; | 670 | return ret; |
| 692 | } | 671 | } |
| 693 | 672 | ||
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c index f92ae5aa2a54..9526d5b23c80 100644 --- a/drivers/mtd/nand/raw/jz4740_nand.c +++ b/drivers/mtd/nand/raw/jz4740_nand.c | |||
| @@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, | |||
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, | 262 | static int jz_nand_ioremap_resource(struct platform_device *pdev, |
| 263 | const char *name, struct resource **res, void *__iomem *base) | 263 | const char *name, struct resource **res, void __iomem **base) |
| 264 | { | 264 | { |
| 265 | int ret; | 265 | int ret; |
| 266 | 266 | ||
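
The jz4740 change is purely about where the sparse __iomem annotation lands: `void *__iomem *` declares a pointer to an __iomem-qualified plain `void *`, whereas the caller actually passes the address of a `void __iomem *`, which is spelled `void __iomem **`. A small sketch of a helper taking such an out-parameter; the helper itself is illustrative, though the platform calls it uses are real:

	static int example_ioremap_resource(struct platform_device *pdev,
					    const char *name,
					    void __iomem **base)	/* note the placement */
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (!res)
			return -ENOENT;

		*base = devm_ioremap_resource(&pdev->dev, res);
		return PTR_ERR_OR_ZERO(*base);
	}
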
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 2b2cf4e554d3..e5ffd5733540 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
| @@ -54,12 +54,12 @@ struct nvdimm { | |||
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | static inline enum nvdimm_security_state nvdimm_security_state( | 56 | static inline enum nvdimm_security_state nvdimm_security_state( |
| 57 | struct nvdimm *nvdimm, bool master) | 57 | struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) |
| 58 | { | 58 | { |
| 59 | if (!nvdimm->sec.ops) | 59 | if (!nvdimm->sec.ops) |
| 60 | return -ENXIO; | 60 | return -ENXIO; |
| 61 | 61 | ||
| 62 | return nvdimm->sec.ops->state(nvdimm, master); | 62 | return nvdimm->sec.ops->state(nvdimm, ptype); |
| 63 | } | 63 | } |
| 64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); | 64 | int nvdimm_security_freeze(struct nvdimm *nvdimm); |
| 65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) | 65 | #if IS_ENABLED(CONFIG_NVDIMM_KEYS) |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index deb1a66bf117..9bc585415d9b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) | |||
| 2041 | return ret; | 2041 | return ret; |
| 2042 | } | 2042 | } |
| 2043 | 2043 | ||
| 2044 | /* irq_queues covers admin queue */ | ||
| 2044 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | 2045 | static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) |
| 2045 | { | 2046 | { |
| 2046 | unsigned int this_w_queues = write_queues; | 2047 | unsigned int this_w_queues = write_queues; |
| 2047 | 2048 | ||
| 2049 | WARN_ON(!irq_queues); | ||
| 2050 | |||
| 2048 | /* | 2051 | /* |
| 2049 | * Setup read/write queue split | 2052 | * Setup read/write queue split, assign admin queue one independent |
| 2053 | * irq vector if irq_queues is > 1. | ||
| 2050 | */ | 2054 | */ |
| 2051 | if (irq_queues == 1) { | 2055 | if (irq_queues <= 2) { |
| 2052 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; | 2056 | dev->io_queues[HCTX_TYPE_DEFAULT] = 1; |
| 2053 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2057 | dev->io_queues[HCTX_TYPE_READ] = 0; |
| 2054 | return; | 2058 | return; |
| @@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) | |||
| 2056 | 2060 | ||
| 2057 | /* | 2061 | /* |
| 2058 | * If 'write_queues' is set, ensure it leaves room for at least | 2062 | * If 'write_queues' is set, ensure it leaves room for at least |
| 2059 | * one read queue | 2063 | * one read queue and one admin queue |
| 2060 | */ | 2064 | */ |
| 2061 | if (this_w_queues >= irq_queues) | 2065 | if (this_w_queues >= irq_queues) |
| 2062 | this_w_queues = irq_queues - 1; | 2066 | this_w_queues = irq_queues - 2; |
| 2063 | 2067 | ||
| 2064 | /* | 2068 | /* |
| 2065 | * If 'write_queues' is set to zero, reads and writes will share | 2069 | * If 'write_queues' is set to zero, reads and writes will share |
| 2066 | * a queue set. | 2070 | * a queue set. |
| 2067 | */ | 2071 | */ |
| 2068 | if (!this_w_queues) { | 2072 | if (!this_w_queues) { |
| 2069 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; | 2073 | dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; |
| 2070 | dev->io_queues[HCTX_TYPE_READ] = 0; | 2074 | dev->io_queues[HCTX_TYPE_READ] = 0; |
| 2071 | } else { | 2075 | } else { |
| 2072 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; | 2076 | dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; |
| 2073 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; | 2077 | dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; |
| 2074 | } | 2078 | } |
| 2075 | } | 2079 | } |
| 2076 | 2080 | ||
| @@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
| 2095 | this_p_queues = nr_io_queues - 1; | 2099 | this_p_queues = nr_io_queues - 1; |
| 2096 | irq_queues = 1; | 2100 | irq_queues = 1; |
| 2097 | } else { | 2101 | } else { |
| 2098 | irq_queues = nr_io_queues - this_p_queues; | 2102 | irq_queues = nr_io_queues - this_p_queues + 1; |
| 2099 | } | 2103 | } |
| 2100 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; | 2104 | dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; |
| 2101 | 2105 | ||
| @@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) | |||
| 2115 | * If we got a failure and we're down to asking for just | 2119 | * If we got a failure and we're down to asking for just |
| 2116 | * 1 + 1 queues, just ask for a single vector. We'll share | 2120 | * 1 + 1 queues, just ask for a single vector. We'll share |
| 2117 | * that between the single IO queue and the admin queue. | 2121 | * that between the single IO queue and the admin queue. |
| 2122 | * Otherwise, we assign one independent vector to admin queue. | ||
| 2118 | */ | 2123 | */ |
| 2119 | if (result >= 0 && irq_queues > 1) | 2124 | if (irq_queues > 1) |
| 2120 | irq_queues = irq_sets[0] + irq_sets[1] + 1; | 2125 | irq_queues = irq_sets[0] + irq_sets[1] + 1; |
| 2121 | 2126 | ||
| 2122 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, | 2127 | result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, |
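
The nvme-pci hunks reserve one interrupt vector for the admin queue whenever more than one vector is available: irq_queues now includes the admin vector, so nvme_calc_io_queues() subtracts it back out when splitting the remainder between the default (write) and read sets. A worked example and a stand-alone sketch of the same arithmetic, illustrative only and not the driver function:

	/*
	 * Example: nr_io_queues = 8, poll queues = 2, write_queues = 3
	 *   irq_queues          = 8 - 2 + 1 = 7   (6 I/O vectors + 1 admin vector)
	 *   default (write) set = 3
	 *   read set            = 7 - 3 - 1 = 3
	 */
	static void example_calc_io_queues(unsigned int irq_queues,
					   unsigned int write_queues,
					   unsigned int *def_q, unsigned int *read_q)
	{
		unsigned int w = write_queues;

		if (irq_queues <= 2) {		/* one I/O queue, possibly sharing the admin vector */
			*def_q = 1;
			*read_q = 0;
			return;
		}

		if (w >= irq_queues)		/* leave room for a read queue and the admin vector */
			w = irq_queues - 2;

		if (!w) {			/* reads and writes share one set */
			*def_q = irq_queues - 1;
			*read_q = 0;
		} else {
			*def_q = w;
			*read_q = irq_queues - w - 1;
		}
	}
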
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 44b37b202e39..ad0df786fe93 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c | |||
| @@ -1089,7 +1089,7 @@ out: | |||
| 1089 | 1089 | ||
| 1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) | 1090 | static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) |
| 1091 | { | 1091 | { |
| 1092 | int result; | 1092 | int result = 0; |
| 1093 | 1093 | ||
| 1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) | 1094 | if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) |
| 1095 | return 0; | 1095 | return 0; |
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index a09c1c3cf831..49b16f76d78e 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c | |||
| @@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np) | |||
| 207 | 207 | ||
| 208 | if (!of_node_check_flag(np, OF_OVERLAY)) { | 208 | if (!of_node_check_flag(np, OF_OVERLAY)) { |
| 209 | np->name = __of_get_property(np, "name", NULL); | 209 | np->name = __of_get_property(np, "name", NULL); |
| 210 | np->type = __of_get_property(np, "device_type", NULL); | ||
| 211 | if (!np->name) | 210 | if (!np->name) |
| 212 | np->name = "<NULL>"; | 211 | np->name = "<NULL>"; |
| 213 | if (!np->type) | ||
| 214 | np->type = "<NULL>"; | ||
| 215 | 212 | ||
| 216 | phandle = __of_get_property(np, "phandle", &sz); | 213 | phandle = __of_get_property(np, "phandle", &sz); |
| 217 | if (!phandle) | 214 | if (!phandle) |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7099c652c6a5..9cc1461aac7d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -314,12 +314,8 @@ static bool populate_node(const void *blob, | |||
| 314 | populate_properties(blob, offset, mem, np, pathp, dryrun); | 314 | populate_properties(blob, offset, mem, np, pathp, dryrun); |
| 315 | if (!dryrun) { | 315 | if (!dryrun) { |
| 316 | np->name = of_get_property(np, "name", NULL); | 316 | np->name = of_get_property(np, "name", NULL); |
| 317 | np->type = of_get_property(np, "device_type", NULL); | ||
| 318 | |||
| 319 | if (!np->name) | 317 | if (!np->name) |
| 320 | np->name = "<NULL>"; | 318 | np->name = "<NULL>"; |
| 321 | if (!np->type) | ||
| 322 | np->type = "<NULL>"; | ||
| 323 | } | 319 | } |
| 324 | 320 | ||
| 325 | *pnp = np; | 321 | *pnp = np; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 2b5ac43a5690..c423e94baf0f 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
| @@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs, | |||
| 423 | 423 | ||
| 424 | tchild->parent = target->np; | 424 | tchild->parent = target->np; |
| 425 | tchild->name = __of_get_property(node, "name", NULL); | 425 | tchild->name = __of_get_property(node, "name", NULL); |
| 426 | tchild->type = __of_get_property(node, "device_type", NULL); | ||
| 427 | 426 | ||
| 428 | if (!tchild->name) | 427 | if (!tchild->name) |
| 429 | tchild->name = "<NULL>"; | 428 | tchild->name = "<NULL>"; |
| 430 | if (!tchild->type) | ||
| 431 | tchild->type = "<NULL>"; | ||
| 432 | 429 | ||
| 433 | /* ignore obsolete "linux,phandle" */ | 430 | /* ignore obsolete "linux,phandle" */ |
| 434 | phandle = __of_get_property(node, "phandle", &size); | 431 | phandle = __of_get_property(node, "phandle", &size); |
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index d3185063d369..7eda43c66c91 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c | |||
| @@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
| 155 | dp->parent = parent; | 155 | dp->parent = parent; |
| 156 | 156 | ||
| 157 | dp->name = of_pdt_get_one_property(node, "name"); | 157 | dp->name = of_pdt_get_one_property(node, "name"); |
| 158 | dp->type = of_pdt_get_one_property(node, "device_type"); | ||
| 159 | dp->phandle = node; | 158 | dp->phandle = node; |
| 160 | 159 | ||
| 161 | dp->properties = of_pdt_build_prop_list(node); | 160 | dp->properties = of_pdt_build_prop_list(node); |
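
The four OF hunks above (dynamic.c, fdt.c, overlay.c, pdt.c) stop populating device_node->type with the `device_type` property, as part of retiring that cached pointer. Code that used to peek at np->type can read the property directly; a minimal sketch of such a check:

	static bool example_is_memory_node(const struct device_node *np)
	{
		const char *type = of_get_property(np, "device_type", NULL);

		return type && !strcmp(type, "memory");
	}
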
diff --git a/drivers/of/property.c b/drivers/of/property.c index 08430031bd28..8631efa1daa1 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, | |||
| 806 | 806 | ||
| 807 | if (!of_device_is_available(remote)) { | 807 | if (!of_device_is_available(remote)) { |
| 808 | pr_debug("not available for remote node\n"); | 808 | pr_debug("not available for remote node\n"); |
| 809 | of_node_put(remote); | ||
| 809 | return NULL; | 810 | return NULL; |
| 810 | } | 811 | } |
| 811 | 812 | ||
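
The of_graph_get_remote_node() fix drops the node reference on the "not available" early return; the success path still returns with the reference held, which the caller then owns. A hypothetical caller illustrating that contract:

	static int example_probe_remote(struct device_node *np)
	{
		struct device_node *remote;

		remote = of_graph_get_remote_node(np, 0, 0);
		if (!remote)
			return -ENODEV;

		/* ... inspect or bind against 'remote' ... */

		of_node_put(remote);	/* balance the reference taken by the helper */
		return 0;
	}
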
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 4310c7a4212e..2ab92409210a 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
| @@ -21,13 +21,14 @@ menuconfig PCI | |||
| 21 | support for PCI-X and the foundations for PCI Express support. | 21 | support for PCI-X and the foundations for PCI Express support. |
| 22 | Say 'Y' here unless you know what you are doing. | 22 | Say 'Y' here unless you know what you are doing. |
| 23 | 23 | ||
| 24 | if PCI | ||
| 25 | |||
| 24 | config PCI_DOMAINS | 26 | config PCI_DOMAINS |
| 25 | bool | 27 | bool |
| 26 | depends on PCI | 28 | depends on PCI |
| 27 | 29 | ||
| 28 | config PCI_DOMAINS_GENERIC | 30 | config PCI_DOMAINS_GENERIC |
| 29 | bool | 31 | bool |
| 30 | depends on PCI | ||
| 31 | select PCI_DOMAINS | 32 | select PCI_DOMAINS |
| 32 | 33 | ||
| 33 | config PCI_SYSCALL | 34 | config PCI_SYSCALL |
| @@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig" | |||
| 37 | 38 | ||
| 38 | config PCI_MSI | 39 | config PCI_MSI |
| 39 | bool "Message Signaled Interrupts (MSI and MSI-X)" | 40 | bool "Message Signaled Interrupts (MSI and MSI-X)" |
| 40 | depends on PCI | ||
| 41 | select GENERIC_MSI_IRQ | 41 | select GENERIC_MSI_IRQ |
| 42 | help | 42 | help |
| 43 | This allows device drivers to enable MSI (Message Signaled | 43 | This allows device drivers to enable MSI (Message Signaled |
| @@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN | |||
| 59 | config PCI_QUIRKS | 59 | config PCI_QUIRKS |
| 60 | default y | 60 | default y |
| 61 | bool "Enable PCI quirk workarounds" if EXPERT | 61 | bool "Enable PCI quirk workarounds" if EXPERT |
| 62 | depends on PCI | ||
| 63 | help | 62 | help |
| 64 | This enables workarounds for various PCI chipset bugs/quirks. | 63 | This enables workarounds for various PCI chipset bugs/quirks. |
| 65 | Disable this only if your target machine is unaffected by PCI | 64 | Disable this only if your target machine is unaffected by PCI |
| @@ -67,7 +66,7 @@ config PCI_QUIRKS | |||
| 67 | 66 | ||
| 68 | config PCI_DEBUG | 67 | config PCI_DEBUG |
| 69 | bool "PCI Debugging" | 68 | bool "PCI Debugging" |
| 70 | depends on PCI && DEBUG_KERNEL | 69 | depends on DEBUG_KERNEL |
| 71 | help | 70 | help |
| 72 | Say Y here if you want the PCI core to produce a bunch of debug | 71 | Say Y here if you want the PCI core to produce a bunch of debug |
| 73 | messages to the system log. Select this if you are having a | 72 | messages to the system log. Select this if you are having a |
| @@ -77,7 +76,6 @@ config PCI_DEBUG | |||
| 77 | 76 | ||
| 78 | config PCI_REALLOC_ENABLE_AUTO | 77 | config PCI_REALLOC_ENABLE_AUTO |
| 79 | bool "Enable PCI resource re-allocation detection" | 78 | bool "Enable PCI resource re-allocation detection" |
| 80 | depends on PCI | ||
| 81 | depends on PCI_IOV | 79 | depends on PCI_IOV |
| 82 | help | 80 | help |
| 83 | Say Y here if you want the PCI core to detect if PCI resource | 81 | Say Y here if you want the PCI core to detect if PCI resource |
| @@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO | |||
| 90 | 88 | ||
| 91 | config PCI_STUB | 89 | config PCI_STUB |
| 92 | tristate "PCI Stub driver" | 90 | tristate "PCI Stub driver" |
| 93 | depends on PCI | ||
| 94 | help | 91 | help |
| 95 | Say Y or M here if you want be able to reserve a PCI device | 92 | Say Y or M here if you want be able to reserve a PCI device |
| 96 | when it is going to be assigned to a guest operating system. | 93 | when it is going to be assigned to a guest operating system. |
| @@ -99,7 +96,6 @@ config PCI_STUB | |||
| 99 | 96 | ||
| 100 | config PCI_PF_STUB | 97 | config PCI_PF_STUB |
| 101 | tristate "PCI PF Stub driver" | 98 | tristate "PCI PF Stub driver" |
| 102 | depends on PCI | ||
| 103 | depends on PCI_IOV | 99 | depends on PCI_IOV |
| 104 | help | 100 | help |
| 105 | Say Y or M here if you want to enable support for devices that | 101 | Say Y or M here if you want to enable support for devices that |
| @@ -111,7 +107,7 @@ config PCI_PF_STUB | |||
| 111 | 107 | ||
| 112 | config XEN_PCIDEV_FRONTEND | 108 | config XEN_PCIDEV_FRONTEND |
| 113 | tristate "Xen PCI Frontend" | 109 | tristate "Xen PCI Frontend" |
| 114 | depends on PCI && X86 && XEN | 110 | depends on X86 && XEN |
| 115 | select PCI_XEN | 111 | select PCI_XEN |
| 116 | select XEN_XENBUS_FRONTEND | 112 | select XEN_XENBUS_FRONTEND |
| 117 | default y | 113 | default y |
| @@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL | |||
| 133 | 129 | ||
| 134 | config PCI_IOV | 130 | config PCI_IOV |
| 135 | bool "PCI IOV support" | 131 | bool "PCI IOV support" |
| 136 | depends on PCI | ||
| 137 | select PCI_ATS | 132 | select PCI_ATS |
| 138 | help | 133 | help |
| 139 | I/O Virtualization is a PCI feature supported by some devices | 134 | I/O Virtualization is a PCI feature supported by some devices |
| @@ -144,7 +139,6 @@ config PCI_IOV | |||
| 144 | 139 | ||
| 145 | config PCI_PRI | 140 | config PCI_PRI |
| 146 | bool "PCI PRI support" | 141 | bool "PCI PRI support" |
| 147 | depends on PCI | ||
| 148 | select PCI_ATS | 142 | select PCI_ATS |
| 149 | help | 143 | help |
| 150 | PRI is the PCI Page Request Interface. It allows PCI devices that are | 144 | PRI is the PCI Page Request Interface. It allows PCI devices that are |
| @@ -154,7 +148,6 @@ config PCI_PRI | |||
| 154 | 148 | ||
| 155 | config PCI_PASID | 149 | config PCI_PASID |
| 156 | bool "PCI PASID support" | 150 | bool "PCI PASID support" |
| 157 | depends on PCI | ||
| 158 | select PCI_ATS | 151 | select PCI_ATS |
| 159 | help | 152 | help |
| 160 | Process Address Space Identifiers (PASIDs) can be used by PCI devices | 153 | Process Address Space Identifiers (PASIDs) can be used by PCI devices |
| @@ -167,7 +160,7 @@ config PCI_PASID | |||
| 167 | 160 | ||
| 168 | config PCI_P2PDMA | 161 | config PCI_P2PDMA |
| 169 | bool "PCI peer-to-peer transfer support" | 162 | bool "PCI peer-to-peer transfer support" |
| 170 | depends on PCI && ZONE_DEVICE | 163 | depends on ZONE_DEVICE |
| 171 | select GENERIC_ALLOCATOR | 164 | select GENERIC_ALLOCATOR |
| 172 | help | 165 | help |
| 173 | Enables drivers to do PCI peer-to-peer transactions to and from | 166 | Enables drivers to do PCI peer-to-peer transactions to and from |
| @@ -184,12 +177,11 @@ config PCI_P2PDMA | |||
| 184 | 177 | ||
| 185 | config PCI_LABEL | 178 | config PCI_LABEL |
| 186 | def_bool y if (DMI || ACPI) | 179 | def_bool y if (DMI || ACPI) |
| 187 | depends on PCI | ||
| 188 | select NLS | 180 | select NLS |
| 189 | 181 | ||
| 190 | config PCI_HYPERV | 182 | config PCI_HYPERV |
| 191 | tristate "Hyper-V PCI Frontend" | 183 | tristate "Hyper-V PCI Frontend" |
| 192 | depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 | 184 | depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 |
| 193 | help | 185 | help |
| 194 | The PCI device frontend driver allows the kernel to import arbitrary | 186 | The PCI device frontend driver allows the kernel to import arbitrary |
| 195 | PCI devices from a PCI backend to support PCI driver domains. | 187 | PCI devices from a PCI backend to support PCI driver domains. |
| @@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig" | |||
| 198 | source "drivers/pci/controller/Kconfig" | 190 | source "drivers/pci/controller/Kconfig" |
| 199 | source "drivers/pci/endpoint/Kconfig" | 191 | source "drivers/pci/endpoint/Kconfig" |
| 200 | source "drivers/pci/switch/Kconfig" | 192 | source "drivers/pci/switch/Kconfig" |
| 193 | |||
| 194 | endif | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 7a1c8a09efa5..4c0b47867258 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1168 | const struct irq_affinity *affd) | 1168 | const struct irq_affinity *affd) |
| 1169 | { | 1169 | { |
| 1170 | static const struct irq_affinity msi_default_affd; | 1170 | static const struct irq_affinity msi_default_affd; |
| 1171 | int vecs = -ENOSPC; | 1171 | int msix_vecs = -ENOSPC; |
| 1172 | int msi_vecs = -ENOSPC; | ||
| 1172 | 1173 | ||
| 1173 | if (flags & PCI_IRQ_AFFINITY) { | 1174 | if (flags & PCI_IRQ_AFFINITY) { |
| 1174 | if (!affd) | 1175 | if (!affd) |
| @@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1179 | } | 1180 | } |
| 1180 | 1181 | ||
| 1181 | if (flags & PCI_IRQ_MSIX) { | 1182 | if (flags & PCI_IRQ_MSIX) { |
| 1182 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1183 | msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, |
| 1183 | affd); | 1184 | max_vecs, affd); |
| 1184 | if (vecs > 0) | 1185 | if (msix_vecs > 0) |
| 1185 | return vecs; | 1186 | return msix_vecs; |
| 1186 | } | 1187 | } |
| 1187 | 1188 | ||
| 1188 | if (flags & PCI_IRQ_MSI) { | 1189 | if (flags & PCI_IRQ_MSI) { |
| 1189 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); | 1190 | msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, |
| 1190 | if (vecs > 0) | 1191 | affd); |
| 1191 | return vecs; | 1192 | if (msi_vecs > 0) |
| 1193 | return msi_vecs; | ||
| 1192 | } | 1194 | } |
| 1193 | 1195 | ||
| 1194 | /* use legacy irq if allowed */ | 1196 | /* use legacy irq if allowed */ |
| @@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1199 | } | 1201 | } |
| 1200 | } | 1202 | } |
| 1201 | 1203 | ||
| 1202 | return vecs; | 1204 | if (msix_vecs == -ENOSPC) |
| 1205 | return -ENOSPC; | ||
| 1206 | return msi_vecs; | ||
| 1203 | } | 1207 | } |
| 1204 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); | 1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); |
| 1205 | 1209 | ||
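
The pci_alloc_irq_vectors_affinity() change keeps the MSI-X and MSI results in separate variables so that, when every allocation fails and legacy IRQs are not allowed, an -ENOSPC from MSI-X is not overwritten by a different MSI error code. That distinction matters to callers that treat -ENOSPC as "retry with fewer vectors"; a hypothetical caller:

	static int example_request_exact_vectors(struct pci_dev *pdev,
						 unsigned int want)
	{
		int ret;

		while (want) {
			/* ask for exactly 'want' vectors */
			ret = pci_alloc_irq_vectors(pdev, want, want,
						    PCI_IRQ_MSIX | PCI_IRQ_MSI);
			if (ret > 0)
				return ret;		/* got them */
			if (ret != -ENOSPC)
				return ret;		/* unrelated failure: give up */
			want--;				/* not enough vectors: try fewer */
		}
		return -ENOSPC;
	}
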
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..c25acace7d91 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str) | |||
| 6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { | 6195 | } else if (!strncmp(str, "pcie_scan_all", 13)) { |
| 6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); | 6196 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); |
| 6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { | 6197 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
| 6198 | disable_acs_redir_param = str + 18; | 6198 | disable_acs_redir_param = |
| 6199 | kstrdup(str + 18, GFP_KERNEL); | ||
| 6199 | } else { | 6200 | } else { |
| 6200 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 6201 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
| 6201 | str); | 6202 | str); |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index e3b62c2ee8d1..5e2109c54c7c 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL | |||
| 1009 | 1009 | ||
| 1010 | config INTEL_IPS | 1010 | config INTEL_IPS |
| 1011 | tristate "Intel Intelligent Power Sharing" | 1011 | tristate "Intel Intelligent Power Sharing" |
| 1012 | depends on ACPI | 1012 | depends on ACPI && PCI |
| 1013 | ---help--- | 1013 | ---help--- |
| 1014 | Intel Calpella platforms support dynamic power sharing between the | 1014 | Intel Calpella platforms support dynamic power sharing between the |
| 1015 | CPU and GPU, maximizing performance in a given TDP. This driver, | 1015 | CPU and GPU, maximizing performance in a given TDP. This driver, |
| @@ -1135,7 +1135,7 @@ config SAMSUNG_Q10 | |||
| 1135 | 1135 | ||
| 1136 | config APPLE_GMUX | 1136 | config APPLE_GMUX |
| 1137 | tristate "Apple Gmux Driver" | 1137 | tristate "Apple Gmux Driver" |
| 1138 | depends on ACPI | 1138 | depends on ACPI && PCI |
| 1139 | depends on PNP | 1139 | depends on PNP |
| 1140 | depends on BACKLIGHT_CLASS_DEVICE | 1140 | depends on BACKLIGHT_CLASS_DEVICE |
| 1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE | 1141 | depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE |
| @@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT | |||
| 1174 | 1174 | ||
| 1175 | config INTEL_PMC_IPC | 1175 | config INTEL_PMC_IPC |
| 1176 | tristate "Intel PMC IPC Driver" | 1176 | tristate "Intel PMC IPC Driver" |
| 1177 | depends on ACPI | 1177 | depends on ACPI && PCI |
| 1178 | ---help--- | 1178 | ---help--- |
| 1179 | This driver provides support for PMC control on some Intel platforms. | 1179 | This driver provides support for PMC control on some Intel platforms. |
| 1180 | The PMC is an ARC processor which defines IPC commands for communication | 1180 | The PMC is an ARC processor which defines IPC commands for communication |
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 183fc42a510a..2d7cd344f3bf 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c | |||
| @@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, | |||
| 153 | const bool * ctx, | 153 | const bool * ctx, |
| 154 | struct irq_affinity *desc) | 154 | struct irq_affinity *desc) |
| 155 | { | 155 | { |
| 156 | int i, ret; | 156 | int i, ret, queue_idx = 0; |
| 157 | 157 | ||
| 158 | for (i = 0; i < nvqs; ++i) { | 158 | for (i = 0; i < nvqs; ++i) { |
| 159 | vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], | 159 | if (!names[i]) { |
| 160 | vqs[i] = NULL; | ||
| 161 | continue; | ||
| 162 | } | ||
| 163 | |||
| 164 | vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
| 160 | ctx ? ctx[i] : false); | 165 | ctx ? ctx[i] : false); |
| 161 | if (IS_ERR(vqs[i])) { | 166 | if (IS_ERR(vqs[i])) { |
| 162 | ret = PTR_ERR(vqs[i]); | 167 | ret = PTR_ERR(vqs[i]); |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index fc9dbad476c0..ae1d56da671d 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
| @@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 635 | { | 635 | { |
| 636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); | 636 | struct virtio_ccw_device *vcdev = to_vc_device(vdev); |
| 637 | unsigned long *indicatorp = NULL; | 637 | unsigned long *indicatorp = NULL; |
| 638 | int ret, i; | 638 | int ret, i, queue_idx = 0; |
| 639 | struct ccw1 *ccw; | 639 | struct ccw1 *ccw; |
| 640 | 640 | ||
| 641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); | 641 | ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
| @@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 643 | return -ENOMEM; | 643 | return -ENOMEM; |
| 644 | 644 | ||
| 645 | for (i = 0; i < nvqs; ++i) { | 645 | for (i = 0; i < nvqs; ++i) { |
| 646 | vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], | 646 | if (!names[i]) { |
| 647 | ctx ? ctx[i] : false, ccw); | 647 | vqs[i] = NULL; |
| 648 | continue; | ||
| 649 | } | ||
| 650 | |||
| 651 | vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], | ||
| 652 | names[i], ctx ? ctx[i] : false, | ||
| 653 | ccw); | ||
| 648 | if (IS_ERR(vqs[i])) { | 654 | if (IS_ERR(vqs[i])) { |
| 649 | ret = PTR_ERR(vqs[i]); | 655 | ret = PTR_ERR(vqs[i]); |
| 650 | vqs[i] = NULL; | 656 | vqs[i] = NULL; |
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 8a20411699d9..75e1273a44b3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
| @@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, | |||
| 1144 | } | 1144 | } |
| 1145 | 1145 | ||
| 1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | 1146 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, |
| 1147 | unsigned int tid, int pg_idx, bool reply) | 1147 | unsigned int tid, int pg_idx) |
| 1148 | { | 1148 | { |
| 1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1149 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
| 1150 | GFP_KERNEL); | 1150 | GFP_KERNEL); |
| @@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
| 1160 | req = (struct cpl_set_tcb_field *)skb->head; | 1160 | req = (struct cpl_set_tcb_field *)skb->head; |
| 1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1161 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
| 1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1162 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 1163 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1163 | req->reply = V_NO_REPLY(1); |
| 1164 | req->cpu_idx = 0; | 1164 | req->cpu_idx = 0; |
| 1165 | req->word = htons(31); | 1165 | req->word = htons(31); |
| 1166 | req->mask = cpu_to_be64(0xF0000000); | 1166 | req->mask = cpu_to_be64(0xF0000000); |
| @@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, | |||
| 1177 | * @tid: connection id | 1177 | * @tid: connection id |
| 1178 | * @hcrc: header digest enabled | 1178 | * @hcrc: header digest enabled |
| 1179 | * @dcrc: data digest enabled | 1179 | * @dcrc: data digest enabled |
| 1180 | * @reply: request reply from h/w | ||
| 1181 | * set up the iscsi digest settings for a connection identified by tid | 1180 | * set up the iscsi digest settings for a connection identified by tid |
| 1182 | */ | 1181 | */ |
| 1183 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 1182 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
| 1184 | int hcrc, int dcrc, int reply) | 1183 | int hcrc, int dcrc) |
| 1185 | { | 1184 | { |
| 1186 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, | 1185 | struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, |
| 1187 | GFP_KERNEL); | 1186 | GFP_KERNEL); |
| @@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 1197 | req = (struct cpl_set_tcb_field *)skb->head; | 1196 | req = (struct cpl_set_tcb_field *)skb->head; |
| 1198 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); | 1197 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); |
| 1199 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 1198 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 1200 | req->reply = V_NO_REPLY(reply ? 0 : 1); | 1199 | req->reply = V_NO_REPLY(1); |
| 1201 | req->cpu_idx = 0; | 1200 | req->cpu_idx = 0; |
| 1202 | req->word = htons(31); | 1201 | req->word = htons(31); |
| 1203 | req->mask = cpu_to_be64(0x0F000000); | 1202 | req->mask = cpu_to_be64(0x0F000000); |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 49f8028ac524..d26f50af00ea 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 1548 | struct cxgbi_sock *csk; | 1548 | struct cxgbi_sock *csk; |
| 1549 | 1549 | ||
| 1550 | csk = lookup_tid(t, tid); | 1550 | csk = lookup_tid(t, tid); |
| 1551 | if (!csk) | 1551 | if (!csk) { |
| 1552 | pr_err("can't find conn. for tid %u.\n", tid); | 1552 | pr_err("can't find conn. for tid %u.\n", tid); |
| 1553 | return; | ||
| 1554 | } | ||
| 1553 | 1555 | ||
| 1554 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 1556 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 1555 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", | 1557 | "csk 0x%p,%u,%lx,%u, status 0x%x.\n", |
| 1556 | csk, csk->state, csk->flags, csk->tid, rpl->status); | 1558 | csk, csk->state, csk->flags, csk->tid, rpl->status); |
| 1557 | 1559 | ||
| 1558 | if (rpl->status != CPL_ERR_NONE) | 1560 | if (rpl->status != CPL_ERR_NONE) { |
| 1559 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", | 1561 | pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", |
| 1560 | csk, tid, rpl->status); | 1562 | csk, tid, rpl->status); |
| 1563 | csk->err = -EINVAL; | ||
| 1564 | } | ||
| 1565 | |||
| 1566 | complete(&csk->cmpl); | ||
| 1561 | 1567 | ||
| 1562 | __kfree_skb(skb); | 1568 | __kfree_skb(skb); |
| 1563 | } | 1569 | } |
| @@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, | |||
| 1983 | } | 1989 | } |
| 1984 | 1990 | ||
| 1985 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | 1991 | static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, |
| 1986 | int pg_idx, bool reply) | 1992 | int pg_idx) |
| 1987 | { | 1993 | { |
| 1988 | struct sk_buff *skb; | 1994 | struct sk_buff *skb; |
| 1989 | struct cpl_set_tcb_field *req; | 1995 | struct cpl_set_tcb_field *req; |
| @@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
| 1999 | req = (struct cpl_set_tcb_field *)skb->head; | 2005 | req = (struct cpl_set_tcb_field *)skb->head; |
| 2000 | INIT_TP_WR(req, csk->tid); | 2006 | INIT_TP_WR(req, csk->tid); |
| 2001 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | 2007 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); |
| 2002 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2008 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
| 2003 | req->word_cookie = htons(0); | 2009 | req->word_cookie = htons(0); |
| 2004 | req->mask = cpu_to_be64(0x3 << 8); | 2010 | req->mask = cpu_to_be64(0x3 << 8); |
| 2005 | req->val = cpu_to_be64(pg_idx << 8); | 2011 | req->val = cpu_to_be64(pg_idx << 8); |
| @@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2008 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2014 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 2009 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); | 2015 | "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); |
| 2010 | 2016 | ||
| 2017 | reinit_completion(&csk->cmpl); | ||
| 2011 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2018 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
| 2012 | return 0; | 2019 | wait_for_completion(&csk->cmpl); |
| 2020 | |||
| 2021 | return csk->err; | ||
| 2013 | } | 2022 | } |
| 2014 | 2023 | ||
| 2015 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | 2024 | static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, |
| 2016 | int hcrc, int dcrc, int reply) | 2025 | int hcrc, int dcrc) |
| 2017 | { | 2026 | { |
| 2018 | struct sk_buff *skb; | 2027 | struct sk_buff *skb; |
| 2019 | struct cpl_set_tcb_field *req; | 2028 | struct cpl_set_tcb_field *req; |
| @@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2031 | req = (struct cpl_set_tcb_field *)skb->head; | 2040 | req = (struct cpl_set_tcb_field *)skb->head; |
| 2032 | INIT_TP_WR(req, tid); | 2041 | INIT_TP_WR(req, tid); |
| 2033 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); | 2042 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); |
| 2034 | req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); | 2043 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); |
| 2035 | req->word_cookie = htons(0); | 2044 | req->word_cookie = htons(0); |
| 2036 | req->mask = cpu_to_be64(0x3 << 4); | 2045 | req->mask = cpu_to_be64(0x3 << 4); |
| 2037 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | | 2046 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | |
| @@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, | |||
| 2041 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 2050 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 2042 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); | 2051 | "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); |
| 2043 | 2052 | ||
| 2053 | reinit_completion(&csk->cmpl); | ||
| 2044 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); | 2054 | cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); |
| 2045 | return 0; | 2055 | wait_for_completion(&csk->cmpl); |
| 2056 | |||
| 2057 | return csk->err; | ||
| 2046 | } | 2058 | } |
| 2047 | 2059 | ||
| 2048 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) | 2060 | static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 75f876409fb9..245742557c03 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
| @@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) | |||
| 573 | skb_queue_head_init(&csk->receive_queue); | 573 | skb_queue_head_init(&csk->receive_queue); |
| 574 | skb_queue_head_init(&csk->write_queue); | 574 | skb_queue_head_init(&csk->write_queue); |
| 575 | timer_setup(&csk->retry_timer, NULL, 0); | 575 | timer_setup(&csk->retry_timer, NULL, 0); |
| 576 | init_completion(&csk->cmpl); | ||
| 576 | rwlock_init(&csk->callback_lock); | 577 | rwlock_init(&csk->callback_lock); |
| 577 | csk->cdev = cdev; | 578 | csk->cdev = cdev; |
| 578 | csk->flags = 0; | 579 | csk->flags = 0; |
| @@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | |||
| 2251 | if (!err && conn->hdrdgst_en) | 2252 | if (!err && conn->hdrdgst_en) |
| 2252 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2253 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
| 2253 | conn->hdrdgst_en, | 2254 | conn->hdrdgst_en, |
| 2254 | conn->datadgst_en, 0); | 2255 | conn->datadgst_en); |
| 2255 | break; | 2256 | break; |
| 2256 | case ISCSI_PARAM_DATADGST_EN: | 2257 | case ISCSI_PARAM_DATADGST_EN: |
| 2257 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2258 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
| 2258 | if (!err && conn->datadgst_en) | 2259 | if (!err && conn->datadgst_en) |
| 2259 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2260 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
| 2260 | conn->hdrdgst_en, | 2261 | conn->hdrdgst_en, |
| 2261 | conn->datadgst_en, 0); | 2262 | conn->datadgst_en); |
| 2262 | break; | 2263 | break; |
| 2263 | case ISCSI_PARAM_MAX_R2T: | 2264 | case ISCSI_PARAM_MAX_R2T: |
| 2264 | return iscsi_tcp_set_max_r2t(conn, buf); | 2265 | return iscsi_tcp_set_max_r2t(conn, buf); |
| @@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, | |||
| 2384 | 2385 | ||
| 2385 | ppm = csk->cdev->cdev2ppm(csk->cdev); | 2386 | ppm = csk->cdev->cdev2ppm(csk->cdev); |
| 2386 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, | 2387 | err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, |
| 2387 | ppm->tformat.pgsz_idx_dflt, 0); | 2388 | ppm->tformat.pgsz_idx_dflt); |
| 2388 | if (err < 0) | 2389 | if (err < 0) |
| 2389 | return err; | 2390 | return err; |
| 2390 | 2391 | ||
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 5d5d8b50d842..1917ff57651d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
| @@ -149,6 +149,7 @@ struct cxgbi_sock { | |||
| 149 | struct sk_buff_head receive_queue; | 149 | struct sk_buff_head receive_queue; |
| 150 | struct sk_buff_head write_queue; | 150 | struct sk_buff_head write_queue; |
| 151 | struct timer_list retry_timer; | 151 | struct timer_list retry_timer; |
| 152 | struct completion cmpl; | ||
| 152 | int err; | 153 | int err; |
| 153 | rwlock_t callback_lock; | 154 | rwlock_t callback_lock; |
| 154 | void *user_data; | 155 | void *user_data; |
| @@ -490,9 +491,9 @@ struct cxgbi_device { | |||
| 490 | struct cxgbi_ppm *, | 491 | struct cxgbi_ppm *, |
| 491 | struct cxgbi_task_tag_info *); | 492 | struct cxgbi_task_tag_info *); |
| 492 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, | 493 | int (*csk_ddp_setup_digest)(struct cxgbi_sock *, |
| 493 | unsigned int, int, int, int); | 494 | unsigned int, int, int); |
| 494 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, | 495 | int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, |
| 495 | unsigned int, int, bool); | 496 | unsigned int, int); |
| 496 | 497 | ||
| 497 | void (*csk_release_offload_resources)(struct cxgbi_sock *); | 498 | void (*csk_release_offload_resources)(struct cxgbi_sock *); |
| 498 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); | 499 | int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index e2420a810e99..c92b3822c408 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | |||
| @@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; | 2507 | sha->sas_port[i] = &hisi_hba->port[i].sas_port; |
| 2508 | } | 2508 | } |
| 2509 | 2509 | ||
| 2510 | if (hisi_hba->prot_mask) { | ||
| 2511 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
| 2512 | prot_mask); | ||
| 2513 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
| 2514 | } | ||
| 2515 | |||
| 2510 | rc = scsi_add_host(shost, dev); | 2516 | rc = scsi_add_host(shost, dev); |
| 2511 | if (rc) | 2517 | if (rc) |
| 2512 | goto err_out_ha; | 2518 | goto err_out_ha; |
| @@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2519 | if (rc) | 2525 | if (rc) |
| 2520 | goto err_out_register_ha; | 2526 | goto err_out_register_ha; |
| 2521 | 2527 | ||
| 2522 | if (hisi_hba->prot_mask) { | ||
| 2523 | dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", | ||
| 2524 | prot_mask); | ||
| 2525 | scsi_host_set_prot(hisi_hba->shost, prot_mask); | ||
| 2526 | } | ||
| 2527 | |||
| 2528 | scsi_scan_host(shost); | 2528 | scsi_scan_host(shost); |
| 2529 | 2529 | ||
| 2530 | return 0; | 2530 | return 0; |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 68b90c4f79a3..1727d0c71b12 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
| @@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) | |||
| 576 | shost->max_lun = ~0; | 576 | shost->max_lun = ~0; |
| 577 | shost->max_cmd_len = MAX_COMMAND_SIZE; | 577 | shost->max_cmd_len = MAX_COMMAND_SIZE; |
| 578 | 578 | ||
| 579 | /* turn on DIF support */ | ||
| 580 | scsi_host_set_prot(shost, | ||
| 581 | SHOST_DIF_TYPE1_PROTECTION | | ||
| 582 | SHOST_DIF_TYPE2_PROTECTION | | ||
| 583 | SHOST_DIF_TYPE3_PROTECTION); | ||
| 584 | scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); | ||
| 585 | |||
| 579 | err = scsi_add_host(shost, &pdev->dev); | 586 | err = scsi_add_host(shost, &pdev->dev); |
| 580 | if (err) | 587 | if (err) |
| 581 | goto err_shost; | 588 | goto err_shost; |
| @@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 663 | goto err_host_alloc; | 670 | goto err_host_alloc; |
| 664 | } | 671 | } |
| 665 | pci_info->hosts[i] = h; | 672 | pci_info->hosts[i] = h; |
| 666 | |||
| 667 | /* turn on DIF support */ | ||
| 668 | scsi_host_set_prot(to_shost(h), | ||
| 669 | SHOST_DIF_TYPE1_PROTECTION | | ||
| 670 | SHOST_DIF_TYPE2_PROTECTION | | ||
| 671 | SHOST_DIF_TYPE3_PROTECTION); | ||
| 672 | scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); | ||
| 673 | } | 673 | } |
| 674 | 674 | ||
| 675 | err = isci_setup_interrupts(pdev); | 675 | err = isci_setup_interrupts(pdev); |
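Both the hisi_sas and isci hunks move the DIF/DIX capability setup so it runs before scsi_add_host() rather than after, making the protection mask and guard type visible to the midlayer at host registration time. A sketch of that probe-time ordering (demo_probe is a placeholder, not either driver's real probe path):

    #include <linux/device.h>
    #include <scsi/scsi_host.h>

    static int demo_probe(struct device *dev, struct Scsi_Host *shost)
    {
            /* Declare protection capabilities before registering the host. */
            scsi_host_set_prot(shost,
                               SHOST_DIF_TYPE1_PROTECTION |
                               SHOST_DIF_TYPE2_PROTECTION |
                               SHOST_DIF_TYPE3_PROTECTION);
            scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

            return scsi_add_host(shost, dev);
    }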
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 12fd74761ae0..2242e9b3ca12 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
| @@ -9407,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
| 9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; | 9407 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
| 9408 | if (phba->link_flag & LS_LOOPBACK_MODE) | 9408 | if (phba->link_flag & LS_LOOPBACK_MODE) |
| 9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); | 9409 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); |
| 9410 | /* fall through */ | ||
| 9410 | case CMD_XMIT_SEQUENCE64_CR: | 9411 | case CMD_XMIT_SEQUENCE64_CR: |
| 9411 | /* word3 iocb=io_tag32 wqe=reserved */ | 9412 | /* word3 iocb=io_tag32 wqe=reserved */ |
| 9412 | wqe->xmit_sequence.rsvd3 = 0; | 9413 | wqe->xmit_sequence.rsvd3 = 0; |
| @@ -13528,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
| 13528 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13529 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 13529 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13530 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 13530 | "2537 Receive Frame Truncated!!\n"); | 13531 | "2537 Receive Frame Truncated!!\n"); |
| 13532 | /* fall through */ | ||
| 13531 | case FC_STATUS_RQ_SUCCESS: | 13533 | case FC_STATUS_RQ_SUCCESS: |
| 13532 | spin_lock_irqsave(&phba->hbalock, iflags); | 13534 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13533 | lpfc_sli4_rq_release(hrq, drq); | 13535 | lpfc_sli4_rq_release(hrq, drq); |
| @@ -13937,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 13937 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | 13939 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
| 13938 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 13940 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 13939 | "6126 Receive Frame Truncated!!\n"); | 13941 | "6126 Receive Frame Truncated!!\n"); |
| 13940 | /* Drop thru */ | 13942 | /* fall through */ |
| 13941 | case FC_STATUS_RQ_SUCCESS: | 13943 | case FC_STATUS_RQ_SUCCESS: |
| 13942 | spin_lock_irqsave(&phba->hbalock, iflags); | 13944 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 13943 | lpfc_sli4_rq_release(hrq, drq); | 13945 | lpfc_sli4_rq_release(hrq, drq); |
| @@ -14849,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) | |||
| 14849 | eq->entry_count); | 14851 | eq->entry_count); |
| 14850 | if (eq->entry_count < 256) | 14852 | if (eq->entry_count < 256) |
| 14851 | return -EINVAL; | 14853 | return -EINVAL; |
| 14852 | /* otherwise default to smallest count (drop through) */ | 14854 | /* fall through - otherwise default to smallest count */ |
| 14853 | case 256: | 14855 | case 256: |
| 14854 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | 14856 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
| 14855 | LPFC_EQ_CNT_256); | 14857 | LPFC_EQ_CNT_256); |
| @@ -14980,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 14980 | LPFC_CQ_CNT_WORD7); | 14982 | LPFC_CQ_CNT_WORD7); |
| 14981 | break; | 14983 | break; |
| 14982 | } | 14984 | } |
| 14983 | /* Fall Thru */ | 14985 | /* fall through */ |
| 14984 | default: | 14986 | default: |
| 14985 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 14987 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 14986 | "0361 Unsupported CQ count: " | 14988 | "0361 Unsupported CQ count: " |
| @@ -14991,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | |||
| 14991 | status = -EINVAL; | 14993 | status = -EINVAL; |
| 14992 | goto out; | 14994 | goto out; |
| 14993 | } | 14995 | } |
| 14994 | /* otherwise default to smallest count (drop through) */ | 14996 | /* fall through - otherwise default to smallest count */ |
| 14995 | case 256: | 14997 | case 256: |
| 14996 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | 14998 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
| 14997 | LPFC_CQ_CNT_256); | 14999 | LPFC_CQ_CNT_256); |
| @@ -15151,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
| 15151 | LPFC_CQ_CNT_WORD7); | 15153 | LPFC_CQ_CNT_WORD7); |
| 15152 | break; | 15154 | break; |
| 15153 | } | 15155 | } |
| 15154 | /* Fall Thru */ | 15156 | /* fall through */ |
| 15155 | default: | 15157 | default: |
| 15156 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 15158 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
| 15157 | "3118 Bad CQ count. (%d)\n", | 15159 | "3118 Bad CQ count. (%d)\n", |
| @@ -15160,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, | |||
| 15160 | status = -EINVAL; | 15162 | status = -EINVAL; |
| 15161 | goto out; | 15163 | goto out; |
| 15162 | } | 15164 | } |
| 15163 | /* otherwise default to smallest (drop thru) */ | 15165 | /* fall through - otherwise default to smallest */ |
| 15164 | case 256: | 15166 | case 256: |
| 15165 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, | 15167 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, |
| 15166 | &cq_set->u.request, LPFC_CQ_CNT_256); | 15168 | &cq_set->u.request, LPFC_CQ_CNT_256); |
| @@ -15432,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
| 15432 | status = -EINVAL; | 15434 | status = -EINVAL; |
| 15433 | goto out; | 15435 | goto out; |
| 15434 | } | 15436 | } |
| 15435 | /* otherwise default to smallest count (drop through) */ | 15437 | /* fall through - otherwise default to smallest count */ |
| 15436 | case 16: | 15438 | case 16: |
| 15437 | bf_set(lpfc_mq_context_ring_size, | 15439 | bf_set(lpfc_mq_context_ring_size, |
| 15438 | &mq_create_ext->u.request.context, | 15440 | &mq_create_ext->u.request.context, |
| @@ -15851,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
| 15851 | status = -EINVAL; | 15853 | status = -EINVAL; |
| 15852 | goto out; | 15854 | goto out; |
| 15853 | } | 15855 | } |
| 15854 | /* otherwise default to smallest count (drop through) */ | 15856 | /* fall through - otherwise default to smallest count */ |
| 15855 | case 512: | 15857 | case 512: |
| 15856 | bf_set(lpfc_rq_context_rqe_count, | 15858 | bf_set(lpfc_rq_context_rqe_count, |
| 15857 | &rq_create->u.request.context, | 15859 | &rq_create->u.request.context, |
| @@ -15988,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | |||
| 15988 | status = -EINVAL; | 15990 | status = -EINVAL; |
| 15989 | goto out; | 15991 | goto out; |
| 15990 | } | 15992 | } |
| 15991 | /* otherwise default to smallest count (drop through) */ | 15993 | /* fall through - otherwise default to smallest count */ |
| 15992 | case 512: | 15994 | case 512: |
| 15993 | bf_set(lpfc_rq_context_rqe_count, | 15995 | bf_set(lpfc_rq_context_rqe_count, |
| 15994 | &rq_create->u.request.context, | 15996 | &rq_create->u.request.context, |
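The lpfc hunks above only normalize the existing fall-through markers ("Fall Thru", "drop through", and so on) into the one comment form recognized by GCC's implicit-fallthrough warning, placed directly before the next case label. A contrived illustration of that placement (demo_link_state_name and its values are made up):

    #include <linux/printk.h>

    static const char *demo_link_state_name(int state)
    {
            switch (state) {
            case 2:
                    pr_debug("link degraded\n");
                    /* fall through - treat degraded the same as up */
            case 1:
                    return "up";
            default:
                    return "down";
            }
    }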
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7eaa400f6328..fcbff83c0097 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance) | |||
| 6236 | instance->consistent_mask_64bit = true; | 6236 | instance->consistent_mask_64bit = true; |
| 6237 | 6237 | ||
| 6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", | 6238 | dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", |
| 6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), | 6239 | ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), |
| 6240 | (instance->consistent_mask_64bit ? "63" : "32")); | 6240 | (instance->consistent_mask_64bit ? "63" : "32")); |
| 6241 | 6241 | ||
| 6242 | return 0; | 6242 | return 0; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a9a25f0eaf6f..647f48a28f85 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance) | |||
| 175 | /* | 175 | /* |
| 176 | * Check if it is our interrupt | 176 | * Check if it is our interrupt |
| 177 | */ | 177 | */ |
| 178 | status = readl(&regs->outbound_intr_status); | 178 | status = megasas_readl(instance, |
| 179 | &regs->outbound_intr_status); | ||
| 179 | 180 | ||
| 180 | if (status & 1) { | 181 | if (status & 1) { |
| 181 | writel(status, &regs->outbound_intr_status); | 182 | writel(status, &regs->outbound_intr_status); |
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 4c5a3d23e010..084f2fcced0a 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
| @@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) | |||
| 657 | if (dev->dev_type == SAS_SATA_DEV) { | 657 | if (dev->dev_type == SAS_SATA_DEV) { |
| 658 | pm8001_device->attached_phy = | 658 | pm8001_device->attached_phy = |
| 659 | dev->rphy->identify.phy_identifier; | 659 | dev->rphy->identify.phy_identifier; |
| 660 | flag = 1; /* directly sata*/ | 660 | flag = 1; /* directly sata */ |
| 661 | } | 661 | } |
| 662 | } /*register this device to HBA*/ | 662 | } /*register this device to HBA*/ |
| 663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); | 663 | PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4da660c1c431..6d6d6013e35b 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
| @@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | |||
| 953 | 953 | ||
| 954 | qedi_ep = ep->dd_data; | 954 | qedi_ep = ep->dd_data; |
| 955 | if (qedi_ep->state == EP_STATE_IDLE || | 955 | if (qedi_ep->state == EP_STATE_IDLE || |
| 956 | qedi_ep->state == EP_STATE_OFLDCONN_NONE || | ||
| 956 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) | 957 | qedi_ep->state == EP_STATE_OFLDCONN_FAILED) |
| 957 | return -1; | 958 | return -1; |
| 958 | 959 | ||
| @@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) | |||
| 1035 | 1036 | ||
| 1036 | switch (qedi_ep->state) { | 1037 | switch (qedi_ep->state) { |
| 1037 | case EP_STATE_OFLDCONN_START: | 1038 | case EP_STATE_OFLDCONN_START: |
| 1039 | case EP_STATE_OFLDCONN_NONE: | ||
| 1038 | goto ep_release_conn; | 1040 | goto ep_release_conn; |
| 1039 | case EP_STATE_OFLDCONN_FAILED: | 1041 | case EP_STATE_OFLDCONN_FAILED: |
| 1040 | break; | 1042 | break; |
| @@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) | |||
| 1225 | 1227 | ||
| 1226 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { | 1228 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { |
| 1227 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); | 1229 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); |
| 1230 | qedi_ep->state = EP_STATE_OFLDCONN_NONE; | ||
| 1228 | ret = -EIO; | 1231 | ret = -EIO; |
| 1229 | goto set_path_exit; | 1232 | goto set_path_exit; |
| 1230 | } | 1233 | } |
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 11260776212f..892d70d54553 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h | |||
| @@ -59,6 +59,7 @@ enum { | |||
| 59 | EP_STATE_OFLDCONN_FAILED = 0x2000, | 59 | EP_STATE_OFLDCONN_FAILED = 0x2000, |
| 60 | EP_STATE_CONNECT_FAILED = 0x4000, | 60 | EP_STATE_CONNECT_FAILED = 0x4000, |
| 61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, | 61 | EP_STATE_DISCONN_TIMEDOUT = 0x8000, |
| 62 | EP_STATE_OFLDCONN_NONE = 0x10000, | ||
| 62 | }; | 63 | }; |
| 63 | 64 | ||
| 64 | struct qedi_conn; | 65 | struct qedi_conn; |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index a414f51302b7..6856dfdfa473 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c | |||
| @@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 4248 | ha->devnum = devnum; /* specifies microcode load address */ | 4248 | ha->devnum = devnum; /* specifies microcode load address */ |
| 4249 | 4249 | ||
| 4250 | #ifdef QLA_64BIT_PTR | 4250 | #ifdef QLA_64BIT_PTR |
| 4251 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { | 4251 | if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) { |
| 4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { | 4252 | if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { |
| 4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " | 4253 | printk(KERN_WARNING "scsi(%li): Unable to set a " |
| 4254 | "suitable DMA mask - aborting\n", ha->host_no); | 4254 | "suitable DMA mask - aborting\n", ha->host_no); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 26b93c563f92..d1fc4958222a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host { | |||
| 4394 | uint16_t n2n_id; | 4394 | uint16_t n2n_id; |
| 4395 | struct list_head gpnid_list; | 4395 | struct list_head gpnid_list; |
| 4396 | struct fab_scan scan; | 4396 | struct fab_scan scan; |
| 4397 | |||
| 4398 | unsigned int irq_offset; | ||
| 4397 | } scsi_qla_host_t; | 4399 | } scsi_qla_host_t; |
| 4398 | 4400 | ||
| 4399 | struct qla27xx_image_status { | 4401 | struct qla27xx_image_status { |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 30d3090842f8..8507c43b918c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
| 3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); | 3446 | "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); |
| 3447 | } | 3447 | } |
| 3448 | } | 3448 | } |
| 3449 | vha->irq_offset = desc.pre_vectors; | ||
| 3449 | ha->msix_entries = kcalloc(ha->msix_count, | 3450 | ha->msix_entries = kcalloc(ha->msix_count, |
| 3450 | sizeof(struct qla_msix_entry), | 3451 | sizeof(struct qla_msix_entry), |
| 3451 | GFP_KERNEL); | 3452 | GFP_KERNEL); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ea69dafc9774..c6ef83d0d99b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) | |||
| 6939 | if (USER_CTRL_IRQ(vha->hw)) | 6939 | if (USER_CTRL_IRQ(vha->hw)) |
| 6940 | rc = blk_mq_map_queues(qmap); | 6940 | rc = blk_mq_map_queues(qmap); |
| 6941 | else | 6941 | else |
| 6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); | 6942 | rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); |
| 6943 | return rc; | 6943 | return rc; |
| 6944 | } | 6944 | } |
| 6945 | 6945 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index cfdfcda28072..a77bfb224248 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
| @@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, | |||
| 7232 | 7232 | ||
| 7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, | 7233 | rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, |
| 7234 | fw_ddb_entry); | 7234 | fw_ddb_entry); |
| 7235 | if (rc) | ||
| 7236 | goto free_sess; | ||
| 7235 | 7237 | ||
| 7236 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", | 7238 | ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", |
| 7237 | __func__, fnode_sess->dev.kobj.name); | 7239 | __func__, fnode_sess->dev.kobj.name); |
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index a2b4179bfdf7..7639df91b110 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c | |||
| @@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, | |||
| 80 | 80 | ||
| 81 | if (err == 0) { | 81 | if (err == 0) { |
| 82 | pm_runtime_disable(dev); | 82 | pm_runtime_disable(dev); |
| 83 | pm_runtime_set_active(dev); | 83 | err = pm_runtime_set_active(dev); |
| 84 | pm_runtime_enable(dev); | 84 | pm_runtime_enable(dev); |
| 85 | |||
| 86 | /* | ||
| 87 | * Forcibly set runtime PM status of request queue to "active" | ||
| 88 | * to make sure we can again get requests from the queue | ||
| 89 | * (see also blk_pm_peek_request()). | ||
| 90 | * | ||
| 91 | * The resume hook will correct runtime PM status of the disk. | ||
| 92 | */ | ||
| 93 | if (!err && scsi_is_sdev_device(dev)) { | ||
| 94 | struct scsi_device *sdev = to_scsi_device(dev); | ||
| 95 | |||
| 96 | if (sdev->request_queue->dev) | ||
| 97 | blk_set_runtime_active(sdev->request_queue); | ||
| 98 | } | ||
| 85 | } | 99 | } |
| 86 | 100 | ||
| 87 | return err; | 101 | return err; |
| @@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, | |||
| 140 | else | 154 | else |
| 141 | fn = NULL; | 155 | fn = NULL; |
| 142 | 156 | ||
| 143 | /* | ||
| 144 | * Forcibly set runtime PM status of request queue to "active" to | ||
| 145 | * make sure we can again get requests from the queue (see also | ||
| 146 | * blk_pm_peek_request()). | ||
| 147 | * | ||
| 148 | * The resume hook will correct runtime PM status of the disk. | ||
| 149 | */ | ||
| 150 | if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) | ||
| 151 | blk_set_runtime_active(to_scsi_device(dev)->request_queue); | ||
| 152 | |||
| 153 | if (fn) { | 157 | if (fn) { |
| 154 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); | 158 | async_schedule_domain(fn, dev, &scsi_sd_pm_domain); |
| 155 | 159 | ||
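The scsi_pm hunks fold the request-queue activation into scsi_dev_type_resume() and make it conditional on pm_runtime_set_active() succeeding, so the queue is only marked runtime-active once the device itself is. Condensed into a sketch (demo_mark_sdev_active is illustrative; error handling trimmed):

    #include <linux/blk-pm.h>
    #include <linux/pm_runtime.h>
    #include <scsi/scsi_device.h>

    static void demo_mark_sdev_active(struct scsi_device *sdev)
    {
            struct device *dev = &sdev->sdev_gendev;
            int err;

            pm_runtime_disable(dev);
            err = pm_runtime_set_active(dev);
            pm_runtime_enable(dev);

            /* Let blk_pm_peek_request() hand out requests again. */
            if (!err && sdev->request_queue->dev)
                    blk_set_runtime_active(sdev->request_queue);
    }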
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1a44f52e0e8..b2da8a00ec33 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, | |||
| 206 | sp = buffer_data[0] & 0x80 ? 1 : 0; | 206 | sp = buffer_data[0] & 0x80 ? 1 : 0; |
| 207 | buffer_data[0] &= ~0x80; | 207 | buffer_data[0] &= ~0x80; |
| 208 | 208 | ||
| 209 | /* | ||
| 210 | * Ensure WP, DPOFUA, and RESERVED fields are cleared in | ||
| 211 | * received mode parameter buffer before doing MODE SELECT. | ||
| 212 | */ | ||
| 213 | data.device_specific = 0; | ||
| 214 | |||
| 209 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, | 215 | if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, |
| 210 | SD_MAX_RETRIES, &data, &sshdr)) { | 216 | SD_MAX_RETRIES, &data, &sshdr)) { |
| 211 | if (scsi_sense_valid(&sshdr)) | 217 | if (scsi_sense_valid(&sshdr)) |
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7bde6c809442..f564af8949e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c | |||
| @@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) | |||
| 323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, | 323 | static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, |
| 324 | struct pqi_scsi_dev *device) | 324 | struct pqi_scsi_dev *device) |
| 325 | { | 325 | { |
| 326 | return device->in_remove & !ctrl_info->in_shutdown; | 326 | return device->in_remove && !ctrl_info->in_shutdown; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static inline void pqi_schedule_rescan_worker_with_delay( | 329 | static inline void pqi_schedule_rescan_worker_with_delay( |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index dd65fea07687..6d176815e6ce 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
| @@ -195,7 +195,7 @@ enum ufs_desc_def_size { | |||
| 195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, | 195 | QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, |
| 196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, | 196 | QUERY_DESC_UNIT_DEF_SIZE = 0x23, |
| 197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, | 197 | QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, |
| 198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, | 198 | QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, |
| 199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, | 199 | QUERY_DESC_POWER_DEF_SIZE = 0x62, |
| 200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, | 200 | QUERY_DESC_HEALTH_DEF_SIZE = 0x25, |
| 201 | }; | 201 | }; |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9ba7671b84f8..71334aaf1447 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -8001,6 +8001,8 @@ out: | |||
| 8001 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, | 8001 | trace_ufshcd_system_resume(dev_name(hba->dev), ret, |
| 8002 | ktime_to_us(ktime_sub(ktime_get(), start)), | 8002 | ktime_to_us(ktime_sub(ktime_get(), start)), |
| 8003 | hba->curr_dev_pwr_mode, hba->uic_link_state); | 8003 | hba->curr_dev_pwr_mode, hba->uic_link_state); |
| 8004 | if (!ret) | ||
| 8005 | hba->is_sys_suspended = false; | ||
| 8004 | return ret; | 8006 | return ret; |
| 8005 | } | 8007 | } |
| 8006 | EXPORT_SYMBOL(ufshcd_system_resume); | 8008 | EXPORT_SYMBOL(ufshcd_system_resume); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 984941e036c8..bd15a564fe24 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void) | |||
| 714 | sizeof(struct iscsi_queue_req), | 714 | sizeof(struct iscsi_queue_req), |
| 715 | __alignof__(struct iscsi_queue_req), 0, NULL); | 715 | __alignof__(struct iscsi_queue_req), 0, NULL); |
| 716 | if (!lio_qr_cache) { | 716 | if (!lio_qr_cache) { |
| 717 | pr_err("nable to kmem_cache_create() for" | 717 | pr_err("Unable to kmem_cache_create() for" |
| 718 | " lio_qr_cache\n"); | 718 | " lio_qr_cache\n"); |
| 719 | goto bitmap_out; | 719 | goto bitmap_out; |
| 720 | } | 720 | } |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1e6d24943565..c34c88ef3319 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -148,7 +148,7 @@ struct tcmu_dev { | |||
| 148 | size_t ring_size; | 148 | size_t ring_size; |
| 149 | 149 | ||
| 150 | struct mutex cmdr_lock; | 150 | struct mutex cmdr_lock; |
| 151 | struct list_head cmdr_queue; | 151 | struct list_head qfull_queue; |
| 152 | 152 | ||
| 153 | uint32_t dbi_max; | 153 | uint32_t dbi_max; |
| 154 | uint32_t dbi_thresh; | 154 | uint32_t dbi_thresh; |
| @@ -159,6 +159,7 @@ struct tcmu_dev { | |||
| 159 | 159 | ||
| 160 | struct timer_list cmd_timer; | 160 | struct timer_list cmd_timer; |
| 161 | unsigned int cmd_time_out; | 161 | unsigned int cmd_time_out; |
| 162 | struct list_head inflight_queue; | ||
| 162 | 163 | ||
| 163 | struct timer_list qfull_timer; | 164 | struct timer_list qfull_timer; |
| 164 | int qfull_time_out; | 165 | int qfull_time_out; |
| @@ -179,7 +180,7 @@ struct tcmu_dev { | |||
| 179 | struct tcmu_cmd { | 180 | struct tcmu_cmd { |
| 180 | struct se_cmd *se_cmd; | 181 | struct se_cmd *se_cmd; |
| 181 | struct tcmu_dev *tcmu_dev; | 182 | struct tcmu_dev *tcmu_dev; |
| 182 | struct list_head cmdr_queue_entry; | 183 | struct list_head queue_entry; |
| 183 | 184 | ||
| 184 | uint16_t cmd_id; | 185 | uint16_t cmd_id; |
| 185 | 186 | ||
| @@ -192,6 +193,7 @@ struct tcmu_cmd { | |||
| 192 | unsigned long deadline; | 193 | unsigned long deadline; |
| 193 | 194 | ||
| 194 | #define TCMU_CMD_BIT_EXPIRED 0 | 195 | #define TCMU_CMD_BIT_EXPIRED 0 |
| 196 | #define TCMU_CMD_BIT_INFLIGHT 1 | ||
| 195 | unsigned long flags; | 197 | unsigned long flags; |
| 196 | }; | 198 | }; |
| 197 | /* | 199 | /* |
| @@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) | |||
| 586 | if (!tcmu_cmd) | 588 | if (!tcmu_cmd) |
| 587 | return NULL; | 589 | return NULL; |
| 588 | 590 | ||
| 589 | INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); | 591 | INIT_LIST_HEAD(&tcmu_cmd->queue_entry); |
| 590 | tcmu_cmd->se_cmd = se_cmd; | 592 | tcmu_cmd->se_cmd = se_cmd; |
| 591 | tcmu_cmd->tcmu_dev = udev; | 593 | tcmu_cmd->tcmu_dev = udev; |
| 592 | 594 | ||
| @@ -915,11 +917,13 @@ setup_timer: | |||
| 915 | return 0; | 917 | return 0; |
| 916 | 918 | ||
| 917 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); | 919 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); |
| 918 | mod_timer(timer, tcmu_cmd->deadline); | 920 | if (!timer_pending(timer)) |
| 921 | mod_timer(timer, tcmu_cmd->deadline); | ||
| 922 | |||
| 919 | return 0; | 923 | return 0; |
| 920 | } | 924 | } |
| 921 | 925 | ||
| 922 | static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | 926 | static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) |
| 923 | { | 927 | { |
| 924 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | 928 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; |
| 925 | unsigned int tmo; | 929 | unsigned int tmo; |
| @@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) | |||
| 942 | if (ret) | 946 | if (ret) |
| 943 | return ret; | 947 | return ret; |
| 944 | 948 | ||
| 945 | list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); | 949 | list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); |
| 946 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", | 950 | pr_debug("adding cmd %u on dev %s to ring space wait queue\n", |
| 947 | tcmu_cmd->cmd_id, udev->name); | 951 | tcmu_cmd->cmd_id, udev->name); |
| 948 | return 0; | 952 | return 0; |
| @@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
| 999 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); | 1003 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); |
| 1000 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); | 1004 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); |
| 1001 | 1005 | ||
| 1002 | if (!list_empty(&udev->cmdr_queue)) | 1006 | if (!list_empty(&udev->qfull_queue)) |
| 1003 | goto queue; | 1007 | goto queue; |
| 1004 | 1008 | ||
| 1005 | mb = udev->mb_addr; | 1009 | mb = udev->mb_addr; |
| @@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) | |||
| 1096 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); | 1100 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); |
| 1097 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 1101 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
| 1098 | 1102 | ||
| 1103 | list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); | ||
| 1104 | set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags); | ||
| 1105 | |||
| 1099 | /* TODO: only if FLUSH and FUA? */ | 1106 | /* TODO: only if FLUSH and FUA? */ |
| 1100 | uio_event_notify(&udev->uio_info); | 1107 | uio_event_notify(&udev->uio_info); |
| 1101 | 1108 | ||
| 1102 | return 0; | 1109 | return 0; |
| 1103 | 1110 | ||
| 1104 | queue: | 1111 | queue: |
| 1105 | if (add_to_cmdr_queue(tcmu_cmd)) { | 1112 | if (add_to_qfull_queue(tcmu_cmd)) { |
| 1106 | *scsi_err = TCM_OUT_OF_RESOURCES; | 1113 | *scsi_err = TCM_OUT_OF_RESOURCES; |
| 1107 | return -1; | 1114 | return -1; |
| 1108 | } | 1115 | } |
| @@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * | |||
| 1145 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | 1152 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) |
| 1146 | goto out; | 1153 | goto out; |
| 1147 | 1154 | ||
| 1155 | list_del_init(&cmd->queue_entry); | ||
| 1156 | |||
| 1148 | tcmu_cmd_reset_dbi_cur(cmd); | 1157 | tcmu_cmd_reset_dbi_cur(cmd); |
| 1149 | 1158 | ||
| 1150 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { | 1159 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { |
| @@ -1194,9 +1203,29 @@ out: | |||
| 1194 | tcmu_free_cmd(cmd); | 1203 | tcmu_free_cmd(cmd); |
| 1195 | } | 1204 | } |
| 1196 | 1205 | ||
| 1206 | static void tcmu_set_next_deadline(struct list_head *queue, | ||
| 1207 | struct timer_list *timer) | ||
| 1208 | { | ||
| 1209 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | ||
| 1210 | unsigned long deadline = 0; | ||
| 1211 | |||
| 1212 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) { | ||
| 1213 | if (!time_after(jiffies, tcmu_cmd->deadline)) { | ||
| 1214 | deadline = tcmu_cmd->deadline; | ||
| 1215 | break; | ||
| 1216 | } | ||
| 1217 | } | ||
| 1218 | |||
| 1219 | if (deadline) | ||
| 1220 | mod_timer(timer, deadline); | ||
| 1221 | else | ||
| 1222 | del_timer(timer); | ||
| 1223 | } | ||
| 1224 | |||
| 1197 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | 1225 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) |
| 1198 | { | 1226 | { |
| 1199 | struct tcmu_mailbox *mb; | 1227 | struct tcmu_mailbox *mb; |
| 1228 | struct tcmu_cmd *cmd; | ||
| 1200 | int handled = 0; | 1229 | int handled = 0; |
| 1201 | 1230 | ||
| 1202 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { | 1231 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { |
| @@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1210 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { | 1239 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
| 1211 | 1240 | ||
| 1212 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 1241 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
| 1213 | struct tcmu_cmd *cmd; | ||
| 1214 | 1242 | ||
| 1215 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | 1243 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
| 1216 | 1244 | ||
| @@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1243 | /* no more pending commands */ | 1271 | /* no more pending commands */ |
| 1244 | del_timer(&udev->cmd_timer); | 1272 | del_timer(&udev->cmd_timer); |
| 1245 | 1273 | ||
| 1246 | if (list_empty(&udev->cmdr_queue)) { | 1274 | if (list_empty(&udev->qfull_queue)) { |
| 1247 | /* | 1275 | /* |
| 1248 | * no more pending or waiting commands so try to | 1276 | * no more pending or waiting commands so try to |
| 1249 | * reclaim blocks if needed. | 1277 | * reclaim blocks if needed. |
| @@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
| 1252 | tcmu_global_max_blocks) | 1280 | tcmu_global_max_blocks) |
| 1253 | schedule_delayed_work(&tcmu_unmap_work, 0); | 1281 | schedule_delayed_work(&tcmu_unmap_work, 0); |
| 1254 | } | 1282 | } |
| 1283 | } else if (udev->cmd_time_out) { | ||
| 1284 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
| 1255 | } | 1285 | } |
| 1256 | 1286 | ||
| 1257 | return handled; | 1287 | return handled; |
| @@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
| 1271 | if (!time_after(jiffies, cmd->deadline)) | 1301 | if (!time_after(jiffies, cmd->deadline)) |
| 1272 | return 0; | 1302 | return 0; |
| 1273 | 1303 | ||
| 1274 | is_running = list_empty(&cmd->cmdr_queue_entry); | 1304 | is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); |
| 1275 | se_cmd = cmd->se_cmd; | 1305 | se_cmd = cmd->se_cmd; |
| 1276 | 1306 | ||
| 1277 | if (is_running) { | 1307 | if (is_running) { |
| @@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) | |||
| 1288 | */ | 1318 | */ |
| 1289 | scsi_status = SAM_STAT_CHECK_CONDITION; | 1319 | scsi_status = SAM_STAT_CHECK_CONDITION; |
| 1290 | } else { | 1320 | } else { |
| 1291 | list_del_init(&cmd->cmdr_queue_entry); | ||
| 1292 | |||
| 1293 | idr_remove(&udev->commands, id); | 1321 | idr_remove(&udev->commands, id); |
| 1294 | tcmu_free_cmd(cmd); | 1322 | tcmu_free_cmd(cmd); |
| 1295 | scsi_status = SAM_STAT_TASK_SET_FULL; | 1323 | scsi_status = SAM_STAT_TASK_SET_FULL; |
| 1296 | } | 1324 | } |
| 1325 | list_del_init(&cmd->queue_entry); | ||
| 1297 | 1326 | ||
| 1298 | pr_debug("Timing out cmd %u on dev %s that is %s.\n", | 1327 | pr_debug("Timing out cmd %u on dev %s that is %s.\n", |
| 1299 | id, udev->name, is_running ? "inflight" : "queued"); | 1328 | id, udev->name, is_running ? "inflight" : "queued"); |
| @@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 1372 | 1401 | ||
| 1373 | INIT_LIST_HEAD(&udev->node); | 1402 | INIT_LIST_HEAD(&udev->node); |
| 1374 | INIT_LIST_HEAD(&udev->timedout_entry); | 1403 | INIT_LIST_HEAD(&udev->timedout_entry); |
| 1375 | INIT_LIST_HEAD(&udev->cmdr_queue); | 1404 | INIT_LIST_HEAD(&udev->qfull_queue); |
| 1405 | INIT_LIST_HEAD(&udev->inflight_queue); | ||
| 1376 | idr_init(&udev->commands); | 1406 | idr_init(&udev->commands); |
| 1377 | 1407 | ||
| 1378 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); | 1408 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); |
| @@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 1383 | return &udev->se_dev; | 1413 | return &udev->se_dev; |
| 1384 | } | 1414 | } |
| 1385 | 1415 | ||
| 1386 | static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | 1416 | static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) |
| 1387 | { | 1417 | { |
| 1388 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; | 1418 | struct tcmu_cmd *tcmu_cmd, *tmp_cmd; |
| 1389 | LIST_HEAD(cmds); | 1419 | LIST_HEAD(cmds); |
| @@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
| 1391 | sense_reason_t scsi_ret; | 1421 | sense_reason_t scsi_ret; |
| 1392 | int ret; | 1422 | int ret; |
| 1393 | 1423 | ||
| 1394 | if (list_empty(&udev->cmdr_queue)) | 1424 | if (list_empty(&udev->qfull_queue)) |
| 1395 | return true; | 1425 | return true; |
| 1396 | 1426 | ||
| 1397 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); | 1427 | pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); |
| 1398 | 1428 | ||
| 1399 | list_splice_init(&udev->cmdr_queue, &cmds); | 1429 | list_splice_init(&udev->qfull_queue, &cmds); |
| 1400 | 1430 | ||
| 1401 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { | 1431 | list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { |
| 1402 | list_del_init(&tcmu_cmd->cmdr_queue_entry); | 1432 | list_del_init(&tcmu_cmd->queue_entry); |
| 1403 | 1433 | ||
| 1404 | pr_debug("removing cmd %u on dev %s from queue\n", | 1434 | pr_debug("removing cmd %u on dev %s from queue\n", |
| 1405 | tcmu_cmd->cmd_id, udev->name); | 1435 | tcmu_cmd->cmd_id, udev->name); |
| @@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) | |||
| 1437 | * cmd was requeued, so just put all cmds back in | 1467 | * cmd was requeued, so just put all cmds back in |
| 1438 | * the queue | 1468 | * the queue |
| 1439 | */ | 1469 | */ |
| 1440 | list_splice_tail(&cmds, &udev->cmdr_queue); | 1470 | list_splice_tail(&cmds, &udev->qfull_queue); |
| 1441 | drained = false; | 1471 | drained = false; |
| 1442 | goto done; | 1472 | break; |
| 1443 | } | 1473 | } |
| 1444 | } | 1474 | } |
| 1445 | if (list_empty(&udev->cmdr_queue)) | 1475 | |
| 1446 | del_timer(&udev->qfull_timer); | 1476 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); |
| 1447 | done: | ||
| 1448 | return drained; | 1477 | return drained; |
| 1449 | } | 1478 | } |
| 1450 | 1479 | ||
| @@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) | |||
| 1454 | 1483 | ||
| 1455 | mutex_lock(&udev->cmdr_lock); | 1484 | mutex_lock(&udev->cmdr_lock); |
| 1456 | tcmu_handle_completions(udev); | 1485 | tcmu_handle_completions(udev); |
| 1457 | run_cmdr_queue(udev, false); | 1486 | run_qfull_queue(udev, false); |
| 1458 | mutex_unlock(&udev->cmdr_lock); | 1487 | mutex_unlock(&udev->cmdr_lock); |
| 1459 | 1488 | ||
| 1460 | return 0; | 1489 | return 0; |
| @@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev) | |||
| 1982 | /* complete IO that has executed successfully */ | 2011 | /* complete IO that has executed successfully */ |
| 1983 | tcmu_handle_completions(udev); | 2012 | tcmu_handle_completions(udev); |
| 1984 | /* fail IO waiting to be queued */ | 2013 | /* fail IO waiting to be queued */ |
| 1985 | run_cmdr_queue(udev, true); | 2014 | run_qfull_queue(udev, true); |
| 1986 | 2015 | ||
| 1987 | unlock: | 2016 | unlock: |
| 1988 | mutex_unlock(&udev->cmdr_lock); | 2017 | mutex_unlock(&udev->cmdr_lock); |
| @@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
| 1997 | mutex_lock(&udev->cmdr_lock); | 2026 | mutex_lock(&udev->cmdr_lock); |
| 1998 | 2027 | ||
| 1999 | idr_for_each_entry(&udev->commands, cmd, i) { | 2028 | idr_for_each_entry(&udev->commands, cmd, i) { |
| 2000 | if (!list_empty(&cmd->cmdr_queue_entry)) | 2029 | if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) |
| 2001 | continue; | 2030 | continue; |
| 2002 | 2031 | ||
| 2003 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", | 2032 | pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", |
| @@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) | |||
| 2006 | 2035 | ||
| 2007 | idr_remove(&udev->commands, i); | 2036 | idr_remove(&udev->commands, i); |
| 2008 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { | 2037 | if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { |
| 2038 | list_del_init(&cmd->queue_entry); | ||
| 2009 | if (err_level == 1) { | 2039 | if (err_level == 1) { |
| 2010 | /* | 2040 | /* |
| 2011 | * Userspace was not able to start the | 2041 | * Userspace was not able to start the |
| @@ -2666,6 +2696,10 @@ static void check_timedout_devices(void) | |||
| 2666 | 2696 | ||
| 2667 | mutex_lock(&udev->cmdr_lock); | 2697 | mutex_lock(&udev->cmdr_lock); |
| 2668 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); | 2698 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); |
| 2699 | |||
| 2700 | tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); | ||
| 2701 | tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); | ||
| 2702 | |||
| 2669 | mutex_unlock(&udev->cmdr_lock); | 2703 | mutex_unlock(&udev->cmdr_lock); |
| 2670 | 2704 | ||
| 2671 | spin_lock_bh(&timed_out_udevs_lock); | 2705 | spin_lock_bh(&timed_out_udevs_lock); |
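The target_core_user changes split the old cmdr_queue into a qfull_queue and an inflight_queue and re-arm each timer for the earliest deadline still pending on its queue, rather than bumping the timer on every command. The helper, distilled with a made-up demo_cmd entry type:

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/timer.h>

    struct demo_cmd {
            struct list_head queue_entry;
            unsigned long deadline;         /* absolute, in jiffies */
    };

    /* Arm the timer for the first unexpired entry, or stop it if none. */
    static void demo_set_next_deadline(struct list_head *queue,
                                       struct timer_list *timer)
    {
            struct demo_cmd *cmd, *tmp;
            unsigned long deadline = 0;

            list_for_each_entry_safe(cmd, tmp, queue, queue_entry) {
                    if (!time_after(jiffies, cmd->deadline)) {
                            deadline = cmd->deadline;
                            break;
                    }
            }

            if (deadline)
                    mod_timer(timer, deadline);
            else
                    del_timer(timer);
    }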
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 0582bd12a239..0ca908d12750 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | config INT340X_THERMAL | 5 | config INT340X_THERMAL |
| 6 | tristate "ACPI INT340X thermal drivers" | 6 | tristate "ACPI INT340X thermal drivers" |
| 7 | depends on X86 && ACPI | 7 | depends on X86 && ACPI && PCI |
| 8 | select THERMAL_GOV_USER_SPACE | 8 | select THERMAL_GOV_USER_SPACE |
| 9 | select ACPI_THERMAL_REL | 9 | select ACPI_THERMAL_REL |
| 10 | select ACPI_FAN | 10 | select ACPI_FAN |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8e10ab436d1f..344684f3e2e4 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
| @@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, | |||
| 1127 | struct vhost_virtqueue *vq, | 1127 | struct vhost_virtqueue *vq, |
| 1128 | struct vhost_scsi_ctx *vc) | 1128 | struct vhost_scsi_ctx *vc) |
| 1129 | { | 1129 | { |
| 1130 | struct virtio_scsi_ctrl_tmf_resp __user *resp; | ||
| 1131 | struct virtio_scsi_ctrl_tmf_resp rsp; | 1130 | struct virtio_scsi_ctrl_tmf_resp rsp; |
| 1131 | struct iov_iter iov_iter; | ||
| 1132 | int ret; | 1132 | int ret; |
| 1133 | 1133 | ||
| 1134 | pr_debug("%s\n", __func__); | 1134 | pr_debug("%s\n", __func__); |
| 1135 | memset(&rsp, 0, sizeof(rsp)); | 1135 | memset(&rsp, 0, sizeof(rsp)); |
| 1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; | 1136 | rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; |
| 1137 | resp = vq->iov[vc->out].iov_base; | 1137 | |
| 1138 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1138 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
| 1139 | if (!ret) | 1139 | |
| 1140 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
| 1141 | if (likely(ret == sizeof(rsp))) | ||
| 1140 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1142 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
| 1141 | else | 1143 | else |
| 1142 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); | 1144 | pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); |
| @@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs, | |||
| 1147 | struct vhost_virtqueue *vq, | 1149 | struct vhost_virtqueue *vq, |
| 1148 | struct vhost_scsi_ctx *vc) | 1150 | struct vhost_scsi_ctx *vc) |
| 1149 | { | 1151 | { |
| 1150 | struct virtio_scsi_ctrl_an_resp __user *resp; | ||
| 1151 | struct virtio_scsi_ctrl_an_resp rsp; | 1152 | struct virtio_scsi_ctrl_an_resp rsp; |
| 1153 | struct iov_iter iov_iter; | ||
| 1152 | int ret; | 1154 | int ret; |
| 1153 | 1155 | ||
| 1154 | pr_debug("%s\n", __func__); | 1156 | pr_debug("%s\n", __func__); |
| 1155 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ | 1157 | memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ |
| 1156 | rsp.response = VIRTIO_SCSI_S_OK; | 1158 | rsp.response = VIRTIO_SCSI_S_OK; |
| 1157 | resp = vq->iov[vc->out].iov_base; | 1159 | |
| 1158 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | 1160 | iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); |
| 1159 | if (!ret) | 1161 | |
| 1162 | ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); | ||
| 1163 | if (likely(ret == sizeof(rsp))) | ||
| 1160 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); | 1164 | vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); |
| 1161 | else | 1165 | else |
| 1162 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); | 1166 | pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index babbb32b9bf0..15a216cdd507 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
| 1034 | int type, ret; | 1034 | int type, ret; |
| 1035 | 1035 | ||
| 1036 | ret = copy_from_iter(&type, sizeof(type), from); | 1036 | ret = copy_from_iter(&type, sizeof(type), from); |
| 1037 | if (ret != sizeof(type)) | 1037 | if (ret != sizeof(type)) { |
| 1038 | ret = -EINVAL; | ||
| 1038 | goto done; | 1039 | goto done; |
| 1040 | } | ||
| 1039 | 1041 | ||
| 1040 | switch (type) { | 1042 | switch (type) { |
| 1041 | case VHOST_IOTLB_MSG: | 1043 | case VHOST_IOTLB_MSG: |
| @@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | |||
| 1054 | 1056 | ||
| 1055 | iov_iter_advance(from, offset); | 1057 | iov_iter_advance(from, offset); |
| 1056 | ret = copy_from_iter(&msg, sizeof(msg), from); | 1058 | ret = copy_from_iter(&msg, sizeof(msg), from); |
| 1057 | if (ret != sizeof(msg)) | 1059 | if (ret != sizeof(msg)) { |
| 1060 | ret = -EINVAL; | ||
| 1058 | goto done; | 1061 | goto done; |
| 1062 | } | ||
| 1059 | if (vhost_process_iotlb_msg(dev, &msg)) { | 1063 | if (vhost_process_iotlb_msg(dev, &msg)) { |
| 1060 | ret = -EFAULT; | 1064 | ret = -EFAULT; |
| 1061 | goto done; | 1065 | goto done; |
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8976190b6c1f..bfa1360ec750 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c | |||
| @@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt) | |||
| 510 | continue; | 510 | continue; |
| 511 | } | 511 | } |
| 512 | #endif | 512 | #endif |
| 513 | |||
| 514 | if (!strncmp(options, "logo-pos:", 9)) { | ||
| 515 | options += 9; | ||
| 516 | if (!strcmp(options, "center")) | ||
| 517 | fb_center_logo = true; | ||
| 518 | continue; | ||
| 519 | } | ||
| 513 | } | 520 | } |
| 514 | return 1; | 521 | return 1; |
| 515 | } | 522 | } |
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 558ed2ed3124..cb43a2258c51 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c | |||
| @@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb); | |||
| 53 | int num_registered_fb __read_mostly; | 53 | int num_registered_fb __read_mostly; |
| 54 | EXPORT_SYMBOL(num_registered_fb); | 54 | EXPORT_SYMBOL(num_registered_fb); |
| 55 | 55 | ||
| 56 | bool fb_center_logo __read_mostly; | ||
| 57 | EXPORT_SYMBOL(fb_center_logo); | ||
| 58 | |||
| 56 | static struct fb_info *get_fb_info(unsigned int idx) | 59 | static struct fb_info *get_fb_info(unsigned int idx) |
| 57 | { | 60 | { |
| 58 | struct fb_info *fb_info; | 61 | struct fb_info *fb_info; |
| @@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
| 506 | fb_set_logo(info, logo, logo_new, fb_logo.depth); | 509 | fb_set_logo(info, logo, logo_new, fb_logo.depth); |
| 507 | } | 510 | } |
| 508 | 511 | ||
| 509 | #ifdef CONFIG_FB_LOGO_CENTER | 512 | if (fb_center_logo) { |
| 510 | { | ||
| 511 | int xres = info->var.xres; | 513 | int xres = info->var.xres; |
| 512 | int yres = info->var.yres; | 514 | int yres = info->var.yres; |
| 513 | 515 | ||
| @@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, | |||
| 520 | --n; | 522 | --n; |
| 521 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; | 523 | image.dx = (xres - n * (logo->width + 8) - 8) / 2; |
| 522 | image.dy = y ?: (yres - logo->height) / 2; | 524 | image.dy = y ?: (yres - logo->height) / 2; |
| 525 | } else { | ||
| 526 | image.dx = 0; | ||
| 527 | image.dy = y; | ||
| 523 | } | 528 | } |
| 524 | #else | 529 | |
| 525 | image.dx = 0; | ||
| 526 | image.dy = y; | ||
| 527 | #endif | ||
| 528 | image.width = logo->width; | 530 | image.width = logo->width; |
| 529 | image.height = logo->height; | 531 | image.height = logo->height; |
| 530 | 532 | ||
| @@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate) | |||
| 684 | } | 686 | } |
| 685 | 687 | ||
| 686 | height = fb_logo.logo->height; | 688 | height = fb_logo.logo->height; |
| 687 | #ifdef CONFIG_FB_LOGO_CENTER | 689 | if (fb_center_logo) |
| 688 | height += (yres - fb_logo.logo->height) / 2; | 690 | height += (yres - fb_logo.logo->height) / 2; |
| 689 | #endif | ||
| 690 | 691 | ||
| 691 | return fb_prepare_extra_logos(info, height, yres); | 692 | return fb_prepare_extra_logos(info, height, yres); |
| 692 | } | 693 | } |
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 31f769d67195..057d3cdef92e 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c | |||
| @@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, | |||
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, | 320 | static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, |
| 321 | const char *name, unsigned long address) | 321 | unsigned long address) |
| 322 | { | 322 | { |
| 323 | struct offb_par *par = (struct offb_par *) info->par; | 323 | struct offb_par *par = (struct offb_par *) info->par; |
| 324 | 324 | ||
| 325 | if (dp && !strncmp(name, "ATY,Rage128", 11)) { | 325 | if (of_node_name_prefix(dp, "ATY,Rage128")) { |
| 326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 326 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 327 | if (par->cmap_adr) | 327 | if (par->cmap_adr) |
| 328 | par->cmap_type = cmap_r128; | 328 | par->cmap_type = cmap_r128; |
| 329 | } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) | 329 | } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || |
| 330 | || !strncmp(name, "ATY,RageM3p12A", 14))) { | 330 | of_node_name_prefix(dp, "ATY,RageM3p12A")) { |
| 331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 331 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 332 | if (par->cmap_adr) | 332 | if (par->cmap_adr) |
| 333 | par->cmap_type = cmap_M3A; | 333 | par->cmap_type = cmap_M3A; |
| 334 | } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { | 334 | } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { |
| 335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); | 335 | par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
| 336 | if (par->cmap_adr) | 336 | if (par->cmap_adr) |
| 337 | par->cmap_type = cmap_M3B; | 337 | par->cmap_type = cmap_M3B; |
| 338 | } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { | 338 | } else if (of_node_name_prefix(dp, "ATY,Rage6")) { |
| 339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); | 339 | par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); |
| 340 | if (par->cmap_adr) | 340 | if (par->cmap_adr) |
| 341 | par->cmap_type = cmap_radeon; | 341 | par->cmap_type = cmap_radeon; |
| 342 | } else if (!strncmp(name, "ATY,", 4)) { | 342 | } else if (of_node_name_prefix(dp, "ATY,")) { |
| 343 | unsigned long base = address & 0xff000000UL; | 343 | unsigned long base = address & 0xff000000UL; |
| 344 | par->cmap_adr = | 344 | par->cmap_adr = |
| 345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; | 345 | ioremap(base + 0x7ff000, 0x1000) + 0xcc0; |
| @@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp | |||
| 350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); | 350 | par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); |
| 351 | if (par->cmap_adr) | 351 | if (par->cmap_adr) |
| 352 | par->cmap_type = cmap_gxt2000; | 352 | par->cmap_type = cmap_gxt2000; |
| 353 | } else if (dp && !strncmp(name, "vga,Display-", 12)) { | 353 | } else if (of_node_name_prefix(dp, "vga,Display-")) { |
| 354 | /* Look for AVIVO initialized by SLOF */ | 354 | /* Look for AVIVO initialized by SLOF */ |
| 355 | struct device_node *pciparent = of_get_parent(dp); | 355 | struct device_node *pciparent = of_get_parent(dp); |
| 356 | const u32 *vid, *did; | 356 | const u32 *vid, *did; |
| @@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, | |||
| 438 | 438 | ||
| 439 | par->cmap_type = cmap_unknown; | 439 | par->cmap_type = cmap_unknown; |
| 440 | if (depth == 8) | 440 | if (depth == 8) |
| 441 | offb_init_palette_hacks(info, dp, name, address); | 441 | offb_init_palette_hacks(info, dp, address); |
| 442 | else | 442 | else |
| 443 | fix->visual = FB_VISUAL_TRUECOLOR; | 443 | fix->visual = FB_VISUAL_TRUECOLOR; |
| 444 | 444 | ||
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 53f93616c671..8e23160ec59f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | |||
| @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) | |||
| 609 | 609 | ||
| 610 | int r = 0; | 610 | int r = 0; |
| 611 | 611 | ||
| 612 | memset(&p, 0, sizeof(p)); | ||
| 613 | |||
| 612 | switch (cmd) { | 614 | switch (cmd) { |
| 613 | case OMAPFB_SYNC_GFX: | 615 | case OMAPFB_SYNC_GFX: |
| 614 | DBG("ioctl SYNC_GFX\n"); | 616 | DBG("ioctl SYNC_GFX\n"); |
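The single line added to omapfb_ioctl() zeroes the ioctl argument union up front, so any members a particular command handler never writes go back to user space as zeros rather than leftover stack bytes. A minimal userspace illustration of the pattern (this is not the driver code; the struct and names are made up):

/* Zero the whole reply before partially filling it, so no uninitialized
 * bytes are ever copied out - the same idea as the added memset(&p, ...). */
#include <stdio.h>
#include <string.h>

struct reply { int status; char pad[12]; };

static void handle(struct reply *r)
{
    memset(r, 0, sizeof(*r));   /* without this, pad[] would be stack garbage */
    r->status = 42;             /* only one field is actually set */
}

int main(void)
{
    struct reply r;

    handle(&r);
    printf("status=%d first_pad_byte=%d\n", r.status, r.pad[0]);
    return 0;
}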
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig index 1e972c4e88b1..d1f6196c8b9a 100644 --- a/drivers/video/logo/Kconfig +++ b/drivers/video/logo/Kconfig | |||
| @@ -10,15 +10,6 @@ menuconfig LOGO | |||
| 10 | 10 | ||
| 11 | if LOGO | 11 | if LOGO |
| 12 | 12 | ||
| 13 | config FB_LOGO_CENTER | ||
| 14 | bool "Center the logo" | ||
| 15 | depends on FB=y | ||
| 16 | help | ||
| 17 | When this option is selected, the bootup logo is centered both | ||
| 18 | horizontally and vertically. If more than one logo is displayed | ||
| 19 | due to multiple CPUs, the collected line of logos is centered | ||
| 20 | as a whole. | ||
| 21 | |||
| 22 | config FB_LOGO_EXTRA | 13 | config FB_LOGO_EXTRA |
| 23 | bool | 14 | bool |
| 24 | depends on FB=y | 15 | depends on FB=y |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 728ecd1eea30..fb12fe205f86 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -61,6 +61,10 @@ enum virtio_balloon_vq { | |||
| 61 | VIRTIO_BALLOON_VQ_MAX | 61 | VIRTIO_BALLOON_VQ_MAX |
| 62 | }; | 62 | }; |
| 63 | 63 | ||
| 64 | enum virtio_balloon_config_read { | ||
| 65 | VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, | ||
| 66 | }; | ||
| 67 | |||
| 64 | struct virtio_balloon { | 68 | struct virtio_balloon { |
| 65 | struct virtio_device *vdev; | 69 | struct virtio_device *vdev; |
| 66 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; | 70 | struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; |
| @@ -77,14 +81,20 @@ struct virtio_balloon { | |||
| 77 | /* Prevent updating balloon when it is being canceled. */ | 81 | /* Prevent updating balloon when it is being canceled. */ |
| 78 | spinlock_t stop_update_lock; | 82 | spinlock_t stop_update_lock; |
| 79 | bool stop_update; | 83 | bool stop_update; |
| 84 | /* Bitmap to indicate if reading the related config fields are needed */ | ||
| 85 | unsigned long config_read_bitmap; | ||
| 80 | 86 | ||
| 81 | /* The list of allocated free pages, waiting to be given back to mm */ | 87 | /* The list of allocated free pages, waiting to be given back to mm */ |
| 82 | struct list_head free_page_list; | 88 | struct list_head free_page_list; |
| 83 | spinlock_t free_page_list_lock; | 89 | spinlock_t free_page_list_lock; |
| 84 | /* The number of free page blocks on the above list */ | 90 | /* The number of free page blocks on the above list */ |
| 85 | unsigned long num_free_page_blocks; | 91 | unsigned long num_free_page_blocks; |
| 86 | /* The cmd id received from host */ | 92 | /* |
| 87 | u32 cmd_id_received; | 93 | * The cmd id received from host. |
| 94 | * Read it via virtio_balloon_cmd_id_received to get the latest value | ||
| 95 | * sent from host. | ||
| 96 | */ | ||
| 97 | u32 cmd_id_received_cache; | ||
| 88 | /* The cmd id that is actively in use */ | 98 | /* The cmd id that is actively in use */ |
| 89 | __virtio32 cmd_id_active; | 99 | __virtio32 cmd_id_active; |
| 90 | /* Buffer to store the stop sign */ | 100 | /* Buffer to store the stop sign */ |
| @@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, | |||
| 390 | return num_returned; | 400 | return num_returned; |
| 391 | } | 401 | } |
| 392 | 402 | ||
| 403 | static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) | ||
| 404 | { | ||
| 405 | if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) | ||
| 406 | return; | ||
| 407 | |||
| 408 | /* No need to queue the work if the bit was already set. */ | ||
| 409 | if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
| 410 | &vb->config_read_bitmap)) | ||
| 411 | return; | ||
| 412 | |||
| 413 | queue_work(vb->balloon_wq, &vb->report_free_page_work); | ||
| 414 | } | ||
| 415 | |||
| 393 | static void virtballoon_changed(struct virtio_device *vdev) | 416 | static void virtballoon_changed(struct virtio_device *vdev) |
| 394 | { | 417 | { |
| 395 | struct virtio_balloon *vb = vdev->priv; | 418 | struct virtio_balloon *vb = vdev->priv; |
| 396 | unsigned long flags; | 419 | unsigned long flags; |
| 397 | s64 diff = towards_target(vb); | ||
| 398 | |||
| 399 | if (diff) { | ||
| 400 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
| 401 | if (!vb->stop_update) | ||
| 402 | queue_work(system_freezable_wq, | ||
| 403 | &vb->update_balloon_size_work); | ||
| 404 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 405 | } | ||
| 406 | 420 | ||
| 407 | if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { | 421 | spin_lock_irqsave(&vb->stop_update_lock, flags); |
| 408 | virtio_cread(vdev, struct virtio_balloon_config, | 422 | if (!vb->stop_update) { |
| 409 | free_page_report_cmd_id, &vb->cmd_id_received); | 423 | queue_work(system_freezable_wq, |
| 410 | if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | 424 | &vb->update_balloon_size_work); |
| 411 | /* Pass ULONG_MAX to give back all the free pages */ | 425 | virtio_balloon_queue_free_page_work(vb); |
| 412 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
| 413 | } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
| 414 | vb->cmd_id_received != | ||
| 415 | virtio32_to_cpu(vdev, vb->cmd_id_active)) { | ||
| 416 | spin_lock_irqsave(&vb->stop_update_lock, flags); | ||
| 417 | if (!vb->stop_update) { | ||
| 418 | queue_work(vb->balloon_wq, | ||
| 419 | &vb->report_free_page_work); | ||
| 420 | } | ||
| 421 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 422 | } | ||
| 423 | } | 426 | } |
| 427 | spin_unlock_irqrestore(&vb->stop_update_lock, flags); | ||
| 424 | } | 428 | } |
| 425 | 429 | ||
| 426 | static void update_balloon_size(struct virtio_balloon *vb) | 430 | static void update_balloon_size(struct virtio_balloon *vb) |
| @@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) | |||
| 527 | return 0; | 531 | return 0; |
| 528 | } | 532 | } |
| 529 | 533 | ||
| 534 | static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) | ||
| 535 | { | ||
| 536 | if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, | ||
| 537 | &vb->config_read_bitmap)) | ||
| 538 | virtio_cread(vb->vdev, struct virtio_balloon_config, | ||
| 539 | free_page_report_cmd_id, | ||
| 540 | &vb->cmd_id_received_cache); | ||
| 541 | |||
| 542 | return vb->cmd_id_received_cache; | ||
| 543 | } | ||
| 544 | |||
| 530 | static int send_cmd_id_start(struct virtio_balloon *vb) | 545 | static int send_cmd_id_start(struct virtio_balloon *vb) |
| 531 | { | 546 | { |
| 532 | struct scatterlist sg; | 547 | struct scatterlist sg; |
| @@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) | |||
| 537 | while (virtqueue_get_buf(vq, &unused)) | 552 | while (virtqueue_get_buf(vq, &unused)) |
| 538 | ; | 553 | ; |
| 539 | 554 | ||
| 540 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); | 555 | vb->cmd_id_active = virtio32_to_cpu(vb->vdev, |
| 556 | virtio_balloon_cmd_id_received(vb)); | ||
| 541 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); | 557 | sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); |
| 542 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); | 558 | err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); |
| 543 | if (!err) | 559 | if (!err) |
| @@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
| 620 | * stop the reporting. | 636 | * stop the reporting. |
| 621 | */ | 637 | */ |
| 622 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); | 638 | cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); |
| 623 | if (cmd_id_active != vb->cmd_id_received) | 639 | if (unlikely(cmd_id_active != |
| 640 | virtio_balloon_cmd_id_received(vb))) | ||
| 624 | break; | 641 | break; |
| 625 | 642 | ||
| 626 | /* | 643 | /* |
| @@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) | |||
| 637 | return 0; | 654 | return 0; |
| 638 | } | 655 | } |
| 639 | 656 | ||
| 640 | static void report_free_page_func(struct work_struct *work) | 657 | static void virtio_balloon_report_free_page(struct virtio_balloon *vb) |
| 641 | { | 658 | { |
| 642 | int err; | 659 | int err; |
| 643 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
| 644 | report_free_page_work); | ||
| 645 | struct device *dev = &vb->vdev->dev; | 660 | struct device *dev = &vb->vdev->dev; |
| 646 | 661 | ||
| 647 | /* Start by sending the received cmd id to host with an outbuf. */ | 662 | /* Start by sending the received cmd id to host with an outbuf. */ |
| @@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) | |||
| 659 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); | 674 | dev_err(dev, "Failed to send a stop id, err = %d\n", err); |
| 660 | } | 675 | } |
| 661 | 676 | ||
| 677 | static void report_free_page_func(struct work_struct *work) | ||
| 678 | { | ||
| 679 | struct virtio_balloon *vb = container_of(work, struct virtio_balloon, | ||
| 680 | report_free_page_work); | ||
| 681 | u32 cmd_id_received; | ||
| 682 | |||
| 683 | cmd_id_received = virtio_balloon_cmd_id_received(vb); | ||
| 684 | if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { | ||
| 685 | /* Pass ULONG_MAX to give back all the free pages */ | ||
| 686 | return_free_pages_to_mm(vb, ULONG_MAX); | ||
| 687 | } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && | ||
| 688 | cmd_id_received != | ||
| 689 | virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { | ||
| 690 | virtio_balloon_report_free_page(vb); | ||
| 691 | } | ||
| 692 | } | ||
| 693 | |||
| 662 | #ifdef CONFIG_BALLOON_COMPACTION | 694 | #ifdef CONFIG_BALLOON_COMPACTION |
| 663 | /* | 695 | /* |
| 664 | * virtballoon_migratepage - perform the balloon page migration on behalf of | 696 | * virtballoon_migratepage - perform the balloon page migration on behalf of |
| @@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
| 885 | goto out_del_vqs; | 917 | goto out_del_vqs; |
| 886 | } | 918 | } |
| 887 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); | 919 | INIT_WORK(&vb->report_free_page_work, report_free_page_func); |
| 888 | vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; | 920 | vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; |
| 889 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, | 921 | vb->cmd_id_active = cpu_to_virtio32(vb->vdev, |
| 890 | VIRTIO_BALLOON_CMD_ID_STOP); | 922 | VIRTIO_BALLOON_CMD_ID_STOP); |
| 891 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, | 923 | vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, |
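The virtio_balloon change splits "the host changed the config" from "read the new command id": the config-changed callback only sets a bit in config_read_bitmap and queues the work item, and virtio_balloon_cmd_id_received() performs the actual virtio_cread() once, caching the result in cmd_id_received_cache. A compact userspace model of that defer-and-cache pattern, using C11 atomics and invented names:

/* Minimal model (not virtio): the notification path only marks "config
 * needs re-reading" and defers the read; readers use the cached value
 * unless the mark is set. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool cfg_pending;        /* plays the role of config_read_bitmap */
static unsigned int cmd_id_cache;      /* plays the role of cmd_id_received_cache */
static unsigned int host_cmd_id = 7;   /* stand-in for the device config space */

static void config_changed(void)       /* cheap, callable from a notification path */
{
    atomic_store(&cfg_pending, 1);
}

static unsigned int cmd_id_received(void)   /* the expensive read happens here */
{
    if (atomic_exchange(&cfg_pending, 0))
        cmd_id_cache = host_cmd_id;         /* virtio_cread() in the real driver */
    return cmd_id_cache;
}

int main(void)
{
    printf("%u\n", cmd_id_received());  /* 0: nothing pending yet */
    config_changed();
    printf("%u\n", cmd_id_received());  /* 7: re-read once */
    printf("%u\n", cmd_id_received());  /* 7: served from the cache */
    return 0;
}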
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 4cd9ea5c75be..d9dd0f789279 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
| @@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 468 | { | 468 | { |
| 469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 469 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
| 470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); | 470 | unsigned int irq = platform_get_irq(vm_dev->pdev, 0); |
| 471 | int i, err; | 471 | int i, err, queue_idx = 0; |
| 472 | 472 | ||
| 473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, | 473 | err = request_irq(irq, vm_interrupt, IRQF_SHARED, |
| 474 | dev_name(&vdev->dev), vm_dev); | 474 | dev_name(&vdev->dev), vm_dev); |
| @@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, | |||
| 476 | return err; | 476 | return err; |
| 477 | 477 | ||
| 478 | for (i = 0; i < nvqs; ++i) { | 478 | for (i = 0; i < nvqs; ++i) { |
| 479 | vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], | 479 | if (!names[i]) { |
| 480 | vqs[i] = NULL; | ||
| 481 | continue; | ||
| 482 | } | ||
| 483 | |||
| 484 | vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], | ||
| 480 | ctx ? ctx[i] : false); | 485 | ctx ? ctx[i] : false); |
| 481 | if (IS_ERR(vqs[i])) { | 486 | if (IS_ERR(vqs[i])) { |
| 482 | vm_del_vqs(vdev); | 487 | vm_del_vqs(vdev); |
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 465a6f5142cc..d0584c040c60 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 285 | { | 285 | { |
| 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 287 | u16 msix_vec; | 287 | u16 msix_vec; |
| 288 | int i, err, nvectors, allocated_vectors; | 288 | int i, err, nvectors, allocated_vectors, queue_idx = 0; |
| 289 | 289 | ||
| 290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 290 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
| 291 | if (!vp_dev->vqs) | 291 | if (!vp_dev->vqs) |
| @@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 321 | msix_vec = allocated_vectors++; | 321 | msix_vec = allocated_vectors++; |
| 322 | else | 322 | else |
| 323 | msix_vec = VP_MSIX_VQ_VECTOR; | 323 | msix_vec = VP_MSIX_VQ_VECTOR; |
| 324 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 324 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
| 325 | ctx ? ctx[i] : false, | 325 | ctx ? ctx[i] : false, |
| 326 | msix_vec); | 326 | msix_vec); |
| 327 | if (IS_ERR(vqs[i])) { | 327 | if (IS_ERR(vqs[i])) { |
| @@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
| 356 | const char * const names[], const bool *ctx) | 356 | const char * const names[], const bool *ctx) |
| 357 | { | 357 | { |
| 358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 358 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 359 | int i, err; | 359 | int i, err, queue_idx = 0; |
| 360 | 360 | ||
| 361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); | 361 | vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); |
| 362 | if (!vp_dev->vqs) | 362 | if (!vp_dev->vqs) |
| @@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, | |||
| 374 | vqs[i] = NULL; | 374 | vqs[i] = NULL; |
| 375 | continue; | 375 | continue; |
| 376 | } | 376 | } |
| 377 | vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], | 377 | vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
| 378 | ctx ? ctx[i] : false, | 378 | ctx ? ctx[i] : false, |
| 379 | VIRTIO_MSI_NO_VECTOR); | 379 | VIRTIO_MSI_NO_VECTOR); |
| 380 | if (IS_ERR(vqs[i])) { | 380 | if (IS_ERR(vqs[i])) { |
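Both the virtio-mmio and virtio-pci find_vqs paths above now keep a separate queue_idx that advances only for entries with a non-NULL name, so placeholder entries no longer consume a device queue index and the virtqueues that are actually created stay densely numbered. A tiny sketch of that index handling (illustrative only):

/* Entries with a NULL name are skipped; only real queues get an index. */
#include <stdio.h>

int main(void)
{
    const char *names[] = { "rx", NULL, "tx", NULL, "ctrl" };
    int nvqs = 5, i, queue_idx = 0;

    for (i = 0; i < nvqs; i++) {
        if (!names[i])
            continue;               /* vqs[i] = NULL in the driver */
        printf("vq[%d] \"%s\" -> device queue %d\n", i, names[i], queue_idx++);
    }
    return 0;
}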
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c index 5c4a764717c4..81208cd3f4ec 100644 --- a/drivers/watchdog/mt7621_wdt.c +++ b/drivers/watchdog/mt7621_wdt.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/watchdog.h> | 17 | #include <linux/watchdog.h> |
| 18 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/mod_devicetable.h> | ||
| 20 | 21 | ||
| 21 | #include <asm/mach-ralink/ralink_regs.h> | 22 | #include <asm/mach-ralink/ralink_regs.h> |
| 22 | 23 | ||
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 98967f0a7d10..db7c57d82cfd 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/watchdog.h> | 18 | #include <linux/watchdog.h> |
| 19 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
| 20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
| 21 | #include <linux/mod_devicetable.h> | ||
| 21 | 22 | ||
| 22 | #include <asm/mach-ralink/ralink_regs.h> | 23 | #include <asm/mach-ralink/ralink_regs.h> |
| 23 | 24 | ||
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c index 0d3a0fbbd7a5..52941207a12a 100644 --- a/drivers/watchdog/tqmx86_wdt.c +++ b/drivers/watchdog/tqmx86_wdt.c | |||
| @@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev) | |||
| 79 | return -ENOMEM; | 79 | return -ENOMEM; |
| 80 | 80 | ||
| 81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); | 81 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
| 82 | if (IS_ERR(res)) | 82 | if (!res) |
| 83 | return PTR_ERR(res); | 83 | return -ENODEV; |
| 84 | 84 | ||
| 85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, | 85 | priv->io_base = devm_ioport_map(&pdev->dev, res->start, |
| 86 | resource_size(res)); | 86 | resource_size(res)); |
| 87 | if (IS_ERR(priv->io_base)) | 87 | if (!priv->io_base) |
| 88 | return PTR_ERR(priv->io_base); | 88 | return -ENOMEM; |
| 89 | 89 | ||
| 90 | watchdog_set_drvdata(&priv->wdd, priv); | 90 | watchdog_set_drvdata(&priv->wdd, priv); |
| 91 | 91 | ||
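The tqmx86_wdt fix matters because platform_get_resource() and devm_ioport_map() signal failure with a NULL pointer, not with an ERR_PTR()-encoded errno, so the previous IS_ERR()/PTR_ERR() checks could never fire. A simplified restatement of the two conventions (the macros below mimic, but are not, the kernel's definitions):

#include <stdio.h>

#define MAX_ERRNO   4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
    void *null_style = NULL;            /* e.g. platform_get_resource() failing */
    void *errptr_style = ERR_PTR(-12);  /* e.g. an API that returns ERR_PTR(-ENOMEM) */

    printf("IS_ERR(NULL) = %d -> failure missed\n", IS_ERR(null_style));
    printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(errptr_style));
    printf("!NULL = %d -> failure caught by the new check\n", !null_style);
    return 0;
}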
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 93194f3e7540..117e76b2f939 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -1650,7 +1650,7 @@ void xen_callback_vector(void) | |||
| 1650 | xen_have_vector_callback = 0; | 1650 | xen_have_vector_callback = 0; |
| 1651 | return; | 1651 | return; |
| 1652 | } | 1652 | } |
| 1653 | pr_info("Xen HVM callback vector for event delivery is enabled\n"); | 1653 | pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); |
| 1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, | 1654 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
| 1655 | xen_hvm_callback_vector); | 1655 | xen_hvm_callback_vector); |
| 1656 | } | 1656 | } |
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 2e5d845b5091..7aa64d1b119c 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
| @@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque) | |||
| 160 | 160 | ||
| 161 | /* write the data, then modify the indexes */ | 161 | /* write the data, then modify the indexes */ |
| 162 | virt_wmb(); | 162 | virt_wmb(); |
| 163 | if (ret < 0) | 163 | if (ret < 0) { |
| 164 | atomic_set(&map->read, 0); | ||
| 164 | intf->in_error = ret; | 165 | intf->in_error = ret; |
| 165 | else | 166 | } else |
| 166 | intf->in_prod = prod + ret; | 167 | intf->in_prod = prod + ret; |
| 167 | /* update the indexes, then notify the other end */ | 168 | /* update the indexes, then notify the other end */ |
| 168 | virt_wmb(); | 169 | virt_wmb(); |
| @@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev, | |||
| 282 | static void pvcalls_sk_state_change(struct sock *sock) | 283 | static void pvcalls_sk_state_change(struct sock *sock) |
| 283 | { | 284 | { |
| 284 | struct sock_mapping *map = sock->sk_user_data; | 285 | struct sock_mapping *map = sock->sk_user_data; |
| 285 | struct pvcalls_data_intf *intf; | ||
| 286 | 286 | ||
| 287 | if (map == NULL) | 287 | if (map == NULL) |
| 288 | return; | 288 | return; |
| 289 | 289 | ||
| 290 | intf = map->ring; | 290 | atomic_inc(&map->read); |
| 291 | intf->in_error = -ENOTCONN; | ||
| 292 | notify_remote_via_irq(map->irq); | 291 | notify_remote_via_irq(map->irq); |
| 293 | } | 292 | } |
| 294 | 293 | ||
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 77224d8f3e6f..8a249c95c193 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
| @@ -31,6 +31,12 @@ | |||
| 31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) | 31 | #define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) |
| 32 | #define PVCALLS_FRONT_MAX_SPIN 5000 | 32 | #define PVCALLS_FRONT_MAX_SPIN 5000 |
| 33 | 33 | ||
| 34 | static struct proto pvcalls_proto = { | ||
| 35 | .name = "PVCalls", | ||
| 36 | .owner = THIS_MODULE, | ||
| 37 | .obj_size = sizeof(struct sock), | ||
| 38 | }; | ||
| 39 | |||
| 34 | struct pvcalls_bedata { | 40 | struct pvcalls_bedata { |
| 35 | struct xen_pvcalls_front_ring ring; | 41 | struct xen_pvcalls_front_ring ring; |
| 36 | grant_ref_t ref; | 42 | grant_ref_t ref; |
| @@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock) | |||
| 335 | return ret; | 341 | return ret; |
| 336 | } | 342 | } |
| 337 | 343 | ||
| 344 | static void free_active_ring(struct sock_mapping *map) | ||
| 345 | { | ||
| 346 | if (!map->active.ring) | ||
| 347 | return; | ||
| 348 | |||
| 349 | free_pages((unsigned long)map->active.data.in, | ||
| 350 | map->active.ring->ring_order); | ||
| 351 | free_page((unsigned long)map->active.ring); | ||
| 352 | } | ||
| 353 | |||
| 354 | static int alloc_active_ring(struct sock_mapping *map) | ||
| 355 | { | ||
| 356 | void *bytes; | ||
| 357 | |||
| 358 | map->active.ring = (struct pvcalls_data_intf *) | ||
| 359 | get_zeroed_page(GFP_KERNEL); | ||
| 360 | if (!map->active.ring) | ||
| 361 | goto out; | ||
| 362 | |||
| 363 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
| 364 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 365 | PVCALLS_RING_ORDER); | ||
| 366 | if (!bytes) | ||
| 367 | goto out; | ||
| 368 | |||
| 369 | map->active.data.in = bytes; | ||
| 370 | map->active.data.out = bytes + | ||
| 371 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
| 372 | |||
| 373 | return 0; | ||
| 374 | |||
| 375 | out: | ||
| 376 | free_active_ring(map); | ||
| 377 | return -ENOMEM; | ||
| 378 | } | ||
| 379 | |||
| 338 | static int create_active(struct sock_mapping *map, int *evtchn) | 380 | static int create_active(struct sock_mapping *map, int *evtchn) |
| 339 | { | 381 | { |
| 340 | void *bytes; | 382 | void *bytes; |
| @@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 343 | *evtchn = -1; | 385 | *evtchn = -1; |
| 344 | init_waitqueue_head(&map->active.inflight_conn_req); | 386 | init_waitqueue_head(&map->active.inflight_conn_req); |
| 345 | 387 | ||
| 346 | map->active.ring = (struct pvcalls_data_intf *) | 388 | bytes = map->active.data.in; |
| 347 | __get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
| 348 | if (map->active.ring == NULL) | ||
| 349 | goto out_error; | ||
| 350 | map->active.ring->ring_order = PVCALLS_RING_ORDER; | ||
| 351 | bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 352 | PVCALLS_RING_ORDER); | ||
| 353 | if (bytes == NULL) | ||
| 354 | goto out_error; | ||
| 355 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) | 389 | for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) |
| 356 | map->active.ring->ref[i] = gnttab_grant_foreign_access( | 390 | map->active.ring->ref[i] = gnttab_grant_foreign_access( |
| 357 | pvcalls_front_dev->otherend_id, | 391 | pvcalls_front_dev->otherend_id, |
| @@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 361 | pvcalls_front_dev->otherend_id, | 395 | pvcalls_front_dev->otherend_id, |
| 362 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); | 396 | pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); |
| 363 | 397 | ||
| 364 | map->active.data.in = bytes; | ||
| 365 | map->active.data.out = bytes + | ||
| 366 | XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | ||
| 367 | |||
| 368 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); | 398 | ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); |
| 369 | if (ret) | 399 | if (ret) |
| 370 | goto out_error; | 400 | goto out_error; |
| @@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn) | |||
| 385 | out_error: | 415 | out_error: |
| 386 | if (*evtchn >= 0) | 416 | if (*evtchn >= 0) |
| 387 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); | 417 | xenbus_free_evtchn(pvcalls_front_dev, *evtchn); |
| 388 | free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER); | ||
| 389 | free_page((unsigned long)map->active.ring); | ||
| 390 | return ret; | 418 | return ret; |
| 391 | } | 419 | } |
| 392 | 420 | ||
| @@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, | |||
| 406 | return PTR_ERR(map); | 434 | return PTR_ERR(map); |
| 407 | 435 | ||
| 408 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 436 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
| 437 | ret = alloc_active_ring(map); | ||
| 438 | if (ret < 0) { | ||
| 439 | pvcalls_exit_sock(sock); | ||
| 440 | return ret; | ||
| 441 | } | ||
| 409 | 442 | ||
| 410 | spin_lock(&bedata->socket_lock); | 443 | spin_lock(&bedata->socket_lock); |
| 411 | ret = get_request(bedata, &req_id); | 444 | ret = get_request(bedata, &req_id); |
| 412 | if (ret < 0) { | 445 | if (ret < 0) { |
| 413 | spin_unlock(&bedata->socket_lock); | 446 | spin_unlock(&bedata->socket_lock); |
| 447 | free_active_ring(map); | ||
| 414 | pvcalls_exit_sock(sock); | 448 | pvcalls_exit_sock(sock); |
| 415 | return ret; | 449 | return ret; |
| 416 | } | 450 | } |
| 417 | ret = create_active(map, &evtchn); | 451 | ret = create_active(map, &evtchn); |
| 418 | if (ret < 0) { | 452 | if (ret < 0) { |
| 419 | spin_unlock(&bedata->socket_lock); | 453 | spin_unlock(&bedata->socket_lock); |
| 454 | free_active_ring(map); | ||
| 420 | pvcalls_exit_sock(sock); | 455 | pvcalls_exit_sock(sock); |
| 421 | return ret; | 456 | return ret; |
| 422 | } | 457 | } |
| @@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf, | |||
| 469 | virt_mb(); | 504 | virt_mb(); |
| 470 | 505 | ||
| 471 | size = pvcalls_queued(prod, cons, array_size); | 506 | size = pvcalls_queued(prod, cons, array_size); |
| 472 | if (size >= array_size) | 507 | if (size > array_size) |
| 473 | return -EINVAL; | 508 | return -EINVAL; |
| 509 | if (size == array_size) | ||
| 510 | return 0; | ||
| 474 | if (len > array_size - size) | 511 | if (len > array_size - size) |
| 475 | len = array_size - size; | 512 | len = array_size - size; |
| 476 | 513 | ||
| @@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf, | |||
| 560 | error = intf->in_error; | 597 | error = intf->in_error; |
| 561 | /* get pointers before reading from the ring */ | 598 | /* get pointers before reading from the ring */ |
| 562 | virt_rmb(); | 599 | virt_rmb(); |
| 563 | if (error < 0) | ||
| 564 | return error; | ||
| 565 | 600 | ||
| 566 | size = pvcalls_queued(prod, cons, array_size); | 601 | size = pvcalls_queued(prod, cons, array_size); |
| 567 | masked_prod = pvcalls_mask(prod, array_size); | 602 | masked_prod = pvcalls_mask(prod, array_size); |
| 568 | masked_cons = pvcalls_mask(cons, array_size); | 603 | masked_cons = pvcalls_mask(cons, array_size); |
| 569 | 604 | ||
| 570 | if (size == 0) | 605 | if (size == 0) |
| 571 | return 0; | 606 | return error ?: size; |
| 572 | 607 | ||
| 573 | if (len > size) | 608 | if (len > size) |
| 574 | len = size; | 609 | len = size; |
| @@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 780 | } | 815 | } |
| 781 | } | 816 | } |
| 782 | 817 | ||
| 783 | spin_lock(&bedata->socket_lock); | 818 | map2 = kzalloc(sizeof(*map2), GFP_KERNEL); |
| 784 | ret = get_request(bedata, &req_id); | 819 | if (map2 == NULL) { |
| 785 | if (ret < 0) { | ||
| 786 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 820 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 787 | (void *)&map->passive.flags); | 821 | (void *)&map->passive.flags); |
| 788 | spin_unlock(&bedata->socket_lock); | 822 | pvcalls_exit_sock(sock); |
| 823 | return -ENOMEM; | ||
| 824 | } | ||
| 825 | ret = alloc_active_ring(map2); | ||
| 826 | if (ret < 0) { | ||
| 827 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | ||
| 828 | (void *)&map->passive.flags); | ||
| 829 | kfree(map2); | ||
| 789 | pvcalls_exit_sock(sock); | 830 | pvcalls_exit_sock(sock); |
| 790 | return ret; | 831 | return ret; |
| 791 | } | 832 | } |
| 792 | map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); | 833 | spin_lock(&bedata->socket_lock); |
| 793 | if (map2 == NULL) { | 834 | ret = get_request(bedata, &req_id); |
| 835 | if (ret < 0) { | ||
| 794 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 836 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 795 | (void *)&map->passive.flags); | 837 | (void *)&map->passive.flags); |
| 796 | spin_unlock(&bedata->socket_lock); | 838 | spin_unlock(&bedata->socket_lock); |
| 839 | free_active_ring(map2); | ||
| 840 | kfree(map2); | ||
| 797 | pvcalls_exit_sock(sock); | 841 | pvcalls_exit_sock(sock); |
| 798 | return -ENOMEM; | 842 | return ret; |
| 799 | } | 843 | } |
| 844 | |||
| 800 | ret = create_active(map2, &evtchn); | 845 | ret = create_active(map2, &evtchn); |
| 801 | if (ret < 0) { | 846 | if (ret < 0) { |
| 847 | free_active_ring(map2); | ||
| 802 | kfree(map2); | 848 | kfree(map2); |
| 803 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 849 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
| 804 | (void *)&map->passive.flags); | 850 | (void *)&map->passive.flags); |
| @@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 839 | 885 | ||
| 840 | received: | 886 | received: |
| 841 | map2->sock = newsock; | 887 | map2->sock = newsock; |
| 842 | newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); | 888 | newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false); |
| 843 | if (!newsock->sk) { | 889 | if (!newsock->sk) { |
| 844 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 890 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
| 845 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; | 891 | map->passive.inflight_req_id = PVCALLS_INVALID_ID; |
| @@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock) | |||
| 1032 | spin_lock(&bedata->socket_lock); | 1078 | spin_lock(&bedata->socket_lock); |
| 1033 | list_del(&map->list); | 1079 | list_del(&map->list); |
| 1034 | spin_unlock(&bedata->socket_lock); | 1080 | spin_unlock(&bedata->socket_lock); |
| 1035 | if (READ_ONCE(map->passive.inflight_req_id) != | 1081 | if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID && |
| 1036 | PVCALLS_INVALID_ID) { | 1082 | READ_ONCE(map->passive.inflight_req_id) != 0) { |
| 1037 | pvcalls_front_free_map(bedata, | 1083 | pvcalls_front_free_map(bedata, |
| 1038 | map->passive.accept_map); | 1084 | map->passive.accept_map); |
| 1039 | } | 1085 | } |
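The pvcalls-front refactor pulls the active-ring page allocations into alloc_active_ring()/free_active_ring() so they happen with GFP_KERNEL before bedata->socket_lock is taken (the accept path previously used GFP_ATOMIC under the lock), and every later failure unwinds through the free helper. A userspace sketch of that allocate-before-lock ordering, with stand-in names:

/* Do the allocations that may sleep or fail before taking the lock,
 * and free them on any later failure - the shape enforced above. */
#include <stdio.h>
#include <stdlib.h>

struct ring { char *in; char *out; };

static void socket_lock(void)   { printf("lock taken\n"); }
static void socket_unlock(void) { printf("lock released\n"); }

static int alloc_ring(struct ring *r)       /* like alloc_active_ring() */
{
    r->in  = calloc(1, 4096);
    r->out = calloc(1, 4096);
    if (!r->in || !r->out) {
        free(r->in);
        free(r->out);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ring r;

    if (alloc_ring(&r))                     /* before the lock */
        return 1;

    socket_lock();
    /* get_request()/create_active() equivalents would run here; on failure
     * the caller drops the lock and calls the free helper, as in the patch */
    socket_unlock();

    free(r.in);
    free(r.out);
    return 0;
}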
diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 0568fd986821..e432bd27a2e7 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c | |||
| @@ -208,7 +208,7 @@ again: | |||
| 208 | /* The new front of the queue now owns the state variables. */ | 208 | /* The new front of the queue now owns the state variables. */ |
| 209 | next = list_entry(vnode->pending_locks.next, | 209 | next = list_entry(vnode->pending_locks.next, |
| 210 | struct file_lock, fl_u.afs.link); | 210 | struct file_lock, fl_u.afs.link); |
| 211 | vnode->lock_key = afs_file_key(next->fl_file); | 211 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
| 212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 212 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
| 213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 213 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
| 214 | goto again; | 214 | goto again; |
| @@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl) | |||
| 413 | /* The new front of the queue now owns the state variables. */ | 413 | /* The new front of the queue now owns the state variables. */ |
| 414 | next = list_entry(vnode->pending_locks.next, | 414 | next = list_entry(vnode->pending_locks.next, |
| 415 | struct file_lock, fl_u.afs.link); | 415 | struct file_lock, fl_u.afs.link); |
| 416 | vnode->lock_key = afs_file_key(next->fl_file); | 416 | vnode->lock_key = key_get(afs_file_key(next->fl_file)); |
| 417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; | 417 | vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; |
| 418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; | 418 | vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; |
| 419 | afs_lock_may_be_available(vnode); | 419 | afs_lock_may_be_available(vnode); |
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 6b17d3620414..1a4ce07fb406 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
| @@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
| 414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { | 414 | } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { |
| 415 | valid = true; | 415 | valid = true; |
| 416 | } else { | 416 | } else { |
| 417 | vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; | ||
| 418 | vnode->cb_v_break = vnode->volume->cb_v_break; | 417 | vnode->cb_v_break = vnode->volume->cb_v_break; |
| 419 | valid = false; | 418 | valid = false; |
| 420 | } | 419 | } |
| @@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode) | |||
| 546 | #endif | 545 | #endif |
| 547 | 546 | ||
| 548 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); | 547 | afs_put_permits(rcu_access_pointer(vnode->permit_cache)); |
| 548 | key_put(vnode->lock_key); | ||
| 549 | vnode->lock_key = NULL; | ||
| 549 | _leave(""); | 550 | _leave(""); |
| 550 | } | 551 | } |
| 551 | 552 | ||
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index 07bc10f076aa..d443e2bfa094 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h | |||
| @@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus { | |||
| 161 | struct yfs_xdr_u64 max_quota; | 161 | struct yfs_xdr_u64 max_quota; |
| 162 | struct yfs_xdr_u64 file_quota; | 162 | struct yfs_xdr_u64 file_quota; |
| 163 | } __packed; | 163 | } __packed; |
| 164 | |||
| 165 | enum yfs_lock_type { | ||
| 166 | yfs_LockNone = -1, | ||
| 167 | yfs_LockRead = 0, | ||
| 168 | yfs_LockWrite = 1, | ||
| 169 | yfs_LockExtend = 2, | ||
| 170 | yfs_LockRelease = 3, | ||
| 171 | yfs_LockMandatoryRead = 0x100, | ||
| 172 | yfs_LockMandatoryWrite = 0x101, | ||
| 173 | yfs_LockMandatoryExtend = 0x102, | ||
| 174 | }; | ||
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a7b44863d502..2c588f9bbbda 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls; | |||
| 23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); | 23 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); |
| 24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); | 24 | static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); |
| 25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); | 25 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 26 | static void afs_delete_async_call(struct work_struct *); | ||
| 26 | static void afs_process_async_call(struct work_struct *); | 27 | static void afs_process_async_call(struct work_struct *); |
| 27 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); | 28 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 28 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); | 29 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); |
| @@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call) | |||
| 203 | } | 204 | } |
| 204 | } | 205 | } |
| 205 | 206 | ||
| 207 | static struct afs_call *afs_get_call(struct afs_call *call, | ||
| 208 | enum afs_call_trace why) | ||
| 209 | { | ||
| 210 | int u = atomic_inc_return(&call->usage); | ||
| 211 | |||
| 212 | trace_afs_call(call, why, u, | ||
| 213 | atomic_read(&call->net->nr_outstanding_calls), | ||
| 214 | __builtin_return_address(0)); | ||
| 215 | return call; | ||
| 216 | } | ||
| 217 | |||
| 206 | /* | 218 | /* |
| 207 | * Queue the call for actual work. | 219 | * Queue the call for actual work. |
| 208 | */ | 220 | */ |
| 209 | static void afs_queue_call_work(struct afs_call *call) | 221 | static void afs_queue_call_work(struct afs_call *call) |
| 210 | { | 222 | { |
| 211 | if (call->type->work) { | 223 | if (call->type->work) { |
| 212 | int u = atomic_inc_return(&call->usage); | ||
| 213 | |||
| 214 | trace_afs_call(call, afs_call_trace_work, u, | ||
| 215 | atomic_read(&call->net->nr_outstanding_calls), | ||
| 216 | __builtin_return_address(0)); | ||
| 217 | |||
| 218 | INIT_WORK(&call->work, call->type->work); | 224 | INIT_WORK(&call->work, call->type->work); |
| 219 | 225 | ||
| 226 | afs_get_call(call, afs_call_trace_work); | ||
| 220 | if (!queue_work(afs_wq, &call->work)) | 227 | if (!queue_work(afs_wq, &call->work)) |
| 221 | afs_put_call(call); | 228 | afs_put_call(call); |
| 222 | } | 229 | } |
| @@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
| 398 | } | 405 | } |
| 399 | } | 406 | } |
| 400 | 407 | ||
| 408 | /* If the call is going to be asynchronous, we need an extra ref for | ||
| 409 | * the call to hold itself so the caller need not hang on to its ref. | ||
| 410 | */ | ||
| 411 | if (call->async) | ||
| 412 | afs_get_call(call, afs_call_trace_get); | ||
| 413 | |||
| 401 | /* create a call */ | 414 | /* create a call */ |
| 402 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, | 415 | rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, |
| 403 | (unsigned long)call, | 416 | (unsigned long)call, |
| @@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, | |||
| 438 | goto error_do_abort; | 451 | goto error_do_abort; |
| 439 | } | 452 | } |
| 440 | 453 | ||
| 441 | /* at this point, an async call may no longer exist as it may have | 454 | /* Note that at this point, we may have received the reply or an abort |
| 442 | * already completed */ | 455 | * - and an asynchronous call may already have completed. |
| 443 | if (call->async) | 456 | */ |
| 457 | if (call->async) { | ||
| 458 | afs_put_call(call); | ||
| 444 | return -EINPROGRESS; | 459 | return -EINPROGRESS; |
| 460 | } | ||
| 445 | 461 | ||
| 446 | return afs_wait_for_call_to_complete(call, ac); | 462 | return afs_wait_for_call_to_complete(call, ac); |
| 447 | 463 | ||
| 448 | error_do_abort: | 464 | error_do_abort: |
| 449 | call->state = AFS_CALL_COMPLETE; | ||
| 450 | if (ret != -ECONNABORTED) { | 465 | if (ret != -ECONNABORTED) { |
| 451 | rxrpc_kernel_abort_call(call->net->socket, rxcall, | 466 | rxrpc_kernel_abort_call(call->net->socket, rxcall, |
| 452 | RX_USER_ABORT, ret, "KSD"); | 467 | RX_USER_ABORT, ret, "KSD"); |
| @@ -463,8 +478,24 @@ error_do_abort: | |||
| 463 | error_kill_call: | 478 | error_kill_call: |
| 464 | if (call->type->done) | 479 | if (call->type->done) |
| 465 | call->type->done(call); | 480 | call->type->done(call); |
| 466 | afs_put_call(call); | 481 | |
| 482 | /* We need to dispose of the extra ref we grabbed for an async call. | ||
| 483 | * The call, however, might be queued on afs_async_calls and we need to | ||
| 484 | * make sure we don't get any more notifications that might requeue it. | ||
| 485 | */ | ||
| 486 | if (call->rxcall) { | ||
| 487 | rxrpc_kernel_end_call(call->net->socket, call->rxcall); | ||
| 488 | call->rxcall = NULL; | ||
| 489 | } | ||
| 490 | if (call->async) { | ||
| 491 | if (cancel_work_sync(&call->async_work)) | ||
| 492 | afs_put_call(call); | ||
| 493 | afs_put_call(call); | ||
| 494 | } | ||
| 495 | |||
| 467 | ac->error = ret; | 496 | ac->error = ret; |
| 497 | call->state = AFS_CALL_COMPLETE; | ||
| 498 | afs_put_call(call); | ||
| 468 | _leave(" = %d", ret); | 499 | _leave(" = %d", ret); |
| 469 | return ret; | 500 | return ret; |
| 470 | } | 501 | } |
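In afs_make_call(), an asynchronous call now takes an extra reference on itself via the new afs_get_call() helper before the rxrpc call is started, so the submitter can drop its own reference as soon as the submit succeeds, and the error path has to discard the extra reference (and any queued async work) itself. A much-simplified refcount model of that ownership rule, with invented names:

#include <stdio.h>

struct call { int usage; };

static void get_call(struct call *c) { c->usage++; }
static void put_call(struct call *c)
{
    if (--c->usage == 0)
        printf("call freed\n");
}

static int submit_async(struct call *c, int fail)
{
    get_call(c);            /* ref owned by the async completion path */
    if (fail) {
        put_call(c);        /* nothing will complete: drop it again */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct call c = { .usage = 1 };

    if (submit_async(&c, 0) == 0)
        put_call(&c);       /* submitter's ref; completion still holds one */
    put_call(&c);           /* the "completion" dropping its ref -> freed */
    return 0;
}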
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 95d0761cdb34..155dc14caef9 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c | |||
| @@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, | |||
| 42 | if (vldb->fs_mask[i] & type_mask) | 42 | if (vldb->fs_mask[i] & type_mask) |
| 43 | nr_servers++; | 43 | nr_servers++; |
| 44 | 44 | ||
| 45 | slist = kzalloc(sizeof(struct afs_server_list) + | 45 | slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); |
| 46 | sizeof(struct afs_server_entry) * nr_servers, | ||
| 47 | GFP_KERNEL); | ||
| 48 | if (!slist) | 46 | if (!slist) |
| 49 | goto error; | 47 | goto error; |
| 50 | 48 | ||
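afs_alloc_server_list() now sizes its allocation with struct_size(), the kernel helper for a struct that ends in a flexible array member; unlike the open-coded sizeof arithmetic it also saturates on overflow. The userspace macro below only shows the size computation, not the overflow handling:

#include <stdio.h>
#include <stdlib.h>

struct server_entry { int id; };

struct server_list {
    unsigned int nr_servers;
    struct server_entry servers[];      /* flexible array member */
};

/* Rough stand-in for struct_size(p, member, n): header plus n elements. */
#define STRUCT_SIZE(p, member, n) \
    (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
    struct server_list *slist;
    unsigned int n = 4;

    slist = calloc(1, STRUCT_SIZE(slist, servers, n));
    if (!slist)
        return 1;
    slist->nr_servers = n;
    printf("allocated %zu bytes\n", STRUCT_SIZE(slist, servers, n));
    free(slist);
    return 0;
}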
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 12658c1363ae..5aa57929e8c2 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c | |||
| @@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc, | |||
| 803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); | 803 | bp = xdr_encode_YFSFid(bp, &vnode->fid); |
| 804 | bp = xdr_encode_string(bp, name, namesz); | 804 | bp = xdr_encode_string(bp, name, namesz); |
| 805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); | 805 | bp = xdr_encode_YFSStoreStatus_mode(bp, mode); |
| 806 | bp = xdr_encode_u32(bp, 0); /* ViceLockType */ | 806 | bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ |
| 807 | yfs_check_req(call, bp); | 807 | yfs_check_req(call, bp); |
| 808 | 808 | ||
| 809 | afs_use_fs_server(call, fc->cbi); | 809 | afs_use_fs_server(call, fc->cbi); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index c546cdce77e6..58a4c1217fa8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) | |||
| 104 | } | 104 | } |
| 105 | EXPORT_SYMBOL(invalidate_bdev); | 105 | EXPORT_SYMBOL(invalidate_bdev); |
| 106 | 106 | ||
| 107 | static void set_init_blocksize(struct block_device *bdev) | ||
| 108 | { | ||
| 109 | unsigned bsize = bdev_logical_block_size(bdev); | ||
| 110 | loff_t size = i_size_read(bdev->bd_inode); | ||
| 111 | |||
| 112 | while (bsize < PAGE_SIZE) { | ||
| 113 | if (size & bsize) | ||
| 114 | break; | ||
| 115 | bsize <<= 1; | ||
| 116 | } | ||
| 117 | bdev->bd_block_size = bsize; | ||
| 118 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
| 119 | } | ||
| 120 | |||
| 107 | int set_blocksize(struct block_device *bdev, int size) | 121 | int set_blocksize(struct block_device *bdev, int size) |
| 108 | { | 122 | { |
| 109 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ | 123 | /* Size must be a power of two, and between 512 and PAGE_SIZE */ |
| @@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change); | |||
| 1431 | 1445 | ||
| 1432 | void bd_set_size(struct block_device *bdev, loff_t size) | 1446 | void bd_set_size(struct block_device *bdev, loff_t size) |
| 1433 | { | 1447 | { |
| 1434 | unsigned bsize = bdev_logical_block_size(bdev); | ||
| 1435 | |||
| 1436 | inode_lock(bdev->bd_inode); | 1448 | inode_lock(bdev->bd_inode); |
| 1437 | i_size_write(bdev->bd_inode, size); | 1449 | i_size_write(bdev->bd_inode, size); |
| 1438 | inode_unlock(bdev->bd_inode); | 1450 | inode_unlock(bdev->bd_inode); |
| 1439 | while (bsize < PAGE_SIZE) { | ||
| 1440 | if (size & bsize) | ||
| 1441 | break; | ||
| 1442 | bsize <<= 1; | ||
| 1443 | } | ||
| 1444 | bdev->bd_block_size = bsize; | ||
| 1445 | bdev->bd_inode->i_blkbits = blksize_bits(bsize); | ||
| 1446 | } | 1451 | } |
| 1447 | EXPORT_SYMBOL(bd_set_size); | 1452 | EXPORT_SYMBOL(bd_set_size); |
| 1448 | 1453 | ||
| @@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1519 | } | 1524 | } |
| 1520 | } | 1525 | } |
| 1521 | 1526 | ||
| 1522 | if (!ret) | 1527 | if (!ret) { |
| 1523 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); | 1528 | bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
| 1529 | set_init_blocksize(bdev); | ||
| 1530 | } | ||
| 1524 | 1531 | ||
| 1525 | /* | 1532 | /* |
| 1526 | * If the device is invalidated, rescan partition | 1533 | * If the device is invalidated, rescan partition |
| @@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1555 | goto out_clear; | 1562 | goto out_clear; |
| 1556 | } | 1563 | } |
| 1557 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); | 1564 | bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); |
| 1565 | set_init_blocksize(bdev); | ||
| 1558 | } | 1566 | } |
| 1559 | 1567 | ||
| 1560 | if (bdev->bd_bdi == &noop_backing_dev_info) | 1568 | if (bdev->bd_bdi == &noop_backing_dev_info) |
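set_init_blocksize() keeps the loop that used to live in bd_set_size(): starting from the logical block size, it doubles the block size while it still divides the device size, stopping at the page size, and it now runs only when the device is first opened rather than on every resize. A standalone version of that computation (PAGE_SIZE assumed to be 4096 here):

#include <stdio.h>

static unsigned int init_blocksize(unsigned int logical_bs, unsigned long long size)
{
    unsigned int bsize = logical_bs;
    const unsigned int page_size = 4096;    /* assumption for this sketch */

    while (bsize < page_size) {
        if (size & bsize)   /* size is not a multiple of the next power of two */
            break;
        bsize <<= 1;
    }
    return bsize;
}

int main(void)
{
    /* 10 GiB disk, 512-byte sectors -> 4096-byte blocks are usable */
    printf("%u\n", init_blocksize(512, 10ULL << 30));
    /* 7 sectors of 512 bytes -> stuck at 512 */
    printf("%u\n", init_blocksize(512, 7ULL * 512));
    return 0;
}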
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0a68cf7032f5..7a2a2621f0d9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | struct btrfs_trans_handle; | 35 | struct btrfs_trans_handle; |
| 36 | struct btrfs_transaction; | 36 | struct btrfs_transaction; |
| 37 | struct btrfs_pending_snapshot; | 37 | struct btrfs_pending_snapshot; |
| 38 | struct btrfs_delayed_ref_root; | ||
| 38 | extern struct kmem_cache *btrfs_trans_handle_cachep; | 39 | extern struct kmem_cache *btrfs_trans_handle_cachep; |
| 39 | extern struct kmem_cache *btrfs_bit_radix_cachep; | 40 | extern struct kmem_cache *btrfs_bit_radix_cachep; |
| 40 | extern struct kmem_cache *btrfs_path_cachep; | 41 | extern struct kmem_cache *btrfs_path_cachep; |
| @@ -786,6 +787,9 @@ enum { | |||
| 786 | * main phase. The fs_info::balance_ctl is initialized. | 787 | * main phase. The fs_info::balance_ctl is initialized. |
| 787 | */ | 788 | */ |
| 788 | BTRFS_FS_BALANCE_RUNNING, | 789 | BTRFS_FS_BALANCE_RUNNING, |
| 790 | |||
| 791 | /* Indicate that the cleaner thread is awake and doing something. */ | ||
| 792 | BTRFS_FS_CLEANER_RUNNING, | ||
| 789 | }; | 793 | }; |
| 790 | 794 | ||
| 791 | struct btrfs_fs_info { | 795 | struct btrfs_fs_info { |
| @@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2661 | unsigned long count); | 2665 | unsigned long count); |
| 2662 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, | 2666 | int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, |
| 2663 | unsigned long count, u64 transid, int wait); | 2667 | unsigned long count, u64 transid, int wait); |
| 2668 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, | ||
| 2669 | struct btrfs_delayed_ref_root *delayed_refs, | ||
| 2670 | struct btrfs_delayed_ref_head *head); | ||
| 2664 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); | 2671 | int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); |
| 2665 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | 2672 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, |
| 2666 | struct btrfs_fs_info *fs_info, u64 bytenr, | 2673 | struct btrfs_fs_info *fs_info, u64 bytenr, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8da2f380d3c0..6a2a2a951705 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg) | |||
| 1682 | while (1) { | 1682 | while (1) { |
| 1683 | again = 0; | 1683 | again = 0; |
| 1684 | 1684 | ||
| 1685 | set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
| 1686 | |||
| 1685 | /* Make the cleaner go to sleep early. */ | 1687 | /* Make the cleaner go to sleep early. */ |
| 1686 | if (btrfs_need_cleaner_sleep(fs_info)) | 1688 | if (btrfs_need_cleaner_sleep(fs_info)) |
| 1687 | goto sleep; | 1689 | goto sleep; |
| @@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg) | |||
| 1728 | */ | 1730 | */ |
| 1729 | btrfs_delete_unused_bgs(fs_info); | 1731 | btrfs_delete_unused_bgs(fs_info); |
| 1730 | sleep: | 1732 | sleep: |
| 1733 | clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); | ||
| 1731 | if (kthread_should_park()) | 1734 | if (kthread_should_park()) |
| 1732 | kthread_parkme(); | 1735 | kthread_parkme(); |
| 1733 | if (kthread_should_stop()) | 1736 | if (kthread_should_stop()) |
| @@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) | |||
| 4201 | spin_lock(&fs_info->ordered_root_lock); | 4204 | spin_lock(&fs_info->ordered_root_lock); |
| 4202 | } | 4205 | } |
| 4203 | spin_unlock(&fs_info->ordered_root_lock); | 4206 | spin_unlock(&fs_info->ordered_root_lock); |
| 4207 | |||
| 4208 | /* | ||
| 4209 | * We need this here because if we've been flipped read-only we won't | ||
| 4210 | * get sync() from the umount, so we need to make sure any ordered | ||
| 4211 | * extents that haven't had their dirty pages IO start writeout yet | ||
| 4212 | * actually get run and error out properly. | ||
| 4213 | */ | ||
| 4214 | btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); | ||
| 4204 | } | 4215 | } |
| 4205 | 4216 | ||
| 4206 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | 4217 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
| @@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
| 4265 | if (pin_bytes) | 4276 | if (pin_bytes) |
| 4266 | btrfs_pin_extent(fs_info, head->bytenr, | 4277 | btrfs_pin_extent(fs_info, head->bytenr, |
| 4267 | head->num_bytes, 1); | 4278 | head->num_bytes, 1); |
| 4279 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); | ||
| 4268 | btrfs_put_delayed_ref_head(head); | 4280 | btrfs_put_delayed_ref_head(head); |
| 4269 | cond_resched(); | 4281 | cond_resched(); |
| 4270 | spin_lock(&delayed_refs->lock); | 4282 | spin_lock(&delayed_refs->lock); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b15afeae16df..d81035b7ea7d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, | |||
| 2456 | return ret ? ret : 1; | 2456 | return ret ? ret : 1; |
| 2457 | } | 2457 | } |
| 2458 | 2458 | ||
| 2459 | static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, | 2459 | void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, |
| 2460 | struct btrfs_delayed_ref_head *head) | 2460 | struct btrfs_delayed_ref_root *delayed_refs, |
| 2461 | struct btrfs_delayed_ref_head *head) | ||
| 2461 | { | 2462 | { |
| 2462 | struct btrfs_fs_info *fs_info = trans->fs_info; | ||
| 2463 | struct btrfs_delayed_ref_root *delayed_refs = | ||
| 2464 | &trans->transaction->delayed_refs; | ||
| 2465 | int nr_items = 1; /* Dropping this ref head update. */ | 2463 | int nr_items = 1; /* Dropping this ref head update. */ |
| 2466 | 2464 | ||
| 2467 | if (head->total_ref_mod < 0) { | 2465 | if (head->total_ref_mod < 0) { |
| @@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, | |||
| 2544 | } | 2542 | } |
| 2545 | } | 2543 | } |
| 2546 | 2544 | ||
| 2547 | cleanup_ref_head_accounting(trans, head); | 2545 | btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
| 2548 | 2546 | ||
| 2549 | trace_run_delayed_ref_head(fs_info, head, 0); | 2547 | trace_run_delayed_ref_head(fs_info, head, 0); |
| 2550 | btrfs_delayed_ref_unlock(head); | 2548 | btrfs_delayed_ref_unlock(head); |
| @@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info, | |||
| 4954 | ret = 0; | 4952 | ret = 0; |
| 4955 | break; | 4953 | break; |
| 4956 | case COMMIT_TRANS: | 4954 | case COMMIT_TRANS: |
| 4955 | /* | ||
| 4956 | * If we have pending delayed iputs then we could free up a | ||
| 4957 | * bunch of pinned space, so make sure we run the iputs before | ||
| 4958 | * we do our pinned bytes check below. | ||
| 4959 | */ | ||
| 4960 | mutex_lock(&fs_info->cleaner_delayed_iput_mutex); | ||
| 4961 | btrfs_run_delayed_iputs(fs_info); | ||
| 4962 | mutex_unlock(&fs_info->cleaner_delayed_iput_mutex); | ||
| 4963 | |||
| 4957 | ret = may_commit_transaction(fs_info, space_info); | 4964 | ret = may_commit_transaction(fs_info, space_info); |
| 4958 | break; | 4965 | break; |
| 4959 | default: | 4966 | default: |
| @@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
| 7188 | if (head->must_insert_reserved) | 7195 | if (head->must_insert_reserved) |
| 7189 | ret = 1; | 7196 | ret = 1; |
| 7190 | 7197 | ||
| 7191 | cleanup_ref_head_accounting(trans, head); | 7198 | btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); |
| 7192 | mutex_unlock(&head->mutex); | 7199 | mutex_unlock(&head->mutex); |
| 7193 | btrfs_put_delayed_ref_head(head); | 7200 | btrfs_put_delayed_ref_head(head); |
| 7194 | return ret; | 7201 | return ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 43eb4535319d..5c349667c761 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -3129,9 +3129,6 @@ out: | |||
| 3129 | /* once for the tree */ | 3129 | /* once for the tree */ |
| 3130 | btrfs_put_ordered_extent(ordered_extent); | 3130 | btrfs_put_ordered_extent(ordered_extent); |
| 3131 | 3131 | ||
| 3132 | /* Try to release some metadata so we don't get an OOM but don't wait */ | ||
| 3133 | btrfs_btree_balance_dirty_nodelay(fs_info); | ||
| 3134 | |||
| 3135 | return ret; | 3132 | return ret; |
| 3136 | } | 3133 | } |
| 3137 | 3134 | ||
| @@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode) | |||
| 3254 | ASSERT(list_empty(&binode->delayed_iput)); | 3251 | ASSERT(list_empty(&binode->delayed_iput)); |
| 3255 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); | 3252 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); |
| 3256 | spin_unlock(&fs_info->delayed_iput_lock); | 3253 | spin_unlock(&fs_info->delayed_iput_lock); |
| 3254 | if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) | ||
| 3255 | wake_up_process(fs_info->cleaner_kthread); | ||
| 3257 | } | 3256 | } |
| 3258 | 3257 | ||
| 3259 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) | 3258 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) |
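btrfs_add_delayed_iput() now wakes the cleaner kthread, but only when BTRFS_FS_CLEANER_RUNNING is clear, i.e. when the cleaner is not already awake and working, which avoids issuing redundant wake-ups. A small userspace model of that gated wake-up (names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool cleaner_running;
static int wakeups;

static void wake_cleaner(void) { wakeups++; }

static void add_delayed_iput(void)
{
    /* ...queue the iput... */
    if (!atomic_load(&cleaner_running))
        wake_cleaner();
}

static void cleaner_pass(void)
{
    atomic_store(&cleaner_running, 1);
    add_delayed_iput();                 /* queued while running: no extra wake-up */
    atomic_store(&cleaner_running, 0);
}

int main(void)
{
    add_delayed_iput();                 /* cleaner idle: wake it */
    cleaner_pass();
    printf("wakeups = %d\n", wakeups);  /* 1 */
    return 0;
}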
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 46d691ba04bc..45b2322e092d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c | |||
| @@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in, | |||
| 133 | struct file *file_out, loff_t pos_out, | 133 | struct file *file_out, loff_t pos_out, |
| 134 | size_t count, unsigned int flags) | 134 | size_t count, unsigned int flags) |
| 135 | { | 135 | { |
| 136 | ssize_t ret; | ||
| 137 | |||
| 138 | if (file_inode(file_in) == file_inode(file_out)) | 136 | if (file_inode(file_in) == file_inode(file_out)) |
| 139 | return -EINVAL; | 137 | return -EINVAL; |
| 140 | retry: | 138 | return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); |
| 141 | ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count); | ||
| 142 | if (ret == -EAGAIN) | ||
| 143 | goto retry; | ||
| 144 | return ret; | ||
| 145 | } | 139 | } |
| 146 | 140 | ||
| 147 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) | 141 | static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) |
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h index ad6f55dabd6d..0f2e0fe45ca4 100644 --- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h | |||
| @@ -1,12 +1,11 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ | ||
| 1 | /* | 2 | /* |
| 2 | * | ||
| 3 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
| 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2017 Amlogic, inc. | 6 | * Copyright (c) 2017 Amlogic, inc. |
| 7 | * Author: Yixun Lan <yixun.lan@amlogic.com> | 7 | * Author: Yixun Lan <yixun.lan@amlogic.com> |
| 8 | * | 8 | * |
| 9 | * SPDX-License-Identifier: (GPL-2.0+ OR BSD) | ||
| 10 | */ | 9 | */ |
| 11 | 10 | ||
| 12 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H | 11 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H |
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | struct bcma_soc { | 7 | struct bcma_soc { |
| 8 | struct bcma_bus bus; | 8 | struct bcma_bus bus; |
| 9 | struct device *dev; | ||
| 9 | }; | 10 | }; |
| 10 | 11 | ||
| 11 | int __init bcma_host_soc_register(struct bcma_soc *soc); | 12 | int __init bcma_host_soc_register(struct bcma_soc *soc); |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
| @@ -3,9 +3,8 @@ | |||
| 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." |
| 4 | #endif | 4 | #endif |
| 5 | 5 | ||
| 6 | /* Some compiler specific definitions are overwritten here | 6 | /* Compiler specific definitions for Clang compiler */ |
| 7 | * for Clang compiler | 7 | |
| 8 | */ | ||
| 9 | #define uninitialized_var(x) x = *(&(x)) | 8 | #define uninitialized_var(x) x = *(&(x)) |
| 10 | 9 | ||
| 11 | /* same as gcc, this was present in clang-2.6 so we can assume it works | 10 | /* same as gcc, this was present in clang-2.6 so we can assume it works |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dd8268f5f5f0..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -58,10 +58,6 @@ | |||
| 58 | (typeof(ptr)) (__ptr + (off)); \ | 58 | (typeof(ptr)) (__ptr + (off)); \ |
| 59 | }) | 59 | }) |
| 60 | 60 | ||
| 61 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ | ||
| 62 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 63 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 64 | |||
| 65 | /* | 61 | /* |
| 66 | * A trick to suppress uninitialized variable warning without generating any | 62 | * A trick to suppress uninitialized variable warning without generating any |
| 67 | * code | 63 | * code |
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
| @@ -5,9 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #ifdef __ECC | 6 | #ifdef __ECC |
| 7 | 7 | ||
| 8 | /* Some compiler specific definitions are overwritten here | 8 | /* Compiler specific definitions for Intel ECC compiler */ |
| 9 | * for Intel ECC compiler | ||
| 10 | */ | ||
| 11 | 9 | ||
| 12 | #include <asm/intrinsics.h> | 10 | #include <asm/intrinsics.h> |
| 13 | 11 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
| 161 | #endif | 161 | #endif |
| 162 | 162 | ||
| 163 | #ifndef OPTIMIZER_HIDE_VAR | 163 | #ifndef OPTIMIZER_HIDE_VAR |
| 164 | #define OPTIMIZER_HIDE_VAR(var) barrier() | 164 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
| 165 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 166 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 165 | #endif | 167 | #endif |
| 166 | 168 | ||
| 167 | /* Not-quite-unique ID. */ | 169 | /* Not-quite-unique ID. */ |
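
A note on the OPTIMIZER_HIDE_VAR move above: the generic fallback used to be barrier(), which only forces memory-resident values to be reloaded, while tying the variable to a register output is what actually makes the compiler forget the value. The sketch below is a minimal user-space illustration of the same constraint trick, not code from this patch (HIDE_VAR and square() are made-up names).

#include <stdio.h>

/* Same idiom as OPTIMIZER_HIDE_VAR: the empty asm pretends to rewrite
 * "var" in place (the "0" constraint ties input to output), so the
 * compiler must drop whatever it had proven about the value.
 */
#define HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

static int square(int x)
{
        int y = x * x;

        HIDE_VAR(y);            /* compiler can no longer assume y == x * x */
        return y;
}

int main(void)
{
        printf("%d\n", square(7));      /* prints 49 */
        return 0;
}
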
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7cdd31a69719..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); | |||
| 653 | 653 | ||
| 654 | extern struct fb_info *registered_fb[FB_MAX]; | 654 | extern struct fb_info *registered_fb[FB_MAX]; |
| 655 | extern int num_registered_fb; | 655 | extern int num_registered_fb; |
| 656 | extern bool fb_center_logo; | ||
| 656 | extern struct class *fb_class; | 657 | extern struct class *fb_class; |
| 657 | 658 | ||
| 658 | #define for_each_registered_fb(i) \ | 659 | #define for_each_registered_fb(i) \ |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..7315977b64da 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
| @@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( | |||
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | enum nvdimm_security_state { | 162 | enum nvdimm_security_state { |
| 163 | NVDIMM_SECURITY_ERROR = -1, | ||
| 163 | NVDIMM_SECURITY_DISABLED, | 164 | NVDIMM_SECURITY_DISABLED, |
| 164 | NVDIMM_SECURITY_UNLOCKED, | 165 | NVDIMM_SECURITY_UNLOCKED, |
| 165 | NVDIMM_SECURITY_LOCKED, | 166 | NVDIMM_SECURITY_LOCKED, |
diff --git a/include/linux/of.h b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -50,7 +50,6 @@ struct of_irq_controller; | |||
| 50 | 50 | ||
| 51 | struct device_node { | 51 | struct device_node { |
| 52 | const char *name; | 52 | const char *name; |
| 53 | const char *type; | ||
| 54 | phandle phandle; | 53 | phandle phandle; |
| 55 | const char *full_name; | 54 | const char *full_name; |
| 56 | struct fwnode_handle fwnode; | 55 | struct fwnode_handle fwnode; |
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..987b6491b946 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -12,6 +12,11 @@ struct irq_affinity; | |||
| 12 | 12 | ||
| 13 | /** | 13 | /** |
| 14 | * virtio_config_ops - operations for configuring a virtio device | 14 | * virtio_config_ops - operations for configuring a virtio device |
| 15 | * Note: Do not assume that a transport implements all of the operations | ||
| 16 | * getting/setting a value as a simple read/write! Generally speaking, | ||
| 17 | * any of @get/@set, @get_status/@set_status, or @get_features/ | ||
| 18 | * @finalize_features are NOT safe to be called from an atomic | ||
| 19 | * context. | ||
| 15 | * @get: read the value of a configuration field | 20 | * @get: read the value of a configuration field |
| 16 | * vdev: the virtio_device | 21 | * vdev: the virtio_device |
| 17 | * offset: the offset of the configuration field | 22 | * offset: the offset of the configuration field |
| @@ -22,7 +27,7 @@ struct irq_affinity; | |||
| 22 | * offset: the offset of the configuration field | 27 | * offset: the offset of the configuration field |
| 23 | * buf: the buffer to read the field value from. | 28 | * buf: the buffer to read the field value from. |
| 24 | * len: the length of the buffer | 29 | * len: the length of the buffer |
| 25 | * @generation: config generation counter | 30 | * @generation: config generation counter (optional) |
| 26 | * vdev: the virtio_device | 31 | * vdev: the virtio_device |
| 27 | * Returns the config generation counter | 32 | * Returns the config generation counter |
| 28 | * @get_status: read the status byte | 33 | * @get_status: read the status byte |
| @@ -48,17 +53,17 @@ struct irq_affinity; | |||
| 48 | * @del_vqs: free virtqueues found by find_vqs(). | 53 | * @del_vqs: free virtqueues found by find_vqs(). |
| 49 | * @get_features: get the array of feature bits for this device. | 54 | * @get_features: get the array of feature bits for this device. |
| 50 | * vdev: the virtio_device | 55 | * vdev: the virtio_device |
| 51 | * Returns the first 32 feature bits (all we currently need). | 56 | * Returns the first 64 feature bits (all we currently need). |
| 52 | * @finalize_features: confirm what device features we'll be using. | 57 | * @finalize_features: confirm what device features we'll be using. |
| 53 | * vdev: the virtio_device | 58 | * vdev: the virtio_device |
| 54 | * This gives the final feature bits for the device: it can change | 59 | * This gives the final feature bits for the device: it can change |
| 55 | * the dev->feature bits if it wants. | 60 | * the dev->feature bits if it wants. |
| 56 | * Returns 0 on success or error status | 61 | * Returns 0 on success or error status |
| 57 | * @bus_name: return the bus name associated with the device | 62 | * @bus_name: return the bus name associated with the device (optional) |
| 58 | * vdev: the virtio_device | 63 | * vdev: the virtio_device |
| 59 | * This returns a pointer to the bus name a la pci_name from which | 64 | * This returns a pointer to the bus name a la pci_name from which |
| 60 | * the caller can then copy. | 65 | * the caller can then copy. |
| 61 | * @set_vq_affinity: set the affinity for a virtqueue. | 66 | * @set_vq_affinity: set the affinity for a virtqueue (optional). |
| 62 | * @get_vq_affinity: get the affinity for a virtqueue (optional). | 67 | * @get_vq_affinity: get the affinity for a virtqueue (optional). |
| 63 | */ | 68 | */ |
| 64 | typedef void vq_callback_t(struct virtqueue *); | 69 | typedef void vq_callback_t(struct virtqueue *); |
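
The kerneldoc hunk above marks @generation, @bus_name and @set_vq_affinity as optional ops and warns that the config accessors are not safe to call from atomic context. As a caller-side illustration (a hedged sketch, not code from this patch; demo_config_generation() is an assumed name), an optional op is checked before it is invoked:

#include <linux/virtio_config.h>

/* Sketch: return 0 when the transport does not implement ->generation,
 * matching the "(optional)" annotation added above.
 */
static u32 demo_config_generation(struct virtio_device *vdev)
{
        return vdev->config->generation ? vdev->config->generation(vdev) : 0;
}

The same NULL check applies to ->bus_name and ->set_vq_affinity before a transport-agnostic caller uses them.
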
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 33d291888ba9..e3f005eae1f7 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | enum afs_call_trace { | 25 | enum afs_call_trace { |
| 26 | afs_call_trace_alloc, | 26 | afs_call_trace_alloc, |
| 27 | afs_call_trace_free, | 27 | afs_call_trace_free, |
| 28 | afs_call_trace_get, | ||
| 28 | afs_call_trace_put, | 29 | afs_call_trace_put, |
| 29 | afs_call_trace_wake, | 30 | afs_call_trace_wake, |
| 30 | afs_call_trace_work, | 31 | afs_call_trace_work, |
| @@ -159,6 +160,7 @@ enum afs_file_error { | |||
| 159 | #define afs_call_traces \ | 160 | #define afs_call_traces \ |
| 160 | EM(afs_call_trace_alloc, "ALLOC") \ | 161 | EM(afs_call_trace_alloc, "ALLOC") \ |
| 161 | EM(afs_call_trace_free, "FREE ") \ | 162 | EM(afs_call_trace_free, "FREE ") \ |
| 163 | EM(afs_call_trace_get, "GET ") \ | ||
| 162 | EM(afs_call_trace_put, "PUT ") \ | 164 | EM(afs_call_trace_put, "PUT ") \ |
| 163 | EM(afs_call_trace_wake, "WAKE ") \ | 165 | EM(afs_call_trace_wake, "WAKE ") \ |
| 164 | E_(afs_call_trace_work, "WORK ") | 166 | E_(afs_call_trace_work, "WORK ") |
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index d13fd490b66d..6e73f0274e41 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h | |||
| @@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { | |||
| 78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, | 78 | PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
| 79 | PVRDMA_WR_BIND_MW, | 79 | PVRDMA_WR_BIND_MW, |
| 80 | PVRDMA_WR_REG_SIG_MR, | 80 | PVRDMA_WR_REG_SIG_MR, |
| 81 | PVRDMA_WR_ERROR, | ||
| 81 | }; | 82 | }; |
| 82 | 83 | ||
| 83 | enum pvrdma_wc_status { | 84 | enum pvrdma_wc_status { |
diff --git a/init/Kconfig b/init/Kconfig index d47cb77a220e..513fa544a134 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION | |||
| 1124 | bool "Dead code and data elimination (EXPERIMENTAL)" | 1124 | bool "Dead code and data elimination (EXPERIMENTAL)" |
| 1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION | 1125 | depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION |
| 1126 | depends on EXPERT | 1126 | depends on EXPERT |
| 1127 | depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) | ||
| 1127 | depends on $(cc-option,-ffunction-sections -fdata-sections) | 1128 | depends on $(cc-option,-ffunction-sections -fdata-sections) |
| 1128 | depends on $(ld-option,--gc-sections) | 1129 | depends on $(ld-option,--gc-sections) |
| 1129 | help | 1130 | help |
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index d6361776dc5c..1fb6fd68b9c7 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c | |||
| @@ -378,6 +378,8 @@ void __init swiotlb_exit(void) | |||
| 378 | memblock_free_late(io_tlb_start, | 378 | memblock_free_late(io_tlb_start, |
| 379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 379 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
| 380 | } | 380 | } |
| 381 | io_tlb_start = 0; | ||
| 382 | io_tlb_end = 0; | ||
| 381 | io_tlb_nslabs = 0; | 383 | io_tlb_nslabs = 0; |
| 382 | max_segment = 0; | 384 | max_segment = 0; |
| 383 | } | 385 | } |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d7f538847b84..e815781ed751 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) | |||
| 976 | struct seccomp_filter *filter = file->private_data; | 976 | struct seccomp_filter *filter = file->private_data; |
| 977 | struct seccomp_knotif *knotif; | 977 | struct seccomp_knotif *knotif; |
| 978 | 978 | ||
| 979 | if (!filter) | ||
| 980 | return 0; | ||
| 981 | |||
| 979 | mutex_lock(&filter->notify_lock); | 982 | mutex_lock(&filter->notify_lock); |
| 980 | 983 | ||
| 981 | /* | 984 | /* |
| @@ -1300,6 +1303,7 @@ out: | |||
| 1300 | out_put_fd: | 1303 | out_put_fd: |
| 1301 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { | 1304 | if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { |
| 1302 | if (ret < 0) { | 1305 | if (ret < 0) { |
| 1306 | listener_f->private_data = NULL; | ||
| 1303 | fput(listener_f); | 1307 | fput(listener_f); |
| 1304 | put_unused_fd(listener); | 1308 | put_unused_fd(listener); |
| 1305 | } else { | 1309 | } else { |
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index 14436f4ca6bd..30e0f9770f88 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c | |||
| @@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) | |||
| 52 | if (x <= ULONG_MAX) | 52 | if (x <= ULONG_MAX) |
| 53 | return int_sqrt((unsigned long) x); | 53 | return int_sqrt((unsigned long) x); |
| 54 | 54 | ||
| 55 | m = 1ULL << (fls64(x) & ~1ULL); | 55 | m = 1ULL << ((fls64(x) - 1) & ~1ULL); |
| 56 | while (m != 0) { | 56 | while (m != 0) { |
| 57 | b = y + m; | 57 | b = y + m; |
| 58 | y >>= 1; | 58 | y >>= 1; |
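
The int_sqrt64() change above fixes the initial estimate: fls64() returns the 1-based index of the highest set bit, so 1ULL << fls64(x) always exceeds x, and rounding that exponent down to an even value still overshot whenever fls64(x) was even. Subtracting one first keeps the seed a power of four no larger than x (only values that do not fit in an unsigned long reach this branch). A user-space sketch of the before/after seeds, with fls64_demo() as a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fls64(): 1-based index of the highest set
 * bit; x is never zero on this path.
 */
static int fls64_demo(uint64_t x)
{
        return 64 - __builtin_clzll(x);
}

int main(void)
{
        uint64_t x = 1ULL << 33;        /* fls64(x) == 34, an even index */
        uint64_t old_seed = 1ULL << (fls64_demo(x) & ~1);
        uint64_t new_seed = 1ULL << ((fls64_demo(x) - 1) & ~1);

        /* old seed is 2^34 and overshoots x; new seed is 2^32 <= x */
        printf("x=%llu old=%llu new=%llu\n",
               (unsigned long long)x,
               (unsigned long long)old_seed,
               (unsigned long long)new_seed);
        return 0;
}
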
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 1ff9768f5456..f3023bbc0b7f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
| @@ -41,6 +41,9 @@ static unsigned long number_cred_unused; | |||
| 41 | 41 | ||
| 42 | static struct cred machine_cred = { | 42 | static struct cred machine_cred = { |
| 43 | .usage = ATOMIC_INIT(1), | 43 | .usage = ATOMIC_INIT(1), |
| 44 | #ifdef CONFIG_DEBUG_CREDENTIALS | ||
| 45 | .magic = CRED_MAGIC, | ||
| 46 | #endif | ||
| 44 | }; | 47 | }; |
| 45 | 48 | ||
| 46 | /* | 49 | /* |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index dc86713b32b6..1531b0219344 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 1549 | cred_len = p++; | 1549 | cred_len = p++; |
| 1550 | 1550 | ||
| 1551 | spin_lock(&ctx->gc_seq_lock); | 1551 | spin_lock(&ctx->gc_seq_lock); |
| 1552 | req->rq_seqno = ctx->gc_seq++; | 1552 | req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ; |
| 1553 | spin_unlock(&ctx->gc_seq_lock); | 1553 | spin_unlock(&ctx->gc_seq_lock); |
| 1554 | if (req->rq_seqno == MAXSEQ) | ||
| 1555 | goto out_expired; | ||
| 1554 | 1556 | ||
| 1555 | *p++ = htonl((u32) RPC_GSS_VERSION); | 1557 | *p++ = htonl((u32) RPC_GSS_VERSION); |
| 1556 | *p++ = htonl((u32) ctx->gc_proc); | 1558 | *p++ = htonl((u32) ctx->gc_proc); |
| @@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p) | |||
| 1572 | mic.data = (u8 *)(p + 1); | 1574 | mic.data = (u8 *)(p + 1); |
| 1573 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); | 1575 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
| 1574 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 1576 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
| 1575 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | 1577 | goto out_expired; |
| 1576 | } else if (maj_stat != 0) { | 1578 | } else if (maj_stat != 0) { |
| 1577 | printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); | 1579 | pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); |
| 1580 | task->tk_status = -EIO; | ||
| 1578 | goto out_put_ctx; | 1581 | goto out_put_ctx; |
| 1579 | } | 1582 | } |
| 1580 | p = xdr_encode_opaque(p, NULL, mic.len); | 1583 | p = xdr_encode_opaque(p, NULL, mic.len); |
| 1581 | gss_put_ctx(ctx); | 1584 | gss_put_ctx(ctx); |
| 1582 | return p; | 1585 | return p; |
| 1586 | out_expired: | ||
| 1587 | clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); | ||
| 1588 | task->tk_status = -EKEYEXPIRED; | ||
| 1583 | out_put_ctx: | 1589 | out_put_ctx: |
| 1584 | gss_put_ctx(ctx); | 1590 | gss_put_ctx(ctx); |
| 1585 | return NULL; | 1591 | return NULL; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 71d9599b5816..d7ec6132c046 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task) | |||
| 1739 | xdr_buf_init(&req->rq_rcv_buf, | 1739 | xdr_buf_init(&req->rq_rcv_buf, |
| 1740 | req->rq_rbuffer, | 1740 | req->rq_rbuffer, |
| 1741 | req->rq_rcvsize); | 1741 | req->rq_rcvsize); |
| 1742 | req->rq_bytes_sent = 0; | ||
| 1743 | 1742 | ||
| 1744 | p = rpc_encode_header(task); | 1743 | p = rpc_encode_header(task); |
| 1745 | if (p == NULL) { | 1744 | if (p == NULL) |
| 1746 | printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n"); | ||
| 1747 | rpc_exit(task, -EIO); | ||
| 1748 | return; | 1745 | return; |
| 1749 | } | ||
| 1750 | 1746 | ||
| 1751 | encode = task->tk_msg.rpc_proc->p_encode; | 1747 | encode = task->tk_msg.rpc_proc->p_encode; |
| 1752 | if (encode == NULL) | 1748 | if (encode == NULL) |
| @@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task) | |||
| 1771 | /* Did the encode result in an error condition? */ | 1767 | /* Did the encode result in an error condition? */ |
| 1772 | if (task->tk_status != 0) { | 1768 | if (task->tk_status != 0) { |
| 1773 | /* Was the error nonfatal? */ | 1769 | /* Was the error nonfatal? */ |
| 1774 | if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) | 1770 | switch (task->tk_status) { |
| 1771 | case -EAGAIN: | ||
| 1772 | case -ENOMEM: | ||
| 1775 | rpc_delay(task, HZ >> 4); | 1773 | rpc_delay(task, HZ >> 4); |
| 1776 | else | 1774 | break; |
| 1775 | case -EKEYEXPIRED: | ||
| 1776 | task->tk_action = call_refresh; | ||
| 1777 | break; | ||
| 1778 | default: | ||
| 1777 | rpc_exit(task, task->tk_status); | 1779 | rpc_exit(task, task->tk_status); |
| 1780 | } | ||
| 1778 | return; | 1781 | return; |
| 1779 | } | 1782 | } |
| 1780 | 1783 | ||
| @@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task) | |||
| 2336 | *p++ = htonl(clnt->cl_vers); /* program version */ | 2339 | *p++ = htonl(clnt->cl_vers); /* program version */ |
| 2337 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ | 2340 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ |
| 2338 | p = rpcauth_marshcred(task, p); | 2341 | p = rpcauth_marshcred(task, p); |
| 2339 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | 2342 | if (p) |
| 2343 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | ||
| 2340 | return p; | 2344 | return p; |
| 2341 | } | 2345 | } |
| 2342 | 2346 | ||
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 73547d17d3c6..f1ec2110efeb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
| 1151 | struct rpc_xprt *xprt = req->rq_xprt; | 1151 | struct rpc_xprt *xprt = req->rq_xprt; |
| 1152 | 1152 | ||
| 1153 | if (xprt_request_need_enqueue_transmit(task, req)) { | 1153 | if (xprt_request_need_enqueue_transmit(task, req)) { |
| 1154 | req->rq_bytes_sent = 0; | ||
| 1154 | spin_lock(&xprt->queue_lock); | 1155 | spin_lock(&xprt->queue_lock); |
| 1155 | /* | 1156 | /* |
| 1156 | * Requests that carry congestion control credits are added | 1157 | * Requests that carry congestion control credits are added |
| @@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task) | |||
| 1177 | INIT_LIST_HEAD(&req->rq_xmit2); | 1178 | INIT_LIST_HEAD(&req->rq_xmit2); |
| 1178 | goto out; | 1179 | goto out; |
| 1179 | } | 1180 | } |
| 1180 | } else { | 1181 | } else if (!req->rq_seqno) { |
| 1181 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { | 1182 | list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { |
| 1182 | if (pos->rq_task->tk_owner != task->tk_owner) | 1183 | if (pos->rq_task->tk_owner != task->tk_owner) |
| 1183 | continue; | 1184 | continue; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 7749a2bf6887..4994e75945b8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) | |||
| 845 | for (i = 0; i <= buf->rb_sc_last; i++) { | 845 | for (i = 0; i <= buf->rb_sc_last; i++) { |
| 846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); | 846 | sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); |
| 847 | if (!sc) | 847 | if (!sc) |
| 848 | goto out_destroy; | 848 | return -ENOMEM; |
| 849 | 849 | ||
| 850 | sc->sc_xprt = r_xprt; | 850 | sc->sc_xprt = r_xprt; |
| 851 | buf->rb_sc_ctxs[i] = sc; | 851 | buf->rb_sc_ctxs[i] = sc; |
| 852 | } | 852 | } |
| 853 | 853 | ||
| 854 | return 0; | 854 | return 0; |
| 855 | |||
| 856 | out_destroy: | ||
| 857 | rpcrdma_sendctxs_destroy(buf); | ||
| 858 | return -ENOMEM; | ||
| 859 | } | 855 | } |
| 860 | 856 | ||
| 861 | /* The sendctx queue is not guaranteed to have a size that is a | 857 | /* The sendctx queue is not guaranteed to have a size that is a |
| @@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) | |||
| 1113 | WQ_MEM_RECLAIM | WQ_HIGHPRI, | 1109 | WQ_MEM_RECLAIM | WQ_HIGHPRI, |
| 1114 | 0, | 1110 | 0, |
| 1115 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); | 1111 | r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); |
| 1116 | if (!buf->rb_completion_wq) | 1112 | if (!buf->rb_completion_wq) { |
| 1113 | rc = -ENOMEM; | ||
| 1117 | goto out; | 1114 | goto out; |
| 1115 | } | ||
| 1118 | 1116 | ||
| 1119 | return 0; | 1117 | return 0; |
| 1120 | out: | 1118 | out: |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 13559e6a460b..7754aa3e434f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #include <net/udp.h> | 48 | #include <net/udp.h> |
| 49 | #include <net/tcp.h> | 49 | #include <net/tcp.h> |
| 50 | #include <linux/bvec.h> | 50 | #include <linux/bvec.h> |
| 51 | #include <linux/highmem.h> | ||
| 51 | #include <linux/uio.h> | 52 | #include <linux/uio.h> |
| 52 | 53 | ||
| 53 | #include <trace/events/sunrpc.h> | 54 | #include <trace/events/sunrpc.h> |
| @@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, | |||
| 376 | return sock_recvmsg(sock, msg, flags); | 377 | return sock_recvmsg(sock, msg, flags); |
| 377 | } | 378 | } |
| 378 | 379 | ||
| 380 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | ||
| 381 | static void | ||
| 382 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
| 383 | { | ||
| 384 | struct bvec_iter bi = { | ||
| 385 | .bi_size = count, | ||
| 386 | }; | ||
| 387 | struct bio_vec bv; | ||
| 388 | |||
| 389 | bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); | ||
| 390 | for_each_bvec(bv, bvec, bi, bi) | ||
| 391 | flush_dcache_page(bv.bv_page); | ||
| 392 | } | ||
| 393 | #else | ||
| 394 | static inline void | ||
| 395 | xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) | ||
| 396 | { | ||
| 397 | } | ||
| 398 | #endif | ||
| 399 | |||
| 379 | static ssize_t | 400 | static ssize_t |
| 380 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | 401 | xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, |
| 381 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) | 402 | struct xdr_buf *buf, size_t count, size_t seek, size_t *read) |
| @@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, | |||
| 409 | seek + buf->page_base); | 430 | seek + buf->page_base); |
| 410 | if (ret <= 0) | 431 | if (ret <= 0) |
| 411 | goto sock_err; | 432 | goto sock_err; |
| 433 | xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); | ||
| 412 | offset += ret - buf->page_base; | 434 | offset += ret - buf->page_base; |
| 413 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) | 435 | if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
| 414 | goto out; | 436 | goto out; |
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 525bff667a52..30816037036e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include | |||
| @@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d) | |||
| 24 | basetarget = $(basename $(notdir $@)) | 24 | basetarget = $(basename $(notdir $@)) |
| 25 | 25 | ||
| 26 | ### | 26 | ### |
| 27 | # filename of first prerequisite with directory and extension stripped | ||
| 28 | baseprereq = $(basename $(notdir $<)) | ||
| 29 | |||
| 30 | ### | ||
| 31 | # Escape single quote for use in echo statements | 27 | # Escape single quote for use in echo statements |
| 32 | escsq = $(subst $(squote),'\$(squote)',$1) | 28 | escsq = $(subst $(squote),'\$(squote)',$1) |
| 33 | 29 | ||
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index c05ab001b54c..181973509a05 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
| @@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $< | |||
| 206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE | 206 | $(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE |
| 207 | $(call filechk,conf_cfg) | 207 | $(call filechk,conf_cfg) |
| 208 | 208 | ||
| 209 | clean-files += conf-cfg | 209 | clean-files += *conf-cfg |
diff --git a/security/security.c b/security/security.c index f1b8d2587639..55bc49027ba9 100644 --- a/security/security.c +++ b/security/security.c | |||
| @@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) | |||
| 1027 | 1027 | ||
| 1028 | void security_cred_free(struct cred *cred) | 1028 | void security_cred_free(struct cred *cred) |
| 1029 | { | 1029 | { |
| 1030 | /* | ||
| 1031 | * There is a failure case in prepare_creds() that | ||
| 1032 | * may result in a call here with ->security being NULL. | ||
| 1033 | */ | ||
| 1034 | if (unlikely(cred->security == NULL)) | ||
| 1035 | return; | ||
| 1036 | |||
| 1030 | call_void_hook(cred_free, cred); | 1037 | call_void_hook(cred_free, cred); |
| 1031 | } | 1038 | } |
| 1032 | 1039 | ||
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index a50d625e7946..c1c31e33657a 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c | |||
| @@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p) | |||
| 732 | kfree(key); | 732 | kfree(key); |
| 733 | if (datum) { | 733 | if (datum) { |
| 734 | levdatum = datum; | 734 | levdatum = datum; |
| 735 | ebitmap_destroy(&levdatum->level->cat); | 735 | if (levdatum->level) |
| 736 | ebitmap_destroy(&levdatum->level->cat); | ||
| 736 | kfree(levdatum->level); | 737 | kfree(levdatum->level); |
| 737 | } | 738 | } |
| 738 | kfree(datum); | 739 | kfree(datum); |
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index ffda91a4a1aa..02514fe558b4 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
| @@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child, | |||
| 368 | break; | 368 | break; |
| 369 | case YAMA_SCOPE_RELATIONAL: | 369 | case YAMA_SCOPE_RELATIONAL: |
| 370 | rcu_read_lock(); | 370 | rcu_read_lock(); |
| 371 | if (!task_is_descendant(current, child) && | 371 | if (!pid_alive(child)) |
| 372 | rc = -EPERM; | ||
| 373 | if (!rc && !task_is_descendant(current, child) && | ||
| 372 | !ptracer_exception_found(current, child) && | 374 | !ptracer_exception_found(current, child) && |
| 373 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) | 375 | !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) |
| 374 | rc = -EPERM; | 376 | rc = -EPERM; |
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h index ff91192407d1..f599064dd8dc 100644 --- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h | |||
| @@ -47,6 +47,7 @@ enum perf_event_powerpc_regs { | |||
| 47 | PERF_REG_POWERPC_DAR, | 47 | PERF_REG_POWERPC_DAR, |
| 48 | PERF_REG_POWERPC_DSISR, | 48 | PERF_REG_POWERPC_DSISR, |
| 49 | PERF_REG_POWERPC_SIER, | 49 | PERF_REG_POWERPC_SIER, |
| 50 | PERF_REG_POWERPC_MMCRA, | ||
| 50 | PERF_REG_POWERPC_MAX, | 51 | PERF_REG_POWERPC_MAX, |
| 51 | }; | 52 | }; |
| 52 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ | 53 | #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ |
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h index 1076393e6f43..e18a3556f5e3 100644 --- a/tools/perf/arch/powerpc/include/perf_regs.h +++ b/tools/perf/arch/powerpc/include/perf_regs.h | |||
| @@ -63,7 +63,8 @@ static const char *reg_names[] = { | |||
| 63 | [PERF_REG_POWERPC_TRAP] = "trap", | 63 | [PERF_REG_POWERPC_TRAP] = "trap", |
| 64 | [PERF_REG_POWERPC_DAR] = "dar", | 64 | [PERF_REG_POWERPC_DAR] = "dar", |
| 65 | [PERF_REG_POWERPC_DSISR] = "dsisr", | 65 | [PERF_REG_POWERPC_DSISR] = "dsisr", |
| 66 | [PERF_REG_POWERPC_SIER] = "sier" | 66 | [PERF_REG_POWERPC_SIER] = "sier", |
| 67 | [PERF_REG_POWERPC_MMCRA] = "mmcra" | ||
| 67 | }; | 68 | }; |
| 68 | 69 | ||
| 69 | static inline const char *perf_reg_name(int id) | 70 | static inline const char *perf_reg_name(int id) |
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c index 07fcd977d93e..34d5134681d9 100644 --- a/tools/perf/arch/powerpc/util/perf_regs.c +++ b/tools/perf/arch/powerpc/util/perf_regs.c | |||
| @@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = { | |||
| 53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), | 53 | SMPL_REG(dar, PERF_REG_POWERPC_DAR), |
| 54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), | 54 | SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), |
| 55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), | 55 | SMPL_REG(sier, PERF_REG_POWERPC_SIER), |
| 56 | SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), | ||
| 56 | SMPL_REG_END | 57 | SMPL_REG_END |
| 57 | }; | 58 | }; |
| 58 | 59 | ||
