138 files changed, 1486 insertions, 741 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9bffdfc648dc..85b022179104 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel.  Increase
 	this when using large numbers of interfaces and/or routes.
+	From linux kernel 3.6 onwards, this is deprecated for ipv4
+	as route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
 	Minimum number of entries to keep.  Garbage collector will not
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 230ce71f4d75..2b47704f75cb 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	.release_cmd = " + fabric_mod_name + "_release_cmd,\n"
 	buf += "	.shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
 	buf += "	.close_session = " + fabric_mod_name + "_close_session,\n"
-	buf += "	.stop_session = " + fabric_mod_name + "_stop_session,\n"
-	buf += "	.fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
-	buf += "	.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
 	buf += "	.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
 	buf += "	.sess_get_initiator_sid = NULL,\n"
 	buf += "	.write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
 	buf += "	.queue_status = " + fabric_mod_name + "_queue_status,\n"
 	buf += "	.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
-	buf += "	.is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+	buf += "	.aborted_task = " + fabric_mod_name + "_aborted_task,\n"
 	buf += "	/*\n"
 	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
 	buf += "	 */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	buf += "	/*\n"
 	buf += "	 * Register the top level struct config_item_type with TCM core\n"
 	buf += "	 */\n"
-	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
 	buf += "	if (IS_ERR(fabric)) {\n"
 	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
 	buf += "		return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 	if re.search('get_fabric_name', fo):
 		buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
 		buf += "{\n"
-		buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
+		buf += "	return \"" + fabric_mod_name + "\";\n"
 		buf += "}\n\n"
 		bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
 		continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 		buf += "}\n\n"
 		bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-	if re.search('stop_session\)\(', fo):
-		buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-		buf += "{\n"
-		buf += "	return;\n"
-		buf += "}\n\n"
-		bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-	if re.search('fall_back_to_erl0\)\(', fo):
-		buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-		buf += "{\n"
-		buf += "	return;\n"
-		buf += "}\n\n"
-		bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-	if re.search('sess_logged_in\)\(', fo):
-		buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-		buf += "{\n"
-		buf += "	return 0;\n"
-		buf += "}\n\n"
-		bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
 	if re.search('sess_get_index\)\(', fo):
 		buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
 		buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 		bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
 	if re.search('queue_tm_rsp\)\(', fo):
-		buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+		buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
 		buf += "{\n"
-		buf += "	return 0;\n"
+		buf += "	return;\n"
 		buf += "}\n\n"
-		bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+		bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-	if re.search('is_state_remove\)\(', fo):
-		buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+	if re.search('aborted_task\)\(', fo):
+		buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
 		buf += "{\n"
-		buf += "	return 0;\n"
+		buf += "	return;\n"
 		buf += "}\n\n"
-		bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+		bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
 	ret = p.write(buf)
 	if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
 	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
 	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
 	if input == "yes" or input == "y":
 		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
 	if input == "yes" or input == "y":
 		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index fca24c931ec8..753e47cc2e20 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -3,7 +3,7 @@ CPU cooling APIs How To
 
 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
 
-Updated: 12 May 2012
+Updated: 6 Jan 2015
 
 Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
 
    clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+	struct device_node *np, const struct cpumask *clip_cpus)
+
+    This interface function registers the cpufreq cooling device with
+    the name "thermal-cpufreq-%x" linking it with a device tree node, in
+    order to bind it via the thermal DT code. This api can support multiple
+    instances of cpufreq cooling devices.
+
+   np: pointer to the cooling device device tree node
+   clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
 This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 
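As a usage sketch of the API documented above: a DT-based driver would typically register the cooling device at probe time and unregister it on removal. The driver function names, the platform-device wiring, and the choice of cpu_possible_mask below are illustrative assumptions, not part of the documented interface.

    #include <linux/cpu_cooling.h>
    #include <linux/cpumask.h>
    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static struct thermal_cooling_device *cpufreq_cdev;

    static int example_cooling_probe(struct platform_device *pdev)
    {
    	/*
    	 * Link the cooling device to this device's DT node so the
    	 * thermal DT code can bind it via a cooling map.
    	 */
    	cpufreq_cdev = of_cpufreq_cooling_register(pdev->dev.of_node,
    						   cpu_possible_mask);
    	if (IS_ERR(cpufreq_cdev))
    		return PTR_ERR(cpufreq_cdev);
    	return 0;
    }

    static int example_cooling_remove(struct platform_device *pdev)
    {
    	cpufreq_cooling_unregister(cpufreq_cdev);
    	return 0;
    }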
diff --git a/MAINTAINERS b/MAINTAINERS
index 3589d67437f8..ab6610e6c610 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4749,7 +4749,7 @@ S:	Supported
 F:	drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M:	Santiago Leon <santil@linux.vnet.ibm.com>
+M:	Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmveth.*
@@ -5280,6 +5280,15 @@ W:	www.open-iscsi.org
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 F:	drivers/infiniband/ulp/iser/
 
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M:	Sagi Grimberg <sagig@mellanox.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L:	linux-rdma@vger.kernel.org
+L:	target-devel@vger.kernel.org
+S:	Supported
+W:	http://www.linux-iscsi.org
+F:	drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -9534,7 +9543,8 @@ F:	drivers/platform/x86/thinkpad_acpi.c
 TI BANDGAP AND THERMAL DRIVER
 M:	Eduardo Valentin <edubezval@gmail.com>
 L:	linux-pm@vger.kernel.org
-S:	Supported
+L:	linux-omap@vger.kernel.org
+S:	Maintained
 F:	drivers/thermal/ti-soc-thermal/
 
 TI CLOCK DRIVER
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..8c1febd7e3f2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
 	pinctrl-0 = <&pinctrl_enet1>;
 	phy-supply = <&reg_enet_3v3>;
 	phy-mode = "rgmii";
+	phy-handle = <&ethphy1>;
 	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy1: ethernet-phy@0 {
+			reg = <0>;
+		};
+
+		ethphy2: ethernet-phy@1 {
+			reg = <1>;
+		};
+	};
 };
 
 &fec2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet2>;
 	phy-mode = "rgmii";
+	phy-handle = <&ethphy2>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
 
 &fec0 {
 	phy-mode = "rmii";
+	phy-handle = <&ethphy0>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec0>;
 	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy0: ethernet-phy@0 {
+			reg = <0>;
+		};
+
+		ethphy1: ethernet-phy@1 {
+			reg = <1>;
+		};
+	};
 };
 
 &fec1 {
 	phy-mode = "rmii";
+	phy-handle = <&ethphy1>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec1>;
 	status = "okay";
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..865a7e28ea2d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+		vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c3ca89c27c6b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 	 * Instead, we invalidate Stage-2 for this IPA, and the
 	 * whole of Stage-1. Weep...
 	 */
+	lsr	x1, x1, #12
 	tlbi	ipas2e1is, x1
 	/*
 	 * We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 			if (!cpu_has_32bit_el1())
 				return -EINVAL;
 			cpu_reset = &default_regs_reset32;
-			vcpu->arch.hcr_el2 &= ~HCR_RW;
 		} else {
 			cpu_reset = &default_regs_reset;
 		}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		355
+#define NR_syscalls		356
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
 #define __NR_getrandom		352
 #define __NR_memfd_create	353
 #define __NR_bpf		354
+#define __NR_execveat		355
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
 	.long sys_getrandom
 	.long sys_memfd_create
 	.long sys_bpf
+	.long sys_execveat		/* 355 */
 
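The entry wired up above is the new execveat(2) syscall merged this cycle: it executes the program referred to by a file descriptor, optionally relative to a pathname. A userspace sketch of calling it directly through syscall(2), assuming no libc wrapper exists yet (the /bin/true target and the fexecve-style AT_EMPTY_PATH usage are purely illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>		/* O_PATH, AT_EMPTY_PATH */
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_execveat
    #define __NR_execveat 355	/* number from the m68k table above */
    #endif

    int main(void)
    {
    	char *argv[] = { "true", NULL };
    	char *envp[] = { NULL };
    	int fd = open("/bin/true", O_PATH);

    	/* Empty pathname + AT_EMPTY_PATH: execute the file behind fd. */
    	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
    	return 1;	/* reached only if the call failed */
    }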
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
 #define THREAD_SIZE		(1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)	clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)	rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-	/* gcc4, at least, is smart enough to turn this into a single
-	 * rlwinm for ppc32 and clrrdi for ppc64 */
-	return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+	unsigned long val;
+
+	asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+	return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
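The point of the stringify_in_c() change above is that CURRENT_THREAD_INFO() can now be used both from assembly, where it must expand to raw instructions, and from C, where current_thread_info() feeds it to inline asm as a string. The helper lives in arch/powerpc/include/asm/asm-compat.h and looks roughly like this (a sketch from that header, not part of this diff):

    #ifdef __ASSEMBLY__
    #  define stringify_in_c(...)	__VA_ARGS__
    #else
    /* In C, turn the token list (commas included) into a string literal. */
    #  define stringify_in_c(...)	__stringify(__VA_ARGS__)
    #endif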
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION;		\
 	b	1f;		\
 END_FTR_SECTION(0, 1);		\
 	ld	r12,opal_tracepoint_refcount@toc(r2);	\
-	std	r12,32(r1);	\
 	cmpdi	r12,0;		\
 	bne-	LABEL;		\
 1:
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
 struct dbfs_d2fc_hdr {
 	u64	len;		/* Length of d2fc buffer without header */
 	u16	version;	/* Version of header */
-	char	tod_ext[16];	/* TOD clock for d2fc */
+	char	tod_ext[STORE_CLOCK_EXT_SIZE];	/* TOD clock for d2fc */
 	u64	count;		/* Number of VM guests in d2fc buffer */
 	char	reserved[30];
 } __attribute__ ((packed));
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-	return __arch_local_irq_stosm(0x00);
+	return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 #define CLOCK_TICK_RATE		1193180	/* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE	16	/* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-	typedef struct { char _[sizeof(clk)]; } addrtype;
+	typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
 	asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-	unsigned char clk[16];
+	unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
 	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
 #define __NR_bpf		351
 #define __NR_s390_pci_mmio_write	352
 #define __NR_s390_pci_mmio_read	353
-#define NR_syscalls 354
+#define __NR_execveat		354
+#define NR_syscalls 355
 
 /* 
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
 	return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+			   struct pt_regs *regs)
+{
+	if (!(regs->psw.mask & PSW_MASK_PER))
+		return 0;
+	/* user space single step */
+	if (control == 0)
+		return 1;
+	/* over indication for storage alteration */
+	if ((control & 0x20200000) && (cause & 0x2000))
+		return 1;
+	if (cause & 0x8000) {
+		/* all branches */
+		if ((control & 0x80800000) == 0x80000000)
+			return 1;
+		/* branch into selected range */
+		if (((control & 0x80800000) == 0x80800000) &&
+		    regs->psw.addr >= current->thread.per_user.start &&
+		    regs->psw.addr <= current->thread.per_user.end)
+			return 1;
+	}
+	return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 		if (regs->psw.addr - utask->xol_vaddr == ilen)
 			regs->psw.addr = utask->vaddr + ilen;
 	}
-	/* If per tracing was active generate trap */
-	if (regs->psw.mask & PSW_MASK_PER)
-		do_per_trap(regs);
+	if (check_per_event(current->thread.per_event.cause,
+			    current->thread.per_user.control, regs)) {
+		/* fix per address */
+		current->thread.per_event.address = utask->vaddr;
+		/* trigger per event */
+		set_pt_regs_flag(regs, PIF_PER_TRAP);
+	}
 	return 0;
 }
 
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	clear_thread_flag(TIF_UPROBE_SINGLESTEP);
 	regs->int_code = auprobe->saved_int_code;
 	regs->psw.addr = current->utask->vaddr;
+	current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
 	__rc;					\
 })
 
-#define emu_store_ril(ptr, input)		\
+#define emu_store_ril(regs, ptr, input)		\
 ({						\
 	unsigned int mask = sizeof(*(ptr)) - 1;	\
+	__typeof__(ptr) __ptr = (ptr);		\
 	int __rc = 0;				\
 						\
 	if (!test_facility(34))			\
 		__rc = EMU_ILLEGAL_OP;		\
-	else if ((u64 __force)ptr & mask)	\
+	else if ((u64 __force)__ptr & mask)	\
 		__rc = EMU_SPECIFICATION;	\
-	else if (put_user(*(input), ptr))	\
+	else if (put_user(*(input), __ptr))	\
 		__rc = EMU_ADDRESSING;		\
+	if (__rc == 0)				\
+		sim_stor_event(regs, __ptr, mask + 1);	\
 	__rc;					\
 })
 
@@ -198,6 +230,25 @@ union split_register {
 };
 
 /*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+	if (!(regs->psw.mask & PSW_MASK_PER))
+		return;
+	if (!(current->thread.per_user.control & PER_EVENT_STORE))
+		return;
+	if ((void *)current->thread.per_user.start > (addr + len))
+		return;
+	if ((void *)current->thread.per_user.end < addr)
+		return;
+	current->thread.per_event.address = regs->psw.addr;
+	current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+	set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
  * pc relative instructions are emulated, since parameters may not be
  * accessible from the xol area due to range limitations.
  */
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
 		rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
 		break;
 	case 0x07: /* sthrl */
-		rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+		rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
 		break;
 	case 0x0b: /* stgrl */
-		rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+		rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
 		break;
 	case 0x0f: /* strl */
-		rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+		rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
 		break;
 	}
 	break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e34122e539a1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, system;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	timer = S390_lowcore.last_update_timer;
 	S390_lowcore.last_update_timer = get_vtimer();
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..3cf8cc03fff6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
 	struct page *page;
-	unsigned long offset;
+	unsigned long offset, mask;
 
 	offset = (unsigned long) entry / sizeof(unsigned long);
 	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-	page = pmd_to_page((pmd_t *) entry);
+	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+	page = virt_to_page((void *)((unsigned long) entry & mask));
 	return page->index + offset;
 }
 
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..524496d47ef5 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
 		EMIT4_DISP(0x88500000, K);
 		break;
 	case BPF_ALU | BPF_NEG: /* A = -A */
-		/* lnr %r5,%r5 */
-		EMIT2(0x1155);
+		/* lcr %r5,%r5 */
+		EMIT2(0x1355);
 		break;
 	case BPF_JMP | BPF_JA: /* ip += K */
 		offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch:		if (filter->jt == filter->jf) {
 xbranch:	/* Emit compare if the branch targets are different */
 		if (filter->jt != filter->jf) {
 			jit->seen |= SEEN_XREG;
-			/* cr %r5,%r12 */
-			EMIT2(0x195c);
+			/* clr %r5,%r12 */
+			EMIT2(0x155c);
 		}
 		goto branch;
 	case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7e3cd50ece0..98f654d466e5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->flags &= ~X86_EFLAGS_IF;
 	trace_hardirqs_off();
 	regs->ip = (unsigned long)(jp->entry);
+
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer to get messed up.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	u8 *addr = (u8 *) (regs->ip - 1);
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	void *saved_sp = kcb->jprobe_saved_sp;
 
 	if ((addr > (u8 *) jprobe_return) &&
 	    (addr < (u8 *) jprobe_return_end)) {
-		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+		if (stack_addr(regs) != saved_sp) {
 			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
 			printk(KERN_ERR
 			       "current sp %p does not match saved sp %p\n",
-			       stack_addr(regs), kcb->jprobe_saved_sp);
+			       stack_addr(regs), saved_sp);
 			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
 			show_regs(saved_regs);
 			printk(KERN_ERR "Current registers\n");
 			show_regs(regs);
 			BUG();
 		}
+		/* It's OK to start function graph tracing again */
+		unpause_graph_tracing();
 		*regs = kcb->jprobe_saved_regs;
-		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-		       kcb->jprobes_stack,
-		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
 		preempt_enable_no_resched();
 		return 1;
 	}
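For context on the pause_graph_tracing()/unpause_graph_tracing() pair added above: a jprobe handler is entered in place of the probed function and must finish with jprobe_return(), which is exactly the skipped return path that confused the function graph tracer's accounting. A minimal jprobe module sketch, patterned after samples/kprobes (the probed do_fork symbol and handler name are illustrative):

    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/module.h>

    /* The handler mirrors the probed function's signature. */
    static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
    		     unsigned long stack_size, int __user *parent_tidptr,
    		     int __user *child_tidptr)
    {
    	pr_info("do_fork: clone_flags=0x%lx\n", clone_flags);
    	jprobe_return();	/* mandatory: jump back to the real function */
    	return 0;		/* never reached */
    }

    static struct jprobe my_jprobe = {
    	.entry = jdo_fork,
    	.kp = { .symbol_name = "do_fork" },
    };

    static int __init jprobe_init(void)
    {
    	return register_jprobe(&my_jprobe);
    }

    static void __exit jprobe_exit(void)
    {
    	unregister_jprobe(&my_jprobe);
    }

    module_init(jprobe_init);
    module_exit(jprobe_exit);
    MODULE_LICENSE("GPL");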
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..78a881b7fc41 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
 	.emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+	unsigned char reason = 0;
+
+	/* Construct a value which looks like it came from port 0x61. */
+	if (test_bit(_XEN_NMIREASON_io_error,
+		     &HYPERVISOR_shared_info->arch.nmi_reason))
+		reason |= NMI_REASON_IOCHK;
+	if (test_bit(_XEN_NMIREASON_pci_serr,
+		     &HYPERVISOR_shared_info->arch.nmi_reason))
+		reason |= NMI_REASON_SERR;
+
+	return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	pv_info = xen_info;
 	pv_init_ops = xen_init_ops;
 	pv_apic_ops = xen_apic_ops;
-	if (!xen_pvh_domain())
+	if (!xen_pvh_domain()) {
 		pv_cpu_ops = xen_cpu_ops;
 
+		x86_platform.get_nmi_reason = xen_get_nmi_reason;
+	}
+
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
 	else
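For comparison with xen_get_nmi_reason() above: on bare metal, x86_platform.get_nmi_reason reads the real NMI status port, while a PV guest cannot touch the hardware port and must synthesize the same bits from shared-info flags. The native default looks roughly like this (a sketch of the arch/x86/kernel/nmi.c helper, from memory and not part of this diff):

    #include <asm/mach_traps.h>	/* NMI_REASON_PORT is port 0x61 */
    #include <asm/io.h>

    /* Bare-metal default: the NMI reason really does come from port 0x61. */
    unsigned char default_get_nmi_reason(void)
    {
    	return inb(NMI_REASON_PORT);
    }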
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..70fb5075c901 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
 	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-	BUG_ON(!slab_is_available());
+	if (unlikely(!slab_is_available())) {
+		free_bootmem((unsigned long)p, PAGE_SIZE);
+		return;
+	}
+
 	free_page((unsigned long)p);
 }
 
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 			p2m_missing_pte : p2m_identity_pte;
 		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
 			pmdp = populate_extra_pmd(
-				(unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
 			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
 		}
 	}
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
 	pte_t *ptechk;
-	pte_t *pteret = ptep;
 	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
 	pmd_t *pmdp;
 	unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
 		if (ptechk == pte_pg) {
 			set_pmd(pmdp,
 				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-			if (vaddr == (addr & ~(PMD_SIZE - 1)))
-				pteret = pte_offset_kernel(pmdp, addr);
 			pte_newpg[i] = NULL;
 		}
 
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
 		vaddr += PMD_SIZE;
 	}
 
-	return pteret;
+	return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
 
 	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
 		/* PMD level is missing, allocate a new one */
-		ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+		ptep = alloc_p2m_pmd(addr, pte_pg);
 		if (!ptep)
 			return false;
 	}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..865e56cea7a0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
 	int i;
-	unsigned long addr = PFN_PHYS(pfn);
+	phys_addr_t addr = PFN_PHYS(pfn);
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
 		if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
 	int i;
 
 	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		if (!xen_extra_mem[i].size)
+			continue;
 		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
 		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
 		for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-	unsigned long *released)
+	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-	unsigned long len = 0;
 	unsigned long pfn, end;
 	int ret;
 
 	WARN_ON(start_pfn > end_pfn);
 
+	/* Release pages first. */
 	end = min(end_pfn, nr_pages);
 	for (pfn = start_pfn; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
 		if (ret == 1) {
+			(*released)++;
 			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
 				break;
-			len++;
 		} else
 			break;
 	}
 
-	/* Need to release pages first */
-	*released += len;
-	*identity += set_phys_range_identity(start_pfn, end_pfn);
+	set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
 	}
 
 	/* Update kernel mapping, but not for highmem. */
-	if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+	if (pfn >= PFN_UP(__pa(high_memory - 1)))
 		return;
 
 	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
 	unsigned long ident_pfn_iter, remap_pfn_iter;
 	unsigned long ident_end_pfn = start_pfn + size;
 	unsigned long left = size;
-	unsigned long ident_cnt = 0;
 	unsigned int i, chunk;
 
 	WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 		xen_remap_mfn = mfn;
 
 		/* Set identity map */
-		ident_cnt += set_phys_range_identity(ident_pfn_iter,
-			ident_pfn_iter + chunk);
+		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
 		left -= chunk;
 	}
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
 	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
 	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-	unsigned long *identity, unsigned long *released)
+	unsigned long *released, unsigned long *remapped)
 {
 	unsigned long pfn;
 	unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		/* Do not remap pages beyond the current allocation */
 		if (cur_pfn >= nr_pages) {
 			/* Identity map remaining pages */
-			*identity += set_phys_range_identity(cur_pfn,
-				cur_pfn + size);
+			set_phys_range_identity(cur_pfn, cur_pfn + size);
 			break;
 		}
 		if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		if (!remap_range_size) {
 			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
 			xen_set_identity_and_release_chunk(cur_pfn,
-				cur_pfn + left, nr_pages, identity, released);
+				cur_pfn + left, nr_pages, released);
 			break;
 		}
 		/* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		/* Update variables to reflect new mappings. */
 		i += size;
 		remap_pfn += size;
-		*identity += size;
+		*remapped += size;
 	}
 
 	/*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 
 static void __init xen_set_identity_and_remap(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-	unsigned long *released)
+	unsigned long *released, unsigned long *remapped)
 {
 	phys_addr_t start = 0;
-	unsigned long identity = 0;
 	unsigned long last_pfn = nr_pages;
 	const struct e820entry *entry;
 	unsigned long num_released = 0;
+	unsigned long num_remapped = 0;
 	int i;
 
 	/*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
 			last_pfn = xen_set_identity_and_remap_chunk(
 						list, map_size, start_pfn,
 						end_pfn, nr_pages, last_pfn,
-						&identity, &num_released);
+						&num_released, &num_remapped);
 			start = end;
 		}
 	}
 
 	*released = num_released;
+	*remapped = num_remapped;
 
-	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
 	pr_info("Released %ld page(s)\n", num_released);
 }
 
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
 	unsigned long extra_pages = 0;
+	unsigned long remapped_pages;
 	int i;
 	int op;
 
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
 	 * underlying RAM.
 	 */
 	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-				   &xen_released_pages);
+				   &xen_released_pages, &remapped_pages);
 
 	extra_pages += xen_released_pages;
+	extra_pages += remapped_pages;
 
 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index f473d268d387..69087341d9ae 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent = | |||
391 | 391 | ||
392 | struct xen_clock_event_device { | 392 | struct xen_clock_event_device { |
393 | struct clock_event_device evt; | 393 | struct clock_event_device evt; |
394 | char *name; | 394 | char name[16]; |
395 | }; | 395 | }; |
396 | static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; | 396 | static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; |
397 | 397 | ||
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu) | |||
420 | if (evt->irq >= 0) { | 420 | if (evt->irq >= 0) { |
421 | unbind_from_irqhandler(evt->irq, NULL); | 421 | unbind_from_irqhandler(evt->irq, NULL); |
422 | evt->irq = -1; | 422 | evt->irq = -1; |
423 | kfree(per_cpu(xen_clock_events, cpu).name); | ||
424 | per_cpu(xen_clock_events, cpu).name = NULL; | ||
425 | } | 423 | } |
426 | } | 424 | } |
427 | 425 | ||
428 | void xen_setup_timer(int cpu) | 426 | void xen_setup_timer(int cpu) |
429 | { | 427 | { |
430 | char *name; | 428 | struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); |
431 | struct clock_event_device *evt; | 429 | struct clock_event_device *evt = &xevt->evt; |
432 | int irq; | 430 | int irq; |
433 | 431 | ||
434 | evt = &per_cpu(xen_clock_events, cpu).evt; | ||
435 | WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); | 432 | WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); |
436 | if (evt->irq >= 0) | 433 | if (evt->irq >= 0) |
437 | xen_teardown_timer(cpu); | 434 | xen_teardown_timer(cpu); |
438 | 435 | ||
439 | printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); | 436 | printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); |
440 | 437 | ||
441 | name = kasprintf(GFP_KERNEL, "timer%d", cpu); | 438 | snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu); |
442 | if (!name) | ||
443 | name = "<timer kasprintf failed>"; | ||
444 | 439 | ||
445 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | 440 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
446 | IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| | 441 | IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| |
447 | IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, | 442 | IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, |
448 | name, NULL); | 443 | xevt->name, NULL); |
449 | (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); | 444 | (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); |
450 | 445 | ||
451 | memcpy(evt, xen_clockevent, sizeof(*evt)); | 446 | memcpy(evt, xen_clockevent, sizeof(*evt)); |
452 | 447 | ||
453 | evt->cpumask = cpumask_of(cpu); | 448 | evt->cpumask = cpumask_of(cpu); |
454 | evt->irq = irq; | 449 | evt->irq = irq; |
455 | per_cpu(xen_clock_events, cpu).name = name; | ||
456 | } | 450 | } |
457 | 451 | ||
458 | 452 | ||
459 | void xen_setup_cpu_clockevents(void) | 453 | void xen_setup_cpu_clockevents(void) |
460 | { | 454 | { |
461 | BUG_ON(preemptible()); | ||
462 | |||
463 | clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); | 455 | clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); |
464 | } | 456 | } |
465 | 457 | ||
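
The time.c change drops the kasprintf()/kfree() pair by embedding a fixed 16-byte name in the per-CPU structure, so setup has no allocation failure path and teardown has nothing to free. A minimal userspace sketch of the pattern (the struct here is a simplified stand-in, not the kernel type):

    #include <stdio.h>

    struct clock_event_model {
        int irq;
        char name[16];    /* "timer" plus a decimal CPU number fits */
    };

    static void setup_timer_name(struct clock_event_model *evt, int cpu)
    {
        /* snprintf into the embedded buffer: no allocation, no failure */
        snprintf(evt->name, sizeof(evt->name), "timer%d", cpu);
    }

    int main(void)
    {
        struct clock_event_model evt = { .irq = -1 };

        setup_timer_name(&evt, 3);
        printf("%s\n", evt.name);    /* prints "timer3" */
        return 0;
    }
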
diff --git a/block/blk-core.c b/block/blk-core.c index 30f6153a40c2..3ad405571dcc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q) | |||
473 | } | 473 | } |
474 | EXPORT_SYMBOL_GPL(blk_queue_bypass_end); | 474 | EXPORT_SYMBOL_GPL(blk_queue_bypass_end); |
475 | 475 | ||
476 | void blk_set_queue_dying(struct request_queue *q) | ||
477 | { | ||
478 | queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); | ||
479 | |||
480 | if (q->mq_ops) | ||
481 | blk_mq_wake_waiters(q); | ||
482 | else { | ||
483 | struct request_list *rl; | ||
484 | |||
485 | blk_queue_for_each_rl(rl, q) { | ||
486 | if (rl->rq_pool) { | ||
487 | wake_up(&rl->wait[BLK_RW_SYNC]); | ||
488 | wake_up(&rl->wait[BLK_RW_ASYNC]); | ||
489 | } | ||
490 | } | ||
491 | } | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(blk_set_queue_dying); | ||
494 | |||
476 | /** | 495 | /** |
477 | * blk_cleanup_queue - shutdown a request queue | 496 | * blk_cleanup_queue - shutdown a request queue |
478 | * @q: request queue to shutdown | 497 | * @q: request queue to shutdown |
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q) | |||
486 | 505 | ||
487 | /* mark @q DYING, no new request or merges will be allowed afterwards */ | 506 | /* mark @q DYING, no new request or merges will be allowed afterwards */ |
488 | mutex_lock(&q->sysfs_lock); | 507 | mutex_lock(&q->sysfs_lock); |
489 | queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); | 508 | blk_set_queue_dying(q); |
490 | spin_lock_irq(lock); | 509 | spin_lock_irq(lock); |
491 | 510 | ||
492 | /* | 511 | /* |
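
blk_set_queue_dying() factors the DYING transition out of blk_cleanup_queue() and wakes sleepers on both the blk-mq and legacy request_list paths. A hedged sketch of how a driver might call the newly exported helper once it knows the device is gone, assuming the matching declaration lands in linux/blkdev.h:

    #include <linux/blkdev.h>

    static void example_device_dead(struct request_queue *q)
    {
        /* mark the queue dying and wake anyone sleeping for a request
         * or a tag, so they can fail out instead of blocking forever */
        blk_set_queue_dying(q);

        /* ...device-specific teardown, then the usual: */
        blk_cleanup_queue(q);
    }
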
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 32e8dbb9ad1c..60c9d4a93fe4 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) | |||
68 | } | 68 | } |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Wakeup all potentially sleeping on normal (non-reserved) tags | 71 | * Wakeup all potentially sleeping on tags |
72 | */ | 72 | */ |
73 | static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) | 73 | void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) |
74 | { | 74 | { |
75 | struct blk_mq_bitmap_tags *bt; | 75 | struct blk_mq_bitmap_tags *bt; |
76 | int i, wake_index; | 76 | int i, wake_index; |
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) | |||
85 | 85 | ||
86 | wake_index = bt_index_inc(wake_index); | 86 | wake_index = bt_index_inc(wake_index); |
87 | } | 87 | } |
88 | |||
89 | if (include_reserve) { | ||
90 | bt = &tags->breserved_tags; | ||
91 | if (waitqueue_active(&bt->bs[0].wait)) | ||
92 | wake_up(&bt->bs[0].wait); | ||
93 | } | ||
88 | } | 94 | } |
89 | 95 | ||
90 | /* | 96 | /* |
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) | |||
100 | 106 | ||
101 | atomic_dec(&tags->active_queues); | 107 | atomic_dec(&tags->active_queues); |
102 | 108 | ||
103 | blk_mq_tag_wakeup_all(tags); | 109 | blk_mq_tag_wakeup_all(tags, false); |
104 | } | 110 | } |
105 | 111 | ||
106 | /* | 112 | /* |
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) | |||
584 | * static and should never need resizing. | 590 | * static and should never need resizing. |
585 | */ | 591 | */ |
586 | bt_update_count(&tags->bitmap_tags, tdepth); | 592 | bt_update_count(&tags->bitmap_tags, tdepth); |
587 | blk_mq_tag_wakeup_all(tags); | 593 | blk_mq_tag_wakeup_all(tags, false); |
588 | return 0; | 594 | return 0; |
589 | } | 595 | } |
590 | 596 | ||
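
A summary sketch of the new include_reserve flag's semantics (example_tag_wakeups is hypothetical): ordinary tag-pressure wakeups keep reserved-tag waiters asleep, while the dying-queue path also wakes the single reserved wait queue (bs[0]) so sleepers can observe the queue state:

    #include "blk-mq-tag.h"

    static void example_tag_wakeups(struct blk_mq_tags *tags, bool queue_dying)
    {
        if (queue_dying)
            blk_mq_tag_wakeup_all(tags, true);   /* normal + reserved tags */
        else
            blk_mq_tag_wakeup_all(tags, false);  /* normal tags only */
    }
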
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 6206ed17ef76..a6fa0fc9d41a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h | |||
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); | |||
54 | extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); | 54 | extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); |
55 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); | 55 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); |
56 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); | 56 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); |
57 | extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); | ||
57 | 58 | ||
58 | enum { | 59 | enum { |
59 | BLK_MQ_TAG_CACHE_MIN = 1, | 60 | BLK_MQ_TAG_CACHE_MIN = 1, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index da1ab5641227..2f95747c287e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) | |||
107 | wake_up_all(&q->mq_freeze_wq); | 107 | wake_up_all(&q->mq_freeze_wq); |
108 | } | 108 | } |
109 | 109 | ||
110 | static void blk_mq_freeze_queue_start(struct request_queue *q) | 110 | void blk_mq_freeze_queue_start(struct request_queue *q) |
111 | { | 111 | { |
112 | bool freeze; | 112 | bool freeze; |
113 | 113 | ||
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q) | |||
120 | blk_mq_run_queues(q, false); | 120 | blk_mq_run_queues(q, false); |
121 | } | 121 | } |
122 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); | ||
123 | 124 | ||
124 | static void blk_mq_freeze_queue_wait(struct request_queue *q) | 125 | static void blk_mq_freeze_queue_wait(struct request_queue *q) |
125 | { | 126 | { |
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q) | |||
136 | blk_mq_freeze_queue_wait(q); | 137 | blk_mq_freeze_queue_wait(q); |
137 | } | 138 | } |
138 | 139 | ||
139 | static void blk_mq_unfreeze_queue(struct request_queue *q) | 140 | void blk_mq_unfreeze_queue(struct request_queue *q) |
140 | { | 141 | { |
141 | bool wake; | 142 | bool wake; |
142 | 143 | ||
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q) | |||
149 | wake_up_all(&q->mq_freeze_wq); | 150 | wake_up_all(&q->mq_freeze_wq); |
150 | } | 151 | } |
151 | } | 152 | } |
153 | EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); | ||
154 | |||
155 | void blk_mq_wake_waiters(struct request_queue *q) | ||
156 | { | ||
157 | struct blk_mq_hw_ctx *hctx; | ||
158 | unsigned int i; | ||
159 | |||
160 | queue_for_each_hw_ctx(q, hctx, i) | ||
161 | if (blk_mq_hw_queue_mapped(hctx)) | ||
162 | blk_mq_tag_wakeup_all(hctx->tags, true); | ||
163 | |||
164 | /* | ||
165 | * If we are called because the queue has now been marked as | ||
166 | * dying, we need to ensure that processes currently waiting on | ||
167 | * the queue are notified as well. | ||
168 | */ | ||
169 | wake_up_all(&q->mq_freeze_wq); | ||
170 | } | ||
152 | 171 | ||
153 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) | 172 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) |
154 | { | 173 | { |
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, | |||
258 | ctx = alloc_data.ctx; | 277 | ctx = alloc_data.ctx; |
259 | } | 278 | } |
260 | blk_mq_put_ctx(ctx); | 279 | blk_mq_put_ctx(ctx); |
261 | if (!rq) | 280 | if (!rq) { |
281 | blk_mq_queue_exit(q); | ||
262 | return ERR_PTR(-EWOULDBLOCK); | 282 | return ERR_PTR(-EWOULDBLOCK); |
283 | } | ||
263 | return rq; | 284 | return rq; |
264 | } | 285 | } |
265 | EXPORT_SYMBOL(blk_mq_alloc_request); | 286 | EXPORT_SYMBOL(blk_mq_alloc_request); |
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq) | |||
383 | } | 404 | } |
384 | EXPORT_SYMBOL(blk_mq_complete_request); | 405 | EXPORT_SYMBOL(blk_mq_complete_request); |
385 | 406 | ||
407 | int blk_mq_request_started(struct request *rq) | ||
408 | { | ||
409 | return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | ||
410 | } | ||
411 | EXPORT_SYMBOL_GPL(blk_mq_request_started); | ||
412 | |||
386 | void blk_mq_start_request(struct request *rq) | 413 | void blk_mq_start_request(struct request *rq) |
387 | { | 414 | { |
388 | struct request_queue *q = rq->q; | 415 | struct request_queue *q = rq->q; |
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) | |||
500 | } | 527 | } |
501 | EXPORT_SYMBOL(blk_mq_add_to_requeue_list); | 528 | EXPORT_SYMBOL(blk_mq_add_to_requeue_list); |
502 | 529 | ||
530 | void blk_mq_cancel_requeue_work(struct request_queue *q) | ||
531 | { | ||
532 | cancel_work_sync(&q->requeue_work); | ||
533 | } | ||
534 | EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); | ||
535 | |||
503 | void blk_mq_kick_requeue_list(struct request_queue *q) | 536 | void blk_mq_kick_requeue_list(struct request_queue *q) |
504 | { | 537 | { |
505 | kblockd_schedule_work(&q->requeue_work); | 538 | kblockd_schedule_work(&q->requeue_work); |
506 | } | 539 | } |
507 | EXPORT_SYMBOL(blk_mq_kick_requeue_list); | 540 | EXPORT_SYMBOL(blk_mq_kick_requeue_list); |
508 | 541 | ||
542 | void blk_mq_abort_requeue_list(struct request_queue *q) | ||
543 | { | ||
544 | unsigned long flags; | ||
545 | LIST_HEAD(rq_list); | ||
546 | |||
547 | spin_lock_irqsave(&q->requeue_lock, flags); | ||
548 | list_splice_init(&q->requeue_list, &rq_list); | ||
549 | spin_unlock_irqrestore(&q->requeue_lock, flags); | ||
550 | |||
551 | while (!list_empty(&rq_list)) { | ||
552 | struct request *rq; | ||
553 | |||
554 | rq = list_first_entry(&rq_list, struct request, queuelist); | ||
555 | list_del_init(&rq->queuelist); | ||
556 | rq->errors = -EIO; | ||
557 | blk_mq_end_request(rq, rq->errors); | ||
558 | } | ||
559 | } | ||
560 | EXPORT_SYMBOL(blk_mq_abort_requeue_list); | ||
561 | |||
509 | static inline bool is_flush_request(struct request *rq, | 562 | static inline bool is_flush_request(struct request *rq, |
510 | struct blk_flush_queue *fq, unsigned int tag) | 563 | struct blk_flush_queue *fq, unsigned int tag) |
511 | { | 564 | { |
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved) | |||
566 | break; | 619 | break; |
567 | } | 620 | } |
568 | } | 621 | } |
569 | 622 | ||
570 | static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | 623 | static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, |
571 | struct request *rq, void *priv, bool reserved) | 624 | struct request *rq, void *priv, bool reserved) |
572 | { | 625 | { |
573 | struct blk_mq_timeout_data *data = priv; | 626 | struct blk_mq_timeout_data *data = priv; |
574 | 627 | ||
575 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) | 628 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { |
629 | /* | ||
630 | * If a request wasn't started before the queue was | ||
631 | * marked dying, kill it here or it'll go unnoticed. | ||
632 | */ | ||
633 | if (unlikely(blk_queue_dying(rq->q))) { | ||
634 | rq->errors = -EIO; | ||
635 | blk_mq_complete_request(rq); | ||
636 | } | ||
637 | return; | ||
638 | } | ||
639 | if (rq->cmd_flags & REQ_NO_TIMEOUT) | ||
576 | return; | 640 | return; |
577 | 641 | ||
578 | if (time_after_eq(jiffies, rq->deadline)) { | 642 | if (time_after_eq(jiffies, rq->deadline)) { |
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q, | |||
1601 | hctx->queue = q; | 1665 | hctx->queue = q; |
1602 | hctx->queue_num = hctx_idx; | 1666 | hctx->queue_num = hctx_idx; |
1603 | hctx->flags = set->flags; | 1667 | hctx->flags = set->flags; |
1604 | hctx->cmd_size = set->cmd_size; | ||
1605 | 1668 | ||
1606 | blk_mq_init_cpu_notifier(&hctx->cpu_notifier, | 1669 | blk_mq_init_cpu_notifier(&hctx->cpu_notifier, |
1607 | blk_mq_hctx_notify, hctx); | 1670 | blk_mq_hctx_notify, hctx); |
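
With blk_mq_freeze_queue_start()/blk_mq_unfreeze_queue() exported and blk_mq_cancel_requeue_work() added, a driver can quiesce and restart its queues around a controller reset. A minimal sketch of that pairing, mirroring what the nvme changes below do (the example_* names are hypothetical):

    #include <linux/blk-mq.h>

    static void example_suspend_queue(struct request_queue *q)
    {
        blk_mq_freeze_queue_start(q);   /* stop new requests entering */
        blk_mq_cancel_requeue_work(q);  /* no requeue work racing teardown */
        blk_mq_stop_hw_queues(q);
    }

    static void example_resume_queue(struct request_queue *q)
    {
        blk_mq_unfreeze_queue(q);
        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);    /* flush anything left requeued */
    }
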
diff --git a/block/blk-mq.h b/block/blk-mq.h index 206230e64f79..4f4f943c22c3 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q); | |||
32 | void blk_mq_clone_flush_request(struct request *flush_rq, | 32 | void blk_mq_clone_flush_request(struct request *flush_rq, |
33 | struct request *orig_rq); | 33 | struct request *orig_rq); |
34 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); | 34 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); |
35 | void blk_mq_wake_waiters(struct request_queue *q); | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * CPU hotplug helpers | 38 | * CPU hotplug helpers |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 56c025894cdf..246dfb16c3d9 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req) | |||
190 | struct request_queue *q = req->q; | 190 | struct request_queue *q = req->q; |
191 | unsigned long expiry; | 191 | unsigned long expiry; |
192 | 192 | ||
193 | if (req->cmd_flags & REQ_NO_TIMEOUT) | ||
194 | return; | ||
195 | |||
193 | /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ | 196 | /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ |
194 | if (!q->mq_ops && !q->rq_timed_out_fn) | 197 | if (!q->mq_ops && !q->rq_timed_out_fn) |
195 | return; | 198 | return; |
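
blk_add_timer() now returns early for REQ_NO_TIMEOUT requests, and the blk-mq timeout scan above skips them, so such requests never expire. A hedged sketch of the submitter side, as the nvme async-event request below uses it (example_mark_no_timeout is hypothetical):

    #include <linux/blkdev.h>

    static void example_mark_no_timeout(struct request *req)
    {
        /* request may legitimately stay outstanding forever:
         * no deadline is armed and the timeout handler ignores it */
        req->cmd_flags |= REQ_NO_TIMEOUT;
    }
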
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index a27d31d1ba24..9dcf83682e36 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c | |||
@@ -14,10 +14,10 @@ | |||
14 | 14 | ||
15 | #include "internal.h" | 15 | #include "internal.h" |
16 | 16 | ||
17 | #define DO_ENUMERATION 0x01 | 17 | #define INT3401_DEVICE 0X01 |
18 | static const struct acpi_device_id int340x_thermal_device_ids[] = { | 18 | static const struct acpi_device_id int340x_thermal_device_ids[] = { |
19 | {"INT3400", DO_ENUMERATION }, | 19 | {"INT3400"}, |
20 | {"INT3401"}, | 20 | {"INT3401", INT3401_DEVICE}, |
21 | {"INT3402"}, | 21 | {"INT3402"}, |
22 | {"INT3403"}, | 22 | {"INT3403"}, |
23 | {"INT3404"}, | 23 | {"INT3404"}, |
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev, | |||
34 | const struct acpi_device_id *id) | 34 | const struct acpi_device_id *id) |
35 | { | 35 | { |
36 | #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) | 36 | #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) |
37 | if (id->driver_data == DO_ENUMERATION) | 37 | acpi_create_platform_device(adev); |
38 | #elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) | ||
39 | /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ | ||
40 | if (id->driver_data == INT3401_DEVICE) | ||
38 | acpi_create_platform_device(adev); | 41 | acpi_create_platform_device(adev); |
39 | #endif | 42 | #endif |
40 | return 1; | 43 | return 1; |
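
The int340x change inverts the enumeration policy: with the INT340X thermal driver configured, every matched ID is intended to get a platform device; otherwise only INT3401 is, for the SoC DTS thermal driver's IRQ descriptor. A toy userspace model of that dispatch (should_enumerate is hypothetical, not the ACPI code):

    #include <stdbool.h>
    #include <stdio.h>

    #define INT3401_DEVICE 0x01

    static bool should_enumerate(bool int340x_configured,
                                 bool soc_dts_configured,
                                 unsigned long driver_data)
    {
        if (int340x_configured)
            return true;                          /* enumerate everything */
        if (soc_dts_configured)
            return driver_data == INT3401_DEVICE; /* only INT3401 */
        return false;
    }

    int main(void)
    {
        printf("%d\n", should_enumerate(false, true, INT3401_DEVICE)); /* 1 */
        printf("%d\n", should_enumerate(false, true, 0));              /* 0 */
        return 0;
    }
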
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ae9f615382f6..aa2224aa7caa 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -530,7 +530,7 @@ static int null_add_dev(void) | |||
530 | goto out_cleanup_queues; | 530 | goto out_cleanup_queues; |
531 | 531 | ||
532 | nullb->q = blk_mq_init_queue(&nullb->tag_set); | 532 | nullb->q = blk_mq_init_queue(&nullb->tag_set); |
533 | if (!nullb->q) { | 533 | if (IS_ERR(nullb->q)) { |
534 | rv = -ENOMEM; | 534 | rv = -ENOMEM; |
535 | goto out_cleanup_tags; | 535 | goto out_cleanup_tags; |
536 | } | 536 | } |
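
null_blk, nvme and virtio_blk all switch their blk_mq_init_queue() error check from !q to IS_ERR(q), which assumes the allocator now returns an ERR_PTR() on failure rather than NULL. A minimal sketch of the updated convention (example_init is hypothetical):

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    static int example_init(struct blk_mq_tag_set *set,
                            struct request_queue **qp)
    {
        struct request_queue *q = blk_mq_init_queue(set);

        if (IS_ERR(q))
            return PTR_ERR(q);  /* these drivers return a fixed -ENOMEM */
        *qp = q;
        return 0;
    }
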
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b1d5d8797315..cb529e9a82dd 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx, | |||
215 | cmd->fn = handler; | 215 | cmd->fn = handler; |
216 | cmd->ctx = ctx; | 216 | cmd->ctx = ctx; |
217 | cmd->aborted = 0; | 217 | cmd->aborted = 0; |
218 | blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); | ||
218 | } | 219 | } |
219 | 220 | ||
220 | /* Special values must be less than 0x1000 */ | 221 | /* Special values must be less than 0x1000 */ |
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, | |||
431 | if (unlikely(status)) { | 432 | if (unlikely(status)) { |
432 | if (!(status & NVME_SC_DNR || blk_noretry_request(req)) | 433 | if (!(status & NVME_SC_DNR || blk_noretry_request(req)) |
433 | && (jiffies - req->start_time) < req->timeout) { | 434 | && (jiffies - req->start_time) < req->timeout) { |
435 | unsigned long flags; | ||
436 | |||
434 | blk_mq_requeue_request(req); | 437 | blk_mq_requeue_request(req); |
435 | blk_mq_kick_requeue_list(req->q); | 438 | spin_lock_irqsave(req->q->queue_lock, flags); |
439 | if (!blk_queue_stopped(req->q)) | ||
440 | blk_mq_kick_requeue_list(req->q); | ||
441 | spin_unlock_irqrestore(req->q->queue_lock, flags); | ||
436 | return; | 442 | return; |
437 | } | 443 | } |
438 | req->errors = nvme_error_status(status); | 444 | req->errors = nvme_error_status(status); |
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
664 | } | 670 | } |
665 | } | 671 | } |
666 | 672 | ||
667 | blk_mq_start_request(req); | ||
668 | |||
669 | nvme_set_info(cmd, iod, req_completion); | 673 | nvme_set_info(cmd, iod, req_completion); |
670 | spin_lock_irq(&nvmeq->q_lock); | 674 | spin_lock_irq(&nvmeq->q_lock); |
671 | if (req->cmd_flags & REQ_DISCARD) | 675 | if (req->cmd_flags & REQ_DISCARD) |
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) | |||
835 | if (IS_ERR(req)) | 839 | if (IS_ERR(req)) |
836 | return PTR_ERR(req); | 840 | return PTR_ERR(req); |
837 | 841 | ||
842 | req->cmd_flags |= REQ_NO_TIMEOUT; | ||
838 | cmd_info = blk_mq_rq_to_pdu(req); | 843 | cmd_info = blk_mq_rq_to_pdu(req); |
839 | nvme_set_info(cmd_info, req, async_req_completion); | 844 | nvme_set_info(cmd_info, req, async_req_completion); |
840 | 845 | ||
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req) | |||
1016 | struct nvme_command cmd; | 1021 | struct nvme_command cmd; |
1017 | 1022 | ||
1018 | if (!nvmeq->qid || cmd_rq->aborted) { | 1023 | if (!nvmeq->qid || cmd_rq->aborted) { |
1024 | unsigned long flags; | ||
1025 | |||
1026 | spin_lock_irqsave(&dev_list_lock, flags); | ||
1019 | if (work_busy(&dev->reset_work)) | 1027 | if (work_busy(&dev->reset_work)) |
1020 | return; | 1028 | goto out; |
1021 | list_del_init(&dev->node); | 1029 | list_del_init(&dev->node); |
1022 | dev_warn(&dev->pci_dev->dev, | 1030 | dev_warn(&dev->pci_dev->dev, |
1023 | "I/O %d QID %d timeout, reset controller\n", | 1031 | "I/O %d QID %d timeout, reset controller\n", |
1024 | req->tag, nvmeq->qid); | 1032 | req->tag, nvmeq->qid); |
1025 | dev->reset_workfn = nvme_reset_failed_dev; | 1033 | dev->reset_workfn = nvme_reset_failed_dev; |
1026 | queue_work(nvme_workq, &dev->reset_work); | 1034 | queue_work(nvme_workq, &dev->reset_work); |
1035 | out: | ||
1036 | spin_unlock_irqrestore(&dev_list_lock, flags); | ||
1027 | return; | 1037 | return; |
1028 | } | 1038 | } |
1029 | 1039 | ||
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, | |||
1064 | void *ctx; | 1074 | void *ctx; |
1065 | nvme_completion_fn fn; | 1075 | nvme_completion_fn fn; |
1066 | struct nvme_cmd_info *cmd; | 1076 | struct nvme_cmd_info *cmd; |
1067 | static struct nvme_completion cqe = { | 1077 | struct nvme_completion cqe; |
1068 | .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), | 1078 | |
1069 | }; | 1079 | if (!blk_mq_request_started(req)) |
1080 | return; | ||
1070 | 1081 | ||
1071 | cmd = blk_mq_rq_to_pdu(req); | 1082 | cmd = blk_mq_rq_to_pdu(req); |
1072 | 1083 | ||
1073 | if (cmd->ctx == CMD_CTX_CANCELLED) | 1084 | if (cmd->ctx == CMD_CTX_CANCELLED) |
1074 | return; | 1085 | return; |
1075 | 1086 | ||
1087 | if (blk_queue_dying(req->q)) | ||
1088 | cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); | ||
1089 | else | ||
1090 | cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); | ||
1091 | |||
1092 | |||
1076 | dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", | 1093 | dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", |
1077 | req->tag, nvmeq->qid); | 1094 | req->tag, nvmeq->qid); |
1078 | ctx = cancel_cmd_info(cmd, &fn); | 1095 | ctx = cancel_cmd_info(cmd, &fn); |
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
1084 | struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); | 1101 | struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); |
1085 | struct nvme_queue *nvmeq = cmd->nvmeq; | 1102 | struct nvme_queue *nvmeq = cmd->nvmeq; |
1086 | 1103 | ||
1087 | dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, | ||
1088 | nvmeq->qid); | ||
1089 | if (nvmeq->dev->initialized) | ||
1090 | nvme_abort_req(req); | ||
1091 | |||
1092 | /* | 1104 | /* |
1093 | * The aborted req will be completed on receiving the abort req. | 1105 | * The aborted req will be completed on receiving the abort req. |
1094 | * We enable the timer again. If hit twice, it'll cause a device reset, | 1106 | * We enable the timer again. If hit twice, it'll cause a device reset, |
1095 | * as the device then is in a faulty state. | 1107 | * as the device then is in a faulty state. |
1096 | */ | 1108 | */ |
1097 | return BLK_EH_RESET_TIMER; | 1109 | int ret = BLK_EH_RESET_TIMER; |
1110 | |||
1111 | dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, | ||
1112 | nvmeq->qid); | ||
1113 | |||
1114 | spin_lock_irq(&nvmeq->q_lock); | ||
1115 | if (!nvmeq->dev->initialized) { | ||
1116 | /* | ||
1117 | * Force cancelled command frees the request, which requires we | ||
1118 | * return BLK_EH_NOT_HANDLED. | ||
1119 | */ | ||
1120 | nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); | ||
1121 | ret = BLK_EH_NOT_HANDLED; | ||
1122 | } else | ||
1123 | nvme_abort_req(req); | ||
1124 | spin_unlock_irq(&nvmeq->q_lock); | ||
1125 | |||
1126 | return ret; | ||
1098 | } | 1127 | } |
1099 | 1128 | ||
1100 | static void nvme_free_queue(struct nvme_queue *nvmeq) | 1129 | static void nvme_free_queue(struct nvme_queue *nvmeq) |
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) | |||
1131 | */ | 1160 | */ |
1132 | static int nvme_suspend_queue(struct nvme_queue *nvmeq) | 1161 | static int nvme_suspend_queue(struct nvme_queue *nvmeq) |
1133 | { | 1162 | { |
1134 | int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; | 1163 | int vector; |
1135 | 1164 | ||
1136 | spin_lock_irq(&nvmeq->q_lock); | 1165 | spin_lock_irq(&nvmeq->q_lock); |
1166 | if (nvmeq->cq_vector == -1) { | ||
1167 | spin_unlock_irq(&nvmeq->q_lock); | ||
1168 | return 1; | ||
1169 | } | ||
1170 | vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; | ||
1137 | nvmeq->dev->online_queues--; | 1171 | nvmeq->dev->online_queues--; |
1172 | nvmeq->cq_vector = -1; | ||
1138 | spin_unlock_irq(&nvmeq->q_lock); | 1173 | spin_unlock_irq(&nvmeq->q_lock); |
1139 | 1174 | ||
1140 | irq_set_affinity_hint(vector, NULL); | 1175 | irq_set_affinity_hint(vector, NULL); |
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) | |||
1169 | adapter_delete_sq(dev, qid); | 1204 | adapter_delete_sq(dev, qid); |
1170 | adapter_delete_cq(dev, qid); | 1205 | adapter_delete_cq(dev, qid); |
1171 | } | 1206 | } |
1207 | if (!qid && dev->admin_q) | ||
1208 | blk_mq_freeze_queue_start(dev->admin_q); | ||
1172 | nvme_clear_queue(nvmeq); | 1209 | nvme_clear_queue(nvmeq); |
1173 | } | 1210 | } |
1174 | 1211 | ||
1175 | static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, | 1212 | static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, |
1176 | int depth, int vector) | 1213 | int depth) |
1177 | { | 1214 | { |
1178 | struct device *dmadev = &dev->pci_dev->dev; | 1215 | struct device *dmadev = &dev->pci_dev->dev; |
1179 | struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); | 1216 | struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); |
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, | |||
1199 | nvmeq->cq_phase = 1; | 1236 | nvmeq->cq_phase = 1; |
1200 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; | 1237 | nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; |
1201 | nvmeq->q_depth = depth; | 1238 | nvmeq->q_depth = depth; |
1202 | nvmeq->cq_vector = vector; | ||
1203 | nvmeq->qid = qid; | 1239 | nvmeq->qid = qid; |
1204 | dev->queue_count++; | 1240 | dev->queue_count++; |
1205 | dev->queues[qid] = nvmeq; | 1241 | dev->queues[qid] = nvmeq; |
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1244 | struct nvme_dev *dev = nvmeq->dev; | 1280 | struct nvme_dev *dev = nvmeq->dev; |
1245 | int result; | 1281 | int result; |
1246 | 1282 | ||
1283 | nvmeq->cq_vector = qid - 1; | ||
1247 | result = adapter_alloc_cq(dev, qid, nvmeq); | 1284 | result = adapter_alloc_cq(dev, qid, nvmeq); |
1248 | if (result < 0) | 1285 | if (result < 0) |
1249 | return result; | 1286 | return result; |
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = { | |||
1355 | .timeout = nvme_timeout, | 1392 | .timeout = nvme_timeout, |
1356 | }; | 1393 | }; |
1357 | 1394 | ||
1395 | static void nvme_dev_remove_admin(struct nvme_dev *dev) | ||
1396 | { | ||
1397 | if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { | ||
1398 | blk_cleanup_queue(dev->admin_q); | ||
1399 | blk_mq_free_tag_set(&dev->admin_tagset); | ||
1400 | } | ||
1401 | } | ||
1402 | |||
1358 | static int nvme_alloc_admin_tags(struct nvme_dev *dev) | 1403 | static int nvme_alloc_admin_tags(struct nvme_dev *dev) |
1359 | { | 1404 | { |
1360 | if (!dev->admin_q) { | 1405 | if (!dev->admin_q) { |
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) | |||
1370 | return -ENOMEM; | 1415 | return -ENOMEM; |
1371 | 1416 | ||
1372 | dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); | 1417 | dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); |
1373 | if (!dev->admin_q) { | 1418 | if (IS_ERR(dev->admin_q)) { |
1374 | blk_mq_free_tag_set(&dev->admin_tagset); | 1419 | blk_mq_free_tag_set(&dev->admin_tagset); |
1375 | return -ENOMEM; | 1420 | return -ENOMEM; |
1376 | } | 1421 | } |
1377 | } | 1422 | if (!blk_get_queue(dev->admin_q)) { |
1423 | nvme_dev_remove_admin(dev); | ||
1424 | return -ENODEV; | ||
1425 | } | ||
1426 | } else | ||
1427 | blk_mq_unfreeze_queue(dev->admin_q); | ||
1378 | 1428 | ||
1379 | return 0; | 1429 | return 0; |
1380 | } | 1430 | } |
1381 | 1431 | ||
1382 | static void nvme_free_admin_tags(struct nvme_dev *dev) | ||
1383 | { | ||
1384 | if (dev->admin_q) | ||
1385 | blk_mq_free_tag_set(&dev->admin_tagset); | ||
1386 | } | ||
1387 | |||
1388 | static int nvme_configure_admin_queue(struct nvme_dev *dev) | 1432 | static int nvme_configure_admin_queue(struct nvme_dev *dev) |
1389 | { | 1433 | { |
1390 | int result; | 1434 | int result; |
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
1416 | 1460 | ||
1417 | nvmeq = dev->queues[0]; | 1461 | nvmeq = dev->queues[0]; |
1418 | if (!nvmeq) { | 1462 | if (!nvmeq) { |
1419 | nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); | 1463 | nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); |
1420 | if (!nvmeq) | 1464 | if (!nvmeq) |
1421 | return -ENOMEM; | 1465 | return -ENOMEM; |
1422 | } | 1466 | } |
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
1439 | if (result) | 1483 | if (result) |
1440 | goto free_nvmeq; | 1484 | goto free_nvmeq; |
1441 | 1485 | ||
1442 | result = nvme_alloc_admin_tags(dev); | 1486 | nvmeq->cq_vector = 0; |
1443 | if (result) | ||
1444 | goto free_nvmeq; | ||
1445 | |||
1446 | result = queue_request_irq(dev, nvmeq, nvmeq->irqname); | 1487 | result = queue_request_irq(dev, nvmeq, nvmeq->irqname); |
1447 | if (result) | 1488 | if (result) |
1448 | goto free_tags; | 1489 | goto free_nvmeq; |
1449 | 1490 | ||
1450 | return result; | 1491 | return result; |
1451 | 1492 | ||
1452 | free_tags: | ||
1453 | nvme_free_admin_tags(dev); | ||
1454 | free_nvmeq: | 1493 | free_nvmeq: |
1455 | nvme_free_queues(dev, 0); | 1494 | nvme_free_queues(dev, 0); |
1456 | return result; | 1495 | return result; |
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev) | |||
1944 | unsigned i; | 1983 | unsigned i; |
1945 | 1984 | ||
1946 | for (i = dev->queue_count; i <= dev->max_qid; i++) | 1985 | for (i = dev->queue_count; i <= dev->max_qid; i++) |
1947 | if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) | 1986 | if (!nvme_alloc_queue(dev, i, dev->q_depth)) |
1948 | break; | 1987 | break; |
1949 | 1988 | ||
1950 | for (i = dev->online_queues; i <= dev->queue_count - 1; i++) | 1989 | for (i = dev->online_queues; i <= dev->queue_count - 1; i++) |
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) | |||
2235 | break; | 2274 | break; |
2236 | if (!schedule_timeout(ADMIN_TIMEOUT) || | 2275 | if (!schedule_timeout(ADMIN_TIMEOUT) || |
2237 | fatal_signal_pending(current)) { | 2276 | fatal_signal_pending(current)) { |
2277 | /* | ||
2278 | * Disable the controller first since we can't trust it | ||
2279 | * at this point, but leave the admin queue enabled | ||
2280 | * until all queue deletion requests are flushed. | ||
2281 | * FIXME: This may take a while if there are more h/w | ||
2282 | * queues than admin tags. | ||
2283 | */ | ||
2238 | set_current_state(TASK_RUNNING); | 2284 | set_current_state(TASK_RUNNING); |
2239 | |||
2240 | nvme_disable_ctrl(dev, readq(&dev->bar->cap)); | 2285 | nvme_disable_ctrl(dev, readq(&dev->bar->cap)); |
2241 | nvme_disable_queue(dev, 0); | 2286 | nvme_clear_queue(dev->queues[0]); |
2242 | |||
2243 | send_sig(SIGKILL, dq->worker->task, 1); | ||
2244 | flush_kthread_worker(dq->worker); | 2287 | flush_kthread_worker(dq->worker); |
2288 | nvme_disable_queue(dev, 0); | ||
2245 | return; | 2289 | return; |
2246 | } | 2290 | } |
2247 | } | 2291 | } |
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work) | |||
2318 | { | 2362 | { |
2319 | struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, | 2363 | struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, |
2320 | cmdinfo.work); | 2364 | cmdinfo.work); |
2321 | allow_signal(SIGKILL); | ||
2322 | if (nvme_delete_sq(nvmeq)) | 2365 | if (nvme_delete_sq(nvmeq)) |
2323 | nvme_del_queue_end(nvmeq); | 2366 | nvme_del_queue_end(nvmeq); |
2324 | } | 2367 | } |
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev) | |||
2376 | kthread_stop(tmp); | 2419 | kthread_stop(tmp); |
2377 | } | 2420 | } |
2378 | 2421 | ||
2422 | static void nvme_freeze_queues(struct nvme_dev *dev) | ||
2423 | { | ||
2424 | struct nvme_ns *ns; | ||
2425 | |||
2426 | list_for_each_entry(ns, &dev->namespaces, list) { | ||
2427 | blk_mq_freeze_queue_start(ns->queue); | ||
2428 | |||
2429 | spin_lock(ns->queue->queue_lock); | ||
2430 | queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); | ||
2431 | spin_unlock(ns->queue->queue_lock); | ||
2432 | |||
2433 | blk_mq_cancel_requeue_work(ns->queue); | ||
2434 | blk_mq_stop_hw_queues(ns->queue); | ||
2435 | } | ||
2436 | } | ||
2437 | |||
2438 | static void nvme_unfreeze_queues(struct nvme_dev *dev) | ||
2439 | { | ||
2440 | struct nvme_ns *ns; | ||
2441 | |||
2442 | list_for_each_entry(ns, &dev->namespaces, list) { | ||
2443 | queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); | ||
2444 | blk_mq_unfreeze_queue(ns->queue); | ||
2445 | blk_mq_start_stopped_hw_queues(ns->queue, true); | ||
2446 | blk_mq_kick_requeue_list(ns->queue); | ||
2447 | } | ||
2448 | } | ||
2449 | |||
2379 | static void nvme_dev_shutdown(struct nvme_dev *dev) | 2450 | static void nvme_dev_shutdown(struct nvme_dev *dev) |
2380 | { | 2451 | { |
2381 | int i; | 2452 | int i; |
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) | |||
2384 | dev->initialized = 0; | 2455 | dev->initialized = 0; |
2385 | nvme_dev_list_remove(dev); | 2456 | nvme_dev_list_remove(dev); |
2386 | 2457 | ||
2387 | if (dev->bar) | 2458 | if (dev->bar) { |
2459 | nvme_freeze_queues(dev); | ||
2388 | csts = readl(&dev->bar->csts); | 2460 | csts = readl(&dev->bar->csts); |
2461 | } | ||
2389 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { | 2462 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { |
2390 | for (i = dev->queue_count - 1; i >= 0; i--) { | 2463 | for (i = dev->queue_count - 1; i >= 0; i--) { |
2391 | struct nvme_queue *nvmeq = dev->queues[i]; | 2464 | struct nvme_queue *nvmeq = dev->queues[i]; |
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) | |||
2400 | nvme_dev_unmap(dev); | 2473 | nvme_dev_unmap(dev); |
2401 | } | 2474 | } |
2402 | 2475 | ||
2403 | static void nvme_dev_remove_admin(struct nvme_dev *dev) | ||
2404 | { | ||
2405 | if (dev->admin_q && !blk_queue_dying(dev->admin_q)) | ||
2406 | blk_cleanup_queue(dev->admin_q); | ||
2407 | } | ||
2408 | |||
2409 | static void nvme_dev_remove(struct nvme_dev *dev) | 2476 | static void nvme_dev_remove(struct nvme_dev *dev) |
2410 | { | 2477 | { |
2411 | struct nvme_ns *ns; | 2478 | struct nvme_ns *ns; |
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev) | |||
2413 | list_for_each_entry(ns, &dev->namespaces, list) { | 2480 | list_for_each_entry(ns, &dev->namespaces, list) { |
2414 | if (ns->disk->flags & GENHD_FL_UP) | 2481 | if (ns->disk->flags & GENHD_FL_UP) |
2415 | del_gendisk(ns->disk); | 2482 | del_gendisk(ns->disk); |
2416 | if (!blk_queue_dying(ns->queue)) | 2483 | if (!blk_queue_dying(ns->queue)) { |
2484 | blk_mq_abort_requeue_list(ns->queue); | ||
2417 | blk_cleanup_queue(ns->queue); | 2485 | blk_cleanup_queue(ns->queue); |
2486 | } | ||
2418 | } | 2487 | } |
2419 | } | 2488 | } |
2420 | 2489 | ||
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref) | |||
2495 | nvme_free_namespaces(dev); | 2564 | nvme_free_namespaces(dev); |
2496 | nvme_release_instance(dev); | 2565 | nvme_release_instance(dev); |
2497 | blk_mq_free_tag_set(&dev->tagset); | 2566 | blk_mq_free_tag_set(&dev->tagset); |
2567 | blk_put_queue(dev->admin_q); | ||
2498 | kfree(dev->queues); | 2568 | kfree(dev->queues); |
2499 | kfree(dev->entry); | 2569 | kfree(dev->entry); |
2500 | kfree(dev); | 2570 | kfree(dev); |
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev) | |||
2591 | } | 2661 | } |
2592 | 2662 | ||
2593 | nvme_init_queue(dev->queues[0], 0); | 2663 | nvme_init_queue(dev->queues[0], 0); |
2664 | result = nvme_alloc_admin_tags(dev); | ||
2665 | if (result) | ||
2666 | goto disable; | ||
2594 | 2667 | ||
2595 | result = nvme_setup_io_queues(dev); | 2668 | result = nvme_setup_io_queues(dev); |
2596 | if (result) | 2669 | if (result) |
2597 | goto disable; | 2670 | goto free_tags; |
2598 | 2671 | ||
2599 | nvme_set_irq_hints(dev); | 2672 | nvme_set_irq_hints(dev); |
2600 | 2673 | ||
2601 | return result; | 2674 | return result; |
2602 | 2675 | ||
2676 | free_tags: | ||
2677 | nvme_dev_remove_admin(dev); | ||
2603 | disable: | 2678 | disable: |
2604 | nvme_disable_queue(dev, 0); | 2679 | nvme_disable_queue(dev, 0); |
2605 | nvme_dev_list_remove(dev); | 2680 | nvme_dev_list_remove(dev); |
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev) | |||
2639 | dev->reset_workfn = nvme_remove_disks; | 2714 | dev->reset_workfn = nvme_remove_disks; |
2640 | queue_work(nvme_workq, &dev->reset_work); | 2715 | queue_work(nvme_workq, &dev->reset_work); |
2641 | spin_unlock(&dev_list_lock); | 2716 | spin_unlock(&dev_list_lock); |
2717 | } else { | ||
2718 | nvme_unfreeze_queues(dev); | ||
2719 | nvme_set_irq_hints(dev); | ||
2642 | } | 2720 | } |
2643 | dev->initialized = 1; | 2721 | dev->initialized = 1; |
2644 | return 0; | 2722 | return 0; |
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev) | |||
2776 | pci_set_drvdata(pdev, NULL); | 2854 | pci_set_drvdata(pdev, NULL); |
2777 | flush_work(&dev->reset_work); | 2855 | flush_work(&dev->reset_work); |
2778 | misc_deregister(&dev->miscdev); | 2856 | misc_deregister(&dev->miscdev); |
2779 | nvme_dev_remove(dev); | ||
2780 | nvme_dev_shutdown(dev); | 2857 | nvme_dev_shutdown(dev); |
2858 | nvme_dev_remove(dev); | ||
2781 | nvme_dev_remove_admin(dev); | 2859 | nvme_dev_remove_admin(dev); |
2782 | nvme_free_queues(dev, 0); | 2860 | nvme_free_queues(dev, 0); |
2783 | nvme_free_admin_tags(dev); | ||
2784 | nvme_release_prp_pools(dev); | 2861 | nvme_release_prp_pools(dev); |
2785 | kref_put(&dev->kref, nvme_free_dev); | 2862 | kref_put(&dev->kref, nvme_free_dev); |
2786 | } | 2863 | } |
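
Much of the nvme rework hangs off treating cq_vector == -1 as "no vector assigned": nvme_alloc_queue() no longer sets it, queue creation assigns it just before use, and nvme_suspend_queue() claims it under q_lock so a second suspend cannot free the IRQ twice. A toy userspace model of that handshake (the *_model names are hypothetical):

    #include <stdio.h>

    struct queue_model {
        int cq_vector;    /* -1 means unassigned */
    };

    static int suspend_queue_model(struct queue_model *q)
    {
        int vector;

        /* in the driver this check runs under nvmeq->q_lock */
        if (q->cq_vector == -1)
            return 1;            /* already suspended, nothing to free */
        vector = q->cq_vector;
        q->cq_vector = -1;
        printf("freeing vector %d\n", vector);
        return 0;
    }

    int main(void)
    {
        struct queue_model q = { .cq_vector = 0 };

        suspend_queue_model(&q);    /* frees vector 0 */
        suspend_queue_model(&q);    /* returns 1: no double free */
        return 0;
    }
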
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ef7c098708f..cdfbd21e3597 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
638 | goto out_put_disk; | 638 | goto out_put_disk; |
639 | 639 | ||
640 | q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); | 640 | q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); |
641 | if (!q) { | 641 | if (IS_ERR(q)) { |
642 | err = -ENOMEM; | 642 | err = -ENOMEM; |
643 | goto out_free_tags; | 643 | goto out_free_tags; |
644 | } | 644 | } |
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index 978b51eae2ec..ce3c1558cb0a 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c | |||
@@ -47,13 +47,6 @@ | |||
47 | 47 | ||
48 | #define DLN2_GPIO_MAX_PINS 32 | 48 | #define DLN2_GPIO_MAX_PINS 32 |
49 | 49 | ||
50 | struct dln2_irq_work { | ||
51 | struct work_struct work; | ||
52 | struct dln2_gpio *dln2; | ||
53 | int pin; | ||
54 | int type; | ||
55 | }; | ||
56 | |||
57 | struct dln2_gpio { | 50 | struct dln2_gpio { |
58 | struct platform_device *pdev; | 51 | struct platform_device *pdev; |
59 | struct gpio_chip gpio; | 52 | struct gpio_chip gpio; |
@@ -64,10 +57,12 @@ struct dln2_gpio { | |||
64 | */ | 57 | */ |
65 | DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); | 58 | DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); |
66 | 59 | ||
67 | DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); | 60 | /* active IRQs - not synced to hardware */ |
68 | DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS); | 61 | DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS); |
69 | DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); | 62 | /* active IRQS - synced to hardware */ |
70 | struct dln2_irq_work *irq_work; | 63 | DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS); |
64 | int irq_type[DLN2_GPIO_MAX_PINS]; | ||
65 | struct mutex irq_lock; | ||
71 | }; | 66 | }; |
72 | 67 | ||
73 | struct dln2_gpio_pin { | 68 | struct dln2_gpio_pin { |
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin) | |||
141 | return !!ret; | 136 | return !!ret; |
142 | } | 137 | } |
143 | 138 | ||
144 | static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, | 139 | static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, |
145 | unsigned int pin, int value) | 140 | unsigned int pin, int value) |
146 | { | 141 | { |
147 | struct dln2_gpio_pin_val req = { | 142 | struct dln2_gpio_pin_val req = { |
148 | .pin = cpu_to_le16(pin), | 143 | .pin = cpu_to_le16(pin), |
149 | .value = value, | 144 | .value = value, |
150 | }; | 145 | }; |
151 | 146 | ||
152 | dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, | 147 | return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, |
153 | sizeof(req)); | 148 | sizeof(req)); |
154 | } | 149 | } |
155 | 150 | ||
156 | #define DLN2_GPIO_DIRECTION_IN 0 | 151 | #define DLN2_GPIO_DIRECTION_IN 0 |
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
267 | static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | 262 | static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, |
268 | int value) | 263 | int value) |
269 | { | 264 | { |
265 | struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio); | ||
266 | int ret; | ||
267 | |||
268 | ret = dln2_gpio_pin_set_out_val(dln2, offset, value); | ||
269 | if (ret < 0) | ||
270 | return ret; | ||
271 | |||
270 | return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); | 272 | return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); |
271 | } | 273 | } |
272 | 274 | ||
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin, | |||
297 | &req, sizeof(req)); | 299 | &req, sizeof(req)); |
298 | } | 300 | } |
299 | 301 | ||
300 | static void dln2_irq_work(struct work_struct *w) | 302 | static void dln2_irq_unmask(struct irq_data *irqd) |
301 | { | ||
302 | struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work); | ||
303 | struct dln2_gpio *dln2 = iw->dln2; | ||
304 | u8 type = iw->type & DLN2_GPIO_EVENT_MASK; | ||
305 | |||
306 | if (test_bit(iw->pin, dln2->irqs_enabled)) | ||
307 | dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0); | ||
308 | else | ||
309 | dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0); | ||
310 | } | ||
311 | |||
312 | static void dln2_irq_enable(struct irq_data *irqd) | ||
313 | { | ||
314 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); | ||
315 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | ||
316 | int pin = irqd_to_hwirq(irqd); | ||
317 | |||
318 | set_bit(pin, dln2->irqs_enabled); | ||
319 | schedule_work(&dln2->irq_work[pin].work); | ||
320 | } | ||
321 | |||
322 | static void dln2_irq_disable(struct irq_data *irqd) | ||
323 | { | 303 | { |
324 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); | 304 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); |
325 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | 305 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); |
326 | int pin = irqd_to_hwirq(irqd); | 306 | int pin = irqd_to_hwirq(irqd); |
327 | 307 | ||
328 | clear_bit(pin, dln2->irqs_enabled); | 308 | set_bit(pin, dln2->unmasked_irqs); |
329 | schedule_work(&dln2->irq_work[pin].work); | ||
330 | } | 309 | } |
331 | 310 | ||
332 | static void dln2_irq_mask(struct irq_data *irqd) | 311 | static void dln2_irq_mask(struct irq_data *irqd) |
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd) | |||
335 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | 314 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); |
336 | int pin = irqd_to_hwirq(irqd); | 315 | int pin = irqd_to_hwirq(irqd); |
337 | 316 | ||
338 | set_bit(pin, dln2->irqs_masked); | 317 | clear_bit(pin, dln2->unmasked_irqs); |
339 | } | ||
340 | |||
341 | static void dln2_irq_unmask(struct irq_data *irqd) | ||
342 | { | ||
343 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); | ||
344 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | ||
345 | struct device *dev = dln2->gpio.dev; | ||
346 | int pin = irqd_to_hwirq(irqd); | ||
347 | |||
348 | if (test_and_clear_bit(pin, dln2->irqs_pending)) { | ||
349 | int irq; | ||
350 | |||
351 | irq = irq_find_mapping(dln2->gpio.irqdomain, pin); | ||
352 | if (!irq) { | ||
353 | dev_err(dev, "pin %d not mapped to IRQ\n", pin); | ||
354 | return; | ||
355 | } | ||
356 | |||
357 | generic_handle_irq(irq); | ||
358 | } | ||
359 | } | 318 | } |
360 | 319 | ||
361 | static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) | 320 | static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) |
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) | |||
366 | 325 | ||
367 | switch (type) { | 326 | switch (type) { |
368 | case IRQ_TYPE_LEVEL_HIGH: | 327 | case IRQ_TYPE_LEVEL_HIGH: |
369 | dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; | 328 | dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH; |
370 | break; | 329 | break; |
371 | case IRQ_TYPE_LEVEL_LOW: | 330 | case IRQ_TYPE_LEVEL_LOW: |
372 | dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; | 331 | dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW; |
373 | break; | 332 | break; |
374 | case IRQ_TYPE_EDGE_BOTH: | 333 | case IRQ_TYPE_EDGE_BOTH: |
375 | dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; | 334 | dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE; |
376 | break; | 335 | break; |
377 | case IRQ_TYPE_EDGE_RISING: | 336 | case IRQ_TYPE_EDGE_RISING: |
378 | dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; | 337 | dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING; |
379 | break; | 338 | break; |
380 | case IRQ_TYPE_EDGE_FALLING: | 339 | case IRQ_TYPE_EDGE_FALLING: |
381 | dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; | 340 | dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING; |
382 | break; | 341 | break; |
383 | default: | 342 | default: |
384 | return -EINVAL; | 343 | return -EINVAL; |
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) | |||
387 | return 0; | 346 | return 0; |
388 | } | 347 | } |
389 | 348 | ||
349 | static void dln2_irq_bus_lock(struct irq_data *irqd) | ||
350 | { | ||
351 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); | ||
352 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | ||
353 | |||
354 | mutex_lock(&dln2->irq_lock); | ||
355 | } | ||
356 | |||
357 | static void dln2_irq_bus_unlock(struct irq_data *irqd) | ||
358 | { | ||
359 | struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); | ||
360 | struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); | ||
361 | int pin = irqd_to_hwirq(irqd); | ||
362 | int enabled, unmasked; | ||
363 | unsigned type; | ||
364 | int ret; | ||
365 | |||
366 | enabled = test_bit(pin, dln2->enabled_irqs); | ||
367 | unmasked = test_bit(pin, dln2->unmasked_irqs); | ||
368 | |||
369 | if (enabled != unmasked) { | ||
370 | if (unmasked) { | ||
371 | type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK; | ||
372 | set_bit(pin, dln2->enabled_irqs); | ||
373 | } else { | ||
374 | type = DLN2_GPIO_EVENT_NONE; | ||
375 | clear_bit(pin, dln2->enabled_irqs); | ||
376 | } | ||
377 | |||
378 | ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0); | ||
379 | if (ret) | ||
380 | dev_err(dln2->gpio.dev, "failed to set event\n"); | ||
381 | } | ||
382 | |||
383 | mutex_unlock(&dln2->irq_lock); | ||
384 | } | ||
385 | |||
390 | static struct irq_chip dln2_gpio_irqchip = { | 386 | static struct irq_chip dln2_gpio_irqchip = { |
391 | .name = "dln2-irq", | 387 | .name = "dln2-irq", |
392 | .irq_enable = dln2_irq_enable, | ||
393 | .irq_disable = dln2_irq_disable, | ||
394 | .irq_mask = dln2_irq_mask, | 388 | .irq_mask = dln2_irq_mask, |
395 | .irq_unmask = dln2_irq_unmask, | 389 | .irq_unmask = dln2_irq_unmask, |
396 | .irq_set_type = dln2_irq_set_type, | 390 | .irq_set_type = dln2_irq_set_type, |
391 | .irq_bus_lock = dln2_irq_bus_lock, | ||
392 | .irq_bus_sync_unlock = dln2_irq_bus_unlock, | ||
397 | }; | 393 | }; |
398 | 394 | ||
399 | static void dln2_gpio_event(struct platform_device *pdev, u16 echo, | 395 | static void dln2_gpio_event(struct platform_device *pdev, u16 echo, |
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo, | |||
425 | return; | 421 | return; |
426 | } | 422 | } |
427 | 423 | ||
428 | if (!test_bit(pin, dln2->irqs_enabled)) | 424 | switch (dln2->irq_type[pin]) { |
429 | return; | ||
430 | if (test_bit(pin, dln2->irqs_masked)) { | ||
431 | set_bit(pin, dln2->irqs_pending); | ||
432 | return; | ||
433 | } | ||
434 | |||
435 | switch (dln2->irq_work[pin].type) { | ||
436 | case DLN2_GPIO_EVENT_CHANGE_RISING: | 425 | case DLN2_GPIO_EVENT_CHANGE_RISING: |
437 | if (event->value) | 426 | if (event->value) |
438 | generic_handle_irq(irq); | 427 | generic_handle_irq(irq); |
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev) | |||
451 | struct dln2_gpio *dln2; | 440 | struct dln2_gpio *dln2; |
452 | struct device *dev = &pdev->dev; | 441 | struct device *dev = &pdev->dev; |
453 | int pins; | 442 | int pins; |
454 | int i, ret; | 443 | int ret; |
455 | 444 | ||
456 | pins = dln2_gpio_get_pin_count(pdev); | 445 | pins = dln2_gpio_get_pin_count(pdev); |
457 | if (pins < 0) { | 446 | if (pins < 0) { |
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev) | |||
467 | if (!dln2) | 456 | if (!dln2) |
468 | return -ENOMEM; | 457 | return -ENOMEM; |
469 | 458 | ||
470 | dln2->irq_work = devm_kcalloc(&pdev->dev, pins, | 459 | mutex_init(&dln2->irq_lock); |
471 | sizeof(struct dln2_irq_work), GFP_KERNEL); | ||
472 | if (!dln2->irq_work) | ||
473 | return -ENOMEM; | ||
474 | for (i = 0; i < pins; i++) { | ||
475 | INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work); | ||
476 | dln2->irq_work[i].pin = i; | ||
477 | dln2->irq_work[i].dln2 = dln2; | ||
478 | } | ||
479 | 460 | ||
480 | dln2->pdev = pdev; | 461 | dln2->pdev = pdev; |
481 | 462 | ||
@@ -529,11 +510,8 @@ out: | |||
529 | static int dln2_gpio_remove(struct platform_device *pdev) | 510 | static int dln2_gpio_remove(struct platform_device *pdev) |
530 | { | 511 | { |
531 | struct dln2_gpio *dln2 = platform_get_drvdata(pdev); | 512 | struct dln2_gpio *dln2 = platform_get_drvdata(pdev); |
532 | int i; | ||
533 | 513 | ||
534 | dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); | 514 | dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); |
535 | for (i = 0; i < dln2->gpio.ngpio; i++) | ||
536 | flush_work(&dln2->irq_work[i].work); | ||
537 | gpiochip_remove(&dln2->gpio); | 515 | gpiochip_remove(&dln2->gpio); |
538 | 516 | ||
539 | return 0; | 517 | return 0; |
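
The dln2 driver moves its USB-side IRQ configuration out of deferred work and into irq_bus_lock/irq_bus_sync_unlock, where sleeping is allowed: mask/unmask just flip bits, and the hardware is synced once at unlock time. A hedged sketch of the general pattern (the example_* names are hypothetical; per-pin bitmaps are reduced to plain words):

    #include <linux/irq.h>
    #include <linux/mutex.h>

    struct example_chip {
        struct mutex irq_lock;
        unsigned long unmasked;    /* state requested by the irq core */
        unsigned long enabled;     /* state last pushed to hardware */
    };

    static void example_irq_bus_lock(struct irq_data *d)
    {
        struct example_chip *c = irq_data_get_irq_chip_data(d);

        mutex_lock(&c->irq_lock);
    }

    static void example_irq_bus_sync_unlock(struct irq_data *d)
    {
        struct example_chip *c = irq_data_get_irq_chip_data(d);

        if (c->unmasked != c->enabled) {
            /* ...sleeping bus transfer to program the device... */
            c->enabled = c->unmasked;
        }
        mutex_unlock(&c->irq_lock);
    }
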
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 09daaf2aeb56..3a5a71050559 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c | |||
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev) | |||
441 | err = gpiochip_add(gc); | 441 | err = gpiochip_add(gc); |
442 | if (err) { | 442 | if (err) { |
443 | dev_err(&ofdev->dev, "Could not add gpiochip\n"); | 443 | dev_err(&ofdev->dev, "Could not add gpiochip\n"); |
444 | irq_domain_remove(priv->domain); | 444 | if (priv->domain) |
445 | irq_domain_remove(priv->domain); | ||
445 | return err; | 446 | return err; |
446 | } | 447 | } |
447 | 448 | ||
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index a82e542ffc21..0b380603a578 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c | |||
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci) | |||
4880 | byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ | 4880 | byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ |
4881 | byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; | 4881 | byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; |
4882 | byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; | 4882 | byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; |
4883 | byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; | 4883 | byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00"; |
4884 | byte force_mt_info = false; | 4884 | byte force_mt_info = false; |
4885 | byte dir; | 4885 | byte dir; |
4886 | dword d; | 4886 | dword d; |
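
The eicon fix is worth spelling out: "\0x00" is not one 0x00 byte but four characters (a NUL followed by 'x', '0', '0'), so the old CONF_Ind initializer carried garbage beyond the intended bytes. A runnable userspace demonstration:

    #include <stdio.h>

    int main(void)
    {
        const char bad[]  = "\0x00";  /* 4 bytes: '\0','x','0','0' */
        const char good[] = "\x00";   /* 1 byte:  0x00 */

        printf("%zu %zu\n", sizeof(bad) - 1, sizeof(good) - 1); /* 4 1 */
        return 0;
    }
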
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index 26515c27ea8c..25e419752a7b 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c | |||
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev, | |||
330 | led_dat->sata = 0; | 330 | led_dat->sata = 0; |
331 | led_dat->cdev.brightness = LED_OFF; | 331 | led_dat->cdev.brightness = LED_OFF; |
332 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; | 332 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; |
333 | /* | ||
334 | * If available, expose the SATA activity blink capability through | ||
335 | * a "sata" sysfs attribute. | ||
336 | */ | ||
337 | if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) | ||
338 | led_dat->cdev.groups = netxbig_led_groups; | ||
339 | led_dat->mode_addr = template->mode_addr; | 333 | led_dat->mode_addr = template->mode_addr; |
340 | led_dat->mode_val = template->mode_val; | 334 | led_dat->mode_val = template->mode_val; |
341 | led_dat->bright_addr = template->bright_addr; | 335 | led_dat->bright_addr = template->bright_addr; |
342 | led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; | 336 | led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; |
343 | led_dat->timer = pdata->timer; | 337 | led_dat->timer = pdata->timer; |
344 | led_dat->num_timer = pdata->num_timer; | 338 | led_dat->num_timer = pdata->num_timer; |
339 | /* | ||
340 | * If available, expose the SATA activity blink capability through | ||
341 | * a "sata" sysfs attribute. | ||
342 | */ | ||
343 | if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) | ||
344 | led_dat->cdev.groups = netxbig_led_groups; | ||
345 | 345 | ||
346 | return led_classdev_register(&pdev->dev, &led_dat->cdev); | 346 | return led_classdev_register(&pdev->dev, &led_dat->cdev); |
347 | } | 347 | } |
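
The netxbig change is purely an ordering fix: the sata-capability test reads led_dat->mode_val, which was previously checked before that pointer was assigned from the template. A toy reduction of the bug (struct led here is a stand-in, not the driver type):

    #include <stdio.h>

    struct led { const int *mode_val; };

    int main(void)
    {
        static const int modes[] = { 1, 2, 3 };
        struct led led_dat = { 0 };

        /* wrong order: led_dat.mode_val is still NULL here, so
         * led_dat.mode_val[0] would be a NULL dereference */

        led_dat.mode_val = modes;           /* assign first... */
        if (led_dat.mode_val[0] != -1)      /* ...then test: well-defined */
            printf("sata attr exposed\n");
        return 0;
    }
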
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 51fd6b524371..d1b55fe62817 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, | |||
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
104 | { | ||
105 | struct cxl_context *ctx = vma->vm_file->private_data; | ||
106 | unsigned long address = (unsigned long)vmf->virtual_address; | ||
107 | u64 area, offset; | ||
108 | |||
109 | offset = vmf->pgoff << PAGE_SHIFT; | ||
110 | |||
111 | pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", | ||
112 | __func__, ctx->pe, address, offset); | ||
113 | |||
114 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { | ||
115 | area = ctx->afu->psn_phys; | ||
116 | if (offset > ctx->afu->adapter->ps_size) | ||
117 | return VM_FAULT_SIGBUS; | ||
118 | } else { | ||
119 | area = ctx->psn_phys; | ||
120 | if (offset > ctx->psn_size) | ||
121 | return VM_FAULT_SIGBUS; | ||
122 | } | ||
123 | |||
124 | mutex_lock(&ctx->status_mutex); | ||
125 | |||
126 | if (ctx->status != STARTED) { | ||
127 | mutex_unlock(&ctx->status_mutex); | ||
128 | pr_devel("%s: Context not started, failing problem state access\n", __func__); | ||
129 | return VM_FAULT_SIGBUS; | ||
130 | } | ||
131 | |||
132 | vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); | ||
133 | |||
134 | mutex_unlock(&ctx->status_mutex); | ||
135 | |||
136 | return VM_FAULT_NOPAGE; | ||
137 | } | ||
138 | |||
139 | static const struct vm_operations_struct cxl_mmap_vmops = { | ||
140 | .fault = cxl_mmap_fault, | ||
141 | }; | ||
142 | |||
103 | /* | 143 | /* |
104 | * Map a per-context mmio space into the given vma. | 144 | * Map a per-context mmio space into the given vma. |
105 | */ | 145 | */ |
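
The new cxl_mmap_fault() switches the problem-state area from an eager mapping to on-demand faulting: each access is bounds-checked against the right area (per-AFU in dedicated mode, per-context otherwise), refused with SIGBUS unless the context has reached STARTED, and then satisfied by inserting a single PFN. A condensed sketch of that style of handler under the fault API of this kernel generation (region_phys, region_size and device_started are placeholders, not cxl symbols):

    static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            unsigned long addr = (unsigned long)vmf->virtual_address;
            u64 off = vmf->pgoff << PAGE_SHIFT;

            if (off > region_size)          /* reject out-of-range offsets */
                    return VM_FAULT_SIGBUS;
            if (!device_started())          /* reject until hardware is ready */
                    return VM_FAULT_SIGBUS;

            vm_insert_pfn(vma, addr, (region_phys + off) >> PAGE_SHIFT);
            return VM_FAULT_NOPAGE;         /* PTE was installed by hand */
    }
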
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) | |||
108 | u64 len = vma->vm_end - vma->vm_start; | 148 | u64 len = vma->vm_end - vma->vm_start; |
109 | len = min(len, ctx->psn_size); | 149 | len = min(len, ctx->psn_size); |
110 | 150 | ||
111 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { | 151 | if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { |
112 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 152 | /* make sure there is a valid per process space for this AFU */ |
113 | return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); | 153 | if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { |
114 | } | 154 | pr_devel("AFU doesn't support mmio space\n"); |
155 | return -EINVAL; | ||
156 | } | ||
115 | 157 | ||
116 | /* make sure there is a valid per process space for this AFU */ | 158 | /* Can't mmap until the AFU is enabled */ |
117 | if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { | 159 | if (!ctx->afu->enabled) |
118 | pr_devel("AFU doesn't support mmio space\n"); | 160 | return -EBUSY; |
119 | return -EINVAL; | ||
120 | } | 161 | } |
121 | 162 | ||
122 | /* Can't mmap until the AFU is enabled */ | ||
123 | if (!ctx->afu->enabled) | ||
124 | return -EBUSY; | ||
125 | |||
126 | pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, | 163 | pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, |
127 | ctx->psn_phys, ctx->pe , ctx->master); | 164 | ctx->psn_phys, ctx->pe , ctx->master); |
128 | 165 | ||
166 | vma->vm_flags |= VM_IO | VM_PFNMAP; | ||
129 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 167 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
130 | return vm_iomap_memory(vma, ctx->psn_phys, len); | 168 | vma->vm_ops = &cxl_mmap_vmops; |
169 | return 0; | ||
131 | } | 170 | } |
132 | 171 | ||
133 | /* | 172 | /* |
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx) | |||
150 | afu_release_irqs(ctx); | 189 | afu_release_irqs(ctx); |
151 | flush_work(&ctx->fault_work); /* Only needed for dedicated process */ | 190 | flush_work(&ctx->fault_work); /* Only needed for dedicated process */ |
152 | wake_up_all(&ctx->wq); | 191 | wake_up_all(&ctx->wq); |
153 | |||
154 | /* Release Problem State Area mapping */ | ||
155 | mutex_lock(&ctx->mapping_lock); | ||
156 | if (ctx->mapping) | ||
157 | unmap_mapping_range(ctx->mapping, 0, 0, 1); | ||
158 | mutex_unlock(&ctx->mapping_lock); | ||
159 | } | 192 | } |
160 | 193 | ||
161 | /* | 194 | /* |
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu) | |||
184 | * created and torn down after the IDR removed | 217 | * created and torn down after the IDR removed |
185 | */ | 218 | */ |
186 | __detach_context(ctx); | 219 | __detach_context(ctx); |
220 | |||
221 | /* | ||
222 | * We are force detaching - remove any active PSA mappings so | ||
223 | * userspace cannot interfere with the card if it comes back. | ||
224 | * Easiest way to exercise this is to unbind and rebind the | ||
225 | * driver via sysfs while it is in use. | ||
226 | */ | ||
227 | mutex_lock(&ctx->mapping_lock); | ||
228 | if (ctx->mapping) | ||
229 | unmap_mapping_range(ctx->mapping, 0, 0, 1); | ||
230 | mutex_unlock(&ctx->mapping_lock); | ||
187 | } | 231 | } |
188 | mutex_unlock(&afu->contexts_lock); | 232 | mutex_unlock(&afu->contexts_lock); |
189 | } | 233 | } |
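
The unmap_mapping_range() call moves from the common __detach_context() into the force-detach loop, narrowing it to the case the new comment describes (driver unbind while userspace still holds a mapping). The argument values deserve a gloss: holebegin 0 with holelen 0 means "zap from offset 0 to the end of the file", and even_cows=1 removes private COW copies too, so every user PTE disappears and the next touch re-enters cxl_mmap_fault(), which refuses access now that the context is no longer STARTED:

    /* Revoke every userspace mapping of this file; holelen == 0 -> "to EOF". */
    unmap_mapping_range(ctx->mapping, /* holebegin */ 0, /* holelen */ 0,
                        /* even_cows */ 1);
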
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index e9f2f10dbb37..b15d8113877c 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c | |||
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, | |||
140 | 140 | ||
141 | pr_devel("%s: pe: %i\n", __func__, ctx->pe); | 141 | pr_devel("%s: pe: %i\n", __func__, ctx->pe); |
142 | 142 | ||
143 | mutex_lock(&ctx->status_mutex); | 143 | /* Do this outside the status_mutex to avoid a circular dependency with |
144 | if (ctx->status != OPENED) { | 144 | * the locking in cxl_mmap_fault() */ |
145 | rc = -EIO; | ||
146 | goto out; | ||
147 | } | ||
148 | |||
149 | if (copy_from_user(&work, uwork, | 145 | if (copy_from_user(&work, uwork, |
150 | sizeof(struct cxl_ioctl_start_work))) { | 146 | sizeof(struct cxl_ioctl_start_work))) { |
151 | rc = -EFAULT; | 147 | rc = -EFAULT; |
152 | goto out; | 148 | goto out; |
153 | } | 149 | } |
154 | 150 | ||
151 | mutex_lock(&ctx->status_mutex); | ||
152 | if (ctx->status != OPENED) { | ||
153 | rc = -EIO; | ||
154 | goto out; | ||
155 | } | ||
156 | |||
155 | /* | 157 | /* |
156 | * if any of the reserved fields are set or any of the unused | 158 | * if any of the reserved fields are set or any of the unused |
157 | * flags are set it's invalid | 159 | * flags are set it's invalid |
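
The reorder in afu_ioctl_start_work() is a lock-ordering fix, as the new comment hints: copy_from_user() may fault and take mmap_sem, while cxl_mmap_fault() runs with mmap_sem already held and then takes status_mutex, so copying while holding the mutex creates an ABBA inversion. The safe shape is to finish all potentially-faulting work before taking the mutex:

    /* ioctl path (old):  status_mutex -> (page fault) mmap_sem
     * fault path:        mmap_sem     -> status_mutex        => ABBA deadlock
     */
    if (copy_from_user(&work, uwork, sizeof(work)))     /* may fault: do it unlocked */
            return -EFAULT;

    mutex_lock(&ctx->status_mutex);                     /* no faulting from here on */
    /* ... validate ctx->status and the copied request ... */
    mutex_unlock(&ctx->status_mutex);
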
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index e3e56d35f0ee..970314e0aac8 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c | |||
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { | |||
247 | { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, | 247 | { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, |
248 | { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, | 248 | { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, |
249 | { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, | 249 | { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, |
250 | { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, | ||
250 | { "PNP0D40" }, | 251 | { "PNP0D40" }, |
251 | { }, | 252 | { }, |
252 | }; | 253 | }; |
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { | |||
257 | { "INT33BB" }, | 258 | { "INT33BB" }, |
258 | { "INT33C6" }, | 259 | { "INT33C6" }, |
259 | { "INT3436" }, | 260 | { "INT3436" }, |
261 | { "INT344D" }, | ||
260 | { "PNP0D40" }, | 262 | { "PNP0D40" }, |
261 | { }, | 263 | { }, |
262 | }; | 264 | }; |
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 03427755b902..4f38554ce679 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c | |||
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = { | |||
993 | .subdevice = PCI_ANY_ID, | 993 | .subdevice = PCI_ANY_ID, |
994 | .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, | 994 | .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, |
995 | }, | 995 | }, |
996 | |||
997 | { | ||
998 | .vendor = PCI_VENDOR_ID_INTEL, | ||
999 | .device = PCI_DEVICE_ID_INTEL_SPT_EMMC, | ||
1000 | .subvendor = PCI_ANY_ID, | ||
1001 | .subdevice = PCI_ANY_ID, | ||
1002 | .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, | ||
1003 | }, | ||
1004 | |||
1005 | { | ||
1006 | .vendor = PCI_VENDOR_ID_INTEL, | ||
1007 | .device = PCI_DEVICE_ID_INTEL_SPT_SDIO, | ||
1008 | .subvendor = PCI_ANY_ID, | ||
1009 | .subdevice = PCI_ANY_ID, | ||
1010 | .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, | ||
1011 | }, | ||
1012 | |||
1013 | { | ||
1014 | .vendor = PCI_VENDOR_ID_INTEL, | ||
1015 | .device = PCI_DEVICE_ID_INTEL_SPT_SD, | ||
1016 | .subvendor = PCI_ANY_ID, | ||
1017 | .subdevice = PCI_ANY_ID, | ||
1018 | .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, | ||
1019 | }, | ||
1020 | |||
996 | { | 1021 | { |
997 | .vendor = PCI_VENDOR_ID_O2, | 1022 | .vendor = PCI_VENDOR_ID_O2, |
998 | .device = PCI_DEVICE_ID_O2_8120, | 1023 | .device = PCI_DEVICE_ID_O2_8120, |
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index d57c3d169914..1ec684d06d54 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h | |||
@@ -21,6 +21,9 @@ | |||
21 | #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 | 21 | #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 |
22 | #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 | 22 | #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 |
23 | #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 | 23 | #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 |
24 | #define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b | ||
25 | #define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c | ||
26 | #define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d | ||
24 | 27 | ||
25 | /* | 28 | /* |
26 | * PCI registers | 29 | * PCI registers |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 45238871192d..ca3424e7ef71 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
300 | if (IS_ERR(host)) | 300 | if (IS_ERR(host)) |
301 | return PTR_ERR(host); | 301 | return PTR_ERR(host); |
302 | 302 | ||
303 | if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { | ||
304 | ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); | ||
305 | if (ret < 0) | ||
306 | goto err_mbus_win; | ||
307 | } | ||
308 | |||
309 | |||
310 | pltfm_host = sdhci_priv(host); | 303 | pltfm_host = sdhci_priv(host); |
311 | pltfm_host->priv = pxa; | 304 | pltfm_host->priv = pxa; |
312 | 305 | ||
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
325 | if (!IS_ERR(pxa->clk_core)) | 318 | if (!IS_ERR(pxa->clk_core)) |
326 | clk_prepare_enable(pxa->clk_core); | 319 | clk_prepare_enable(pxa->clk_core); |
327 | 320 | ||
321 | if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { | ||
322 | ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); | ||
323 | if (ret < 0) | ||
324 | goto err_mbus_win; | ||
325 | } | ||
326 | |||
328 | /* enable 1/8V DDR capable */ | 327 | /* enable 1/8V DDR capable */ |
329 | host->mmc->caps |= MMC_CAP_1_8V_DDR; | 328 | host->mmc->caps |= MMC_CAP_1_8V_DDR; |
330 | 329 | ||
@@ -396,11 +395,11 @@ err_add_host: | |||
396 | pm_runtime_disable(&pdev->dev); | 395 | pm_runtime_disable(&pdev->dev); |
397 | err_of_parse: | 396 | err_of_parse: |
398 | err_cd_req: | 397 | err_cd_req: |
398 | err_mbus_win: | ||
399 | clk_disable_unprepare(pxa->clk_io); | 399 | clk_disable_unprepare(pxa->clk_io); |
400 | if (!IS_ERR(pxa->clk_core)) | 400 | if (!IS_ERR(pxa->clk_core)) |
401 | clk_disable_unprepare(pxa->clk_core); | 401 | clk_disable_unprepare(pxa->clk_core); |
402 | err_clk_get: | 402 | err_clk_get: |
403 | err_mbus_win: | ||
404 | sdhci_pltfm_free(pdev); | 403 | sdhci_pltfm_free(pdev); |
405 | return ret; | 404 | return ret; |
406 | } | 405 | } |
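
Note that the two pxav3 hunks are self-consistent: because the mbus-window setup now runs after clk_prepare_enable(), its failure label has to sit above the clock-disable statements so the unwind releases everything acquired up to the failure point. Kernel error paths conventionally undo acquisitions in reverse order, one label per acquisition (late_setup_step and err_late below are illustrative names):

    ret = late_setup_step();                        /* runs with clocks enabled */
    if (ret < 0)
            goto err_late;                          /* must still shut clocks off */
    ...
    err_late:
            clk_disable_unprepare(pxa->clk_io);     /* reverse acquisition order */
    err_clk_get:
            sdhci_pltfm_free(pdev);
            return ret;
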
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index cbb245b58538..f1a488ee432f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host) | |||
259 | 259 | ||
260 | del_timer_sync(&host->tuning_timer); | 260 | del_timer_sync(&host->tuning_timer); |
261 | host->flags &= ~SDHCI_NEEDS_RETUNING; | 261 | host->flags &= ~SDHCI_NEEDS_RETUNING; |
262 | host->mmc->max_blk_count = | ||
263 | (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; | ||
264 | } | 262 | } |
265 | sdhci_enable_card_detection(host); | 263 | sdhci_enable_card_detection(host); |
266 | } | 264 | } |
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, | |||
1273 | spin_unlock_irq(&host->lock); | 1271 | spin_unlock_irq(&host->lock); |
1274 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); | 1272 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); |
1275 | spin_lock_irq(&host->lock); | 1273 | spin_lock_irq(&host->lock); |
1274 | |||
1275 | if (mode != MMC_POWER_OFF) | ||
1276 | sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); | ||
1277 | else | ||
1278 | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); | ||
1279 | |||
1276 | return; | 1280 | return; |
1277 | } | 1281 | } |
1278 | 1282 | ||
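
The sdhci_set_power() addition covers hosts whose card voltage comes from an external vmmc regulator: the regulator supplies VDD, but on some controllers the SD Bus Power bit in the Power Control register still gates the bus outputs, so the driver now mirrors the regulator state into the register:

    if (mode != MMC_POWER_OFF)
            sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);  /* bus power on */
    else
            sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);               /* bus power off */
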
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1353 | 1357 | ||
1354 | sdhci_runtime_pm_get(host); | 1358 | sdhci_runtime_pm_get(host); |
1355 | 1359 | ||
1360 | present = mmc_gpio_get_cd(host->mmc); | ||
1361 | |||
1356 | spin_lock_irqsave(&host->lock, flags); | 1362 | spin_lock_irqsave(&host->lock, flags); |
1357 | 1363 | ||
1358 | WARN_ON(host->mrq != NULL); | 1364 | WARN_ON(host->mrq != NULL); |
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
1381 | * zero: cd-gpio is used, and card is removed | 1387 | * zero: cd-gpio is used, and card is removed |
1382 | * one: cd-gpio is used, and card is present | 1388 | * one: cd-gpio is used, and card is present |
1383 | */ | 1389 | */ |
1384 | present = mmc_gpio_get_cd(host->mmc); | ||
1385 | if (present < 0) { | 1390 | if (present < 0) { |
1386 | /* If polling, assume that the card is always present. */ | 1391 | /* If polling, assume that the card is always present. */ |
1387 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 1392 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) |
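
Hoisting mmc_gpio_get_cd() above spin_lock_irqsave() matters because the card-detect line can live behind a sleeping bus (a GPIO expander on I2C, say), and sleeping inside a spinlocked, interrupts-off region is invalid. The general pattern is to sample sleep-prone state first and use the cached value under the lock:

    present = mmc_gpio_get_cd(host->mmc);   /* may sleep: call it unlocked */

    spin_lock_irqsave(&host->lock, flags);
    /* ... use 'present'; never call back into the GPIO layer here ... */
    spin_unlock_irqrestore(&host->lock, flags);
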
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc) | |||
1880 | return !(present_state & SDHCI_DATA_LVL_MASK); | 1885 | return !(present_state & SDHCI_DATA_LVL_MASK); |
1881 | } | 1886 | } |
1882 | 1887 | ||
1888 | static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) | ||
1889 | { | ||
1890 | struct sdhci_host *host = mmc_priv(mmc); | ||
1891 | unsigned long flags; | ||
1892 | |||
1893 | spin_lock_irqsave(&host->lock, flags); | ||
1894 | host->flags |= SDHCI_HS400_TUNING; | ||
1895 | spin_unlock_irqrestore(&host->lock, flags); | ||
1896 | |||
1897 | return 0; | ||
1898 | } | ||
1899 | |||
1883 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | 1900 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) |
1884 | { | 1901 | { |
1885 | struct sdhci_host *host = mmc_priv(mmc); | 1902 | struct sdhci_host *host = mmc_priv(mmc); |
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1887 | int tuning_loop_counter = MAX_TUNING_LOOP; | 1904 | int tuning_loop_counter = MAX_TUNING_LOOP; |
1888 | int err = 0; | 1905 | int err = 0; |
1889 | unsigned long flags; | 1906 | unsigned long flags; |
1907 | unsigned int tuning_count = 0; | ||
1908 | bool hs400_tuning; | ||
1890 | 1909 | ||
1891 | sdhci_runtime_pm_get(host); | 1910 | sdhci_runtime_pm_get(host); |
1892 | spin_lock_irqsave(&host->lock, flags); | 1911 | spin_lock_irqsave(&host->lock, flags); |
1893 | 1912 | ||
1913 | hs400_tuning = host->flags & SDHCI_HS400_TUNING; | ||
1914 | host->flags &= ~SDHCI_HS400_TUNING; | ||
1915 | |||
1916 | if (host->tuning_mode == SDHCI_TUNING_MODE_1) | ||
1917 | tuning_count = host->tuning_count; | ||
1918 | |||
1894 | /* | 1919 | /* |
1895 | * The Host Controller needs tuning only in case of SDR104 mode | 1920 | * The Host Controller needs tuning only in case of SDR104 mode |
1896 | * and for SDR50 mode when Use Tuning for SDR50 is set in the | 1921 | * and for SDR50 mode when Use Tuning for SDR50 is set in the |
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1899 | * tuning function has to be executed. | 1924 | * tuning function has to be executed. |
1900 | */ | 1925 | */ |
1901 | switch (host->timing) { | 1926 | switch (host->timing) { |
1927 | /* HS400 tuning is done in HS200 mode */ | ||
1902 | case MMC_TIMING_MMC_HS400: | 1928 | case MMC_TIMING_MMC_HS400: |
1929 | err = -EINVAL; | ||
1930 | goto out_unlock; | ||
1931 | |||
1903 | case MMC_TIMING_MMC_HS200: | 1932 | case MMC_TIMING_MMC_HS200: |
1933 | /* | ||
1934 | * Periodic re-tuning for HS400 is not expected to be needed, so | ||
1935 | * disable it here. | ||
1936 | */ | ||
1937 | if (hs400_tuning) | ||
1938 | tuning_count = 0; | ||
1939 | break; | ||
1940 | |||
1904 | case MMC_TIMING_UHS_SDR104: | 1941 | case MMC_TIMING_UHS_SDR104: |
1905 | break; | 1942 | break; |
1906 | 1943 | ||
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
1911 | /* FALLTHROUGH */ | 1948 | /* FALLTHROUGH */ |
1912 | 1949 | ||
1913 | default: | 1950 | default: |
1914 | spin_unlock_irqrestore(&host->lock, flags); | 1951 | goto out_unlock; |
1915 | sdhci_runtime_pm_put(host); | ||
1916 | return 0; | ||
1917 | } | 1952 | } |
1918 | 1953 | ||
1919 | if (host->ops->platform_execute_tuning) { | 1954 | if (host->ops->platform_execute_tuning) { |
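
The tuning rework encodes the HS400 rule: tuning is never executed in HS400 timing itself; it is performed in HS200 and the result carried over. prepare_hs400_tuning() raises SDHCI_HS400_TUNING so that the next execute_tuning() call, arriving in HS200, knows to suppress the periodic re-tune timer by zeroing tuning_count. The resulting control flow, condensed from the hunks above:

    switch (host->timing) {
    case MMC_TIMING_MMC_HS400:
            err = -EINVAL;                  /* never tune in HS400 itself */
            goto out_unlock;
    case MMC_TIMING_MMC_HS200:
            if (hs400_tuning)               /* tuning on behalf of HS400 */
                    tuning_count = 0;       /* result is reused; no re-tuning */
            break;
    ...
    }
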
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) | |||
2037 | } | 2072 | } |
2038 | 2073 | ||
2039 | out: | 2074 | out: |
2040 | /* | 2075 | host->flags &= ~SDHCI_NEEDS_RETUNING; |
2041 | * If this is the very first time we are here, we start the retuning | 2076 | |
2042 | * timer. Since only during the first time, SDHCI_NEEDS_RETUNING | 2077 | if (tuning_count) { |
2043 | * flag won't be set, we check this condition before actually starting | ||
2044 | * the timer. | ||
2045 | */ | ||
2046 | if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && | ||
2047 | (host->tuning_mode == SDHCI_TUNING_MODE_1)) { | ||
2048 | host->flags |= SDHCI_USING_RETUNING_TIMER; | 2078 | host->flags |= SDHCI_USING_RETUNING_TIMER; |
2049 | mod_timer(&host->tuning_timer, jiffies + | 2079 | mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); |
2050 | host->tuning_count * HZ); | ||
2051 | /* Tuning mode 1 limits the maximum data length to 4MB */ | ||
2052 | mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; | ||
2053 | } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { | ||
2054 | host->flags &= ~SDHCI_NEEDS_RETUNING; | ||
2055 | /* Reload the new initial value for timer */ | ||
2056 | mod_timer(&host->tuning_timer, jiffies + | ||
2057 | host->tuning_count * HZ); | ||
2058 | } | 2080 | } |
2059 | 2081 | ||
2060 | /* | 2082 | /* |
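
The exit-path hunk collapses two near-duplicate branches into one: SDHCI_NEEDS_RETUNING is cleared unconditionally, and the timer is (re)armed only when mode-1 periodic tuning applies, since tuning_count was set earlier solely in that case. (The old 4MB max_blk_count clamp disappears along with it; per the comment added at sdhci_add_host() below, the 512KiB SDMA request cap is stricter anyway.)

    host->flags &= ~SDHCI_NEEDS_RETUNING;
    if (tuning_count) {             /* nonzero only for tuning mode 1 */
            host->flags |= SDHCI_USING_RETUNING_TIMER;
            mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
    }
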
@@ -2070,6 +2092,7 @@ out: | |||
2070 | 2092 | ||
2071 | sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); | 2093 | sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); |
2072 | sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); | 2094 | sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); |
2095 | out_unlock: | ||
2073 | spin_unlock_irqrestore(&host->lock, flags); | 2096 | spin_unlock_irqrestore(&host->lock, flags); |
2074 | sdhci_runtime_pm_put(host); | 2097 | sdhci_runtime_pm_put(host); |
2075 | 2098 | ||
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc) | |||
2110 | { | 2133 | { |
2111 | struct sdhci_host *host = mmc_priv(mmc); | 2134 | struct sdhci_host *host = mmc_priv(mmc); |
2112 | unsigned long flags; | 2135 | unsigned long flags; |
2136 | int present; | ||
2113 | 2137 | ||
2114 | /* First check if client has provided their own card event */ | 2138 | /* First check if client has provided their own card event */ |
2115 | if (host->ops->card_event) | 2139 | if (host->ops->card_event) |
2116 | host->ops->card_event(host); | 2140 | host->ops->card_event(host); |
2117 | 2141 | ||
2142 | present = sdhci_do_get_cd(host); | ||
2143 | |||
2118 | spin_lock_irqsave(&host->lock, flags); | 2144 | spin_lock_irqsave(&host->lock, flags); |
2119 | 2145 | ||
2120 | /* Check host->mrq first in case we are runtime suspended */ | 2146 | /* Check host->mrq first in case we are runtime suspended */ |
2121 | if (host->mrq && !sdhci_do_get_cd(host)) { | 2147 | if (host->mrq && !present) { |
2122 | pr_err("%s: Card removed during transfer!\n", | 2148 | pr_err("%s: Card removed during transfer!\n", |
2123 | mmc_hostname(host->mmc)); | 2149 | mmc_hostname(host->mmc)); |
2124 | pr_err("%s: Resetting controller.\n", | 2150 | pr_err("%s: Resetting controller.\n", |
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = { | |||
2142 | .hw_reset = sdhci_hw_reset, | 2168 | .hw_reset = sdhci_hw_reset, |
2143 | .enable_sdio_irq = sdhci_enable_sdio_irq, | 2169 | .enable_sdio_irq = sdhci_enable_sdio_irq, |
2144 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, | 2170 | .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, |
2171 | .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, | ||
2145 | .execute_tuning = sdhci_execute_tuning, | 2172 | .execute_tuning = sdhci_execute_tuning, |
2146 | .card_event = sdhci_card_event, | 2173 | .card_event = sdhci_card_event, |
2147 | .card_busy = sdhci_card_busy, | 2174 | .card_busy = sdhci_card_busy, |
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3260 | mmc->max_segs = SDHCI_MAX_SEGS; | 3287 | mmc->max_segs = SDHCI_MAX_SEGS; |
3261 | 3288 | ||
3262 | /* | 3289 | /* |
3263 | * Maximum number of sectors in one transfer. Limited by DMA boundary | 3290 | * Maximum number of sectors in one transfer. Limited by SDMA boundary |
3264 | * size (512KiB). | 3291 | * size (512KiB). Note some tuning modes impose a 4MiB limit, but this |
3292 | * is less anyway. | ||
3265 | */ | 3293 | */ |
3266 | mmc->max_req_size = 524288; | 3294 | mmc->max_req_size = 524288; |
3267 | 3295 | ||
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e398eda07298..c8af3ce3ea38 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx) | |||
184 | schedule_work(&alx->reset_wk); | 184 | schedule_work(&alx->reset_wk); |
185 | } | 185 | } |
186 | 186 | ||
187 | static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) | 187 | static int alx_clean_rx_irq(struct alx_priv *alx, int budget) |
188 | { | 188 | { |
189 | struct alx_rx_queue *rxq = &alx->rxq; | 189 | struct alx_rx_queue *rxq = &alx->rxq; |
190 | struct alx_rrd *rrd; | 190 | struct alx_rrd *rrd; |
191 | struct alx_buffer *rxb; | 191 | struct alx_buffer *rxb; |
192 | struct sk_buff *skb; | 192 | struct sk_buff *skb; |
193 | u16 length, rfd_cleaned = 0; | 193 | u16 length, rfd_cleaned = 0; |
194 | int work = 0; | ||
194 | 195 | ||
195 | while (budget > 0) { | 196 | while (work < budget) { |
196 | rrd = &rxq->rrd[rxq->rrd_read_idx]; | 197 | rrd = &rxq->rrd[rxq->rrd_read_idx]; |
197 | if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) | 198 | if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) |
198 | break; | 199 | break; |
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) | |||
203 | ALX_GET_FIELD(le32_to_cpu(rrd->word0), | 204 | ALX_GET_FIELD(le32_to_cpu(rrd->word0), |
204 | RRD_NOR) != 1) { | 205 | RRD_NOR) != 1) { |
205 | alx_schedule_reset(alx); | 206 | alx_schedule_reset(alx); |
206 | return 0; | 207 | return work; |
207 | } | 208 | } |
208 | 209 | ||
209 | rxb = &rxq->bufs[rxq->read_idx]; | 210 | rxb = &rxq->bufs[rxq->read_idx]; |
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) | |||
243 | } | 244 | } |
244 | 245 | ||
245 | napi_gro_receive(&alx->napi, skb); | 246 | napi_gro_receive(&alx->napi, skb); |
246 | budget--; | 247 | work++; |
247 | 248 | ||
248 | next_pkt: | 249 | next_pkt: |
249 | if (++rxq->read_idx == alx->rx_ringsz) | 250 | if (++rxq->read_idx == alx->rx_ringsz) |
@@ -258,21 +259,22 @@ next_pkt: | |||
258 | if (rfd_cleaned) | 259 | if (rfd_cleaned) |
259 | alx_refill_rx_ring(alx, GFP_ATOMIC); | 260 | alx_refill_rx_ring(alx, GFP_ATOMIC); |
260 | 261 | ||
261 | return budget > 0; | 262 | return work; |
262 | } | 263 | } |
263 | 264 | ||
264 | static int alx_poll(struct napi_struct *napi, int budget) | 265 | static int alx_poll(struct napi_struct *napi, int budget) |
265 | { | 266 | { |
266 | struct alx_priv *alx = container_of(napi, struct alx_priv, napi); | 267 | struct alx_priv *alx = container_of(napi, struct alx_priv, napi); |
267 | struct alx_hw *hw = &alx->hw; | 268 | struct alx_hw *hw = &alx->hw; |
268 | bool complete = true; | ||
269 | unsigned long flags; | 269 | unsigned long flags; |
270 | bool tx_complete; | ||
271 | int work; | ||
270 | 272 | ||
271 | complete = alx_clean_tx_irq(alx) && | 273 | tx_complete = alx_clean_tx_irq(alx); |
272 | alx_clean_rx_irq(alx, budget); | 274 | work = alx_clean_rx_irq(alx, budget); |
273 | 275 | ||
274 | if (!complete) | 276 | if (!tx_complete || work == budget) |
275 | return 1; | 277 | return budget; |
276 | 278 | ||
277 | napi_complete(&alx->napi); | 279 | napi_complete(&alx->napi); |
278 | 280 | ||
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget) | |||
284 | 286 | ||
285 | alx_post_write(hw); | 287 | alx_post_write(hw); |
286 | 288 | ||
287 | return 0; | 289 | return work; |
288 | } | 290 | } |
289 | 291 | ||
290 | static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) | 292 | static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) |
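
The alx conversion from a bool to a packet count exists to satisfy the NAPI contract: a poll callback must report how much of its budget it consumed, must return the full budget (without completing NAPI) while work remains, and may only re-enable device interrupts after napi_complete(). A driver-agnostic sketch of that contract (helper names hypothetical):

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            bool tx_done = demo_clean_tx();
            int work = demo_clean_rx(budget);       /* 0..budget packets */

            if (!tx_done || work == budget)
                    return budget;                  /* stay in polled mode */

            napi_complete(napi);                    /* all done for now */
            demo_enable_irqs();                     /* only after completion */
            return work;
    }
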
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 553dcd8a9df2..96bf01ba32dd 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp) | |||
7413 | } | 7413 | } |
7414 | 7414 | ||
7415 | static void tg3_irq_quiesce(struct tg3 *tp) | 7415 | static void tg3_irq_quiesce(struct tg3 *tp) |
7416 | __releases(tp->lock) | ||
7417 | __acquires(tp->lock) | ||
7416 | { | 7418 | { |
7417 | int i; | 7419 | int i; |
7418 | 7420 | ||
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp) | |||
7421 | tp->irq_sync = 1; | 7423 | tp->irq_sync = 1; |
7422 | smp_mb(); | 7424 | smp_mb(); |
7423 | 7425 | ||
7426 | spin_unlock_bh(&tp->lock); | ||
7427 | |||
7424 | for (i = 0; i < tp->irq_cnt; i++) | 7428 | for (i = 0; i < tp->irq_cnt; i++) |
7425 | synchronize_irq(tp->napi[i].irq_vec); | 7429 | synchronize_irq(tp->napi[i].irq_vec); |
7430 | |||
7431 | spin_lock_bh(&tp->lock); | ||
7426 | } | 7432 | } |
7427 | 7433 | ||
7428 | /* Fully shutdown all tg3 driver activity elsewhere in the system. | 7434 | /* Fully shutdown all tg3 driver activity elsewhere in the system. |
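
This hunk and the tg3_chip_reset() one below attack the same hazard: synchronize_irq() blocks until any in-flight handler has returned, and tg3's handlers can contend for tp->lock, so waiting with the lock held risks deadlock (quite apart from sleeping in a spinlocked section being invalid). The lock is therefore dropped around the wait, and the sparse annotations __releases()/__acquires() document that the function temporarily gives it up:

    static void demo_quiesce(struct tg3 *tp)
            __releases(tp->lock)
            __acquires(tp->lock)
    {
            int i;

            spin_unlock_bh(&tp->lock);      /* handlers may need tp->lock */
            for (i = 0; i < tp->irq_cnt; i++)
                    synchronize_irq(tp->napi[i].irq_vec);
            spin_lock_bh(&tp->lock);        /* caller still expects it held */
    }
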
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp) | |||
9018 | 9024 | ||
9019 | /* tp->lock is held. */ | 9025 | /* tp->lock is held. */ |
9020 | static int tg3_chip_reset(struct tg3 *tp) | 9026 | static int tg3_chip_reset(struct tg3 *tp) |
9027 | __releases(tp->lock) | ||
9028 | __acquires(tp->lock) | ||
9021 | { | 9029 | { |
9022 | u32 val; | 9030 | u32 val; |
9023 | void (*write_op)(struct tg3 *, u32, u32); | 9031 | void (*write_op)(struct tg3 *, u32, u32); |
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
9073 | } | 9081 | } |
9074 | smp_mb(); | 9082 | smp_mb(); |
9075 | 9083 | ||
9084 | tg3_full_unlock(tp); | ||
9085 | |||
9076 | for (i = 0; i < tp->irq_cnt; i++) | 9086 | for (i = 0; i < tp->irq_cnt; i++) |
9077 | synchronize_irq(tp->napi[i].irq_vec); | 9087 | synchronize_irq(tp->napi[i].irq_vec); |
9078 | 9088 | ||
9089 | tg3_full_lock(tp, 0); | ||
9090 | |||
9079 | if (tg3_asic_rev(tp) == ASIC_REV_57780) { | 9091 | if (tg3_asic_rev(tp) == ASIC_REV_57780) { |
9080 | val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | 9092 | val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; |
9081 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); | 9093 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); |
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque) | |||
10903 | { | 10915 | { |
10904 | struct tg3 *tp = (struct tg3 *) __opaque; | 10916 | struct tg3 *tp = (struct tg3 *) __opaque; |
10905 | 10917 | ||
10906 | if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) | ||
10907 | goto restart_timer; | ||
10908 | |||
10909 | spin_lock(&tp->lock); | 10918 | spin_lock(&tp->lock); |
10910 | 10919 | ||
10920 | if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { | ||
10921 | spin_unlock(&tp->lock); | ||
10922 | goto restart_timer; | ||
10923 | } | ||
10924 | |||
10911 | if (tg3_asic_rev(tp) == ASIC_REV_5717 || | 10925 | if (tg3_asic_rev(tp) == ASIC_REV_5717 || |
10912 | tg3_flag(tp, 57765_CLASS)) | 10926 | tg3_flag(tp, 57765_CLASS)) |
10913 | tg3_chk_missed_msi(tp); | 10927 | tg3_chk_missed_msi(tp); |
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work) | |||
11101 | struct tg3 *tp = container_of(work, struct tg3, reset_task); | 11115 | struct tg3 *tp = container_of(work, struct tg3, reset_task); |
11102 | int err; | 11116 | int err; |
11103 | 11117 | ||
11118 | rtnl_lock(); | ||
11104 | tg3_full_lock(tp, 0); | 11119 | tg3_full_lock(tp, 0); |
11105 | 11120 | ||
11106 | if (!netif_running(tp->dev)) { | 11121 | if (!netif_running(tp->dev)) { |
11107 | tg3_flag_clear(tp, RESET_TASK_PENDING); | 11122 | tg3_flag_clear(tp, RESET_TASK_PENDING); |
11108 | tg3_full_unlock(tp); | 11123 | tg3_full_unlock(tp); |
11124 | rtnl_unlock(); | ||
11109 | return; | 11125 | return; |
11110 | } | 11126 | } |
11111 | 11127 | ||
@@ -11138,6 +11154,7 @@ out: | |||
11138 | tg3_phy_start(tp); | 11154 | tg3_phy_start(tp); |
11139 | 11155 | ||
11140 | tg3_flag_clear(tp, RESET_TASK_PENDING); | 11156 | tg3_flag_clear(tp, RESET_TASK_PENDING); |
11157 | rtnl_unlock(); | ||
11141 | } | 11158 | } |
11142 | 11159 | ||
11143 | static int tg3_request_irq(struct tg3 *tp, int irq_num) | 11160 | static int tg3_request_irq(struct tg3 *tp, int irq_num) |
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 55eb7f2af2b4..7ef55f5fa664 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c | |||
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev) | |||
340 | res = PTR_ERR(lp->pclk); | 340 | res = PTR_ERR(lp->pclk); |
341 | goto err_free_dev; | 341 | goto err_free_dev; |
342 | } | 342 | } |
343 | clk_enable(lp->pclk); | 343 | clk_prepare_enable(lp->pclk); |
344 | 344 | ||
345 | lp->hclk = ERR_PTR(-ENOENT); | 345 | lp->hclk = ERR_PTR(-ENOENT); |
346 | lp->tx_clk = ERR_PTR(-ENOENT); | 346 | lp->tx_clk = ERR_PTR(-ENOENT); |
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev) | |||
406 | err_out_unregister_netdev: | 406 | err_out_unregister_netdev: |
407 | unregister_netdev(dev); | 407 | unregister_netdev(dev); |
408 | err_disable_clock: | 408 | err_disable_clock: |
409 | clk_disable(lp->pclk); | 409 | clk_disable_unprepare(lp->pclk); |
410 | err_free_dev: | 410 | err_free_dev: |
411 | free_netdev(dev); | 411 | free_netdev(dev); |
412 | return res; | 412 | return res; |
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev) | |||
424 | kfree(lp->mii_bus->irq); | 424 | kfree(lp->mii_bus->irq); |
425 | mdiobus_free(lp->mii_bus); | 425 | mdiobus_free(lp->mii_bus); |
426 | unregister_netdev(dev); | 426 | unregister_netdev(dev); |
427 | clk_disable(lp->pclk); | 427 | clk_disable_unprepare(lp->pclk); |
428 | free_netdev(dev); | 428 | free_netdev(dev); |
429 | 429 | ||
430 | return 0; | 430 | return 0; |
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
440 | netif_stop_queue(net_dev); | 440 | netif_stop_queue(net_dev); |
441 | netif_device_detach(net_dev); | 441 | netif_device_detach(net_dev); |
442 | 442 | ||
443 | clk_disable(lp->pclk); | 443 | clk_disable_unprepare(lp->pclk); |
444 | } | 444 | } |
445 | return 0; | 445 | return 0; |
446 | } | 446 | } |
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev) | |||
451 | struct macb *lp = netdev_priv(net_dev); | 451 | struct macb *lp = netdev_priv(net_dev); |
452 | 452 | ||
453 | if (netif_running(net_dev)) { | 453 | if (netif_running(net_dev)) { |
454 | clk_enable(lp->pclk); | 454 | clk_prepare_enable(lp->pclk); |
455 | 455 | ||
456 | netif_device_attach(net_dev); | 456 | netif_device_attach(net_dev); |
457 | netif_start_queue(net_dev); | 457 | netif_start_queue(net_dev); |
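
The at91_ether changes are the standard common-clock-framework conversion: under CCF a clock must be prepared before it may be enabled, the prepare step is allowed to sleep, and enable/disable must stay balanced with prepare/unprepare. In process context the combined helpers do both halves:

    ret = clk_prepare_enable(lp->pclk);     /* prepare (may sleep), then enable */
    if (ret)
            return ret;
    /* ... clock in use ... */
    clk_disable_unprepare(lp->pclk);        /* undo both, in reverse order */

(The probe path above ignores the return value, as the pre-existing code did; checking it, as sketched here, would be the stricter form.)
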
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2215d432a059..a936ee8958c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
@@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter) | |||
2430 | */ | 2430 | */ |
2431 | n10g = 0; | 2431 | n10g = 0; |
2432 | for_each_port(adapter, pidx) | 2432 | for_each_port(adapter, pidx) |
2433 | n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); | 2433 | n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); |
2434 | 2434 | ||
2435 | /* | 2435 | /* |
2436 | * We default to 1 queue per non-10G port and up to # of cores queues | 2436 | * We default to 1 queue per non-10G port and up to # of cores queues |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 21dc9a20308c..60426cf890a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx) | |||
323 | return v; | 323 | return v; |
324 | 324 | ||
325 | v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); | 325 | v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); |
326 | pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? | ||
327 | FW_PORT_CMD_MDIOADDR_G(v) : -1; | ||
326 | pi->port_type = FW_PORT_CMD_PTYPE_G(v); | 328 | pi->port_type = FW_PORT_CMD_PTYPE_G(v); |
327 | pi->mod_type = FW_PORT_MOD_TYPE_NA; | 329 | pi->mod_type = FW_PORT_MOD_TYPE_NA; |
328 | 330 | ||
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index a379c3e4b57f..13d00a38a5bd 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c | |||
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget) | |||
398 | * break out of while loop if there are no more | 398 | * break out of while loop if there are no more |
399 | * packets waiting | 399 | * packets waiting |
400 | */ | 400 | */ |
401 | if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { | 401 | if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) |
402 | napi_complete(napi); | 402 | break; |
403 | int_enable = dnet_readl(bp, INTR_ENB); | ||
404 | int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; | ||
405 | dnet_writel(bp, int_enable, INTR_ENB); | ||
406 | return 0; | ||
407 | } | ||
408 | 403 | ||
409 | cmd_word = dnet_readl(bp, RX_LEN_FIFO); | 404 | cmd_word = dnet_readl(bp, RX_LEN_FIFO); |
410 | pkt_len = cmd_word & 0xFFFF; | 405 | pkt_len = cmd_word & 0xFFFF; |
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget) | |||
433 | "size %u.\n", dev->name, pkt_len); | 428 | "size %u.\n", dev->name, pkt_len); |
434 | } | 429 | } |
435 | 430 | ||
436 | budget -= npackets; | ||
437 | |||
438 | if (npackets < budget) { | 431 | if (npackets < budget) { |
439 | /* We processed all packets available. Tell NAPI it can | 432 | /* We processed all packets available. Tell NAPI it can |
440 | * stop polling then re-enable rx interrupts */ | 433 | * stop polling then re-enable rx interrupts. |
434 | */ | ||
441 | napi_complete(napi); | 435 | napi_complete(napi); |
442 | int_enable = dnet_readl(bp, INTR_ENB); | 436 | int_enable = dnet_readl(bp, INTR_ENB); |
443 | int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; | 437 | int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; |
444 | dnet_writel(bp, int_enable, INTR_ENB); | 438 | dnet_writel(bp, int_enable, INTR_ENB); |
445 | return 0; | ||
446 | } | 439 | } |
447 | 440 | ||
448 | /* There are still packets waiting */ | 441 | return npackets; |
449 | return 1; | ||
450 | } | 442 | } |
451 | 443 | ||
452 | static irqreturn_t dnet_interrupt(int irq, void *dev_id) | 444 | static irqreturn_t dnet_interrupt(int irq, void *dev_id) |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 469691ad4a1e..40132929daf7 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -424,6 +424,8 @@ struct bufdesc_ex { | |||
424 | * (40ns * 6). | 424 | * (40ns * 6). |
425 | */ | 425 | */ |
426 | #define FEC_QUIRK_BUG_CAPTURE (1 << 10) | 426 | #define FEC_QUIRK_BUG_CAPTURE (1 << 10) |
427 | /* Controller has only one MDIO bus */ | ||
428 | #define FEC_QUIRK_SINGLE_MDIO (1 << 11) | ||
427 | 429 | ||
428 | struct fec_enet_priv_tx_q { | 430 | struct fec_enet_priv_tx_q { |
429 | int index; | 431 | int index; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5ebdf8dc8a31..bba87775419d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = { | |||
91 | .driver_data = 0, | 91 | .driver_data = 0, |
92 | }, { | 92 | }, { |
93 | .name = "imx28-fec", | 93 | .name = "imx28-fec", |
94 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | 94 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | |
95 | FEC_QUIRK_SINGLE_MDIO, | ||
95 | }, { | 96 | }, { |
96 | .name = "imx6q-fec", | 97 | .name = "imx6q-fec", |
97 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | 98 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
1937 | int err = -ENXIO, i; | 1938 | int err = -ENXIO, i; |
1938 | 1939 | ||
1939 | /* | 1940 | /* |
1940 | * The dual fec interfaces are not equivalent with enet-mac. | 1941 | * The i.MX28 dual fec interfaces are not equal. |
1941 | * Here are the differences: | 1942 | * Here are the differences: |
1942 | * | 1943 | * |
1943 | * - fec0 supports MII & RMII modes while fec1 only supports RMII | 1944 | * - fec0 supports MII & RMII modes while fec1 only supports RMII |
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
1952 | * mdio interface in board design, and need to be configured by | 1953 | * mdio interface in board design, and need to be configured by |
1953 | * fec0 mii_bus. | 1954 | * fec0 mii_bus. |
1954 | */ | 1955 | */ |
1955 | if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { | 1956 | if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { |
1956 | /* fec1 uses fec0 mii_bus */ | 1957 | /* fec1 uses fec0 mii_bus */ |
1957 | if (mii_cnt && fec0_mii_bus) { | 1958 | if (mii_cnt && fec0_mii_bus) { |
1958 | fep->mii_bus = fec0_mii_bus; | 1959 | fep->mii_bus = fec0_mii_bus; |
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
2015 | mii_cnt++; | 2016 | mii_cnt++; |
2016 | 2017 | ||
2017 | /* save fec0 mii_bus */ | 2018 | /* save fec0 mii_bus */ |
2018 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | 2019 | if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) |
2019 | fec0_mii_bus = fep->mii_bus; | 2020 | fec0_mii_bus = fep->mii_bus; |
2020 | 2021 | ||
2021 | return 0; | 2022 | return 0; |
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev) | |||
3129 | pdev->id_entry = of_id->data; | 3130 | pdev->id_entry = of_id->data; |
3130 | fep->quirks = pdev->id_entry->driver_data; | 3131 | fep->quirks = pdev->id_entry->driver_data; |
3131 | 3132 | ||
3133 | fep->netdev = ndev; | ||
3132 | fep->num_rx_queues = num_rx_qs; | 3134 | fep->num_rx_queues = num_rx_qs; |
3133 | fep->num_tx_queues = num_tx_qs; | 3135 | fep->num_tx_queues = num_tx_qs; |
3134 | 3136 | ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 5b8300a32bf5..4d61ef50b465 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig | |||
@@ -281,6 +281,17 @@ config I40E_DCB | |||
281 | 281 | ||
282 | If unsure, say N. | 282 | If unsure, say N. |
283 | 283 | ||
284 | config I40E_FCOE | ||
285 | bool "Fibre Channel over Ethernet (FCoE)" | ||
286 | default n | ||
287 | depends on I40E && DCB && FCOE | ||
288 | ---help--- | ||
289 | Say Y here if you want to use Fibre Channel over Ethernet (FCoE) | ||
290 | in the driver. This will create new netdev for exclusive FCoE | ||
291 | use with XL710 FCoE offloads enabled. | ||
292 | |||
293 | If unsure, say N. | ||
294 | |||
284 | config I40EVF | 295 | config I40EVF |
285 | tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" | 296 | tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" |
286 | depends on PCI_MSI | 297 | depends on PCI_MSI |
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 4b94ddb29c24..c40581999121 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile | |||
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \ | |||
44 | i40e_virtchnl_pf.o | 44 | i40e_virtchnl_pf.o |
45 | 45 | ||
46 | i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o | 46 | i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o |
47 | i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o | 47 | i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 045b5c4b98b3..ad802dd0f67a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h | |||
@@ -78,7 +78,7 @@ do { \ | |||
78 | } while (0) | 78 | } while (0) |
79 | 79 | ||
80 | typedef enum i40e_status_code i40e_status; | 80 | typedef enum i40e_status_code i40e_status; |
81 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 81 | #ifdef CONFIG_I40E_FCOE |
82 | #define I40E_FCOE | 82 | #define I40E_FCOE |
83 | #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ | 83 | #endif |
84 | #endif /* _I40E_OSDEP_H_ */ | 84 | #endif /* _I40E_OSDEP_H_ */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 04b441460bbd..cecb340898fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | |||
658 | return le32_to_cpu(*(volatile __le32 *)head); | 658 | return le32_to_cpu(*(volatile __le32 *)head); |
659 | } | 659 | } |
660 | 660 | ||
661 | #define WB_STRIDE 0x3 | ||
662 | |||
661 | /** | 663 | /** |
662 | * i40e_clean_tx_irq - Reclaim resources after transmit completes | 664 | * i40e_clean_tx_irq - Reclaim resources after transmit completes |
663 | * @tx_ring: tx ring to clean | 665 | * @tx_ring: tx ring to clean |
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | |||
759 | tx_ring->q_vector->tx.total_bytes += total_bytes; | 761 | tx_ring->q_vector->tx.total_bytes += total_bytes; |
760 | tx_ring->q_vector->tx.total_packets += total_packets; | 762 | tx_ring->q_vector->tx.total_packets += total_packets; |
761 | 763 | ||
764 | /* check to see if there are any non-cache aligned descriptors | ||
765 | * waiting to be written back, and kick the hardware to force | ||
766 | * them to be written back in case of napi polling | ||
767 | */ | ||
768 | if (budget && | ||
769 | !((i & WB_STRIDE) == WB_STRIDE) && | ||
770 | !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && | ||
771 | (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) | ||
772 | tx_ring->arm_wb = true; | ||
773 | else | ||
774 | tx_ring->arm_wb = false; | ||
775 | |||
762 | if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { | 776 | if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { |
763 | /* schedule immediate reset if we believe we hung */ | 777 | /* schedule immediate reset if we believe we hung */ |
764 | dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" | 778 | dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" |
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | |||
777 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | 791 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); |
778 | 792 | ||
779 | dev_info(tx_ring->dev, | 793 | dev_info(tx_ring->dev, |
780 | "tx hang detected on queue %d, resetting adapter\n", | 794 | "tx hang detected on queue %d, reset requested\n", |
781 | tx_ring->queue_index); | 795 | tx_ring->queue_index); |
782 | 796 | ||
783 | tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); | 797 | /* do not fire the reset immediately, wait for the stack to |
798 | * decide we are truly stuck, also prevents every queue from | ||
799 | * simultaneously requesting a reset | ||
800 | */ | ||
784 | 801 | ||
785 | /* the adapter is about to reset, no point in enabling stuff */ | 802 | /* the adapter is about to reset, no point in enabling polling */ |
786 | return true; | 803 | budget = 1; |
787 | } | 804 | } |
788 | 805 | ||
789 | netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, | 806 | netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, |
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) | |||
806 | } | 823 | } |
807 | } | 824 | } |
808 | 825 | ||
809 | return budget > 0; | 826 | return !!budget; |
827 | } | ||
828 | |||
829 | /** | ||
830 | * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors | ||
831 | * @vsi: the VSI we care about | ||
832 | * @q_vector: the vector on which to force writeback | ||
833 | * | ||
834 | **/ | ||
835 | static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) | ||
836 | { | ||
837 | u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | | ||
838 | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | | ||
839 | I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | ||
840 | /* allow 00 to be written to the index */; | ||
841 | |||
842 | wr32(&vsi->back->hw, | ||
843 | I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), | ||
844 | val); | ||
810 | } | 845 | } |
811 | 846 | ||
812 | /** | 847 | /** |
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, | |||
1290 | * so the total length of IPv4 header is IHL*4 bytes | 1325 | * so the total length of IPv4 header is IHL*4 bytes |
1291 | * The UDP_0 bit *may* be set if the *inner* header is UDP | 1326 | * The UDP_0 bit *may* be set if the *inner* header is UDP |
1292 | */ | 1327 | */ |
1293 | if (ipv4_tunnel && | 1328 | if (ipv4_tunnel) { |
1294 | (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && | ||
1295 | !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { | ||
1296 | skb->transport_header = skb->mac_header + | 1329 | skb->transport_header = skb->mac_header + |
1297 | sizeof(struct ethhdr) + | 1330 | sizeof(struct ethhdr) + |
1298 | (ip_hdr(skb)->ihl * 4); | 1331 | (ip_hdr(skb)->ihl * 4); |
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, | |||
1302 | skb->protocol == htons(ETH_P_8021AD)) | 1335 | skb->protocol == htons(ETH_P_8021AD)) |
1303 | ? VLAN_HLEN : 0; | 1336 | ? VLAN_HLEN : 0; |
1304 | 1337 | ||
1305 | rx_udp_csum = udp_csum(skb); | 1338 | if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && |
1306 | iph = ip_hdr(skb); | 1339 | (udp_hdr(skb)->check != 0)) { |
1307 | csum = csum_tcpudp_magic( | 1340 | rx_udp_csum = udp_csum(skb); |
1308 | iph->saddr, iph->daddr, | 1341 | iph = ip_hdr(skb); |
1309 | (skb->len - skb_transport_offset(skb)), | 1342 | csum = csum_tcpudp_magic( |
1310 | IPPROTO_UDP, rx_udp_csum); | 1343 | iph->saddr, iph->daddr, |
1344 | (skb->len - skb_transport_offset(skb)), | ||
1345 | IPPROTO_UDP, rx_udp_csum); | ||
1311 | 1346 | ||
1312 | if (udp_hdr(skb)->check != csum) | 1347 | if (udp_hdr(skb)->check != csum) |
1313 | goto checksum_fail; | 1348 | goto checksum_fail; |
1349 | |||
1350 | } /* else its GRE and so no outer UDP header */ | ||
1314 | } | 1351 | } |
1315 | 1352 | ||
1316 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1353 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
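
The rx-checksum fix narrows software verification of the outer UDP checksum (the VXLAN-encapsulation case) to packets that actually carry one: for UDP over IPv4 a checksum field of zero means the sender computed no checksum (RFC 768), and a GRE tunnel has no outer UDP header at all, so unconditionally recomputing and comparing produced spurious checksum_fail drops. Reduced to its core:

    if (ip_hdr(skb)->protocol == IPPROTO_UDP && udp_hdr(skb)->check != 0) {
            /* only verify a checksum the sender actually filled in */
            csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                     skb->len - skb_transport_offset(skb),
                                     IPPROTO_UDP, udp_csum(skb));
            if (udp_hdr(skb)->check != csum)
                    goto checksum_fail;
    }
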
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
1581 | struct i40e_vsi *vsi = q_vector->vsi; | 1618 | struct i40e_vsi *vsi = q_vector->vsi; |
1582 | struct i40e_ring *ring; | 1619 | struct i40e_ring *ring; |
1583 | bool clean_complete = true; | 1620 | bool clean_complete = true; |
1621 | bool arm_wb = false; | ||
1584 | int budget_per_ring; | 1622 | int budget_per_ring; |
1585 | 1623 | ||
1586 | if (test_bit(__I40E_DOWN, &vsi->state)) { | 1624 | if (test_bit(__I40E_DOWN, &vsi->state)) { |
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
1591 | /* Since the actual Tx work is minimal, we can give the Tx a larger | 1629 | /* Since the actual Tx work is minimal, we can give the Tx a larger |
1592 | * budget and be more aggressive about cleaning up the Tx descriptors. | 1630 | * budget and be more aggressive about cleaning up the Tx descriptors. |
1593 | */ | 1631 | */ |
1594 | i40e_for_each_ring(ring, q_vector->tx) | 1632 | i40e_for_each_ring(ring, q_vector->tx) { |
1595 | clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); | 1633 | clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); |
1634 | arm_wb |= ring->arm_wb; | ||
1635 | } | ||
1596 | 1636 | ||
1597 | /* We attempt to distribute budget to each Rx queue fairly, but don't | 1637 | /* We attempt to distribute budget to each Rx queue fairly, but don't |
1598 | * allow the budget to go below 1 because that would exit polling early. | 1638 | * allow the budget to go below 1 because that would exit polling early. |
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
1603 | clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); | 1643 | clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); |
1604 | 1644 | ||
1605 | /* If work not completed, return budget and polling will return */ | 1645 | /* If work not completed, return budget and polling will return */ |
1606 | if (!clean_complete) | 1646 | if (!clean_complete) { |
1647 | if (arm_wb) | ||
1648 | i40e_force_wb(vsi, q_vector); | ||
1607 | return budget; | 1649 | return budget; |
1650 | } | ||
1608 | 1651 | ||
1609 | /* Work is done so exit the polling mode and re-enable the interrupt */ | 1652 | /* Work is done so exit the polling mode and re-enable the interrupt */ |
1610 | napi_complete(napi); | 1653 | napi_complete(napi); |
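
WB_STRIDE is 0x3, so (i & WB_STRIDE) == WB_STRIDE selects every fourth descriptor index (3, 7, 11, ...) -- the last 16-byte descriptor of each 64-byte cacheline, where the hardware's write-back naturally lands. When a clean pass stops anywhere else, finished descriptors can sit unreported, so the ring flags arm_wb and the poll loop triggers a software interrupt through i40e_force_wb() to flush them. The stride test in isolation:

    #define WB_STRIDE 0x3   /* four 16B descriptors per 64B cacheline */

    /* True when descriptor i is the last slot of its cacheline. */
    static inline bool wb_aligned(unsigned int i)
    {
            return (i & WB_STRIDE) == WB_STRIDE;    /* i.e. i % 4 == 3 */
    }
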
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
1840 | if (err < 0) | 1883 | if (err < 0) |
1841 | return err; | 1884 | return err; |
1842 | 1885 | ||
1843 | if (protocol == htons(ETH_P_IP)) { | 1886 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); |
1844 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); | 1887 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); |
1888 | |||
1889 | if (iph->version == 4) { | ||
1845 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1890 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
1846 | iph->tot_len = 0; | 1891 | iph->tot_len = 0; |
1847 | iph->check = 0; | 1892 | iph->check = 0; |
1848 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | 1893 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, |
1849 | 0, IPPROTO_TCP, 0); | 1894 | 0, IPPROTO_TCP, 0); |
1850 | } else if (skb_is_gso_v6(skb)) { | 1895 | } else if (ipv6h->version == 6) { |
1851 | |||
1852 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) | ||
1853 | : ipv6_hdr(skb); | ||
1854 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1896 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
1855 | ipv6h->payload_len = 0; | 1897 | ipv6h->payload_len = 0; |
1856 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, | 1898 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, |
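
The i40e_tso() rework keys the IPv4/IPv6 branch off the version field of the header actually being segmented (the inner one for encapsulated frames) instead of the outer protocol, which diverges for tunnelled GSO. The seeding itself is the usual TSO preparation: length fields are zeroed and the TCP checksum is preloaded with a pseudo-header sum computed over length 0, because the hardware re-derives the per-segment length as it splits the frame:

    iph->tot_len = 0;                       /* hardware fills in per segment */
    iph->check = 0;
    tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                     0, IPPROTO_TCP, 0);   /* seed, not final */
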
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
1946 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | 1988 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; |
1947 | } | 1989 | } |
1948 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { | 1990 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { |
1949 | if (tx_flags & I40E_TX_FLAGS_TSO) { | 1991 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; |
1950 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; | 1992 | if (tx_flags & I40E_TX_FLAGS_TSO) |
1951 | ip_hdr(skb)->check = 0; | 1993 | ip_hdr(skb)->check = 0; |
1952 | } else { | ||
1953 | *cd_tunneling |= | ||
1954 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | ||
1955 | } | ||
1956 | } | 1994 | } |
1957 | 1995 | ||
1958 | /* Now set the ctx descriptor fields */ | 1996 | /* Now set the ctx descriptor fields */ |
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
1962 | ((skb_inner_network_offset(skb) - | 2000 | ((skb_inner_network_offset(skb) - |
1963 | skb_transport_offset(skb)) >> 1) << | 2001 | skb_transport_offset(skb)) >> 1) << |
1964 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; | 2002 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; |
1965 | |||
2003 | if (this_ip_hdr->version == 6) { | ||
2004 | tx_flags &= ~I40E_TX_FLAGS_IPV4; | ||
2005 | tx_flags |= I40E_TX_FLAGS_IPV6; | ||
2006 | } | ||
1966 | } else { | 2007 | } else { |
1967 | network_hdr_len = skb_network_header_len(skb); | 2008 | network_hdr_len = skb_network_header_len(skb); |
1968 | this_ip_hdr = ip_hdr(skb); | 2009 | this_ip_hdr = ip_hdr(skb); |
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
2198 | /* Place RS bit on last descriptor of any packet that spans across the | 2239 | /* Place RS bit on last descriptor of any packet that spans across the |
2199 | * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. | 2240 | * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. |
2200 | */ | 2241 | */ |
2201 | #define WB_STRIDE 0x3 | ||
2202 | if (((i & WB_STRIDE) != WB_STRIDE) && | 2242 | if (((i & WB_STRIDE) != WB_STRIDE) && |
2203 | (first <= &tx_ring->tx_bi[i]) && | 2243 | (first <= &tx_ring->tx_bi[i]) && |
2204 | (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { | 2244 | (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e60d3accb2e2..18b00231d2f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h | |||
@@ -241,6 +241,7 @@ struct i40e_ring { | |||
241 | unsigned long last_rx_timestamp; | 241 | unsigned long last_rx_timestamp; |
242 | 242 | ||
243 | bool ring_active; /* is ring online or not */ | 243 | bool ring_active; /* is ring online or not */ |
244 | bool arm_wb; /* do something to arm write back */ | ||
244 | 245 | ||
245 | /* stats structs */ | 246 | /* stats structs */ |
246 | struct i40e_queue_stats stats; | 247 | struct i40e_queue_stats stats; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c29ba80ae02b..37583a9d8853 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = { | |||
473 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | 473 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | |
474 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | | 474 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | |
475 | EESR_ECI, | 475 | EESR_ECI, |
476 | .fdr_value = 0x00000f0f, | ||
476 | 477 | ||
477 | .apr = 1, | 478 | .apr = 1, |
478 | .mpr = 1, | 479 | .mpr = 1, |
@@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = { | |||
495 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | 496 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | |
496 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | | 497 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | |
497 | EESR_ECI, | 498 | EESR_ECI, |
499 | .fdr_value = 0x00000f0f, | ||
498 | 500 | ||
499 | .apr = 1, | 501 | .apr = 1, |
500 | .mpr = 1, | 502 | .mpr = 1, |
@@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = { | |||
536 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | | 538 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | |
537 | EESR_ECI, | 539 | EESR_ECI, |
538 | 540 | ||
541 | .trscer_err_mask = DESC_I_RINT8, | ||
542 | |||
539 | .apr = 1, | 543 | .apr = 1, |
540 | .mpr = 1, | 544 | .mpr = 1, |
541 | .tpauser = 1, | 545 | .tpauser = 1, |
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) | |||
856 | 860 | ||
857 | if (!cd->eesr_err_check) | 861 | if (!cd->eesr_err_check) |
858 | cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; | 862 | cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; |
863 | |||
864 | if (!cd->trscer_err_mask) | ||
865 | cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; | ||
859 | } | 866 | } |
860 | 867 | ||
861 | static int sh_eth_check_reset(struct net_device *ndev) | 868 | static int sh_eth_check_reset(struct net_device *ndev) |
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
1294 | /* Frame recv control (enable multiple-packets per rx irq) */ | 1301 | /* Frame recv control (enable multiple-packets per rx irq) */ |
1295 | sh_eth_write(ndev, RMCR_RNC, RMCR); | 1302 | sh_eth_write(ndev, RMCR_RNC, RMCR); |
1296 | 1303 | ||
1297 | sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); | 1304 | sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); |
1298 | 1305 | ||
1299 | if (mdp->cd->bculr) | 1306 | if (mdp->cd->bculr) |
1300 | sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */ | 1307 | sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 22301bf9c21d..71f5de1171bd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -369,6 +369,8 @@ enum DESC_I_BIT { | |||
369 | DESC_I_RINT1 = 0x0001, | 369 | DESC_I_RINT1 = 0x0001, |
370 | }; | 370 | }; |
371 | 371 | ||
372 | #define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) | ||
373 | |||
372 | /* RPADIR */ | 374 | /* RPADIR */ |
373 | enum RPADIR_BIT { | 375 | enum RPADIR_BIT { |
374 | RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, | 376 | RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, |
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data { | |||
470 | unsigned long tx_check; | 472 | unsigned long tx_check; |
471 | unsigned long eesr_err_check; | 473 | unsigned long eesr_err_check; |
472 | 474 | ||
475 | /* Error mask */ | ||
476 | unsigned long trscer_err_mask; | ||
477 | |||
473 | /* hardware features */ | 478 | /* hardware features */ |
474 | unsigned long irq_flags; /* IRQ configuration flags */ | 479 | unsigned long irq_flags; /* IRQ configuration flags */ |
475 | unsigned no_psr:1; /* EtherC DO NOT have PSR */ | 480 | unsigned no_psr:1; /* EtherC DO NOT have PSR */ |
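The sh_eth hunks add a per-SoC trscer_err_mask with a shared fallback: sh7724 overrides the TRSCER value with DESC_I_RINT8 alone, every other board keeps the old DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2 mask, and sh_eth_dev_init() now writes whatever the cpu_data carries. A minimal sketch of the fill-in-defaults pattern (bit values are illustrative, not the hardware's):

    #include <stdio.h>

    #define DESC_I_RINT8 0x0100 /* illustrative values */
    #define DESC_I_RINT5 0x0020
    #define DESC_I_TINT2 0x0002
    #define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)

    struct cpu_data {
        unsigned long trscer_err_mask; /* 0 means "use the default" */
    };

    /* Mirrors sh_eth_set_default_cpu_data(): a zero-initialized field
     * falls back to the common default, so only boards that need a
     * different mask have to set it. */
    static void set_default_cpu_data(struct cpu_data *cd)
    {
        if (!cd->trscer_err_mask)
            cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
    }

    int main(void)
    {
        struct cpu_data generic = { 0 };
        struct cpu_data sh7724 = { .trscer_err_mask = DESC_I_RINT8 };

        set_default_cpu_data(&generic);
        set_default_cpu_data(&sh7724);
        printf("generic=%#lx sh7724=%#lx\n",
               generic.trscer_err_mask, sh7724.trscer_err_mask);
        return 0;
    }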
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e61ee8351272..64d1cef4cda1 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
610 | 610 | ||
611 | /* Clear all mcast from ALE */ | 611 | /* Clear all mcast from ALE */ |
612 | cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << | 612 | cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << |
613 | priv->host_port); | 613 | priv->host_port, -1); |
614 | 614 | ||
615 | /* Flood All Unicast Packets to Host port */ | 615 | /* Flood All Unicast Packets to Host port */ |
616 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); | 616 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); |
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
634 | static void cpsw_ndo_set_rx_mode(struct net_device *ndev) | 634 | static void cpsw_ndo_set_rx_mode(struct net_device *ndev) |
635 | { | 635 | { |
636 | struct cpsw_priv *priv = netdev_priv(ndev); | 636 | struct cpsw_priv *priv = netdev_priv(ndev); |
637 | int vid; | ||
638 | |||
639 | if (priv->data.dual_emac) | ||
640 | vid = priv->slaves[priv->emac_port].port_vlan; | ||
641 | else | ||
642 | vid = priv->data.default_vlan; | ||
637 | 643 | ||
638 | if (ndev->flags & IFF_PROMISC) { | 644 | if (ndev->flags & IFF_PROMISC) { |
639 | /* Enable promiscuous mode */ | 645 | /* Enable promiscuous mode */ |
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) | |||
649 | cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); | 655 | cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); |
650 | 656 | ||
651 | /* Clear all mcast from ALE */ | 657 | /* Clear all mcast from ALE */ |
652 | cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); | 658 | cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, |
659 | vid); | ||
653 | 660 | ||
654 | if (!netdev_mc_empty(ndev)) { | 661 | if (!netdev_mc_empty(ndev)) { |
655 | struct netdev_hw_addr *ha; | 662 | struct netdev_hw_addr *ha; |
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 097ebe7077ac..5246b3a18ff8 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c | |||
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, | |||
234 | cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); | 234 | cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); |
235 | } | 235 | } |
236 | 236 | ||
237 | int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) | 237 | int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) |
238 | { | 238 | { |
239 | u32 ale_entry[ALE_ENTRY_WORDS]; | 239 | u32 ale_entry[ALE_ENTRY_WORDS]; |
240 | int ret, idx; | 240 | int ret, idx; |
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) | |||
245 | if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) | 245 | if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) |
246 | continue; | 246 | continue; |
247 | 247 | ||
248 | /* If the vid passed in is -1, remove all multicast entries from | ||
249 | * the table irrespective of VLAN id; if a valid VLAN id is | ||
250 | * passed, remove only the multicast entries added to that VLAN. | ||
251 | * If the VLAN id doesn't match, move on to the next entry. | ||
252 | */ | ||
253 | if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) | ||
254 | continue; | ||
255 | |||
248 | if (cpsw_ale_get_mcast(ale_entry)) { | 256 | if (cpsw_ale_get_mcast(ale_entry)) { |
249 | u8 addr[6]; | 257 | u8 addr[6]; |
250 | 258 | ||
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index c0d4127aa549..af1e7ecd87c6 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h | |||
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale); | |||
92 | 92 | ||
93 | int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); | 93 | int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); |
94 | int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); | 94 | int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); |
95 | int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); | 95 | int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); |
96 | int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, | 96 | int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, |
97 | int flags, u16 vid); | 97 | int flags, u16 vid); |
98 | int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, | 98 | int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, |
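cpsw_ale_flush_multicast() grows a vid argument: -1 keeps the old flush-everything behaviour (the promiscuous path above passes it), while a valid VLAN id restricts the flush to entries on that VLAN (cpsw_ndo_set_rx_mode() passes the slave's port_vlan in dual-EMAC mode, otherwise default_vlan). A small sketch of the match rule, with a made-up entry layout:

    #include <stdio.h>

    struct ale_entry { /* hypothetical layout */
        int vid;
        int is_mcast;
    };

    /* vid == -1 matches every multicast entry; otherwise only
     * entries whose VLAN id equals vid are flushed. */
    static int entry_matches(const struct ale_entry *e, int vid)
    {
        if (!e->is_mcast)
            return 0;
        return vid == -1 || e->vid == vid;
    }

    int main(void)
    {
        struct ale_entry table[] = {
            { .vid = 1, .is_mcast = 1 },
            { .vid = 2, .is_mcast = 1 },
            { .vid = 2, .is_mcast = 0 },
        };
        int flushed = 0;

        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (entry_matches(&table[i], 2))
                flushed++;
        printf("flushed %d entry(ies) for vid 2\n", flushed); /* 1 */
        return 0;
    }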
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 93e224217e24..f7ff493f1e73 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind) | |||
629 | static void team_notify_peers_work(struct work_struct *work) | 629 | static void team_notify_peers_work(struct work_struct *work) |
630 | { | 630 | { |
631 | struct team *team; | 631 | struct team *team; |
632 | int val; | ||
632 | 633 | ||
633 | team = container_of(work, struct team, notify_peers.dw.work); | 634 | team = container_of(work, struct team, notify_peers.dw.work); |
634 | 635 | ||
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work) | |||
636 | schedule_delayed_work(&team->notify_peers.dw, 0); | 637 | schedule_delayed_work(&team->notify_peers.dw, 0); |
637 | return; | 638 | return; |
638 | } | 639 | } |
640 | val = atomic_dec_if_positive(&team->notify_peers.count_pending); | ||
641 | if (val < 0) { | ||
642 | rtnl_unlock(); | ||
643 | return; | ||
644 | } | ||
639 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); | 645 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); |
640 | rtnl_unlock(); | 646 | rtnl_unlock(); |
641 | if (!atomic_dec_and_test(&team->notify_peers.count_pending)) | 647 | if (val) |
642 | schedule_delayed_work(&team->notify_peers.dw, | 648 | schedule_delayed_work(&team->notify_peers.dw, |
643 | msecs_to_jiffies(team->notify_peers.interval)); | 649 | msecs_to_jiffies(team->notify_peers.interval)); |
644 | } | 650 | } |
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team) | |||
669 | static void team_mcast_rejoin_work(struct work_struct *work) | 675 | static void team_mcast_rejoin_work(struct work_struct *work) |
670 | { | 676 | { |
671 | struct team *team; | 677 | struct team *team; |
678 | int val; | ||
672 | 679 | ||
673 | team = container_of(work, struct team, mcast_rejoin.dw.work); | 680 | team = container_of(work, struct team, mcast_rejoin.dw.work); |
674 | 681 | ||
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work) | |||
676 | schedule_delayed_work(&team->mcast_rejoin.dw, 0); | 683 | schedule_delayed_work(&team->mcast_rejoin.dw, 0); |
677 | return; | 684 | return; |
678 | } | 685 | } |
686 | val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); | ||
687 | if (val < 0) { | ||
688 | rtnl_unlock(); | ||
689 | return; | ||
690 | } | ||
679 | call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); | 691 | call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); |
680 | rtnl_unlock(); | 692 | rtnl_unlock(); |
681 | if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) | 693 | if (val) |
682 | schedule_delayed_work(&team->mcast_rejoin.dw, | 694 | schedule_delayed_work(&team->mcast_rejoin.dw, |
683 | msecs_to_jiffies(team->mcast_rejoin.interval)); | 695 | msecs_to_jiffies(team->mcast_rejoin.interval)); |
684 | } | 696 | } |
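Both team work handlers switch from atomic_dec_and_test() to atomic_dec_if_positive(): the decrement now happens before the notifier call and can never push count_pending below zero, e.g. if the work was still queued after the counter had already reached zero. A userspace model of the primitive and of how the return value drives the handler:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of atomic_dec_if_positive(): decrement and return the new
     * value, but never drop below zero; a negative return means the
     * counter was already zero and the caller should do nothing. */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0)
            if (atomic_compare_exchange_weak(v, &old, old - 1))
                return old - 1;
        return -1;
    }

    int main(void)
    {
        atomic_int count_pending = 1;
        int val;

        val = dec_if_positive(&count_pending); /* 0: notify, don't reschedule */
        printf("val=%d\n", val);
        val = dec_if_positive(&count_pending); /* -1: skip the notifier */
        printf("val=%d\n", val);
        return 0;
    }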
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index dcb6d33141e0..1e9cdca37014 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length) | |||
1276 | awd.done = 0; | 1276 | awd.done = 0; |
1277 | 1277 | ||
1278 | urb->context = &awd; | 1278 | urb->context = &awd; |
1279 | status = usb_submit_urb(urb, GFP_NOIO); | 1279 | status = usb_submit_urb(urb, GFP_ATOMIC); |
1280 | if (status) { | 1280 | if (status) { |
1281 | // something went wrong | 1281 | // something went wrong |
1282 | usb_free_urb(urb); | 1282 | usb_free_urb(urb); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index e5be2d21868f..a5f9198d5747 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -69,8 +69,8 @@ | |||
69 | #include "iwl-agn-hw.h" | 69 | #include "iwl-agn-hw.h" |
70 | 70 | ||
71 | /* Highest firmware API version supported */ | 71 | /* Highest firmware API version supported */ |
72 | #define IWL7260_UCODE_API_MAX 10 | 72 | #define IWL7260_UCODE_API_MAX 12 |
73 | #define IWL3160_UCODE_API_MAX 10 | 73 | #define IWL3160_UCODE_API_MAX 12 |
74 | 74 | ||
75 | /* Oldest version we won't warn about */ | 75 | /* Oldest version we won't warn about */ |
76 | #define IWL7260_UCODE_API_OK 10 | 76 | #define IWL7260_UCODE_API_OK 10 |
@@ -105,7 +105,7 @@ | |||
105 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 105 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" |
106 | 106 | ||
107 | #define IWL7265D_FW_PRE "iwlwifi-7265D-" | 107 | #define IWL7265D_FW_PRE "iwlwifi-7265D-" |
108 | #define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 108 | #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" |
109 | 109 | ||
110 | #define NVM_HW_SECTION_NUM_FAMILY_7000 0 | 110 | #define NVM_HW_SECTION_NUM_FAMILY_7000 0 |
111 | 111 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index bf0a95cb7153..3668fc57e770 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #include "iwl-agn-hw.h" | 69 | #include "iwl-agn-hw.h" |
70 | 70 | ||
71 | /* Highest firmware API version supported */ | 71 | /* Highest firmware API version supported */ |
72 | #define IWL8000_UCODE_API_MAX 10 | 72 | #define IWL8000_UCODE_API_MAX 12 |
73 | 73 | ||
74 | /* Oldest version we won't warn about */ | 74 | /* Oldest version we won't warn about */ |
75 | #define IWL8000_UCODE_API_OK 10 | 75 | #define IWL8000_UCODE_API_OK 10 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index f2a047f6bb3e..1bbe4fc47b97 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h | |||
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag { | |||
243 | * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. | 243 | * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. |
244 | * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time | 244 | * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time |
245 | * longer than the passive one, which is essential for fragmented scan. | 245 | * longer than the passive one, which is essential for fragmented scan. |
246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, | ||
247 | * regardless of the band or the number of probes. FW will calculate | ||
248 | * the actual dwell time. | ||
246 | */ | 249 | */ |
247 | enum iwl_ucode_tlv_api { | 250 | enum iwl_ucode_tlv_api { |
248 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), | 251 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), |
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api { | |||
253 | IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), | 256 | IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), |
254 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), | 257 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), |
255 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), | 258 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), |
259 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), | ||
256 | }; | 260 | }; |
257 | 261 | ||
258 | /** | 262 | /** |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 1f2acf47bfb2..201846de94e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt { | |||
672 | * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented | 672 | * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented |
673 | * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report | 673 | * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report |
674 | * and DS parameter set IEs into probe requests. | 674 | * and DS parameter set IEs into probe requests. |
675 | * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches | ||
675 | */ | 676 | */ |
676 | enum iwl_mvm_lmac_scan_flags { | 677 | enum iwl_mvm_lmac_scan_flags { |
677 | IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), | 678 | IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), |
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags { | |||
681 | IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), | 682 | IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), |
682 | IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), | 683 | IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), |
683 | IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), | 684 | IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), |
685 | IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), | ||
684 | }; | 686 | }; |
685 | 687 | ||
686 | enum iwl_scan_priority { | 688 | enum iwl_scan_priority { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e5294d01181e..ec9a8e7bae1d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid, | |||
171 | * already included in the probe template, so we need to set only | 171 | * already included in the probe template, so we need to set only |
172 | * req->n_ssids - 1 bits in addition to the first bit. | 172 | * req->n_ssids - 1 bits in addition to the first bit. |
173 | */ | 173 | */ |
174 | static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) | 174 | static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, |
175 | enum ieee80211_band band, int n_ssids) | ||
175 | { | 176 | { |
177 | if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) | ||
178 | return 10; | ||
176 | if (band == IEEE80211_BAND_2GHZ) | 179 | if (band == IEEE80211_BAND_2GHZ) |
177 | return 20 + 3 * (n_ssids + 1); | 180 | return 20 + 3 * (n_ssids + 1); |
178 | return 10 + 2 * (n_ssids + 1); | 181 | return 10 + 2 * (n_ssids + 1); |
179 | } | 182 | } |
180 | 183 | ||
181 | static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) | 184 | static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, |
185 | enum ieee80211_band band) | ||
182 | { | 186 | { |
187 | if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) | ||
188 | return 110; | ||
183 | return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; | 189 | return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; |
184 | } | 190 | } |
185 | 191 | ||
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, | |||
331 | */ | 337 | */ |
332 | if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { | 338 | if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { |
333 | u32 passive_dwell = | 339 | u32 passive_dwell = |
334 | iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); | 340 | iwl_mvm_get_passive_dwell(mvm, |
341 | IEEE80211_BAND_2GHZ); | ||
335 | params->max_out_time = passive_dwell; | 342 | params->max_out_time = passive_dwell; |
336 | } else { | 343 | } else { |
337 | params->passive_fragmented = true; | 344 | params->passive_fragmented = true; |
@@ -348,8 +355,8 @@ not_bound: | |||
348 | params->dwell[band].passive = frag_passive_dwell; | 355 | params->dwell[band].passive = frag_passive_dwell; |
349 | else | 356 | else |
350 | params->dwell[band].passive = | 357 | params->dwell[band].passive = |
351 | iwl_mvm_get_passive_dwell(band); | 358 | iwl_mvm_get_passive_dwell(mvm, band); |
352 | params->dwell[band].active = iwl_mvm_get_active_dwell(band, | 359 | params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, |
353 | n_ssids); | 360 | n_ssids); |
354 | } | 361 | } |
355 | } | 362 | } |
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, | |||
1448 | 1455 | ||
1449 | if (iwl_mvm_scan_pass_all(mvm, req)) | 1456 | if (iwl_mvm_scan_pass_all(mvm, req)) |
1450 | flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; | 1457 | flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; |
1458 | else | ||
1459 | flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; | ||
1451 | 1460 | ||
1452 | if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) | 1461 | if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) |
1453 | flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; | 1462 | flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; |
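With the new IWL_UCODE_TLV_API_BASIC_DWELL TLV the firmware computes the actual dwell times itself, so the driver just hands over the baseline 10 (active) / 110 (passive) values; older firmware keeps the per-band formulas. A sketch of the gated computation, with bool flags standing in for the capability test and the band enum:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int active_dwell(bool basic_dwell, bool band_2ghz,
                                     int n_ssids)
    {
        if (basic_dwell)
            return 10; /* FW calculates the real dwell */
        return band_2ghz ? 20 + 3 * (n_ssids + 1)
                         : 10 + 2 * (n_ssids + 1);
    }

    static unsigned int passive_dwell(bool basic_dwell, bool band_2ghz)
    {
        if (basic_dwell)
            return 110;
        return band_2ghz ? 100 + 20 : 100 + 10;
    }

    int main(void)
    {
        printf("legacy 2GHz, 2 SSIDs: active=%u passive=%u\n",
               active_dwell(false, true, 2), passive_dwell(false, true));
        printf("BASIC_DWELL:          active=%u passive=%u\n",
               active_dwell(true, true, 2), passive_dwell(true, true));
        return 0;
    }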
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4f15d9decc81..4333306ccdee 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
108 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; | 108 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; |
109 | } | 109 | } |
110 | 110 | ||
111 | /* tid_tspec will default to 0 = BE when QOS isn't enabled */ | 111 | /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */ |
112 | ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; | 112 | if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) |
113 | ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; | ||
114 | else | ||
115 | ac = tid_to_mac80211_ac[0]; | ||
116 | |||
113 | tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << | 117 | tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << |
114 | TX_CMD_FLG_BT_PRIO_POS; | 118 | TX_CMD_FLG_BT_PRIO_POS; |
115 | 119 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index e56e77ef5d2e..917431e30f74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c | |||
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) | |||
665 | if (num_of_ant(mvm->fw->valid_rx_ant) == 1) | 665 | if (num_of_ant(mvm->fw->valid_rx_ant) == 1) |
666 | return false; | 666 | return false; |
667 | 667 | ||
668 | if (!mvm->cfg->rx_with_siso_diversity) | 668 | if (mvm->cfg->rx_with_siso_diversity) |
669 | return false; | 669 | return false; |
670 | 670 | ||
671 | ieee80211_iterate_active_interfaces_atomic( | 671 | ieee80211_iterate_active_interfaces_atomic( |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2f0c4b170344..d5aadb00dd9e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
527 | else if (cfg == &iwl7265_n_cfg) | 527 | else if (cfg == &iwl7265_n_cfg) |
528 | cfg_7265d = &iwl7265d_n_cfg; | 528 | cfg_7265d = &iwl7265d_n_cfg; |
529 | if (cfg_7265d && | 529 | if (cfg_7265d && |
530 | (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) | 530 | (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { |
531 | cfg = cfg_7265d; | 531 | cfg = cfg_7265d; |
532 | iwl_trans->cfg = cfg_7265d; | ||
533 | } | ||
532 | #endif | 534 | #endif |
533 | 535 | ||
534 | pci_set_drvdata(pdev, iwl_trans); | 536 | pci_set_drvdata(pdev, iwl_trans); |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 846a2e6e34d8..c70efb9a6e78 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -666,7 +666,8 @@ tx_status_ok: | |||
666 | } | 666 | } |
667 | 667 | ||
668 | static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, | 668 | static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, |
669 | u8 *entry, int rxring_idx, int desc_idx) | 669 | struct sk_buff *new_skb, u8 *entry, |
670 | int rxring_idx, int desc_idx) | ||
670 | { | 671 | { |
671 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 672 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
672 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 673 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, | |||
674 | u8 tmp_one = 1; | 675 | u8 tmp_one = 1; |
675 | struct sk_buff *skb; | 676 | struct sk_buff *skb; |
676 | 677 | ||
678 | if (likely(new_skb)) { | ||
679 | skb = new_skb; | ||
680 | goto remap; | ||
681 | } | ||
677 | skb = dev_alloc_skb(rtlpci->rxbuffersize); | 682 | skb = dev_alloc_skb(rtlpci->rxbuffersize); |
678 | if (!skb) | 683 | if (!skb) |
679 | return 0; | 684 | return 0; |
680 | rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; | ||
681 | 685 | ||
686 | remap: | ||
682 | /* just set skb->cb to mapping addr for pci_unmap_single use */ | 687 | /* just set skb->cb to mapping addr for pci_unmap_single use */ |
683 | *((dma_addr_t *)skb->cb) = | 688 | *((dma_addr_t *)skb->cb) = |
684 | pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), | 689 | pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), |
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, | |||
686 | bufferaddress = *((dma_addr_t *)skb->cb); | 691 | bufferaddress = *((dma_addr_t *)skb->cb); |
687 | if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) | 692 | if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) |
688 | return 0; | 693 | return 0; |
694 | rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; | ||
689 | if (rtlpriv->use_new_trx_flow) { | 695 | if (rtlpriv->use_new_trx_flow) { |
690 | rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, | 696 | rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, |
691 | HW_DESC_RX_PREPARE, | 697 | HW_DESC_RX_PREPARE, |
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
781 | /*rx pkt */ | 787 | /*rx pkt */ |
782 | struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ | 788 | struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ |
783 | rtlpci->rx_ring[rxring_idx].idx]; | 789 | rtlpci->rx_ring[rxring_idx].idx]; |
790 | struct sk_buff *new_skb; | ||
784 | 791 | ||
785 | if (rtlpriv->use_new_trx_flow) { | 792 | if (rtlpriv->use_new_trx_flow) { |
786 | rx_remained_cnt = | 793 | rx_remained_cnt = |
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
807 | pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), | 814 | pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), |
808 | rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); | 815 | rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); |
809 | 816 | ||
817 | /* get a new skb - if allocation fails, the old one is reused */ ||
818 | new_skb = dev_alloc_skb(rtlpci->rxbuffersize); | ||
819 | if (unlikely(!new_skb)) { | ||
820 | pr_err("Allocation of new skb failed in %s\n", | ||
821 | __func__); | ||
822 | goto no_new; | ||
823 | } | ||
810 | if (rtlpriv->use_new_trx_flow) { | 824 | if (rtlpriv->use_new_trx_flow) { |
811 | buffer_desc = | 825 | buffer_desc = |
812 | &rtlpci->rx_ring[rxring_idx].buffer_desc | 826 | &rtlpci->rx_ring[rxring_idx].buffer_desc |
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
911 | schedule_work(&rtlpriv->works.lps_change_work); | 925 | schedule_work(&rtlpriv->works.lps_change_work); |
912 | } | 926 | } |
913 | end: | 927 | end: |
928 | skb = new_skb; | ||
929 | no_new: | ||
914 | if (rtlpriv->use_new_trx_flow) { | 930 | if (rtlpriv->use_new_trx_flow) { |
915 | _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, | 931 | _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, |
916 | rxring_idx, | 932 | rxring_idx, |
917 | rtlpci->rx_ring[rxring_idx].idx); | 933 | rtlpci->rx_ring[rxring_idx].idx); |
918 | } else { | 934 | } else { |
919 | _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, | 935 | _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, |
936 | rxring_idx, | ||
920 | rtlpci->rx_ring[rxring_idx].idx); | 937 | rtlpci->rx_ring[rxring_idx].idx); |
921 | |||
922 | if (rtlpci->rx_ring[rxring_idx].idx == | 938 | if (rtlpci->rx_ring[rxring_idx].idx == |
923 | rtlpci->rxringcount - 1) | 939 | rtlpci->rxringcount - 1) |
924 | rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, | 940 | rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, |
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) | |||
1307 | rtlpci->rx_ring[rxring_idx].idx = 0; | 1323 | rtlpci->rx_ring[rxring_idx].idx = 0; |
1308 | for (i = 0; i < rtlpci->rxringcount; i++) { | 1324 | for (i = 0; i < rtlpci->rxringcount; i++) { |
1309 | entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; | 1325 | entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; |
1310 | if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, | 1326 | if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, |
1311 | rxring_idx, i)) | 1327 | rxring_idx, i)) |
1312 | return -ENOMEM; | 1328 | return -ENOMEM; |
1313 | } | 1329 | } |
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) | |||
1332 | 1348 | ||
1333 | for (i = 0; i < rtlpci->rxringcount; i++) { | 1349 | for (i = 0; i < rtlpci->rxringcount; i++) { |
1334 | entry = &rtlpci->rx_ring[rxring_idx].desc[i]; | 1350 | entry = &rtlpci->rx_ring[rxring_idx].desc[i]; |
1335 | if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, | 1351 | if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, |
1336 | rxring_idx, i)) | 1352 | rxring_idx, i)) |
1337 | return -ENOMEM; | 1353 | return -ENOMEM; |
1338 | } | 1354 | } |
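The rtlwifi rx path now allocates the replacement skb before delivering the old one; on failure it jumps to no_new and re-initializes the descriptor with the original skb, so a ring slot is never left without a buffer. A minimal model of that refill discipline using plain heap buffers in place of skbs:

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_slot {
        void *buf;
    };

    /* Allocate first; on failure keep the old buffer in the slot (the
     * "no_new" path). On success the old buffer is handed up the
     * stack (modeled here by free()) and the new one is installed. */
    static void refill_slot(struct rx_slot *slot, size_t size)
    {
        void *new_buf = malloc(size);

        if (!new_buf)
            return;          /* old buffer stays queued */
        free(slot->buf);     /* "deliver" the old buffer */
        slot->buf = new_buf;
    }

    int main(void)
    {
        struct rx_slot slot = { .buf = malloc(2048) };

        refill_slot(&slot, 2048);
        printf("slot populated: %s\n", slot.buf ? "yes" : "no");
        free(slot.buf);
        return 0;
    }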
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 22bcb4e12e2a..d8c10764f130 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -88,10 +88,8 @@ struct netfront_cb { | |||
88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) | 88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) |
89 | 89 | ||
90 | struct netfront_stats { | 90 | struct netfront_stats { |
91 | u64 rx_packets; | 91 | u64 packets; |
92 | u64 tx_packets; | 92 | u64 bytes; |
93 | u64 rx_bytes; | ||
94 | u64 tx_bytes; | ||
95 | struct u64_stats_sync syncp; | 93 | struct u64_stats_sync syncp; |
96 | }; | 94 | }; |
97 | 95 | ||
@@ -160,7 +158,8 @@ struct netfront_info { | |||
160 | struct netfront_queue *queues; | 158 | struct netfront_queue *queues; |
161 | 159 | ||
162 | /* Statistics */ | 160 | /* Statistics */ |
163 | struct netfront_stats __percpu *stats; | 161 | struct netfront_stats __percpu *rx_stats; |
162 | struct netfront_stats __percpu *tx_stats; | ||
164 | 163 | ||
165 | atomic_t rx_gso_checksum_fixup; | 164 | atomic_t rx_gso_checksum_fixup; |
166 | }; | 165 | }; |
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
565 | { | 564 | { |
566 | unsigned short id; | 565 | unsigned short id; |
567 | struct netfront_info *np = netdev_priv(dev); | 566 | struct netfront_info *np = netdev_priv(dev); |
568 | struct netfront_stats *stats = this_cpu_ptr(np->stats); | 567 | struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); |
569 | struct xen_netif_tx_request *tx; | 568 | struct xen_netif_tx_request *tx; |
570 | char *data = skb->data; | 569 | char *data = skb->data; |
571 | RING_IDX i; | 570 | RING_IDX i; |
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
672 | if (notify) | 671 | if (notify) |
673 | notify_remote_via_irq(queue->tx_irq); | 672 | notify_remote_via_irq(queue->tx_irq); |
674 | 673 | ||
675 | u64_stats_update_begin(&stats->syncp); | 674 | u64_stats_update_begin(&tx_stats->syncp); |
676 | stats->tx_bytes += skb->len; | 675 | tx_stats->bytes += skb->len; |
677 | stats->tx_packets++; | 676 | tx_stats->packets++; |
678 | u64_stats_update_end(&stats->syncp); | 677 | u64_stats_update_end(&tx_stats->syncp); |
679 | 678 | ||
680 | /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ | 679 | /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ |
681 | xennet_tx_buf_gc(queue); | 680 | xennet_tx_buf_gc(queue); |
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb) | |||
931 | static int handle_incoming_queue(struct netfront_queue *queue, | 930 | static int handle_incoming_queue(struct netfront_queue *queue, |
932 | struct sk_buff_head *rxq) | 931 | struct sk_buff_head *rxq) |
933 | { | 932 | { |
934 | struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); | 933 | struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); |
935 | int packets_dropped = 0; | 934 | int packets_dropped = 0; |
936 | struct sk_buff *skb; | 935 | struct sk_buff *skb; |
937 | 936 | ||
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue, | |||
952 | continue; | 951 | continue; |
953 | } | 952 | } |
954 | 953 | ||
955 | u64_stats_update_begin(&stats->syncp); | 954 | u64_stats_update_begin(&rx_stats->syncp); |
956 | stats->rx_packets++; | 955 | rx_stats->packets++; |
957 | stats->rx_bytes += skb->len; | 956 | rx_stats->bytes += skb->len; |
958 | u64_stats_update_end(&stats->syncp); | 957 | u64_stats_update_end(&rx_stats->syncp); |
959 | 958 | ||
960 | /* Pass it up. */ | 959 | /* Pass it up. */ |
961 | napi_gro_receive(&queue->napi, skb); | 960 | napi_gro_receive(&queue->napi, skb); |
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, | |||
1079 | int cpu; | 1078 | int cpu; |
1080 | 1079 | ||
1081 | for_each_possible_cpu(cpu) { | 1080 | for_each_possible_cpu(cpu) { |
1082 | struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); | 1081 | struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); |
1082 | struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); | ||
1083 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; | 1083 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; |
1084 | unsigned int start; | 1084 | unsigned int start; |
1085 | 1085 | ||
1086 | do { | 1086 | do { |
1087 | start = u64_stats_fetch_begin_irq(&stats->syncp); | 1087 | start = u64_stats_fetch_begin_irq(&tx_stats->syncp); |
1088 | tx_packets = tx_stats->packets; | ||
1089 | tx_bytes = tx_stats->bytes; | ||
1090 | } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); | ||
1088 | 1091 | ||
1089 | rx_packets = stats->rx_packets; | 1092 | do { |
1090 | tx_packets = stats->tx_packets; | 1093 | start = u64_stats_fetch_begin_irq(&rx_stats->syncp); |
1091 | rx_bytes = stats->rx_bytes; | 1094 | rx_packets = rx_stats->packets; |
1092 | tx_bytes = stats->tx_bytes; | 1095 | rx_bytes = rx_stats->bytes; |
1093 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); | 1096 | } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); |
1094 | 1097 | ||
1095 | tot->rx_packets += rx_packets; | 1098 | tot->rx_packets += rx_packets; |
1096 | tot->tx_packets += tx_packets; | 1099 | tot->tx_packets += tx_packets; |
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = { | |||
1275 | #endif | 1278 | #endif |
1276 | }; | 1279 | }; |
1277 | 1280 | ||
1281 | static void xennet_free_netdev(struct net_device *netdev) | ||
1282 | { | ||
1283 | struct netfront_info *np = netdev_priv(netdev); | ||
1284 | |||
1285 | free_percpu(np->rx_stats); | ||
1286 | free_percpu(np->tx_stats); | ||
1287 | free_netdev(netdev); | ||
1288 | } | ||
1289 | |||
1278 | static struct net_device *xennet_create_dev(struct xenbus_device *dev) | 1290 | static struct net_device *xennet_create_dev(struct xenbus_device *dev) |
1279 | { | 1291 | { |
1280 | int err; | 1292 | int err; |
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
1295 | np->queues = NULL; | 1307 | np->queues = NULL; |
1296 | 1308 | ||
1297 | err = -ENOMEM; | 1309 | err = -ENOMEM; |
1298 | np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); | 1310 | np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); |
1299 | if (np->stats == NULL) | 1311 | if (np->rx_stats == NULL) |
1312 | goto exit; | ||
1313 | np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); | ||
1314 | if (np->tx_stats == NULL) | ||
1300 | goto exit; | 1315 | goto exit; |
1301 | 1316 | ||
1302 | netdev->netdev_ops = &xennet_netdev_ops; | 1317 | netdev->netdev_ops = &xennet_netdev_ops; |
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
1327 | return netdev; | 1342 | return netdev; |
1328 | 1343 | ||
1329 | exit: | 1344 | exit: |
1330 | free_netdev(netdev); | 1345 | xennet_free_netdev(netdev); |
1331 | return ERR_PTR(err); | 1346 | return ERR_PTR(err); |
1332 | } | 1347 | } |
1333 | 1348 | ||
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev, | |||
1369 | return 0; | 1384 | return 0; |
1370 | 1385 | ||
1371 | fail: | 1386 | fail: |
1372 | free_netdev(netdev); | 1387 | xennet_free_netdev(netdev); |
1373 | dev_set_drvdata(&dev->dev, NULL); | 1388 | dev_set_drvdata(&dev->dev, NULL); |
1374 | return err; | 1389 | return err; |
1375 | } | 1390 | } |
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2189 | info->queues = NULL; | 2204 | info->queues = NULL; |
2190 | } | 2205 | } |
2191 | 2206 | ||
2192 | free_percpu(info->stats); | 2207 | xennet_free_netdev(info->netdev); |
2193 | |||
2194 | free_netdev(info->netdev); | ||
2195 | 2208 | ||
2196 | return 0; | 2209 | return 0; |
2197 | } | 2210 | } |
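netfront's per-cpu stats split into independent rx and tx structures, each with its own u64_stats_sync, so xennet_get_stats64() runs one retry loop per direction instead of sampling both under a single syncp. A userspace seqcount model of one such structure (the kernel's u64_stats_sync, not this spin loop, handles the real memory ordering):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dir_stats {
        atomic_uint seq;   /* stand-in for u64_stats_sync */
        uint64_t packets;
        uint64_t bytes;
    };

    static void stats_add(struct dir_stats *s, uint64_t len)
    {
        atomic_fetch_add(&s->seq, 1); /* begin: odd = write in progress */
        s->packets++;
        s->bytes += len;
        atomic_fetch_add(&s->seq, 1); /* end: even = stable */
    }

    static void stats_read(struct dir_stats *s, uint64_t *p, uint64_t *b)
    {
        unsigned int start;

        do {
            while ((start = atomic_load(&s->seq)) & 1)
                ;             /* writer in progress */
            *p = s->packets;
            *b = s->bytes;
        } while (atomic_load(&s->seq) != start); /* raced: retry */
    }

    int main(void)
    {
        struct dir_stats tx = { 0 }, rx = { 0 };
        uint64_t p, b;

        stats_add(&tx, 1500);
        stats_add(&rx, 60);
        stats_read(&tx, &p, &b);
        printf("tx: %llu pkts, %llu bytes\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
    }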
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 91e97ec01418..4d41bf75c233 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id) | |||
1163 | */ | 1163 | */ |
1164 | static inline int ap_test_config_domain(unsigned int domain) | 1164 | static inline int ap_test_config_domain(unsigned int domain) |
1165 | { | 1165 | { |
1166 | if (!ap_configuration) | 1166 | if (!ap_configuration) /* QCI not supported */ |
1167 | return 1; | 1167 | if (domain < 16) |
1168 | return ap_test_config(ap_configuration->aqm, domain); | 1168 | return 1; /* then domains 0...15 are configured */ |
1169 | else | ||
1170 | return 0; | ||
1171 | else | ||
1172 | return ap_test_config(ap_configuration->aqm, domain); | ||
1169 | } | 1173 | } |
1170 | 1174 | ||
1171 | /** | 1175 | /** |
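Without QCI support the bus previously reported every domain as configured; the fallback now covers only domains 0-15. A one-function sketch of the revised test, with have_qci standing in for the ap_configuration pointer:

    #include <stdio.h>

    /* With no QCI info, only domains 0..15 count as configured; with
     * QCI the real code consults the ap_configuration->aqm bitmask. */
    static int test_config_domain(int have_qci, unsigned int domain)
    {
        if (!have_qci)
            return domain < 16;
        return 1; /* placeholder for ap_test_config(aqm, domain) */
    }

    int main(void)
    {
        printf("domain 3 w/o QCI:  %d\n", test_config_domain(0, 3));  /* 1 */
        printf("domain 42 w/o QCI: %d\n", test_config_domain(0, 42)); /* 0 */
        return 0;
    }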
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 55f6774f706f..aebde3289c50 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2027 | goto reject; | 2027 | goto reject; |
2028 | } | 2028 | } |
2029 | if (!strncmp("=All", text_ptr, 4)) { | 2029 | if (!strncmp("=All", text_ptr, 4)) { |
2030 | cmd->cmd_flags |= IFC_SENDTARGETS_ALL; | 2030 | cmd->cmd_flags |= ICF_SENDTARGETS_ALL; |
2031 | } else if (!strncmp("=iqn.", text_ptr, 5) || | 2031 | } else if (!strncmp("=iqn.", text_ptr, 5) || |
2032 | !strncmp("=eui.", text_ptr, 5)) { | 2032 | !strncmp("=eui.", text_ptr, 5)) { |
2033 | cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; | 2033 | cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; |
2034 | } else { | 2034 | } else { |
2035 | pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); | 2035 | pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); |
2036 | goto reject; | 2036 | goto reject; |
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, | |||
3415 | return -ENOMEM; | 3415 | return -ENOMEM; |
3416 | } | 3416 | } |
3417 | /* | 3417 | /* |
3418 | * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE | 3418 | * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE |
3419 | * explicit case.. | 3419 | * explicit case.. |
3420 | */ | 3420 | */ |
3421 | if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { | 3421 | if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { |
3422 | text_ptr = strchr(text_in, '='); | 3422 | text_ptr = strchr(text_in, '='); |
3423 | if (!text_ptr) { | 3423 | if (!text_ptr) { |
3424 | pr_err("Unable to locate '=' string in text_in:" | 3424 | pr_err("Unable to locate '=' string in text_in:" |
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, | |||
3434 | 3434 | ||
3435 | spin_lock(&tiqn_lock); | 3435 | spin_lock(&tiqn_lock); |
3436 | list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { | 3436 | list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { |
3437 | if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && | 3437 | if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && |
3438 | strcmp(tiqn->tiqn, text_ptr)) { | 3438 | strcmp(tiqn->tiqn, text_ptr)) { |
3439 | continue; | 3439 | continue; |
3440 | } | 3440 | } |
@@ -3512,7 +3512,7 @@ eob: | |||
3512 | if (end_of_buf) | 3512 | if (end_of_buf) |
3513 | break; | 3513 | break; |
3514 | 3514 | ||
3515 | if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) | 3515 | if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) |
3516 | break; | 3516 | break; |
3517 | } | 3517 | } |
3518 | spin_unlock(&tiqn_lock); | 3518 | spin_unlock(&tiqn_lock); |
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 09a522bae222..cbcff38ac9b7 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h | |||
@@ -135,8 +135,8 @@ enum cmd_flags_table { | |||
135 | ICF_CONTIG_MEMORY = 0x00000020, | 135 | ICF_CONTIG_MEMORY = 0x00000020, |
136 | ICF_ATTACHED_TO_RQUEUE = 0x00000040, | 136 | ICF_ATTACHED_TO_RQUEUE = 0x00000040, |
137 | ICF_OOO_CMDSN = 0x00000080, | 137 | ICF_OOO_CMDSN = 0x00000080, |
138 | IFC_SENDTARGETS_ALL = 0x00000100, | 138 | ICF_SENDTARGETS_ALL = 0x00000100, |
139 | IFC_SENDTARGETS_SINGLE = 0x00000200, | 139 | ICF_SENDTARGETS_SINGLE = 0x00000200, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | /* struct iscsi_cmd->i_state */ | 142 | /* struct iscsi_cmd->i_state */ |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7653cfb027a2..58f49ff69b14 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | |||
1103 | } | 1103 | } |
1104 | EXPORT_SYMBOL(se_dev_set_queue_depth); | 1104 | EXPORT_SYMBOL(se_dev_set_queue_depth); |
1105 | 1105 | ||
1106 | int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) | ||
1107 | { | ||
1108 | int block_size = dev->dev_attrib.block_size; | ||
1109 | |||
1110 | if (dev->export_count) { | ||
1111 | pr_err("dev[%p]: Unable to change SE Device" | ||
1112 | " fabric_max_sectors while export_count is %d\n", | ||
1113 | dev, dev->export_count); | ||
1114 | return -EINVAL; | ||
1115 | } | ||
1116 | if (!fabric_max_sectors) { | ||
1117 | pr_err("dev[%p]: Illegal ZERO value for" | ||
1118 | " fabric_max_sectors\n", dev); | ||
1119 | return -EINVAL; | ||
1120 | } | ||
1121 | if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | ||
1122 | pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" | ||
1123 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, | ||
1124 | DA_STATUS_MAX_SECTORS_MIN); | ||
1125 | return -EINVAL; | ||
1126 | } | ||
1127 | if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | ||
1128 | pr_err("dev[%p]: Passed fabric_max_sectors: %u" | ||
1129 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | ||
1130 | " %u\n", dev, fabric_max_sectors, | ||
1131 | DA_STATUS_MAX_SECTORS_MAX); | ||
1132 | return -EINVAL; | ||
1133 | } | ||
1134 | /* | ||
1135 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | ||
1136 | */ | ||
1137 | if (!block_size) { | ||
1138 | block_size = 512; | ||
1139 | pr_warn("Defaulting to 512 for zero block_size\n"); | ||
1140 | } | ||
1141 | fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, | ||
1142 | block_size); | ||
1143 | |||
1144 | dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; | ||
1145 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", | ||
1146 | dev, fabric_max_sectors); | ||
1147 | return 0; | ||
1148 | } | ||
1149 | EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); | ||
1150 | |||
1151 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1106 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1152 | { | 1107 | { |
1153 | if (dev->export_count) { | 1108 | if (dev->export_count) { |
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |||
1156 | dev, dev->export_count); | 1111 | dev, dev->export_count); |
1157 | return -EINVAL; | 1112 | return -EINVAL; |
1158 | } | 1113 | } |
1159 | if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { | 1114 | if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { |
1160 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" | 1115 | pr_err("dev[%p]: Passed optimal_sectors %u cannot be" |
1161 | " greater than fabric_max_sectors: %u\n", dev, | 1116 | " greater than hw_max_sectors: %u\n", dev, |
1162 | optimal_sectors, dev->dev_attrib.fabric_max_sectors); | 1117 | optimal_sectors, dev->dev_attrib.hw_max_sectors); |
1163 | return -EINVAL; | 1118 | return -EINVAL; |
1164 | } | 1119 | } |
1165 | 1120 | ||
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |||
1553 | dev->dev_attrib.unmap_granularity_alignment = | 1508 | dev->dev_attrib.unmap_granularity_alignment = |
1554 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | 1509 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; |
1555 | dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; | 1510 | dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; |
1556 | dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; | ||
1557 | dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; | ||
1558 | 1511 | ||
1559 | xcopy_lun = &dev->xcopy_lun; | 1512 | xcopy_lun = &dev->xcopy_lun; |
1560 | xcopy_lun->lun_se_dev = dev; | 1513 | xcopy_lun->lun_se_dev = dev; |
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev) | |||
1595 | dev->dev_attrib.hw_max_sectors = | 1548 | dev->dev_attrib.hw_max_sectors = |
1596 | se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, | 1549 | se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, |
1597 | dev->dev_attrib.hw_block_size); | 1550 | dev->dev_attrib.hw_block_size); |
1551 | dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; | ||
1598 | 1552 | ||
1599 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | 1553 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); |
1600 | dev->creation_time = get_jiffies_64(); | 1554 | dev->creation_time = get_jiffies_64(); |
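With the fabric_max_sectors attribute removed, hw_max_sectors becomes both the default for optimal_sectors at configure time and the upper bound when it is changed later. A sketch of the new validation, with a simplified stand-in for the attribute structure:

    #include <stdio.h>

    struct dev_attrib {
        unsigned int hw_max_sectors;
        unsigned int optimal_sectors;
    };

    /* Mirrors se_dev_set_optimal_sectors(): the cap is now the
     * backend's hw_max_sectors. */
    static int set_optimal_sectors(struct dev_attrib *a, unsigned int v)
    {
        if (v > a->hw_max_sectors) {
            fprintf(stderr, "optimal_sectors %u > hw_max_sectors %u\n",
                    v, a->hw_max_sectors);
            return -1;
        }
        a->optimal_sectors = v;
        return 0;
    }

    int main(void)
    {
        struct dev_attrib a = { .hw_max_sectors = 8192 };

        a.optimal_sectors = a.hw_max_sectors; /* new configure-time default */
        printf("set 4096  -> %d\n", set_optimal_sectors(&a, 4096));
        printf("set 16384 -> %d\n", set_optimal_sectors(&a, 16384));
        return 0;
    }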
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c2aea099ea4a..d836de200a03 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
621 | struct fd_prot fd_prot; | 621 | struct fd_prot fd_prot; |
622 | sense_reason_t rc; | 622 | sense_reason_t rc; |
623 | int ret = 0; | 623 | int ret = 0; |
624 | 624 | /* | |
625 | * We are currently limited by the number of iovecs (2048) per | ||
626 | * single vfs_[writev,readv] call. | ||
627 | */ | ||
628 | if (cmd->data_length > FD_MAX_BYTES) { | ||
629 | pr_err("FILEIO: Not able to process I/O of %u bytes due to" | ||
630 | "FD_MAX_BYTES: %u iovec count limitiation\n", | ||
631 | cmd->data_length, FD_MAX_BYTES); | ||
632 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
633 | } | ||
625 | /* | 634 | /* |
626 | * Call vectorized fileio functions to map struct scatterlist | 635 | * Call vectorized fileio functions to map struct scatterlist |
627 | * physical memory addresses to struct iovec virtual memory. | 636 | * physical memory addresses to struct iovec virtual memory. |
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = { | |||
959 | &fileio_dev_attrib_hw_block_size.attr, | 968 | &fileio_dev_attrib_hw_block_size.attr, |
960 | &fileio_dev_attrib_block_size.attr, | 969 | &fileio_dev_attrib_block_size.attr, |
961 | &fileio_dev_attrib_hw_max_sectors.attr, | 970 | &fileio_dev_attrib_hw_max_sectors.attr, |
962 | &fileio_dev_attrib_fabric_max_sectors.attr, | ||
963 | &fileio_dev_attrib_optimal_sectors.attr, | 971 | &fileio_dev_attrib_optimal_sectors.attr, |
964 | &fileio_dev_attrib_hw_queue_depth.attr, | 972 | &fileio_dev_attrib_hw_queue_depth.attr, |
965 | &fileio_dev_attrib_queue_depth.attr, | 973 | &fileio_dev_attrib_queue_depth.attr, |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3efff94fbd97..78346b850968 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev) | |||
124 | q = bdev_get_queue(bd); | 124 | q = bdev_get_queue(bd); |
125 | 125 | ||
126 | dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); | 126 | dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); |
127 | dev->dev_attrib.hw_max_sectors = UINT_MAX; | 127 | dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); |
128 | dev->dev_attrib.hw_queue_depth = q->nr_requests; | 128 | dev->dev_attrib.hw_queue_depth = q->nr_requests; |
129 | 129 | ||
130 | /* | 130 | /* |
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = { | |||
883 | &iblock_dev_attrib_hw_block_size.attr, | 883 | &iblock_dev_attrib_hw_block_size.attr, |
884 | &iblock_dev_attrib_block_size.attr, | 884 | &iblock_dev_attrib_block_size.attr, |
885 | &iblock_dev_attrib_hw_max_sectors.attr, | 885 | &iblock_dev_attrib_hw_max_sectors.attr, |
886 | &iblock_dev_attrib_fabric_max_sectors.attr, | ||
887 | &iblock_dev_attrib_optimal_sectors.attr, | 886 | &iblock_dev_attrib_optimal_sectors.attr, |
888 | &iblock_dev_attrib_hw_queue_depth.attr, | 887 | &iblock_dev_attrib_hw_queue_depth.attr, |
889 | &iblock_dev_attrib_queue_depth.attr, | 888 | &iblock_dev_attrib_queue_depth.attr, |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d56f2aaba9af..283cf786ef98 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder( | |||
528 | 528 | ||
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | } else if (we && registered_nexus) { | ||
532 | /* | ||
533 | * Reads are allowed for Write Exclusive locks | ||
534 | * from all registrants. | ||
535 | */ | ||
536 | if (cmd->data_direction == DMA_FROM_DEVICE) { | ||
537 | pr_debug("Allowing READ CDB: 0x%02x for %s" | ||
538 | " reservation\n", cdb[0], | ||
539 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
540 | |||
541 | return 0; | ||
542 | } | ||
531 | } | 543 | } |
532 | pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" | 544 | pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" |
533 | " for %s reservation\n", transport_dump_cmd_direction(cmd), | 545 | " for %s reservation\n", transport_dump_cmd_direction(cmd), |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 60ebd170a561..98e83ac5661b 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { | |||
657 | &rd_mcp_dev_attrib_hw_block_size.attr, | 657 | &rd_mcp_dev_attrib_hw_block_size.attr, |
658 | &rd_mcp_dev_attrib_block_size.attr, | 658 | &rd_mcp_dev_attrib_block_size.attr, |
659 | &rd_mcp_dev_attrib_hw_max_sectors.attr, | 659 | &rd_mcp_dev_attrib_hw_max_sectors.attr, |
660 | &rd_mcp_dev_attrib_fabric_max_sectors.attr, | ||
661 | &rd_mcp_dev_attrib_optimal_sectors.attr, | 660 | &rd_mcp_dev_attrib_optimal_sectors.attr, |
662 | &rd_mcp_dev_attrib_hw_queue_depth.attr, | 661 | &rd_mcp_dev_attrib_hw_queue_depth.attr, |
663 | &rd_mcp_dev_attrib_queue_depth.attr, | 662 | &rd_mcp_dev_attrib_queue_depth.attr, |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 11bea1952435..cd4bed7b2757 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
953 | 953 | ||
954 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { | 954 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { |
955 | unsigned long long end_lba; | 955 | unsigned long long end_lba; |
956 | |||
957 | if (sectors > dev->dev_attrib.fabric_max_sectors) { | ||
958 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
959 | " big sectors %u exceeds fabric_max_sectors:" | ||
960 | " %u\n", cdb[0], sectors, | ||
961 | dev->dev_attrib.fabric_max_sectors); | ||
962 | return TCM_INVALID_CDB_FIELD; | ||
963 | } | ||
964 | if (sectors > dev->dev_attrib.hw_max_sectors) { | ||
965 | printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" | ||
966 | " big sectors %u exceeds backend hw_max_sectors:" | ||
967 | " %u\n", cdb[0], sectors, | ||
968 | dev->dev_attrib.hw_max_sectors); | ||
969 | return TCM_INVALID_CDB_FIELD; | ||
970 | } | ||
971 | check_lba: | 956 | check_lba: |
972 | end_lba = dev->transport->get_blocks(dev) + 1; | 957 | end_lba = dev->transport->get_blocks(dev) + 1; |
973 | if (cmd->t_task_lba + sectors > end_lba) { | 958 | if (cmd->t_task_lba + sectors > end_lba) { |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 1307600fe726..4c71657da56a 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -505,7 +505,6 @@ static sense_reason_t | |||
505 | spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | 505 | spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) |
506 | { | 506 | { |
507 | struct se_device *dev = cmd->se_dev; | 507 | struct se_device *dev = cmd->se_dev; |
508 | u32 max_sectors; | ||
509 | int have_tp = 0; | 508 | int have_tp = 0; |
510 | int opt, min; | 509 | int opt, min; |
511 | 510 | ||
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | |||
539 | /* | 538 | /* |
540 | * Set MAXIMUM TRANSFER LENGTH | 539 | * Set MAXIMUM TRANSFER LENGTH |
541 | */ | 540 | */ |
542 | max_sectors = min(dev->dev_attrib.fabric_max_sectors, | 541 | put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); |
543 | dev->dev_attrib.hw_max_sectors); | ||
544 | put_unaligned_be32(max_sectors, &buf[8]); | ||
545 | 542 | ||
546 | /* | 543 | /* |
547 | * Set OPTIMAL TRANSFER LENGTH | 544 | * Set OPTIMAL TRANSFER LENGTH |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8bfa61c9693d..1157b559683b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = { | |||
1118 | &tcmu_dev_attrib_hw_block_size.attr, | 1118 | &tcmu_dev_attrib_hw_block_size.attr, |
1119 | &tcmu_dev_attrib_block_size.attr, | 1119 | &tcmu_dev_attrib_block_size.attr, |
1120 | &tcmu_dev_attrib_hw_max_sectors.attr, | 1120 | &tcmu_dev_attrib_hw_max_sectors.attr, |
1121 | &tcmu_dev_attrib_fabric_max_sectors.attr, | ||
1122 | &tcmu_dev_attrib_optimal_sectors.attr, | 1121 | &tcmu_dev_attrib_optimal_sectors.attr, |
1123 | &tcmu_dev_attrib_hw_queue_depth.attr, | 1122 | &tcmu_dev_attrib_hw_queue_depth.attr, |
1124 | &tcmu_dev_attrib_queue_depth.attr, | 1123 | &tcmu_dev_attrib_queue_depth.attr, |
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c1188ac053c9..2ccbc0788353 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c | |||
@@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev) | |||
608 | regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); | 608 | regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); |
609 | regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); | 609 | regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); |
610 | data->mode = THERMAL_DEVICE_DISABLED; | 610 | data->mode = THERMAL_DEVICE_DISABLED; |
611 | clk_disable_unprepare(data->thermal_clk); | ||
611 | 612 | ||
612 | return 0; | 613 | return 0; |
613 | } | 614 | } |
@@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev) | |||
617 | struct imx_thermal_data *data = dev_get_drvdata(dev); | 618 | struct imx_thermal_data *data = dev_get_drvdata(dev); |
618 | struct regmap *map = data->tempmon; | 619 | struct regmap *map = data->tempmon; |
619 | 620 | ||
621 | clk_prepare_enable(data->thermal_clk); | ||
620 | /* Enabled thermal sensor after resume */ | 622 | /* Enabled thermal sensor after resume */ |
621 | regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); | 623 | regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); |
622 | regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); | 624 | regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); |
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c index 231cabc16e16..2c2ec7666eb1 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c | |||
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, | |||
119 | continue; | 119 | continue; |
120 | 120 | ||
121 | result = acpi_bus_get_device(trt->source, &adev); | 121 | result = acpi_bus_get_device(trt->source, &adev); |
122 | if (!result) | 122 | if (result) |
123 | acpi_create_platform_device(adev); | ||
124 | else | ||
125 | pr_warn("Failed to get source ACPI device\n"); | 123 | pr_warn("Failed to get source ACPI device\n"); |
126 | 124 | ||
127 | result = acpi_bus_get_device(trt->target, &adev); | 125 | result = acpi_bus_get_device(trt->target, &adev); |
128 | if (!result) | 126 | if (result) |
129 | acpi_create_platform_device(adev); | ||
130 | else | ||
131 | pr_warn("Failed to get target ACPI device\n"); | 127 | pr_warn("Failed to get target ACPI device\n"); |
132 | } | 128 | } |
133 | 129 | ||
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, | |||
206 | 202 | ||
207 | if (art->source) { | 203 | if (art->source) { |
208 | result = acpi_bus_get_device(art->source, &adev); | 204 | result = acpi_bus_get_device(art->source, &adev); |
209 | if (!result) | 205 | if (result) |
210 | acpi_create_platform_device(adev); | ||
211 | else | ||
212 | pr_warn("Failed to get source ACPI device\n"); | 206 | pr_warn("Failed to get source ACPI device\n"); |
213 | } | 207 | } |
214 | if (art->target) { | 208 | if (art->target) { |
215 | result = acpi_bus_get_device(art->target, &adev); | 209 | result = acpi_bus_get_device(art->target, &adev); |
216 | if (!result) | 210 | if (result) |
217 | acpi_create_platform_device(adev); | ||
218 | else | ||
219 | pr_warn("Failed to get target ACPI device\n"); | 211 | pr_warn("Failed to get target ACPI device\n"); |
220 | } | 212 | } |
221 | } | 213 | } |
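Both acpi_thermal_rel.c hunks drop the platform-device creation and keep only the warning, inverting the test in the process: acpi_bus_get_device() follows the usual kernel convention of returning 0 on success, so `if (result)` selects the failure branch. A tiny sketch of that convention, with a hypothetical lookup() standing in:

        #include <stdio.h>

        /* hypothetical lookup following the 0-on-success kernel convention */
        static int lookup(int handle)
        {
                return handle ? 0 : -1;
        }

        int main(void)
        {
                int result = lookup(0);         /* invalid handle: lookup fails */

                if (result)                     /* non-zero return means failure */
                        fprintf(stderr, "Failed to get source ACPI device\n");
                return 0;
        }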
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 31bb553aac26..0fe5dbbea968 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c | |||
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev, | |||
130 | int ret; | 130 | int ret; |
131 | 131 | ||
132 | adev = ACPI_COMPANION(dev); | 132 | adev = ACPI_COMPANION(dev); |
133 | if (!adev) | ||
134 | return -ENODEV; | ||
133 | 135 | ||
134 | status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); | 136 | status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); |
135 | if (ACPI_FAILURE(status)) | 137 | if (ACPI_FAILURE(status)) |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index e145b66df444..d717f3dab6f1 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid); | |||
149 | * | 149 | * |
150 | * Return: pointer to trip points table, NULL otherwise | 150 | * Return: pointer to trip points table, NULL otherwise |
151 | */ | 151 | */ |
152 | const struct thermal_trip * const | 152 | const struct thermal_trip * |
153 | of_thermal_get_trip_points(struct thermal_zone_device *tz) | 153 | of_thermal_get_trip_points(struct thermal_zone_device *tz) |
154 | { | 154 | { |
155 | struct __thermal_zone *data = tz->devdata; | 155 | struct __thermal_zone *data = tz->devdata; |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 8803e693fe68..2580a4872f90 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -63,7 +63,7 @@ struct rcar_thermal_priv { | |||
63 | struct mutex lock; | 63 | struct mutex lock; |
64 | struct list_head list; | 64 | struct list_head list; |
65 | int id; | 65 | int id; |
66 | int ctemp; | 66 | u32 ctemp; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #define rcar_thermal_for_each_priv(pos, common) \ | 69 | #define rcar_thermal_for_each_priv(pos, common) \ |
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
145 | { | 145 | { |
146 | struct device *dev = rcar_priv_to_dev(priv); | 146 | struct device *dev = rcar_priv_to_dev(priv); |
147 | int i; | 147 | int i; |
148 | int ctemp, old, new; | 148 | u32 ctemp, old, new; |
149 | int ret = -EINVAL; | 149 | int ret = -EINVAL; |
150 | 150 | ||
151 | mutex_lock(&priv->lock); | 151 | mutex_lock(&priv->lock); |
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
372 | int i; | 372 | int i; |
373 | int ret = -ENODEV; | 373 | int ret = -ENODEV; |
374 | int idle = IDLE_INTERVAL; | 374 | int idle = IDLE_INTERVAL; |
375 | u32 enr_bits = 0; | ||
375 | 376 | ||
376 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); | 377 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); |
377 | if (!common) | 378 | if (!common) |
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
390 | 391 | ||
391 | /* | 392 | /* |
392 | * platform has IRQ support. | 393 | * platform has IRQ support. |
393 | * Then, drier use common register | 394 | * Then, driver uses common registers |
394 | */ | 395 | */ |
395 | 396 | ||
396 | ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, | 397 | ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, |
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
408 | if (IS_ERR(common->base)) | 409 | if (IS_ERR(common->base)) |
409 | return PTR_ERR(common->base); | 410 | return PTR_ERR(common->base); |
410 | 411 | ||
411 | /* enable temperature comparison */ | ||
412 | rcar_thermal_common_write(common, ENR, 0x00030303); | ||
413 | |||
414 | idle = 0; /* polling delay is not needed */ | 412 | idle = 0; /* polling delay is not needed */ |
415 | } | 413 | } |
416 | 414 | ||
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
452 | rcar_thermal_irq_enable(priv); | 450 | rcar_thermal_irq_enable(priv); |
453 | 451 | ||
454 | list_move_tail(&priv->list, &common->head); | 452 | list_move_tail(&priv->list, &common->head); |
453 | |||
454 | /* update ENR bits */ | ||
455 | enr_bits |= 3 << (i * 8); | ||
455 | } | 456 | } |
456 | 457 | ||
458 | /* enable temperature comparison */ | ||
459 | if (irq) | ||
460 | rcar_thermal_common_write(common, ENR, enr_bits); | ||
461 | |||
457 | platform_set_drvdata(pdev, common); | 462 | platform_set_drvdata(pdev, common); |
458 | 463 | ||
459 | dev_info(dev, "%d sensor probed\n", i); | 464 | dev_info(dev, "%d sensor probed\n", i); |
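Instead of writing the hard-coded 0x00030303 enable mask before any sensor has been registered, rcar_thermal_probe() now accumulates two enable bits per probed sensor, 3 << (i * 8), and writes ENR once, and only when the IRQ path is in use. A compilable sketch of the mask accumulation; three sensors reproduce the old constant:

        #include <stdio.h>

        int main(void)
        {
                unsigned int enr_bits = 0;
                int i;

                for (i = 0; i < 3; i++)
                        enr_bits |= 3u << (i * 8);      /* two bits per sensor */

                printf("ENR = 0x%08x\n", enr_bits);     /* prints ENR = 0x00030303 */
                return 0;
        }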
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 9083e7520623..0531c752fbbb 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h | |||
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void); | |||
91 | void of_thermal_destroy_zones(void); | 91 | void of_thermal_destroy_zones(void); |
92 | int of_thermal_get_ntrips(struct thermal_zone_device *); | 92 | int of_thermal_get_ntrips(struct thermal_zone_device *); |
93 | bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); | 93 | bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); |
94 | const struct thermal_trip * const | 94 | const struct thermal_trip * |
95 | of_thermal_get_trip_points(struct thermal_zone_device *); | 95 | of_thermal_get_trip_points(struct thermal_zone_device *); |
96 | #else | 96 | #else |
97 | static inline int of_parse_thermal_zones(void) { return 0; } | 97 | static inline int of_parse_thermal_zones(void) { return 0; } |
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, | |||
105 | { | 105 | { |
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
108 | static inline const struct thermal_trip * const | 108 | static inline const struct thermal_trip * |
109 | of_thermal_get_trip_points(struct thermal_zone_device *tz) | 109 | of_thermal_get_trip_points(struct thermal_zone_device *tz) |
110 | { | 110 | { |
111 | return NULL; | 111 | return NULL; |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 01c01cb3933f..d695b1673ae5 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, | |||
911 | return 0; | 911 | return 0; |
912 | } | 912 | } |
913 | 913 | ||
914 | static int vhost_scsi_to_tcm_attr(int attr) | ||
915 | { | ||
916 | switch (attr) { | ||
917 | case VIRTIO_SCSI_S_SIMPLE: | ||
918 | return TCM_SIMPLE_TAG; | ||
919 | case VIRTIO_SCSI_S_ORDERED: | ||
920 | return TCM_ORDERED_TAG; | ||
921 | case VIRTIO_SCSI_S_HEAD: | ||
922 | return TCM_HEAD_TAG; | ||
923 | case VIRTIO_SCSI_S_ACA: | ||
924 | return TCM_ACA_TAG; | ||
925 | default: | ||
926 | break; | ||
927 | } | ||
928 | return TCM_SIMPLE_TAG; | ||
929 | } | ||
930 | |||
914 | static void tcm_vhost_submission_work(struct work_struct *work) | 931 | static void tcm_vhost_submission_work(struct work_struct *work) |
915 | { | 932 | { |
916 | struct tcm_vhost_cmd *cmd = | 933 | struct tcm_vhost_cmd *cmd = |
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
936 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, | 953 | rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, |
937 | cmd->tvc_cdb, &cmd->tvc_sense_buf[0], | 954 | cmd->tvc_cdb, &cmd->tvc_sense_buf[0], |
938 | cmd->tvc_lun, cmd->tvc_exp_data_len, | 955 | cmd->tvc_lun, cmd->tvc_exp_data_len, |
939 | cmd->tvc_task_attr, cmd->tvc_data_direction, | 956 | vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), |
940 | TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, | 957 | cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, |
941 | NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); | 958 | sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, |
959 | cmd->tvc_prot_sgl_count); | ||
942 | if (rc < 0) { | 960 | if (rc < 0) { |
943 | transport_send_check_condition_and_sense(se_cmd, | 961 | transport_send_check_condition_and_sense(se_cmd, |
944 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 962 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
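The new vhost_scsi_to_tcm_attr() stops feeding the raw virtio task attribute straight into target_submit_cmd_map_sgls() and maps it onto the target core's tag constants, defaulting anything unrecognised to a simple tag. A standalone sketch of the mapping; the constants are declared locally (values follow the SAM queue-tag message codes) so it compiles on its own:

        #include <stdio.h>

        enum { VIRTIO_SCSI_S_SIMPLE, VIRTIO_SCSI_S_ORDERED,
               VIRTIO_SCSI_S_HEAD, VIRTIO_SCSI_S_ACA };
        enum { TCM_SIMPLE_TAG = 0x20, TCM_HEAD_TAG = 0x21,
               TCM_ORDERED_TAG = 0x22, TCM_ACA_TAG = 0x24 };

        static int vhost_scsi_to_tcm_attr(int attr)
        {
                switch (attr) {
                case VIRTIO_SCSI_S_SIMPLE:      return TCM_SIMPLE_TAG;
                case VIRTIO_SCSI_S_ORDERED:     return TCM_ORDERED_TAG;
                case VIRTIO_SCSI_S_HEAD:        return TCM_HEAD_TAG;
                case VIRTIO_SCSI_S_ACA:         return TCM_ACA_TAG;
                default:                        return TCM_SIMPLE_TAG;
                }
        }

        int main(void)
        {
                printf("ordered -> 0x%x\n", vhost_scsi_to_tcm_attr(VIRTIO_SCSI_S_ORDERED));
                printf("bogus   -> 0x%x\n", vhost_scsi_to_tcm_attr(42));
                return 0;
        }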
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 1c29bd19e3d5..0e5fde1d3ffb 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c | |||
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, | |||
636 | err = broadsheet_spiflash_read_range(par, start_sector_addr, | 636 | err = broadsheet_spiflash_read_range(par, start_sector_addr, |
637 | data_start_addr, sector_buffer); | 637 | data_start_addr, sector_buffer); |
638 | if (err) | 638 | if (err) |
639 | return err; | 639 | goto out; |
640 | } | 640 | } |
641 | 641 | ||
642 | /* now we copy our data into the right place in the sector buffer */ | 642 | /* now we copy our data into the right place in the sector buffer */ |
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, | |||
657 | err = broadsheet_spiflash_read_range(par, tail_start_addr, | 657 | err = broadsheet_spiflash_read_range(par, tail_start_addr, |
658 | tail_len, sector_buffer + tail_start_addr); | 658 | tail_len, sector_buffer + tail_start_addr); |
659 | if (err) | 659 | if (err) |
660 | return err; | 660 | goto out; |
661 | } | 661 | } |
662 | 662 | ||
663 | /* if we got here we have the full sector that we want to rewrite. */ | 663 | /* if we got here we have the full sector that we want to rewrite. */ |
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, | |||
665 | /* first erase the sector */ | 665 | /* first erase the sector */ |
666 | err = broadsheet_spiflash_erase_sector(par, start_sector_addr); | 666 | err = broadsheet_spiflash_erase_sector(par, start_sector_addr); |
667 | if (err) | 667 | if (err) |
668 | return err; | 668 | goto out; |
669 | 669 | ||
670 | /* now write it */ | 670 | /* now write it */ |
671 | err = broadsheet_spiflash_write_sector(par, start_sector_addr, | 671 | err = broadsheet_spiflash_write_sector(par, start_sector_addr, |
672 | sector_buffer, sector_size); | 672 | sector_buffer, sector_size); |
673 | out: | ||
674 | kfree(sector_buffer); | ||
673 | return err; | 675 | return err; |
674 | } | 676 | } |
675 | 677 | ||
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 92cac803dee3..1085c0432158 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c | |||
@@ -402,7 +402,7 @@ static int __init simplefb_init(void) | |||
402 | if (ret) | 402 | if (ret) |
403 | return ret; | 403 | return ret; |
404 | 404 | ||
405 | if (IS_ENABLED(CONFIG_OF) && of_chosen) { | 405 | if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) { |
406 | for_each_child_of_node(of_chosen, np) { | 406 | for_each_child_of_node(of_chosen, np) { |
407 | if (of_device_is_compatible(np, "simple-framebuffer")) | 407 | if (of_device_is_compatible(np, "simple-framebuffer")) |
408 | of_platform_device_create(np, NULL, NULL); | 408 | of_platform_device_create(np, NULL, NULL); |
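of_platform_device_create() is only available when CONFIG_OF_ADDRESS is set, so the guard must test that symbol rather than the broader CONFIG_OF; IS_ENABLED() keeps the dead branch a compile-time constant either way. A userspace re-derivation of the IS_ENABLED() trick (the kernel version additionally checks option##_MODULE):

        #include <stdio.h>

        /* yields 1 only when the CONFIG_ symbol is defined to 1, yet remains a
         * valid constant expression when the symbol is not defined at all */
        #define __ARG_PLACEHOLDER_1 0,
        #define IS_ENABLED(option) _is_enabled(option)
        #define _is_enabled(value) __is_enabled(__ARG_PLACEHOLDER_##value)
        #define __is_enabled(arg1_or_junk) ___is_enabled(arg1_or_junk 1, 0)
        #define ___is_enabled(__ignored, val, ...) val

        #define CONFIG_OF_ADDRESS 1     /* pretend this option is on */
        /* CONFIG_OF deliberately left undefined */

        int main(void)
        {
                printf("CONFIG_OF_ADDRESS: %d\n", IS_ENABLED(CONFIG_OF_ADDRESS)); /* 1 */
                printf("CONFIG_OF:         %d\n", IS_ENABLED(CONFIG_OF));         /* 0 */
                return 0;
        }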
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ba1107977f2e..ed19a7d622fa 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req) | |||
131 | req->in.h.pid = current->pid; | 131 | req->in.h.pid = current->pid; |
132 | } | 132 | } |
133 | 133 | ||
134 | void fuse_set_initialized(struct fuse_conn *fc) | ||
135 | { | ||
136 | /* Make sure stores before this are seen on another CPU */ | ||
137 | smp_wmb(); | ||
138 | fc->initialized = 1; | ||
139 | } | ||
140 | |||
134 | static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) | 141 | static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) |
135 | { | 142 | { |
136 | return !fc->initialized || (for_background && fc->blocked); | 143 | return !fc->initialized || (for_background && fc->blocked); |
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, | |||
155 | if (intr) | 162 | if (intr) |
156 | goto out; | 163 | goto out; |
157 | } | 164 | } |
165 | /* Matches smp_wmb() in fuse_set_initialized() */ | ||
166 | smp_rmb(); | ||
158 | 167 | ||
159 | err = -ENOTCONN; | 168 | err = -ENOTCONN; |
160 | if (!fc->connected) | 169 | if (!fc->connected) |
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, | |||
253 | 262 | ||
254 | atomic_inc(&fc->num_waiting); | 263 | atomic_inc(&fc->num_waiting); |
255 | wait_event(fc->blocked_waitq, fc->initialized); | 264 | wait_event(fc->blocked_waitq, fc->initialized); |
265 | /* Matches smp_wmb() in fuse_set_initialized() */ | ||
266 | smp_rmb(); | ||
256 | req = fuse_request_alloc(0); | 267 | req = fuse_request_alloc(0); |
257 | if (!req) | 268 | if (!req) |
258 | req = get_reserved_req(fc, file); | 269 | req = get_reserved_req(fc, file); |
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) | |||
511 | } | 522 | } |
512 | EXPORT_SYMBOL_GPL(fuse_request_send); | 523 | EXPORT_SYMBOL_GPL(fuse_request_send); |
513 | 524 | ||
525 | static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) | ||
526 | { | ||
527 | if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) | ||
528 | args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; | ||
529 | |||
530 | if (fc->minor < 9) { | ||
531 | switch (args->in.h.opcode) { | ||
532 | case FUSE_LOOKUP: | ||
533 | case FUSE_CREATE: | ||
534 | case FUSE_MKNOD: | ||
535 | case FUSE_MKDIR: | ||
536 | case FUSE_SYMLINK: | ||
537 | case FUSE_LINK: | ||
538 | args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; | ||
539 | break; | ||
540 | case FUSE_GETATTR: | ||
541 | case FUSE_SETATTR: | ||
542 | args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; | ||
543 | break; | ||
544 | } | ||
545 | } | ||
546 | if (fc->minor < 12) { | ||
547 | switch (args->in.h.opcode) { | ||
548 | case FUSE_CREATE: | ||
549 | args->in.args[0].size = sizeof(struct fuse_open_in); | ||
550 | break; | ||
551 | case FUSE_MKNOD: | ||
552 | args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; | ||
553 | break; | ||
554 | } | ||
555 | } | ||
556 | } | ||
557 | |||
514 | ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) | 558 | ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) |
515 | { | 559 | { |
516 | struct fuse_req *req; | 560 | struct fuse_req *req; |
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) | |||
520 | if (IS_ERR(req)) | 564 | if (IS_ERR(req)) |
521 | return PTR_ERR(req); | 565 | return PTR_ERR(req); |
522 | 566 | ||
567 | /* Needs to be done after fuse_get_req() so that fc->minor is valid */ | ||
568 | fuse_adjust_compat(fc, args); | ||
569 | |||
523 | req->in.h.opcode = args->in.h.opcode; | 570 | req->in.h.opcode = args->in.h.opcode; |
524 | req->in.h.nodeid = args->in.h.nodeid; | 571 | req->in.h.nodeid = args->in.h.nodeid; |
525 | req->in.numargs = args->in.numargs; | 572 | req->in.numargs = args->in.numargs; |
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc) | |||
2127 | if (fc->connected) { | 2174 | if (fc->connected) { |
2128 | fc->connected = 0; | 2175 | fc->connected = 0; |
2129 | fc->blocked = 0; | 2176 | fc->blocked = 0; |
2130 | fc->initialized = 1; | 2177 | fuse_set_initialized(fc); |
2131 | end_io_requests(fc); | 2178 | end_io_requests(fc); |
2132 | end_queued_requests(fc); | 2179 | end_queued_requests(fc); |
2133 | end_polls(fc); | 2180 | end_polls(fc); |
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file) | |||
2146 | spin_lock(&fc->lock); | 2193 | spin_lock(&fc->lock); |
2147 | fc->connected = 0; | 2194 | fc->connected = 0; |
2148 | fc->blocked = 0; | 2195 | fc->blocked = 0; |
2149 | fc->initialized = 1; | 2196 | fuse_set_initialized(fc); |
2150 | end_queued_requests(fc); | 2197 | end_queued_requests(fc); |
2151 | end_polls(fc); | 2198 | end_polls(fc); |
2152 | wake_up_all(&fc->blocked_waitq); | 2199 | wake_up_all(&fc->blocked_waitq); |
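fuse_set_initialized() publishes fc->initialized behind an smp_wmb(), and both readers issue smp_rmb() once they have seen the flag, so every store made while processing INIT is visible before the flag is. A C11 userspace sketch of the same publish/observe pairing, with release/acquire fences standing in for the kernel barriers:

        #include <stdatomic.h>
        #include <stdio.h>
        #include <threads.h>

        static int max_background;      /* stands in for the fields set during INIT */
        static atomic_int initialized;

        static int publisher(void *arg)
        {
                (void)arg;
                max_background = 12;                            /* stores before... */
                atomic_thread_fence(memory_order_release);      /* ...the smp_wmb() */
                atomic_store_explicit(&initialized, 1, memory_order_relaxed);
                return 0;
        }

        static int waiter(void *arg)
        {
                (void)arg;
                while (!atomic_load_explicit(&initialized, memory_order_relaxed))
                        ;                                       /* wait for the flag */
                atomic_thread_fence(memory_order_acquire);      /* matches smp_rmb() */
                printf("max_background = %d\n", max_background); /* guaranteed 12 */
                return 0;
        }

        int main(void)
        {
                thrd_t p, w;

                thrd_create(&w, waiter, NULL);
                thrd_create(&p, publisher, NULL);
                thrd_join(p, NULL);
                thrd_join(w, NULL);
                return 0;
        }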
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 252b8a5de8b5..08e7b1a9d5d0 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args, | |||
156 | args->in.args[0].size = name->len + 1; | 156 | args->in.args[0].size = name->len + 1; |
157 | args->in.args[0].value = name->name; | 157 | args->in.args[0].value = name->name; |
158 | args->out.numargs = 1; | 158 | args->out.numargs = 1; |
159 | if (fc->minor < 9) | 159 | args->out.args[0].size = sizeof(struct fuse_entry_out); |
160 | args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; | ||
161 | else | ||
162 | args->out.args[0].size = sizeof(struct fuse_entry_out); | ||
163 | args->out.args[0].value = outarg; | 160 | args->out.args[0].value = outarg; |
164 | } | 161 | } |
165 | 162 | ||
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, | |||
422 | args.in.h.opcode = FUSE_CREATE; | 419 | args.in.h.opcode = FUSE_CREATE; |
423 | args.in.h.nodeid = get_node_id(dir); | 420 | args.in.h.nodeid = get_node_id(dir); |
424 | args.in.numargs = 2; | 421 | args.in.numargs = 2; |
425 | args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : | 422 | args.in.args[0].size = sizeof(inarg); |
426 | sizeof(inarg); | ||
427 | args.in.args[0].value = &inarg; | 423 | args.in.args[0].value = &inarg; |
428 | args.in.args[1].size = entry->d_name.len + 1; | 424 | args.in.args[1].size = entry->d_name.len + 1; |
429 | args.in.args[1].value = entry->d_name.name; | 425 | args.in.args[1].value = entry->d_name.name; |
430 | args.out.numargs = 2; | 426 | args.out.numargs = 2; |
431 | if (fc->minor < 9) | 427 | args.out.args[0].size = sizeof(outentry); |
432 | args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; | ||
433 | else | ||
434 | args.out.args[0].size = sizeof(outentry); | ||
435 | args.out.args[0].value = &outentry; | 428 | args.out.args[0].value = &outentry; |
436 | args.out.args[1].size = sizeof(outopen); | 429 | args.out.args[1].size = sizeof(outopen); |
437 | args.out.args[1].value = &outopen; | 430 | args.out.args[1].value = &outopen; |
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args, | |||
539 | memset(&outarg, 0, sizeof(outarg)); | 532 | memset(&outarg, 0, sizeof(outarg)); |
540 | args->in.h.nodeid = get_node_id(dir); | 533 | args->in.h.nodeid = get_node_id(dir); |
541 | args->out.numargs = 1; | 534 | args->out.numargs = 1; |
542 | if (fc->minor < 9) | 535 | args->out.args[0].size = sizeof(outarg); |
543 | args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; | ||
544 | else | ||
545 | args->out.args[0].size = sizeof(outarg); | ||
546 | args->out.args[0].value = &outarg; | 536 | args->out.args[0].value = &outarg; |
547 | err = fuse_simple_request(fc, args); | 537 | err = fuse_simple_request(fc, args); |
548 | if (err) | 538 | if (err) |
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode, | |||
592 | inarg.umask = current_umask(); | 582 | inarg.umask = current_umask(); |
593 | args.in.h.opcode = FUSE_MKNOD; | 583 | args.in.h.opcode = FUSE_MKNOD; |
594 | args.in.numargs = 2; | 584 | args.in.numargs = 2; |
595 | args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : | 585 | args.in.args[0].size = sizeof(inarg); |
596 | sizeof(inarg); | ||
597 | args.in.args[0].value = &inarg; | 586 | args.in.args[0].value = &inarg; |
598 | args.in.args[1].size = entry->d_name.len + 1; | 587 | args.in.args[1].size = entry->d_name.len + 1; |
599 | args.in.args[1].value = entry->d_name.name; | 588 | args.in.args[1].value = entry->d_name.name; |
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, | |||
899 | args.in.args[0].size = sizeof(inarg); | 888 | args.in.args[0].size = sizeof(inarg); |
900 | args.in.args[0].value = &inarg; | 889 | args.in.args[0].value = &inarg; |
901 | args.out.numargs = 1; | 890 | args.out.numargs = 1; |
902 | if (fc->minor < 9) | 891 | args.out.args[0].size = sizeof(outarg); |
903 | args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; | ||
904 | else | ||
905 | args.out.args[0].size = sizeof(outarg); | ||
906 | args.out.args[0].value = &outarg; | 892 | args.out.args[0].value = &outarg; |
907 | err = fuse_simple_request(fc, &args); | 893 | err = fuse_simple_request(fc, &args); |
908 | if (!err) { | 894 | if (!err) { |
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args, | |||
1574 | args->in.args[0].size = sizeof(*inarg_p); | 1560 | args->in.args[0].size = sizeof(*inarg_p); |
1575 | args->in.args[0].value = inarg_p; | 1561 | args->in.args[0].value = inarg_p; |
1576 | args->out.numargs = 1; | 1562 | args->out.numargs = 1; |
1577 | if (fc->minor < 9) | 1563 | args->out.args[0].size = sizeof(*outarg_p); |
1578 | args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; | ||
1579 | else | ||
1580 | args->out.args[0].size = sizeof(*outarg_p); | ||
1581 | args->out.args[0].value = outarg_p; | 1564 | args->out.args[0].value = outarg_p; |
1582 | } | 1565 | } |
1583 | 1566 | ||
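The dir.c hunks above (and the statfs hunk in inode.c below) delete the per-call-site `fc->minor < ...` size clamps now that fuse_adjust_compat() centralizes them in fuse_simple_request(), after the request is allocated and fc->minor is known to be valid. A sketch of that centralized clamping; the opcodes and sizes are simplified stand-ins, not the real FUSE_COMPAT_* values:

        #include <stdio.h>

        enum opcode { LOOKUP, GETATTR, STATFS };

        struct args {
                enum opcode opcode;
                unsigned out_size;
        };

        #define COMPAT_ENTRY_OUT_SIZE 120u
        #define COMPAT_ATTR_OUT_SIZE  96u

        /* one place adjusts reply sizes from the negotiated minor version,
         * instead of every call site repeating the check */
        static void adjust_compat(unsigned minor, struct args *args)
        {
                if (minor >= 9)
                        return;         /* modern server: full-size replies */
                switch (args->opcode) {
                case LOOKUP:
                        args->out_size = COMPAT_ENTRY_OUT_SIZE;
                        break;
                case GETATTR:
                        args->out_size = COMPAT_ATTR_OUT_SIZE;
                        break;
                default:
                        break;
                }
        }

        int main(void)
        {
                struct args a = { .opcode = GETATTR, .out_size = 104 };

                adjust_compat(7, &a);   /* pre-7.9 server */
                printf("clamped out size = %u\n", a.out_size);  /* 96 */
                return 0;
        }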
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index e0fc6725d1d0..1cdfb07c1376 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc); | |||
906 | int fuse_do_setattr(struct inode *inode, struct iattr *attr, | 906 | int fuse_do_setattr(struct inode *inode, struct iattr *attr, |
907 | struct file *file); | 907 | struct file *file); |
908 | 908 | ||
909 | void fuse_set_initialized(struct fuse_conn *fc); | ||
910 | |||
909 | #endif /* _FS_FUSE_I_H */ | 911 | #endif /* _FS_FUSE_I_H */ |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 6749109f255d..f38256e4476e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
424 | args.in.h.opcode = FUSE_STATFS; | 424 | args.in.h.opcode = FUSE_STATFS; |
425 | args.in.h.nodeid = get_node_id(dentry->d_inode); | 425 | args.in.h.nodeid = get_node_id(dentry->d_inode); |
426 | args.out.numargs = 1; | 426 | args.out.numargs = 1; |
427 | args.out.args[0].size = | 427 | args.out.args[0].size = sizeof(outarg); |
428 | fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); | ||
429 | args.out.args[0].value = &outarg; | 428 | args.out.args[0].value = &outarg; |
430 | err = fuse_simple_request(fc, &args); | 429 | err = fuse_simple_request(fc, &args); |
431 | if (!err) | 430 | if (!err) |
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | |||
898 | fc->max_write = max_t(unsigned, 4096, fc->max_write); | 897 | fc->max_write = max_t(unsigned, 4096, fc->max_write); |
899 | fc->conn_init = 1; | 898 | fc->conn_init = 1; |
900 | } | 899 | } |
901 | fc->initialized = 1; | 900 | fuse_set_initialized(fc); |
902 | wake_up_all(&fc->blocked_waitq); | 901 | wake_up_all(&fc->blocked_waitq); |
903 | } | 902 | } |
904 | 903 | ||
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index e94c887da2d7..55505cbe11af 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c | |||
@@ -138,10 +138,6 @@ lockd(void *vrqstp) | |||
138 | 138 | ||
139 | dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); | 139 | dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); |
140 | 140 | ||
141 | if (!nlm_timeout) | ||
142 | nlm_timeout = LOCKD_DFLT_TIMEO; | ||
143 | nlmsvc_timeout = nlm_timeout * HZ; | ||
144 | |||
145 | /* | 141 | /* |
146 | * The main request loop. We don't terminate until the last | 142 | * The main request loop. We don't terminate until the last |
147 | * NFS mount or NFS daemon has gone away. | 143 | * NFS mount or NFS daemon has gone away. |
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void) | |||
350 | printk(KERN_WARNING | 346 | printk(KERN_WARNING |
351 | "lockd_up: no pid, %d users??\n", nlmsvc_users); | 347 | "lockd_up: no pid, %d users??\n", nlmsvc_users); |
352 | 348 | ||
349 | if (!nlm_timeout) | ||
350 | nlm_timeout = LOCKD_DFLT_TIMEO; | ||
351 | nlmsvc_timeout = nlm_timeout * HZ; | ||
352 | |||
353 | serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup); | 353 | serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup); |
354 | if (!serv) { | 354 | if (!serv) { |
355 | printk(KERN_WARNING "lockd_up: create service failed\n"); | 355 | printk(KERN_WARNING "lockd_up: create service failed\n"); |
diff --git a/fs/locks.c b/fs/locks.c index 735b8d3fa78c..59e2f905e4ff 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp) | |||
1702 | break; | 1702 | break; |
1703 | } | 1703 | } |
1704 | trace_generic_delete_lease(inode, fl); | 1704 | trace_generic_delete_lease(inode, fl); |
1705 | if (fl) | 1705 | if (fl && IS_LEASE(fl)) |
1706 | error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); | 1706 | error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); |
1707 | spin_unlock(&inode->i_lock); | 1707 | spin_unlock(&inode->i_lock); |
1708 | locks_dispose_list(&dispose); | 1708 | locks_dispose_list(&dispose); |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 03311259b0c4..953daa44a282 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp) | |||
228 | kfree(clp->cl_serverowner); | 228 | kfree(clp->cl_serverowner); |
229 | kfree(clp->cl_serverscope); | 229 | kfree(clp->cl_serverscope); |
230 | kfree(clp->cl_implid); | 230 | kfree(clp->cl_implid); |
231 | kfree(clp->cl_owner_id); | ||
231 | } | 232 | } |
232 | 233 | ||
233 | void nfs4_free_client(struct nfs_client *clp) | 234 | void nfs4_free_client(struct nfs_client *clp) |
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep, | |||
452 | spin_unlock(&nn->nfs_client_lock); | 453 | spin_unlock(&nn->nfs_client_lock); |
453 | } | 454 | } |
454 | 455 | ||
456 | static bool nfs4_match_client_owner_id(const struct nfs_client *clp1, | ||
457 | const struct nfs_client *clp2) | ||
458 | { | ||
459 | if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL) | ||
460 | return true; | ||
461 | return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0; | ||
462 | } | ||
463 | |||
455 | /** | 464 | /** |
456 | * nfs40_walk_client_list - Find server that recognizes a client ID | 465 | * nfs40_walk_client_list - Find server that recognizes a client ID |
457 | * | 466 | * |
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
483 | if (pos->rpc_ops != new->rpc_ops) | 492 | if (pos->rpc_ops != new->rpc_ops) |
484 | continue; | 493 | continue; |
485 | 494 | ||
486 | if (pos->cl_proto != new->cl_proto) | ||
487 | continue; | ||
488 | |||
489 | if (pos->cl_minorversion != new->cl_minorversion) | 495 | if (pos->cl_minorversion != new->cl_minorversion) |
490 | continue; | 496 | continue; |
491 | 497 | ||
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
510 | if (pos->cl_clientid != new->cl_clientid) | 516 | if (pos->cl_clientid != new->cl_clientid) |
511 | continue; | 517 | continue; |
512 | 518 | ||
519 | if (!nfs4_match_client_owner_id(pos, new)) | ||
520 | continue; | ||
521 | |||
513 | atomic_inc(&pos->cl_count); | 522 | atomic_inc(&pos->cl_count); |
514 | spin_unlock(&nn->nfs_client_lock); | 523 | spin_unlock(&nn->nfs_client_lock); |
515 | 524 | ||
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b) | |||
566 | } | 575 | } |
567 | 576 | ||
568 | /* | 577 | /* |
569 | * Returns true if the server owners match | 578 | * Returns true if the server major ids match |
570 | */ | 579 | */ |
571 | static bool | 580 | static bool |
572 | nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b) | 581 | nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b) |
573 | { | 582 | { |
574 | struct nfs41_server_owner *o1 = a->cl_serverowner; | 583 | struct nfs41_server_owner *o1 = a->cl_serverowner; |
575 | struct nfs41_server_owner *o2 = b->cl_serverowner; | 584 | struct nfs41_server_owner *o2 = b->cl_serverowner; |
576 | 585 | ||
577 | if (o1->minor_id != o2->minor_id) { | ||
578 | dprintk("NFS: --> %s server owner minor IDs do not match\n", | ||
579 | __func__); | ||
580 | return false; | ||
581 | } | ||
582 | |||
583 | if (o1->major_id_sz != o2->major_id_sz) | 586 | if (o1->major_id_sz != o2->major_id_sz) |
584 | goto out_major_mismatch; | 587 | goto out_major_mismatch; |
585 | if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) | 588 | if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) |
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
621 | if (pos->rpc_ops != new->rpc_ops) | 624 | if (pos->rpc_ops != new->rpc_ops) |
622 | continue; | 625 | continue; |
623 | 626 | ||
624 | if (pos->cl_proto != new->cl_proto) | ||
625 | continue; | ||
626 | |||
627 | if (pos->cl_minorversion != new->cl_minorversion) | 627 | if (pos->cl_minorversion != new->cl_minorversion) |
628 | continue; | 628 | continue; |
629 | 629 | ||
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
654 | if (!nfs4_match_clientids(pos, new)) | 654 | if (!nfs4_match_clientids(pos, new)) |
655 | continue; | 655 | continue; |
656 | 656 | ||
657 | if (!nfs4_match_serverowners(pos, new)) | 657 | /* |
658 | * Note that session trunking is just a special subcase of | ||
659 | * client id trunking. In either case, we want to fall back | ||
660 | * to using the existing nfs_client. | ||
661 | */ | ||
662 | if (!nfs4_check_clientid_trunking(pos, new)) | ||
663 | continue; | ||
664 | |||
665 | /* Unlike NFSv4.0, we know that NFSv4.1 always uses the | ||
666 | * uniform string, however someone might switch the | ||
667 | * uniquifier string on us. | ||
668 | */ | ||
669 | if (!nfs4_match_client_owner_id(pos, new)) | ||
658 | continue; | 670 | continue; |
659 | 671 | ||
660 | atomic_inc(&pos->cl_count); | 672 | atomic_inc(&pos->cl_count); |
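nfs4_match_client_owner_id() deliberately treats a NULL owner string on either nfs_client as a match, so clients set up before the identifier was recorded still participate in trunking detection; otherwise the uniquifier strings must compare equal. A standalone sketch of the helper:

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool match_owner_id(const char *id1, const char *id2)
        {
                if (id1 == NULL || id2 == NULL)
                        return true;    /* no information: assume a match */
                return strcmp(id1, id2) == 0;
        }

        int main(void)
        {
                printf("%d\n", match_owner_id("Linux NFSv4.1 host-a", NULL));   /* 1 */
                printf("%d\n", match_owner_id("host-a", "host-b"));             /* 0 */
                return 0;
        }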
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e7f8d5ff2581..c347705b0161 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) | |||
1117 | return 0; | 1117 | return 0; |
1118 | if ((delegation->type & fmode) != fmode) | 1118 | if ((delegation->type & fmode) != fmode) |
1119 | return 0; | 1119 | return 0; |
1120 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) | ||
1121 | return 0; | ||
1122 | if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) | 1120 | if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) |
1123 | return 0; | 1121 | return 0; |
1124 | nfs_mark_delegation_referenced(delegation); | 1122 | nfs_mark_delegation_referenced(delegation); |
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp, | |||
4917 | } | 4915 | } |
4918 | 4916 | ||
4919 | static unsigned int | 4917 | static unsigned int |
4920 | nfs4_init_nonuniform_client_string(const struct nfs_client *clp, | 4918 | nfs4_init_nonuniform_client_string(struct nfs_client *clp, |
4921 | char *buf, size_t len) | 4919 | char *buf, size_t len) |
4922 | { | 4920 | { |
4923 | unsigned int result; | 4921 | unsigned int result; |
4924 | 4922 | ||
4923 | if (clp->cl_owner_id != NULL) | ||
4924 | return strlcpy(buf, clp->cl_owner_id, len); | ||
4925 | |||
4925 | rcu_read_lock(); | 4926 | rcu_read_lock(); |
4926 | result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", | 4927 | result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", |
4927 | clp->cl_ipaddr, | 4928 | clp->cl_ipaddr, |
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp, | |||
4930 | rpc_peeraddr2str(clp->cl_rpcclient, | 4931 | rpc_peeraddr2str(clp->cl_rpcclient, |
4931 | RPC_DISPLAY_PROTO)); | 4932 | RPC_DISPLAY_PROTO)); |
4932 | rcu_read_unlock(); | 4933 | rcu_read_unlock(); |
4934 | clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); | ||
4933 | return result; | 4935 | return result; |
4934 | } | 4936 | } |
4935 | 4937 | ||
4936 | static unsigned int | 4938 | static unsigned int |
4937 | nfs4_init_uniform_client_string(const struct nfs_client *clp, | 4939 | nfs4_init_uniform_client_string(struct nfs_client *clp, |
4938 | char *buf, size_t len) | 4940 | char *buf, size_t len) |
4939 | { | 4941 | { |
4940 | const char *nodename = clp->cl_rpcclient->cl_nodename; | 4942 | const char *nodename = clp->cl_rpcclient->cl_nodename; |
4943 | unsigned int result; | ||
4944 | |||
4945 | if (clp->cl_owner_id != NULL) | ||
4946 | return strlcpy(buf, clp->cl_owner_id, len); | ||
4941 | 4947 | ||
4942 | if (nfs4_client_id_uniquifier[0] != '\0') | 4948 | if (nfs4_client_id_uniquifier[0] != '\0') |
4943 | return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", | 4949 | result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", |
4944 | clp->rpc_ops->version, | 4950 | clp->rpc_ops->version, |
4945 | clp->cl_minorversion, | 4951 | clp->cl_minorversion, |
4946 | nfs4_client_id_uniquifier, | 4952 | nfs4_client_id_uniquifier, |
4947 | nodename); | 4953 | nodename); |
4948 | return scnprintf(buf, len, "Linux NFSv%u.%u %s", | 4954 | else |
4955 | result = scnprintf(buf, len, "Linux NFSv%u.%u %s", | ||
4949 | clp->rpc_ops->version, clp->cl_minorversion, | 4956 | clp->rpc_ops->version, clp->cl_minorversion, |
4950 | nodename); | 4957 | nodename); |
4958 | clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); | ||
4959 | return result; | ||
4951 | } | 4960 | } |
4952 | 4961 | ||
4953 | /* | 4962 | /* |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 08848050922e..db284bff29dc 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb, | |||
136 | 136 | ||
137 | static inline void __tlb_reset_range(struct mmu_gather *tlb) | 137 | static inline void __tlb_reset_range(struct mmu_gather *tlb) |
138 | { | 138 | { |
139 | tlb->start = TASK_SIZE; | 139 | if (tlb->fullmm) { |
140 | tlb->end = 0; | 140 | tlb->start = tlb->end = ~0; |
141 | } else { | ||
142 | tlb->start = TASK_SIZE; | ||
143 | tlb->end = 0; | ||
144 | } | ||
141 | } | 145 | } |
142 | 146 | ||
143 | /* | 147 | /* |
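After the fix, a full-mm gather resets to start = end = ~0 instead of an empty range, so code that tests tlb->end to decide whether there is anything to flush still fires in the flush-everything case, while ordinary gathers keep the empty-range reset that later __tlb_adjust_range() calls widen. A compilable sketch of the logic; TASK_SIZE is a stand-in value:

        #include <stdio.h>

        #define TASK_SIZE (1UL << 47)

        struct mmu_gather { unsigned long start, end; int fullmm; };

        static void tlb_reset_range(struct mmu_gather *tlb)
        {
                if (tlb->fullmm) {
                        /* flush everything; end stays non-zero */
                        tlb->start = tlb->end = ~0UL;
                } else {
                        tlb->start = TASK_SIZE;         /* empty: start > end */
                        tlb->end = 0;
                }
        }

        int main(void)
        {
                struct mmu_gather full = { .fullmm = 1 }, part = { .fullmm = 0 };

                tlb_reset_range(&full);
                tlb_reset_range(&part);
                printf("fullmm: %lx-%lx, partial: %lx-%lx\n",
                       full.start, full.end, part.start, part.end);
                return 0;
        }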
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8aded9ab2e4e..5735e7130d63 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx { | |||
34 | unsigned long flags; /* BLK_MQ_F_* flags */ | 34 | unsigned long flags; /* BLK_MQ_F_* flags */ |
35 | 35 | ||
36 | struct request_queue *queue; | 36 | struct request_queue *queue; |
37 | unsigned int queue_num; | ||
38 | struct blk_flush_queue *fq; | 37 | struct blk_flush_queue *fq; |
39 | 38 | ||
40 | void *driver_data; | 39 | void *driver_data; |
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx { | |||
54 | unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; | 53 | unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; |
55 | 54 | ||
56 | unsigned int numa_node; | 55 | unsigned int numa_node; |
57 | unsigned int cmd_size; /* per-request extra data */ | 56 | unsigned int queue_num; |
58 | 57 | ||
59 | atomic_t nr_active; | 58 | atomic_t nr_active; |
60 | 59 | ||
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | |||
195 | struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); | 194 | struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); |
196 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); | 195 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); |
197 | 196 | ||
197 | int blk_mq_request_started(struct request *rq); | ||
198 | void blk_mq_start_request(struct request *rq); | 198 | void blk_mq_start_request(struct request *rq); |
199 | void blk_mq_end_request(struct request *rq, int error); | 199 | void blk_mq_end_request(struct request *rq, int error); |
200 | void __blk_mq_end_request(struct request *rq, int error); | 200 | void __blk_mq_end_request(struct request *rq, int error); |
201 | 201 | ||
202 | void blk_mq_requeue_request(struct request *rq); | 202 | void blk_mq_requeue_request(struct request *rq); |
203 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | 203 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); |
204 | void blk_mq_cancel_requeue_work(struct request_queue *q); | ||
204 | void blk_mq_kick_requeue_list(struct request_queue *q); | 205 | void blk_mq_kick_requeue_list(struct request_queue *q); |
206 | void blk_mq_abort_requeue_list(struct request_queue *q); | ||
205 | void blk_mq_complete_request(struct request *rq); | 207 | void blk_mq_complete_request(struct request *rq); |
206 | 208 | ||
207 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 209 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | |||
212 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 214 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
213 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | 215 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, |
214 | void *priv); | 216 | void *priv); |
217 | void blk_mq_unfreeze_queue(struct request_queue *q); | ||
218 | void blk_mq_freeze_queue_start(struct request_queue *q); | ||
215 | 219 | ||
216 | /* | 220 | /* |
217 | * Driver command data is immediately after the request. So subtract request | 221 | * Driver command data is immediately after the request. So subtract request |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 445d59231bc4..c294e3e25e37 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -190,6 +190,7 @@ enum rq_flag_bits { | |||
190 | __REQ_PM, /* runtime pm request */ | 190 | __REQ_PM, /* runtime pm request */ |
191 | __REQ_HASHED, /* on IO scheduler merge hash */ | 191 | __REQ_HASHED, /* on IO scheduler merge hash */ |
192 | __REQ_MQ_INFLIGHT, /* track inflight for MQ */ | 192 | __REQ_MQ_INFLIGHT, /* track inflight for MQ */ |
193 | __REQ_NO_TIMEOUT, /* requests may never expire */ | ||
193 | __REQ_NR_BITS, /* stops here */ | 194 | __REQ_NR_BITS, /* stops here */ |
194 | }; | 195 | }; |
195 | 196 | ||
@@ -243,5 +244,6 @@ enum rq_flag_bits { | |||
243 | #define REQ_PM (1ULL << __REQ_PM) | 244 | #define REQ_PM (1ULL << __REQ_PM) |
244 | #define REQ_HASHED (1ULL << __REQ_HASHED) | 245 | #define REQ_HASHED (1ULL << __REQ_HASHED) |
245 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) | 246 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) |
247 | #define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) | ||
246 | 248 | ||
247 | #endif /* __LINUX_BLK_TYPES_H */ | 249 | #endif /* __LINUX_BLK_TYPES_H */ |
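REQ_NO_TIMEOUT follows the file's established pattern: a bit index in rq_flag_bits, a 1ULL-shifted mask macro, and the __REQ_NR_BITS sentinel bounding the set. A condensed, compilable sketch with only a few representative bits:

        #include <stdio.h>

        enum rq_flag_bits {
                __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
                __REQ_NO_TIMEOUT,       /* requests may never expire */
                __REQ_NR_BITS,          /* stops here */
        };

        #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
        #define REQ_NO_TIMEOUT  (1ULL << __REQ_NO_TIMEOUT)

        _Static_assert(__REQ_NR_BITS <= 64, "flag word is a u64");

        int main(void)
        {
                unsigned long long cmd_flags = REQ_NO_TIMEOUT;

                if (cmd_flags & REQ_NO_TIMEOUT)
                        puts("timeout handling skipped for this request");
                return 0;
        }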
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index a1c81f80978e..33063f872ee3 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si | |||
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
218 | static __always_inline void __assign_once_size(volatile void *p, void *res, int size) | 218 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) |
219 | { | 219 | { |
220 | switch (size) { | 220 | switch (size) { |
221 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | 221 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int | |||
235 | /* | 235 | /* |
236 | * Prevent the compiler from merging or refetching reads or writes. The | 236 | * Prevent the compiler from merging or refetching reads or writes. The |
237 | * compiler is also forbidden from reordering successive instances of | 237 | * compiler is also forbidden from reordering successive instances of |
238 | * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the | 238 | * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the |
239 | * compiler is aware of some particular ordering. One way to make the | 239 | * compiler is aware of some particular ordering. One way to make the |
240 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | 240 | * compiler aware of ordering is to put the two invocations of READ_ONCE, |
241 | * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. | 241 | * WRITE_ONCE or ACCESS_ONCE() in different C statements. |
242 | * | 242 | * |
243 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | 243 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate |
244 | * data types like structs or unions. If the size of the accessed data | 244 | * data types like structs or unions. If the size of the accessed data |
245 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | 245 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) |
246 | * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a | 246 | * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a |
247 | * compile-time warning. | 247 | * compile-time warning. |
248 | * | 248 | * |
249 | * Their two major use cases are: (1) Mediating communication between | 249 | * Their two major use cases are: (1) Mediating communication between |
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int | |||
257 | #define READ_ONCE(x) \ | 257 | #define READ_ONCE(x) \ |
258 | ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) | 258 | ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) |
259 | 259 | ||
260 | #define ASSIGN_ONCE(val, x) \ | 260 | #define WRITE_ONCE(x, val) \ |
261 | ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) | 261 | ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) |
262 | 262 | ||
263 | #endif /* __KERNEL__ */ | 263 | #endif /* __KERNEL__ */ |
264 | 264 | ||
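Beyond the rename, WRITE_ONCE(x, val) flips the old ASSIGN_ONCE(val, x) argument order so the destination comes first, matching both READ_ONCE() and plain assignment. A minimal userspace model of the scalar case (GNU C, for the typeof; the kernel versions size-switch and fall back to memcpy for aggregates):

        #include <stdio.h>

        /* forcing the access through a volatile-qualified lvalue keeps the
         * compiler from merging, refetching or tearing the marked accesses */
        #define READ_ONCE(x) \
                (*(const volatile typeof(x) *)&(x))
        #define WRITE_ONCE(x, val) \
                (*(volatile typeof(x) *)&(x) = (val))

        static int shared_counter;

        int main(void)
        {
                WRITE_ONCE(shared_counter, 42); /* destination first, like "=" */
                printf("%d\n", READ_ONCE(shared_counter));
                return 0;
        }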
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 375af80bde7d..f767a0de611f 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h | |||
@@ -137,6 +137,7 @@ struct sdhci_host { | |||
137 | #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ | 137 | #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ |
138 | #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ | 138 | #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ |
139 | #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ | 139 | #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ |
140 | #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ | ||
140 | 141 | ||
141 | unsigned int version; /* SDHCI spec. version */ | 142 | unsigned int version; /* SDHCI spec. version */ |
142 | 143 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 679e6e90aa4c..52fd8e8694cf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
852 | * 3. Update dev->stats asynchronously and atomically, and define | 852 | * 3. Update dev->stats asynchronously and atomically, and define |
853 | * neither operation. | 853 | * neither operation. |
854 | * | 854 | * |
855 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); | 855 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
856 | * If device support VLAN filtering this function is called when a | 856 | * If device support VLAN filtering this function is called when a |
857 | * VLAN id is registered. | 857 | * VLAN id is registered. |
858 | * | 858 | * |
859 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); | 859 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
860 | * If device support VLAN filtering this function is called when a | 860 | * If device support VLAN filtering this function is called when a |
861 | * VLAN id is unregistered. | 861 | * VLAN id is unregistered. |
862 | * | 862 | * |
@@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */ | |||
2085 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | 2085 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) |
2086 | #define for_each_netdev_in_bond_rcu(bond, slave) \ | 2086 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
2087 | for_each_netdev_rcu(&init_net, slave) \ | 2087 | for_each_netdev_rcu(&init_net, slave) \ |
2088 | if (netdev_master_upper_dev_get_rcu(slave) == bond) | 2088 | if (netdev_master_upper_dev_get_rcu(slave) == (bond)) |
2089 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) | 2089 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
2090 | 2090 | ||
2091 | static inline struct net_device *next_net_device(struct net_device *dev) | 2091 | static inline struct net_device *next_net_device(struct net_device *dev) |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 1e37fbb78f7a..ddea982355f3 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -74,6 +74,9 @@ struct nfs_client { | |||
74 | /* idmapper */ | 74 | /* idmapper */ |
75 | struct idmap * cl_idmap; | 75 | struct idmap * cl_idmap; |
76 | 76 | ||
77 | /* Client owner identifier */ | ||
78 | const char * cl_owner_id; | ||
79 | |||
77 | /* Our own IP address, as a null-terminated string. | 80 | /* Our own IP address, as a null-terminated string. |
78 | * This is used to generate the mv0 callback address. | 81 | * This is used to generate the mv0 callback address. |
79 | */ | 82 | */ |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 430cfaf92285..db81c65b8f48 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int); | |||
135 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); | 135 | int se_dev_set_emulate_rest_reord(struct se_device *dev, int); |
136 | int se_dev_set_queue_depth(struct se_device *, u32); | 136 | int se_dev_set_queue_depth(struct se_device *, u32); |
137 | int se_dev_set_max_sectors(struct se_device *, u32); | 137 | int se_dev_set_max_sectors(struct se_device *, u32); |
138 | int se_dev_set_fabric_max_sectors(struct se_device *, u32); | ||
139 | int se_dev_set_optimal_sectors(struct se_device *, u32); | 138 | int se_dev_set_optimal_sectors(struct se_device *, u32); |
140 | int se_dev_set_block_size(struct se_device *, u32); | 139 | int se_dev_set_block_size(struct se_device *, u32); |
141 | 140 | ||
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h index 3247d7530107..186f7a923570 100644 --- a/include/target/target_core_backend_configfs.h +++ b/include/target/target_core_backend_configfs.h | |||
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name | |||
98 | TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ | 98 | TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ |
99 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ | 99 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ |
100 | TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ | 100 | TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ |
101 | DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ | ||
102 | TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ | ||
103 | DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ | 101 | DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ |
104 | TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ | 102 | TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ |
105 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ | 103 | DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 397fb635766a..4a8795a87b9e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -77,8 +77,6 @@ | |||
77 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 | 77 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 |
78 | /* Default max_write_same_len, disabled by default */ | 78 | /* Default max_write_same_len, disabled by default */ |
79 | #define DA_MAX_WRITE_SAME_LEN 0 | 79 | #define DA_MAX_WRITE_SAME_LEN 0 |
80 | /* Default max transfer length */ | ||
81 | #define DA_FABRIC_MAX_SECTORS 8192 | ||
82 | /* Use a model alias based on the configfs backend device name */ | 80 | /* Use a model alias based on the configfs backend device name */ |
83 | #define DA_EMULATE_MODEL_ALIAS 0 | 81 | #define DA_EMULATE_MODEL_ALIAS 0 |
84 | /* Emulation for Direct Page Out */ | 82 | /* Emulation for Direct Page Out */ |
@@ -694,7 +692,6 @@ struct se_dev_attrib { | |||
694 | u32 hw_block_size; | 692 | u32 hw_block_size; |
695 | u32 block_size; | 693 | u32 block_size; |
696 | u32 hw_max_sectors; | 694 | u32 hw_max_sectors; |
697 | u32 fabric_max_sectors; | ||
698 | u32 optimal_sectors; | 695 | u32 optimal_sectors; |
699 | u32 hw_queue_depth; | 696 | u32 hw_queue_depth; |
700 | u32 queue_depth; | 697 | u32 queue_depth; |
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 3a6dcaa359b7..f714e8633352 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h | |||
@@ -174,6 +174,10 @@ enum ovs_packet_attr { | |||
174 | OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ | 174 | OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ |
175 | OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* | 175 | OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* |
176 | attributes. */ | 176 | attributes. */ |
177 | OVS_PACKET_ATTR_UNUSED1, | ||
178 | OVS_PACKET_ATTR_UNUSED2, | ||
179 | OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, | ||
180 | error logging should be suppressed. */ | ||
177 | __OVS_PACKET_ATTR_MAX | 181 | __OVS_PACKET_ATTR_MAX |
178 | }; | 182 | }; |
179 | 183 | ||
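An exported enum like ovs_packet_attr is ABI, so the two OVS_PACKET_ATTR_UNUSED placeholders hold two values out of use rather than renumbering, keeping OVS_PACKET_ATTR_PROBE, and anything appended later, at a stable number for existing binaries. A compilable sketch of the pattern; the members before the hunk's context lines follow the header's earlier entries:

        /* a uAPI enum only ever grows at the tail, and skipped values are
         * reserved with explicit UNUSED members */
        enum ovs_packet_attr {
                OVS_PACKET_ATTR_UNSPEC,
                OVS_PACKET_ATTR_PACKET,
                OVS_PACKET_ATTR_KEY,
                OVS_PACKET_ATTR_ACTIONS,
                OVS_PACKET_ATTR_USERDATA,
                OVS_PACKET_ATTR_EGRESS_TUN_KEY,
                OVS_PACKET_ATTR_UNUSED1,        /* value burned, never reused */
                OVS_PACKET_ATTR_UNUSED2,
                OVS_PACKET_ATTR_PROBE,          /* suppress error logging */
                __OVS_PACKET_ATTR_MAX
        };

        _Static_assert(OVS_PACKET_ATTR_PROBE == 8, "uAPI values must not move");

        int main(void)
        {
                return 0;
        }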
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h new file mode 100644 index 000000000000..b47d9d06fade --- /dev/null +++ b/include/xen/interface/nmi.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /****************************************************************************** | ||
2 | * nmi.h | ||
3 | * | ||
4 | * NMI callback registration and reason codes. | ||
5 | * | ||
6 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef __XEN_PUBLIC_NMI_H__ | ||
10 | #define __XEN_PUBLIC_NMI_H__ | ||
11 | |||
12 | #include <xen/interface/xen.h> | ||
13 | |||
14 | /* | ||
15 | * NMI reason codes: | ||
16 | * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. | ||
17 | */ | ||
18 | /* I/O-check error reported via ISA port 0x61, bit 6. */ | ||
19 | #define _XEN_NMIREASON_io_error 0 | ||
20 | #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) | ||
21 | /* PCI SERR reported via ISA port 0x61, bit 7. */ | ||
22 | #define _XEN_NMIREASON_pci_serr 1 | ||
23 | #define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) | ||
24 | /* Unknown hardware-generated NMI. */ | ||
25 | #define _XEN_NMIREASON_unknown 2 | ||
26 | #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) | ||
27 | |||
28 | /* | ||
29 | * long nmi_op(unsigned int cmd, void *arg) | ||
30 | * NB. All ops return zero on success, else a negative error code. | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * Register NMI callback for this (calling) VCPU. Currently this only makes | ||
35 | * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. | ||
36 | * arg == pointer to xennmi_callback structure. | ||
37 | */ | ||
38 | #define XENNMI_register_callback 0 | ||
39 | struct xennmi_callback { | ||
40 | unsigned long handler_address; | ||
41 | unsigned long pad; | ||
42 | }; | ||
43 | DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback); | ||
44 | |||
45 | /* | ||
46 | * Deregister NMI callback for this (calling) VCPU. | ||
47 | * arg == NULL. | ||
48 | */ | ||
49 | #define XENNMI_unregister_callback 1 | ||
50 | |||
51 | #endif /* __XEN_PUBLIC_NMI_H__ */ | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 929a733d302e..224e768bdc73 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command) | |||
2497 | } | 2497 | } |
2498 | 2498 | ||
2499 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, | 2499 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
2500 | struct ftrace_hash *old_hash) | 2500 | struct ftrace_ops_hash *old_hash) |
2501 | { | 2501 | { |
2502 | ops->flags |= FTRACE_OPS_FL_MODIFYING; | 2502 | ops->flags |= FTRACE_OPS_FL_MODIFYING; |
2503 | ops->old_hash.filter_hash = old_hash; | 2503 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2504 | ops->old_hash.notrace_hash = old_hash->notrace_hash; | ||
2504 | ftrace_run_update_code(command); | 2505 | ftrace_run_update_code(command); |
2505 | ops->old_hash.filter_hash = NULL; | 2506 | ops->old_hash.filter_hash = NULL; |
2507 | ops->old_hash.notrace_hash = NULL; | ||
2506 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; | 2508 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2507 | } | 2509 | } |
2508 | 2510 | ||
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = | |||
3579 | 3581 | ||
3580 | static int ftrace_probe_registered; | 3582 | static int ftrace_probe_registered; |
3581 | 3583 | ||
3582 | static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) | 3584 | static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) |
3583 | { | 3585 | { |
3584 | int ret; | 3586 | int ret; |
3585 | int i; | 3587 | int i; |
@@ -3637,6 +3639,7 @@ int | |||
3637 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 3639 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
3638 | void *data) | 3640 | void *data) |
3639 | { | 3641 | { |
3642 | struct ftrace_ops_hash old_hash_ops; | ||
3640 | struct ftrace_func_probe *entry; | 3643 | struct ftrace_func_probe *entry; |
3641 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 3644 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
3642 | struct ftrace_hash *old_hash = *orig_hash; | 3645 | struct ftrace_hash *old_hash = *orig_hash; |
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3658 | 3661 | ||
3659 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | 3662 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
3660 | 3663 | ||
3664 | old_hash_ops.filter_hash = old_hash; | ||
3665 | /* Probes only have filters */ | ||
3666 | old_hash_ops.notrace_hash = NULL; | ||
3667 | |||
3661 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | 3668 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
3662 | if (!hash) { | 3669 | if (!hash) { |
3663 | count = -ENOMEM; | 3670 | count = -ENOMEM; |
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3718 | 3725 | ||
3719 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 3726 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); |
3720 | 3727 | ||
3721 | __enable_ftrace_function_probe(old_hash); | 3728 | __enable_ftrace_function_probe(&old_hash_ops); |
3722 | 3729 | ||
3723 | if (!ret) | 3730 | if (!ret) |
3724 | free_ftrace_hash_rcu(old_hash); | 3731 | free_ftrace_hash_rcu(old_hash); |
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
4006 | } | 4013 | } |
4007 | 4014 | ||
4008 | static void ftrace_ops_update_code(struct ftrace_ops *ops, | 4015 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
4009 | struct ftrace_hash *old_hash) | 4016 | struct ftrace_ops_hash *old_hash) |
4010 | { | 4017 | { |
4011 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | 4018 | struct ftrace_ops *op; |
4019 | |||
4020 | if (!ftrace_enabled) | ||
4021 | return; | ||
4022 | |||
4023 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | ||
4012 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | 4024 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); |
4025 | return; | ||
4026 | } | ||
4027 | |||
4028 | /* | ||
4029 | * If this is the shared global_ops filter, then we need to | ||
4030 | * check if there is another ops that shares it and is enabled. | ||
4031 | * If so, we still need to run the modify code. | ||
4032 | */ | ||
4033 | if (ops->func_hash != &global_ops.local_hash) | ||
4034 | return; | ||
4035 | |||
4036 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
4037 | if (op->func_hash == &global_ops.local_hash && | ||
4038 | op->flags & FTRACE_OPS_FL_ENABLED) { | ||
4039 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | ||
4040 | /* Only need to do this once */ | ||
4041 | return; | ||
4042 | } | ||
4043 | } while_for_each_ftrace_op(op); | ||
4013 | } | 4044 | } |
4014 | 4045 | ||
4015 | static int | 4046 | static int |
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
4017 | unsigned long ip, int remove, int reset, int enable) | 4048 | unsigned long ip, int remove, int reset, int enable) |
4018 | { | 4049 | { |
4019 | struct ftrace_hash **orig_hash; | 4050 | struct ftrace_hash **orig_hash; |
4051 | struct ftrace_ops_hash old_hash_ops; | ||
4020 | struct ftrace_hash *old_hash; | 4052 | struct ftrace_hash *old_hash; |
4021 | struct ftrace_hash *hash; | 4053 | struct ftrace_hash *hash; |
4022 | int ret; | 4054 | int ret; |
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
4053 | 4085 | ||
4054 | mutex_lock(&ftrace_lock); | 4086 | mutex_lock(&ftrace_lock); |
4055 | old_hash = *orig_hash; | 4087 | old_hash = *orig_hash; |
4088 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | ||
4089 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | ||
4056 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 4090 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
4057 | if (!ret) { | 4091 | if (!ret) { |
4058 | ftrace_ops_update_code(ops, old_hash); | 4092 | ftrace_ops_update_code(ops, &old_hash_ops); |
4059 | free_ftrace_hash_rcu(old_hash); | 4093 | free_ftrace_hash_rcu(old_hash); |
4060 | } | 4094 | } |
4061 | mutex_unlock(&ftrace_lock); | 4095 | mutex_unlock(&ftrace_lock); |
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void) | |||
4267 | int ftrace_regex_release(struct inode *inode, struct file *file) | 4301 | int ftrace_regex_release(struct inode *inode, struct file *file) |
4268 | { | 4302 | { |
4269 | struct seq_file *m = (struct seq_file *)file->private_data; | 4303 | struct seq_file *m = (struct seq_file *)file->private_data; |
4304 | struct ftrace_ops_hash old_hash_ops; | ||
4270 | struct ftrace_iterator *iter; | 4305 | struct ftrace_iterator *iter; |
4271 | struct ftrace_hash **orig_hash; | 4306 | struct ftrace_hash **orig_hash; |
4272 | struct ftrace_hash *old_hash; | 4307 | struct ftrace_hash *old_hash; |
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
4300 | 4335 | ||
4301 | mutex_lock(&ftrace_lock); | 4336 | mutex_lock(&ftrace_lock); |
4302 | old_hash = *orig_hash; | 4337 | old_hash = *orig_hash; |
4338 | old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; | ||
4339 | old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; | ||
4303 | ret = ftrace_hash_move(iter->ops, filter_hash, | 4340 | ret = ftrace_hash_move(iter->ops, filter_hash, |
4304 | orig_hash, iter->hash); | 4341 | orig_hash, iter->hash); |
4305 | if (!ret) { | 4342 | if (!ret) { |
4306 | ftrace_ops_update_code(iter->ops, old_hash); | 4343 | ftrace_ops_update_code(iter->ops, &old_hash_ops); |
4307 | free_ftrace_hash_rcu(old_hash); | 4344 | free_ftrace_hash_rcu(old_hash); |
4308 | } | 4345 | } |
4309 | mutex_unlock(&ftrace_lock); | 4346 | mutex_unlock(&ftrace_lock); |
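The thread running through these ftrace.c hunks: callers used to hand the code-modify path only the old filter hash, so anything consulting the old notrace hash during the transition saw NULL. They now snapshot both hashes into a struct ftrace_ops_hash. Paraphrased from include/linux/ftrace.h (hedged), with snapshot_old_hash() as a hypothetical helper mirroring what ftrace_set_hash() and ftrace_regex_release() now do inline:

struct ftrace_ops_hash {
	struct ftrace_hash *notrace_hash;	/* functions to skip */
	struct ftrace_hash *filter_hash;	/* functions to trace */
	struct mutex        regex_lock;
};

static void snapshot_old_hash(struct ftrace_ops *ops,
			      struct ftrace_ops_hash *old)
{
	/* Taken under ftrace_lock, before ftrace_hash_move() swaps in
	 * the new hashes; ftrace_run_modify_code() then publishes the
	 * snapshot through ops->old_hash for the code-modify window. */
	old->filter_hash  = ops->func_hash->filter_hash;
	old->notrace_hash = ops->func_hash->notrace_hash;
}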
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2e767972e99c..4a9079b9f082 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -6918,7 +6918,6 @@ void __init trace_init(void) | |||
6918 | tracepoint_printk = 0; | 6918 | tracepoint_printk = 0; |
6919 | } | 6919 | } |
6920 | tracer_alloc_buffers(); | 6920 | tracer_alloc_buffers(); |
6921 | init_ftrace_syscalls(); | ||
6922 | trace_event_init(); | 6921 | trace_event_init(); |
6923 | } | 6922 | } |
6924 | 6923 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 366a78a3e61e..b03a0ea77b99 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void) | |||
2429 | return 0; | 2429 | return 0; |
2430 | } | 2430 | } |
2431 | 2431 | ||
2432 | static __init void | ||
2433 | early_enable_events(struct trace_array *tr, bool disable_first) | ||
2434 | { | ||
2435 | char *buf = bootup_event_buf; | ||
2436 | char *token; | ||
2437 | int ret; | ||
2438 | |||
2439 | while (true) { | ||
2440 | token = strsep(&buf, ","); | ||
2441 | |||
2442 | if (!token) | ||
2443 | break; | ||
2444 | if (!*token) | ||
2445 | continue; | ||
2446 | |||
2447 | /* Restarting syscalls requires that we stop them first */ | ||
2448 | if (disable_first) | ||
2449 | ftrace_set_clr_event(tr, token, 0); | ||
2450 | |||
2451 | ret = ftrace_set_clr_event(tr, token, 1); | ||
2452 | if (ret) | ||
2453 | pr_warn("Failed to enable trace event: %s\n", token); | ||
2454 | |||
2455 | /* Put back the comma to allow this to be called again */ | ||
2456 | if (buf) | ||
2457 | *(buf - 1) = ','; | ||
2458 | } | ||
2459 | } | ||
2460 | |||
2432 | static __init int event_trace_enable(void) | 2461 | static __init int event_trace_enable(void) |
2433 | { | 2462 | { |
2434 | struct trace_array *tr = top_trace_array(); | 2463 | struct trace_array *tr = top_trace_array(); |
2435 | struct ftrace_event_call **iter, *call; | 2464 | struct ftrace_event_call **iter, *call; |
2436 | char *buf = bootup_event_buf; | ||
2437 | char *token; | ||
2438 | int ret; | 2465 | int ret; |
2439 | 2466 | ||
2440 | if (!tr) | 2467 | if (!tr) |
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void) | |||
2456 | */ | 2483 | */ |
2457 | __trace_early_add_events(tr); | 2484 | __trace_early_add_events(tr); |
2458 | 2485 | ||
2459 | while (true) { | 2486 | early_enable_events(tr, false); |
2460 | token = strsep(&buf, ","); | ||
2461 | |||
2462 | if (!token) | ||
2463 | break; | ||
2464 | if (!*token) | ||
2465 | continue; | ||
2466 | |||
2467 | ret = ftrace_set_clr_event(tr, token, 1); | ||
2468 | if (ret) | ||
2469 | pr_warn("Failed to enable trace event: %s\n", token); | ||
2470 | } | ||
2471 | 2487 | ||
2472 | trace_printk_start_comm(); | 2488 | trace_printk_start_comm(); |
2473 | 2489 | ||
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void) | |||
2478 | return 0; | 2494 | return 0; |
2479 | } | 2495 | } |
2480 | 2496 | ||
2497 | /* | ||
2498 | * event_trace_enable() is called from trace_event_init() first to | ||
2499 | * initialize events and perhaps start any events that are on the | ||
2500 | * command line. Unfortunately, there are some events that will not | ||
2501 | * start this early, like the system call tracepoints that need | ||
2502 | * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable() | ||
2503 | * is called before pid 1 starts, so this flag is never set and the | ||
2504 | * syscall tracepoints are never reached, even though the event itself | ||
2505 | * is enabled (and does nothing). | ||
2506 | */ | ||
2507 | static __init int event_trace_enable_again(void) | ||
2508 | { | ||
2509 | struct trace_array *tr; | ||
2510 | |||
2511 | tr = top_trace_array(); | ||
2512 | if (!tr) | ||
2513 | return -ENODEV; | ||
2514 | |||
2515 | early_enable_events(tr, true); | ||
2516 | |||
2517 | return 0; | ||
2518 | } | ||
2519 | |||
2520 | early_initcall(event_trace_enable_again); | ||
2521 | |||
2481 | static __init int event_trace_init(void) | 2522 | static __init int event_trace_init(void) |
2482 | { | 2523 | { |
2483 | struct trace_array *tr; | 2524 | struct trace_array *tr; |
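The non-obvious part of early_enable_events() is that strsep() consumes the buffer it parses, while event_trace_enable_again() must parse the very same bootup_event_buf a second time; writing the comma back after each token keeps the buffer intact. A runnable userspace sketch of the trick (names illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char bootup_event_buf[] = "sched:sched_switch,syscalls:sys_enter_read";
	char *buf = bootup_event_buf;
	char *token;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (!*token)
			continue;
		printf("enable: %s\n", token);
		/* strsep() overwrote the ',' with '\0'; restore it so
		 * a later pass can parse the same buffer again */
		if (buf)
			*(buf - 1) = ',';
	}
	printf("buffer after parsing: %s\n", bootup_event_buf);
	return 0;
}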
diff --git a/mm/memory.c b/mm/memory.c index c6565f00fb38..54f3a9b00956 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long | |||
235 | 235 | ||
236 | static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) | 236 | static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) |
237 | { | 237 | { |
238 | if (!tlb->end) | ||
239 | return; | ||
240 | |||
238 | tlb_flush(tlb); | 241 | tlb_flush(tlb); |
239 | mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); | 242 | mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); |
240 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | 243 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE |
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) | |||
247 | { | 250 | { |
248 | struct mmu_gather_batch *batch; | 251 | struct mmu_gather_batch *batch; |
249 | 252 | ||
250 | for (batch = &tlb->local; batch; batch = batch->next) { | 253 | for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { |
251 | free_pages_and_swap_cache(batch->pages, batch->nr); | 254 | free_pages_and_swap_cache(batch->pages, batch->nr); |
252 | batch->nr = 0; | 255 | batch->nr = 0; |
253 | } | 256 | } |
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) | |||
256 | 259 | ||
257 | void tlb_flush_mmu(struct mmu_gather *tlb) | 260 | void tlb_flush_mmu(struct mmu_gather *tlb) |
258 | { | 261 | { |
259 | if (!tlb->end) | ||
260 | return; | ||
261 | |||
262 | tlb_flush_mmu_tlbonly(tlb); | 262 | tlb_flush_mmu_tlbonly(tlb); |
263 | tlb_flush_mmu_free(tlb); | 263 | tlb_flush_mmu_free(tlb); |
264 | } | 264 | } |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 1f1de715197c..e2aa7be3a847 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
154 | dst = NULL; | 154 | dst = NULL; |
155 | 155 | ||
156 | if (is_broadcast_ether_addr(dest)) { | 156 | if (is_broadcast_ether_addr(dest)) { |
157 | if (p->flags & BR_PROXYARP && | 157 | if (IS_ENABLED(CONFIG_INET) && |
158 | p->flags & BR_PROXYARP && | ||
158 | skb->protocol == htons(ETH_P_ARP)) | 159 | skb->protocol == htons(ETH_P_ARP)) |
159 | br_do_proxy_arp(skb, br, vid); | 160 | br_do_proxy_arp(skb, br, vid); |
160 | 161 | ||
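The br_input.c change leans on IS_ENABLED() being a compile-time constant: with CONFIG_INET=n the condition folds to false, so the call to br_do_proxy_arp() (whose ARP helpers exist only under CONFIG_INET) becomes dead code the compiler discards rather than an unresolved symbol at link time. A hedged illustration of the pattern, with want_proxy_arp() as a hypothetical refactor:

#include <linux/kconfig.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_bridge.h>

static bool want_proxy_arp(const struct sk_buff *skb, unsigned long port_flags)
{
	/* IS_ENABLED(CONFIG_INET) expands to 0 or 1 at compile time,
	 * so on CONFIG_INET=n builds the whole && chain, and every
	 * callee guarded by it, vanishes from the object code */
	return IS_ENABLED(CONFIG_INET) &&
	       (port_flags & BR_PROXYARP) &&
	       skb->protocol == htons(ETH_P_ARP);
}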
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8e38f17288d3..8d614c93f86a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2043 | case NDTPA_BASE_REACHABLE_TIME: | 2043 | case NDTPA_BASE_REACHABLE_TIME: |
2044 | NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, | 2044 | NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, |
2045 | nla_get_msecs(tbp[i])); | 2045 | nla_get_msecs(tbp[i])); |
2046 | /* update reachable_time as well; otherwise the change only | ||
2047 | * takes effect the next time neigh_periodic_work decides to | ||
2048 | * recompute it (which can be multiple minutes away) | ||
2049 | */ | ||
2050 | p->reachable_time = | ||
2051 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); | ||
2046 | break; | 2052 | break; |
2047 | case NDTPA_GC_STALETIME: | 2053 | case NDTPA_GC_STALETIME: |
2048 | NEIGH_VAR_SET(p, GC_STALETIME, | 2054 | NEIGH_VAR_SET(p, GC_STALETIME, |
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, | |||
2921 | return ret; | 2927 | return ret; |
2922 | } | 2928 | } |
2923 | 2929 | ||
2930 | static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, | ||
2931 | void __user *buffer, | ||
2932 | size_t *lenp, loff_t *ppos) | ||
2933 | { | ||
2934 | struct neigh_parms *p = ctl->extra2; | ||
2935 | int ret; | ||
2936 | |||
2937 | if (strcmp(ctl->procname, "base_reachable_time") == 0) | ||
2938 | ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); | ||
2939 | else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) | ||
2940 | ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); | ||
2941 | else | ||
2942 | ret = -1; | ||
2943 | |||
2944 | if (write && ret == 0) { | ||
2945 | /* update reachable_time as well; otherwise the change only | ||
2946 | * takes effect the next time neigh_periodic_work decides | ||
2947 | * to recompute it | ||
2948 | */ | ||
2949 | p->reachable_time = | ||
2950 | neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); | ||
2951 | } | ||
2952 | return ret; | ||
2953 | } | ||
2954 | |||
2924 | #define NEIGH_PARMS_DATA_OFFSET(index) \ | 2955 | #define NEIGH_PARMS_DATA_OFFSET(index) \ |
2925 | (&((struct neigh_parms *) 0)->data[index]) | 2956 | (&((struct neigh_parms *) 0)->data[index]) |
2926 | 2957 | ||
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |||
3047 | t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; | 3078 | t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; |
3048 | /* ReachableTime (in milliseconds) */ | 3079 | /* ReachableTime (in milliseconds) */ |
3049 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; | 3080 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; |
3081 | } else { | ||
3082 | /* These handlers update p->reachable_time after | ||
3083 | * base_reachable_time(_ms) is set, so the new interval takes | ||
3084 | * effect from the next neighbour update instead of waiting for | ||
3085 | * neigh_periodic_work to recompute it (which can take multiple | ||
3086 | * minutes). Any handler that replaces them should do this too. | ||
3087 | */ | ||
3088 | /* ReachableTime */ | ||
3089 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = | ||
3090 | neigh_proc_base_reachable_time; | ||
3091 | /* ReachableTime (in milliseconds) */ | ||
3092 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = | ||
3093 | neigh_proc_base_reachable_time; | ||
3050 | } | 3094 | } |
3051 | 3095 | ||
3052 | /* Don't export sysctls to unprivileged users */ | 3096 | /* Don't export sysctls to unprivileged users */ |
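Both neighbour.c hunks exist because reachable_time is not derived from base_reachable_time on the fly; it is re-randomized only when neigh_periodic_work fires. Paraphrased from net/core/neighbour.c (hedged), the randomization picks a value in [base/2, 3*base/2), matching RFC 4861's ReachableTime jitter:

unsigned long neigh_rand_reach_time(unsigned long base)
{
	/* uniform in [base/2, 3*base/2); 0 disables */
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}

With the patch, writing base_reachable_time(_ms) through either sysctl or the NDTPA_BASE_REACHABLE_TIME netlink attribute re-runs this immediately, so the new interval applies from the next neighbour update instead of minutes later.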
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c index ff2d23d8c87a..6ecfce63201a 100644 --- a/net/ipv4/netfilter/nft_redir_ipv4.c +++ b/net/ipv4/netfilter/nft_redir_ipv4.c | |||
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, | |||
27 | 27 | ||
28 | memset(&mr, 0, sizeof(mr)); | 28 | memset(&mr, 0, sizeof(mr)); |
29 | if (priv->sreg_proto_min) { | 29 | if (priv->sreg_proto_min) { |
30 | mr.range[0].min.all = (__force __be16) | 30 | mr.range[0].min.all = |
31 | data[priv->sreg_proto_min].data[0]; | 31 | *(__be16 *)&data[priv->sreg_proto_min].data[0]; |
32 | mr.range[0].max.all = (__force __be16) | 32 | mr.range[0].max.all = |
33 | data[priv->sreg_proto_max].data[0]; | 33 | *(__be16 *)&data[priv->sreg_proto_max].data[0]; |
34 | mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 34 | mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
35 | } | 35 | } |
36 | 36 | ||
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c index 2433a6bfb191..11820b6b3613 100644 --- a/net/ipv6/netfilter/nft_redir_ipv6.c +++ b/net/ipv6/netfilter/nft_redir_ipv6.c | |||
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, | |||
27 | 27 | ||
28 | memset(&range, 0, sizeof(range)); | 28 | memset(&range, 0, sizeof(range)); |
29 | if (priv->sreg_proto_min) { | 29 | if (priv->sreg_proto_min) { |
30 | range.min_proto.all = (__force __be16) | 30 | range.min_proto.all = |
31 | data[priv->sreg_proto_min].data[0]; | 31 | *(__be16 *)&data[priv->sreg_proto_min].data[0]; |
32 | range.max_proto.all = (__force __be16) | 32 | range.max_proto.all = |
33 | data[priv->sreg_proto_max].data[0]; | 33 | *(__be16 *)&data[priv->sreg_proto_max].data[0]; |
34 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 34 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
35 | } | 35 | } |
36 | 36 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 1d5341f3761d..5d3daae98bf0 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
183 | struct nf_conn *ct; | 183 | struct nf_conn *ct; |
184 | struct net *net; | 184 | struct net *net; |
185 | 185 | ||
186 | *diff = 0; | ||
187 | |||
186 | #ifdef CONFIG_IP_VS_IPV6 | 188 | #ifdef CONFIG_IP_VS_IPV6 |
187 | /* This application helper doesn't work with IPv6 yet, | 189 | /* This application helper doesn't work with IPv6 yet, |
188 | * so turn this into a no-op for IPv6 packets | 190 | * so turn this into a no-op for IPv6 packets |
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
191 | return 1; | 193 | return 1; |
192 | #endif | 194 | #endif |
193 | 195 | ||
194 | *diff = 0; | ||
195 | |||
196 | /* Only useful for established sessions */ | 196 | /* Only useful for established sessions */ |
197 | if (cp->state != IP_VS_TCP_S_ESTABLISHED) | 197 | if (cp->state != IP_VS_TCP_S_ESTABLISHED) |
198 | return 1; | 198 | return 1; |
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
322 | struct ip_vs_conn *n_cp; | 322 | struct ip_vs_conn *n_cp; |
323 | struct net *net; | 323 | struct net *net; |
324 | 324 | ||
325 | /* no diff required for incoming packets */ | ||
326 | *diff = 0; | ||
327 | |||
325 | #ifdef CONFIG_IP_VS_IPV6 | 328 | #ifdef CONFIG_IP_VS_IPV6 |
326 | /* This application helper doesn't work with IPv6 yet, | 329 | /* This application helper doesn't work with IPv6 yet, |
327 | * so turn this into a no-op for IPv6 packets | 330 | * so turn this into a no-op for IPv6 packets |
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
330 | return 1; | 333 | return 1; |
331 | #endif | 334 | #endif |
332 | 335 | ||
333 | /* no diff required for incoming packets */ | ||
334 | *diff = 0; | ||
335 | |||
336 | /* Only useful for established sessions */ | 336 | /* Only useful for established sessions */ |
337 | if (cp->state != IP_VS_TCP_S_ESTABLISHED) | 337 | if (cp->state != IP_VS_TCP_S_ESTABLISHED) |
338 | return 1; | 338 | return 1; |
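The ip_vs_ftp.c change fixes a classic output-parameter bug: on the IPv6 path the helpers returned before *diff was assigned, and the caller then applied stack garbage to TCP sequence numbers. A runnable miniature of bug and fix (all names illustrative):

#include <stdio.h>

static int helper(int is_ipv6, int *diff)
{
	*diff = 0;	/* the fix: define the output before any early return */

	if (is_ipv6)
		return 1;	/* early no-op; used to leave *diff undefined */

	*diff = 42;	/* normal path computes a real delta */
	return 0;
}

int main(void)
{
	int diff;

	helper(1, &diff);
	printf("diff = %d\n", diff);	/* deterministically 0 after the fix */
	return 0;
}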
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a11674806707..46d1b26a468e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
611 | */ | 611 | */ |
612 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | 612 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); |
613 | pr_debug("Confirming conntrack %p\n", ct); | 613 | pr_debug("Confirming conntrack %p\n", ct); |
614 | /* We have to check the DYING flag inside the lock to prevent | 614 | /* We have to check the DYING flag after unlink to prevent |
615 | a race against nf_ct_get_next_corpse() possibly called from | 615 | * a race against nf_ct_get_next_corpse() possibly called from |
616 | user context, else we insert an already 'dead' hash, blocking | 616 | * user context, else we insert an already 'dead' hash, blocking |
617 | further use of that particular connection -JM */ | 617 | * further use of that particular connection -JM. |
618 | */ | ||
619 | nf_ct_del_from_dying_or_unconfirmed_list(ct); | ||
618 | 620 | ||
619 | if (unlikely(nf_ct_is_dying(ct))) { | 621 | if (unlikely(nf_ct_is_dying(ct))) |
620 | nf_conntrack_double_unlock(hash, reply_hash); | 622 | goto out; |
621 | local_bh_enable(); | ||
622 | return NF_ACCEPT; | ||
623 | } | ||
624 | 623 | ||
625 | /* See if there's one in the list already, including reverse: | 624 | /* See if there's one in the list already, including reverse: |
626 | NAT could have grabbed it without realizing, since we're | 625 | NAT could have grabbed it without realizing, since we're |
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
636 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) | 635 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
637 | goto out; | 636 | goto out; |
638 | 637 | ||
639 | nf_ct_del_from_dying_or_unconfirmed_list(ct); | ||
640 | |||
641 | /* Timer relative to confirmation time, not original | 638 | /* Timer relative to confirmation time, not original |
642 | setting time, otherwise we'd get timer wrap in | 639 | setting time, otherwise we'd get timer wrap in |
643 | weird delay cases. */ | 640 | weird delay cases. */ |
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
673 | return NF_ACCEPT; | 670 | return NF_ACCEPT; |
674 | 671 | ||
675 | out: | 672 | out: |
673 | nf_ct_add_to_dying_list(ct); | ||
676 | nf_conntrack_double_unlock(hash, reply_hash); | 674 | nf_conntrack_double_unlock(hash, reply_hash); |
677 | NF_CT_STAT_INC(net, insert_failed); | 675 | NF_CT_STAT_INC(net, insert_failed); |
678 | local_bh_enable(); | 676 | local_bh_enable(); |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 129a8daa4abf..3b3ddb4fb9ee 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx) | |||
713 | struct nft_chain *chain, *nc; | 713 | struct nft_chain *chain, *nc; |
714 | struct nft_set *set, *ns; | 714 | struct nft_set *set, *ns; |
715 | 715 | ||
716 | list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { | 716 | list_for_each_entry(chain, &ctx->table->chains, list) { |
717 | ctx->chain = chain; | 717 | ctx->chain = chain; |
718 | 718 | ||
719 | err = nft_delrule_by_chain(ctx); | 719 | err = nft_delrule_by_chain(ctx); |
720 | if (err < 0) | 720 | if (err < 0) |
721 | goto out; | 721 | goto out; |
722 | |||
723 | err = nft_delchain(ctx); | ||
724 | if (err < 0) | ||
725 | goto out; | ||
726 | } | 722 | } |
727 | 723 | ||
728 | list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { | 724 | list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { |
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx) | |||
735 | goto out; | 731 | goto out; |
736 | } | 732 | } |
737 | 733 | ||
734 | list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { | ||
735 | ctx->chain = chain; | ||
736 | |||
737 | err = nft_delchain(ctx); | ||
738 | if (err < 0) | ||
739 | goto out; | ||
740 | } | ||
741 | |||
738 | err = nft_deltable(ctx); | 742 | err = nft_deltable(ctx); |
739 | out: | 743 | out: |
740 | return err; | 744 | return err; |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index cde4a6702fa3..c421d94c4652 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -321,7 +321,8 @@ replay: | |||
321 | nlh = nlmsg_hdr(skb); | 321 | nlh = nlmsg_hdr(skb); |
322 | err = 0; | 322 | err = 0; |
323 | 323 | ||
324 | if (nlh->nlmsg_len < NLMSG_HDRLEN) { | 324 | if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || |
325 | skb->len < nlh->nlmsg_len) { | ||
325 | err = -EINVAL; | 326 | err = -EINVAL; |
326 | goto ack; | 327 | goto ack; |
327 | } | 328 | } |
@@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group) | |||
469 | int type; | 470 | int type; |
470 | 471 | ||
471 | if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) | 472 | if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) |
472 | return -EINVAL; | 473 | return 0; |
473 | 474 | ||
474 | type = nfnl_group2type[group]; | 475 | type = nfnl_group2type[group]; |
475 | 476 | ||
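The nfnetlink.c check is stronger than it looks because nlmsg_len() is the payload length, not the total message length (paraphrased from include/net/netlink.h):

static inline int nlmsg_len(const struct nlmsghdr *nlh)
{
	return nlh->nlmsg_len - NLMSG_HDRLEN;
}

So the new condition rejects batch messages whose payload cannot hold the struct nfgenmsg that every nfnetlink message begins with, and messages whose claimed nlmsg_len overruns the actual skb data; the old test only compared the total length against the bare netlink header size.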
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index afe2b0b45ec4..aff54fb1c8a0 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c | |||
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, | |||
65 | } | 65 | } |
66 | 66 | ||
67 | if (priv->sreg_proto_min) { | 67 | if (priv->sreg_proto_min) { |
68 | range.min_proto.all = (__force __be16) | 68 | range.min_proto.all = |
69 | data[priv->sreg_proto_min].data[0]; | 69 | *(__be16 *)&data[priv->sreg_proto_min].data[0]; |
70 | range.max_proto.all = (__force __be16) | 70 | range.max_proto.all = |
71 | data[priv->sreg_proto_max].data[0]; | 71 | *(__be16 *)&data[priv->sreg_proto_max].data[0]; |
72 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 72 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 4e9a5f035cbc..b07349e82d78 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
524 | struct vport *input_vport; | 524 | struct vport *input_vport; |
525 | int len; | 525 | int len; |
526 | int err; | 526 | int err; |
527 | bool log = !a[OVS_FLOW_ATTR_PROBE]; | 527 | bool log = !a[OVS_PACKET_ATTR_PROBE]; |
528 | 528 | ||
529 | err = -EINVAL; | 529 | err = -EINVAL; |
530 | if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || | 530 | if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || |
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { | |||
610 | [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, | 610 | [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, |
611 | [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, | 611 | [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, |
612 | [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, | 612 | [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, |
613 | [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, | ||
613 | }; | 614 | }; |
614 | 615 | ||
615 | static const struct genl_ops dp_packet_genl_ops[] = { | 616 | static const struct genl_ops dp_packet_genl_ops[] = { |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6880f34a529a..9cfe2e1dd8b5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2517 | err = -EINVAL; | 2517 | err = -EINVAL; |
2518 | if (sock->type == SOCK_DGRAM) { | 2518 | if (sock->type == SOCK_DGRAM) { |
2519 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); | 2519 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); |
2520 | if (unlikely(offset) < 0) | 2520 | if (unlikely(offset < 0)) |
2521 | goto out_free; | 2521 | goto out_free; |
2522 | } else { | 2522 | } else { |
2523 | if (ll_header_truncated(dev, len)) | 2523 | if (ll_header_truncated(dev, len)) |
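The af_packet.c fix deserves spelling out: unlikely() yields the logical value of its argument, so the old parenthesization could never be true. From <linux/compiler.h>:

#define unlikely(x) __builtin_expect(!!(x), 0)

unlikely(offset) is therefore !!offset, i.e. 0 or 1, and "unlikely(offset) < 0" is always false, meaning errors from dev_hard_header() were silently ignored; unlikely(offset < 0) tests the real result.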
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 96ceefeb9daf..a9e174fc0f91 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to) | |||
220 | struct sk_buff *skb; | 220 | struct sk_buff *skb; |
221 | 221 | ||
222 | skb_queue_walk(&bcl->outqueue, skb) { | 222 | skb_queue_walk(&bcl->outqueue, skb) { |
223 | if (more(buf_seqno(skb), after)) | 223 | if (more(buf_seqno(skb), after)) { |
224 | tipc_link_retransmit(bcl, skb, mod(to - after)); | ||
224 | break; | 225 | break; |
226 | } | ||
225 | } | 227 | } |
226 | tipc_link_retransmit(bcl, skb, mod(to - after)); | ||
227 | } | 228 | } |
228 | 229 | ||
229 | /** | 230 | /** |
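The tipc/bcast.c fix hinges on how skb_queue_walk() terminates (paraphrased from <linux/skbuff.h>):

#define skb_queue_walk(queue, skb)				\
	for (skb = (queue)->next;				\
	     skb != (struct sk_buff *)(queue);			\
	     skb = skb->next)

If the walk runs to completion, skb is left aliasing the queue head sentinel rather than a real buffer, so the old code could pass that sentinel to tipc_link_retransmit() whenever nothing past 'after' was found. Retransmitting from inside the loop, then breaking, guarantees skb is a genuine queue element.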
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index d273624c93a6..e238c9559caf 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c | |||
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags, | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static int check_execveat_invoked_rc(int fd, const char *path, int flags, | 64 | static int check_execveat_invoked_rc(int fd, const char *path, int flags, |
65 | int expected_rc) | 65 | int expected_rc, int expected_rc2) |
66 | { | 66 | { |
67 | int status; | 67 | int status; |
68 | int rc; | 68 | int rc; |
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, | |||
98 | child, status); | 98 | child, status); |
99 | return 1; | 99 | return 1; |
100 | } | 100 | } |
101 | if (WEXITSTATUS(status) != expected_rc) { | 101 | if ((WEXITSTATUS(status) != expected_rc) && |
102 | printf("[FAIL] (child %d exited with %d not %d)\n", | 102 | (WEXITSTATUS(status) != expected_rc2)) { |
103 | child, WEXITSTATUS(status), expected_rc); | 103 | printf("[FAIL] (child %d exited with %d not %d nor %d)\n", |
104 | child, WEXITSTATUS(status), expected_rc, expected_rc2); | ||
104 | return 1; | 105 | return 1; |
105 | } | 106 | } |
106 | printf("[OK]\n"); | 107 | printf("[OK]\n"); |
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, | |||
109 | 110 | ||
110 | static int check_execveat(int fd, const char *path, int flags) | 111 | static int check_execveat(int fd, const char *path, int flags) |
111 | { | 112 | { |
112 | return check_execveat_invoked_rc(fd, path, flags, 99); | 113 | return check_execveat_invoked_rc(fd, path, flags, 99, 99); |
113 | } | 114 | } |
114 | 115 | ||
115 | static char *concat(const char *left, const char *right) | 116 | static char *concat(const char *left, const char *right) |
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script) | |||
192 | * Execute as a long pathname relative to ".". If this is a script, | 193 | * Execute as a long pathname relative to ".". If this is a script, |
193 | * the interpreter will launch but fail to open the script because its | 194 | * the interpreter will launch but fail to open the script because its |
194 | * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. | 195 | * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. |
196 | * | ||
197 | * The failure code is usually 127 (POSIX: "If a command is not found, | ||
198 | * the exit status shall be 127."), but some systems give 126 (POSIX: | ||
199 | * "If the command name is found, but it is not an executable utility, | ||
200 | * the exit status shall be 126."), so allow either. | ||
195 | */ | 201 | */ |
196 | if (is_script) | 202 | if (is_script) |
197 | fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); | 203 | fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, |
204 | 127, 126); | ||
198 | else | 205 | else |
199 | fail += check_execveat(dot_dfd, longpath, 0); | 206 | fail += check_execveat(dot_dfd, longpath, 0); |
200 | 207 | ||
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c index 94dae65eea41..8519e9ee97e3 100644 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c | |||
@@ -536,10 +536,9 @@ int main(int argc, char *argv[]) | |||
536 | { | 536 | { |
537 | struct mq_attr attr; | 537 | struct mq_attr attr; |
538 | char *option, *next_option; | 538 | char *option, *next_option; |
539 | int i, cpu; | 539 | int i, cpu, rc; |
540 | struct sigaction sa; | 540 | struct sigaction sa; |
541 | poptContext popt_context; | 541 | poptContext popt_context; |
542 | char rc; | ||
543 | void *retval; | 542 | void *retval; |
544 | 543 | ||
545 | main_thread = pthread_self(); | 544 | main_thread = pthread_self(); |
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 4c4b1f631ecf..077828c889f1 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile | |||
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress | |||
7 | 7 | ||
8 | all: $(BINARIES) | 8 | all: $(BINARIES) |
9 | %: %.c | 9 | %: %.c |
10 | $(CC) $(CFLAGS) -o $@ $^ | 10 | $(CC) $(CFLAGS) -o $@ $^ -lrt |
11 | 11 | ||
12 | run_tests: all | 12 | run_tests: all |
13 | @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) | 13 | @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) |