329 files changed, 5670 insertions, 2501 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index ff335f8aeb39..92f30006adae 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -136,6 +136,19 @@ Sorting | |||
136 | .. kernel-doc:: lib/list_sort.c | 136 | .. kernel-doc:: lib/list_sort.c |
137 | :export: | 137 | :export: |
138 | 138 | ||
139 | Text Searching | ||
140 | -------------- | ||
141 | |||
142 | .. kernel-doc:: lib/textsearch.c | ||
143 | :doc: ts_intro | ||
144 | |||
145 | .. kernel-doc:: lib/textsearch.c | ||
146 | :export: | ||
147 | |||
148 | .. kernel-doc:: include/linux/textsearch.h | ||
149 | :functions: textsearch_find textsearch_next \ | ||
150 | textsearch_get_pattern textsearch_get_pattern_len | ||
151 | |||
139 | UUID/GUID | 152 | UUID/GUID |
140 | --------- | 153 | --------- |
141 | 154 | ||
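A minimal usage sketch of the textsearch API that the hunk above documents (illustrative only, not part of this patch; the "bm" algorithm name, the "magic" pattern and the buffer are arbitrary placeholders):

#include <linux/textsearch.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_search(const void *buf, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* Compile the pattern "magic" for the Boyer-Moore implementation. */
	conf = textsearch_prepare("bm", "magic", 5, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* Scan a linear buffer; UINT_MAX means "no match found". */
	pos = textsearch_find_continuous(conf, &state, buf, len);

	textsearch_destroy(conf);
	return pos == UINT_MAX ? -ENOENT : (int)pos;
}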
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index 1b596fd38dc4..b957acff57aa 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -49,19 +49,6 @@ on the SoC (only first trip points defined in DT will be configured): | |||
49 | - samsung,exynos5433-tmu: 8 | 49 | - samsung,exynos5433-tmu: 8 |
50 | - samsung,exynos7-tmu: 8 | 50 | - samsung,exynos7-tmu: 8 |
51 | 51 | ||
52 | Following properties are mandatory (depending on SoC): | ||
53 | - samsung,tmu_gain: Gain value for internal TMU operation. | ||
54 | - samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage | ||
55 | - samsung,tmu_noise_cancel_mode: Mode for noise cancellation | ||
56 | - samsung,tmu_efuse_value: Default level of temperature - it is needed when | ||
57 | in factory fusing produced wrong value | ||
58 | - samsung,tmu_min_efuse_value: Minimum temperature fused value | ||
59 | - samsung,tmu_max_efuse_value: Maximum temperature fused value | ||
60 | - samsung,tmu_first_point_trim: First point trimming value | ||
61 | - samsung,tmu_second_point_trim: Second point trimming value | ||
62 | - samsung,tmu_default_temp_offset: Default temperature offset | ||
63 | - samsung,tmu_cal_type: Callibration type | ||
64 | |||
65 | ** Optional properties: | 52 | ** Optional properties: |
66 | 53 | ||
67 | - vtmu-supply: This entry is optional and provides the regulator node supplying | 54 | - vtmu-supply: This entry is optional and provides the regulator node supplying |
@@ -78,7 +65,7 @@ Example 1): | |||
78 | clocks = <&clock 383>; | 65 | clocks = <&clock 383>; |
79 | clock-names = "tmu_apbif"; | 66 | clock-names = "tmu_apbif"; |
80 | vtmu-supply = <&tmu_regulator_node>; | 67 | vtmu-supply = <&tmu_regulator_node>; |
81 | #include "exynos4412-tmu-sensor-conf.dtsi" | 68 | #thermal-sensor-cells = <0>; |
82 | }; | 69 | }; |
83 | 70 | ||
84 | Example 2): | 71 | Example 2): |
@@ -89,7 +76,7 @@ Example 2): | |||
89 | interrupts = <0 58 0>; | 76 | interrupts = <0 58 0>; |
90 | clocks = <&clock 21>; | 77 | clocks = <&clock 21>; |
91 | clock-names = "tmu_apbif"; | 78 | clock-names = "tmu_apbif"; |
92 | #include "exynos5440-tmu-sensor-conf.dtsi" | 79 | #thermal-sensor-cells = <0>; |
93 | }; | 80 | }; |
94 | 81 | ||
95 | Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") | 82 | Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") |
@@ -99,7 +86,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") | |||
99 | interrupts = <0 184 0>; | 86 | interrupts = <0 184 0>; |
100 | clocks = <&clock 318>, <&clock 318>; | 87 | clocks = <&clock 318>, <&clock 318>; |
101 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 88 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
102 | #include "exynos4412-tmu-sensor-conf.dtsi" | 89 | #thermal-sensor-cells = <0>; |
103 | }; | 90 | }; |
104 | 91 | ||
105 | tmu_cpu3: tmu@1006c000 { | 92 | tmu_cpu3: tmu@1006c000 { |
@@ -108,7 +95,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") | |||
108 | interrupts = <0 185 0>; | 95 | interrupts = <0 185 0>; |
109 | clocks = <&clock 318>, <&clock 319>; | 96 | clocks = <&clock 318>, <&clock 319>; |
110 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 97 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
111 | #include "exynos4412-tmu-sensor-conf.dtsi" | 98 | #thermal-sensor-cells = <0>; |
112 | }; | 99 | }; |
113 | 100 | ||
114 | tmu_gpu: tmu@100a0000 { | 101 | tmu_gpu: tmu@100a0000 { |
@@ -117,7 +104,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") | |||
117 | interrupts = <0 215 0>; | 104 | interrupts = <0 215 0>; |
118 | clocks = <&clock 319>, <&clock 318>; | 105 | clocks = <&clock 319>, <&clock 318>; |
119 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 106 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
120 | #include "exynos4412-tmu-sensor-conf.dtsi" | 107 | #thermal-sensor-cells = <0>; |
121 | }; | 108 | }; |
122 | 109 | ||
123 | Note: For multi-instance tmu each instance should have an alias correctly | 110 | Note: For multi-instance tmu each instance should have an alias correctly |
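For context, a hedged sketch (not part of this patch, names made up) of the driver-side counterpart to the #thermal-sensor-cells property introduced above: a sensor driver registers with the thermal core so a thermal zone can reference it by phandle. Signatures are the of-thermal helpers as I understand them in this kernel version.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

static int example_get_temp(void *data, int *temp)
{
	*temp = 42000;	/* millidegrees Celsius, placeholder value */
	return 0;
}

static const struct thermal_zone_of_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static int example_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd;

	/* sensor id 0: #thermal-sensor-cells = <0> means no extra cells */
	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, NULL,
						   &example_ops);
	return PTR_ERR_OR_ZERO(tzd);
}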
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 1719d47a5e2f..cc553f0952c5 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,8 +55,7 @@ of heat dissipation). For example a fan's cooling states correspond to | |||
55 | the different fan speeds possible. Cooling states are referred to by | 55 | the different fan speeds possible. Cooling states are referred to by |
56 | single unsigned integers, where larger numbers mean greater heat | 56 | single unsigned integers, where larger numbers mean greater heat |
57 | dissipation. The precise set of cooling states associated with a device | 57 | dissipation. The precise set of cooling states associated with a device |
58 | (as referred to by the cooling-min-level and cooling-max-level | 58 | should be defined in a particular device's binding. |
59 | properties) should be defined in a particular device's binding. | ||
60 | For more examples of cooling devices, refer to the example sections below. | 59 | For more examples of cooling devices, refer to the example sections below. |
61 | 60 | ||
62 | Required properties: | 61 | Required properties: |
@@ -69,15 +68,6 @@ Required properties: | |||
69 | See Cooling device maps section below for more details | 68 | See Cooling device maps section below for more details |
70 | on how consumers refer to cooling devices. | 69 | on how consumers refer to cooling devices. |
71 | 70 | ||
72 | Optional properties: | ||
73 | - cooling-min-level: An integer indicating the smallest | ||
74 | Type: unsigned cooling state accepted. Typically 0. | ||
75 | Size: one cell | ||
76 | |||
77 | - cooling-max-level: An integer indicating the largest | ||
78 | Type: unsigned cooling state accepted. | ||
79 | Size: one cell | ||
80 | |||
81 | * Trip points | 71 | * Trip points |
82 | 72 | ||
83 | The trip node is a node to describe a point in the temperature domain | 73 | The trip node is a node to describe a point in the temperature domain |
@@ -226,8 +216,6 @@ cpus { | |||
226 | 396000 950000 | 216 | 396000 950000 |
227 | 198000 850000 | 217 | 198000 850000 |
228 | >; | 218 | >; |
229 | cooling-min-level = <0>; | ||
230 | cooling-max-level = <3>; | ||
231 | #cooling-cells = <2>; /* min followed by max */ | 219 | #cooling-cells = <2>; /* min followed by max */ |
232 | }; | 220 | }; |
233 | ... | 221 | ... |
@@ -241,8 +229,6 @@ cpus { | |||
241 | */ | 229 | */ |
242 | fan0: fan@48 { | 230 | fan0: fan@48 { |
243 | ... | 231 | ... |
244 | cooling-min-level = <0>; | ||
245 | cooling-max-level = <9>; | ||
246 | #cooling-cells = <2>; /* min followed by max */ | 232 | #cooling-cells = <2>; /* min followed by max */ |
247 | }; | 233 | }; |
248 | }; | 234 | }; |
diff --git a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
new file mode 100644
index 000000000000..ea22dfe485be
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
@@ -0,0 +1,21 @@ | |||
1 | Nuvoton NPCM7xx timer | ||
2 | |||
3 | Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit | ||
4 | timer counters. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible : "nuvoton,npcm750-timer" for Poleg NPCM750. | ||
8 | - reg : Offset and length of the register set for the device. | ||
9 | - interrupts : Contain the timer interrupt with flags for | ||
10 | falling edge. | ||
11 | - clocks : phandle of timer reference clock (usually a 25 MHz clock). | ||
12 | |||
13 | Example: | ||
14 | |||
15 | timer@f0008000 { | ||
16 | compatible = "nuvoton,npcm750-timer"; | ||
17 | interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; | ||
18 | reg = <0xf0008000 0x50>; | ||
19 | clocks = <&clk NPCM7XX_CLK_TIMER>; | ||
20 | }; | ||
21 | |||
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
index b4aa7ddb5b13..f82087b220f4 100644
--- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
@@ -15,7 +15,7 @@ Required properties: | |||
15 | - interrupts : Should be the clock event device interrupt. | 15 | - interrupts : Should be the clock event device interrupt. |
16 | - clocks : The clocks provided by the SoC to drive the timer, must contain | 16 | - clocks : The clocks provided by the SoC to drive the timer, must contain |
17 | an entry for each entry in clock-names. | 17 | an entry for each entry in clock-names. |
18 | - clock-names : Must include the following entries: "igp" and "per". | 18 | - clock-names : Must include the following entries: "ipg" and "per". |
19 | 19 | ||
20 | Example: | 20 | Example: |
21 | tpm5: tpm@40260000 { | 21 | tpm5: tpm@40260000 { |
diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.txt
index 89c66634d600..ecc09a7be5dd 100644
--- a/Documentation/livepatch/shadow-vars.txt
+++ b/Documentation/livepatch/shadow-vars.txt
@@ -34,9 +34,13 @@ meta-data and shadow-data: | |||
34 | - data[] - storage for shadow data | 34 | - data[] - storage for shadow data |
35 | 35 | ||
36 | It is important to note that the klp_shadow_alloc() and | 36 | It is important to note that the klp_shadow_alloc() and |
37 | klp_shadow_get_or_alloc() calls, described below, store a *copy* of the | 37 | klp_shadow_get_or_alloc() are zeroing the variable by default. |
38 | data that the functions are provided. Callers should provide whatever | 38 | They also allow to call a custom constructor function when a non-zero |
39 | mutual exclusion is required of the shadow data. | 39 | value is needed. Callers should provide whatever mutual exclusion |
40 | is required. | ||
41 | |||
42 | Note that the constructor is called under klp_shadow_lock spinlock. It allows | ||
43 | to do actions that can be done only once when a new variable is allocated. | ||
40 | 44 | ||
41 | * klp_shadow_get() - retrieve a shadow variable data pointer | 45 | * klp_shadow_get() - retrieve a shadow variable data pointer |
42 | - search hashtable for <obj, id> pair | 46 | - search hashtable for <obj, id> pair |
@@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data. | |||
47 | - WARN and return NULL | 51 | - WARN and return NULL |
48 | - if <obj, id> doesn't already exist | 52 | - if <obj, id> doesn't already exist |
49 | - allocate a new shadow variable | 53 | - allocate a new shadow variable |
50 | - copy data into the new shadow variable | 54 | - initialize the variable using a custom constructor and data when provided |
51 | - add <obj, id> to the global hashtable | 55 | - add <obj, id> to the global hashtable |
52 | 56 | ||
53 | * klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable | 57 | * klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable |
@@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data. | |||
56 | - return existing shadow variable | 60 | - return existing shadow variable |
57 | - if <obj, id> doesn't already exist | 61 | - if <obj, id> doesn't already exist |
58 | - allocate a new shadow variable | 62 | - allocate a new shadow variable |
59 | - copy data into the new shadow variable | 63 | - initialize the variable using a custom constructor and data when provided |
60 | - add <obj, id> pair to the global hashtable | 64 | - add <obj, id> pair to the global hashtable |
61 | 65 | ||
62 | * klp_shadow_free() - detach and free a <obj, id> shadow variable | 66 | * klp_shadow_free() - detach and free a <obj, id> shadow variable |
63 | - find and remove a <obj, id> reference from global hashtable | 67 | - find and remove a <obj, id> reference from global hashtable |
64 | - if found, free shadow variable | 68 | - if found |
69 | - call destructor function if defined | ||
70 | - free shadow variable | ||
65 | 71 | ||
66 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables | 72 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables |
67 | - find and remove any <*, id> references from global hashtable | 73 | - find and remove any <*, id> references from global hashtable |
68 | - if found, free shadow variable | 74 | - if found |
75 | - call destructor function if defined | ||
76 | - free shadow variable | ||
69 | 77 | ||
70 | 78 | ||
71 | 2. Use cases | 79 | 2. Use cases |
@@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
107 | sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); | 115 | sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); |
108 | 116 | ||
109 | /* Attach a corresponding shadow variable, then initialize it */ | 117 | /* Attach a corresponding shadow variable, then initialize it */ |
110 | ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp); | 118 | ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp, |
119 | NULL, NULL); | ||
111 | if (!ps_lock) | 120 | if (!ps_lock) |
112 | goto shadow_fail; | 121 | goto shadow_fail; |
113 | spin_lock_init(ps_lock); | 122 | spin_lock_init(ps_lock); |
@@ -131,7 +140,7 @@ variable: | |||
131 | 140 | ||
132 | void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) | 141 | void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) |
133 | { | 142 | { |
134 | klp_shadow_free(sta, PS_LOCK); | 143 | klp_shadow_free(sta, PS_LOCK, NULL); |
135 | kfree(sta); | 144 | kfree(sta); |
136 | ... | 145 | ... |
137 | 146 | ||
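The new third argument of klp_shadow_free() is the optional destructor described earlier in this file. A hedged sketch (not part of the patch) of passing one, assuming the callback takes (obj, shadow_data) in the same spirit as the constructor shown further below:

static void ps_lock_shadow_dtor(void *obj, void *shadow_data)
{
	/* release anything the shadow data owns before it is freed */
}

void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
	klp_shadow_free(sta, PS_LOCK, ps_lock_shadow_dtor);
	kfree(sta);
	...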
@@ -148,16 +157,24 @@ shadow variables to parents already in-flight. | |||
148 | For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is | 157 | For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is |
149 | inside ieee80211_sta_ps_deliver_wakeup(): | 158 | inside ieee80211_sta_ps_deliver_wakeup(): |
150 | 159 | ||
160 | int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data) | ||
161 | { | ||
162 | spinlock_t *lock = shadow_data; | ||
163 | |||
164 | spin_lock_init(lock); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
151 | #define PS_LOCK 1 | 168 | #define PS_LOCK 1 |
152 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | 169 | void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) |
153 | { | 170 | { |
154 | DEFINE_SPINLOCK(ps_lock_fallback); | ||
155 | spinlock_t *ps_lock; | 171 | spinlock_t *ps_lock; |
156 | 172 | ||
157 | /* sync with ieee80211_tx_h_unicast_ps_buf */ | 173 | /* sync with ieee80211_tx_h_unicast_ps_buf */ |
158 | ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK, | 174 | ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK, |
159 | &ps_lock_fallback, sizeof(ps_lock_fallback), | 175 | sizeof(*ps_lock), GFP_ATOMIC, |
160 | GFP_ATOMIC); | 176 | ps_lock_shadow_ctor, NULL); |
177 | |||
161 | if (ps_lock) | 178 | if (ps_lock) |
162 | spin_lock(ps_lock); | 179 | spin_lock(ps_lock); |
163 | ... | 180 | ... |
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index a4508ec1816b..fd55c7de9991 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -169,7 +169,7 @@ access to BPF code as well. | |||
169 | BPF engine and instruction set | 169 | BPF engine and instruction set |
170 | ------------------------------ | 170 | ------------------------------ |
171 | 171 | ||
172 | Under tools/net/ there's a small helper tool called bpf_asm which can | 172 | Under tools/bpf/ there's a small helper tool called bpf_asm which can |
173 | be used to write low-level filters for example scenarios mentioned in the | 173 | be used to write low-level filters for example scenarios mentioned in the |
174 | previous section. Asm-like syntax mentioned here has been implemented in | 174 | previous section. Asm-like syntax mentioned here has been implemented in |
175 | bpf_asm and will be used for further explanations (instead of dealing with | 175 | bpf_asm and will be used for further explanations (instead of dealing with |
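For context, a small user-space sketch (not part of this patch) of how a classic BPF program -- for instance one produced with tools/bpf/bpf_asm -- is attached to a socket, following the pattern filter.txt describes; the single "ret #0xffff" instruction below simply accepts every packet:

#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	struct sock_filter code[] = {
		{ 0x06, 0, 0, 0x0000ffff },	/* ret #0xffff: accept packet */
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}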
@@ -359,7 +359,7 @@ $ ./bpf_asm -c foo | |||
359 | In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF | 359 | In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF |
360 | filters that might not be obvious at first, it's good to test filters before | 360 | filters that might not be obvious at first, it's good to test filters before |
361 | attaching to a live system. For that purpose, there's a small tool called | 361 | attaching to a live system. For that purpose, there's a small tool called |
362 | bpf_dbg under tools/net/ in the kernel source directory. This debugger allows | 362 | bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows |
363 | for testing BPF filters against given pcap files, single stepping through the | 363 | for testing BPF filters against given pcap files, single stepping through the |
364 | BPF code on the pcap's packets and to do BPF machine register dumps. | 364 | BPF code on the pcap's packets and to do BPF machine register dumps. |
365 | 365 | ||
@@ -483,7 +483,7 @@ Example output from dmesg: | |||
483 | [ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00 | 483 | [ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00 |
484 | [ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3 | 484 | [ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3 |
485 | 485 | ||
486 | In the kernel source tree under tools/net/, there's bpf_jit_disasm for | 486 | In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for |
487 | generating disassembly out of the kernel log's hexdump: | 487 | generating disassembly out of the kernel log's hexdump: |
488 | 488 | ||
489 | # ./bpf_jit_disasm | 489 | # ./bpf_jit_disasm |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5dc1a040a2f1..b583a73cf95f 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1390,26 +1390,26 @@ mld_qrv - INTEGER | |||
1390 | Default: 2 (as specified by RFC3810 9.1) | 1390 | Default: 2 (as specified by RFC3810 9.1) |
1391 | Minimum: 1 (as specified by RFC6636 4.5) | 1391 | Minimum: 1 (as specified by RFC6636 4.5) |
1392 | 1392 | ||
1393 | max_dst_opts_cnt - INTEGER | 1393 | max_dst_opts_number - INTEGER |
1394 | Maximum number of non-padding TLVs allowed in a Destination | 1394 | Maximum number of non-padding TLVs allowed in a Destination |
1395 | options extension header. If this value is less than zero | 1395 | options extension header. If this value is less than zero |
1396 | then unknown options are disallowed and the number of known | 1396 | then unknown options are disallowed and the number of known |
1397 | TLVs allowed is the absolute value of this number. | 1397 | TLVs allowed is the absolute value of this number. |
1398 | Default: 8 | 1398 | Default: 8 |
1399 | 1399 | ||
1400 | max_hbh_opts_cnt - INTEGER | 1400 | max_hbh_opts_number - INTEGER |
1401 | Maximum number of non-padding TLVs allowed in a Hop-by-Hop | 1401 | Maximum number of non-padding TLVs allowed in a Hop-by-Hop |
1402 | options extension header. If this value is less than zero | 1402 | options extension header. If this value is less than zero |
1403 | then unknown options are disallowed and the number of known | 1403 | then unknown options are disallowed and the number of known |
1404 | TLVs allowed is the absolute value of this number. | 1404 | TLVs allowed is the absolute value of this number. |
1405 | Default: 8 | 1405 | Default: 8 |
1406 | 1406 | ||
1407 | max dst_opts_len - INTEGER | 1407 | max_dst_opts_length - INTEGER |
1408 | Maximum length allowed for a Destination options extension | 1408 | Maximum length allowed for a Destination options extension |
1409 | header. | 1409 | header. |
1410 | Default: INT_MAX (unlimited) | 1410 | Default: INT_MAX (unlimited) |
1411 | 1411 | ||
1412 | max hbh_opts_len - INTEGER | 1412 | max_hbh_length - INTEGER |
1413 | Maximum length allowed for a Hop-by-Hop options extension | 1413 | Maximum length allowed for a Hop-by-Hop options extension |
1414 | header. | 1414 | header. |
1415 | Default: INT_MAX (unlimited) | 1415 | Default: INT_MAX (unlimited) |
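As a side note (not part of the patch), the renamed knobs are ordinary sysctls; assuming they live under the usual /proc/sys/net/ipv6/ path, setting one from user space is a plain procfs write, with 8 mirroring the documented default:

#include <stdio.h>

static int set_max_dst_opts(void)
{
	FILE *f = fopen("/proc/sys/net/ipv6/max_dst_opts_number", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", 8);
	return fclose(f);
}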
diff --git a/MAINTAINERS b/MAINTAINERS
index 0a1410d5a621..92be777d060a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/ | |||
1373 | F: drivers/net/ethernet/amd/am79c961a.* | 1373 | F: drivers/net/ethernet/amd/am79c961a.* |
1374 | 1374 | ||
1375 | ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT | 1375 | ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT |
1376 | M: Uwe Kleine-König <kernel@pengutronix.de> | 1376 | M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> |
1377 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
1377 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1378 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1378 | S: Maintained | 1379 | S: Maintained |
1379 | N: efm32 | 1380 | N: efm32 |
@@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/ | |||
1401 | 1402 | ||
1402 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE | 1403 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE |
1403 | M: Shawn Guo <shawnguo@kernel.org> | 1404 | M: Shawn Guo <shawnguo@kernel.org> |
1404 | M: Sascha Hauer <kernel@pengutronix.de> | 1405 | M: Sascha Hauer <s.hauer@pengutronix.de> |
1406 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
1405 | R: Fabio Estevam <fabio.estevam@nxp.com> | 1407 | R: Fabio Estevam <fabio.estevam@nxp.com> |
1406 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1408 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1407 | S: Maintained | 1409 | S: Maintained |
@@ -1416,7 +1418,8 @@ F: include/soc/imx/ | |||
1416 | 1418 | ||
1417 | ARM/FREESCALE VYBRID ARM ARCHITECTURE | 1419 | ARM/FREESCALE VYBRID ARM ARCHITECTURE |
1418 | M: Shawn Guo <shawnguo@kernel.org> | 1420 | M: Shawn Guo <shawnguo@kernel.org> |
1419 | M: Sascha Hauer <kernel@pengutronix.de> | 1421 | M: Sascha Hauer <s.hauer@pengutronix.de> |
1422 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
1420 | R: Stefan Agner <stefan@agner.ch> | 1423 | R: Stefan Agner <stefan@agner.ch> |
1421 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1424 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1422 | S: Maintained | 1425 | S: Maintained |
@@ -4245,6 +4248,9 @@ F: include/trace/events/fs_dax.h | |||
4245 | 4248 | ||
4246 | DEVICE DIRECT ACCESS (DAX) | 4249 | DEVICE DIRECT ACCESS (DAX) |
4247 | M: Dan Williams <dan.j.williams@intel.com> | 4250 | M: Dan Williams <dan.j.williams@intel.com> |
4251 | M: Dave Jiang <dave.jiang@intel.com> | ||
4252 | M: Ross Zwisler <ross.zwisler@linux.intel.com> | ||
4253 | M: Vishal Verma <vishal.l.verma@intel.com> | ||
4248 | L: linux-nvdimm@lists.01.org | 4254 | L: linux-nvdimm@lists.01.org |
4249 | S: Supported | 4255 | S: Supported |
4250 | F: drivers/dax/ | 4256 | F: drivers/dax/ |
@@ -5652,7 +5658,8 @@ F: drivers/net/ethernet/freescale/fec.h | |||
5652 | F: Documentation/devicetree/bindings/net/fsl-fec.txt | 5658 | F: Documentation/devicetree/bindings/net/fsl-fec.txt |
5653 | 5659 | ||
5654 | FREESCALE IMX / MXC FRAMEBUFFER DRIVER | 5660 | FREESCALE IMX / MXC FRAMEBUFFER DRIVER |
5655 | M: Sascha Hauer <kernel@pengutronix.de> | 5661 | M: Sascha Hauer <s.hauer@pengutronix.de> |
5662 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
5656 | L: linux-fbdev@vger.kernel.org | 5663 | L: linux-fbdev@vger.kernel.org |
5657 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 5664 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
5658 | S: Maintained | 5665 | S: Maintained |
@@ -5784,6 +5791,14 @@ F: fs/crypto/ | |||
5784 | F: include/linux/fscrypt*.h | 5791 | F: include/linux/fscrypt*.h |
5785 | F: Documentation/filesystems/fscrypt.rst | 5792 | F: Documentation/filesystems/fscrypt.rst |
5786 | 5793 | ||
5794 | FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE | ||
5795 | M: Jan Kara <jack@suse.cz> | ||
5796 | R: Amir Goldstein <amir73il@gmail.com> | ||
5797 | L: linux-fsdevel@vger.kernel.org | ||
5798 | S: Maintained | ||
5799 | F: fs/notify/ | ||
5800 | F: include/linux/fsnotify*.h | ||
5801 | |||
5787 | FUJITSU LAPTOP EXTRAS | 5802 | FUJITSU LAPTOP EXTRAS |
5788 | M: Jonathan Woithe <jwoithe@just42.net> | 5803 | M: Jonathan Woithe <jwoithe@just42.net> |
5789 | L: platform-driver-x86@vger.kernel.org | 5804 | L: platform-driver-x86@vger.kernel.org |
@@ -6256,7 +6271,7 @@ S: Odd Fixes | |||
6256 | F: drivers/media/usb/hdpvr/ | 6271 | F: drivers/media/usb/hdpvr/ |
6257 | 6272 | ||
6258 | HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER | 6273 | HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER |
6259 | M: Jimmy Vance <jimmy.vance@hpe.com> | 6274 | M: Jerry Hoemann <jerry.hoemann@hpe.com> |
6260 | S: Supported | 6275 | S: Supported |
6261 | F: Documentation/watchdog/hpwdt.txt | 6276 | F: Documentation/watchdog/hpwdt.txt |
6262 | F: drivers/watchdog/hpwdt.c | 6277 | F: drivers/watchdog/hpwdt.c |
@@ -8048,6 +8063,9 @@ F: tools/lib/lockdep/ | |||
8048 | 8063 | ||
8049 | LIBNVDIMM BLK: MMIO-APERTURE DRIVER | 8064 | LIBNVDIMM BLK: MMIO-APERTURE DRIVER |
8050 | M: Ross Zwisler <ross.zwisler@linux.intel.com> | 8065 | M: Ross Zwisler <ross.zwisler@linux.intel.com> |
8066 | M: Dan Williams <dan.j.williams@intel.com> | ||
8067 | M: Vishal Verma <vishal.l.verma@intel.com> | ||
8068 | M: Dave Jiang <dave.jiang@intel.com> | ||
8051 | L: linux-nvdimm@lists.01.org | 8069 | L: linux-nvdimm@lists.01.org |
8052 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ | 8070 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ |
8053 | S: Supported | 8071 | S: Supported |
@@ -8056,6 +8074,9 @@ F: drivers/nvdimm/region_devs.c | |||
8056 | 8074 | ||
8057 | LIBNVDIMM BTT: BLOCK TRANSLATION TABLE | 8075 | LIBNVDIMM BTT: BLOCK TRANSLATION TABLE |
8058 | M: Vishal Verma <vishal.l.verma@intel.com> | 8076 | M: Vishal Verma <vishal.l.verma@intel.com> |
8077 | M: Dan Williams <dan.j.williams@intel.com> | ||
8078 | M: Ross Zwisler <ross.zwisler@linux.intel.com> | ||
8079 | M: Dave Jiang <dave.jiang@intel.com> | ||
8059 | L: linux-nvdimm@lists.01.org | 8080 | L: linux-nvdimm@lists.01.org |
8060 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ | 8081 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ |
8061 | S: Supported | 8082 | S: Supported |
@@ -8063,6 +8084,9 @@ F: drivers/nvdimm/btt* | |||
8063 | 8084 | ||
8064 | LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER | 8085 | LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER |
8065 | M: Ross Zwisler <ross.zwisler@linux.intel.com> | 8086 | M: Ross Zwisler <ross.zwisler@linux.intel.com> |
8087 | M: Dan Williams <dan.j.williams@intel.com> | ||
8088 | M: Vishal Verma <vishal.l.verma@intel.com> | ||
8089 | M: Dave Jiang <dave.jiang@intel.com> | ||
8066 | L: linux-nvdimm@lists.01.org | 8090 | L: linux-nvdimm@lists.01.org |
8067 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ | 8091 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ |
8068 | S: Supported | 8092 | S: Supported |
@@ -8078,6 +8102,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt | |||
8078 | 8102 | ||
8079 | LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM | 8103 | LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM |
8080 | M: Dan Williams <dan.j.williams@intel.com> | 8104 | M: Dan Williams <dan.j.williams@intel.com> |
8105 | M: Ross Zwisler <ross.zwisler@linux.intel.com> | ||
8106 | M: Vishal Verma <vishal.l.verma@intel.com> | ||
8107 | M: Dave Jiang <dave.jiang@intel.com> | ||
8081 | L: linux-nvdimm@lists.01.org | 8108 | L: linux-nvdimm@lists.01.org |
8082 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ | 8109 | Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ |
8083 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git | 8110 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git |
@@ -9765,6 +9792,7 @@ F: include/uapi/linux/net_namespace.h | |||
9765 | F: tools/testing/selftests/net/ | 9792 | F: tools/testing/selftests/net/ |
9766 | F: lib/net_utils.c | 9793 | F: lib/net_utils.c |
9767 | F: lib/random32.c | 9794 | F: lib/random32.c |
9795 | F: Documentation/networking/ | ||
9768 | 9796 | ||
9769 | NETWORKING [IPSEC] | 9797 | NETWORKING [IPSEC] |
9770 | M: Steffen Klassert <steffen.klassert@secunet.com> | 9798 | M: Steffen Klassert <steffen.klassert@secunet.com> |
@@ -12816,7 +12844,8 @@ F: include/linux/siphash.h | |||
12816 | 12844 | ||
12817 | SIOX | 12845 | SIOX |
12818 | M: Gavin Schenk <g.schenk@eckelmann.de> | 12846 | M: Gavin Schenk <g.schenk@eckelmann.de> |
12819 | M: Uwe Kleine-König <kernel@pengutronix.de> | 12847 | M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> |
12848 | R: Pengutronix Kernel Team <kernel@pengutronix.de> | ||
12820 | S: Supported | 12849 | S: Supported |
12821 | F: drivers/siox/* | 12850 | F: drivers/siox/* |
12822 | F: include/trace/events/siox.h | 12851 | F: include/trace/events/siox.h |
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 17 | 3 | PATCHLEVEL = 17 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc1 | 5 | EXTRAVERSION = -rc2 |
6 | NAME = Fearless Coyote | 6 | NAME = Fearless Coyote |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ba964da31a25..1cb2749a72bf 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -366,7 +366,7 @@ void force_signal_inject(int signal, int code, unsigned long address) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | /* Force signals we don't understand to SIGKILL */ | 368 | /* Force signals we don't understand to SIGKILL */ |
369 | if (WARN_ON(signal != SIGKILL || | 369 | if (WARN_ON(signal != SIGKILL && |
370 | siginfo_layout(signal, code) != SIL_FAULT)) { | 370 | siginfo_layout(signal, code) != SIL_FAULT)) { |
371 | signal = SIGKILL; | 371 | signal = SIGKILL; |
372 | } | 372 | } |
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index dabfc1ecda3d..12145874c02b 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -204,7 +204,7 @@ void __init kasan_init(void) | |||
204 | clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); | 204 | clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); |
205 | 205 | ||
206 | kasan_map_populate(kimg_shadow_start, kimg_shadow_end, | 206 | kasan_map_populate(kimg_shadow_start, kimg_shadow_end, |
207 | pfn_to_nid(virt_to_pfn(lm_alias(_text)))); | 207 | early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); |
208 | 208 | ||
209 | kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, | 209 | kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, |
210 | (void *)mod_shadow_start); | 210 | (void *)mod_shadow_start); |
@@ -224,7 +224,7 @@ void __init kasan_init(void) | |||
224 | 224 | ||
225 | kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), | 225 | kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), |
226 | (unsigned long)kasan_mem_to_shadow(end), | 226 | (unsigned long)kasan_mem_to_shadow(end), |
227 | pfn_to_nid(virt_to_pfn(start))); | 227 | early_pfn_to_nid(virt_to_pfn(start))); |
228 | } | 228 | } |
229 | 229 | ||
230 | /* | 230 | /* |
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 1bd105428f61..65af3f6ba81c 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -51,6 +51,8 @@ | |||
51 | ranges = <0x02000000 0 0x40000000 | 51 | ranges = <0x02000000 0 0x40000000 |
52 | 0x40000000 0 0x40000000>; | 52 | 0x40000000 0 0x40000000>; |
53 | 53 | ||
54 | bus-range = <0x00 0xff>; | ||
55 | |||
54 | interrupt-map-mask = <0 0 0 7>; | 56 | interrupt-map-mask = <0 0 0 7>; |
55 | interrupt-map = <0 0 0 1 &pci0_intc 1>, | 57 | interrupt-map = <0 0 0 1 &pci0_intc 1>, |
56 | <0 0 0 2 &pci0_intc 2>, | 58 | <0 0 0 2 &pci0_intc 2>, |
@@ -79,6 +81,8 @@ | |||
79 | ranges = <0x02000000 0 0x20000000 | 81 | ranges = <0x02000000 0 0x20000000 |
80 | 0x20000000 0 0x20000000>; | 82 | 0x20000000 0 0x20000000>; |
81 | 83 | ||
84 | bus-range = <0x00 0xff>; | ||
85 | |||
82 | interrupt-map-mask = <0 0 0 7>; | 86 | interrupt-map-mask = <0 0 0 7>; |
83 | interrupt-map = <0 0 0 1 &pci1_intc 1>, | 87 | interrupt-map = <0 0 0 1 &pci1_intc 1>, |
84 | <0 0 0 2 &pci1_intc 2>, | 88 | <0 0 0 2 &pci1_intc 2>, |
@@ -107,6 +111,8 @@ | |||
107 | ranges = <0x02000000 0 0x16000000 | 111 | ranges = <0x02000000 0 0x16000000 |
108 | 0x16000000 0 0x100000>; | 112 | 0x16000000 0 0x100000>; |
109 | 113 | ||
114 | bus-range = <0x00 0xff>; | ||
115 | |||
110 | interrupt-map-mask = <0 0 0 7>; | 116 | interrupt-map-mask = <0 0 0 7>; |
111 | interrupt-map = <0 0 0 1 &pci2_intc 1>, | 117 | interrupt-map = <0 0 0 1 &pci2_intc 1>, |
112 | <0 0 0 2 &pci2_intc 2>, | 118 | <0 0 0 2 &pci2_intc 2>, |
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 0cbf3af37eca..a7d0b836f2f7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr) | |||
307 | #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) | 307 | #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) |
308 | #define war_io_reorder_wmb() wmb() | 308 | #define war_io_reorder_wmb() wmb() |
309 | #else | 309 | #else |
310 | #define war_io_reorder_wmb() do { } while (0) | 310 | #define war_io_reorder_wmb() barrier() |
311 | #endif | 311 | #endif |
312 | 312 | ||
313 | #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ | 313 | #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ |
@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ | |||
377 | BUG(); \ | 377 | BUG(); \ |
378 | } \ | 378 | } \ |
379 | \ | 379 | \ |
380 | /* prevent prefetching of coherent DMA data prematurely */ \ | ||
381 | rmb(); \ | ||
380 | return pfx##ioswab##bwlq(__mem, __val); \ | 382 | return pfx##ioswab##bwlq(__mem, __val); \ |
381 | } | 383 | } |
382 | 384 | ||
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b71306947290..06629011a434 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size) | |||
654 | { | 654 | { |
655 | __kernel_size_t res; | 655 | __kernel_size_t res; |
656 | 656 | ||
657 | #ifdef CONFIG_CPU_MICROMIPS | ||
658 | /* micromips memset / bzero also clobbers t7 & t8 */ | ||
659 | #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" | ||
660 | #else | ||
661 | #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" | ||
662 | #endif /* CONFIG_CPU_MICROMIPS */ | ||
663 | |||
657 | if (eva_kernel_access()) { | 664 | if (eva_kernel_access()) { |
658 | __asm__ __volatile__( | 665 | __asm__ __volatile__( |
659 | "move\t$4, %1\n\t" | 666 | "move\t$4, %1\n\t" |
@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size) | |||
663 | "move\t%0, $6" | 670 | "move\t%0, $6" |
664 | : "=r" (res) | 671 | : "=r" (res) |
665 | : "r" (addr), "r" (size) | 672 | : "r" (addr), "r" (size) |
666 | : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); | 673 | : bzero_clobbers); |
667 | } else { | 674 | } else { |
668 | might_fault(); | 675 | might_fault(); |
669 | __asm__ __volatile__( | 676 | __asm__ __volatile__( |
@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size) | |||
674 | "move\t%0, $6" | 681 | "move\t%0, $6" |
675 | : "=r" (res) | 682 | : "=r" (res) |
676 | : "r" (addr), "r" (size) | 683 | : "r" (addr), "r" (size) |
677 | : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); | 684 | : bzero_clobbers); |
678 | } | 685 | } |
679 | 686 | ||
680 | return res; | 687 | return res; |
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index a1456664d6c2..f7327979a8f8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -219,7 +219,7 @@ | |||
219 | 1: PTR_ADDIU a0, 1 /* fill bytewise */ | 219 | 1: PTR_ADDIU a0, 1 /* fill bytewise */ |
220 | R10KCBARRIER(0(ra)) | 220 | R10KCBARRIER(0(ra)) |
221 | bne t1, a0, 1b | 221 | bne t1, a0, 1b |
222 | sb a1, -1(a0) | 222 | EX(sb, a1, -1(a0), .Lsmall_fixup\@) |
223 | 223 | ||
224 | 2: jr ra /* done */ | 224 | 2: jr ra /* done */ |
225 | move a2, zero | 225 | move a2, zero |
@@ -252,13 +252,18 @@ | |||
252 | PTR_L t0, TI_TASK($28) | 252 | PTR_L t0, TI_TASK($28) |
253 | andi a2, STORMASK | 253 | andi a2, STORMASK |
254 | LONG_L t0, THREAD_BUADDR(t0) | 254 | LONG_L t0, THREAD_BUADDR(t0) |
255 | LONG_ADDU a2, t1 | 255 | LONG_ADDU a2, a0 |
256 | jr ra | 256 | jr ra |
257 | LONG_SUBU a2, t0 | 257 | LONG_SUBU a2, t0 |
258 | 258 | ||
259 | .Llast_fixup\@: | 259 | .Llast_fixup\@: |
260 | jr ra | 260 | jr ra |
261 | andi v1, a2, STORMASK | 261 | nop |
262 | |||
263 | .Lsmall_fixup\@: | ||
264 | PTR_SUBU a2, t1, a0 | ||
265 | jr ra | ||
266 | PTR_ADDIU a2, 1 | ||
262 | 267 | ||
263 | .endm | 268 | .endm |
264 | 269 | ||
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index eafd06ab59ef..e5de34d00b1a 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_SMP) += smp.o | |||
23 | obj-$(CONFIG_PA11) += pci-dma.o | 23 | obj-$(CONFIG_PA11) += pci-dma.o |
24 | obj-$(CONFIG_PCI) += pci.o | 24 | obj-$(CONFIG_PCI) += pci.o |
25 | obj-$(CONFIG_MODULES) += module.o | 25 | obj-$(CONFIG_MODULES) += module.o |
26 | obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o | 26 | obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o |
27 | obj-$(CONFIG_STACKTRACE)+= stacktrace.o | 27 | obj-$(CONFIG_STACKTRACE)+= stacktrace.o |
28 | obj-$(CONFIG_AUDIT) += audit.o | 28 | obj-$(CONFIG_AUDIT) += audit.o |
29 | obj64-$(CONFIG_AUDIT) += compat_audit.o | 29 | obj64-$(CONFIG_AUDIT) += compat_audit.o |
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 2d4956e97aa9..ee5a67d57aab 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) | |||
807 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); | 807 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
808 | 808 | ||
809 | /* PCI Command: 0x4 */ | 809 | /* PCI Command: 0x4 */ |
810 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); | 810 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | |
811 | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); | ||
811 | 812 | ||
812 | /* Check the PCIe link is ready */ | 813 | /* Check the PCIe link is ready */ |
813 | eeh_bridge_check_link(edev); | 814 | eeh_bridge_check_link(edev); |
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 79d005445c6c..e734f6e45abc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) | |||
553 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE | 553 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
554 | lbz r0,HSTATE_HWTHREAD_STATE(r13) | 554 | lbz r0,HSTATE_HWTHREAD_STATE(r13) |
555 | cmpwi r0,KVM_HWTHREAD_IN_KERNEL | 555 | cmpwi r0,KVM_HWTHREAD_IN_KERNEL |
556 | beq 1f | 556 | beq 0f |
557 | li r0,KVM_HWTHREAD_IN_KERNEL | 557 | li r0,KVM_HWTHREAD_IN_KERNEL |
558 | stb r0,HSTATE_HWTHREAD_STATE(r13) | 558 | stb r0,HSTATE_HWTHREAD_STATE(r13) |
559 | /* Order setting hwthread_state vs. testing hwthread_req */ | 559 | /* Order setting hwthread_state vs. testing hwthread_req */ |
560 | sync | 560 | sync |
561 | lbz r0,HSTATE_HWTHREAD_REQ(r13) | 561 | 0: lbz r0,HSTATE_HWTHREAD_REQ(r13) |
562 | cmpwi r0,0 | 562 | cmpwi r0,0 |
563 | beq 1f | 563 | beq 1f |
564 | b kvm_start_guest | 564 | b kvm_start_guest |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 44c30dd38067..b78f142a4148 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void) | |||
890 | return; | 890 | return; |
891 | 891 | ||
892 | l1d_size = ppc64_caches.l1d.size; | 892 | l1d_size = ppc64_caches.l1d.size; |
893 | |||
894 | /* | ||
895 | * If there is no d-cache-size property in the device tree, l1d_size | ||
896 | * could be zero. That leads to the loop in the asm wrapping around to | ||
897 | * 2^64-1, and then walking off the end of the fallback area and | ||
898 | * eventually causing a page fault which is fatal. Just default to | ||
899 | * something vaguely sane. | ||
900 | */ | ||
901 | if (!l1d_size) | ||
902 | l1d_size = (64 * 1024); | ||
903 | |||
893 | limit = min(ppc64_bolted_size(), ppc64_rma_size); | 904 | limit = min(ppc64_bolted_size(), ppc64_rma_size); |
894 | 905 | ||
895 | /* | 906 | /* |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 35f80ab7cbd8..288fe4f0db4e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, | |||
55 | unsigned int *target = (unsigned int *)branch_target(src); | 55 | unsigned int *target = (unsigned int *)branch_target(src); |
56 | 56 | ||
57 | /* Branch within the section doesn't need translating */ | 57 | /* Branch within the section doesn't need translating */ |
58 | if (target < alt_start || target >= alt_end) { | 58 | if (target < alt_start || target > alt_end) { |
59 | instr = translate_branch(dest, src); | 59 | instr = translate_branch(dest, src); |
60 | if (!instr) | 60 | if (!instr) |
61 | return 1; | 61 | return 1; |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9033c8194eda..ccc421503363 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) | |||
1093 | LOAD_INT(c), LOAD_FRAC(c), | 1093 | LOAD_INT(c), LOAD_FRAC(c), |
1094 | count_active_contexts(), | 1094 | count_active_contexts(), |
1095 | atomic_read(&nr_spu_contexts), | 1095 | atomic_read(&nr_spu_contexts), |
1096 | idr_get_cursor(&task_active_pid_ns(current)->idr)); | 1096 | idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); |
1097 | return 0; | 1097 | return 0; |
1098 | } | 1098 | } |
1099 | 1099 | ||
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index d22aeb0b69e1..b48454be5b98 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) | |||
389 | if (xive_pool_vps == XIVE_INVALID_VP) | 389 | if (xive_pool_vps == XIVE_INVALID_VP) |
390 | return; | 390 | return; |
391 | 391 | ||
392 | /* Check if pool VP already active, if it is, pull it */ | ||
393 | if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP) | ||
394 | in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); | ||
395 | |||
392 | /* Enable the pool VP */ | 396 | /* Enable the pool VP */ |
393 | vp = xive_pool_vps + cpu; | 397 | vp = xive_pool_vps + cpu; |
394 | pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); | 398 | pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); |
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 9fdff3fe1a42..e63940bb57cd 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/ | |||
8 | obj-y += net/ | 8 | obj-y += net/ |
9 | obj-$(CONFIG_PCI) += pci/ | 9 | obj-$(CONFIG_PCI) += pci/ |
10 | obj-$(CONFIG_NUMA) += numa/ | 10 | obj-$(CONFIG_NUMA) += numa/ |
11 | obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/ | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 32a0d5b958bf..199ac3e4da1d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -47,10 +47,6 @@ config PGSTE | |||
47 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC | 47 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC |
48 | def_bool y | 48 | def_bool y |
49 | 49 | ||
50 | config KEXEC | ||
51 | def_bool y | ||
52 | select KEXEC_CORE | ||
53 | |||
54 | config AUDIT_ARCH | 50 | config AUDIT_ARCH |
55 | def_bool y | 51 | def_bool y |
56 | 52 | ||
@@ -290,12 +286,12 @@ config MARCH_Z13 | |||
290 | older machines. | 286 | older machines. |
291 | 287 | ||
292 | config MARCH_Z14 | 288 | config MARCH_Z14 |
293 | bool "IBM z14" | 289 | bool "IBM z14 ZR1 and z14" |
294 | select HAVE_MARCH_Z14_FEATURES | 290 | select HAVE_MARCH_Z14_FEATURES |
295 | help | 291 | help |
296 | Select this to enable optimizations for IBM z14 (3906 series). | 292 | Select this to enable optimizations for IBM z14 ZR1 and z14 (3907 |
297 | The kernel will be slightly faster but will not work on older | 293 | and 3906 series). The kernel will be slightly faster but will not |
298 | machines. | 294 | work on older machines. |
299 | 295 | ||
300 | endchoice | 296 | endchoice |
301 | 297 | ||
@@ -525,6 +521,26 @@ source kernel/Kconfig.preempt | |||
525 | 521 | ||
526 | source kernel/Kconfig.hz | 522 | source kernel/Kconfig.hz |
527 | 523 | ||
524 | config KEXEC | ||
525 | def_bool y | ||
526 | select KEXEC_CORE | ||
527 | |||
528 | config KEXEC_FILE | ||
529 | bool "kexec file based system call" | ||
530 | select KEXEC_CORE | ||
531 | select BUILD_BIN2C | ||
532 | depends on CRYPTO | ||
533 | depends on CRYPTO_SHA256 | ||
534 | depends on CRYPTO_SHA256_S390 | ||
535 | help | ||
536 | Enable the kexec file based system call. In contrast to the normal | ||
537 | kexec system call this system call takes file descriptors for the | ||
538 | kernel and initramfs as arguments. | ||
539 | |||
540 | config ARCH_HAS_KEXEC_PURGATORY | ||
541 | def_bool y | ||
542 | depends on KEXEC_FILE | ||
543 | |||
528 | config ARCH_RANDOM | 544 | config ARCH_RANDOM |
529 | def_bool y | 545 | def_bool y |
530 | prompt "s390 architectural random number generation API" | 546 | prompt "s390 architectural random number generation API" |
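For context, a hedged user-space sketch (not part of this patch) of the system call the new KEXEC_FILE help text describes: kernel and initramfs are passed as file descriptors. Paths and the command line are illustrative, and it is assumed the libc headers expose SYS_kexec_file_load (otherwise __NR_kexec_file_load can be used directly).

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

static long load_kexec_image(void)
{
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd", O_RDONLY);
	const char cmdline[] = "root=/dev/dasda1";

	if (kernel_fd < 0 || initrd_fd < 0)
		return -1;

	/* cmdline_len must include the terminating NUL byte */
	return syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		       sizeof(cmdline), cmdline, 0UL);
}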
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index da9dad35c28e..d1fa37fcce83 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,12 +3,6 @@ | |||
3 | # Makefile for the linux s390-specific parts of the memory manager. | 3 | # Makefile for the linux s390-specific parts of the memory manager. |
4 | # | 4 | # |
5 | 5 | ||
6 | COMPILE_VERSION := __linux_compile_version_id__`hostname | \ | ||
7 | tr -c '[0-9A-Za-z]' '_'`__`date | \ | ||
8 | tr -c '[0-9A-Za-z]' '_'`_t | ||
9 | |||
10 | ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. | ||
11 | |||
12 | targets := image | 6 | targets := image |
13 | targets += bzImage | 7 | targets += bzImage |
14 | subdir- := compressed | 8 | subdir- := compressed |
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index ae06b9b4c02f..2088cc140629 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,3 +1,4 @@ | |||
1 | sizes.h | 1 | sizes.h |
2 | vmlinux | 2 | vmlinux |
3 | vmlinux.lds | 3 | vmlinux.lds |
4 | vmlinux.bin.full | ||
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/debug_defconfig
index 5af8458951cf..6176fe9795ca 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -24,13 +24,13 @@ CONFIG_CPUSETS=y | |||
24 | CONFIG_CGROUP_DEVICE=y | 24 | CONFIG_CGROUP_DEVICE=y |
25 | CONFIG_CGROUP_CPUACCT=y | 25 | CONFIG_CGROUP_CPUACCT=y |
26 | CONFIG_CGROUP_PERF=y | 26 | CONFIG_CGROUP_PERF=y |
27 | CONFIG_CHECKPOINT_RESTORE=y | ||
28 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
29 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
30 | CONFIG_SCHED_AUTOGROUP=y | 29 | CONFIG_SCHED_AUTOGROUP=y |
31 | CONFIG_BLK_DEV_INITRD=y | 30 | CONFIG_BLK_DEV_INITRD=y |
32 | CONFIG_EXPERT=y | 31 | CONFIG_EXPERT=y |
33 | # CONFIG_SYSFS_SYSCALL is not set | 32 | # CONFIG_SYSFS_SYSCALL is not set |
33 | CONFIG_CHECKPOINT_RESTORE=y | ||
34 | CONFIG_BPF_SYSCALL=y | 34 | CONFIG_BPF_SYSCALL=y |
35 | CONFIG_USERFAULTFD=y | 35 | CONFIG_USERFAULTFD=y |
36 | # CONFIG_COMPAT_BRK is not set | 36 | # CONFIG_COMPAT_BRK is not set |
@@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y | |||
59 | CONFIG_DEFAULT_DEADLINE=y | 59 | CONFIG_DEFAULT_DEADLINE=y |
60 | CONFIG_LIVEPATCH=y | 60 | CONFIG_LIVEPATCH=y |
61 | CONFIG_TUNE_ZEC12=y | 61 | CONFIG_TUNE_ZEC12=y |
62 | CONFIG_NR_CPUS=256 | 62 | CONFIG_NR_CPUS=512 |
63 | CONFIG_NUMA=y | 63 | CONFIG_NUMA=y |
64 | CONFIG_PREEMPT=y | 64 | CONFIG_PREEMPT=y |
65 | CONFIG_HZ_100=y | 65 | CONFIG_HZ_100=y |
66 | CONFIG_KEXEC_FILE=y | ||
66 | CONFIG_MEMORY_HOTPLUG=y | 67 | CONFIG_MEMORY_HOTPLUG=y |
67 | CONFIG_MEMORY_HOTREMOVE=y | 68 | CONFIG_MEMORY_HOTREMOVE=y |
68 | CONFIG_KSM=y | 69 | CONFIG_KSM=y |
@@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m | |||
305 | CONFIG_IP6_NF_NAT=m | 306 | CONFIG_IP6_NF_NAT=m |
306 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 307 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
307 | CONFIG_NF_TABLES_BRIDGE=m | 308 | CONFIG_NF_TABLES_BRIDGE=m |
308 | CONFIG_NET_SCTPPROBE=m | ||
309 | CONFIG_RDS=m | 309 | CONFIG_RDS=m |
310 | CONFIG_RDS_RDMA=m | 310 | CONFIG_RDS_RDMA=m |
311 | CONFIG_RDS_TCP=m | 311 | CONFIG_RDS_TCP=m |
@@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m | |||
364 | CONFIG_NET_ACT_SKBEDIT=m | 364 | CONFIG_NET_ACT_SKBEDIT=m |
365 | CONFIG_NET_ACT_CSUM=m | 365 | CONFIG_NET_ACT_CSUM=m |
366 | CONFIG_DNS_RESOLVER=y | 366 | CONFIG_DNS_RESOLVER=y |
367 | CONFIG_OPENVSWITCH=m | ||
367 | CONFIG_NETLINK_DIAG=m | 368 | CONFIG_NETLINK_DIAG=m |
368 | CONFIG_CGROUP_NET_PRIO=y | 369 | CONFIG_CGROUP_NET_PRIO=y |
369 | CONFIG_BPF_JIT=y | 370 | CONFIG_BPF_JIT=y |
370 | CONFIG_NET_PKTGEN=m | 371 | CONFIG_NET_PKTGEN=m |
371 | CONFIG_NET_TCPPROBE=m | ||
372 | CONFIG_DEVTMPFS=y | 372 | CONFIG_DEVTMPFS=y |
373 | CONFIG_DMA_CMA=y | 373 | CONFIG_DMA_CMA=y |
374 | CONFIG_CMA_SIZE_MBYTES=0 | 374 | CONFIG_CMA_SIZE_MBYTES=0 |
@@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m | |||
380 | CONFIG_BLK_DEV_NBD=m | 380 | CONFIG_BLK_DEV_NBD=m |
381 | CONFIG_BLK_DEV_RAM=y | 381 | CONFIG_BLK_DEV_RAM=y |
382 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 382 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
383 | CONFIG_BLK_DEV_RAM_DAX=y | ||
384 | CONFIG_VIRTIO_BLK=y | 383 | CONFIG_VIRTIO_BLK=y |
385 | CONFIG_BLK_DEV_RBD=m | 384 | CONFIG_BLK_DEV_RBD=m |
385 | CONFIG_BLK_DEV_NVME=m | ||
386 | CONFIG_ENCLOSURE_SERVICES=m | 386 | CONFIG_ENCLOSURE_SERVICES=m |
387 | CONFIG_GENWQE=m | 387 | CONFIG_GENWQE=m |
388 | CONFIG_RAID_ATTRS=m | 388 | CONFIG_RAID_ATTRS=m |
@@ -461,6 +461,7 @@ CONFIG_PPTP=m | |||
461 | CONFIG_PPPOL2TP=m | 461 | CONFIG_PPPOL2TP=m |
462 | CONFIG_PPP_ASYNC=m | 462 | CONFIG_PPP_ASYNC=m |
463 | CONFIG_PPP_SYNC_TTY=m | 463 | CONFIG_PPP_SYNC_TTY=m |
464 | CONFIG_INPUT_EVDEV=y | ||
464 | # CONFIG_INPUT_KEYBOARD is not set | 465 | # CONFIG_INPUT_KEYBOARD is not set |
465 | # CONFIG_INPUT_MOUSE is not set | 466 | # CONFIG_INPUT_MOUSE is not set |
466 | # CONFIG_SERIO is not set | 467 | # CONFIG_SERIO is not set |
@@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y | |||
474 | CONFIG_WATCHDOG_NOWAYOUT=y | 475 | CONFIG_WATCHDOG_NOWAYOUT=y |
475 | CONFIG_SOFT_WATCHDOG=m | 476 | CONFIG_SOFT_WATCHDOG=m |
476 | CONFIG_DIAG288_WATCHDOG=m | 477 | CONFIG_DIAG288_WATCHDOG=m |
478 | CONFIG_DRM=y | ||
479 | CONFIG_DRM_VIRTIO_GPU=y | ||
480 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
477 | # CONFIG_HID is not set | 481 | # CONFIG_HID is not set |
478 | # CONFIG_USB_SUPPORT is not set | 482 | # CONFIG_USB_SUPPORT is not set |
479 | CONFIG_INFINIBAND=m | 483 | CONFIG_INFINIBAND=m |
@@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m | |||
482 | CONFIG_MLX5_INFINIBAND=m | 486 | CONFIG_MLX5_INFINIBAND=m |
483 | CONFIG_VFIO=m | 487 | CONFIG_VFIO=m |
484 | CONFIG_VFIO_PCI=m | 488 | CONFIG_VFIO_PCI=m |
489 | CONFIG_VIRTIO_PCI=m | ||
485 | CONFIG_VIRTIO_BALLOON=m | 490 | CONFIG_VIRTIO_BALLOON=m |
491 | CONFIG_VIRTIO_INPUT=y | ||
486 | CONFIG_EXT4_FS=y | 492 | CONFIG_EXT4_FS=y |
487 | CONFIG_EXT4_FS_POSIX_ACL=y | 493 | CONFIG_EXT4_FS_POSIX_ACL=y |
488 | CONFIG_EXT4_FS_SECURITY=y | 494 | CONFIG_EXT4_FS_SECURITY=y |
@@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y | |||
641 | CONFIG_TEST_BPF=m | 647 | CONFIG_TEST_BPF=m |
642 | CONFIG_BUG_ON_DATA_CORRUPTION=y | 648 | CONFIG_BUG_ON_DATA_CORRUPTION=y |
643 | CONFIG_S390_PTDUMP=y | 649 | CONFIG_S390_PTDUMP=y |
650 | CONFIG_PERSISTENT_KEYRINGS=y | ||
651 | CONFIG_BIG_KEYS=y | ||
644 | CONFIG_ENCRYPTED_KEYS=m | 652 | CONFIG_ENCRYPTED_KEYS=m |
645 | CONFIG_SECURITY=y | 653 | CONFIG_SECURITY=y |
646 | CONFIG_SECURITY_NETWORK=y | 654 | CONFIG_SECURITY_NETWORK=y |
@@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y | |||
649 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | 657 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y |
650 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | 658 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 |
651 | CONFIG_SECURITY_SELINUX_DISABLE=y | 659 | CONFIG_SECURITY_SELINUX_DISABLE=y |
660 | CONFIG_INTEGRITY_SIGNATURE=y | ||
661 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | ||
652 | CONFIG_IMA=y | 662 | CONFIG_IMA=y |
663 | CONFIG_IMA_DEFAULT_HASH_SHA256=y | ||
664 | CONFIG_IMA_WRITE_POLICY=y | ||
653 | CONFIG_IMA_APPRAISE=y | 665 | CONFIG_IMA_APPRAISE=y |
654 | CONFIG_CRYPTO_RSA=m | ||
655 | CONFIG_CRYPTO_DH=m | 666 | CONFIG_CRYPTO_DH=m |
656 | CONFIG_CRYPTO_ECDH=m | 667 | CONFIG_CRYPTO_ECDH=m |
657 | CONFIG_CRYPTO_USER=m | 668 | CONFIG_CRYPTO_USER=m |
669 | # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set | ||
658 | CONFIG_CRYPTO_PCRYPT=m | 670 | CONFIG_CRYPTO_PCRYPT=m |
659 | CONFIG_CRYPTO_CRYPTD=m | 671 | CONFIG_CRYPTO_CRYPTD=m |
660 | CONFIG_CRYPTO_MCRYPTD=m | 672 | CONFIG_CRYPTO_MCRYPTD=m |
661 | CONFIG_CRYPTO_TEST=m | 673 | CONFIG_CRYPTO_TEST=m |
662 | CONFIG_CRYPTO_GCM=m | ||
663 | CONFIG_CRYPTO_CHACHA20POLY1305=m | 674 | CONFIG_CRYPTO_CHACHA20POLY1305=m |
664 | CONFIG_CRYPTO_LRW=m | 675 | CONFIG_CRYPTO_LRW=m |
665 | CONFIG_CRYPTO_PCBC=m | 676 | CONFIG_CRYPTO_PCBC=m |
@@ -707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m | |||
707 | CONFIG_CRYPTO_AES_S390=m | 718 | CONFIG_CRYPTO_AES_S390=m |
708 | CONFIG_CRYPTO_GHASH_S390=m | 719 | CONFIG_CRYPTO_GHASH_S390=m |
709 | CONFIG_CRYPTO_CRC32_S390=y | 720 | CONFIG_CRYPTO_CRC32_S390=y |
710 | CONFIG_ASYMMETRIC_KEY_TYPE=y | 721 | CONFIG_PKCS7_MESSAGE_PARSER=y |
711 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 722 | CONFIG_SYSTEM_TRUSTED_KEYRING=y |
712 | CONFIG_X509_CERTIFICATE_PARSER=m | ||
713 | CONFIG_CRC7=m | 723 | CONFIG_CRC7=m |
714 | CONFIG_CRC8=m | 724 | CONFIG_CRC8=m |
715 | CONFIG_RANDOM32_SELFTEST=y | 725 | CONFIG_RANDOM32_SELFTEST=y |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
deleted file mode 100644
index d52eafe57ae8..000000000000
--- a/arch/s390/configs/gcov_defconfig
+++ /dev/null
@@ -1,661 +0,0 @@ | |||
1 | CONFIG_SYSVIPC=y | ||
2 | CONFIG_POSIX_MQUEUE=y | ||
3 | CONFIG_AUDIT=y | ||
4 | CONFIG_NO_HZ_IDLE=y | ||
5 | CONFIG_HIGH_RES_TIMERS=y | ||
6 | CONFIG_BSD_PROCESS_ACCT=y | ||
7 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
8 | CONFIG_TASKSTATS=y | ||
9 | CONFIG_TASK_DELAY_ACCT=y | ||
10 | CONFIG_TASK_XACCT=y | ||
11 | CONFIG_TASK_IO_ACCOUNTING=y | ||
12 | CONFIG_IKCONFIG=y | ||
13 | CONFIG_IKCONFIG_PROC=y | ||
14 | CONFIG_NUMA_BALANCING=y | ||
15 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set | ||
16 | CONFIG_MEMCG=y | ||
17 | CONFIG_MEMCG_SWAP=y | ||
18 | CONFIG_BLK_CGROUP=y | ||
19 | CONFIG_CFS_BANDWIDTH=y | ||
20 | CONFIG_RT_GROUP_SCHED=y | ||
21 | CONFIG_CGROUP_PIDS=y | ||
22 | CONFIG_CGROUP_FREEZER=y | ||
23 | CONFIG_CGROUP_HUGETLB=y | ||
24 | CONFIG_CPUSETS=y | ||
25 | CONFIG_CGROUP_DEVICE=y | ||
26 | CONFIG_CGROUP_CPUACCT=y | ||
27 | CONFIG_CGROUP_PERF=y | ||
28 | CONFIG_CHECKPOINT_RESTORE=y | ||
29 | CONFIG_NAMESPACES=y | ||
30 | CONFIG_USER_NS=y | ||
31 | CONFIG_SCHED_AUTOGROUP=y | ||
32 | CONFIG_BLK_DEV_INITRD=y | ||
33 | CONFIG_EXPERT=y | ||
34 | # CONFIG_SYSFS_SYSCALL is not set | ||
35 | CONFIG_BPF_SYSCALL=y | ||
36 | CONFIG_USERFAULTFD=y | ||
37 | # CONFIG_COMPAT_BRK is not set | ||
38 | CONFIG_PROFILING=y | ||
39 | CONFIG_OPROFILE=m | ||
40 | CONFIG_KPROBES=y | ||
41 | CONFIG_JUMP_LABEL=y | ||
42 | CONFIG_GCOV_KERNEL=y | ||
43 | CONFIG_GCOV_PROFILE_ALL=y | ||
44 | CONFIG_MODULES=y | ||
45 | CONFIG_MODULE_FORCE_LOAD=y | ||
46 | CONFIG_MODULE_UNLOAD=y | ||
47 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
48 | CONFIG_MODVERSIONS=y | ||
49 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
50 | CONFIG_BLK_DEV_INTEGRITY=y | ||
51 | CONFIG_BLK_DEV_THROTTLING=y | ||
52 | CONFIG_BLK_WBT=y | ||
53 | CONFIG_BLK_WBT_SQ=y | ||
54 | CONFIG_PARTITION_ADVANCED=y | ||
55 | CONFIG_IBM_PARTITION=y | ||
56 | CONFIG_BSD_DISKLABEL=y | ||
57 | CONFIG_MINIX_SUBPARTITION=y | ||
58 | CONFIG_SOLARIS_X86_PARTITION=y | ||
59 | CONFIG_UNIXWARE_DISKLABEL=y | ||
60 | CONFIG_CFQ_GROUP_IOSCHED=y | ||
61 | CONFIG_DEFAULT_DEADLINE=y | ||
62 | CONFIG_LIVEPATCH=y | ||
63 | CONFIG_TUNE_ZEC12=y | ||
64 | CONFIG_NR_CPUS=512 | ||
65 | CONFIG_NUMA=y | ||
66 | CONFIG_HZ_100=y | ||
67 | CONFIG_MEMORY_HOTPLUG=y | ||
68 | CONFIG_MEMORY_HOTREMOVE=y | ||
69 | CONFIG_KSM=y | ||
70 | CONFIG_TRANSPARENT_HUGEPAGE=y | ||
71 | CONFIG_CLEANCACHE=y | ||
72 | CONFIG_FRONTSWAP=y | ||
73 | CONFIG_MEM_SOFT_DIRTY=y | ||
74 | CONFIG_ZSWAP=y | ||
75 | CONFIG_ZBUD=m | ||
76 | CONFIG_ZSMALLOC=m | ||
77 | CONFIG_ZSMALLOC_STAT=y | ||
78 | CONFIG_DEFERRED_STRUCT_PAGE_INIT=y | ||
79 | CONFIG_IDLE_PAGE_TRACKING=y | ||
80 | CONFIG_PCI=y | ||
81 | CONFIG_HOTPLUG_PCI=y | ||
82 | CONFIG_HOTPLUG_PCI_S390=y | ||
83 | CONFIG_CHSC_SCH=y | ||
84 | CONFIG_CRASH_DUMP=y | ||
85 | CONFIG_BINFMT_MISC=m | ||
86 | CONFIG_HIBERNATION=y | ||
87 | CONFIG_NET=y | ||
88 | CONFIG_PACKET=y | ||
89 | CONFIG_PACKET_DIAG=m | ||
90 | CONFIG_UNIX=y | ||
91 | CONFIG_UNIX_DIAG=m | ||
92 | CONFIG_XFRM_USER=m | ||
93 | CONFIG_NET_KEY=m | ||
94 | CONFIG_SMC=m | ||
95 | CONFIG_SMC_DIAG=m | ||
96 | CONFIG_INET=y | ||
97 | CONFIG_IP_MULTICAST=y | ||
98 | CONFIG_IP_ADVANCED_ROUTER=y | ||
99 | CONFIG_IP_MULTIPLE_TABLES=y | ||
100 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
101 | CONFIG_IP_ROUTE_VERBOSE=y | ||
102 | CONFIG_NET_IPIP=m | ||
103 | CONFIG_NET_IPGRE_DEMUX=m | ||
104 | CONFIG_NET_IPGRE=m | ||
105 | CONFIG_NET_IPGRE_BROADCAST=y | ||
106 | CONFIG_IP_MROUTE=y | ||
107 | CONFIG_IP_MROUTE_MULTIPLE_TABLES=y | ||
108 | CONFIG_IP_PIMSM_V1=y | ||
109 | CONFIG_IP_PIMSM_V2=y | ||
110 | CONFIG_SYN_COOKIES=y | ||
111 | CONFIG_NET_IPVTI=m | ||
112 | CONFIG_INET_AH=m | ||
113 | CONFIG_INET_ESP=m | ||
114 | CONFIG_INET_IPCOMP=m | ||
115 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | ||
116 | CONFIG_INET_XFRM_MODE_TUNNEL=m | ||
117 | CONFIG_INET_XFRM_MODE_BEET=m | ||
118 | CONFIG_INET_DIAG=m | ||
119 | CONFIG_INET_UDP_DIAG=m | ||
120 | CONFIG_TCP_CONG_ADVANCED=y | ||
121 | CONFIG_TCP_CONG_HSTCP=m | ||
122 | CONFIG_TCP_CONG_HYBLA=m | ||
123 | CONFIG_TCP_CONG_SCALABLE=m | ||
124 | CONFIG_TCP_CONG_LP=m | ||
125 | CONFIG_TCP_CONG_VENO=m | ||
126 | CONFIG_TCP_CONG_YEAH=m | ||
127 | CONFIG_TCP_CONG_ILLINOIS=m | ||
128 | CONFIG_IPV6_ROUTER_PREF=y | ||
129 | CONFIG_INET6_AH=m | ||
130 | CONFIG_INET6_ESP=m | ||
131 | CONFIG_INET6_IPCOMP=m | ||
132 | CONFIG_IPV6_MIP6=m | ||
133 | CONFIG_INET6_XFRM_MODE_TRANSPORT=m | ||
134 | CONFIG_INET6_XFRM_MODE_TUNNEL=m | ||
135 | CONFIG_INET6_XFRM_MODE_BEET=m | ||
136 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m | ||
137 | CONFIG_IPV6_VTI=m | ||
138 | CONFIG_IPV6_SIT=m | ||
139 | CONFIG_IPV6_GRE=m | ||
140 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
141 | CONFIG_IPV6_SUBTREES=y | ||
142 | CONFIG_NETFILTER=y | ||
143 | CONFIG_NF_CONNTRACK=m | ||
144 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
145 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
146 | CONFIG_NF_CONNTRACK_TIMEOUT=y | ||
147 | CONFIG_NF_CONNTRACK_TIMESTAMP=y | ||
148 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
149 | CONFIG_NF_CONNTRACK_FTP=m | ||
150 | CONFIG_NF_CONNTRACK_H323=m | ||
151 | CONFIG_NF_CONNTRACK_IRC=m | ||
152 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
153 | CONFIG_NF_CONNTRACK_SNMP=m | ||
154 | CONFIG_NF_CONNTRACK_PPTP=m | ||
155 | CONFIG_NF_CONNTRACK_SANE=m | ||
156 | CONFIG_NF_CONNTRACK_SIP=m | ||
157 | CONFIG_NF_CONNTRACK_TFTP=m | ||
158 | CONFIG_NF_CT_NETLINK=m | ||
159 | CONFIG_NF_CT_NETLINK_TIMEOUT=m | ||
160 | CONFIG_NF_TABLES=m | ||
161 | CONFIG_NFT_EXTHDR=m | ||
162 | CONFIG_NFT_META=m | ||
163 | CONFIG_NFT_CT=m | ||
164 | CONFIG_NFT_COUNTER=m | ||
165 | CONFIG_NFT_LOG=m | ||
166 | CONFIG_NFT_LIMIT=m | ||
167 | CONFIG_NFT_NAT=m | ||
168 | CONFIG_NFT_COMPAT=m | ||
169 | CONFIG_NFT_HASH=m | ||
170 | CONFIG_NETFILTER_XT_SET=m | ||
171 | CONFIG_NETFILTER_XT_TARGET_AUDIT=m | ||
172 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | ||
173 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | ||
174 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m | ||
175 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
176 | CONFIG_NETFILTER_XT_TARGET_CT=m | ||
177 | CONFIG_NETFILTER_XT_TARGET_DSCP=m | ||
178 | CONFIG_NETFILTER_XT_TARGET_HMARK=m | ||
179 | CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m | ||
180 | CONFIG_NETFILTER_XT_TARGET_LOG=m | ||
181 | CONFIG_NETFILTER_XT_TARGET_MARK=m | ||
182 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | ||
183 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | ||
184 | CONFIG_NETFILTER_XT_TARGET_TEE=m | ||
185 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
186 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | ||
187 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m | ||
188 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | ||
189 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | ||
190 | CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m | ||
191 | CONFIG_NETFILTER_XT_MATCH_BPF=m | ||
192 | CONFIG_NETFILTER_XT_MATCH_CLUSTER=m | ||
193 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | ||
194 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | ||
195 | CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m | ||
196 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | ||
197 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | ||
198 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | ||
199 | CONFIG_NETFILTER_XT_MATCH_CPU=m | ||
200 | CONFIG_NETFILTER_XT_MATCH_DCCP=m | ||
201 | CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m | ||
202 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | ||
203 | CONFIG_NETFILTER_XT_MATCH_ESP=m | ||
204 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | ||
205 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | ||
206 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m | ||
207 | CONFIG_NETFILTER_XT_MATCH_IPVS=m | ||
208 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | ||
209 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | ||
210 | CONFIG_NETFILTER_XT_MATCH_MAC=m | ||
211 | CONFIG_NETFILTER_XT_MATCH_MARK=m | ||
212 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | ||
213 | CONFIG_NETFILTER_XT_MATCH_NFACCT=m | ||
214 | CONFIG_NETFILTER_XT_MATCH_OSF=m | ||
215 | CONFIG_NETFILTER_XT_MATCH_OWNER=m | ||
216 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | ||
217 | CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m | ||
218 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | ||
219 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | ||
220 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | ||
221 | CONFIG_NETFILTER_XT_MATCH_REALM=m | ||
222 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | ||
223 | CONFIG_NETFILTER_XT_MATCH_STATE=m | ||
224 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | ||
225 | CONFIG_NETFILTER_XT_MATCH_STRING=m | ||
226 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | ||
227 | CONFIG_NETFILTER_XT_MATCH_TIME=m | ||
228 | CONFIG_NETFILTER_XT_MATCH_U32=m | ||
229 | CONFIG_IP_SET=m | ||
230 | CONFIG_IP_SET_BITMAP_IP=m | ||
231 | CONFIG_IP_SET_BITMAP_IPMAC=m | ||
232 | CONFIG_IP_SET_BITMAP_PORT=m | ||
233 | CONFIG_IP_SET_HASH_IP=m | ||
234 | CONFIG_IP_SET_HASH_IPPORT=m | ||
235 | CONFIG_IP_SET_HASH_IPPORTIP=m | ||
236 | CONFIG_IP_SET_HASH_IPPORTNET=m | ||
237 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
238 | CONFIG_IP_SET_HASH_NET=m | ||
239 | CONFIG_IP_SET_HASH_NETNET=m | ||
240 | CONFIG_IP_SET_HASH_NETPORT=m | ||
241 | CONFIG_IP_SET_HASH_NETIFACE=m | ||
242 | CONFIG_IP_SET_LIST_SET=m | ||
243 | CONFIG_IP_VS=m | ||
244 | CONFIG_IP_VS_PROTO_TCP=y | ||
245 | CONFIG_IP_VS_PROTO_UDP=y | ||
246 | CONFIG_IP_VS_PROTO_ESP=y | ||
247 | CONFIG_IP_VS_PROTO_AH=y | ||
248 | CONFIG_IP_VS_RR=m | ||
249 | CONFIG_IP_VS_WRR=m | ||
250 | CONFIG_IP_VS_LC=m | ||
251 | CONFIG_IP_VS_WLC=m | ||
252 | CONFIG_IP_VS_LBLC=m | ||
253 | CONFIG_IP_VS_LBLCR=m | ||
254 | CONFIG_IP_VS_DH=m | ||
255 | CONFIG_IP_VS_SH=m | ||
256 | CONFIG_IP_VS_SED=m | ||
257 | CONFIG_IP_VS_NQ=m | ||
258 | CONFIG_IP_VS_FTP=m | ||
259 | CONFIG_IP_VS_PE_SIP=m | ||
260 | CONFIG_NF_CONNTRACK_IPV4=m | ||
261 | CONFIG_NF_TABLES_IPV4=m | ||
262 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
263 | CONFIG_NF_TABLES_ARP=m | ||
264 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
265 | CONFIG_IP_NF_IPTABLES=m | ||
266 | CONFIG_IP_NF_MATCH_AH=m | ||
267 | CONFIG_IP_NF_MATCH_ECN=m | ||
268 | CONFIG_IP_NF_MATCH_RPFILTER=m | ||
269 | CONFIG_IP_NF_MATCH_TTL=m | ||
270 | CONFIG_IP_NF_FILTER=m | ||
271 | CONFIG_IP_NF_TARGET_REJECT=m | ||
272 | CONFIG_IP_NF_NAT=m | ||
273 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
274 | CONFIG_IP_NF_MANGLE=m | ||
275 | CONFIG_IP_NF_TARGET_CLUSTERIP=m | ||
276 | CONFIG_IP_NF_TARGET_ECN=m | ||
277 | CONFIG_IP_NF_TARGET_TTL=m | ||
278 | CONFIG_IP_NF_RAW=m | ||
279 | CONFIG_IP_NF_SECURITY=m | ||
280 | CONFIG_IP_NF_ARPTABLES=m | ||
281 | CONFIG_IP_NF_ARPFILTER=m | ||
282 | CONFIG_IP_NF_ARP_MANGLE=m | ||
283 | CONFIG_NF_CONNTRACK_IPV6=m | ||
284 | CONFIG_NF_TABLES_IPV6=m | ||
285 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
286 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
287 | CONFIG_IP6_NF_IPTABLES=m | ||
288 | CONFIG_IP6_NF_MATCH_AH=m | ||
289 | CONFIG_IP6_NF_MATCH_EUI64=m | ||
290 | CONFIG_IP6_NF_MATCH_FRAG=m | ||
291 | CONFIG_IP6_NF_MATCH_OPTS=m | ||
292 | CONFIG_IP6_NF_MATCH_HL=m | ||
293 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | ||
294 | CONFIG_IP6_NF_MATCH_MH=m | ||
295 | CONFIG_IP6_NF_MATCH_RPFILTER=m | ||
296 | CONFIG_IP6_NF_MATCH_RT=m | ||
297 | CONFIG_IP6_NF_TARGET_HL=m | ||
298 | CONFIG_IP6_NF_FILTER=m | ||
299 | CONFIG_IP6_NF_TARGET_REJECT=m | ||
300 | CONFIG_IP6_NF_MANGLE=m | ||
301 | CONFIG_IP6_NF_RAW=m | ||
302 | CONFIG_IP6_NF_SECURITY=m | ||
303 | CONFIG_IP6_NF_NAT=m | ||
304 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | ||
305 | CONFIG_NF_TABLES_BRIDGE=m | ||
306 | CONFIG_NET_SCTPPROBE=m | ||
307 | CONFIG_RDS=m | ||
308 | CONFIG_RDS_RDMA=m | ||
309 | CONFIG_RDS_TCP=m | ||
310 | CONFIG_L2TP=m | ||
311 | CONFIG_L2TP_DEBUGFS=m | ||
312 | CONFIG_L2TP_V3=y | ||
313 | CONFIG_L2TP_IP=m | ||
314 | CONFIG_L2TP_ETH=m | ||
315 | CONFIG_BRIDGE=m | ||
316 | CONFIG_VLAN_8021Q=m | ||
317 | CONFIG_VLAN_8021Q_GVRP=y | ||
318 | CONFIG_NET_SCHED=y | ||
319 | CONFIG_NET_SCH_CBQ=m | ||
320 | CONFIG_NET_SCH_HTB=m | ||
321 | CONFIG_NET_SCH_HFSC=m | ||
322 | CONFIG_NET_SCH_PRIO=m | ||
323 | CONFIG_NET_SCH_MULTIQ=m | ||
324 | CONFIG_NET_SCH_RED=m | ||
325 | CONFIG_NET_SCH_SFB=m | ||
326 | CONFIG_NET_SCH_SFQ=m | ||
327 | CONFIG_NET_SCH_TEQL=m | ||
328 | CONFIG_NET_SCH_TBF=m | ||
329 | CONFIG_NET_SCH_GRED=m | ||
330 | CONFIG_NET_SCH_DSMARK=m | ||
331 | CONFIG_NET_SCH_NETEM=m | ||
332 | CONFIG_NET_SCH_DRR=m | ||
333 | CONFIG_NET_SCH_MQPRIO=m | ||
334 | CONFIG_NET_SCH_CHOKE=m | ||
335 | CONFIG_NET_SCH_QFQ=m | ||
336 | CONFIG_NET_SCH_CODEL=m | ||
337 | CONFIG_NET_SCH_FQ_CODEL=m | ||
338 | CONFIG_NET_SCH_INGRESS=m | ||
339 | CONFIG_NET_SCH_PLUG=m | ||
340 | CONFIG_NET_CLS_BASIC=m | ||
341 | CONFIG_NET_CLS_TCINDEX=m | ||
342 | CONFIG_NET_CLS_ROUTE4=m | ||
343 | CONFIG_NET_CLS_FW=m | ||
344 | CONFIG_NET_CLS_U32=m | ||
345 | CONFIG_CLS_U32_PERF=y | ||
346 | CONFIG_CLS_U32_MARK=y | ||
347 | CONFIG_NET_CLS_RSVP=m | ||
348 | CONFIG_NET_CLS_RSVP6=m | ||
349 | CONFIG_NET_CLS_FLOW=m | ||
350 | CONFIG_NET_CLS_CGROUP=y | ||
351 | CONFIG_NET_CLS_BPF=m | ||
352 | CONFIG_NET_CLS_ACT=y | ||
353 | CONFIG_NET_ACT_POLICE=m | ||
354 | CONFIG_NET_ACT_GACT=m | ||
355 | CONFIG_GACT_PROB=y | ||
356 | CONFIG_NET_ACT_MIRRED=m | ||
357 | CONFIG_NET_ACT_IPT=m | ||
358 | CONFIG_NET_ACT_NAT=m | ||
359 | CONFIG_NET_ACT_PEDIT=m | ||
360 | CONFIG_NET_ACT_SIMP=m | ||
361 | CONFIG_NET_ACT_SKBEDIT=m | ||
362 | CONFIG_NET_ACT_CSUM=m | ||
363 | CONFIG_DNS_RESOLVER=y | ||
364 | CONFIG_NETLINK_DIAG=m | ||
365 | CONFIG_CGROUP_NET_PRIO=y | ||
366 | CONFIG_BPF_JIT=y | ||
367 | CONFIG_NET_PKTGEN=m | ||
368 | CONFIG_NET_TCPPROBE=m | ||
369 | CONFIG_DEVTMPFS=y | ||
370 | CONFIG_DMA_CMA=y | ||
371 | CONFIG_CMA_SIZE_MBYTES=0 | ||
372 | CONFIG_CONNECTOR=y | ||
373 | CONFIG_ZRAM=m | ||
374 | CONFIG_BLK_DEV_LOOP=m | ||
375 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
376 | CONFIG_BLK_DEV_DRBD=m | ||
377 | CONFIG_BLK_DEV_NBD=m | ||
378 | CONFIG_BLK_DEV_RAM=y | ||
379 | CONFIG_BLK_DEV_RAM_SIZE=32768 | ||
380 | CONFIG_BLK_DEV_RAM_DAX=y | ||
381 | CONFIG_VIRTIO_BLK=y | ||
382 | CONFIG_ENCLOSURE_SERVICES=m | ||
383 | CONFIG_GENWQE=m | ||
384 | CONFIG_RAID_ATTRS=m | ||
385 | CONFIG_SCSI=y | ||
386 | CONFIG_BLK_DEV_SD=y | ||
387 | CONFIG_CHR_DEV_ST=m | ||
388 | CONFIG_CHR_DEV_OSST=m | ||
389 | CONFIG_BLK_DEV_SR=m | ||
390 | CONFIG_CHR_DEV_SG=y | ||
391 | CONFIG_CHR_DEV_SCH=m | ||
392 | CONFIG_SCSI_ENCLOSURE=m | ||
393 | CONFIG_SCSI_CONSTANTS=y | ||
394 | CONFIG_SCSI_LOGGING=y | ||
395 | CONFIG_SCSI_SPI_ATTRS=m | ||
396 | CONFIG_SCSI_FC_ATTRS=y | ||
397 | CONFIG_SCSI_SAS_LIBSAS=m | ||
398 | CONFIG_SCSI_SRP_ATTRS=m | ||
399 | CONFIG_ISCSI_TCP=m | ||
400 | CONFIG_SCSI_DEBUG=m | ||
401 | CONFIG_ZFCP=y | ||
402 | CONFIG_SCSI_VIRTIO=m | ||
403 | CONFIG_SCSI_DH=y | ||
404 | CONFIG_SCSI_DH_RDAC=m | ||
405 | CONFIG_SCSI_DH_HP_SW=m | ||
406 | CONFIG_SCSI_DH_EMC=m | ||
407 | CONFIG_SCSI_DH_ALUA=m | ||
408 | CONFIG_SCSI_OSD_INITIATOR=m | ||
409 | CONFIG_SCSI_OSD_ULD=m | ||
410 | CONFIG_MD=y | ||
411 | CONFIG_BLK_DEV_MD=y | ||
412 | CONFIG_MD_LINEAR=m | ||
413 | CONFIG_MD_MULTIPATH=m | ||
414 | CONFIG_MD_FAULTY=m | ||
415 | CONFIG_BLK_DEV_DM=m | ||
416 | CONFIG_DM_CRYPT=m | ||
417 | CONFIG_DM_SNAPSHOT=m | ||
418 | CONFIG_DM_THIN_PROVISIONING=m | ||
419 | CONFIG_DM_MIRROR=m | ||
420 | CONFIG_DM_LOG_USERSPACE=m | ||
421 | CONFIG_DM_RAID=m | ||
422 | CONFIG_DM_ZERO=m | ||
423 | CONFIG_DM_MULTIPATH=m | ||
424 | CONFIG_DM_MULTIPATH_QL=m | ||
425 | CONFIG_DM_MULTIPATH_ST=m | ||
426 | CONFIG_DM_DELAY=m | ||
427 | CONFIG_DM_UEVENT=y | ||
428 | CONFIG_DM_FLAKEY=m | ||
429 | CONFIG_DM_VERITY=m | ||
430 | CONFIG_DM_SWITCH=m | ||
431 | CONFIG_NETDEVICES=y | ||
432 | CONFIG_BONDING=m | ||
433 | CONFIG_DUMMY=m | ||
434 | CONFIG_EQUALIZER=m | ||
435 | CONFIG_IFB=m | ||
436 | CONFIG_MACVLAN=m | ||
437 | CONFIG_MACVTAP=m | ||
438 | CONFIG_VXLAN=m | ||
439 | CONFIG_TUN=m | ||
440 | CONFIG_VETH=m | ||
441 | CONFIG_VIRTIO_NET=m | ||
442 | CONFIG_NLMON=m | ||
443 | # CONFIG_NET_VENDOR_ARC is not set | ||
444 | # CONFIG_NET_VENDOR_CHELSIO is not set | ||
445 | # CONFIG_NET_VENDOR_INTEL is not set | ||
446 | # CONFIG_NET_VENDOR_MARVELL is not set | ||
447 | CONFIG_MLX4_EN=m | ||
448 | CONFIG_MLX5_CORE=m | ||
449 | CONFIG_MLX5_CORE_EN=y | ||
450 | # CONFIG_NET_VENDOR_NATSEMI is not set | ||
451 | CONFIG_PPP=m | ||
452 | CONFIG_PPP_BSDCOMP=m | ||
453 | CONFIG_PPP_DEFLATE=m | ||
454 | CONFIG_PPP_MPPE=m | ||
455 | CONFIG_PPPOE=m | ||
456 | CONFIG_PPTP=m | ||
457 | CONFIG_PPPOL2TP=m | ||
458 | CONFIG_PPP_ASYNC=m | ||
459 | CONFIG_PPP_SYNC_TTY=m | ||
460 | # CONFIG_INPUT_KEYBOARD is not set | ||
461 | # CONFIG_INPUT_MOUSE is not set | ||
462 | # CONFIG_SERIO is not set | ||
463 | CONFIG_LEGACY_PTY_COUNT=0 | ||
464 | CONFIG_HW_RANDOM_VIRTIO=m | ||
465 | CONFIG_RAW_DRIVER=m | ||
466 | CONFIG_HANGCHECK_TIMER=m | ||
467 | CONFIG_TN3270_FS=y | ||
468 | # CONFIG_HWMON is not set | ||
469 | CONFIG_WATCHDOG=y | ||
470 | CONFIG_WATCHDOG_NOWAYOUT=y | ||
471 | CONFIG_SOFT_WATCHDOG=m | ||
472 | CONFIG_DIAG288_WATCHDOG=m | ||
473 | # CONFIG_HID is not set | ||
474 | # CONFIG_USB_SUPPORT is not set | ||
475 | CONFIG_INFINIBAND=m | ||
476 | CONFIG_INFINIBAND_USER_ACCESS=m | ||
477 | CONFIG_MLX4_INFINIBAND=m | ||
478 | CONFIG_MLX5_INFINIBAND=m | ||
479 | CONFIG_VFIO=m | ||
480 | CONFIG_VFIO_PCI=m | ||
481 | CONFIG_VIRTIO_BALLOON=m | ||
482 | CONFIG_EXT4_FS=y | ||
483 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
484 | CONFIG_EXT4_FS_SECURITY=y | ||
485 | CONFIG_EXT4_ENCRYPTION=y | ||
486 | CONFIG_JBD2_DEBUG=y | ||
487 | CONFIG_JFS_FS=m | ||
488 | CONFIG_JFS_POSIX_ACL=y | ||
489 | CONFIG_JFS_SECURITY=y | ||
490 | CONFIG_JFS_STATISTICS=y | ||
491 | CONFIG_XFS_FS=y | ||
492 | CONFIG_XFS_QUOTA=y | ||
493 | CONFIG_XFS_POSIX_ACL=y | ||
494 | CONFIG_XFS_RT=y | ||
495 | CONFIG_GFS2_FS=m | ||
496 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
497 | CONFIG_OCFS2_FS=m | ||
498 | CONFIG_BTRFS_FS=y | ||
499 | CONFIG_BTRFS_FS_POSIX_ACL=y | ||
500 | CONFIG_NILFS2_FS=m | ||
501 | CONFIG_FS_DAX=y | ||
502 | CONFIG_EXPORTFS_BLOCK_OPS=y | ||
503 | CONFIG_FANOTIFY=y | ||
504 | CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y | ||
505 | CONFIG_QUOTA_NETLINK_INTERFACE=y | ||
506 | CONFIG_QFMT_V1=m | ||
507 | CONFIG_QFMT_V2=m | ||
508 | CONFIG_AUTOFS4_FS=m | ||
509 | CONFIG_FUSE_FS=y | ||
510 | CONFIG_CUSE=m | ||
511 | CONFIG_OVERLAY_FS=m | ||
512 | CONFIG_OVERLAY_FS_REDIRECT_DIR=y | ||
513 | CONFIG_FSCACHE=m | ||
514 | CONFIG_CACHEFILES=m | ||
515 | CONFIG_ISO9660_FS=y | ||
516 | CONFIG_JOLIET=y | ||
517 | CONFIG_ZISOFS=y | ||
518 | CONFIG_UDF_FS=m | ||
519 | CONFIG_MSDOS_FS=m | ||
520 | CONFIG_VFAT_FS=m | ||
521 | CONFIG_NTFS_FS=m | ||
522 | CONFIG_NTFS_RW=y | ||
523 | CONFIG_PROC_KCORE=y | ||
524 | CONFIG_TMPFS=y | ||
525 | CONFIG_TMPFS_POSIX_ACL=y | ||
526 | CONFIG_HUGETLBFS=y | ||
527 | CONFIG_CONFIGFS_FS=m | ||
528 | CONFIG_ECRYPT_FS=m | ||
529 | CONFIG_CRAMFS=m | ||
530 | CONFIG_SQUASHFS=m | ||
531 | CONFIG_SQUASHFS_XATTR=y | ||
532 | CONFIG_SQUASHFS_LZO=y | ||
533 | CONFIG_SQUASHFS_XZ=y | ||
534 | CONFIG_ROMFS_FS=m | ||
535 | CONFIG_NFS_FS=m | ||
536 | CONFIG_NFS_V3_ACL=y | ||
537 | CONFIG_NFS_V4=m | ||
538 | CONFIG_NFS_SWAP=y | ||
539 | CONFIG_NFSD=m | ||
540 | CONFIG_NFSD_V3_ACL=y | ||
541 | CONFIG_NFSD_V4=y | ||
542 | CONFIG_NFSD_V4_SECURITY_LABEL=y | ||
543 | CONFIG_CIFS=m | ||
544 | CONFIG_CIFS_STATS=y | ||
545 | CONFIG_CIFS_STATS2=y | ||
546 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
547 | CONFIG_CIFS_UPCALL=y | ||
548 | CONFIG_CIFS_XATTR=y | ||
549 | CONFIG_CIFS_POSIX=y | ||
550 | # CONFIG_CIFS_DEBUG is not set | ||
551 | CONFIG_CIFS_DFS_UPCALL=y | ||
552 | CONFIG_NLS_DEFAULT="utf8" | ||
553 | CONFIG_NLS_CODEPAGE_437=m | ||
554 | CONFIG_NLS_CODEPAGE_850=m | ||
555 | CONFIG_NLS_ASCII=m | ||
556 | CONFIG_NLS_ISO8859_1=m | ||
557 | CONFIG_NLS_ISO8859_15=m | ||
558 | CONFIG_NLS_UTF8=m | ||
559 | CONFIG_DLM=m | ||
560 | CONFIG_PRINTK_TIME=y | ||
561 | CONFIG_DEBUG_INFO=y | ||
562 | CONFIG_DEBUG_INFO_DWARF4=y | ||
563 | CONFIG_GDB_SCRIPTS=y | ||
564 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
565 | CONFIG_FRAME_WARN=1024 | ||
566 | CONFIG_UNUSED_SYMBOLS=y | ||
567 | CONFIG_MAGIC_SYSRQ=y | ||
568 | CONFIG_DEBUG_MEMORY_INIT=y | ||
569 | CONFIG_PANIC_ON_OOPS=y | ||
570 | CONFIG_RCU_TORTURE_TEST=m | ||
571 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | ||
572 | CONFIG_LATENCYTOP=y | ||
573 | CONFIG_SCHED_TRACER=y | ||
574 | CONFIG_FTRACE_SYSCALLS=y | ||
575 | CONFIG_STACK_TRACER=y | ||
576 | CONFIG_BLK_DEV_IO_TRACE=y | ||
577 | CONFIG_FUNCTION_PROFILER=y | ||
578 | CONFIG_HIST_TRIGGERS=y | ||
579 | CONFIG_LKDTM=m | ||
580 | CONFIG_PERCPU_TEST=m | ||
581 | CONFIG_ATOMIC64_SELFTEST=y | ||
582 | CONFIG_TEST_BPF=m | ||
583 | CONFIG_BUG_ON_DATA_CORRUPTION=y | ||
584 | CONFIG_S390_PTDUMP=y | ||
585 | CONFIG_PERSISTENT_KEYRINGS=y | ||
586 | CONFIG_BIG_KEYS=y | ||
587 | CONFIG_ENCRYPTED_KEYS=m | ||
588 | CONFIG_SECURITY=y | ||
589 | CONFIG_SECURITY_NETWORK=y | ||
590 | CONFIG_SECURITY_SELINUX=y | ||
591 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | ||
592 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | ||
593 | CONFIG_SECURITY_SELINUX_DISABLE=y | ||
594 | CONFIG_INTEGRITY_SIGNATURE=y | ||
595 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | ||
596 | CONFIG_IMA=y | ||
597 | CONFIG_IMA_WRITE_POLICY=y | ||
598 | CONFIG_IMA_APPRAISE=y | ||
599 | CONFIG_CRYPTO_DH=m | ||
600 | CONFIG_CRYPTO_ECDH=m | ||
601 | CONFIG_CRYPTO_USER=m | ||
602 | # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set | ||
603 | CONFIG_CRYPTO_PCRYPT=m | ||
604 | CONFIG_CRYPTO_CRYPTD=m | ||
605 | CONFIG_CRYPTO_MCRYPTD=m | ||
606 | CONFIG_CRYPTO_TEST=m | ||
607 | CONFIG_CRYPTO_CHACHA20POLY1305=m | ||
608 | CONFIG_CRYPTO_LRW=m | ||
609 | CONFIG_CRYPTO_PCBC=m | ||
610 | CONFIG_CRYPTO_KEYWRAP=m | ||
611 | CONFIG_CRYPTO_XCBC=m | ||
612 | CONFIG_CRYPTO_VMAC=m | ||
613 | CONFIG_CRYPTO_CRC32=m | ||
614 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
615 | CONFIG_CRYPTO_RMD128=m | ||
616 | CONFIG_CRYPTO_RMD160=m | ||
617 | CONFIG_CRYPTO_RMD256=m | ||
618 | CONFIG_CRYPTO_RMD320=m | ||
619 | CONFIG_CRYPTO_SHA512=m | ||
620 | CONFIG_CRYPTO_SHA3=m | ||
621 | CONFIG_CRYPTO_TGR192=m | ||
622 | CONFIG_CRYPTO_WP512=m | ||
623 | CONFIG_CRYPTO_AES_TI=m | ||
624 | CONFIG_CRYPTO_ANUBIS=m | ||
625 | CONFIG_CRYPTO_BLOWFISH=m | ||
626 | CONFIG_CRYPTO_CAMELLIA=m | ||
627 | CONFIG_CRYPTO_CAST5=m | ||
628 | CONFIG_CRYPTO_CAST6=m | ||
629 | CONFIG_CRYPTO_FCRYPT=m | ||
630 | CONFIG_CRYPTO_KHAZAD=m | ||
631 | CONFIG_CRYPTO_SALSA20=m | ||
632 | CONFIG_CRYPTO_SEED=m | ||
633 | CONFIG_CRYPTO_SERPENT=m | ||
634 | CONFIG_CRYPTO_TEA=m | ||
635 | CONFIG_CRYPTO_TWOFISH=m | ||
636 | CONFIG_CRYPTO_842=m | ||
637 | CONFIG_CRYPTO_LZ4=m | ||
638 | CONFIG_CRYPTO_LZ4HC=m | ||
639 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
640 | CONFIG_CRYPTO_USER_API_HASH=m | ||
641 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | ||
642 | CONFIG_CRYPTO_USER_API_RNG=m | ||
643 | CONFIG_CRYPTO_USER_API_AEAD=m | ||
644 | CONFIG_ZCRYPT=m | ||
645 | CONFIG_PKEY=m | ||
646 | CONFIG_CRYPTO_PAES_S390=m | ||
647 | CONFIG_CRYPTO_SHA1_S390=m | ||
648 | CONFIG_CRYPTO_SHA256_S390=m | ||
649 | CONFIG_CRYPTO_SHA512_S390=m | ||
650 | CONFIG_CRYPTO_DES_S390=m | ||
651 | CONFIG_CRYPTO_AES_S390=m | ||
652 | CONFIG_CRYPTO_GHASH_S390=m | ||
653 | CONFIG_CRYPTO_CRC32_S390=y | ||
654 | CONFIG_CRC7=m | ||
655 | CONFIG_CRC8=m | ||
656 | CONFIG_CORDIC=m | ||
657 | CONFIG_CMM=m | ||
658 | CONFIG_APPLDATA_BASE=y | ||
659 | CONFIG_KVM=m | ||
660 | CONFIG_KVM_S390_UCONTROL=y | ||
661 | CONFIG_VHOST_NET=m | ||
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 20ed149e1137..c105bcc6d7a6 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -25,13 +25,13 @@ CONFIG_CPUSETS=y | |||
25 | CONFIG_CGROUP_DEVICE=y | 25 | CONFIG_CGROUP_DEVICE=y |
26 | CONFIG_CGROUP_CPUACCT=y | 26 | CONFIG_CGROUP_CPUACCT=y |
27 | CONFIG_CGROUP_PERF=y | 27 | CONFIG_CGROUP_PERF=y |
28 | CONFIG_CHECKPOINT_RESTORE=y | ||
29 | CONFIG_NAMESPACES=y | 28 | CONFIG_NAMESPACES=y |
30 | CONFIG_USER_NS=y | 29 | CONFIG_USER_NS=y |
31 | CONFIG_SCHED_AUTOGROUP=y | 30 | CONFIG_SCHED_AUTOGROUP=y |
32 | CONFIG_BLK_DEV_INITRD=y | 31 | CONFIG_BLK_DEV_INITRD=y |
33 | CONFIG_EXPERT=y | 32 | CONFIG_EXPERT=y |
34 | # CONFIG_SYSFS_SYSCALL is not set | 33 | # CONFIG_SYSFS_SYSCALL is not set |
34 | CONFIG_CHECKPOINT_RESTORE=y | ||
35 | CONFIG_BPF_SYSCALL=y | 35 | CONFIG_BPF_SYSCALL=y |
36 | CONFIG_USERFAULTFD=y | 36 | CONFIG_USERFAULTFD=y |
37 | # CONFIG_COMPAT_BRK is not set | 37 | # CONFIG_COMPAT_BRK is not set |
@@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y | |||
45 | CONFIG_MODULE_FORCE_UNLOAD=y | 45 | CONFIG_MODULE_FORCE_UNLOAD=y |
46 | CONFIG_MODVERSIONS=y | 46 | CONFIG_MODVERSIONS=y |
47 | CONFIG_MODULE_SRCVERSION_ALL=y | 47 | CONFIG_MODULE_SRCVERSION_ALL=y |
48 | CONFIG_MODULE_SIG=y | ||
49 | CONFIG_MODULE_SIG_SHA256=y | ||
48 | CONFIG_BLK_DEV_INTEGRITY=y | 50 | CONFIG_BLK_DEV_INTEGRITY=y |
49 | CONFIG_BLK_DEV_THROTTLING=y | 51 | CONFIG_BLK_DEV_THROTTLING=y |
50 | CONFIG_BLK_WBT=y | 52 | CONFIG_BLK_WBT=y |
@@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y | |||
62 | CONFIG_NR_CPUS=512 | 64 | CONFIG_NR_CPUS=512 |
63 | CONFIG_NUMA=y | 65 | CONFIG_NUMA=y |
64 | CONFIG_HZ_100=y | 66 | CONFIG_HZ_100=y |
67 | CONFIG_KEXEC_FILE=y | ||
65 | CONFIG_MEMORY_HOTPLUG=y | 68 | CONFIG_MEMORY_HOTPLUG=y |
66 | CONFIG_MEMORY_HOTREMOVE=y | 69 | CONFIG_MEMORY_HOTREMOVE=y |
67 | CONFIG_KSM=y | 70 | CONFIG_KSM=y |
@@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m | |||
301 | CONFIG_IP6_NF_NAT=m | 304 | CONFIG_IP6_NF_NAT=m |
302 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 305 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
303 | CONFIG_NF_TABLES_BRIDGE=m | 306 | CONFIG_NF_TABLES_BRIDGE=m |
304 | CONFIG_NET_SCTPPROBE=m | ||
305 | CONFIG_RDS=m | 307 | CONFIG_RDS=m |
306 | CONFIG_RDS_RDMA=m | 308 | CONFIG_RDS_RDMA=m |
307 | CONFIG_RDS_TCP=m | 309 | CONFIG_RDS_TCP=m |
@@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m | |||
359 | CONFIG_NET_ACT_SKBEDIT=m | 361 | CONFIG_NET_ACT_SKBEDIT=m |
360 | CONFIG_NET_ACT_CSUM=m | 362 | CONFIG_NET_ACT_CSUM=m |
361 | CONFIG_DNS_RESOLVER=y | 363 | CONFIG_DNS_RESOLVER=y |
364 | CONFIG_OPENVSWITCH=m | ||
362 | CONFIG_NETLINK_DIAG=m | 365 | CONFIG_NETLINK_DIAG=m |
363 | CONFIG_CGROUP_NET_PRIO=y | 366 | CONFIG_CGROUP_NET_PRIO=y |
364 | CONFIG_BPF_JIT=y | 367 | CONFIG_BPF_JIT=y |
365 | CONFIG_NET_PKTGEN=m | 368 | CONFIG_NET_PKTGEN=m |
366 | CONFIG_NET_TCPPROBE=m | ||
367 | CONFIG_DEVTMPFS=y | 369 | CONFIG_DEVTMPFS=y |
368 | CONFIG_DMA_CMA=y | 370 | CONFIG_DMA_CMA=y |
369 | CONFIG_CMA_SIZE_MBYTES=0 | 371 | CONFIG_CMA_SIZE_MBYTES=0 |
@@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m | |||
375 | CONFIG_BLK_DEV_NBD=m | 377 | CONFIG_BLK_DEV_NBD=m |
376 | CONFIG_BLK_DEV_RAM=y | 378 | CONFIG_BLK_DEV_RAM=y |
377 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 379 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
378 | CONFIG_BLK_DEV_RAM_DAX=y | ||
379 | CONFIG_VIRTIO_BLK=y | 380 | CONFIG_VIRTIO_BLK=y |
381 | CONFIG_BLK_DEV_RBD=m | ||
382 | CONFIG_BLK_DEV_NVME=m | ||
380 | CONFIG_ENCLOSURE_SERVICES=m | 383 | CONFIG_ENCLOSURE_SERVICES=m |
381 | CONFIG_GENWQE=m | 384 | CONFIG_GENWQE=m |
382 | CONFIG_RAID_ATTRS=m | 385 | CONFIG_RAID_ATTRS=m |
@@ -455,6 +458,7 @@ CONFIG_PPTP=m | |||
455 | CONFIG_PPPOL2TP=m | 458 | CONFIG_PPPOL2TP=m |
456 | CONFIG_PPP_ASYNC=m | 459 | CONFIG_PPP_ASYNC=m |
457 | CONFIG_PPP_SYNC_TTY=m | 460 | CONFIG_PPP_SYNC_TTY=m |
461 | CONFIG_INPUT_EVDEV=y | ||
458 | # CONFIG_INPUT_KEYBOARD is not set | 462 | # CONFIG_INPUT_KEYBOARD is not set |
459 | # CONFIG_INPUT_MOUSE is not set | 463 | # CONFIG_INPUT_MOUSE is not set |
460 | # CONFIG_SERIO is not set | 464 | # CONFIG_SERIO is not set |
@@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y | |||
468 | CONFIG_WATCHDOG_NOWAYOUT=y | 472 | CONFIG_WATCHDOG_NOWAYOUT=y |
469 | CONFIG_SOFT_WATCHDOG=m | 473 | CONFIG_SOFT_WATCHDOG=m |
470 | CONFIG_DIAG288_WATCHDOG=m | 474 | CONFIG_DIAG288_WATCHDOG=m |
475 | CONFIG_DRM=y | ||
476 | CONFIG_DRM_VIRTIO_GPU=y | ||
477 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
471 | # CONFIG_HID is not set | 478 | # CONFIG_HID is not set |
472 | # CONFIG_USB_SUPPORT is not set | 479 | # CONFIG_USB_SUPPORT is not set |
473 | CONFIG_INFINIBAND=m | 480 | CONFIG_INFINIBAND=m |
@@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m | |||
476 | CONFIG_MLX5_INFINIBAND=m | 483 | CONFIG_MLX5_INFINIBAND=m |
477 | CONFIG_VFIO=m | 484 | CONFIG_VFIO=m |
478 | CONFIG_VFIO_PCI=m | 485 | CONFIG_VFIO_PCI=m |
486 | CONFIG_VIRTIO_PCI=m | ||
479 | CONFIG_VIRTIO_BALLOON=m | 487 | CONFIG_VIRTIO_BALLOON=m |
488 | CONFIG_VIRTIO_INPUT=y | ||
480 | CONFIG_EXT4_FS=y | 489 | CONFIG_EXT4_FS=y |
481 | CONFIG_EXT4_FS_POSIX_ACL=y | 490 | CONFIG_EXT4_FS_POSIX_ACL=y |
482 | CONFIG_EXT4_FS_SECURITY=y | 491 | CONFIG_EXT4_FS_SECURITY=y |
@@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m | |||
507 | CONFIG_FUSE_FS=y | 516 | CONFIG_FUSE_FS=y |
508 | CONFIG_CUSE=m | 517 | CONFIG_CUSE=m |
509 | CONFIG_OVERLAY_FS=m | 518 | CONFIG_OVERLAY_FS=m |
510 | CONFIG_OVERLAY_FS_REDIRECT_DIR=y | ||
511 | CONFIG_FSCACHE=m | 519 | CONFIG_FSCACHE=m |
512 | CONFIG_CACHEFILES=m | 520 | CONFIG_CACHEFILES=m |
513 | CONFIG_ISO9660_FS=y | 521 | CONFIG_ISO9660_FS=y |
@@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y | |||
592 | CONFIG_INTEGRITY_SIGNATURE=y | 600 | CONFIG_INTEGRITY_SIGNATURE=y |
593 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | 601 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y |
594 | CONFIG_IMA=y | 602 | CONFIG_IMA=y |
603 | CONFIG_IMA_DEFAULT_HASH_SHA256=y | ||
595 | CONFIG_IMA_WRITE_POLICY=y | 604 | CONFIG_IMA_WRITE_POLICY=y |
596 | CONFIG_IMA_APPRAISE=y | 605 | CONFIG_IMA_APPRAISE=y |
606 | CONFIG_CRYPTO_FIPS=y | ||
597 | CONFIG_CRYPTO_DH=m | 607 | CONFIG_CRYPTO_DH=m |
598 | CONFIG_CRYPTO_ECDH=m | 608 | CONFIG_CRYPTO_ECDH=m |
599 | CONFIG_CRYPTO_USER=m | 609 | CONFIG_CRYPTO_USER=m |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 46a3178d8bc6..f40600eb1762 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y | |||
8 | CONFIG_TASK_DELAY_ACCT=y | 8 | CONFIG_TASK_DELAY_ACCT=y |
9 | CONFIG_TASK_XACCT=y | 9 | CONFIG_TASK_XACCT=y |
10 | CONFIG_TASK_IO_ACCOUNTING=y | 10 | CONFIG_TASK_IO_ACCOUNTING=y |
11 | # CONFIG_CPU_ISOLATION is not set | ||
11 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
12 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
13 | CONFIG_CGROUPS=y | 14 | CONFIG_CGROUPS=y |
@@ -23,12 +24,12 @@ CONFIG_CPUSETS=y | |||
23 | CONFIG_CGROUP_DEVICE=y | 24 | CONFIG_CGROUP_DEVICE=y |
24 | CONFIG_CGROUP_CPUACCT=y | 25 | CONFIG_CGROUP_CPUACCT=y |
25 | CONFIG_CGROUP_PERF=y | 26 | CONFIG_CGROUP_PERF=y |
26 | CONFIG_CHECKPOINT_RESTORE=y | ||
27 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
29 | CONFIG_BLK_DEV_INITRD=y | 29 | CONFIG_BLK_DEV_INITRD=y |
30 | CONFIG_EXPERT=y | 30 | CONFIG_EXPERT=y |
31 | # CONFIG_SYSFS_SYSCALL is not set | 31 | # CONFIG_SYSFS_SYSCALL is not set |
32 | CONFIG_CHECKPOINT_RESTORE=y | ||
32 | CONFIG_BPF_SYSCALL=y | 33 | CONFIG_BPF_SYSCALL=y |
33 | CONFIG_USERFAULTFD=y | 34 | CONFIG_USERFAULTFD=y |
34 | # CONFIG_COMPAT_BRK is not set | 35 | # CONFIG_COMPAT_BRK is not set |
@@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y | |||
47 | CONFIG_NR_CPUS=256 | 48 | CONFIG_NR_CPUS=256 |
48 | CONFIG_NUMA=y | 49 | CONFIG_NUMA=y |
49 | CONFIG_HZ_100=y | 50 | CONFIG_HZ_100=y |
51 | CONFIG_KEXEC_FILE=y | ||
50 | CONFIG_MEMORY_HOTPLUG=y | 52 | CONFIG_MEMORY_HOTPLUG=y |
51 | CONFIG_MEMORY_HOTREMOVE=y | 53 | CONFIG_MEMORY_HOTREMOVE=y |
52 | CONFIG_KSM=y | 54 | CONFIG_KSM=y |
@@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m | |||
129 | CONFIG_TUN=m | 131 | CONFIG_TUN=m |
130 | CONFIG_VIRTIO_NET=y | 132 | CONFIG_VIRTIO_NET=y |
131 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 133 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
134 | # CONFIG_NET_VENDOR_CORTINA is not set | ||
132 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 135 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
136 | # CONFIG_NET_VENDOR_SOCIONEXT is not set | ||
133 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | 137 | # CONFIG_NET_VENDOR_SYNOPSYS is not set |
134 | # CONFIG_INPUT is not set | 138 | # CONFIG_INPUT is not set |
135 | # CONFIG_SERIO is not set | 139 | # CONFIG_SERIO is not set |
140 | # CONFIG_VT is not set | ||
136 | CONFIG_DEVKMEM=y | 141 | CONFIG_DEVKMEM=y |
137 | CONFIG_RAW_DRIVER=m | 142 | CONFIG_RAW_DRIVER=m |
138 | CONFIG_VIRTIO_BALLOON=y | 143 | CONFIG_VIRTIO_BALLOON=y |
@@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | |||
177 | CONFIG_STACK_TRACER=y | 182 | CONFIG_STACK_TRACER=y |
178 | CONFIG_BLK_DEV_IO_TRACE=y | 183 | CONFIG_BLK_DEV_IO_TRACE=y |
179 | CONFIG_FUNCTION_PROFILER=y | 184 | CONFIG_FUNCTION_PROFILER=y |
180 | CONFIG_KPROBES_SANITY_TEST=y | 185 | # CONFIG_RUNTIME_TESTING_MENU is not set |
181 | CONFIG_S390_PTDUMP=y | 186 | CONFIG_S390_PTDUMP=y |
182 | CONFIG_CRYPTO_CRYPTD=m | 187 | CONFIG_CRYPTO_CRYPTD=m |
188 | CONFIG_CRYPTO_AUTHENC=m | ||
183 | CONFIG_CRYPTO_TEST=m | 189 | CONFIG_CRYPTO_TEST=m |
184 | CONFIG_CRYPTO_CCM=m | 190 | CONFIG_CRYPTO_CCM=m |
185 | CONFIG_CRYPTO_GCM=m | 191 | CONFIG_CRYPTO_GCM=m |
186 | CONFIG_CRYPTO_CBC=y | 192 | CONFIG_CRYPTO_CBC=y |
193 | CONFIG_CRYPTO_CFB=m | ||
187 | CONFIG_CRYPTO_CTS=m | 194 | CONFIG_CRYPTO_CTS=m |
188 | CONFIG_CRYPTO_LRW=m | 195 | CONFIG_CRYPTO_LRW=m |
189 | CONFIG_CRYPTO_PCBC=m | 196 | CONFIG_CRYPTO_PCBC=m |
@@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m | |||
213 | CONFIG_CRYPTO_SALSA20=m | 220 | CONFIG_CRYPTO_SALSA20=m |
214 | CONFIG_CRYPTO_SEED=m | 221 | CONFIG_CRYPTO_SEED=m |
215 | CONFIG_CRYPTO_SERPENT=m | 222 | CONFIG_CRYPTO_SERPENT=m |
223 | CONFIG_CRYPTO_SM4=m | ||
224 | CONFIG_CRYPTO_SPECK=m | ||
216 | CONFIG_CRYPTO_TEA=m | 225 | CONFIG_CRYPTO_TEA=m |
217 | CONFIG_CRYPTO_TWOFISH=m | 226 | CONFIG_CRYPTO_TWOFISH=m |
218 | CONFIG_CRYPTO_DEFLATE=m | 227 | CONFIG_CRYPTO_DEFLATE=m |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 43bbe63e2992..06b513d192b9 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb) | |||
320 | 320 | ||
321 | if (sb->s_root) | 321 | if (sb->s_root) |
322 | hypfs_delete_tree(sb->s_root); | 322 | hypfs_delete_tree(sb->s_root); |
323 | if (sb_info->update_file) | 323 | if (sb_info && sb_info->update_file) |
324 | hypfs_remove(sb_info->update_file); | 324 | hypfs_remove(sb_info->update_file); |
325 | kfree(sb->s_fs_info); | 325 | kfree(sb->s_fs_info); |
326 | sb->s_fs_info = NULL; | 326 | sb->s_fs_info = NULL; |
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 1d708a419326..825dd0f7f221 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h | |||
@@ -46,4 +46,27 @@ | |||
46 | static inline void crash_setup_regs(struct pt_regs *newregs, | 46 | static inline void crash_setup_regs(struct pt_regs *newregs, |
47 | struct pt_regs *oldregs) { } | 47 | struct pt_regs *oldregs) { } |
48 | 48 | ||
49 | struct kimage; | ||
50 | struct s390_load_data { | ||
51 | /* Pointer to the kernel buffer. Used to register cmdline etc. */ | ||
52 | void *kernel_buf; | ||
53 | |||
54 | /* Total size of loaded segments in memory. Used as an offset. */ | ||
55 | size_t memsz; | ||
56 | |||
57 | /* Load address of initrd. Used to register INITRD_START in kernel. */ | ||
58 | unsigned long initrd_load_addr; | ||
59 | }; | ||
60 | |||
61 | int kexec_file_add_purgatory(struct kimage *image, | ||
62 | struct s390_load_data *data); | ||
63 | int kexec_file_add_initrd(struct kimage *image, | ||
64 | struct s390_load_data *data, | ||
65 | char *initrd, unsigned long initrd_len); | ||
66 | int *kexec_file_update_kernel(struct kimage *image, | ||
67 | struct s390_load_data *data); | ||
68 | |||
69 | extern const struct kexec_file_ops s390_kexec_image_ops; | ||
70 | extern const struct kexec_file_ops s390_kexec_elf_ops; | ||
71 | |||
49 | #endif /*_S390_KEXEC_H */ | 72 | #endif /*_S390_KEXEC_H */ |
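The declarations above define the contract between the arch glue and the individual loaders: a loader fills struct s390_load_data while it queues segments, then hands the bookkeeping to the common helpers. The two loaders added further down (kexec_image.c and kexec_elf.c) follow exactly this pattern. As a reading aid only, here is a minimal sketch of the shared tail of that flow, assuming the hunks in this patch are applied; it is an illustration, not code from the patch.

/* Illustration of the common loader tail; assumes this patch is applied.
 * data->kernel_buf and data->memsz were already set while the loader
 * queued the kernel segments with kexec_add_buffer().
 */
#include <linux/err.h>
#include <linux/kexec.h>
#include <asm/kexec.h>

static void *sketch_finish_load(struct kimage *image,
				struct s390_load_data *data,
				char *initrd, unsigned long initrd_len)
{
	int ret;

	/* optional initrd, placed page-aligned behind the kernel */
	if (initrd) {
		ret = kexec_file_add_initrd(image, data, initrd, initrd_len);
		if (ret)
			return ERR_PTR(ret);
	}

	/* purgatory, then its entry point and type symbols are patched */
	ret = kexec_file_add_purgatory(image, data);
	if (ret)
		return ERR_PTR(ret);

	/* patches cmdline, initrd location/size and, for crash kernels,
	 * OLDMEM_BASE/OLDMEM_SIZE into the kernel buffer */
	return kexec_file_update_kernel(image, data);
}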
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h new file mode 100644 index 000000000000..e297bcfc476f --- /dev/null +++ b/arch/s390/include/asm/purgatory.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright IBM Corp. 2018 | ||
4 | * | ||
5 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _S390_PURGATORY_H_ | ||
9 | #define _S390_PURGATORY_H_ | ||
10 | #ifndef __ASSEMBLY__ | ||
11 | |||
12 | #include <linux/purgatory.h> | ||
13 | |||
14 | int verify_sha256_digest(void); | ||
15 | |||
16 | #endif /* __ASSEMBLY__ */ | ||
17 | #endif /* _S390_PURGATORY_H_ */ | ||
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 124154fdfc97..9c30ebe046f3 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | 2 | /* |
3 | * S390 version | 3 | * S390 version |
4 | * Copyright IBM Corp. 1999, 2010 | 4 | * Copyright IBM Corp. 1999, 2017 |
5 | */ | 5 | */ |
6 | #ifndef _ASM_S390_SETUP_H | 6 | #ifndef _ASM_S390_SETUP_H |
7 | #define _ASM_S390_SETUP_H | 7 | #define _ASM_S390_SETUP_H |
@@ -37,17 +37,31 @@ | |||
37 | #define LPP_MAGIC _BITUL(31) | 37 | #define LPP_MAGIC _BITUL(31) |
38 | #define LPP_PID_MASK _AC(0xffffffff, UL) | 38 | #define LPP_PID_MASK _AC(0xffffffff, UL) |
39 | 39 | ||
40 | /* Offsets to entry points in kernel/head.S */ | ||
41 | |||
42 | #define STARTUP_NORMAL_OFFSET 0x10000 | ||
43 | #define STARTUP_KDUMP_OFFSET 0x10010 | ||
44 | |||
45 | /* Offsets to parameters in kernel/head.S */ | ||
46 | |||
47 | #define IPL_DEVICE_OFFSET 0x10400 | ||
48 | #define INITRD_START_OFFSET 0x10408 | ||
49 | #define INITRD_SIZE_OFFSET 0x10410 | ||
50 | #define OLDMEM_BASE_OFFSET 0x10418 | ||
51 | #define OLDMEM_SIZE_OFFSET 0x10420 | ||
52 | #define COMMAND_LINE_OFFSET 0x10480 | ||
53 | |||
40 | #ifndef __ASSEMBLY__ | 54 | #ifndef __ASSEMBLY__ |
41 | 55 | ||
42 | #include <asm/lowcore.h> | 56 | #include <asm/lowcore.h> |
43 | #include <asm/types.h> | 57 | #include <asm/types.h> |
44 | 58 | ||
45 | #define IPL_DEVICE (*(unsigned long *) (0x10400)) | 59 | #define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET)) |
46 | #define INITRD_START (*(unsigned long *) (0x10408)) | 60 | #define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET)) |
47 | #define INITRD_SIZE (*(unsigned long *) (0x10410)) | 61 | #define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET)) |
48 | #define OLDMEM_BASE (*(unsigned long *) (0x10418)) | 62 | #define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET)) |
49 | #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) | 63 | #define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) |
50 | #define COMMAND_LINE ((char *) (0x10480)) | 64 | #define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) |
51 | 65 | ||
52 | extern int memory_end_set; | 66 | extern int memory_end_set; |
53 | extern unsigned long memory_end; | 67 | extern unsigned long memory_end; |
@@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void); | |||
121 | 135 | ||
122 | #else /* __ASSEMBLY__ */ | 136 | #else /* __ASSEMBLY__ */ |
123 | 137 | ||
124 | #define IPL_DEVICE 0x10400 | 138 | #define IPL_DEVICE (IPL_DEVICE_OFFSET) |
125 | #define INITRD_START 0x10408 | 139 | #define INITRD_START (INITRD_START_OFFSET) |
126 | #define INITRD_SIZE 0x10410 | 140 | #define INITRD_SIZE (INITRD_SIZE_OFFSET) |
127 | #define OLDMEM_BASE 0x10418 | 141 | #define OLDMEM_BASE (OLDMEM_BASE_OFFSET) |
128 | #define OLDMEM_SIZE 0x10420 | 142 | #define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET) |
129 | #define COMMAND_LINE 0x10480 | 143 | #define COMMAND_LINE (COMMAND_LINE_OFFSET) |
130 | 144 | ||
131 | #endif /* __ASSEMBLY__ */ | 145 | #endif /* __ASSEMBLY__ */ |
132 | #endif /* _ASM_S390_SETUP_H */ | 146 | #endif /* _ASM_S390_SETUP_H */ |
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index c57f9d28d894..9a14a611ed82 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h | |||
@@ -97,22 +97,31 @@ typedef unsigned long sigset_t; | |||
97 | #include <asm-generic/signal-defs.h> | 97 | #include <asm-generic/signal-defs.h> |
98 | 98 | ||
99 | #ifndef __KERNEL__ | 99 | #ifndef __KERNEL__ |
100 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
101 | 100 | ||
101 | /* | ||
102 | * There are two system calls in regard to sigaction, sys_rt_sigaction | ||
103 | * and sys_sigaction. Internally the kernel uses the struct old_sigaction | ||
104 | * for the older sys_sigaction system call, and the kernel version of the | ||
105 | * struct sigaction for the newer sys_rt_sigaction. | ||
106 | * | ||
107 | * The uapi definition for struct sigaction has made a strange distinction | ||
108 | * between 31-bit and 64-bit in the past. For 64-bit the uapi structure | ||
109 | * looks like the kernel struct sigaction, but for 31-bit it used to | ||
110 | * look like the kernel struct old_sigaction. That practically made the | ||
111 | * structure unusable for either system call. To get around this problem, | ||
112 | * glibc has always had its own definitions for the sigaction structures. | ||
113 | * | ||
114 | * The current struct sigaction uapi definition below is suitable for the | ||
115 | * sys_rt_sigaction system call only. | ||
116 | */ | ||
102 | struct sigaction { | 117 | struct sigaction { |
103 | union { | 118 | union { |
104 | __sighandler_t _sa_handler; | 119 | __sighandler_t _sa_handler; |
105 | void (*_sa_sigaction)(int, struct siginfo *, void *); | 120 | void (*_sa_sigaction)(int, struct siginfo *, void *); |
106 | } _u; | 121 | } _u; |
107 | #ifndef __s390x__ /* lovely */ | ||
108 | sigset_t sa_mask; | ||
109 | unsigned long sa_flags; | ||
110 | void (*sa_restorer)(void); | ||
111 | #else /* __s390x__ */ | ||
112 | unsigned long sa_flags; | 122 | unsigned long sa_flags; |
113 | void (*sa_restorer)(void); | 123 | void (*sa_restorer)(void); |
114 | sigset_t sa_mask; | 124 | sigset_t sa_mask; |
115 | #endif /* __s390x__ */ | ||
116 | }; | 125 | }; |
117 | 126 | ||
118 | #define sa_handler _u._sa_handler | 127 | #define sa_handler _u._sa_handler |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b06a6f79c1ec..84ea6225efb4 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | |||
82 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 82 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
83 | obj-$(CONFIG_UPROBES) += uprobes.o | 83 | obj-$(CONFIG_UPROBES) += uprobes.o |
84 | 84 | ||
85 | obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o | ||
86 | obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o | ||
87 | |||
85 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o | 88 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o |
86 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o | 89 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o |
87 | 90 | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index cfe2c45c5180..eb2a5c0443cd 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kbuild.h> | 10 | #include <linux/kbuild.h> |
11 | #include <linux/kvm_host.h> | 11 | #include <linux/kvm_host.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/purgatory.h> | ||
13 | #include <asm/idle.h> | 14 | #include <asm/idle.h> |
14 | #include <asm/vdso.h> | 15 | #include <asm/vdso.h> |
15 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
@@ -204,5 +205,9 @@ int main(void) | |||
204 | OFFSET(__GMAP_ASCE, gmap, asce); | 205 | OFFSET(__GMAP_ASCE, gmap, asce); |
205 | OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); | 206 | OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); |
206 | OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); | 207 | OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); |
208 | /* kexec_sha_region */ | ||
209 | OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); | ||
210 | OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len); | ||
211 | DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region)); | ||
207 | return 0; | 212 | return 0; |
208 | } | 213 | } |
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 11e9d8b5c1b0..607c5e9fba3d 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
@@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, | |||
182 | COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); | 182 | COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); |
183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); | 183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); |
184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); | 184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); |
185 | COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) | ||
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c new file mode 100644 index 000000000000..5a286b012043 --- /dev/null +++ b/arch/s390/kernel/kexec_elf.c | |||
@@ -0,0 +1,147 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * ELF loader for kexec_file_load system call. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2018 | ||
6 | * | ||
7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/kexec.h> | ||
13 | #include <asm/setup.h> | ||
14 | |||
15 | static int kexec_file_add_elf_kernel(struct kimage *image, | ||
16 | struct s390_load_data *data, | ||
17 | char *kernel, unsigned long kernel_len) | ||
18 | { | ||
19 | struct kexec_buf buf; | ||
20 | const Elf_Ehdr *ehdr; | ||
21 | const Elf_Phdr *phdr; | ||
22 | int i, ret; | ||
23 | |||
24 | ehdr = (Elf_Ehdr *)kernel; | ||
25 | buf.image = image; | ||
26 | |||
27 | phdr = (void *)ehdr + ehdr->e_phoff; | ||
28 | for (i = 0; i < ehdr->e_phnum; i++, phdr++) { | ||
29 | if (phdr->p_type != PT_LOAD) | ||
30 | continue; | ||
31 | |||
32 | buf.buffer = kernel + phdr->p_offset; | ||
33 | buf.bufsz = phdr->p_filesz; | ||
34 | |||
35 | buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); | ||
36 | buf.memsz = phdr->p_memsz; | ||
37 | |||
38 | if (phdr->p_paddr == 0) { | ||
39 | data->kernel_buf = buf.buffer; | ||
40 | data->memsz += STARTUP_NORMAL_OFFSET; | ||
41 | |||
42 | buf.buffer += STARTUP_NORMAL_OFFSET; | ||
43 | buf.bufsz -= STARTUP_NORMAL_OFFSET; | ||
44 | |||
45 | buf.mem += STARTUP_NORMAL_OFFSET; | ||
46 | buf.memsz -= STARTUP_NORMAL_OFFSET; | ||
47 | } | ||
48 | |||
49 | if (image->type == KEXEC_TYPE_CRASH) | ||
50 | buf.mem += crashk_res.start; | ||
51 | |||
52 | ret = kexec_add_buffer(&buf); | ||
53 | if (ret) | ||
54 | return ret; | ||
55 | |||
56 | data->memsz += buf.memsz; | ||
57 | } | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static void *s390_elf_load(struct kimage *image, | ||
63 | char *kernel, unsigned long kernel_len, | ||
64 | char *initrd, unsigned long initrd_len, | ||
65 | char *cmdline, unsigned long cmdline_len) | ||
66 | { | ||
67 | struct s390_load_data data = {0}; | ||
68 | const Elf_Ehdr *ehdr; | ||
69 | const Elf_Phdr *phdr; | ||
70 | size_t size; | ||
71 | int i, ret; | ||
72 | |||
73 | /* image->fops->probe already checked for a valid ELF magic number. */ | ||
74 | ehdr = (Elf_Ehdr *)kernel; | ||
75 | |||
76 | if (ehdr->e_type != ET_EXEC || | ||
77 | ehdr->e_ident[EI_CLASS] != ELFCLASS64 || | ||
78 | !elf_check_arch(ehdr)) | ||
79 | return ERR_PTR(-EINVAL); | ||
80 | |||
81 | if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr)) | ||
82 | return ERR_PTR(-EINVAL); | ||
83 | |||
84 | size = ehdr->e_ehsize + ehdr->e_phoff; | ||
85 | size += ehdr->e_phentsize * ehdr->e_phnum; | ||
86 | if (size > kernel_len) | ||
87 | return ERR_PTR(-EINVAL); | ||
88 | |||
89 | phdr = (void *)ehdr + ehdr->e_phoff; | ||
90 | size = ALIGN(size, phdr->p_align); | ||
91 | for (i = 0; i < ehdr->e_phnum; i++, phdr++) { | ||
92 | if (phdr->p_type == PT_INTERP) | ||
93 | return ERR_PTR(-EINVAL); | ||
94 | |||
95 | if (phdr->p_offset > kernel_len) | ||
96 | return ERR_PTR(-EINVAL); | ||
97 | |||
98 | size += ALIGN(phdr->p_filesz, phdr->p_align); | ||
99 | } | ||
100 | |||
101 | if (size > kernel_len) | ||
102 | return ERR_PTR(-EINVAL); | ||
103 | |||
104 | ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len); | ||
105 | if (ret) | ||
106 | return ERR_PTR(ret); | ||
107 | |||
108 | if (!data.memsz) | ||
109 | return ERR_PTR(-EINVAL); | ||
110 | |||
111 | if (initrd) { | ||
112 | ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); | ||
113 | if (ret) | ||
114 | return ERR_PTR(ret); | ||
115 | } | ||
116 | |||
117 | ret = kexec_file_add_purgatory(image, &data); | ||
118 | if (ret) | ||
119 | return ERR_PTR(ret); | ||
120 | |||
121 | return kexec_file_update_kernel(image, &data); | ||
122 | } | ||
123 | |||
124 | static int s390_elf_probe(const char *buf, unsigned long len) | ||
125 | { | ||
126 | const Elf_Ehdr *ehdr; | ||
127 | |||
128 | if (len < sizeof(Elf_Ehdr)) | ||
129 | return -ENOEXEC; | ||
130 | |||
131 | ehdr = (Elf_Ehdr *)buf; | ||
132 | |||
133 | /* Only check the ELF magic number here and do the proper validity | ||
134 | * checks in the loader. Any check that fails here would send the | ||
135 | * erroneous ELF file on to the Image loader, which does not care | ||
136 | * what it gets, most likely causing behavior the user did not intend. | ||
137 | */ | ||
138 | if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) | ||
139 | return -ENOEXEC; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | const struct kexec_file_ops s390_kexec_elf_ops = { | ||
145 | .probe = s390_elf_probe, | ||
146 | .load = s390_elf_load, | ||
147 | }; | ||
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c new file mode 100644 index 000000000000..3800852595e8 --- /dev/null +++ b/arch/s390/kernel/kexec_image.c | |||
@@ -0,0 +1,76 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Image loader for kexec_file_load system call. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2018 | ||
6 | * | ||
7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/kexec.h> | ||
13 | #include <asm/setup.h> | ||
14 | |||
15 | static int kexec_file_add_image_kernel(struct kimage *image, | ||
16 | struct s390_load_data *data, | ||
17 | char *kernel, unsigned long kernel_len) | ||
18 | { | ||
19 | struct kexec_buf buf; | ||
20 | int ret; | ||
21 | |||
22 | buf.image = image; | ||
23 | |||
24 | buf.buffer = kernel + STARTUP_NORMAL_OFFSET; | ||
25 | buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET; | ||
26 | |||
27 | buf.mem = STARTUP_NORMAL_OFFSET; | ||
28 | if (image->type == KEXEC_TYPE_CRASH) | ||
29 | buf.mem += crashk_res.start; | ||
30 | buf.memsz = buf.bufsz; | ||
31 | |||
32 | ret = kexec_add_buffer(&buf); | ||
33 | |||
34 | data->kernel_buf = kernel; | ||
35 | data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET; | ||
36 | |||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | static void *s390_image_load(struct kimage *image, | ||
41 | char *kernel, unsigned long kernel_len, | ||
42 | char *initrd, unsigned long initrd_len, | ||
43 | char *cmdline, unsigned long cmdline_len) | ||
44 | { | ||
45 | struct s390_load_data data = {0}; | ||
46 | int ret; | ||
47 | |||
48 | ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len); | ||
49 | if (ret) | ||
50 | return ERR_PTR(ret); | ||
51 | |||
52 | if (initrd) { | ||
53 | ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); | ||
54 | if (ret) | ||
55 | return ERR_PTR(ret); | ||
56 | } | ||
57 | |||
58 | ret = kexec_file_add_purgatory(image, &data); | ||
59 | if (ret) | ||
60 | return ERR_PTR(ret); | ||
61 | |||
62 | return kexec_file_update_kernel(image, &data); | ||
63 | } | ||
64 | |||
65 | static int s390_image_probe(const char *buf, unsigned long len) | ||
66 | { | ||
67 | /* Can't reliably tell whether an Image is valid. Therefore accept | ||
68 | * whatever the user provides. | ||
69 | */ | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | const struct kexec_file_ops s390_kexec_image_ops = { | ||
74 | .probe = s390_image_probe, | ||
75 | .load = s390_image_load, | ||
76 | }; | ||
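With an Image loader that accepts any file and an ELF loader that at least checks the magic, the new syscall can be driven from user space once the remaining wiring (machine_kexec_file.c below, the compat wrapper, the syscall table) is in place. The following is a minimal user-space sketch, not part of the patch: the file paths and command line are placeholders, syscall(2) is used directly because glibc may lack a wrapper, and cmdline_len must include the terminating NUL.

/* Hypothetical user-space caller of kexec_file_load(); paths and the
 * command line are placeholders only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char *cmdline = "root=/dev/dasda1";		/* placeholder */
	int kernel_fd = open("/boot/image", O_RDONLY);		/* placeholder path */
	int initrd_fd = open("/boot/initrd", O_RDONLY);		/* placeholder path */

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* kexec_file_load(kernel_fd, initrd_fd, cmdline_len, cmdline, flags) */
	if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return EXIT_FAILURE;
	}

	/* The loaded kernel is started later, e.g. via "kexec -e". */
	return EXIT_SUCCESS;
}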
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..f413f57f8d20 --- /dev/null +++ b/arch/s390/kernel/machine_kexec_file.c | |||
@@ -0,0 +1,245 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * s390 code for kexec_file_load system call | ||
4 | * | ||
5 | * Copyright IBM Corp. 2018 | ||
6 | * | ||
7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/elf.h> | ||
11 | #include <linux/kexec.h> | ||
12 | #include <asm/setup.h> | ||
13 | |||
14 | const struct kexec_file_ops * const kexec_file_loaders[] = { | ||
15 | &s390_kexec_elf_ops, | ||
16 | &s390_kexec_image_ops, | ||
17 | NULL, | ||
18 | }; | ||
19 | |||
20 | int *kexec_file_update_kernel(struct kimage *image, | ||
21 | struct s390_load_data *data) | ||
22 | { | ||
23 | unsigned long *loc; | ||
24 | |||
25 | if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) | ||
26 | return ERR_PTR(-EINVAL); | ||
27 | |||
28 | if (image->cmdline_buf_len) | ||
29 | memcpy(data->kernel_buf + COMMAND_LINE_OFFSET, | ||
30 | image->cmdline_buf, image->cmdline_buf_len); | ||
31 | |||
32 | if (image->type == KEXEC_TYPE_CRASH) { | ||
33 | loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET); | ||
34 | *loc = crashk_res.start; | ||
35 | |||
36 | loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET); | ||
37 | *loc = crashk_res.end - crashk_res.start + 1; | ||
38 | } | ||
39 | |||
40 | if (image->initrd_buf) { | ||
41 | loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET); | ||
42 | *loc = data->initrd_load_addr; | ||
43 | |||
44 | loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET); | ||
45 | *loc = image->initrd_buf_len; | ||
46 | } | ||
47 | |||
48 | return NULL; | ||
49 | } | ||
50 | |||
51 | static int kexec_file_update_purgatory(struct kimage *image) | ||
52 | { | ||
53 | u64 entry, type; | ||
54 | int ret; | ||
55 | |||
56 | if (image->type == KEXEC_TYPE_CRASH) { | ||
57 | entry = STARTUP_KDUMP_OFFSET; | ||
58 | type = KEXEC_TYPE_CRASH; | ||
59 | } else { | ||
60 | entry = STARTUP_NORMAL_OFFSET; | ||
61 | type = KEXEC_TYPE_DEFAULT; | ||
62 | } | ||
63 | |||
64 | ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, | ||
65 | sizeof(entry), false); | ||
66 | if (ret) | ||
67 | return ret; | ||
68 | |||
69 | ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, | ||
70 | sizeof(type), false); | ||
71 | if (ret) | ||
72 | return ret; | ||
73 | |||
74 | if (image->type == KEXEC_TYPE_CRASH) { | ||
75 | u64 crash_size; | ||
76 | |||
77 | ret = kexec_purgatory_get_set_symbol(image, "crash_start", | ||
78 | &crashk_res.start, | ||
79 | sizeof(crashk_res.start), | ||
80 | false); | ||
81 | if (ret) | ||
82 | return ret; | ||
83 | |||
84 | crash_size = crashk_res.end - crashk_res.start + 1; | ||
85 | ret = kexec_purgatory_get_set_symbol(image, "crash_size", | ||
86 | &crash_size, | ||
87 | sizeof(crash_size), | ||
88 | false); | ||
89 | } | ||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data) | ||
94 | { | ||
95 | struct kexec_buf buf; | ||
96 | int ret; | ||
97 | |||
98 | buf.image = image; | ||
99 | |||
100 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); | ||
101 | buf.mem = data->memsz; | ||
102 | if (image->type == KEXEC_TYPE_CRASH) | ||
103 | buf.mem += crashk_res.start; | ||
104 | |||
105 | ret = kexec_load_purgatory(image, &buf); | ||
106 | if (ret) | ||
107 | return ret; | ||
108 | |||
109 | ret = kexec_file_update_purgatory(image); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data, | ||
114 | char *initrd, unsigned long initrd_len) | ||
115 | { | ||
116 | struct kexec_buf buf; | ||
117 | int ret; | ||
118 | |||
119 | buf.image = image; | ||
120 | |||
121 | buf.buffer = initrd; | ||
122 | buf.bufsz = initrd_len; | ||
123 | |||
124 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); | ||
125 | buf.mem = data->memsz; | ||
126 | if (image->type == KEXEC_TYPE_CRASH) | ||
127 | buf.mem += crashk_res.start; | ||
128 | buf.memsz = buf.bufsz; | ||
129 | |||
130 | data->initrd_load_addr = buf.mem; | ||
131 | data->memsz += buf.memsz; | ||
132 | |||
133 | ret = kexec_add_buffer(&buf); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole | ||
139 | * and provide kbuf->mem by hand. | ||
140 | */ | ||
141 | int arch_kexec_walk_mem(struct kexec_buf *kbuf, | ||
142 | int (*func)(struct resource *, void *)) | ||
143 | { | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | int arch_kexec_apply_relocations_add(struct purgatory_info *pi, | ||
148 | Elf_Shdr *section, | ||
149 | const Elf_Shdr *relsec, | ||
150 | const Elf_Shdr *symtab) | ||
151 | { | ||
152 | Elf_Rela *relas; | ||
153 | int i; | ||
154 | |||
155 | relas = (void *)pi->ehdr + relsec->sh_offset; | ||
156 | |||
157 | for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { | ||
158 | const Elf_Sym *sym; /* symbol to relocate */ | ||
159 | unsigned long addr; /* final location after relocation */ | ||
160 | unsigned long val; /* relocated symbol value */ | ||
161 | void *loc; /* tmp location to modify */ | ||
162 | |||
163 | sym = (void *)pi->ehdr + symtab->sh_offset; | ||
164 | sym += ELF64_R_SYM(relas[i].r_info); | ||
165 | |||
166 | if (sym->st_shndx == SHN_UNDEF) | ||
167 | return -ENOEXEC; | ||
168 | |||
169 | if (sym->st_shndx == SHN_COMMON) | ||
170 | return -ENOEXEC; | ||
171 | |||
172 | if (sym->st_shndx >= pi->ehdr->e_shnum && | ||
173 | sym->st_shndx != SHN_ABS) | ||
174 | return -ENOEXEC; | ||
175 | |||
176 | loc = pi->purgatory_buf; | ||
177 | loc += section->sh_offset; | ||
178 | loc += relas[i].r_offset; | ||
179 | |||
180 | val = sym->st_value; | ||
181 | if (sym->st_shndx != SHN_ABS) | ||
182 | val += pi->sechdrs[sym->st_shndx].sh_addr; | ||
183 | val += relas[i].r_addend; | ||
184 | |||
185 | addr = section->sh_addr + relas[i].r_offset; | ||
186 | |||
187 | switch (ELF64_R_TYPE(relas[i].r_info)) { | ||
188 | case R_390_8: /* Direct 8 bit. */ | ||
189 | *(u8 *)loc = val; | ||
190 | break; | ||
191 | case R_390_12: /* Direct 12 bit. */ | ||
192 | *(u16 *)loc &= 0xf000; | ||
193 | *(u16 *)loc |= val & 0xfff; | ||
194 | break; | ||
195 | case R_390_16: /* Direct 16 bit. */ | ||
196 | *(u16 *)loc = val; | ||
197 | break; | ||
198 | case R_390_20: /* Direct 20 bit. */ | ||
199 | *(u32 *)loc &= 0xf00000ff; | ||
200 | *(u32 *)loc |= (val & 0xfff) << 16; /* DL */ | ||
201 | *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */ | ||
202 | break; | ||
203 | case R_390_32: /* Direct 32 bit. */ | ||
204 | *(u32 *)loc = val; | ||
205 | break; | ||
206 | case R_390_64: /* Direct 64 bit. */ | ||
207 | *(u64 *)loc = val; | ||
208 | break; | ||
209 | case R_390_PC16: /* PC relative 16 bit. */ | ||
210 | *(u16 *)loc = (val - addr); | ||
211 | break; | ||
212 | case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ | ||
213 | *(u16 *)loc = (val - addr) >> 1; | ||
214 | break; | ||
215 | case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ | ||
216 | *(u32 *)loc = (val - addr) >> 1; | ||
217 | break; | ||
218 | case R_390_PC32: /* PC relative 32 bit. */ | ||
219 | *(u32 *)loc = (val - addr); | ||
220 | break; | ||
221 | case R_390_PC64: /* PC relative 64 bit. */ | ||
222 | *(u64 *)loc = (val - addr); | ||
223 | break; | ||
224 | default: | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, | ||
232 | unsigned long buf_len) | ||
233 | { | ||
234 | /* A kernel must be at least large enough to contain head.S. During | ||
235 | * load, memory within head.S will be accessed, e.g. to register the next | ||
236 | * command line. If the next kernel were smaller, the current kernel | ||
237 | * would panic at load. | ||
238 | * | ||
239 | * 0x11000 = sizeof(head.S) | ||
240 | */ | ||
241 | if (buf_len < 0x11000) | ||
242 | return -ENOEXEC; | ||
243 | |||
244 | return kexec_image_probe_default(image, buf, buf_len); | ||
245 | } | ||
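
For readers following the relocation loop above: it implements the usual ELF RELA scheme, where the value is the symbol value S (plus its section base unless the symbol is SHN_ABS) plus the addend A, the place P is the target section address plus r_offset, and the PC-relative *DBL types store the difference shifted right by one. A minimal C restatement of that arithmetic (illustrative names, not part of the patch):

/*
 * Sketch only: mirrors the val/addr computation in
 * arch_kexec_apply_relocations_add() above.
 */
static unsigned long rela_value(unsigned long sym_value,
				unsigned long section_base,	/* 0 for SHN_ABS */
				unsigned long addend)
{
	return sym_value + section_base + addend;	/* S + A */
}

static unsigned long rela_place(unsigned long section_addr,
				unsigned long r_offset)
{
	return section_addr + r_offset;			/* P */
}

/* e.g. R_390_PC32DBL then stores (u32)((S + A - P) >> 1) */
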
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index f236ce8757e8..46d49a11663f 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c | |||
@@ -1,6 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
4 | #include <linux/cpu.h> | ||
4 | #include <asm/nospec-branch.h> | 5 | #include <asm/nospec-branch.h> |
5 | 6 | ||
6 | static int __init nobp_setup_early(char *str) | 7 | static int __init nobp_setup_early(char *str) |
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index c5bc3f209652..5ee27dc9a10c 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c | |||
@@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void) | |||
583 | model = cpumcf_z13_pmu_event_attr; | 583 | model = cpumcf_z13_pmu_event_attr; |
584 | break; | 584 | break; |
585 | case 0x3906: | 585 | case 0x3906: |
586 | case 0x3907: | ||
586 | model = cpumcf_z14_pmu_event_attr; | 587 | model = cpumcf_z14_pmu_event_attr; |
587 | break; | 588 | break; |
588 | default: | 589 | default: |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index fc3b4aa185cc..d82a9ec64ea9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -821,6 +821,7 @@ static int __init setup_hwcaps(void) | |||
821 | strcpy(elf_platform, "z13"); | 821 | strcpy(elf_platform, "z13"); |
822 | break; | 822 | break; |
823 | case 0x3906: | 823 | case 0x3906: |
824 | case 0x3907: | ||
824 | strcpy(elf_platform, "z14"); | 825 | strcpy(elf_platform, "z14"); |
825 | break; | 826 | break; |
826 | } | 827 | } |
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index b38d48464368..8b210ead7956 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl | |||
@@ -388,3 +388,4 @@ | |||
388 | 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage | 388 | 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage |
389 | 379 common statx sys_statx compat_sys_statx | 389 | 379 common statx sys_statx compat_sys_statx |
390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi | 390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi |
391 | 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load | ||
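
With this table entry in place, s390 userspace can drive the new loader through the kexec_file_load(2) system call. A minimal usage sketch (paths and command line are placeholders, error handling omitted; the fallback syscall number is 381 as added above):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kexec_file_load
#define __NR_kexec_file_load 381	/* s390, per the table entry above */
#endif

int main(void)
{
	int kernel_fd = open("/boot/image", O_RDONLY);	/* placeholder path */
	int initrd_fd = open("/boot/initrd", O_RDONLY);	/* placeholder path */
	const char cmdline[] = "root=/dev/dasda1";	/* placeholder cmdline */

	/* flags = 0 loads a normal kernel; cmdline_len includes the NUL */
	return syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
		       (unsigned long)sizeof(cmdline), cmdline, 0UL);
}
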
diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore new file mode 100644 index 000000000000..e9e66f178a6d --- /dev/null +++ b/arch/s390/purgatory/.gitignore | |||
@@ -0,0 +1,2 @@ | |||
1 | kexec-purgatory.c | ||
2 | purgatory.ro | ||
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile new file mode 100644 index 000000000000..e9525bc1b4a6 --- /dev/null +++ b/arch/s390/purgatory/Makefile | |||
@@ -0,0 +1,37 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | OBJECT_FILES_NON_STANDARD := y | ||
4 | |||
5 | purgatory-y := head.o purgatory.o string.o sha256.o mem.o | ||
6 | |||
7 | targets += $(purgatory-y) purgatory.ro kexec-purgatory.c | ||
8 | PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) | ||
9 | |||
10 | $(obj)/sha256.o: $(srctree)/lib/sha256.c | ||
11 | $(call if_changed_rule,cc_o_c) | ||
12 | |||
13 | $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S | ||
14 | $(call if_changed_rule,as_o_S) | ||
15 | |||
16 | $(obj)/string.o: $(srctree)/arch/s390/lib/string.c | ||
17 | $(call if_changed_rule,cc_o_c) | ||
18 | |||
19 | LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib | ||
20 | LDFLAGS_purgatory.ro += -z nodefaultlib | ||
21 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes | ||
22 | KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare | ||
23 | KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding | ||
24 | KBUILD_CFLAGS += -c -MD -Os -m64 | ||
25 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | ||
26 | |||
27 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE | ||
28 | $(call if_changed,ld) | ||
29 | |||
30 | CMD_BIN2C = $(objtree)/scripts/basic/bin2c | ||
31 | quiet_cmd_bin2c = BIN2C $@ | ||
32 | cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ | ||
33 | |||
34 | $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE | ||
35 | $(call if_changed,bin2c) | ||
36 | |||
37 | obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o | ||
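
For reference, the generated kexec-purgatory.c is just the binary purgatory.ro wrapped into a C array by scripts/basic/bin2c. Its shape is roughly the following (a sketch; the real file is machine generated, and the symbols kexec_purgatory / kexec_purgatory_size are what the generic kexec_file code references):

const char kexec_purgatory[] =
	"\x7f\x45\x4c\x46..."	/* raw bytes of purgatory.ro as hex escapes */
	;

const size_t kexec_purgatory_size = 12345;	/* illustrative size */
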
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S new file mode 100644 index 000000000000..660c96a05a9b --- /dev/null +++ b/arch/s390/purgatory/head.S | |||
@@ -0,0 +1,279 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Purgatory setup code | ||
4 | * | ||
5 | * Copyright IBM Corp. 2018 | ||
6 | * | ||
7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/linkage.h> | ||
11 | #include <asm/asm-offsets.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/sigp.h> | ||
14 | |||
15 | /* The purgatory is the code running between two kernels. Its main purpose | ||
16 | * is to verify that the next kernel was not corrupted after load and to | ||
17 | * start it. | ||
18 | * | ||
19 | * If the next kernel is a crash kernel there are some peculiarities to | ||
20 | * consider: | ||
21 | * | ||
22 | * First, the purgatory is called twice: once only to verify the sha digest, | ||
23 | * so that if the crash kernel got corrupted the old kernel can try to | ||
24 | * trigger a stand-alone dumper, and once to actually load the crash kernel. | ||
25 | * | ||
26 | * Second, the purgatory also has to swap the crash memory region with its | ||
27 | * destination at address 0. As the purgatory is part of crash memory this | ||
28 | * requires some finesse. The tactic here is that the purgatory first copies | ||
29 | * itself to the end of the destination and then swaps the rest of the | ||
30 | * memory running from there. | ||
31 | */ | ||
32 | |||
33 | #define bufsz purgatory_end-stack | ||
34 | |||
35 | .macro MEMCPY dst,src,len | ||
36 | lgr %r0,\dst | ||
37 | lgr %r1,\len | ||
38 | lgr %r2,\src | ||
39 | lgr %r3,\len | ||
40 | |||
41 | 20: mvcle %r0,%r2,0 | ||
42 | jo 20b | ||
43 | .endm | ||
44 | |||
45 | .macro MEMSWAP dst,src,buf,len | ||
46 | 10: cghi \len,bufsz | ||
47 | jh 11f | ||
48 | lgr %r4,\len | ||
49 | j 12f | ||
50 | 11: lghi %r4,bufsz | ||
51 | |||
52 | 12: MEMCPY \buf,\dst,%r4 | ||
53 | MEMCPY \dst,\src,%r4 | ||
54 | MEMCPY \src,\buf,%r4 | ||
55 | |||
56 | agr \dst,%r4 | ||
57 | agr \src,%r4 | ||
58 | sgr \len,%r4 | ||
59 | |||
60 | cghi \len,0 | ||
61 | jh 10b | ||
62 | .endm | ||
63 | |||
64 | .macro START_NEXT_KERNEL base | ||
65 | lg %r4,kernel_entry-\base(%r13) | ||
66 | lg %r5,load_psw_mask-\base(%r13) | ||
67 | ogr %r4,%r5 | ||
68 | stg %r4,0(%r0) | ||
69 | |||
70 | xgr %r0,%r0 | ||
71 | diag %r0,%r0,0x308 | ||
72 | .endm | ||
73 | |||
74 | .text | ||
75 | .align PAGE_SIZE | ||
76 | ENTRY(purgatory_start) | ||
77 | /* The purgatory might be called after a diag308, so it is better to set | ||
78 | * the architecture and addressing mode explicitly. | ||
79 | */ | ||
80 | lhi %r1,1 | ||
81 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | ||
82 | sam64 | ||
83 | |||
84 | larl %r5,gprregs | ||
85 | stmg %r6,%r15,0(%r5) | ||
86 | |||
87 | basr %r13,0 | ||
88 | .base_crash: | ||
89 | |||
90 | /* Setup stack */ | ||
91 | larl %r15,purgatory_end | ||
92 | aghi %r15,-160 | ||
93 | |||
94 | /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called | ||
95 | * directly with a flag in %r2 indicating whether the purgatory shall do | ||
96 | * checksum verification only (%r2 = 0 -> verification only). | ||
97 | * | ||
98 | * Check now and preserve the flag over the C function call by storing | ||
99 | * it in %r10 with | ||
100 | * 1 -> checksum verification only | ||
101 | * 0 -> load new kernel | ||
102 | */ | ||
103 | lghi %r10,0 | ||
104 | lg %r11,kernel_type-.base_crash(%r13) | ||
105 | cghi %r11,1 /* KEXEC_TYPE_CRASH */ | ||
106 | jne .do_checksum_verification | ||
107 | cghi %r2,0 /* checksum verification only */ | ||
108 | jne .do_checksum_verification | ||
109 | lghi %r10,1 | ||
110 | |||
111 | .do_checksum_verification: | ||
112 | brasl %r14,verify_sha256_digest | ||
113 | |||
114 | cghi %r10,1 /* checksum verification only */ | ||
115 | je .return_old_kernel | ||
116 | cghi %r2,0 /* checksum match */ | ||
117 | jne .disabled_wait | ||
118 | |||
119 | /* If the next kernel is a crash kernel the purgatory has to swap | ||
120 | * the mem regions first. | ||
121 | */ | ||
122 | cghi %r11,1 /* KEXEC_TYPE_CRASH */ | ||
123 | je .start_crash_kernel | ||
124 | |||
125 | /* start normal kernel */ | ||
126 | START_NEXT_KERNEL .base_crash | ||
127 | |||
128 | .return_old_kernel: | ||
129 | lmg %r6,%r15,gprregs-.base_crash(%r13) | ||
130 | br %r14 | ||
131 | |||
132 | .disabled_wait: | ||
133 | lpswe disabled_wait_psw-.base_crash(%r13) | ||
134 | |||
135 | .start_crash_kernel: | ||
136 | /* Location of purgatory_start in crash memory */ | ||
137 | lgr %r8,%r13 | ||
138 | aghi %r8,-(.base_crash-purgatory_start) | ||
139 | |||
140 | /* Destination for this code, i.e. the end of the memory to be swapped. */ | ||
141 | lg %r9,crash_size-.base_crash(%r13) | ||
142 | aghi %r9,-(purgatory_end-purgatory_start) | ||
143 | |||
144 | /* Destination in crash memory, i.e. same as r9 but in crash memory. */ | ||
145 | lg %r10,crash_start-.base_crash(%r13) | ||
146 | agr %r10,%r9 | ||
147 | |||
148 | /* Buffer location (in crash memory) and size. As the purgatory is | ||
149 | * behind the point of no return it can re-use the stack as buffer. | ||
150 | */ | ||
151 | lghi %r11,bufsz | ||
152 | larl %r12,stack | ||
153 | |||
154 | MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ | ||
155 | MEMCPY %r9,%r8,%r11 /* self -> dst */ | ||
156 | |||
157 | /* Jump to new location. */ | ||
158 | lgr %r7,%r9 | ||
159 | aghi %r7,.jump_to_dst-purgatory_start | ||
160 | br %r7 | ||
161 | |||
162 | .jump_to_dst: | ||
163 | basr %r13,0 | ||
164 | .base_dst: | ||
165 | |||
166 | /* clear buffer */ | ||
167 | MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ | ||
168 | |||
169 | /* Load new buffer location after jump */ | ||
170 | larl %r7,stack | ||
171 | aghi %r10,stack-purgatory_start | ||
172 | MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ | ||
173 | |||
174 | /* Now the code is set up to run from its designated location. Start | ||
175 | * swapping the rest of crash memory now. | ||
176 | * | ||
177 | * The registers will be used as follows: | ||
178 | * | ||
179 | * %r0-%r4 reserved for macros defined above | ||
180 | * %r5-%r6 tmp registers | ||
181 | * %r7 pointer to current struct sha region | ||
182 | * %r8 index to iterate over all sha regions | ||
183 | * %r9 pointer in crash memory | ||
184 | * %r10 pointer in old kernel | ||
185 | * %r11 total size (still) to be moved | ||
186 | * %r12 pointer to buffer | ||
187 | */ | ||
188 | lgr %r12,%r7 | ||
189 | lgr %r11,%r9 | ||
190 | lghi %r10,0 | ||
191 | lg %r9,crash_start-.base_dst(%r13) | ||
192 | lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ | ||
193 | larl %r7,purgatory_sha_regions | ||
194 | |||
195 | j .loop_first | ||
196 | |||
197 | /* Loop over all purgatory_sha_regions. */ | ||
198 | .loop_next: | ||
199 | aghi %r8,-1 | ||
200 | cghi %r8,0 | ||
201 | je .loop_out | ||
202 | |||
203 | aghi %r7,__KEXEC_SHA_REGION_SIZE | ||
204 | |||
205 | .loop_first: | ||
206 | lg %r5,__KEXEC_SHA_REGION_START(%r7) | ||
207 | cghi %r5,0 | ||
208 | je .loop_next | ||
209 | |||
210 | /* Copy [end last sha region, start current sha region) */ | ||
211 | /* Note: kexec_sha_region->start points in crash memory */ | ||
212 | sgr %r5,%r9 | ||
213 | MEMCPY %r9,%r10,%r5 | ||
214 | |||
215 | agr %r9,%r5 | ||
216 | agr %r10,%r5 | ||
217 | sgr %r11,%r5 | ||
218 | |||
219 | /* Swap sha region */ | ||
220 | lg %r6,__KEXEC_SHA_REGION_LEN(%r7) | ||
221 | MEMSWAP %r9,%r10,%r12,%r6 | ||
222 | sg %r11,__KEXEC_SHA_REGION_LEN(%r7) | ||
223 | j .loop_next | ||
224 | |||
225 | .loop_out: | ||
226 | /* Copy rest of crash memory */ | ||
227 | MEMCPY %r9,%r10,%r11 | ||
228 | |||
229 | /* start crash kernel */ | ||
230 | START_NEXT_KERNEL .base_dst | ||
231 | |||
232 | |||
233 | load_psw_mask: | ||
234 | .long 0x00080000,0x80000000 | ||
235 | |||
236 | .align 8 | ||
237 | disabled_wait_psw: | ||
238 | .quad 0x0002000180000000 | ||
239 | .quad 0x0000000000000000 + .do_checksum_verification | ||
240 | |||
241 | gprregs: | ||
242 | .rept 10 | ||
243 | .quad 0 | ||
244 | .endr | ||
245 | |||
246 | purgatory_sha256_digest: | ||
247 | .global purgatory_sha256_digest | ||
248 | .rept 32 /* SHA256_DIGEST_SIZE */ | ||
249 | .byte 0 | ||
250 | .endr | ||
251 | |||
252 | purgatory_sha_regions: | ||
253 | .global purgatory_sha_regions | ||
254 | .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */ | ||
255 | .byte 0 | ||
256 | .endr | ||
257 | |||
258 | kernel_entry: | ||
259 | .global kernel_entry | ||
260 | .quad 0 | ||
261 | |||
262 | kernel_type: | ||
263 | .global kernel_type | ||
264 | .quad 0 | ||
265 | |||
266 | crash_start: | ||
267 | .global crash_start | ||
268 | .quad 0 | ||
269 | |||
270 | crash_size: | ||
271 | .global crash_size | ||
272 | .quad 0 | ||
273 | |||
274 | .align PAGE_SIZE | ||
275 | stack: | ||
276 | /* The buffer to move this code must be as big as the code. */ | ||
277 | .skip stack-purgatory_start | ||
278 | .align PAGE_SIZE | ||
279 | purgatory_end: | ||
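
The MEMSWAP macro above exchanges two memory regions through the small stack buffer, at most bufsz bytes per round, which is how the purgatory swaps the crash memory with its destination at address 0 without needing extra storage. A C sketch of the same loop (using plain memcpy where the assembly uses mvcle):

#include <string.h>

/* Sketch: swap len bytes between dst and src via a bounce buffer. */
static void memswap(unsigned char *dst, unsigned char *src,
		    unsigned char *buf, unsigned long len,
		    unsigned long bufsz)
{
	while (len) {
		unsigned long chunk = len > bufsz ? bufsz : len;

		memcpy(buf, dst, chunk);	/* save dst */
		memcpy(dst, src, chunk);	/* dst <- src */
		memcpy(src, buf, chunk);	/* src <- saved dst */

		dst += chunk;
		src += chunk;
		len -= chunk;
	}
}
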
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c new file mode 100644 index 000000000000..4e2beb3c29b7 --- /dev/null +++ b/arch/s390/purgatory/purgatory.c | |||
@@ -0,0 +1,42 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Purgatory code running between two kernels. | ||
4 | * | ||
5 | * Copyright IBM Corp. 2018 | ||
6 | * | ||
7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/kexec.h> | ||
11 | #include <linux/sha256.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <asm/purgatory.h> | ||
14 | |||
15 | struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; | ||
16 | u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; | ||
17 | |||
18 | u64 kernel_entry; | ||
19 | u64 kernel_type; | ||
20 | |||
21 | u64 crash_start; | ||
22 | u64 crash_size; | ||
23 | |||
24 | int verify_sha256_digest(void) | ||
25 | { | ||
26 | struct kexec_sha_region *ptr, *end; | ||
27 | u8 digest[SHA256_DIGEST_SIZE]; | ||
28 | struct sha256_state sctx; | ||
29 | |||
30 | sha256_init(&sctx); | ||
31 | end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); | ||
32 | |||
33 | for (ptr = purgatory_sha_regions; ptr < end; ptr++) | ||
34 | sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); | ||
35 | |||
36 | sha256_final(&sctx, digest); | ||
37 | |||
38 | if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) | ||
39 | return 1; | ||
40 | |||
41 | return 0; | ||
42 | } | ||
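
Note that purgatory_sha_regions and purgatory_sha256_digest are not filled in by the purgatory itself: the generic kexec_file loader patches them into the purgatory object at load time through kexec_purgatory_get_set_symbol(), in the same way the s390 loader sets kernel_entry and crash_size further up. A hedged sketch of that loader-side step (simplified; the real call site lives in kernel/kexec_file.c):

static int store_purgatory_digest(struct kimage *image,
				  struct kexec_sha_region *sha_regions,
				  unsigned int sha_regions_sz,
				  u8 *digest)
{
	int ret;

	/* last argument false = write the value into the purgatory image */
	ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
					     sha_regions, sha_regions_sz,
					     false);
	if (ret)
		return ret;

	return kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
					      digest, SHA256_DIGEST_SIZE,
					      false);
}
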
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c98b943e58b4..77076a102e34 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = { | |||
3028 | .format_group = &hswep_uncore_cbox_format_group, | 3028 | .format_group = &hswep_uncore_cbox_format_group, |
3029 | }; | 3029 | }; |
3030 | 3030 | ||
3031 | static struct intel_uncore_type bdx_uncore_sbox = { | ||
3032 | .name = "sbox", | ||
3033 | .num_counters = 4, | ||
3034 | .num_boxes = 4, | ||
3035 | .perf_ctr_bits = 48, | ||
3036 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, | ||
3037 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, | ||
3038 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, | ||
3039 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, | ||
3040 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, | ||
3041 | .ops = &hswep_uncore_sbox_msr_ops, | ||
3042 | .format_group = &hswep_uncore_sbox_format_group, | ||
3043 | }; | ||
3044 | |||
3045 | #define BDX_MSR_UNCORE_SBOX 3 | ||
3046 | |||
3031 | static struct intel_uncore_type *bdx_msr_uncores[] = { | 3047 | static struct intel_uncore_type *bdx_msr_uncores[] = { |
3032 | &bdx_uncore_ubox, | 3048 | &bdx_uncore_ubox, |
3033 | &bdx_uncore_cbox, | 3049 | &bdx_uncore_cbox, |
3034 | &hswep_uncore_pcu, | 3050 | &hswep_uncore_pcu, |
3051 | &bdx_uncore_sbox, | ||
3035 | NULL, | 3052 | NULL, |
3036 | }; | 3053 | }; |
3037 | 3054 | ||
@@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { | |||
3043 | 3060 | ||
3044 | void bdx_uncore_cpu_init(void) | 3061 | void bdx_uncore_cpu_init(void) |
3045 | { | 3062 | { |
3063 | int pkg = topology_phys_to_logical_pkg(0); | ||
3064 | |||
3046 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | 3065 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) |
3047 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | 3066 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; |
3048 | uncore_msr_uncores = bdx_msr_uncores; | 3067 | uncore_msr_uncores = bdx_msr_uncores; |
3049 | 3068 | ||
3069 | /* BDX-DE doesn't have SBOX */ | ||
3070 | if (boot_cpu_data.x86_model == 86) { | ||
3071 | uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; | ||
3072 | /* Detect systems with no SBOXes */ | ||
3073 | } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { | ||
3074 | struct pci_dev *pdev; | ||
3075 | u32 capid4; | ||
3076 | |||
3077 | pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; | ||
3078 | pci_read_config_dword(pdev, 0x94, &capid4); | ||
3079 | if (((capid4 >> 6) & 0x3) == 0) | ||
3080 | bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; | ||
3081 | } | ||
3050 | hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; | 3082 | hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; |
3051 | } | 3083 | } |
3052 | 3084 | ||
@@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { | |||
3264 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), | 3296 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), |
3265 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), | 3297 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), |
3266 | }, | 3298 | }, |
3299 | { /* PCU.3 (for Capability registers) */ | ||
3300 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), | ||
3301 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
3302 | HSWEP_PCI_PCU_3), | ||
3303 | }, | ||
3267 | { /* end: all zeroes */ } | 3304 | { /* end: all zeroes */ } |
3268 | }; | 3305 | }; |
3269 | 3306 | ||
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 386a6900e206..219faaec51df 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -136,7 +136,6 @@ | |||
136 | #endif | 136 | #endif |
137 | 137 | ||
138 | #ifndef __ASSEMBLY__ | 138 | #ifndef __ASSEMBLY__ |
139 | #ifndef __BPF__ | ||
140 | /* | 139 | /* |
141 | * This output constraint should be used for any inline asm which has a "call" | 140 | * This output constraint should be used for any inline asm which has a "call" |
142 | * instruction. Otherwise the asm may be inserted before the frame pointer | 141 | * instruction. Otherwise the asm may be inserted before the frame pointer |
@@ -146,6 +145,5 @@ | |||
146 | register unsigned long current_stack_pointer asm(_ASM_SP); | 145 | register unsigned long current_stack_pointer asm(_ASM_SP); |
147 | #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) | 146 | #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) |
148 | #endif | 147 | #endif |
149 | #endif | ||
150 | 148 | ||
151 | #endif /* _ASM_X86_ASM_H */ | 149 | #endif /* _ASM_X86_ASM_H */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 949c977bc4c9..c25775fad4ed 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -1013,6 +1013,7 @@ struct kvm_x86_ops { | |||
1013 | 1013 | ||
1014 | bool (*has_wbinvd_exit)(void); | 1014 | bool (*has_wbinvd_exit)(void); |
1015 | 1015 | ||
1016 | u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); | ||
1016 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); | 1017 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); |
1017 | 1018 | ||
1018 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); | 1019 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 4fa4206029e3..21a114914ba4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, | |||
749 | extern void enable_sep_cpu(void); | 749 | extern void enable_sep_cpu(void); |
750 | extern int sysenter_setup(void); | 750 | extern int sysenter_setup(void); |
751 | 751 | ||
752 | extern void early_trap_init(void); | ||
753 | void early_trap_pf_init(void); | 752 | void early_trap_pf_init(void); |
754 | 753 | ||
755 | /* Defined in head.S */ | 754 | /* Defined in head.S */ |
756 | extern struct desc_ptr early_gdt_descr; | 755 | extern struct desc_ptr early_gdt_descr; |
757 | 756 | ||
758 | extern void cpu_set_gdt(int); | ||
759 | extern void switch_to_new_gdt(int); | 757 | extern void switch_to_new_gdt(int); |
760 | extern void load_direct_gdt(int); | 758 | extern void load_direct_gdt(int); |
761 | extern void load_fixmap_gdt(int); | 759 | extern void load_fixmap_gdt(int); |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index dde444f932c1..3b20607d581b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) | |||
215 | apic_id = processor->local_apic_id; | 215 | apic_id = processor->local_apic_id; |
216 | enabled = processor->lapic_flags & ACPI_MADT_ENABLED; | 216 | enabled = processor->lapic_flags & ACPI_MADT_ENABLED; |
217 | 217 | ||
218 | /* Ignore invalid ID */ | ||
219 | if (apic_id == 0xffffffff) | ||
220 | return 0; | ||
221 | |||
218 | /* | 222 | /* |
219 | * We need to register disabled CPU as well to permit | 223 | * We need to register disabled CPU as well to permit |
220 | * counting disabled CPUs. This allows us to size | 224 | * counting disabled CPUs. This allows us to size |
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 3182908b7e6c..7326078eaa7a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
398 | * little bit simple | 398 | * little bit simple |
399 | */ | 399 | */ |
400 | efi_map_sz = efi_get_runtime_map_size(); | 400 | efi_map_sz = efi_get_runtime_map_size(); |
401 | efi_map_sz = ALIGN(efi_map_sz, 16); | ||
402 | params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + | 401 | params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + |
403 | MAX_ELFCOREHDR_STR_LEN; | 402 | MAX_ELFCOREHDR_STR_LEN; |
404 | params_cmdline_sz = ALIGN(params_cmdline_sz, 16); | 403 | params_cmdline_sz = ALIGN(params_cmdline_sz, 16); |
405 | kbuf.bufsz = params_cmdline_sz + efi_map_sz + | 404 | kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + |
406 | sizeof(struct setup_data) + | 405 | sizeof(struct setup_data) + |
407 | sizeof(struct efi_setup_data); | 406 | sizeof(struct efi_setup_data); |
408 | 407 | ||
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
410 | if (!params) | 409 | if (!params) |
411 | return ERR_PTR(-ENOMEM); | 410 | return ERR_PTR(-ENOMEM); |
412 | efi_map_offset = params_cmdline_sz; | 411 | efi_map_offset = params_cmdline_sz; |
413 | efi_setup_data_offset = efi_map_offset + efi_map_sz; | 412 | efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); |
414 | 413 | ||
415 | /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ | 414 | /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ |
416 | setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; | 415 | setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; |
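
With both hunks applied, efi_map_sz keeps the unrounded size returned by efi_get_runtime_map_size() and the 16-byte rounding is applied only where the buffer is laid out. Restating the resulting offsets (a sketch of the code above, no new behaviour):

/*
 * kbuf layout (sketch):
 *
 *   0                      boot_params + cmdline (params_cmdline_sz, 16-aligned)
 *   efi_map_offset         = params_cmdline_sz
 *   efi_setup_data_offset  = efi_map_offset + ALIGN(efi_map_sz, 16)
 *   total bufsz            = params_cmdline_sz + ALIGN(efi_map_sz, 16)
 *                            + sizeof(struct setup_data)
 *                            + sizeof(struct efi_setup_data)
 */
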
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index d41d896481b8..c9b14020f4dd 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -166,7 +166,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
166 | */ | 166 | */ |
167 | pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); | 167 | pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); |
168 | /* Filter out unsupported __PAGE_KERNEL* bits: */ | 168 | /* Filter out unsupported __PAGE_KERNEL* bits: */ |
169 | pgprot_val(pte_prot) |= __supported_pte_mask; | 169 | pgprot_val(pte_prot) &= __supported_pte_mask; |
170 | pte = pfn_pte(pfn, pte_prot); | 170 | pte = pfn_pte(pfn, pte_prot); |
171 | set_pte_at(mm, va, ptep, pte); | 171 | set_pte_at(mm, va, ptep, pte); |
172 | pte_unmap_unlock(ptep, ptl); | 172 | pte_unmap_unlock(ptep, ptl); |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c deleted file mode 100644 index ac7ea3a8242f..000000000000 --- a/arch/x86/kernel/pci-nommu.c +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* Fallback functions when the main IOMMU code is not compiled in. This | ||
3 | code is roughly equivalent to i386. */ | ||
4 | #include <linux/dma-direct.h> | ||
5 | #include <linux/scatterlist.h> | ||
6 | #include <linux/string.h> | ||
7 | #include <linux/gfp.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/mm.h> | ||
10 | |||
11 | #include <asm/processor.h> | ||
12 | #include <asm/iommu.h> | ||
13 | #include <asm/dma.h> | ||
14 | |||
15 | #define NOMMU_MAPPING_ERROR 0 | ||
16 | |||
17 | static int | ||
18 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | ||
19 | { | ||
20 | if (hwdev && !dma_capable(hwdev, bus, size)) { | ||
21 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) | ||
22 | printk(KERN_ERR | ||
23 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | ||
24 | name, (long long)bus, size, | ||
25 | (long long)*hwdev->dma_mask); | ||
26 | return 0; | ||
27 | } | ||
28 | return 1; | ||
29 | } | ||
30 | |||
31 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | ||
32 | unsigned long offset, size_t size, | ||
33 | enum dma_data_direction dir, | ||
34 | unsigned long attrs) | ||
35 | { | ||
36 | dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset; | ||
37 | WARN_ON(size == 0); | ||
38 | if (!check_addr("map_single", dev, bus, size)) | ||
39 | return NOMMU_MAPPING_ERROR; | ||
40 | return bus; | ||
41 | } | ||
42 | |||
43 | /* Map a set of buffers described by scatterlist in streaming | ||
44 | * mode for DMA. This is the scatter-gather version of the | ||
45 | * above pci_map_single interface. Here the scatter gather list | ||
46 | * elements are each tagged with the appropriate dma address | ||
47 | * and length. They are obtained via sg_dma_{address,length}(SG). | ||
48 | * | ||
49 | * NOTE: An implementation may be able to use a smaller number of | ||
50 | * DMA address/length pairs than there are SG table elements. | ||
51 | * (for example via virtual mapping capabilities) | ||
52 | * The routine returns the number of addr/length pairs actually | ||
53 | * used, at most nents. | ||
54 | * | ||
55 | * Device ownership issues as mentioned above for pci_map_single are | ||
56 | * the same here. | ||
57 | */ | ||
58 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | ||
59 | int nents, enum dma_data_direction dir, | ||
60 | unsigned long attrs) | ||
61 | { | ||
62 | struct scatterlist *s; | ||
63 | int i; | ||
64 | |||
65 | WARN_ON(nents == 0 || sg[0].length == 0); | ||
66 | |||
67 | for_each_sg(sg, s, nents, i) { | ||
68 | BUG_ON(!sg_page(s)); | ||
69 | s->dma_address = sg_phys(s); | ||
70 | if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) | ||
71 | return 0; | ||
72 | s->dma_length = s->length; | ||
73 | } | ||
74 | return nents; | ||
75 | } | ||
76 | |||
77 | static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
78 | { | ||
79 | return dma_addr == NOMMU_MAPPING_ERROR; | ||
80 | } | ||
81 | |||
82 | const struct dma_map_ops nommu_dma_ops = { | ||
83 | .alloc = dma_generic_alloc_coherent, | ||
84 | .free = dma_generic_free_coherent, | ||
85 | .map_sg = nommu_map_sg, | ||
86 | .map_page = nommu_map_page, | ||
87 | .is_phys = 1, | ||
88 | .mapping_error = nommu_mapping_error, | ||
89 | .dma_supported = x86_dma_supported, | ||
90 | }; | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ff99e2b6fc54..45175b81dd5b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -77,6 +77,8 @@ | |||
77 | #include <asm/i8259.h> | 77 | #include <asm/i8259.h> |
78 | #include <asm/misc.h> | 78 | #include <asm/misc.h> |
79 | #include <asm/qspinlock.h> | 79 | #include <asm/qspinlock.h> |
80 | #include <asm/intel-family.h> | ||
81 | #include <asm/cpu_device_id.h> | ||
80 | 82 | ||
81 | /* Number of siblings per CPU package */ | 83 | /* Number of siblings per CPU package */ |
82 | int smp_num_siblings = 1; | 84 | int smp_num_siblings = 1; |
@@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
390 | return false; | 392 | return false; |
391 | } | 393 | } |
392 | 394 | ||
395 | /* | ||
396 | * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. | ||
397 | * | ||
398 | * These are Intel CPUs that enumerate an LLC that is shared by | ||
399 | * multiple NUMA nodes. The LLC on these systems is shared for | ||
400 | * off-package data access but private to the NUMA node (half | ||
401 | * of the package) for on-package access. | ||
402 | * | ||
403 | * CPUID (the source of the information about the LLC) can only | ||
404 | * enumerate the cache as being shared *or* unshared, but not | ||
405 | * this particular configuration. The CPU in this case enumerates | ||
406 | * the cache to be shared across the entire package (spanning both | ||
407 | * NUMA nodes). | ||
408 | */ | ||
409 | |||
410 | static const struct x86_cpu_id snc_cpu[] = { | ||
411 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X }, | ||
412 | {} | ||
413 | }; | ||
414 | |||
393 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 415 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
394 | { | 416 | { |
395 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 417 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
396 | 418 | ||
397 | if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && | 419 | /* Do not match if we do not have a valid APICID for cpu: */ |
398 | per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) | 420 | if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) |
399 | return topology_sane(c, o, "llc"); | 421 | return false; |
400 | 422 | ||
401 | return false; | 423 | /* Do not match if LLC id does not match: */ |
424 | if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2)) | ||
425 | return false; | ||
426 | |||
427 | /* | ||
428 | * Allow the SNC topology without warning. Return of false | ||
429 | * means 'c' does not share the LLC of 'o'. This will be | ||
430 | * reflected to userspace. | ||
431 | */ | ||
432 | if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) | ||
433 | return false; | ||
434 | |||
435 | return topology_sane(c, o, "llc"); | ||
402 | } | 436 | } |
403 | 437 | ||
404 | /* | 438 | /* |
@@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = { | |||
456 | 490 | ||
457 | /* | 491 | /* |
458 | * Set if a package/die has multiple NUMA nodes inside. | 492 | * Set if a package/die has multiple NUMA nodes inside. |
459 | * AMD Magny-Cours and Intel Cluster-on-Die have this. | 493 | * AMD Magny-Cours, Intel Cluster-on-Die, and Intel |
494 | * Sub-NUMA Clustering have this. | ||
460 | */ | 495 | */ |
461 | static bool x86_has_numa_in_package; | 496 | static bool x86_has_numa_in_package; |
462 | 497 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ef32297ff17e..91e6da48cbb6 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) | |||
317 | hpet2 -= hpet1; | 317 | hpet2 -= hpet1; |
318 | tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); | 318 | tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); |
319 | do_div(tmp, 1000000); | 319 | do_div(tmp, 1000000); |
320 | do_div(deltatsc, tmp); | 320 | deltatsc = div64_u64(deltatsc, tmp); |
321 | 321 | ||
322 | return (unsigned long) deltatsc; | 322 | return (unsigned long) deltatsc; |
323 | } | 323 | } |
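
The one-line change above matters because do_div() divides a 64-bit dividend by a 32-bit divisor only, while tmp is a u64 that can exceed 32 bits here; div64_u64() performs a full 64-by-64 division. A small sketch of the distinction (illustrative values, not from the patch):

#include <linux/math64.h>	/* div64_u64() */
#include <asm/div64.h>		/* do_div() */

static u64 divide_examples(void)
{
	u64 n = 10000000000ULL;

	/* do_div() modifies n in place and its divisor must fit in a u32 */
	do_div(n, 1000000U);

	/* div64_u64() takes and returns u64 and copes with a 64-bit divisor */
	return div64_u64(10000000000ULL, 5000000000ULL);
}
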
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b58787daf9f8..1fc05e428aba 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) | |||
1423 | seg->base = 0; | 1423 | seg->base = 0; |
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) | ||
1427 | { | ||
1428 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1429 | |||
1430 | if (is_guest_mode(vcpu)) | ||
1431 | return svm->nested.hsave->control.tsc_offset; | ||
1432 | |||
1433 | return vcpu->arch.tsc_offset; | ||
1434 | } | ||
1435 | |||
1426 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 1436 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
1427 | { | 1437 | { |
1428 | struct vcpu_svm *svm = to_svm(vcpu); | 1438 | struct vcpu_svm *svm = to_svm(vcpu); |
1429 | u64 g_tsc_offset = 0; | 1439 | u64 g_tsc_offset = 0; |
1430 | 1440 | ||
1431 | if (is_guest_mode(vcpu)) { | 1441 | if (is_guest_mode(vcpu)) { |
1442 | /* Write L1's TSC offset. */ | ||
1432 | g_tsc_offset = svm->vmcb->control.tsc_offset - | 1443 | g_tsc_offset = svm->vmcb->control.tsc_offset - |
1433 | svm->nested.hsave->control.tsc_offset; | 1444 | svm->nested.hsave->control.tsc_offset; |
1434 | svm->nested.hsave->control.tsc_offset = offset; | 1445 | svm->nested.hsave->control.tsc_offset = offset; |
@@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
3322 | /* Restore the original control entries */ | 3333 | /* Restore the original control entries */ |
3323 | copy_vmcb_control_area(vmcb, hsave); | 3334 | copy_vmcb_control_area(vmcb, hsave); |
3324 | 3335 | ||
3336 | svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; | ||
3325 | kvm_clear_exception_queue(&svm->vcpu); | 3337 | kvm_clear_exception_queue(&svm->vcpu); |
3326 | kvm_clear_interrupt_queue(&svm->vcpu); | 3338 | kvm_clear_interrupt_queue(&svm->vcpu); |
3327 | 3339 | ||
@@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, | |||
3482 | /* We don't want to see VMMCALLs from a nested guest */ | 3494 | /* We don't want to see VMMCALLs from a nested guest */ |
3483 | clr_intercept(svm, INTERCEPT_VMMCALL); | 3495 | clr_intercept(svm, INTERCEPT_VMMCALL); |
3484 | 3496 | ||
3497 | svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; | ||
3498 | svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; | ||
3499 | |||
3485 | svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; | 3500 | svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; |
3486 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; | 3501 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; |
3487 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; | 3502 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; |
3488 | svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; | ||
3489 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; | 3503 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; |
3490 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; | 3504 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; |
3491 | 3505 | ||
@@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
4035 | struct vcpu_svm *svm = to_svm(vcpu); | 4049 | struct vcpu_svm *svm = to_svm(vcpu); |
4036 | 4050 | ||
4037 | switch (msr_info->index) { | 4051 | switch (msr_info->index) { |
4038 | case MSR_IA32_TSC: { | ||
4039 | msr_info->data = svm->vmcb->control.tsc_offset + | ||
4040 | kvm_scale_tsc(vcpu, rdtsc()); | ||
4041 | |||
4042 | break; | ||
4043 | } | ||
4044 | case MSR_STAR: | 4052 | case MSR_STAR: |
4045 | msr_info->data = svm->vmcb->save.star; | 4053 | msr_info->data = svm->vmcb->save.star; |
4046 | break; | 4054 | break; |
@@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
4193 | svm->vmcb->save.g_pat = data; | 4201 | svm->vmcb->save.g_pat = data; |
4194 | mark_dirty(svm->vmcb, VMCB_NPT); | 4202 | mark_dirty(svm->vmcb, VMCB_NPT); |
4195 | break; | 4203 | break; |
4196 | case MSR_IA32_TSC: | ||
4197 | kvm_write_tsc(vcpu, msr); | ||
4198 | break; | ||
4199 | case MSR_IA32_SPEC_CTRL: | 4204 | case MSR_IA32_SPEC_CTRL: |
4200 | if (!msr->host_initiated && | 4205 | if (!msr->host_initiated && |
4201 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) | 4206 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) |
@@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
5265 | } | 5270 | } |
5266 | 5271 | ||
5267 | if (!ret && svm) { | 5272 | if (!ret && svm) { |
5268 | trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, | 5273 | trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, |
5269 | host_irq, e->gsi, | 5274 | e->gsi, vcpu_info.vector, |
5270 | vcpu_info.vector, | ||
5271 | vcpu_info.pi_desc_addr, set); | 5275 | vcpu_info.pi_desc_addr, set); |
5272 | } | 5276 | } |
5273 | 5277 | ||
@@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
7102 | 7106 | ||
7103 | .has_wbinvd_exit = svm_has_wbinvd_exit, | 7107 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
7104 | 7108 | ||
7109 | .read_l1_tsc_offset = svm_read_l1_tsc_offset, | ||
7105 | .write_tsc_offset = svm_write_tsc_offset, | 7110 | .write_tsc_offset = svm_write_tsc_offset, |
7106 | 7111 | ||
7107 | .set_tdp_cr3 = set_tdp_cr3, | 7112 | .set_tdp_cr3 = set_tdp_cr3, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aafcc9881e88..aa66ccd6ed6c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
2880 | vmx_update_msr_bitmap(&vmx->vcpu); | 2880 | vmx_update_msr_bitmap(&vmx->vcpu); |
2881 | } | 2881 | } |
2882 | 2882 | ||
2883 | /* | 2883 | static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) |
2884 | * reads and returns guest's timestamp counter "register" | ||
2885 | * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset | ||
2886 | * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 | ||
2887 | */ | ||
2888 | static u64 guest_read_tsc(struct kvm_vcpu *vcpu) | ||
2889 | { | 2884 | { |
2890 | u64 host_tsc, tsc_offset; | 2885 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
2891 | 2886 | ||
2892 | host_tsc = rdtsc(); | 2887 | if (is_guest_mode(vcpu) && |
2893 | tsc_offset = vmcs_read64(TSC_OFFSET); | 2888 | (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) |
2894 | return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; | 2889 | return vcpu->arch.tsc_offset - vmcs12->tsc_offset; |
2890 | |||
2891 | return vcpu->arch.tsc_offset; | ||
2895 | } | 2892 | } |
2896 | 2893 | ||
2897 | /* | 2894 | /* |
@@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3524 | #endif | 3521 | #endif |
3525 | case MSR_EFER: | 3522 | case MSR_EFER: |
3526 | return kvm_get_msr_common(vcpu, msr_info); | 3523 | return kvm_get_msr_common(vcpu, msr_info); |
3527 | case MSR_IA32_TSC: | ||
3528 | msr_info->data = guest_read_tsc(vcpu); | ||
3529 | break; | ||
3530 | case MSR_IA32_SPEC_CTRL: | 3524 | case MSR_IA32_SPEC_CTRL: |
3531 | if (!msr_info->host_initiated && | 3525 | if (!msr_info->host_initiated && |
3532 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | 3526 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && |
@@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
3646 | return 1; | 3640 | return 1; |
3647 | vmcs_write64(GUEST_BNDCFGS, data); | 3641 | vmcs_write64(GUEST_BNDCFGS, data); |
3648 | break; | 3642 | break; |
3649 | case MSR_IA32_TSC: | ||
3650 | kvm_write_tsc(vcpu, msr_info); | ||
3651 | break; | ||
3652 | case MSR_IA32_SPEC_CTRL: | 3643 | case MSR_IA32_SPEC_CTRL: |
3653 | if (!msr_info->host_initiated && | 3644 | if (!msr_info->host_initiated && |
3654 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | 3645 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && |
@@ -10608,6 +10599,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, | |||
10608 | return true; | 10599 | return true; |
10609 | } | 10600 | } |
10610 | 10601 | ||
10602 | static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, | ||
10603 | struct vmcs12 *vmcs12) | ||
10604 | { | ||
10605 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && | ||
10606 | !page_address_valid(vcpu, vmcs12->apic_access_addr)) | ||
10607 | return -EINVAL; | ||
10608 | else | ||
10609 | return 0; | ||
10610 | } | ||
10611 | |||
10611 | static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, | 10612 | static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, |
10612 | struct vmcs12 *vmcs12) | 10613 | struct vmcs12 *vmcs12) |
10613 | { | 10614 | { |
@@ -11176,11 +11177,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
11176 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); | 11177 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); |
11177 | } | 11178 | } |
11178 | 11179 | ||
11179 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | 11180 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
11180 | vmcs_write64(TSC_OFFSET, | 11181 | |
11181 | vcpu->arch.tsc_offset + vmcs12->tsc_offset); | ||
11182 | else | ||
11183 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); | ||
11184 | if (kvm_has_tsc_control) | 11182 | if (kvm_has_tsc_control) |
11185 | decache_tsc_multiplier(vmx); | 11183 | decache_tsc_multiplier(vmx); |
11186 | 11184 | ||
@@ -11299,6 +11297,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
11299 | if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) | 11297 | if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) |
11300 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 11298 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
11301 | 11299 | ||
11300 | if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) | ||
11301 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
11302 | |||
11302 | if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) | 11303 | if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) |
11303 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 11304 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
11304 | 11305 | ||
@@ -11420,6 +11421,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
11420 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 11421 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
11421 | u32 msr_entry_idx; | 11422 | u32 msr_entry_idx; |
11422 | u32 exit_qual; | 11423 | u32 exit_qual; |
11424 | int r; | ||
11423 | 11425 | ||
11424 | enter_guest_mode(vcpu); | 11426 | enter_guest_mode(vcpu); |
11425 | 11427 | ||
@@ -11429,26 +11431,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
11429 | vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); | 11431 | vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); |
11430 | vmx_segment_cache_clear(vmx); | 11432 | vmx_segment_cache_clear(vmx); |
11431 | 11433 | ||
11432 | if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { | 11434 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) |
11433 | leave_guest_mode(vcpu); | 11435 | vcpu->arch.tsc_offset += vmcs12->tsc_offset; |
11434 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | 11436 | |
11435 | nested_vmx_entry_failure(vcpu, vmcs12, | 11437 | r = EXIT_REASON_INVALID_STATE; |
11436 | EXIT_REASON_INVALID_STATE, exit_qual); | 11438 | if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) |
11437 | return 1; | 11439 | goto fail; |
11438 | } | ||
11439 | 11440 | ||
11440 | nested_get_vmcs12_pages(vcpu, vmcs12); | 11441 | nested_get_vmcs12_pages(vcpu, vmcs12); |
11441 | 11442 | ||
11443 | r = EXIT_REASON_MSR_LOAD_FAIL; | ||
11442 | msr_entry_idx = nested_vmx_load_msr(vcpu, | 11444 | msr_entry_idx = nested_vmx_load_msr(vcpu, |
11443 | vmcs12->vm_entry_msr_load_addr, | 11445 | vmcs12->vm_entry_msr_load_addr, |
11444 | vmcs12->vm_entry_msr_load_count); | 11446 | vmcs12->vm_entry_msr_load_count); |
11445 | if (msr_entry_idx) { | 11447 | if (msr_entry_idx) |
11446 | leave_guest_mode(vcpu); | 11448 | goto fail; |
11447 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | ||
11448 | nested_vmx_entry_failure(vcpu, vmcs12, | ||
11449 | EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); | ||
11450 | return 1; | ||
11451 | } | ||
11452 | 11449 | ||
11453 | /* | 11450 | /* |
11454 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point | 11451 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point |
@@ -11457,6 +11454,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
11457 | * the success flag) when L2 exits (see nested_vmx_vmexit()). | 11454 | * the success flag) when L2 exits (see nested_vmx_vmexit()). |
11458 | */ | 11455 | */ |
11459 | return 0; | 11456 | return 0; |
11457 | |||
11458 | fail: | ||
11459 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | ||
11460 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; | ||
11461 | leave_guest_mode(vcpu); | ||
11462 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | ||
11463 | nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual); | ||
11464 | return 1; | ||
11460 | } | 11465 | } |
11461 | 11466 | ||
11462 | /* | 11467 | /* |
@@ -12028,6 +12033,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
12028 | 12033 | ||
12029 | leave_guest_mode(vcpu); | 12034 | leave_guest_mode(vcpu); |
12030 | 12035 | ||
12036 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | ||
12037 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; | ||
12038 | |||
12031 | if (likely(!vmx->fail)) { | 12039 | if (likely(!vmx->fail)) { |
12032 | if (exit_reason == -1) | 12040 | if (exit_reason == -1) |
12033 | sync_vmcs12(vcpu, vmcs12); | 12041 | sync_vmcs12(vcpu, vmcs12); |
@@ -12224,10 +12232,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift, | |||
12224 | 12232 | ||
12225 | static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) | 12233 | static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) |
12226 | { | 12234 | { |
12227 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 12235 | struct vcpu_vmx *vmx; |
12228 | u64 tscl = rdtsc(); | 12236 | u64 tscl, guest_tscl, delta_tsc; |
12229 | u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); | 12237 | |
12230 | u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; | 12238 | if (kvm_mwait_in_guest(vcpu->kvm)) |
12239 | return -EOPNOTSUPP; | ||
12240 | |||
12241 | vmx = to_vmx(vcpu); | ||
12242 | tscl = rdtsc(); | ||
12243 | guest_tscl = kvm_read_l1_tsc(vcpu, tscl); | ||
12244 | delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; | ||
12231 | 12245 | ||
12232 | /* Convert to host delta tsc if tsc scaling is enabled */ | 12246 | /* Convert to host delta tsc if tsc scaling is enabled */ |
12233 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && | 12247 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && |
@@ -12533,7 +12547,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
12533 | vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); | 12547 | vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); |
12534 | vcpu_info.vector = irq.vector; | 12548 | vcpu_info.vector = irq.vector; |
12535 | 12549 | ||
12536 | trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, | 12550 | trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, |
12537 | vcpu_info.vector, vcpu_info.pi_desc_addr, set); | 12551 | vcpu_info.vector, vcpu_info.pi_desc_addr, set); |
12538 | 12552 | ||
12539 | if (set) | 12553 | if (set) |
@@ -12712,6 +12726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
12712 | 12726 | ||
12713 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, | 12727 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
12714 | 12728 | ||
12729 | .read_l1_tsc_offset = vmx_read_l1_tsc_offset, | ||
12715 | .write_tsc_offset = vmx_write_tsc_offset, | 12730 | .write_tsc_offset = vmx_write_tsc_offset, |
12716 | 12731 | ||
12717 | .set_tdp_cr3 = vmx_set_cr3, | 12732 | .set_tdp_cr3 = vmx_set_cr3, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2ff74b12ec4..51ecd381793b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) | |||
1490 | 1490 | ||
1491 | static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) | 1491 | static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) |
1492 | { | 1492 | { |
1493 | u64 curr_offset = vcpu->arch.tsc_offset; | 1493 | u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); |
1494 | vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; | 1494 | vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; |
1495 | } | 1495 | } |
1496 | 1496 | ||
@@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) | |||
1532 | 1532 | ||
1533 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) | 1533 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) |
1534 | { | 1534 | { |
1535 | return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); | 1535 | u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); |
1536 | |||
1537 | return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); | ||
1536 | } | 1538 | } |
1537 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); | 1539 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); |
1538 | 1540 | ||
@@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2362 | return 1; | 2364 | return 1; |
2363 | vcpu->arch.smbase = data; | 2365 | vcpu->arch.smbase = data; |
2364 | break; | 2366 | break; |
2367 | case MSR_IA32_TSC: | ||
2368 | kvm_write_tsc(vcpu, msr_info); | ||
2369 | break; | ||
2365 | case MSR_SMI_COUNT: | 2370 | case MSR_SMI_COUNT: |
2366 | if (!msr_info->host_initiated) | 2371 | if (!msr_info->host_initiated) |
2367 | return 1; | 2372 | return 1; |
@@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2605 | case MSR_IA32_UCODE_REV: | 2610 | case MSR_IA32_UCODE_REV: |
2606 | msr_info->data = vcpu->arch.microcode_version; | 2611 | msr_info->data = vcpu->arch.microcode_version; |
2607 | break; | 2612 | break; |
2613 | case MSR_IA32_TSC: | ||
2614 | msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; | ||
2615 | break; | ||
2608 | case MSR_MTRRcap: | 2616 | case MSR_MTRRcap: |
2609 | case 0x200 ... 0x2ff: | 2617 | case 0x200 ... 0x2ff: |
2610 | return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); | 2618 | return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); |
@@ -2819,7 +2827,8 @@ out: | |||
2819 | static inline bool kvm_can_mwait_in_guest(void) | 2827 | static inline bool kvm_can_mwait_in_guest(void) |
2820 | { | 2828 | { |
2821 | return boot_cpu_has(X86_FEATURE_MWAIT) && | 2829 | return boot_cpu_has(X86_FEATURE_MWAIT) && |
2822 | !boot_cpu_has_bug(X86_BUG_MONITOR); | 2830 | !boot_cpu_has_bug(X86_BUG_MONITOR) && |
2831 | boot_cpu_has(X86_FEATURE_ARAT); | ||
2823 | } | 2832 | } |
2824 | 2833 | ||
2825 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | 2834 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
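
Taken together, the svm.c, vmx.c and x86.c hunks make vcpu->arch.tsc_offset always track the offset of the level that is currently running, with the new read_l1_tsc_offset() hook recovering L1's offset while L2 runs. The resulting arithmetic, as a worked note restating the code above (not new behaviour):

/*
 * TSC as seen by the running guest (kvm_get_msr_common() above):
 *	guest_tsc = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset
 *
 * TSC as seen by L1 (kvm_read_l1_tsc() above):
 *	l1_tsc = kvm_scale_tsc(vcpu, host_tsc)
 *		 + kvm_x86_ops->read_l1_tsc_offset(vcpu)
 *
 * where, while L2 is running,
 *	VMX: read_l1_tsc_offset() = arch.tsc_offset - vmcs12->tsc_offset
 *	SVM: read_l1_tsc_offset() = nested.hsave->control.tsc_offset
 */
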
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 62a7e9f65dec..cc7ff5957194 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/highmem.h> | ||
21 | 22 | ||
22 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
23 | 24 | ||
@@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, | |||
334 | pgprotval_t eff_in, unsigned long P) | 335 | pgprotval_t eff_in, unsigned long P) |
335 | { | 336 | { |
336 | int i; | 337 | int i; |
337 | pte_t *start; | 338 | pte_t *pte; |
338 | pgprotval_t prot, eff; | 339 | pgprotval_t prot, eff; |
339 | 340 | ||
340 | start = (pte_t *)pmd_page_vaddr(addr); | ||
341 | for (i = 0; i < PTRS_PER_PTE; i++) { | 341 | for (i = 0; i < PTRS_PER_PTE; i++) { |
342 | prot = pte_flags(*start); | ||
343 | eff = effective_prot(eff_in, prot); | ||
344 | st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); | 342 | st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); |
343 | pte = pte_offset_map(&addr, st->current_address); | ||
344 | prot = pte_flags(*pte); | ||
345 | eff = effective_prot(eff_in, prot); | ||
345 | note_page(m, st, __pgprot(prot), eff, 5); | 346 | note_page(m, st, __pgprot(prot), eff, 5); |
346 | start++; | 347 | pte_unmap(pte); |
347 | } | 348 | } |
348 | } | 349 | } |
349 | #ifdef CONFIG_KASAN | 350 | #ifdef CONFIG_KASAN |
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 48b14b534897..ccf4a49bb065 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c | |||
@@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) | |||
98 | set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); | 98 | set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); |
99 | } else { | 99 | } else { |
100 | /* No p4d for 4-level paging: point the pgd to the pud page table */ | 100 | /* No p4d for 4-level paging: point the pgd to the pud page table */ |
101 | pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); | 101 | pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); |
102 | set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); | 102 | set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); |
103 | } | 103 | } |
104 | 104 | ||
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 1efefe919555..02c6fd9caff7 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -481,7 +481,7 @@ static void ghes_do_proc(struct ghes *ghes, | |||
481 | if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { | 481 | if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { |
482 | struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); | 482 | struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); |
483 | 483 | ||
484 | ghes_edac_report_mem_error(ghes, sev, mem_err); | 484 | ghes_edac_report_mem_error(sev, mem_err); |
485 | 485 | ||
486 | arch_apei_report_mem_error(sev, mem_err); | 486 | arch_apei_report_mem_error(sev, mem_err); |
487 | ghes_handle_memory_failure(gdata, sev); | 487 | ghes_handle_memory_failure(gdata, sev); |
@@ -1087,10 +1087,6 @@ static int ghes_probe(struct platform_device *ghes_dev) | |||
1087 | goto err; | 1087 | goto err; |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | rc = ghes_edac_register(ghes, &ghes_dev->dev); | ||
1091 | if (rc < 0) | ||
1092 | goto err; | ||
1093 | |||
1094 | switch (generic->notify.type) { | 1090 | switch (generic->notify.type) { |
1095 | case ACPI_HEST_NOTIFY_POLLED: | 1091 | case ACPI_HEST_NOTIFY_POLLED: |
1096 | timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); | 1092 | timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); |
@@ -1102,14 +1098,14 @@ static int ghes_probe(struct platform_device *ghes_dev) | |||
1102 | if (rc) { | 1098 | if (rc) { |
1103 | pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", | 1099 | pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", |
1104 | generic->header.source_id); | 1100 | generic->header.source_id); |
1105 | goto err_edac_unreg; | 1101 | goto err; |
1106 | } | 1102 | } |
1107 | rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED, | 1103 | rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED, |
1108 | "GHES IRQ", ghes); | 1104 | "GHES IRQ", ghes); |
1109 | if (rc) { | 1105 | if (rc) { |
1110 | pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", | 1106 | pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", |
1111 | generic->header.source_id); | 1107 | generic->header.source_id); |
1112 | goto err_edac_unreg; | 1108 | goto err; |
1113 | } | 1109 | } |
1114 | break; | 1110 | break; |
1115 | 1111 | ||
@@ -1132,14 +1128,16 @@ static int ghes_probe(struct platform_device *ghes_dev) | |||
1132 | default: | 1128 | default: |
1133 | BUG(); | 1129 | BUG(); |
1134 | } | 1130 | } |
1131 | |||
1135 | platform_set_drvdata(ghes_dev, ghes); | 1132 | platform_set_drvdata(ghes_dev, ghes); |
1136 | 1133 | ||
1134 | ghes_edac_register(ghes, &ghes_dev->dev); | ||
1135 | |||
1137 | /* Handle any pending errors right away */ | 1136 | /* Handle any pending errors right away */ |
1138 | ghes_proc(ghes); | 1137 | ghes_proc(ghes); |
1139 | 1138 | ||
1140 | return 0; | 1139 | return 0; |
1141 | err_edac_unreg: | 1140 | |
1142 | ghes_edac_unregister(ghes); | ||
1143 | err: | 1141 | err: |
1144 | if (ghes) { | 1142 | if (ghes) { |
1145 | ghes_fini(ghes); | 1143 | ghes_fini(ghes); |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 44abb8a0a5e5..be076606d30e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
@@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) { | |||
671 | if ((vcc->pop) && (skb1->len != 0)) | 671 | if ((vcc->pop) && (skb1->len != 0)) |
672 | { | 672 | { |
673 | vcc->pop(vcc, skb1); | 673 | vcc->pop(vcc, skb1); |
674 | IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", | 674 | IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", |
675 | (long)skb1);) | 675 | (long)skb1);) |
676 | } | 676 | } |
677 | else | 677 | else |
@@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev) | |||
1665 | status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); | 1665 | status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); |
1666 | if (status & TRANSMIT_DONE){ | 1666 | if (status & TRANSMIT_DONE){ |
1667 | 1667 | ||
1668 | IF_EVENT(printk("Tansmit Done Intr logic run\n");) | 1668 | IF_EVENT(printk("Transmit Done Intr logic run\n");) |
1669 | spin_lock_irqsave(&iadev->tx_lock, flags); | 1669 | spin_lock_irqsave(&iadev->tx_lock, flags); |
1670 | ia_tx_poll(iadev); | 1670 | ia_tx_poll(iadev); |
1671 | spin_unlock_irqrestore(&iadev->tx_lock, flags); | 1671 | spin_unlock_irqrestore(&iadev->tx_lock, flags); |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 07dc5419bd63..8e8b04cc569a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) | |||
732 | */ | 732 | */ |
733 | enum { | 733 | enum { |
734 | Opt_queue_depth, | 734 | Opt_queue_depth, |
735 | Opt_lock_timeout, | ||
735 | Opt_last_int, | 736 | Opt_last_int, |
736 | /* int args above */ | 737 | /* int args above */ |
737 | Opt_last_string, | 738 | Opt_last_string, |
@@ -740,11 +741,13 @@ enum { | |||
740 | Opt_read_write, | 741 | Opt_read_write, |
741 | Opt_lock_on_read, | 742 | Opt_lock_on_read, |
742 | Opt_exclusive, | 743 | Opt_exclusive, |
744 | Opt_notrim, | ||
743 | Opt_err | 745 | Opt_err |
744 | }; | 746 | }; |
745 | 747 | ||
746 | static match_table_t rbd_opts_tokens = { | 748 | static match_table_t rbd_opts_tokens = { |
747 | {Opt_queue_depth, "queue_depth=%d"}, | 749 | {Opt_queue_depth, "queue_depth=%d"}, |
750 | {Opt_lock_timeout, "lock_timeout=%d"}, | ||
748 | /* int args above */ | 751 | /* int args above */ |
749 | /* string args above */ | 752 | /* string args above */ |
750 | {Opt_read_only, "read_only"}, | 753 | {Opt_read_only, "read_only"}, |
@@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = { | |||
753 | {Opt_read_write, "rw"}, /* Alternate spelling */ | 756 | {Opt_read_write, "rw"}, /* Alternate spelling */ |
754 | {Opt_lock_on_read, "lock_on_read"}, | 757 | {Opt_lock_on_read, "lock_on_read"}, |
755 | {Opt_exclusive, "exclusive"}, | 758 | {Opt_exclusive, "exclusive"}, |
759 | {Opt_notrim, "notrim"}, | ||
756 | {Opt_err, NULL} | 760 | {Opt_err, NULL} |
757 | }; | 761 | }; |
758 | 762 | ||
759 | struct rbd_options { | 763 | struct rbd_options { |
760 | int queue_depth; | 764 | int queue_depth; |
765 | unsigned long lock_timeout; | ||
761 | bool read_only; | 766 | bool read_only; |
762 | bool lock_on_read; | 767 | bool lock_on_read; |
763 | bool exclusive; | 768 | bool exclusive; |
769 | bool trim; | ||
764 | }; | 770 | }; |
765 | 771 | ||
766 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ | 772 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ |
773 | #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ | ||
767 | #define RBD_READ_ONLY_DEFAULT false | 774 | #define RBD_READ_ONLY_DEFAULT false |
768 | #define RBD_LOCK_ON_READ_DEFAULT false | 775 | #define RBD_LOCK_ON_READ_DEFAULT false |
769 | #define RBD_EXCLUSIVE_DEFAULT false | 776 | #define RBD_EXCLUSIVE_DEFAULT false |
777 | #define RBD_TRIM_DEFAULT true | ||
770 | 778 | ||
771 | static int parse_rbd_opts_token(char *c, void *private) | 779 | static int parse_rbd_opts_token(char *c, void *private) |
772 | { | 780 | { |
@@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
796 | } | 804 | } |
797 | rbd_opts->queue_depth = intval; | 805 | rbd_opts->queue_depth = intval; |
798 | break; | 806 | break; |
807 | case Opt_lock_timeout: | ||
808 | /* 0 is "wait forever" (i.e. infinite timeout) */ | ||
809 | if (intval < 0 || intval > INT_MAX / 1000) { | ||
810 | pr_err("lock_timeout out of range\n"); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); | ||
814 | break; | ||
799 | case Opt_read_only: | 815 | case Opt_read_only: |
800 | rbd_opts->read_only = true; | 816 | rbd_opts->read_only = true; |
801 | break; | 817 | break; |
@@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
808 | case Opt_exclusive: | 824 | case Opt_exclusive: |
809 | rbd_opts->exclusive = true; | 825 | rbd_opts->exclusive = true; |
810 | break; | 826 | break; |
827 | case Opt_notrim: | ||
828 | rbd_opts->trim = false; | ||
829 | break; | ||
811 | default: | 830 | default: |
812 | /* libceph prints "bad option" msg */ | 831 | /* libceph prints "bad option" msg */ |
813 | return -EINVAL; | 832 | return -EINVAL; |
@@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req) | |||
1392 | case OBJ_OP_DISCARD: | 1411 | case OBJ_OP_DISCARD: |
1393 | return true; | 1412 | return true; |
1394 | default: | 1413 | default: |
1395 | rbd_assert(0); | 1414 | BUG(); |
1396 | } | 1415 | } |
1397 | } | 1416 | } |
1398 | 1417 | ||
@@ -2466,7 +2485,7 @@ again: | |||
2466 | } | 2485 | } |
2467 | return false; | 2486 | return false; |
2468 | default: | 2487 | default: |
2469 | rbd_assert(0); | 2488 | BUG(); |
2470 | } | 2489 | } |
2471 | } | 2490 | } |
2472 | 2491 | ||
@@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req) | |||
2494 | } | 2513 | } |
2495 | return false; | 2514 | return false; |
2496 | default: | 2515 | default: |
2497 | rbd_assert(0); | 2516 | BUG(); |
2498 | } | 2517 | } |
2499 | } | 2518 | } |
2500 | 2519 | ||
@@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, | |||
3533 | /* | 3552 | /* |
3534 | * lock_rwsem must be held for read | 3553 | * lock_rwsem must be held for read |
3535 | */ | 3554 | */ |
3536 | static void rbd_wait_state_locked(struct rbd_device *rbd_dev) | 3555 | static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) |
3537 | { | 3556 | { |
3538 | DEFINE_WAIT(wait); | 3557 | DEFINE_WAIT(wait); |
3558 | unsigned long timeout; | ||
3559 | int ret = 0; | ||
3560 | |||
3561 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) | ||
3562 | return -EBLACKLISTED; | ||
3563 | |||
3564 | if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) | ||
3565 | return 0; | ||
3566 | |||
3567 | if (!may_acquire) { | ||
3568 | rbd_warn(rbd_dev, "exclusive lock required"); | ||
3569 | return -EROFS; | ||
3570 | } | ||
3539 | 3571 | ||
3540 | do { | 3572 | do { |
3541 | /* | 3573 | /* |
@@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) | |||
3547 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, | 3579 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, |
3548 | TASK_UNINTERRUPTIBLE); | 3580 | TASK_UNINTERRUPTIBLE); |
3549 | up_read(&rbd_dev->lock_rwsem); | 3581 | up_read(&rbd_dev->lock_rwsem); |
3550 | schedule(); | 3582 | timeout = schedule_timeout(ceph_timeout_jiffies( |
3583 | rbd_dev->opts->lock_timeout)); | ||
3551 | down_read(&rbd_dev->lock_rwsem); | 3584 | down_read(&rbd_dev->lock_rwsem); |
3552 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && | 3585 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { |
3553 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); | 3586 | ret = -EBLACKLISTED; |
3587 | break; | ||
3588 | } | ||
3589 | if (!timeout) { | ||
3590 | rbd_warn(rbd_dev, "timed out waiting for lock"); | ||
3591 | ret = -ETIMEDOUT; | ||
3592 | break; | ||
3593 | } | ||
3594 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); | ||
3554 | 3595 | ||
3555 | finish_wait(&rbd_dev->lock_waitq, &wait); | 3596 | finish_wait(&rbd_dev->lock_waitq, &wait); |
3597 | return ret; | ||
3556 | } | 3598 | } |
3557 | 3599 | ||
3558 | static void rbd_queue_workfn(struct work_struct *work) | 3600 | static void rbd_queue_workfn(struct work_struct *work) |
@@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
3638 | (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); | 3680 | (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); |
3639 | if (must_be_locked) { | 3681 | if (must_be_locked) { |
3640 | down_read(&rbd_dev->lock_rwsem); | 3682 | down_read(&rbd_dev->lock_rwsem); |
3641 | if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && | 3683 | result = rbd_wait_state_locked(rbd_dev, |
3642 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | 3684 | !rbd_dev->opts->exclusive); |
3643 | if (rbd_dev->opts->exclusive) { | 3685 | if (result) |
3644 | rbd_warn(rbd_dev, "exclusive lock required"); | ||
3645 | result = -EROFS; | ||
3646 | goto err_unlock; | ||
3647 | } | ||
3648 | rbd_wait_state_locked(rbd_dev); | ||
3649 | } | ||
3650 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | ||
3651 | result = -EBLACKLISTED; | ||
3652 | goto err_unlock; | 3686 | goto err_unlock; |
3653 | } | ||
3654 | } | 3687 | } |
3655 | 3688 | ||
3656 | img_request = rbd_img_request_create(rbd_dev, op_type, snapc); | 3689 | img_request = rbd_img_request_create(rbd_dev, op_type, snapc); |
@@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
3902 | { | 3935 | { |
3903 | struct gendisk *disk; | 3936 | struct gendisk *disk; |
3904 | struct request_queue *q; | 3937 | struct request_queue *q; |
3905 | u64 segment_size; | 3938 | unsigned int objset_bytes = |
3939 | rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; | ||
3906 | int err; | 3940 | int err; |
3907 | 3941 | ||
3908 | /* create gendisk info */ | 3942 | /* create gendisk info */ |
@@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
3942 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 3976 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
3943 | /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ | 3977 | /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ |
3944 | 3978 | ||
3945 | /* set io sizes to object size */ | 3979 | blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); |
3946 | segment_size = rbd_obj_bytes(&rbd_dev->header); | ||
3947 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); | ||
3948 | q->limits.max_sectors = queue_max_hw_sectors(q); | 3980 | q->limits.max_sectors = queue_max_hw_sectors(q); |
3949 | blk_queue_max_segments(q, USHRT_MAX); | 3981 | blk_queue_max_segments(q, USHRT_MAX); |
3950 | blk_queue_max_segment_size(q, UINT_MAX); | 3982 | blk_queue_max_segment_size(q, UINT_MAX); |
3951 | blk_queue_io_min(q, segment_size); | 3983 | blk_queue_io_min(q, objset_bytes); |
3952 | blk_queue_io_opt(q, segment_size); | 3984 | blk_queue_io_opt(q, objset_bytes); |
3953 | 3985 | ||
3954 | /* enable the discard support */ | 3986 | if (rbd_dev->opts->trim) { |
3955 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); | 3987 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
3956 | q->limits.discard_granularity = segment_size; | 3988 | q->limits.discard_granularity = objset_bytes; |
3957 | blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); | 3989 | blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); |
3958 | blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); | 3990 | blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); |
3991 | } | ||
3959 | 3992 | ||
3960 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) | 3993 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) |
3961 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | 3994 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; |
@@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf, | |||
5179 | 5212 | ||
5180 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; | 5213 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; |
5181 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; | 5214 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; |
5215 | rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; | ||
5182 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; | 5216 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; |
5183 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; | 5217 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; |
5218 | rbd_opts->trim = RBD_TRIM_DEFAULT; | ||
5184 | 5219 | ||
5185 | copts = ceph_parse_options(options, mon_addrs, | 5220 | copts = ceph_parse_options(options, mon_addrs, |
5186 | mon_addrs + mon_addrs_size - 1, | 5221 | mon_addrs + mon_addrs_size - 1, |
@@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) | |||
5216 | 5251 | ||
5217 | static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) | 5252 | static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) |
5218 | { | 5253 | { |
5254 | int ret; | ||
5255 | |||
5219 | if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { | 5256 | if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { |
5220 | rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); | 5257 | rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); |
5221 | return -EINVAL; | 5258 | return -EINVAL; |
@@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) | |||
5223 | 5260 | ||
5224 | /* FIXME: "rbd map --exclusive" should be in interruptible */ | 5261 | /* FIXME: "rbd map --exclusive" should be in interruptible */ |
5225 | down_read(&rbd_dev->lock_rwsem); | 5262 | down_read(&rbd_dev->lock_rwsem); |
5226 | rbd_wait_state_locked(rbd_dev); | 5263 | ret = rbd_wait_state_locked(rbd_dev, true); |
5227 | up_read(&rbd_dev->lock_rwsem); | 5264 | up_read(&rbd_dev->lock_rwsem); |
5228 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | 5265 | if (ret) { |
5229 | rbd_warn(rbd_dev, "failed to acquire exclusive lock"); | 5266 | rbd_warn(rbd_dev, "failed to acquire exclusive lock"); |
5230 | return -EROFS; | 5267 | return -EROFS; |
5231 | } | 5268 | } |
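The new lock_timeout option refuses values above INT_MAX / 1000 before multiplying by 1000 for msecs_to_jiffies(). A small standalone sketch of that overflow guard, assuming a 32-bit int; the helper name is made up:

#include <limits.h>
#include <stdio.h>

/* Convert a timeout in seconds to milliseconds, rejecting values whose
 * multiplication by 1000 would overflow a 32-bit int, like the patch does. */
static int timeout_secs_to_msecs(int secs, unsigned int *msecs)
{
	if (secs < 0 || secs > INT_MAX / 1000)
		return -1;
	*msecs = (unsigned int)secs * 1000u;
	return 0;
}

int main(void)
{
	unsigned int ms;

	printf("60 s      -> %s\n", timeout_secs_to_msecs(60, &ms) ? "rejected" : "ok");
	printf("too large -> %s\n",
	       timeout_secs_to_msecs(INT_MAX / 1000 + 1, &ms) ? "rejected" : "ok");
	return 0;
}

With the option wired up, a map request can carry lock_timeout=<seconds> (0 keeps the old wait-forever behaviour) and notrim alongside the existing settings, and rbd_wait_state_locked() now returns -ETIMEDOUT instead of blocking indefinitely.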
diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..3cd3aae24d6d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -427,8 +427,9 @@ struct crng_state primary_crng = { | |||
427 | * its value (from 0->1->2). | 427 | * its value (from 0->1->2). |
428 | */ | 428 | */ |
429 | static int crng_init = 0; | 429 | static int crng_init = 0; |
430 | #define crng_ready() (likely(crng_init > 0)) | 430 | #define crng_ready() (likely(crng_init > 1)) |
431 | static int crng_init_cnt = 0; | 431 | static int crng_init_cnt = 0; |
432 | static unsigned long crng_global_init_time = 0; | ||
432 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) | 433 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) |
433 | static void _extract_crng(struct crng_state *crng, | 434 | static void _extract_crng(struct crng_state *crng, |
434 | __u32 out[CHACHA20_BLOCK_WORDS]); | 435 | __u32 out[CHACHA20_BLOCK_WORDS]); |
@@ -787,6 +788,36 @@ static void crng_initialize(struct crng_state *crng) | |||
787 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; | 788 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; |
788 | } | 789 | } |
789 | 790 | ||
791 | #ifdef CONFIG_NUMA | ||
792 | static void numa_crng_init(void) | ||
793 | { | ||
794 | int i; | ||
795 | struct crng_state *crng; | ||
796 | struct crng_state **pool; | ||
797 | |||
798 | pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); | ||
799 | for_each_online_node(i) { | ||
800 | crng = kmalloc_node(sizeof(struct crng_state), | ||
801 | GFP_KERNEL | __GFP_NOFAIL, i); | ||
802 | spin_lock_init(&crng->lock); | ||
803 | crng_initialize(crng); | ||
804 | pool[i] = crng; | ||
805 | } | ||
806 | mb(); | ||
807 | if (cmpxchg(&crng_node_pool, NULL, pool)) { | ||
808 | for_each_node(i) | ||
809 | kfree(pool[i]); | ||
810 | kfree(pool); | ||
811 | } | ||
812 | } | ||
813 | #else | ||
814 | static void numa_crng_init(void) {} | ||
815 | #endif | ||
816 | |||
817 | /* | ||
818 | * crng_fast_load() can be called by code in the interrupt service | ||
819 | * path. So we can't afford to dilly-dally. | ||
820 | */ | ||
790 | static int crng_fast_load(const char *cp, size_t len) | 821 | static int crng_fast_load(const char *cp, size_t len) |
791 | { | 822 | { |
792 | unsigned long flags; | 823 | unsigned long flags; |
@@ -794,7 +825,7 @@ static int crng_fast_load(const char *cp, size_t len) | |||
794 | 825 | ||
795 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) | 826 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) |
796 | return 0; | 827 | return 0; |
797 | if (crng_ready()) { | 828 | if (crng_init != 0) { |
798 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 829 | spin_unlock_irqrestore(&primary_crng.lock, flags); |
799 | return 0; | 830 | return 0; |
800 | } | 831 | } |
@@ -813,6 +844,51 @@ static int crng_fast_load(const char *cp, size_t len) | |||
813 | return 1; | 844 | return 1; |
814 | } | 845 | } |
815 | 846 | ||
847 | /* | ||
848 | * crng_slow_load() is called by add_device_randomness, which has two | ||
849 | * attributes. (1) We can't trust the buffer passed to it is | ||
850 | * guaranteed to be unpredictable (so it might not have any entropy at | ||
851 | * all), and (2) it doesn't have the performance constraints of | ||
852 | * crng_fast_load(). | ||
853 | * | ||
854 | * So we do something more comprehensive which is guaranteed to touch | ||
855 | * all of the primary_crng's state, and which uses a LFSR with a | ||
856 | * period of 255 as part of the mixing algorithm. Finally, we do | ||
857 | * *not* advance crng_init_cnt since buffer we may get may be something | ||
858 | * like a fixed DMI table (for example), which might very well be | ||
859 | * unique to the machine, but is otherwise unvarying. | ||
860 | */ | ||
861 | static int crng_slow_load(const char *cp, size_t len) | ||
862 | { | ||
863 | unsigned long flags; | ||
864 | static unsigned char lfsr = 1; | ||
865 | unsigned char tmp; | ||
866 | unsigned i, max = CHACHA20_KEY_SIZE; | ||
867 | const char * src_buf = cp; | ||
868 | char * dest_buf = (char *) &primary_crng.state[4]; | ||
869 | |||
870 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) | ||
871 | return 0; | ||
872 | if (crng_init != 0) { | ||
873 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
874 | return 0; | ||
875 | } | ||
876 | if (len > max) | ||
877 | max = len; | ||
878 | |||
879 | for (i = 0; i < max ; i++) { | ||
880 | tmp = lfsr; | ||
881 | lfsr >>= 1; | ||
882 | if (tmp & 1) | ||
883 | lfsr ^= 0xE1; | ||
884 | tmp = dest_buf[i % CHACHA20_KEY_SIZE]; | ||
885 | dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; | ||
886 | lfsr += (tmp << 3) | (tmp >> 5); | ||
887 | } | ||
888 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
889 | return 1; | ||
890 | } | ||
891 | |||
816 | static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | 892 | static void crng_reseed(struct crng_state *crng, struct entropy_store *r) |
817 | { | 893 | { |
818 | unsigned long flags; | 894 | unsigned long flags; |
@@ -831,7 +907,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
831 | _crng_backtrack_protect(&primary_crng, buf.block, | 907 | _crng_backtrack_protect(&primary_crng, buf.block, |
832 | CHACHA20_KEY_SIZE); | 908 | CHACHA20_KEY_SIZE); |
833 | } | 909 | } |
834 | spin_lock_irqsave(&primary_crng.lock, flags); | 910 | spin_lock_irqsave(&crng->lock, flags); |
835 | for (i = 0; i < 8; i++) { | 911 | for (i = 0; i < 8; i++) { |
836 | unsigned long rv; | 912 | unsigned long rv; |
837 | if (!arch_get_random_seed_long(&rv) && | 913 | if (!arch_get_random_seed_long(&rv) && |
@@ -841,9 +917,10 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
841 | } | 917 | } |
842 | memzero_explicit(&buf, sizeof(buf)); | 918 | memzero_explicit(&buf, sizeof(buf)); |
843 | crng->init_time = jiffies; | 919 | crng->init_time = jiffies; |
844 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 920 | spin_unlock_irqrestore(&crng->lock, flags); |
845 | if (crng == &primary_crng && crng_init < 2) { | 921 | if (crng == &primary_crng && crng_init < 2) { |
846 | invalidate_batched_entropy(); | 922 | invalidate_batched_entropy(); |
923 | numa_crng_init(); | ||
847 | crng_init = 2; | 924 | crng_init = 2; |
848 | process_random_ready_list(); | 925 | process_random_ready_list(); |
849 | wake_up_interruptible(&crng_init_wait); | 926 | wake_up_interruptible(&crng_init_wait); |
@@ -856,8 +933,9 @@ static void _extract_crng(struct crng_state *crng, | |||
856 | { | 933 | { |
857 | unsigned long v, flags; | 934 | unsigned long v, flags; |
858 | 935 | ||
859 | if (crng_init > 1 && | 936 | if (crng_ready() && |
860 | time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) | 937 | (time_after(crng_global_init_time, crng->init_time) || |
938 | time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) | ||
861 | crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); | 939 | crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); |
862 | spin_lock_irqsave(&crng->lock, flags); | 940 | spin_lock_irqsave(&crng->lock, flags); |
863 | if (arch_get_random_long(&v)) | 941 | if (arch_get_random_long(&v)) |
@@ -981,10 +1059,8 @@ void add_device_randomness(const void *buf, unsigned int size) | |||
981 | unsigned long time = random_get_entropy() ^ jiffies; | 1059 | unsigned long time = random_get_entropy() ^ jiffies; |
982 | unsigned long flags; | 1060 | unsigned long flags; |
983 | 1061 | ||
984 | if (!crng_ready()) { | 1062 | if (!crng_ready() && size) |
985 | crng_fast_load(buf, size); | 1063 | crng_slow_load(buf, size); |
986 | return; | ||
987 | } | ||
988 | 1064 | ||
989 | trace_add_device_randomness(size, _RET_IP_); | 1065 | trace_add_device_randomness(size, _RET_IP_); |
990 | spin_lock_irqsave(&input_pool.lock, flags); | 1066 | spin_lock_irqsave(&input_pool.lock, flags); |
@@ -1139,7 +1215,7 @@ void add_interrupt_randomness(int irq, int irq_flags) | |||
1139 | fast_mix(fast_pool); | 1215 | fast_mix(fast_pool); |
1140 | add_interrupt_bench(cycles); | 1216 | add_interrupt_bench(cycles); |
1141 | 1217 | ||
1142 | if (!crng_ready()) { | 1218 | if (unlikely(crng_init == 0)) { |
1143 | if ((fast_pool->count >= 64) && | 1219 | if ((fast_pool->count >= 64) && |
1144 | crng_fast_load((char *) fast_pool->pool, | 1220 | crng_fast_load((char *) fast_pool->pool, |
1145 | sizeof(fast_pool->pool))) { | 1221 | sizeof(fast_pool->pool))) { |
@@ -1680,28 +1756,10 @@ static void init_std_data(struct entropy_store *r) | |||
1680 | */ | 1756 | */ |
1681 | static int rand_initialize(void) | 1757 | static int rand_initialize(void) |
1682 | { | 1758 | { |
1683 | #ifdef CONFIG_NUMA | ||
1684 | int i; | ||
1685 | struct crng_state *crng; | ||
1686 | struct crng_state **pool; | ||
1687 | #endif | ||
1688 | |||
1689 | init_std_data(&input_pool); | 1759 | init_std_data(&input_pool); |
1690 | init_std_data(&blocking_pool); | 1760 | init_std_data(&blocking_pool); |
1691 | crng_initialize(&primary_crng); | 1761 | crng_initialize(&primary_crng); |
1692 | 1762 | crng_global_init_time = jiffies; | |
1693 | #ifdef CONFIG_NUMA | ||
1694 | pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); | ||
1695 | for_each_online_node(i) { | ||
1696 | crng = kmalloc_node(sizeof(struct crng_state), | ||
1697 | GFP_KERNEL | __GFP_NOFAIL, i); | ||
1698 | spin_lock_init(&crng->lock); | ||
1699 | crng_initialize(crng); | ||
1700 | pool[i] = crng; | ||
1701 | } | ||
1702 | mb(); | ||
1703 | crng_node_pool = pool; | ||
1704 | #endif | ||
1705 | return 0; | 1763 | return 0; |
1706 | } | 1764 | } |
1707 | early_initcall(rand_initialize); | 1765 | early_initcall(rand_initialize); |
@@ -1875,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | |||
1875 | input_pool.entropy_count = 0; | 1933 | input_pool.entropy_count = 0; |
1876 | blocking_pool.entropy_count = 0; | 1934 | blocking_pool.entropy_count = 0; |
1877 | return 0; | 1935 | return 0; |
1936 | case RNDRESEEDCRNG: | ||
1937 | if (!capable(CAP_SYS_ADMIN)) | ||
1938 | return -EPERM; | ||
1939 | if (crng_init < 2) | ||
1940 | return -ENODATA; | ||
1941 | crng_reseed(&primary_crng, NULL); | ||
1942 | crng_global_init_time = jiffies - 1; | ||
1943 | return 0; | ||
1878 | default: | 1944 | default: |
1879 | return -EINVAL; | 1945 | return -EINVAL; |
1880 | } | 1946 | } |
@@ -2212,7 +2278,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, | |||
2212 | { | 2278 | { |
2213 | struct entropy_store *poolp = &input_pool; | 2279 | struct entropy_store *poolp = &input_pool; |
2214 | 2280 | ||
2215 | if (!crng_ready()) { | 2281 | if (unlikely(crng_init == 0)) { |
2216 | crng_fast_load(buffer, count); | 2282 | crng_fast_load(buffer, count); |
2217 | return; | 2283 | return; |
2218 | } | 2284 | } |
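The comment added with crng_slow_load() says the 8-bit LFSR used in the mix has a period of 255. The shift-and-xor step can be checked in isolation with a throwaway program; the "lfsr += ..." line in the kernel mixes in the existing key byte and is deliberately left out of this sketch:

#include <stdio.h>

int main(void)
{
	unsigned char lfsr = 1, tmp;
	unsigned int steps = 0;

	do {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;	/* same taps as the crng_slow_load() mix */
		steps++;
	} while (lfsr != 1 && steps < 1000);

	printf("LFSR period: %u steps\n", steps);	/* 255 if the comment holds */
	return 0;
}

A design note on the NUMA change: numa_crng_init() now installs the per-node pool with cmpxchg() and frees its own copy if it loses the race, so concurrent callers can neither leak the pool nor install it twice, which the old unconditional assignment in rand_initialize() did not have to worry about.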
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 9ee2888275c1..8e8a09755d10 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -133,6 +133,14 @@ config VT8500_TIMER | |||
133 | help | 133 | help |
134 | Enables support for the VT8500 driver. | 134 | Enables support for the VT8500 driver. |
135 | 135 | ||
136 | config NPCM7XX_TIMER | ||
137 | bool "NPCM7xx timer driver" if COMPILE_TEST | ||
138 | depends on HAS_IOMEM | ||
139 | select CLKSRC_MMIO | ||
140 | help | ||
141 | Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, | ||
142 | While TIMER0 serves as clockevent and TIMER1 serves as clocksource. | ||
143 | |||
136 | config CADENCE_TTC_TIMER | 144 | config CADENCE_TTC_TIMER |
137 | bool "Cadence TTC timer driver" if COMPILE_TEST | 145 | bool "Cadence TTC timer driver" if COMPILE_TEST |
138 | depends on COMMON_CLK | 146 | depends on COMMON_CLK |
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e8e76dfef00b..00caf37e52f9 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o | |||
56 | obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o | 56 | obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o |
57 | obj-$(CONFIG_OWL_TIMER) += owl-timer.o | 57 | obj-$(CONFIG_OWL_TIMER) += owl-timer.o |
58 | obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o | 58 | obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o |
59 | obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o | ||
59 | 60 | ||
60 | obj-$(CONFIG_ARC_TIMERS) += arc_timer.o | 61 | obj-$(CONFIG_ARC_TIMERS) += arc_timer.o |
61 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | 62 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o |
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 21bffdcb2f20..6c8318470b48 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c | |||
@@ -17,9 +17,14 @@ | |||
17 | #include <linux/of_irq.h> | 17 | #include <linux/of_irq.h> |
18 | #include <linux/sched_clock.h> | 18 | #include <linux/sched_clock.h> |
19 | 19 | ||
20 | #define TPM_PARAM 0x4 | ||
21 | #define TPM_PARAM_WIDTH_SHIFT 16 | ||
22 | #define TPM_PARAM_WIDTH_MASK (0xff << 16) | ||
20 | #define TPM_SC 0x10 | 23 | #define TPM_SC 0x10 |
21 | #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) | 24 | #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) |
22 | #define TPM_SC_CMOD_DIV_DEFAULT 0x3 | 25 | #define TPM_SC_CMOD_DIV_DEFAULT 0x3 |
26 | #define TPM_SC_CMOD_DIV_MAX 0x7 | ||
27 | #define TPM_SC_TOF_MASK (0x1 << 7) | ||
23 | #define TPM_CNT 0x14 | 28 | #define TPM_CNT 0x14 |
24 | #define TPM_MOD 0x18 | 29 | #define TPM_MOD 0x18 |
25 | #define TPM_STATUS 0x1c | 30 | #define TPM_STATUS 0x1c |
@@ -29,8 +34,11 @@ | |||
29 | #define TPM_C0SC_MODE_SHIFT 2 | 34 | #define TPM_C0SC_MODE_SHIFT 2 |
30 | #define TPM_C0SC_MODE_MASK 0x3c | 35 | #define TPM_C0SC_MODE_MASK 0x3c |
31 | #define TPM_C0SC_MODE_SW_COMPARE 0x4 | 36 | #define TPM_C0SC_MODE_SW_COMPARE 0x4 |
37 | #define TPM_C0SC_CHF_MASK (0x1 << 7) | ||
32 | #define TPM_C0V 0x24 | 38 | #define TPM_C0V 0x24 |
33 | 39 | ||
40 | static int counter_width; | ||
41 | static int rating; | ||
34 | static void __iomem *timer_base; | 42 | static void __iomem *timer_base; |
35 | static struct clock_event_device clockevent_tpm; | 43 | static struct clock_event_device clockevent_tpm; |
36 | 44 | ||
@@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate) | |||
83 | tpm_delay_timer.freq = rate; | 91 | tpm_delay_timer.freq = rate; |
84 | register_current_timer_delay(&tpm_delay_timer); | 92 | register_current_timer_delay(&tpm_delay_timer); |
85 | 93 | ||
86 | sched_clock_register(tpm_read_sched_clock, 32, rate); | 94 | sched_clock_register(tpm_read_sched_clock, counter_width, rate); |
87 | 95 | ||
88 | return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", | 96 | return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", |
89 | rate, 200, 32, clocksource_mmio_readl_up); | 97 | rate, rating, counter_width, |
98 | clocksource_mmio_readl_up); | ||
90 | } | 99 | } |
91 | 100 | ||
92 | static int tpm_set_next_event(unsigned long delta, | 101 | static int tpm_set_next_event(unsigned long delta, |
@@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta, | |||
105 | * of writing CNT registers which may cause the min_delta event got | 114 | * of writing CNT registers which may cause the min_delta event got |
106 | * missed, so we need add a ETIME check here in case it happened. | 115 | * missed, so we need add a ETIME check here in case it happened. |
107 | */ | 116 | */ |
108 | return (int)((next - now) <= 0) ? -ETIME : 0; | 117 | return (int)(next - now) <= 0 ? -ETIME : 0; |
109 | } | 118 | } |
110 | 119 | ||
111 | static int tpm_set_state_oneshot(struct clock_event_device *evt) | 120 | static int tpm_set_state_oneshot(struct clock_event_device *evt) |
@@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = { | |||
139 | .set_state_oneshot = tpm_set_state_oneshot, | 148 | .set_state_oneshot = tpm_set_state_oneshot, |
140 | .set_next_event = tpm_set_next_event, | 149 | .set_next_event = tpm_set_next_event, |
141 | .set_state_shutdown = tpm_set_state_shutdown, | 150 | .set_state_shutdown = tpm_set_state_shutdown, |
142 | .rating = 200, | ||
143 | }; | 151 | }; |
144 | 152 | ||
145 | static int __init tpm_clockevent_init(unsigned long rate, int irq) | 153 | static int __init tpm_clockevent_init(unsigned long rate, int irq) |
@@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq) | |||
149 | ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, | 157 | ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, |
150 | "i.MX7ULP TPM Timer", &clockevent_tpm); | 158 | "i.MX7ULP TPM Timer", &clockevent_tpm); |
151 | 159 | ||
160 | clockevent_tpm.rating = rating; | ||
152 | clockevent_tpm.cpumask = cpumask_of(0); | 161 | clockevent_tpm.cpumask = cpumask_of(0); |
153 | clockevent_tpm.irq = irq; | 162 | clockevent_tpm.irq = irq; |
154 | clockevents_config_and_register(&clockevent_tpm, | 163 | clockevents_config_and_register(&clockevent_tpm, rate, 300, |
155 | rate, 300, 0xfffffffe); | 164 | GENMASK(counter_width - 1, 1)); |
156 | 165 | ||
157 | return ret; | 166 | return ret; |
158 | } | 167 | } |
@@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np) | |||
179 | ipg = of_clk_get_by_name(np, "ipg"); | 188 | ipg = of_clk_get_by_name(np, "ipg"); |
180 | per = of_clk_get_by_name(np, "per"); | 189 | per = of_clk_get_by_name(np, "per"); |
181 | if (IS_ERR(ipg) || IS_ERR(per)) { | 190 | if (IS_ERR(ipg) || IS_ERR(per)) { |
182 | pr_err("tpm: failed to get igp or per clk\n"); | 191 | pr_err("tpm: failed to get ipg or per clk\n"); |
183 | ret = -ENODEV; | 192 | ret = -ENODEV; |
184 | goto err_clk_get; | 193 | goto err_clk_get; |
185 | } | 194 | } |
@@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np) | |||
197 | goto err_per_clk_enable; | 206 | goto err_per_clk_enable; |
198 | } | 207 | } |
199 | 208 | ||
209 | counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK) | ||
210 | >> TPM_PARAM_WIDTH_SHIFT; | ||
211 | /* use rating 200 for 32-bit counter and 150 for 16-bit counter */ | ||
212 | rating = counter_width == 0x20 ? 200 : 150; | ||
213 | |||
200 | /* | 214 | /* |
201 | * Initialize tpm module to a known state | 215 | * Initialize tpm module to a known state |
202 | * 1) Counter disabled | 216 | * 1) Counter disabled |
@@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np) | |||
205 | * 4) Channel0 disabled | 219 | * 4) Channel0 disabled |
206 | * 5) DMA transfers disabled | 220 | * 5) DMA transfers disabled |
207 | */ | 221 | */ |
222 | /* make sure counter is disabled */ | ||
208 | writel(0, timer_base + TPM_SC); | 223 | writel(0, timer_base + TPM_SC); |
224 | /* TOF is W1C */ | ||
225 | writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); | ||
209 | writel(0, timer_base + TPM_CNT); | 226 | writel(0, timer_base + TPM_CNT); |
210 | writel(0, timer_base + TPM_C0SC); | 227 | /* CHF is W1C */ |
228 | writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); | ||
211 | 229 | ||
212 | /* increase per cnt, div 8 by default */ | 230 | /* |
213 | writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, | 231 | * increase per cnt, |
232 | * div 8 for 32-bit counter and div 128 for 16-bit counter | ||
233 | */ | ||
234 | writel(TPM_SC_CMOD_INC_PER_CNT | | ||
235 | (counter_width == 0x20 ? | ||
236 | TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX), | ||
214 | timer_base + TPM_SC); | 237 | timer_base + TPM_SC); |
215 | 238 | ||
216 | /* set MOD register to maximum for free running mode */ | 239 | /* set MOD register to maximum for free running mode */ |
217 | writel(0xffffffff, timer_base + TPM_MOD); | 240 | writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD); |
218 | 241 | ||
219 | rate = clk_get_rate(per) >> 3; | 242 | rate = clk_get_rate(per) >> 3; |
220 | ret = tpm_clocksource_init(rate); | 243 | ret = tpm_clocksource_init(rate); |
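The i.MX TPM driver now reads the counter width out of TPM_PARAM and derives both the free-running MOD value (GENMASK(counter_width - 1, 0)) and the clockevent maximum delta (GENMASK(counter_width - 1, 1)) from it instead of hard-coding 32-bit constants. A plain-C sketch of the equivalent mask arithmetic, with a local helper standing in for GENMASK so it compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of GENMASK(width - 1, 0) without kernel headers; the
 * width >= 32 branch avoids the undefined 1u << 32 shift. */
static uint32_t width_mask(unsigned int width)
{
	return width >= 32 ? 0xffffffffu : (1u << width) - 1;
}

int main(void)
{
	printf("32-bit: MOD = 0x%08x, max delta = 0x%08x\n",
	       width_mask(32), width_mask(32) & ~1u);
	printf("16-bit: MOD = 0x%08x, max delta = 0x%08x\n",
	       width_mask(16), width_mask(16) & ~1u);
	return 0;
}

For a 32-bit counter this reproduces the old 0xffffffff / 0xfffffffe pair; a 16-bit counter gets 0xffff / 0xfffe along with the lower rating of 150 and the divide-by-128 prescaler chosen in the hunk above.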
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c new file mode 100644 index 000000000000..7a9bb5532d99 --- /dev/null +++ b/drivers/clocksource/timer-npcm7xx.c | |||
@@ -0,0 +1,215 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com | ||
4 | * All rights reserved. | ||
5 | * | ||
6 | * Copyright 2017 Google, Inc. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/clockchips.h> | ||
17 | #include <linux/of_irq.h> | ||
18 | #include <linux/of_address.h> | ||
19 | #include "timer-of.h" | ||
20 | |||
21 | /* Timers registers */ | ||
22 | #define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */ | ||
23 | #define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */ | ||
24 | #define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */ | ||
25 | #define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */ | ||
26 | #define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */ | ||
27 | #define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */ | ||
28 | |||
29 | /* Timers control */ | ||
30 | #define NPCM7XX_Tx_RESETINT 0x1f | ||
31 | #define NPCM7XX_Tx_PERIOD BIT(27) | ||
32 | #define NPCM7XX_Tx_INTEN BIT(29) | ||
33 | #define NPCM7XX_Tx_COUNTEN BIT(30) | ||
34 | #define NPCM7XX_Tx_ONESHOT 0x0 | ||
35 | #define NPCM7XX_Tx_OPER GENMASK(3, 27) | ||
36 | #define NPCM7XX_Tx_MIN_PRESCALE 0x1 | ||
37 | #define NPCM7XX_Tx_TDR_MASK_BITS 24 | ||
38 | #define NPCM7XX_Tx_MAX_CNT 0xFFFFFF | ||
39 | #define NPCM7XX_T0_CLR_INT 0x1 | ||
40 | #define NPCM7XX_Tx_CLR_CSR 0x0 | ||
41 | |||
42 | /* Timers operating mode */ | ||
43 | #define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \ | ||
44 | NPCM7XX_Tx_INTEN | \ | ||
45 | NPCM7XX_Tx_MIN_PRESCALE) | ||
46 | |||
47 | #define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \ | ||
48 | NPCM7XX_Tx_INTEN | \ | ||
49 | NPCM7XX_Tx_MIN_PRESCALE) | ||
50 | |||
51 | #define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \ | ||
52 | NPCM7XX_Tx_MIN_PRESCALE) | ||
53 | |||
54 | #define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE) | ||
55 | |||
56 | static int npcm7xx_timer_resume(struct clock_event_device *evt) | ||
57 | { | ||
58 | struct timer_of *to = to_timer_of(evt); | ||
59 | u32 val; | ||
60 | |||
61 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
62 | val |= NPCM7XX_Tx_COUNTEN; | ||
63 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int npcm7xx_timer_shutdown(struct clock_event_device *evt) | ||
69 | { | ||
70 | struct timer_of *to = to_timer_of(evt); | ||
71 | u32 val; | ||
72 | |||
73 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
74 | val &= ~NPCM7XX_Tx_COUNTEN; | ||
75 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int npcm7xx_timer_oneshot(struct clock_event_device *evt) | ||
81 | { | ||
82 | struct timer_of *to = to_timer_of(evt); | ||
83 | u32 val; | ||
84 | |||
85 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
86 | val &= ~NPCM7XX_Tx_OPER; | ||
87 | |||
88 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
89 | val |= NPCM7XX_START_ONESHOT_Tx; | ||
90 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int npcm7xx_timer_periodic(struct clock_event_device *evt) | ||
96 | { | ||
97 | struct timer_of *to = to_timer_of(evt); | ||
98 | u32 val; | ||
99 | |||
100 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
101 | val &= ~NPCM7XX_Tx_OPER; | ||
102 | |||
103 | writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0); | ||
104 | val |= NPCM7XX_START_PERIODIC_Tx; | ||
105 | |||
106 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
107 | |||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static int npcm7xx_clockevent_set_next_event(unsigned long evt, | ||
112 | struct clock_event_device *clk) | ||
113 | { | ||
114 | struct timer_of *to = to_timer_of(clk); | ||
115 | u32 val; | ||
116 | |||
117 | writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0); | ||
118 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
119 | val |= NPCM7XX_START_Tx; | ||
120 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id) | ||
126 | { | ||
127 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; | ||
128 | struct timer_of *to = to_timer_of(evt); | ||
129 | |||
130 | writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR); | ||
131 | |||
132 | evt->event_handler(evt); | ||
133 | |||
134 | return IRQ_HANDLED; | ||
135 | } | ||
136 | |||
137 | static struct timer_of npcm7xx_to = { | ||
138 | .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, | ||
139 | |||
140 | .clkevt = { | ||
141 | .name = "npcm7xx-timer0", | ||
142 | .features = CLOCK_EVT_FEAT_PERIODIC | | ||
143 | CLOCK_EVT_FEAT_ONESHOT, | ||
144 | .set_next_event = npcm7xx_clockevent_set_next_event, | ||
145 | .set_state_shutdown = npcm7xx_timer_shutdown, | ||
146 | .set_state_periodic = npcm7xx_timer_periodic, | ||
147 | .set_state_oneshot = npcm7xx_timer_oneshot, | ||
148 | .tick_resume = npcm7xx_timer_resume, | ||
149 | .rating = 300, | ||
150 | }, | ||
151 | |||
152 | .of_irq = { | ||
153 | .handler = npcm7xx_timer0_interrupt, | ||
154 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | ||
155 | }, | ||
156 | }; | ||
157 | |||
158 | static void __init npcm7xx_clockevents_init(void) | ||
159 | { | ||
160 | writel(NPCM7XX_DEFAULT_CSR, | ||
161 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0); | ||
162 | |||
163 | writel(NPCM7XX_Tx_RESETINT, | ||
164 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR); | ||
165 | |||
166 | npcm7xx_to.clkevt.cpumask = cpumask_of(0); | ||
167 | clockevents_config_and_register(&npcm7xx_to.clkevt, | ||
168 | timer_of_rate(&npcm7xx_to), | ||
169 | 0x1, NPCM7XX_Tx_MAX_CNT); | ||
170 | } | ||
171 | |||
172 | static void __init npcm7xx_clocksource_init(void) | ||
173 | { | ||
174 | u32 val; | ||
175 | |||
176 | writel(NPCM7XX_DEFAULT_CSR, | ||
177 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
178 | writel(NPCM7XX_Tx_MAX_CNT, | ||
179 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1); | ||
180 | |||
181 | val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
182 | val |= NPCM7XX_START_Tx; | ||
183 | writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
184 | |||
185 | clocksource_mmio_init(timer_of_base(&npcm7xx_to) + | ||
186 | NPCM7XX_REG_TDR1, | ||
187 | "npcm7xx-timer1", timer_of_rate(&npcm7xx_to), | ||
188 | 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS, | ||
189 | clocksource_mmio_readl_down); | ||
190 | } | ||
191 | |||
192 | static int __init npcm7xx_timer_init(struct device_node *np) | ||
193 | { | ||
194 | int ret; | ||
195 | |||
196 | ret = timer_of_init(np, &npcm7xx_to); | ||
197 | if (ret) | ||
198 | return ret; | ||
199 | |||
200 | /* Clock input is divided by PRESCALE + 1 before it is fed */ | ||
201 | /* to the counter */ | ||
202 | npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate / | ||
203 | (NPCM7XX_Tx_MIN_PRESCALE + 1); | ||
204 | |||
205 | npcm7xx_clocksource_init(); | ||
206 | npcm7xx_clockevents_init(); | ||
207 | |||
208 | pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ", | ||
209 | timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to)); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); | ||
215 | |||
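The new driver divides the input clock by PRESCALE + 1 and feeds a 24-bit counter, which bounds both the tick resolution and how far ahead a one-shot event can be scheduled. A back-of-the-envelope sketch, assuming a 25 MHz reference clock purely for illustration (the real rate comes from the device tree clock):

#include <stdio.h>

int main(void)
{
	const double input_hz = 25000000.0;	/* assumption, not taken from the driver */
	const unsigned int prescale = 1;	/* NPCM7XX_Tx_MIN_PRESCALE */
	const unsigned int max_cnt = 0xFFFFFF;	/* 24-bit counter, NPCM7XX_Tx_MAX_CNT */
	double rate = input_hz / (prescale + 1);

	printf("effective rate : %.0f Hz\n", rate);
	printf("max one-shot   : %.3f s\n", max_cnt / rate);
	printf("tick resolution: %.1f ns\n", 1e9 / rate);
	return 0;
}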
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/dax.h> | 19 | #include <linux/dax.h> |
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/mman.h> | ||
22 | #include "dax-private.h" | 23 | #include "dax-private.h" |
23 | #include "dax.h" | 24 | #include "dax.h" |
24 | 25 | ||
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { | |||
540 | .release = dax_release, | 541 | .release = dax_release, |
541 | .get_unmapped_area = dax_get_unmapped_area, | 542 | .get_unmapped_area = dax_get_unmapped_area, |
542 | .mmap = dax_mmap, | 543 | .mmap = dax_mmap, |
544 | .mmap_supported_flags = MAP_SYNC, | ||
543 | }; | 545 | }; |
544 | 546 | ||
545 | static void dev_dax_release(struct device *dev) | 547 | static void dev_dax_release(struct device *dev) |
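Setting .mmap_supported_flags = MAP_SYNC is what lets the generic mmap path accept MAP_SYNC on this character device; userspace has to pair it with MAP_SHARED_VALIDATE, the variant of MAP_SHARED that actually rejects unknown flags. A hedged userspace sketch; the fallback #define values are the asm-generic ones and the device path is made up:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03	/* assumption: asm-generic value */
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x080000		/* assumption: asm-generic value */
#endif

int main(void)
{
	int fd = open("/dev/dax0.0", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* MAP_SYNC is only honoured together with MAP_SHARED_VALIDATE. */
	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap");
	else
		puts("MAP_SYNC mapping established");
	close(fd);
	return 0;
}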
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 68b6ee18bea6..b9bea77b3de8 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c | |||
@@ -172,8 +172,7 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg) | |||
172 | } | 172 | } |
173 | } | 173 | } |
174 | 174 | ||
175 | void ghes_edac_report_mem_error(struct ghes *ghes, int sev, | 175 | void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) |
176 | struct cper_sec_mem_err *mem_err) | ||
177 | { | 176 | { |
178 | enum hw_event_mc_err_type type; | 177 | enum hw_event_mc_err_type type; |
179 | struct edac_raw_error_desc *e; | 178 | struct edac_raw_error_desc *e; |
@@ -183,10 +182,8 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev, | |||
183 | char *p; | 182 | char *p; |
184 | u8 grain_bits; | 183 | u8 grain_bits; |
185 | 184 | ||
186 | if (!pvt) { | 185 | if (!pvt) |
187 | pr_err("Internal error: Can't find EDAC structure\n"); | ||
188 | return; | 186 | return; |
189 | } | ||
190 | 187 | ||
191 | /* | 188 | /* |
192 | * We can do the locking below because GHES defers error processing | 189 | * We can do the locking below because GHES defers error processing |
@@ -439,7 +436,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) | |||
439 | /* Check if safe to enable on this system */ | 436 | /* Check if safe to enable on this system */ |
440 | idx = acpi_match_platform_list(plat_list); | 437 | idx = acpi_match_platform_list(plat_list); |
441 | if (!force_load && idx < 0) | 438 | if (!force_load && idx < 0) |
442 | return 0; | 439 | return -ENODEV; |
443 | 440 | ||
444 | /* | 441 | /* |
445 | * We have only one logical memory controller to which all DIMMs belong. | 442 | * We have only one logical memory controller to which all DIMMs belong. |
@@ -519,6 +516,9 @@ void ghes_edac_unregister(struct ghes *ghes) | |||
519 | { | 516 | { |
520 | struct mem_ctl_info *mci; | 517 | struct mem_ctl_info *mci; |
521 | 518 | ||
519 | if (!ghes_pvt) | ||
520 | return; | ||
521 | |||
522 | mci = ghes_pvt->mci; | 522 | mci = ghes_pvt->mci; |
523 | edac_mc_del_mc(mci->pdev); | 523 | edac_mc_del_mc(mci->pdev); |
524 | edac_mc_free(mci); | 524 | edac_mc_free(mci); |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 8c5540160a23..4d0ea3563d47 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1743,7 +1743,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, | |||
1743 | err = "write parity error"; | 1743 | err = "write parity error"; |
1744 | break; | 1744 | break; |
1745 | case 19: | 1745 | case 19: |
1746 | err = "redundacy loss"; | 1746 | err = "redundancy loss"; |
1747 | break; | 1747 | break; |
1748 | case 20: | 1748 | case 20: |
1749 | err = "reserved"; | 1749 | err = "reserved"; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..25f064c01038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | |||
@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) | |||
138 | lut = (struct drm_color_lut *)blob->data; | 138 | lut = (struct drm_color_lut *)blob->data; |
139 | lut_size = blob->length / sizeof(struct drm_color_lut); | 139 | lut_size = blob->length / sizeof(struct drm_color_lut); |
140 | 140 | ||
141 | if (__is_lut_linear(lut, lut_size)) { | ||
142 | /* Set to bypass if lut is set to linear */ | ||
143 | stream->out_transfer_func->type = TF_TYPE_BYPASS; | ||
144 | stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | gamma = dc_create_gamma(); | 141 | gamma = dc_create_gamma(); |
149 | if (!gamma) | 142 | if (!gamma) |
150 | return -ENOMEM; | 143 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..26fbeafc3c96 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) | |||
4743 | 4743 | ||
4744 | for (i=0; i < dep_table->count; i++) { | 4744 | for (i=0; i < dep_table->count; i++) { |
4745 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { | 4745 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
4746 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; | 4746 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; |
4747 | break; | 4747 | return; |
4748 | } | 4748 | } |
4749 | } | 4749 | } |
4750 | if (i == dep_table->count) | 4750 | if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
4751 | data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; | 4751 | data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; |
4752 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; | ||
4753 | } | ||
4752 | 4754 | ||
4753 | dep_table = table_info->vdd_dep_on_sclk; | 4755 | dep_table = table_info->vdd_dep_on_sclk; |
4754 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); | 4756 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); |
4755 | for (i=0; i < dep_table->count; i++) { | 4757 | for (i=0; i < dep_table->count; i++) { |
4756 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { | 4758 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
4757 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; | 4759 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; |
4758 | break; | 4760 | return; |
4759 | } | 4761 | } |
4760 | } | 4762 | } |
4761 | if (i == dep_table->count) | 4763 | if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
4762 | data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; | 4764 | data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; |
4765 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; | ||
4766 | } | ||
4763 | } | 4767 | } |
4764 | 4768 | ||
4765 | static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, | 4769 | static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | |||
@@ -412,8 +412,10 @@ typedef struct { | |||
412 | QuadraticInt_t ReservedEquation2; | 412 | QuadraticInt_t ReservedEquation2; |
413 | QuadraticInt_t ReservedEquation3; | 413 | QuadraticInt_t ReservedEquation3; |
414 | 414 | ||
415 | uint16_t MinVoltageUlvGfx; | ||
416 | uint16_t MinVoltageUlvSoc; | ||
415 | 417 | ||
416 | uint32_t Reserved[15]; | 418 | uint32_t Reserved[14]; |
417 | 419 | ||
418 | 420 | ||
419 | 421 | ||
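The two new uint16_t fields replace exactly one uint32_t, so Reserved[] shrinks from 15 to 14 entries and the structure keeps its size; that matters because the layout is shared with the SMU firmware. A toy sketch, with made-up struct and field names, of the kind of size check that guards this sort of edit:

#include <stdint.h>

/* Hypothetical before/after layouts, only to show the bookkeeping. */
struct pptable_v1 {
	uint32_t reserved[15];
};

struct pptable_v2 {
	uint16_t min_voltage_ulv_gfx;
	uint16_t min_voltage_ulv_soc;
	uint32_t reserved[14];
};

/* Two u16 fields occupy one former u32 slot, so the size is unchanged. */
_Static_assert(sizeof(struct pptable_v1) == sizeof(struct pptable_v2),
	       "firmware-shared table must keep its size");

int main(void)
{
	return 0;
}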
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 02a50929af67..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c | |||
@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, | |||
350 | { | 350 | { |
351 | uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; | 351 | uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; |
352 | ssize_t ret; | 352 | ssize_t ret; |
353 | int retry; | ||
353 | 354 | ||
354 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) | 355 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) |
355 | return 0; | 356 | return 0; |
356 | 357 | ||
357 | ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, | 358 | /* |
358 | &tmds_oen, sizeof(tmds_oen)); | 359 | * LSPCON adapters in low-power state may ignore the first write, so |
359 | if (ret) { | 360 | * read back and verify the written value a few times. |
360 | DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", | 361 | */ |
361 | enable ? "enable" : "disable"); | 362 | for (retry = 0; retry < 3; retry++) { |
362 | return ret; | 363 | uint8_t tmp; |
364 | |||
365 | ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
366 | &tmds_oen, sizeof(tmds_oen)); | ||
367 | if (ret) { | ||
368 | DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", | ||
369 | enable ? "enable" : "disable", | ||
370 | retry + 1); | ||
371 | return ret; | ||
372 | } | ||
373 | |||
374 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
375 | &tmp, sizeof(tmp)); | ||
376 | if (ret) { | ||
377 | DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", | ||
378 | enable ? "enabling" : "disabling", | ||
379 | retry + 1); | ||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | if (tmp == tmds_oen) | ||
384 | return 0; | ||
363 | } | 385 | } |
364 | 386 | ||
365 | return 0; | 387 | DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", |
388 | enable ? "enabling" : "disabling"); | ||
389 | |||
390 | return -EIO; | ||
366 | } | 391 | } |
367 | EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); | 392 | EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); |
368 | 393 | ||
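
The rewritten helper above replaces a single register write with a bounded write/read-back loop, because an LSPCON in a low-power state may silently drop the first I2C transaction. A stripped-down sketch of the same pattern, with reg_write()/reg_read() as assumed placeholder accessors rather than the real DRM dual-mode helpers:

    #include <errno.h>
    #include <stdint.h>

    /* placeholder bus accessors (assumptions, not the DRM API):
     * return 0 on success, negative on I2C failure */
    int reg_write(uint8_t reg, uint8_t val);
    int reg_read(uint8_t reg, uint8_t *val);

    static int write_verified(uint8_t reg, uint8_t val)
    {
        int retry;

        for (retry = 0; retry < 3; retry++) {
            uint8_t tmp;

            if (reg_write(reg, val) < 0)
                return -EIO;        /* the write itself failed */
            if (reg_read(reg, &tmp) < 0)
                return -EIO;        /* could not read back */
            if (tmp == val)
                return 0;           /* value stuck: success */
        }
        return -EIO;                /* device kept ignoring the write */
    }
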
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 0faaf829f5bf..f0e79178bde6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <drm/drm_fb_helper.h> | 18 | #include <drm/drm_fb_helper.h> |
19 | #include <drm/drm_atomic.h> | 19 | #include <drm/drm_atomic.h> |
20 | #include <drm/drm_atomic_helper.h> | 20 | #include <drm/drm_atomic_helper.h> |
21 | #include <drm/drm_gem_framebuffer_helper.h> | ||
21 | #include <uapi/drm/exynos_drm.h> | 22 | #include <uapi/drm/exynos_drm.h> |
22 | 23 | ||
23 | #include "exynos_drm_drv.h" | 24 | #include "exynos_drm_drv.h" |
@@ -26,20 +27,6 @@ | |||
26 | #include "exynos_drm_iommu.h" | 27 | #include "exynos_drm_iommu.h" |
27 | #include "exynos_drm_crtc.h" | 28 | #include "exynos_drm_crtc.h" |
28 | 29 | ||
29 | #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) | ||
30 | |||
31 | /* | ||
32 | * exynos specific framebuffer structure. | ||
33 | * | ||
34 | * @fb: drm framebuffer object. | ||
35 | * @exynos_gem: array of exynos specific gem object containing a gem object. | ||
36 | */ | ||
37 | struct exynos_drm_fb { | ||
38 | struct drm_framebuffer fb; | ||
39 | struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; | ||
40 | dma_addr_t dma_addr[MAX_FB_BUFFER]; | ||
41 | }; | ||
42 | |||
43 | static int check_fb_gem_memory_type(struct drm_device *drm_dev, | 30 | static int check_fb_gem_memory_type(struct drm_device *drm_dev, |
44 | struct exynos_drm_gem *exynos_gem) | 31 | struct exynos_drm_gem *exynos_gem) |
45 | { | 32 | { |
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, | |||
66 | return 0; | 53 | return 0; |
67 | } | 54 | } |
68 | 55 | ||
69 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | ||
70 | { | ||
71 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | ||
72 | unsigned int i; | ||
73 | |||
74 | drm_framebuffer_cleanup(fb); | ||
75 | |||
76 | for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { | ||
77 | struct drm_gem_object *obj; | ||
78 | |||
79 | if (exynos_fb->exynos_gem[i] == NULL) | ||
80 | continue; | ||
81 | |||
82 | obj = &exynos_fb->exynos_gem[i]->base; | ||
83 | drm_gem_object_unreference_unlocked(obj); | ||
84 | } | ||
85 | |||
86 | kfree(exynos_fb); | ||
87 | exynos_fb = NULL; | ||
88 | } | ||
89 | |||
90 | static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, | ||
91 | struct drm_file *file_priv, | ||
92 | unsigned int *handle) | ||
93 | { | ||
94 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | ||
95 | |||
96 | return drm_gem_handle_create(file_priv, | ||
97 | &exynos_fb->exynos_gem[0]->base, handle); | ||
98 | } | ||
99 | |||
100 | static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { | 56 | static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { |
101 | .destroy = exynos_drm_fb_destroy, | 57 | .destroy = drm_gem_fb_destroy, |
102 | .create_handle = exynos_drm_fb_create_handle, | 58 | .create_handle = drm_gem_fb_create_handle, |
103 | }; | 59 | }; |
104 | 60 | ||
105 | struct drm_framebuffer * | 61 | struct drm_framebuffer * |
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev, | |||
108 | struct exynos_drm_gem **exynos_gem, | 64 | struct exynos_drm_gem **exynos_gem, |
109 | int count) | 65 | int count) |
110 | { | 66 | { |
111 | struct exynos_drm_fb *exynos_fb; | 67 | struct drm_framebuffer *fb; |
112 | int i; | 68 | int i; |
113 | int ret; | 69 | int ret; |
114 | 70 | ||
115 | exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); | 71 | fb = kzalloc(sizeof(*fb), GFP_KERNEL); |
116 | if (!exynos_fb) | 72 | if (!fb) |
117 | return ERR_PTR(-ENOMEM); | 73 | return ERR_PTR(-ENOMEM); |
118 | 74 | ||
119 | for (i = 0; i < count; i++) { | 75 | for (i = 0; i < count; i++) { |
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev, | |||
121 | if (ret < 0) | 77 | if (ret < 0) |
122 | goto err; | 78 | goto err; |
123 | 79 | ||
124 | exynos_fb->exynos_gem[i] = exynos_gem[i]; | 80 | fb->obj[i] = &exynos_gem[i]->base; |
125 | exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr | ||
126 | + mode_cmd->offsets[i]; | ||
127 | } | 81 | } |
128 | 82 | ||
129 | drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); | 83 | drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); |
130 | 84 | ||
131 | ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); | 85 | ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); |
132 | if (ret < 0) { | 86 | if (ret < 0) { |
133 | DRM_ERROR("failed to initialize framebuffer\n"); | 87 | DRM_ERROR("failed to initialize framebuffer\n"); |
134 | goto err; | 88 | goto err; |
135 | } | 89 | } |
136 | 90 | ||
137 | return &exynos_fb->fb; | 91 | return fb; |
138 | 92 | ||
139 | err: | 93 | err: |
140 | kfree(exynos_fb); | 94 | kfree(fb); |
141 | return ERR_PTR(ret); | 95 | return ERR_PTR(ret); |
142 | } | 96 | } |
143 | 97 | ||
@@ -191,12 +145,13 @@ err: | |||
191 | 145 | ||
192 | dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) | 146 | dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) |
193 | { | 147 | { |
194 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 148 | struct exynos_drm_gem *exynos_gem; |
195 | 149 | ||
196 | if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) | 150 | if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) |
197 | return 0; | 151 | return 0; |
198 | 152 | ||
199 | return exynos_fb->dma_addr[index]; | 153 | exynos_gem = to_exynos_gem(fb->obj[index]); |
154 | return exynos_gem->dma_addr + fb->offsets[index]; | ||
200 | } | 155 | } |
201 | 156 | ||
202 | static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { | 157 | static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index db6b94dda5df..d85939bd7b47 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) | |||
1080 | { | 1080 | { |
1081 | set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, | 1081 | set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, |
1082 | s->workload->pending_events); | 1082 | s->workload->pending_events); |
1083 | patch_value(s, cmd_ptr(s, 0), MI_NOOP); | ||
1083 | return 0; | 1084 | return 0; |
1084 | } | 1085 | } |
1085 | 1086 | ||
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index dd96ffc878ac..6d8180e8d1e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { | |||
169 | static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | 169 | static void emulate_monitor_status_change(struct intel_vgpu *vgpu) |
170 | { | 170 | { |
171 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 171 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
172 | int pipe; | ||
173 | |||
172 | vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | | 174 | vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | |
173 | SDE_PORTC_HOTPLUG_CPT | | 175 | SDE_PORTC_HOTPLUG_CPT | |
174 | SDE_PORTD_HOTPLUG_CPT); | 176 | SDE_PORTD_HOTPLUG_CPT); |
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
267 | if (IS_BROADWELL(dev_priv)) | 269 | if (IS_BROADWELL(dev_priv)) |
268 | vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; | 270 | vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; |
269 | 271 | ||
272 | /* Disable Primary/Sprite/Cursor plane */ | ||
273 | for_each_pipe(dev_priv, pipe) { | ||
274 | vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; | ||
275 | vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; | ||
276 | vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; | ||
277 | vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; | ||
278 | } | ||
279 | |||
270 | vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; | 280 | vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; |
271 | } | 281 | } |
272 | 282 | ||
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index b555eb26f9ce..6f4f8e941fc2 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, | |||
323 | struct intel_vgpu_fb_info *fb_info) | 323 | struct intel_vgpu_fb_info *fb_info) |
324 | { | 324 | { |
325 | gvt_dmabuf->drm_format = fb_info->drm_format; | 325 | gvt_dmabuf->drm_format = fb_info->drm_format; |
326 | gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; | ||
326 | gvt_dmabuf->width = fb_info->width; | 327 | gvt_dmabuf->width = fb_info->width; |
327 | gvt_dmabuf->height = fb_info->height; | 328 | gvt_dmabuf->height = fb_info->height; |
328 | gvt_dmabuf->stride = fb_info->stride; | 329 | gvt_dmabuf->stride = fb_info->stride; |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6b50fe78dc1b..1c120683e958 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
245 | plane->hw_format = fmt; | 245 | plane->hw_format = fmt; |
246 | 246 | ||
247 | plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; | 247 | plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; |
248 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { | 248 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) |
249 | gvt_vgpu_err("invalid gma address: %lx\n", | ||
250 | (unsigned long)plane->base); | ||
251 | return -EINVAL; | 249 | return -EINVAL; |
252 | } | ||
253 | 250 | ||
254 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 251 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
255 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { | 252 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { |
256 | gvt_vgpu_err("invalid gma address: %lx\n", | 253 | gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", |
257 | (unsigned long)plane->base); | 254 | plane->base); |
258 | return -EINVAL; | 255 | return -EINVAL; |
259 | } | 256 | } |
260 | 257 | ||
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, | |||
371 | alpha_plane, alpha_force); | 368 | alpha_plane, alpha_force); |
372 | 369 | ||
373 | plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; | 370 | plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; |
374 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { | 371 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) |
375 | gvt_vgpu_err("invalid gma address: %lx\n", | ||
376 | (unsigned long)plane->base); | ||
377 | return -EINVAL; | 372 | return -EINVAL; |
378 | } | ||
379 | 373 | ||
380 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 374 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
381 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { | 375 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { |
382 | gvt_vgpu_err("invalid gma address: %lx\n", | 376 | gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", |
383 | (unsigned long)plane->base); | 377 | plane->base); |
384 | return -EINVAL; | 378 | return -EINVAL; |
385 | } | 379 | } |
386 | 380 | ||
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, | |||
476 | plane->drm_format = drm_format; | 470 | plane->drm_format = drm_format; |
477 | 471 | ||
478 | plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; | 472 | plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; |
479 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { | 473 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) |
480 | gvt_vgpu_err("invalid gma address: %lx\n", | ||
481 | (unsigned long)plane->base); | ||
482 | return -EINVAL; | 474 | return -EINVAL; |
483 | } | ||
484 | 475 | ||
485 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 476 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
486 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { | 477 | if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { |
487 | gvt_vgpu_err("invalid gma address: %lx\n", | 478 | gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", |
488 | (unsigned long)plane->base); | 479 | plane->base); |
489 | return -EINVAL; | 480 | return -EINVAL; |
490 | } | 481 | } |
491 | 482 | ||
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d29281231507..78e55aafc8bc 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, | |||
530 | false, 0, mm->vgpu); | 530 | false, 0, mm->vgpu); |
531 | } | 531 | } |
532 | 532 | ||
533 | static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, | ||
534 | struct intel_gvt_gtt_entry *entry, unsigned long index) | ||
535 | { | ||
536 | struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; | ||
537 | |||
538 | GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); | ||
539 | |||
540 | pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); | ||
541 | } | ||
542 | |||
533 | static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, | 543 | static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, |
534 | struct intel_gvt_gtt_entry *entry, unsigned long index) | 544 | struct intel_gvt_gtt_entry *entry, unsigned long index) |
535 | { | 545 | { |
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, | |||
1818 | return ret; | 1828 | return ret; |
1819 | } | 1829 | } |
1820 | 1830 | ||
1831 | static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, | ||
1832 | struct intel_gvt_gtt_entry *entry) | ||
1833 | { | ||
1834 | struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; | ||
1835 | unsigned long pfn; | ||
1836 | |||
1837 | pfn = pte_ops->get_pfn(entry); | ||
1838 | if (pfn != vgpu->gvt->gtt.scratch_mfn) | ||
1839 | intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, | ||
1840 | pfn << PAGE_SHIFT); | ||
1841 | } | ||
1842 | |||
1821 | static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | 1843 | static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, |
1822 | void *p_data, unsigned int bytes) | 1844 | void *p_data, unsigned int bytes) |
1823 | { | 1845 | { |
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
1844 | 1866 | ||
1845 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, | 1867 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, |
1846 | bytes); | 1868 | bytes); |
1847 | m = e; | ||
1848 | 1869 | ||
1849 | if (ops->test_present(&e)) { | 1870 | if (ops->test_present(&e)) { |
1850 | gfn = ops->get_pfn(&e); | 1871 | gfn = ops->get_pfn(&e); |
1872 | m = e; | ||
1851 | 1873 | ||
1852 | /* one PTE update may be issued in multiple writes and the | 1874 | /* one PTE update may be issued in multiple writes and the |
1853 | * first write may not construct a valid gfn | 1875 | * first write may not construct a valid gfn |
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
1868 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); | 1890 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); |
1869 | } else | 1891 | } else |
1870 | ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); | 1892 | ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); |
1871 | } else | 1893 | } else { |
1894 | ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); | ||
1895 | ggtt_invalidate_pte(vgpu, &m); | ||
1872 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); | 1896 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); |
1897 | ops->clear_present(&m); | ||
1898 | } | ||
1873 | 1899 | ||
1874 | out: | 1900 | out: |
1875 | ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); | 1901 | ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); |
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
2030 | return PTR_ERR(gtt->ggtt_mm); | 2056 | return PTR_ERR(gtt->ggtt_mm); |
2031 | } | 2057 | } |
2032 | 2058 | ||
2033 | intel_vgpu_reset_ggtt(vgpu); | 2059 | intel_vgpu_reset_ggtt(vgpu, false); |
2034 | 2060 | ||
2035 | return create_scratch_page_tree(vgpu); | 2061 | return create_scratch_page_tree(vgpu); |
2036 | } | 2062 | } |
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) | |||
2315 | /** | 2341 | /** |
2316 | * intel_vgpu_reset_ggtt - reset the GGTT entry | 2342 | * intel_vgpu_reset_ggtt - reset the GGTT entry |
2317 | * @vgpu: a vGPU | 2343 | * @vgpu: a vGPU |
2344 | * @invalidate_old: invalidate old entries | ||
2318 | * | 2345 | * |
2319 | * This function is called at the vGPU create stage | 2346 | * This function is called at the vGPU create stage |
2320 | * to reset all the GGTT entries. | 2347 | * to reset all the GGTT entries. |
2321 | * | 2348 | * |
2322 | */ | 2349 | */ |
2323 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | 2350 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) |
2324 | { | 2351 | { |
2325 | struct intel_gvt *gvt = vgpu->gvt; | 2352 | struct intel_gvt *gvt = vgpu->gvt; |
2326 | struct drm_i915_private *dev_priv = gvt->dev_priv; | 2353 | struct drm_i915_private *dev_priv = gvt->dev_priv; |
2327 | struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; | 2354 | struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; |
2328 | struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; | 2355 | struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; |
2356 | struct intel_gvt_gtt_entry old_entry; | ||
2329 | u32 index; | 2357 | u32 index; |
2330 | u32 num_entries; | 2358 | u32 num_entries; |
2331 | 2359 | ||
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | |||
2334 | 2362 | ||
2335 | index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; | 2363 | index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; |
2336 | num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; | 2364 | num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; |
2337 | while (num_entries--) | 2365 | while (num_entries--) { |
2366 | if (invalidate_old) { | ||
2367 | ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); | ||
2368 | ggtt_invalidate_pte(vgpu, &old_entry); | ||
2369 | } | ||
2338 | ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); | 2370 | ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); |
2371 | } | ||
2339 | 2372 | ||
2340 | index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; | 2373 | index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; |
2341 | num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; | 2374 | num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; |
2342 | while (num_entries--) | 2375 | while (num_entries--) { |
2376 | if (invalidate_old) { | ||
2377 | ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); | ||
2378 | ggtt_invalidate_pte(vgpu, &old_entry); | ||
2379 | } | ||
2343 | ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); | 2380 | ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); |
2381 | } | ||
2344 | 2382 | ||
2345 | ggtt_invalidate(dev_priv); | 2383 | ggtt_invalidate(dev_priv); |
2346 | } | 2384 | } |
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) | |||
2360 | * removing the shadow pages. | 2398 | * removing the shadow pages. |
2361 | */ | 2399 | */ |
2362 | intel_vgpu_destroy_all_ppgtt_mm(vgpu); | 2400 | intel_vgpu_destroy_all_ppgtt_mm(vgpu); |
2363 | intel_vgpu_reset_ggtt(vgpu); | 2401 | intel_vgpu_reset_ggtt(vgpu, true); |
2364 | } | 2402 | } |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a8b369cd352b..3792f2b7f4ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
@@ -193,7 +193,7 @@ struct intel_vgpu_gtt { | |||
193 | 193 | ||
194 | extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); | 194 | extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); |
195 | extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); | 195 | extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); |
196 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); | 196 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); |
197 | void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); | 197 | void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); |
198 | 198 | ||
199 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); | 199 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8c5d5d005854..a33c1c3e4a21 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) | |||
1150 | switch (notification) { | 1150 | switch (notification) { |
1151 | case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: | 1151 | case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: |
1152 | root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; | 1152 | root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; |
1153 | /* fall through */ | ||
1153 | case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: | 1154 | case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: |
1154 | mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); | 1155 | mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); |
1155 | return PTR_ERR_OR_ZERO(mm); | 1156 | return PTR_ERR_OR_ZERO(mm); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c16a492449d7..1466d8769ec9 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, | |||
1301 | 1301 | ||
1302 | } | 1302 | } |
1303 | 1303 | ||
1304 | return 0; | 1304 | return -ENOTTY; |
1305 | } | 1305 | } |
1306 | 1306 | ||
1307 | static ssize_t | 1307 | static ssize_t |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 84ca369f15a5..3b4daafebdcb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1105 | 1105 | ||
1106 | ret = i915_ggtt_probe_hw(dev_priv); | 1106 | ret = i915_ggtt_probe_hw(dev_priv); |
1107 | if (ret) | 1107 | if (ret) |
1108 | return ret; | 1108 | goto err_perf; |
1109 | 1109 | ||
1110 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, | 1110 | /* |
1111 | * otherwise the vga fbdev driver falls over. */ | 1111 | * WARNING: Apparently we must kick fbdev drivers before vgacon, |
1112 | * otherwise the vga fbdev driver falls over. | ||
1113 | */ | ||
1112 | ret = i915_kick_out_firmware_fb(dev_priv); | 1114 | ret = i915_kick_out_firmware_fb(dev_priv); |
1113 | if (ret) { | 1115 | if (ret) { |
1114 | DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); | 1116 | DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); |
1115 | goto out_ggtt; | 1117 | goto err_ggtt; |
1116 | } | 1118 | } |
1117 | 1119 | ||
1118 | ret = i915_kick_out_vgacon(dev_priv); | 1120 | ret = i915_kick_out_vgacon(dev_priv); |
1119 | if (ret) { | 1121 | if (ret) { |
1120 | DRM_ERROR("failed to remove conflicting VGA console\n"); | 1122 | DRM_ERROR("failed to remove conflicting VGA console\n"); |
1121 | goto out_ggtt; | 1123 | goto err_ggtt; |
1122 | } | 1124 | } |
1123 | 1125 | ||
1124 | ret = i915_ggtt_init_hw(dev_priv); | 1126 | ret = i915_ggtt_init_hw(dev_priv); |
1125 | if (ret) | 1127 | if (ret) |
1126 | return ret; | 1128 | goto err_ggtt; |
1127 | 1129 | ||
1128 | ret = i915_ggtt_enable_hw(dev_priv); | 1130 | ret = i915_ggtt_enable_hw(dev_priv); |
1129 | if (ret) { | 1131 | if (ret) { |
1130 | DRM_ERROR("failed to enable GGTT\n"); | 1132 | DRM_ERROR("failed to enable GGTT\n"); |
1131 | goto out_ggtt; | 1133 | goto err_ggtt; |
1132 | } | 1134 | } |
1133 | 1135 | ||
1134 | pci_set_master(pdev); | 1136 | pci_set_master(pdev); |
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1139 | if (ret) { | 1141 | if (ret) { |
1140 | DRM_ERROR("failed to set DMA mask\n"); | 1142 | DRM_ERROR("failed to set DMA mask\n"); |
1141 | 1143 | ||
1142 | goto out_ggtt; | 1144 | goto err_ggtt; |
1143 | } | 1145 | } |
1144 | } | 1146 | } |
1145 | 1147 | ||
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1157 | if (ret) { | 1159 | if (ret) { |
1158 | DRM_ERROR("failed to set DMA mask\n"); | 1160 | DRM_ERROR("failed to set DMA mask\n"); |
1159 | 1161 | ||
1160 | goto out_ggtt; | 1162 | goto err_ggtt; |
1161 | } | 1163 | } |
1162 | } | 1164 | } |
1163 | 1165 | ||
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1190 | 1192 | ||
1191 | ret = intel_gvt_init(dev_priv); | 1193 | ret = intel_gvt_init(dev_priv); |
1192 | if (ret) | 1194 | if (ret) |
1193 | goto out_ggtt; | 1195 | goto err_ggtt; |
1194 | 1196 | ||
1195 | return 0; | 1197 | return 0; |
1196 | 1198 | ||
1197 | out_ggtt: | 1199 | err_ggtt: |
1198 | i915_ggtt_cleanup_hw(dev_priv); | 1200 | i915_ggtt_cleanup_hw(dev_priv); |
1199 | 1201 | err_perf: | |
1202 | i915_perf_fini(dev_priv); | ||
1200 | return ret; | 1203 | return ret; |
1201 | } | 1204 | } |
1202 | 1205 | ||
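
The relabelled error path above (out_ggtt becomes err_ggtt, plus a new err_perf label) follows the usual kernel unwind idiom: each failure jumps to a label that undoes only what was already set up, in reverse order. A generic sketch of that idiom, with made-up init/fini steps:

    struct ctx;

    /* a_/b_/c_init and the matching *_fini calls are stand-ins for
     * real setup steps; they are not existing kernel functions */
    int a_init(struct ctx *c);  void a_fini(struct ctx *c);
    int b_init(struct ctx *c);  void b_fini(struct ctx *c);
    int c_init(struct ctx *c);  void c_fini(struct ctx *c);

    int setup_hw(struct ctx *c)
    {
        int ret;

        ret = a_init(c);
        if (ret)
            return ret;             /* nothing to unwind yet */

        ret = b_init(c);
        if (ret)
            goto err_a;

        ret = c_init(c);
        if (ret)
            goto err_b;

        return 0;

    err_b:                          /* undo in reverse order */
        b_fini(c);
    err_a:
        a_fini(c);
        return ret;
    }
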
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8c170db8495d..0414228cd2b5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) | |||
728 | 728 | ||
729 | err = radix_tree_insert(handles_vma, handle, vma); | 729 | err = radix_tree_insert(handles_vma, handle, vma); |
730 | if (unlikely(err)) { | 730 | if (unlikely(err)) { |
731 | kfree(lut); | 731 | kmem_cache_free(eb->i915->luts, lut); |
732 | goto err_obj; | 732 | goto err_obj; |
733 | } | 733 | } |
734 | 734 | ||
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..f0519e31543a 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915) | |||
473 | spin_lock_irqsave(&i915->pmu.lock, flags); | 473 | spin_lock_irqsave(&i915->pmu.lock, flags); |
474 | spin_lock(&kdev->power.lock); | 474 | spin_lock(&kdev->power.lock); |
475 | 475 | ||
476 | if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) | 476 | /* |
477 | i915->pmu.suspended_jiffies_last = | 477 | * After the above branch intel_runtime_pm_get_if_in_use failed |
478 | kdev->power.suspended_jiffies; | 478 | * to get the runtime PM reference we cannot assume we are in |
479 | * runtime suspend since we can either: a) race with coming out | ||
480 | * of it before we took the power.lock, or b) there are other | ||
481 | * states than suspended which can bring us here. | ||
482 | * | ||
483 | * We need to double-check that we are indeed currently runtime | ||
484 | * suspended and if not we cannot do better than report the last | ||
485 | * known RC6 value. | ||
486 | */ | ||
487 | if (kdev->power.runtime_status == RPM_SUSPENDED) { | ||
488 | if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) | ||
489 | i915->pmu.suspended_jiffies_last = | ||
490 | kdev->power.suspended_jiffies; | ||
479 | 491 | ||
480 | val = kdev->power.suspended_jiffies - | 492 | val = kdev->power.suspended_jiffies - |
481 | i915->pmu.suspended_jiffies_last; | 493 | i915->pmu.suspended_jiffies_last; |
482 | val += jiffies - kdev->power.accounting_timestamp; | 494 | val += jiffies - kdev->power.accounting_timestamp; |
483 | 495 | ||
484 | spin_unlock(&kdev->power.lock); | 496 | val = jiffies_to_nsecs(val); |
497 | val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; | ||
485 | 498 | ||
486 | val = jiffies_to_nsecs(val); | 499 | i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; |
487 | val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; | 500 | } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { |
488 | i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; | 501 | val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; |
502 | } else { | ||
503 | val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; | ||
504 | } | ||
489 | 505 | ||
506 | spin_unlock(&kdev->power.lock); | ||
490 | spin_unlock_irqrestore(&i915->pmu.lock, flags); | 507 | spin_unlock_irqrestore(&i915->pmu.lock, flags); |
491 | } | 508 | } |
492 | 509 | ||
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 709d6ca68074..3ea566f99450 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, | |||
729 | struct drm_i915_private *dev_priv = kdev_to_i915(kdev); | 729 | struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
730 | u32 tmp; | 730 | u32 tmp; |
731 | 731 | ||
732 | if (!IS_GEN9_BC(dev_priv)) | 732 | if (!IS_GEN9(dev_priv)) |
733 | return; | 733 | return; |
734 | 734 | ||
735 | i915_audio_component_get_power(kdev); | 735 | i915_audio_component_get_power(kdev); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c5c7530ba157..447b721c3be9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1256 | return; | 1256 | return; |
1257 | 1257 | ||
1258 | aux_channel = child->aux_channel; | 1258 | aux_channel = child->aux_channel; |
1259 | ddc_pin = child->ddc_pin; | ||
1260 | 1259 | ||
1261 | is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; | 1260 | is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; |
1262 | is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; | 1261 | is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; |
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1303 | DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); | 1302 | DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); |
1304 | 1303 | ||
1305 | if (is_dvi) { | 1304 | if (is_dvi) { |
1306 | info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); | 1305 | ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); |
1307 | 1306 | if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { | |
1308 | sanitize_ddc_pin(dev_priv, port); | 1307 | info->alternate_ddc_pin = ddc_pin; |
1308 | sanitize_ddc_pin(dev_priv, port); | ||
1309 | } else { | ||
1310 | DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " | ||
1311 | "sticking to defaults\n", | ||
1312 | port_name(port), ddc_pin); | ||
1313 | } | ||
1309 | } | 1314 | } |
1310 | 1315 | ||
1311 | if (is_dp) { | 1316 | if (is_dp) { |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..e3a5f673ff67 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
577 | * know the next preemption status we see corresponds | 577 | * know the next preemption status we see corresponds |
578 | * to this ELSP update. | 578 | * to this ELSP update. |
579 | */ | 579 | */ |
580 | GEM_BUG_ON(!execlists_is_active(execlists, | ||
581 | EXECLISTS_ACTIVE_USER)); | ||
580 | GEM_BUG_ON(!port_count(&port[0])); | 582 | GEM_BUG_ON(!port_count(&port[0])); |
581 | if (port_count(&port[0]) > 1) | 583 | if (port_count(&port[0]) > 1) |
582 | goto unlock; | 584 | goto unlock; |
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) | |||
738 | memset(port, 0, sizeof(*port)); | 740 | memset(port, 0, sizeof(*port)); |
739 | port++; | 741 | port++; |
740 | } | 742 | } |
743 | |||
744 | execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); | ||
741 | } | 745 | } |
742 | 746 | ||
743 | static void execlists_cancel_requests(struct intel_engine_cs *engine) | 747 | static void execlists_cancel_requests(struct intel_engine_cs *engine) |
@@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data) | |||
1001 | 1005 | ||
1002 | if (fw) | 1006 | if (fw) |
1003 | intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); | 1007 | intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); |
1008 | |||
1009 | /* If the engine is now idle, so should be the flag; and vice versa. */ | ||
1010 | GEM_BUG_ON(execlists_is_active(&engine->execlists, | ||
1011 | EXECLISTS_ACTIVE_USER) == | ||
1012 | !port_isset(engine->execlists.port)); | ||
1004 | } | 1013 | } |
1005 | 1014 | ||
1006 | static void queue_request(struct intel_engine_cs *engine, | 1015 | static void queue_request(struct intel_engine_cs *engine, |
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 2decc8e2c79f..add9cc97a3b6 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c | |||
@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) | |||
195 | vc4_bo_set_label(obj, -1); | 195 | vc4_bo_set_label(obj, -1); |
196 | 196 | ||
197 | if (bo->validated_shader) { | 197 | if (bo->validated_shader) { |
198 | kfree(bo->validated_shader->uniform_addr_offsets); | ||
198 | kfree(bo->validated_shader->texture_samples); | 199 | kfree(bo->validated_shader->texture_samples); |
199 | kfree(bo->validated_shader); | 200 | kfree(bo->validated_shader); |
200 | bo->validated_shader = NULL; | 201 | bo->validated_shader = NULL; |
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) | |||
591 | } | 592 | } |
592 | 593 | ||
593 | if (bo->validated_shader) { | 594 | if (bo->validated_shader) { |
595 | kfree(bo->validated_shader->uniform_addr_offsets); | ||
594 | kfree(bo->validated_shader->texture_samples); | 596 | kfree(bo->validated_shader->texture_samples); |
595 | kfree(bo->validated_shader); | 597 | kfree(bo->validated_shader); |
596 | bo->validated_shader = NULL; | 598 | bo->validated_shader = NULL; |
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c | |||
@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) | |||
942 | fail: | 942 | fail: |
943 | kfree(validation_state.branch_targets); | 943 | kfree(validation_state.branch_targets); |
944 | if (validated_shader) { | 944 | if (validated_shader) { |
945 | kfree(validated_shader->uniform_addr_offsets); | ||
945 | kfree(validated_shader->texture_samples); | 946 | kfree(validated_shader->texture_samples); |
946 | kfree(validated_shader); | 947 | kfree(validated_shader); |
947 | } | 948 | } |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a3a7ead3012..0b5cc910f62e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -525,6 +525,9 @@ | |||
525 | #define I2C_VENDOR_ID_HANTICK 0x0911 | 525 | #define I2C_VENDOR_ID_HANTICK 0x0911 |
526 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 | 526 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 |
527 | 527 | ||
528 | #define I2C_VENDOR_ID_RAYD 0x2386 | ||
529 | #define I2C_PRODUCT_ID_RAYD_3118 0x3118 | ||
530 | |||
528 | #define USB_VENDOR_ID_HANWANG 0x0b57 | 531 | #define USB_VENDOR_ID_HANWANG 0x0b57 |
529 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 | 532 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 |
530 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff | 533 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 6836a856c243..930652c25120 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy, | |||
387 | break; | 387 | break; |
388 | 388 | ||
389 | case POWER_SUPPLY_PROP_CAPACITY: | 389 | case POWER_SUPPLY_PROP_CAPACITY: |
390 | if (dev->battery_report_type == HID_FEATURE_REPORT) { | 390 | if (dev->battery_status != HID_BATTERY_REPORTED && |
391 | !dev->battery_avoid_query) { | ||
391 | value = hidinput_query_battery_capacity(dev); | 392 | value = hidinput_query_battery_capacity(dev); |
392 | if (value < 0) | 393 | if (value < 0) |
393 | return value; | 394 | return value; |
@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy, | |||
403 | break; | 404 | break; |
404 | 405 | ||
405 | case POWER_SUPPLY_PROP_STATUS: | 406 | case POWER_SUPPLY_PROP_STATUS: |
406 | if (!dev->battery_reported && | 407 | if (dev->battery_status != HID_BATTERY_REPORTED && |
407 | dev->battery_report_type == HID_FEATURE_REPORT) { | 408 | !dev->battery_avoid_query) { |
408 | value = hidinput_query_battery_capacity(dev); | 409 | value = hidinput_query_battery_capacity(dev); |
409 | if (value < 0) | 410 | if (value < 0) |
410 | return value; | 411 | return value; |
411 | 412 | ||
412 | dev->battery_capacity = value; | 413 | dev->battery_capacity = value; |
413 | dev->battery_reported = true; | 414 | dev->battery_status = HID_BATTERY_QUERIED; |
414 | } | 415 | } |
415 | 416 | ||
416 | if (!dev->battery_reported) | 417 | if (dev->battery_status == HID_BATTERY_UNKNOWN) |
417 | val->intval = POWER_SUPPLY_STATUS_UNKNOWN; | 418 | val->intval = POWER_SUPPLY_STATUS_UNKNOWN; |
418 | else if (dev->battery_capacity == 100) | 419 | else if (dev->battery_capacity == 100) |
419 | val->intval = POWER_SUPPLY_STATUS_FULL; | 420 | val->intval = POWER_SUPPLY_STATUS_FULL; |
@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, | |||
486 | dev->battery_report_type = report_type; | 487 | dev->battery_report_type = report_type; |
487 | dev->battery_report_id = field->report->id; | 488 | dev->battery_report_id = field->report->id; |
488 | 489 | ||
490 | /* | ||
491 | * Stylus is normally not connected to the device and thus we | ||
492 | * can't query the device and get meaningful battery strength. | ||
493 | * We have to wait for the device to report it on its own. | ||
494 | */ | ||
495 | dev->battery_avoid_query = report_type == HID_INPUT_REPORT && | ||
496 | field->physical == HID_DG_STYLUS; | ||
497 | |||
489 | dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); | 498 | dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); |
490 | if (IS_ERR(dev->battery)) { | 499 | if (IS_ERR(dev->battery)) { |
491 | error = PTR_ERR(dev->battery); | 500 | error = PTR_ERR(dev->battery); |
@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value) | |||
530 | 539 | ||
531 | capacity = hidinput_scale_battery_capacity(dev, value); | 540 | capacity = hidinput_scale_battery_capacity(dev, value); |
532 | 541 | ||
533 | if (!dev->battery_reported || capacity != dev->battery_capacity) { | 542 | if (dev->battery_status != HID_BATTERY_REPORTED || |
543 | capacity != dev->battery_capacity) { | ||
534 | dev->battery_capacity = capacity; | 544 | dev->battery_capacity = capacity; |
535 | dev->battery_reported = true; | 545 | dev->battery_status = HID_BATTERY_REPORTED; |
536 | power_supply_changed(dev->battery); | 546 | power_supply_changed(dev->battery); |
537 | } | 547 | } |
538 | } | 548 | } |
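
The hid-input changes above replace the old battery_reported boolean with a three-way status (HID_BATTERY_UNKNOWN, HID_BATTERY_QUERIED, HID_BATTERY_REPORTED) plus an avoid-query flag for stylus batteries that usually cannot be polled. A toy model of that state handling, using illustrative names rather than the real struct hid_device fields:

    #include <stdbool.h>

    enum battery_status {
        BATTERY_UNKNOWN = 0,    /* nothing learned yet */
        BATTERY_QUERIED,        /* driver actively asked the device */
        BATTERY_REPORTED,       /* device reported a value on its own */
    };

    struct battery {
        enum battery_status status;
        int capacity;
        bool avoid_query;       /* e.g. detachable stylus: polling is useless */
    };

    /* query() stands in for the HID feature-report request */
    static int get_capacity(struct battery *b, int (*query)(void))
    {
        if (b->status != BATTERY_REPORTED && !b->avoid_query) {
            int v = query();

            if (v < 0)
                return v;
            b->capacity = v;
            if (b->status == BATTERY_UNKNOWN)
                b->status = BATTERY_QUERIED;
        }
        return b->capacity;
    }
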
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index fbfcc8009432..b39844adea47 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t | |||
192 | int ret = 0, len; | 192 | int ret = 0, len; |
193 | unsigned char report_number; | 193 | unsigned char report_number; |
194 | 194 | ||
195 | if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { | ||
196 | ret = -ENODEV; | ||
197 | goto out; | ||
198 | } | ||
199 | |||
195 | dev = hidraw_table[minor]->hid; | 200 | dev = hidraw_table[minor]->hid; |
196 | 201 | ||
197 | if (!dev->ll_driver->raw_request) { | 202 | if (!dev->ll_driver->raw_request) { |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 97689e98e53f..963328674e93 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -47,6 +47,7 @@ | |||
47 | /* quirks to control the device */ | 47 | /* quirks to control the device */ |
48 | #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) | 48 | #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) |
49 | #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) | 49 | #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) |
50 | #define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) | ||
50 | 51 | ||
51 | /* flags */ | 52 | /* flags */ |
52 | #define I2C_HID_STARTED 0 | 53 | #define I2C_HID_STARTED 0 |
@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks { | |||
171 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, | 172 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, |
172 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, | 173 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, |
173 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, | 174 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, |
175 | { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, | ||
176 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | ||
174 | { 0, 0 } | 177 | { 0, 0 } |
175 | }; | 178 | }; |
176 | 179 | ||
@@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev) | |||
1220 | if (ret) | 1223 | if (ret) |
1221 | return ret; | 1224 | return ret; |
1222 | 1225 | ||
1226 | /* RAYDIUM device (2386:3118) needs to re-send the report descriptor | ||
1227 | * command after resume; after that it behaves normally again. | ||
1228 | * Otherwise it issues too many incomplete reports. | ||
1229 | */ | ||
1230 | if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { | ||
1231 | ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); | ||
1232 | if (ret) | ||
1233 | return ret; | ||
1234 | } | ||
1235 | |||
1223 | if (hid->driver && hid->driver->reset_resume) { | 1236 | if (hid->driver && hid->driver->reset_resume) { |
1224 | ret = hid->driver->reset_resume(hid); | 1237 | ret = hid->driver->reset_resume(hid); |
1225 | return ret; | 1238 | return ret; |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 6da16a879c9f..5f947ec20dcb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id) | |||
689 | return tool_type; | 689 | return tool_type; |
690 | } | 690 | } |
691 | 691 | ||
692 | static void wacom_exit_report(struct wacom_wac *wacom) | ||
693 | { | ||
694 | struct input_dev *input = wacom->pen_input; | ||
695 | struct wacom_features *features = &wacom->features; | ||
696 | unsigned char *data = wacom->data; | ||
697 | int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; | ||
698 | |||
699 | /* | ||
700 | * Reset all states otherwise we lose the initial states | ||
701 | * when in-prox next time | ||
702 | */ | ||
703 | input_report_abs(input, ABS_X, 0); | ||
704 | input_report_abs(input, ABS_Y, 0); | ||
705 | input_report_abs(input, ABS_DISTANCE, 0); | ||
706 | input_report_abs(input, ABS_TILT_X, 0); | ||
707 | input_report_abs(input, ABS_TILT_Y, 0); | ||
708 | if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { | ||
709 | input_report_key(input, BTN_LEFT, 0); | ||
710 | input_report_key(input, BTN_MIDDLE, 0); | ||
711 | input_report_key(input, BTN_RIGHT, 0); | ||
712 | input_report_key(input, BTN_SIDE, 0); | ||
713 | input_report_key(input, BTN_EXTRA, 0); | ||
714 | input_report_abs(input, ABS_THROTTLE, 0); | ||
715 | input_report_abs(input, ABS_RZ, 0); | ||
716 | } else { | ||
717 | input_report_abs(input, ABS_PRESSURE, 0); | ||
718 | input_report_key(input, BTN_STYLUS, 0); | ||
719 | input_report_key(input, BTN_STYLUS2, 0); | ||
720 | input_report_key(input, BTN_TOUCH, 0); | ||
721 | input_report_abs(input, ABS_WHEEL, 0); | ||
722 | if (features->type >= INTUOS3S) | ||
723 | input_report_abs(input, ABS_Z, 0); | ||
724 | } | ||
725 | input_report_key(input, wacom->tool[idx], 0); | ||
726 | input_report_abs(input, ABS_MISC, 0); /* reset tool id */ | ||
727 | input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); | ||
728 | wacom->id[idx] = 0; | ||
729 | } | ||
730 | |||
692 | static int wacom_intuos_inout(struct wacom_wac *wacom) | 731 | static int wacom_intuos_inout(struct wacom_wac *wacom) |
693 | { | 732 | { |
694 | struct wacom_features *features = &wacom->features; | 733 | struct wacom_features *features = &wacom->features; |
@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
741 | if (!wacom->id[idx]) | 780 | if (!wacom->id[idx]) |
742 | return 1; | 781 | return 1; |
743 | 782 | ||
744 | /* | 783 | wacom_exit_report(wacom); |
745 | * Reset all states otherwise we lose the initial states | ||
746 | * when in-prox next time | ||
747 | */ | ||
748 | input_report_abs(input, ABS_X, 0); | ||
749 | input_report_abs(input, ABS_Y, 0); | ||
750 | input_report_abs(input, ABS_DISTANCE, 0); | ||
751 | input_report_abs(input, ABS_TILT_X, 0); | ||
752 | input_report_abs(input, ABS_TILT_Y, 0); | ||
753 | if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { | ||
754 | input_report_key(input, BTN_LEFT, 0); | ||
755 | input_report_key(input, BTN_MIDDLE, 0); | ||
756 | input_report_key(input, BTN_RIGHT, 0); | ||
757 | input_report_key(input, BTN_SIDE, 0); | ||
758 | input_report_key(input, BTN_EXTRA, 0); | ||
759 | input_report_abs(input, ABS_THROTTLE, 0); | ||
760 | input_report_abs(input, ABS_RZ, 0); | ||
761 | } else { | ||
762 | input_report_abs(input, ABS_PRESSURE, 0); | ||
763 | input_report_key(input, BTN_STYLUS, 0); | ||
764 | input_report_key(input, BTN_STYLUS2, 0); | ||
765 | input_report_key(input, BTN_TOUCH, 0); | ||
766 | input_report_abs(input, ABS_WHEEL, 0); | ||
767 | if (features->type >= INTUOS3S) | ||
768 | input_report_abs(input, ABS_Z, 0); | ||
769 | } | ||
770 | input_report_key(input, wacom->tool[idx], 0); | ||
771 | input_report_abs(input, ABS_MISC, 0); /* reset tool id */ | ||
772 | input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); | ||
773 | wacom->id[idx] = 0; | ||
774 | return 2; | 784 | return 2; |
775 | } | 785 | } |
776 | 786 | ||
@@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) | |||
1235 | if (!valid) | 1245 | if (!valid) |
1236 | continue; | 1246 | continue; |
1237 | 1247 | ||
1248 | if (!prox) { | ||
1249 | wacom->shared->stylus_in_proximity = false; | ||
1250 | wacom_exit_report(wacom); | ||
1251 | input_sync(pen_input); | ||
1252 | return; | ||
1253 | } | ||
1238 | if (range) { | 1254 | if (range) { |
1239 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); | 1255 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); |
1240 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); | 1256 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); |
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c index a6e87076acc2..5336bbdbfdc5 100644 --- a/drivers/isdn/mISDN/dsp_hwec.c +++ b/drivers/isdn/mISDN/dsp_hwec.c | |||
@@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) | |||
68 | goto _do; | 68 | goto _do; |
69 | 69 | ||
70 | { | 70 | { |
71 | char _dup[len + 1]; | ||
72 | char *dup, *tok, *name, *val; | 71 | char *dup, *tok, *name, *val; |
73 | int tmp; | 72 | int tmp; |
74 | 73 | ||
75 | strcpy(_dup, arg); | 74 | dup = kstrdup(arg, GFP_ATOMIC); |
76 | dup = _dup; | 75 | if (!dup) |
76 | return; | ||
77 | 77 | ||
78 | while ((tok = strsep(&dup, ","))) { | 78 | while ((tok = strsep(&dup, ","))) { |
79 | if (!strlen(tok)) | 79 | if (!strlen(tok)) |
@@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) | |||
89 | deftaps = tmp; | 89 | deftaps = tmp; |
90 | } | 90 | } |
91 | } | 91 | } |
92 | |||
93 | kfree(dup); | ||
92 | } | 94 | } |
93 | 95 | ||
94 | _do: | 96 | _do: |
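
The dsp_hwec change above swaps an on-stack VLA for a kstrdup()'d heap copy that strsep() then tokenizes. A small userspace sketch of the same duplicate-and-tokenize pattern; note that it keeps the original pointer for the final free(), since strsep() advances (and eventually NULLs) the cursor it is given:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void parse_params(const char *arg)
    {
        char *dup, *cur, *tok;

        dup = strdup(arg);          /* heap copy instead of an on-stack VLA */
        if (!dup)
            return;

        cur = dup;                  /* strsep() advances this cursor */
        while ((tok = strsep(&cur, ","))) {
            if (!*tok)
                continue;
            printf("option: %s\n", tok);
        }

        free(dup);                  /* free the original pointer, not the cursor */
    }

    int main(void)
    {
        parse_params("deftaps=128,foo=1");  /* arbitrary sample string */
        return 0;
    }
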
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 21d50e4cc5e1..b05022f94f18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
@@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, | |||
279 | u16 timebase, u8 *buf, int len) | 279 | u16 timebase, u8 *buf, int len) |
280 | { | 280 | { |
281 | u8 *p; | 281 | u8 *p; |
282 | u8 frame[len + 32]; | 282 | u8 frame[MAX_DFRAME_LEN_L1 + 32]; |
283 | struct socket *socket = NULL; | 283 | struct socket *socket = NULL; |
284 | 284 | ||
285 | if (debug & DEBUG_L1OIP_MSG) | 285 | if (debug & DEBUG_L1OIP_MSG) |
@@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) | |||
902 | p = skb->data; | 902 | p = skb->data; |
903 | l = skb->len; | 903 | l = skb->len; |
904 | while (l) { | 904 | while (l) { |
905 | ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; | 905 | /* |
906 | * This is technically bounded by L1OIP_MAX_PERFRAME but | ||
907 | * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME | ||
908 | */ | ||
909 | ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; | ||
906 | l1oip_socket_send(hc, 0, dch->slot, 0, | 910 | l1oip_socket_send(hc, 0, dch->slot, 0, |
907 | hc->chan[dch->slot].tx_counter++, p, ll); | 911 | hc->chan[dch->slot].tx_counter++, p, ll); |
908 | p += ll; | 912 | p += ll; |
@@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) | |||
1140 | p = skb->data; | 1144 | p = skb->data; |
1141 | l = skb->len; | 1145 | l = skb->len; |
1142 | while (l) { | 1146 | while (l) { |
1143 | ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; | 1147 | /* |
1148 | * This is technically bounded by L1OIP_MAX_PERFRAME but | ||
1149 | * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME | ||
1150 | */ | ||
1151 | ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; | ||
1144 | l1oip_socket_send(hc, hc->codec, bch->slot, 0, | 1152 | l1oip_socket_send(hc, hc->codec, bch->slot, 0, |
1145 | hc->chan[bch->slot].tx_counter, p, ll); | 1153 | hc->chan[bch->slot].tx_counter, p, ll); |
1146 | hc->chan[bch->slot].tx_counter += ll; | 1154 | hc->chan[bch->slot].tx_counter += ll; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 3bea45e8ccff..c208c01f63a5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr) | |||
9256 | check_sb_changes(mddev, rdev); | 9256 | check_sb_changes(mddev, rdev); |
9257 | 9257 | ||
9258 | /* Read all rdev's to update recovery_offset */ | 9258 | /* Read all rdev's to update recovery_offset */ |
9259 | rdev_for_each_rcu(rdev, mddev) | 9259 | rdev_for_each_rcu(rdev, mddev) { |
9260 | read_rdev(mddev, rdev); | 9260 | if (!test_bit(Faulty, &rdev->flags)) |
9261 | read_rdev(mddev, rdev); | ||
9262 | } | ||
9261 | } | 9263 | } |
9262 | EXPORT_SYMBOL(md_reload_sb); | 9264 | EXPORT_SYMBOL(md_reload_sb); |
9263 | 9265 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2943fb74056..e9e3308cb0a7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf) | |||
854 | * there is no normal IO happening. It must arrange to call | 854 | * there is no normal IO happening. It must arrange to call |
855 | * lower_barrier when the particular background IO completes. | 855 | * lower_barrier when the particular background IO completes. |
856 | */ | 856 | */ |
857 | static void raise_barrier(struct r1conf *conf, sector_t sector_nr) | 857 | static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr) |
858 | { | 858 | { |
859 | int idx = sector_to_idx(sector_nr); | 859 | int idx = sector_to_idx(sector_nr); |
860 | 860 | ||
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr) | |||
885 | * max resync count which allowed on current I/O barrier bucket. | 885 | * max resync count which allowed on current I/O barrier bucket. |
886 | */ | 886 | */ |
887 | wait_event_lock_irq(conf->wait_barrier, | 887 | wait_event_lock_irq(conf->wait_barrier, |
888 | !conf->array_frozen && | 888 | (!conf->array_frozen && |
889 | !atomic_read(&conf->nr_pending[idx]) && | 889 | !atomic_read(&conf->nr_pending[idx]) && |
890 | atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, | 890 | atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || |
891 | test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), | ||
891 | conf->resync_lock); | 892 | conf->resync_lock); |
892 | 893 | ||
894 | if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { | ||
895 | atomic_dec(&conf->barrier[idx]); | ||
896 | spin_unlock_irq(&conf->resync_lock); | ||
897 | wake_up(&conf->wait_barrier); | ||
898 | return -EINTR; | ||
899 | } | ||
900 | |||
893 | atomic_inc(&conf->nr_sync_pending); | 901 | atomic_inc(&conf->nr_sync_pending); |
894 | spin_unlock_irq(&conf->resync_lock); | 902 | spin_unlock_irq(&conf->resync_lock); |
903 | |||
904 | return 0; | ||
895 | } | 905 | } |
896 | 906 | ||
897 | static void lower_barrier(struct r1conf *conf, sector_t sector_nr) | 907 | static void lower_barrier(struct r1conf *conf, sector_t sector_nr) |
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, | |||
1092 | goto skip_copy; | 1102 | goto skip_copy; |
1093 | } | 1103 | } |
1094 | 1104 | ||
1105 | behind_bio->bi_write_hint = bio->bi_write_hint; | ||
1106 | |||
1095 | while (i < vcnt && size) { | 1107 | while (i < vcnt && size) { |
1096 | struct page *page; | 1108 | struct page *page; |
1097 | int len = min_t(int, PAGE_SIZE, size); | 1109 | int len = min_t(int, PAGE_SIZE, size); |
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
2662 | 2674 | ||
2663 | bitmap_cond_end_sync(mddev->bitmap, sector_nr, | 2675 | bitmap_cond_end_sync(mddev->bitmap, sector_nr, |
2664 | mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); | 2676 | mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); |
2665 | r1_bio = raid1_alloc_init_r1buf(conf); | ||
2666 | 2677 | ||
2667 | raise_barrier(conf, sector_nr); | 2678 | |
2679 | if (raise_barrier(conf, sector_nr)) | ||
2680 | return 0; | ||
2681 | |||
2682 | r1_bio = raid1_alloc_init_r1buf(conf); | ||
2668 | 2683 | ||
2669 | rcu_read_lock(); | 2684 | rcu_read_lock(); |
2670 | /* | 2685 | /* |
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 8e0acd197c43..6af946d16d24 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/bitops.h> | ||
12 | #include <linux/device.h> | 13 | #include <linux/device.h> |
13 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
14 | #include <linux/io-64-nonatomic-hi-lo.h> | 15 | #include <linux/io-64-nonatomic-hi-lo.h> |
@@ -62,6 +63,17 @@ | |||
62 | * need a custom accessor. | 63 | * need a custom accessor. |
63 | */ | 64 | */ |
64 | 65 | ||
66 | static unsigned long global_flags; | ||
67 | /* | ||
68 | * Workaround to avoid using the RX DMAC from multiple channels. | ||
69 | * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use | ||
70 | * the RX DMAC simultaneously, sometimes hundreds of bytes of data are | ||
71 | * not stored into system memory even though the DMAC interrupt fired. | ||
72 | * So this driver uses only one RX DMAC channel at a time. | ||
73 | */ | ||
74 | #define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 | ||
75 | #define SDHI_INTERNAL_DMAC_RX_IN_USE 1 | ||
76 | |||
65 | /* Definitions for sampling clocks */ | 77 | /* Definitions for sampling clocks */ |
66 | static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { | 78 | static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { |
67 | { | 79 | { |
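The comment introduced above explains that on the affected SoCs only one SDHI channel may own the RX DMAC at a time, enforced through two bits in global_flags. Here is a hedged, self-contained sketch of that gate using C11 atomics in place of the kernel bitops; dma_start_rx() and dma_complete_rx() are invented names and the flag values are placeholders.

#include <stdatomic.h>
#include <stdbool.h>

#define ONE_RX_ONLY  (1UL << 0)   /* SoC quirk: serialise RX DMA          */
#define RX_IN_USE    (1UL << 1)   /* RX DMAC currently owned by a channel */

static _Atomic unsigned long global_flags;

/* Returns true if DMA may be used, false if the caller must fall back to PIO. */
static bool dma_start_rx(void)
{
	unsigned long old;

	if (!(atomic_load(&global_flags) & ONE_RX_ONLY))
		return true;            /* unaffected SoC: no restriction */

	old = atomic_fetch_or(&global_flags, RX_IN_USE);
	return !(old & RX_IN_USE);      /* lost the race: force PIO */
}

/* Release ownership on completion or abort, as the hunks below do. */
static void dma_complete_rx(void)
{
	atomic_fetch_and(&global_flags, ~RX_IN_USE);
}

The "goto force_pio" and clear_bit() calls added later in this file are the kernel-side equivalents of the false return and the release helper.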
@@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { | |||
126 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, | 138 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, |
127 | RST_RESERVED_BITS | val); | 139 | RST_RESERVED_BITS | val); |
128 | 140 | ||
141 | if (host->data && host->data->flags & MMC_DATA_READ) | ||
142 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); | ||
143 | |||
129 | renesas_sdhi_internal_dmac_enable_dma(host, true); | 144 | renesas_sdhi_internal_dmac_enable_dma(host, true); |
130 | } | 145 | } |
131 | 146 | ||
@@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, | |||
155 | if (data->flags & MMC_DATA_READ) { | 170 | if (data->flags & MMC_DATA_READ) { |
156 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; | 171 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; |
157 | dir = DMA_FROM_DEVICE; | 172 | dir = DMA_FROM_DEVICE; |
173 | if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && | ||
174 | test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) | ||
175 | goto force_pio; | ||
158 | } else { | 176 | } else { |
159 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; | 177 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; |
160 | dir = DMA_TO_DEVICE; | 178 | dir = DMA_TO_DEVICE; |
@@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) | |||
208 | renesas_sdhi_internal_dmac_enable_dma(host, false); | 226 | renesas_sdhi_internal_dmac_enable_dma(host, false); |
209 | dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); | 227 | dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); |
210 | 228 | ||
229 | if (dir == DMA_FROM_DEVICE) | ||
230 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); | ||
231 | |||
211 | tmio_mmc_do_data_irq(host); | 232 | tmio_mmc_do_data_irq(host); |
212 | out: | 233 | out: |
213 | spin_unlock_irq(&host->lock); | 234 | spin_unlock_irq(&host->lock); |
@@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { | |||
251 | * implementation as others may use a different implementation. | 272 | * implementation as others may use a different implementation. |
252 | */ | 273 | */ |
253 | static const struct soc_device_attribute gen3_soc_whitelist[] = { | 274 | static const struct soc_device_attribute gen3_soc_whitelist[] = { |
254 | { .soc_id = "r8a7795", .revision = "ES1.*" }, | 275 | { .soc_id = "r8a7795", .revision = "ES1.*", |
255 | { .soc_id = "r8a7795", .revision = "ES2.0" }, | 276 | .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, |
256 | { .soc_id = "r8a7796", .revision = "ES1.0" }, | 277 | { .soc_id = "r8a7795", .revision = "ES2.0" }, |
257 | { .soc_id = "r8a77995", .revision = "ES1.0" }, | 278 | { .soc_id = "r8a7796", .revision = "ES1.0", |
258 | { /* sentinel */ } | 279 | .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, |
280 | { .soc_id = "r8a77995", .revision = "ES1.0" }, | ||
281 | { /* sentinel */ } | ||
259 | }; | 282 | }; |
260 | 283 | ||
261 | static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) | 284 | static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) |
262 | { | 285 | { |
263 | if (!soc_device_match(gen3_soc_whitelist)) | 286 | const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); |
287 | |||
288 | if (!soc) | ||
264 | return -ENODEV; | 289 | return -ENODEV; |
265 | 290 | ||
291 | global_flags |= (unsigned long)soc->data; | ||
292 | |||
266 | return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); | 293 | return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); |
267 | } | 294 | } |
268 | 295 | ||
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 787434e5589d..78c25ad35fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev) | |||
1312 | pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); | 1312 | pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); |
1313 | } | 1313 | } |
1314 | 1314 | ||
1315 | static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) | 1315 | static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) |
1316 | { | 1316 | { |
1317 | struct sdhci_pci_slot *slot = sdhci_priv(host); | 1317 | struct sdhci_pci_slot *slot = sdhci_priv(host); |
1318 | struct pci_dev *pdev = slot->chip->pdev; | 1318 | struct pci_dev *pdev = slot->chip->pdev; |
@@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) | |||
1351 | return 0; | 1351 | return 0; |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) | ||
1355 | { | ||
1356 | struct sdhci_host *host = mmc_priv(mmc); | ||
1357 | |||
1358 | /* AMD requires custom HS200 tuning */ | ||
1359 | if (host->timing == MMC_TIMING_MMC_HS200) | ||
1360 | return amd_execute_tuning_hs200(host, opcode); | ||
1361 | |||
1362 | /* Otherwise perform standard SDHCI tuning */ | ||
1363 | return sdhci_execute_tuning(mmc, opcode); | ||
1364 | } | ||
1365 | |||
1366 | static int amd_probe_slot(struct sdhci_pci_slot *slot) | ||
1367 | { | ||
1368 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; | ||
1369 | |||
1370 | ops->execute_tuning = amd_execute_tuning; | ||
1371 | |||
1372 | return 0; | ||
1373 | } | ||
1374 | |||
1354 | static int amd_probe(struct sdhci_pci_chip *chip) | 1375 | static int amd_probe(struct sdhci_pci_chip *chip) |
1355 | { | 1376 | { |
1356 | struct pci_dev *smbus_dev; | 1377 | struct pci_dev *smbus_dev; |
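The hunk above splits tuning into a thin dispatcher: amd_execute_tuning() takes the vendor path only for HS200 and falls back to sdhci_execute_tuning() otherwise, and amd_probe_slot() installs the dispatcher in mmc_host_ops. A toy sketch of that dispatch-through-a-function-pointer arrangement, assuming nothing about the real sdhci/mmc types; every name below is hypothetical.

#include <stdio.h>

enum timing { TIMING_LEGACY, TIMING_HS200 };

struct host {
	enum timing timing;
	int (*execute_tuning)(struct host *h, unsigned int opcode);
};

static int generic_tuning(struct host *h, unsigned int opcode)
{
	(void)h;
	printf("generic tuning, opcode %u\n", opcode);
	return 0;
}

static int vendor_hs200_tuning(struct host *h, unsigned int opcode)
{
	(void)h;
	printf("vendor HS200 tuning, opcode %u\n", opcode);
	return 0;
}

/* The wrapper the probe hook installs in place of the default. */
static int tuning_dispatch(struct host *h, unsigned int opcode)
{
	if (h->timing == TIMING_HS200)
		return vendor_hs200_tuning(h, opcode);
	return generic_tuning(h, opcode);
}

int main(void)
{
	struct host h = { .timing = TIMING_HS200,
			  .execute_tuning = tuning_dispatch };
	return h.execute_tuning(&h, 21);
}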
@@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = { | |||
1385 | .set_bus_width = sdhci_set_bus_width, | 1406 | .set_bus_width = sdhci_set_bus_width, |
1386 | .reset = sdhci_reset, | 1407 | .reset = sdhci_reset, |
1387 | .set_uhs_signaling = sdhci_set_uhs_signaling, | 1408 | .set_uhs_signaling = sdhci_set_uhs_signaling, |
1388 | .platform_execute_tuning = amd_execute_tuning, | ||
1389 | }; | 1409 | }; |
1390 | 1410 | ||
1391 | static const struct sdhci_pci_fixes sdhci_amd = { | 1411 | static const struct sdhci_pci_fixes sdhci_amd = { |
1392 | .probe = amd_probe, | 1412 | .probe = amd_probe, |
1393 | .ops = &amd_sdhci_pci_ops, | 1413 | .ops = &amd_sdhci_pci_ops, |
1414 | .probe_slot = amd_probe_slot, | ||
1394 | }; | 1415 | }; |
1395 | 1416 | ||
1396 | static const struct pci_device_id pci_ids[] = { | 1417 | static const struct pci_device_id pci_ids[] = { |
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index ac7694c71266..a036c490b7ce 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c | |||
@@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, | |||
285 | struct sk_buff_head *rxq) | 285 | struct sk_buff_head *rxq) |
286 | { | 286 | { |
287 | u16 buf[4] = { 0 }, status, seq_id; | 287 | u16 buf[4] = { 0 }, status, seq_id; |
288 | u64 ns, timelo, timehi; | ||
289 | struct skb_shared_hwtstamps *shwt; | 288 | struct skb_shared_hwtstamps *shwt; |
289 | struct sk_buff_head received; | ||
290 | u64 ns, timelo, timehi; | ||
291 | unsigned long flags; | ||
290 | int err; | 292 | int err; |
291 | 293 | ||
294 | /* The latched timestamp belongs to one of the received frames. */ | ||
295 | __skb_queue_head_init(&received); | ||
296 | spin_lock_irqsave(&rxq->lock, flags); | ||
297 | skb_queue_splice_tail_init(rxq, &received); | ||
298 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
299 | |||
292 | mutex_lock(&chip->reg_lock); | 300 | mutex_lock(&chip->reg_lock); |
293 | err = mv88e6xxx_port_ptp_read(chip, ps->port_id, | 301 | err = mv88e6xxx_port_ptp_read(chip, ps->port_id, |
294 | reg, buf, ARRAY_SIZE(buf)); | 302 | reg, buf, ARRAY_SIZE(buf)); |
@@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, | |||
311 | /* Since the device can only handle one time stamp at a time, | 319 | /* Since the device can only handle one time stamp at a time, |
312 | * we purge any extra frames from the queue. | 320 | * we purge any extra frames from the queue. |
313 | */ | 321 | */ |
314 | for ( ; skb; skb = skb_dequeue(rxq)) { | 322 | for ( ; skb; skb = __skb_dequeue(&received)) { |
315 | if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { | 323 | if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { |
316 | ns = timehi << 16 | timelo; | 324 | ns = timehi << 16 | timelo; |
317 | 325 | ||
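The mv88e6xxx change above snapshots the pending RX queue onto a private list while holding rxq->lock, then matches the latched timestamp against the snapshot with the lock dropped. A simplified sketch of that splice-then-process pattern on a mutex-protected singly linked list; the list and node types are placeholders, not skb machinery.

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct locked_list {
	pthread_mutex_t lock;
	struct node *head;
};

/* Detach everything currently queued while holding the lock ... */
static struct node *splice_all(struct locked_list *q)
{
	struct node *snapshot;

	pthread_mutex_lock(&q->lock);
	snapshot = q->head;
	q->head = NULL;
	pthread_mutex_unlock(&q->lock);
	return snapshot;
}

/* ... then walk the private snapshot with no lock held. */
static void process(struct node *n)
{
	while (n) {
		struct node *next = n->next;
		/* match the timestamp / free the entry here */
		n = next;
	}
}

int main(void)
{
	struct locked_list q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct node a = { NULL }, b = { &a };

	q.head = &b;                 /* two queued entries */
	process(splice_all(&q));     /* q.head is now NULL */
	return q.head != NULL;
}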
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f622ca2a64f..8ba14ae00e8f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | |||
@@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) | |||
1927 | return retval; | 1927 | return retval; |
1928 | } | 1928 | } |
1929 | 1929 | ||
1930 | static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) | 1930 | static void bnxt_get_pkgver(struct net_device *dev) |
1931 | { | 1931 | { |
1932 | struct bnxt *bp = netdev_priv(dev); | ||
1932 | u16 index = 0; | 1933 | u16 index = 0; |
1933 | u32 datalen; | 1934 | char *pkgver; |
1935 | u32 pkglen; | ||
1936 | u8 *pkgbuf; | ||
1937 | int len; | ||
1934 | 1938 | ||
1935 | if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, | 1939 | if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, |
1936 | BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, | 1940 | BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, |
1937 | &index, NULL, &datalen) != 0) | 1941 | &index, NULL, &pkglen) != 0) |
1938 | return NULL; | 1942 | return; |
1939 | 1943 | ||
1940 | memset(buf, 0, buflen); | 1944 | pkgbuf = kzalloc(pkglen, GFP_KERNEL); |
1941 | if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) | 1945 | if (!pkgbuf) { |
1942 | return NULL; | 1946 | dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", |
1947 | pkglen); | ||
1948 | return; | ||
1949 | } | ||
1950 | |||
1951 | if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) | ||
1952 | goto err; | ||
1943 | 1953 | ||
1944 | return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, | 1954 | pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, |
1945 | datalen); | 1955 | pkglen); |
1956 | if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { | ||
1957 | len = strlen(bp->fw_ver_str); | ||
1958 | snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, | ||
1959 | "/pkg %s", pkgver); | ||
1960 | } | ||
1961 | err: | ||
1962 | kfree(pkgbuf); | ||
1946 | } | 1963 | } |
1947 | 1964 | ||
1948 | static int bnxt_get_eeprom(struct net_device *dev, | 1965 | static int bnxt_get_eeprom(struct net_device *dev, |
@@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp) | |||
2615 | struct hwrm_selftest_qlist_input req = {0}; | 2632 | struct hwrm_selftest_qlist_input req = {0}; |
2616 | struct bnxt_test_info *test_info; | 2633 | struct bnxt_test_info *test_info; |
2617 | struct net_device *dev = bp->dev; | 2634 | struct net_device *dev = bp->dev; |
2618 | char *pkglog; | ||
2619 | int i, rc; | 2635 | int i, rc; |
2620 | 2636 | ||
2621 | pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); | 2637 | bnxt_get_pkgver(dev); |
2622 | if (pkglog) { | ||
2623 | char *pkgver; | ||
2624 | int len; | ||
2625 | 2638 | ||
2626 | pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); | ||
2627 | if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { | ||
2628 | len = strlen(bp->fw_ver_str); | ||
2629 | snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, | ||
2630 | "/pkg %s", pkgver); | ||
2631 | } | ||
2632 | kfree(pkglog); | ||
2633 | } | ||
2634 | if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) | 2639 | if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) |
2635 | return; | 2640 | return; |
2636 | 2641 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 73f2249555b5..83444811d3c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | |||
@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { | |||
59 | #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) | 59 | #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) |
60 | #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) | 60 | #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) |
61 | 61 | ||
62 | #define BNX_PKG_LOG_MAX_LENGTH 4096 | ||
63 | |||
64 | enum bnxnvm_pkglog_field_index { | 62 | enum bnxnvm_pkglog_field_index { |
65 | BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, | 63 | BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, |
66 | BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, | 64 | BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af011..fa5b30f547f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
@@ -87,7 +87,7 @@ do { \ | |||
87 | 87 | ||
88 | #define HNAE_AE_REGISTER 0x1 | 88 | #define HNAE_AE_REGISTER 0x1 |
89 | 89 | ||
90 | #define RCB_RING_NAME_LEN 16 | 90 | #define RCB_RING_NAME_LEN (IFNAMSIZ + 4) |
91 | 91 | ||
92 | #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 | 92 | #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 |
93 | #define HNAE_LOW_LATENCY_COAL_PARAM 80 | 93 | #define HNAE_LOW_LATENCY_COAL_PARAM 80 |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index aad5658d79d5..2df01ad98df7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev) | |||
794 | { | 794 | { |
795 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 795 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
796 | unsigned long timeout = msecs_to_jiffies(30000); | 796 | unsigned long timeout = msecs_to_jiffies(30000); |
797 | struct device *dev = &adapter->vdev->dev; | 797 | int retry_count = 0; |
798 | int rc; | 798 | int rc; |
799 | 799 | ||
800 | do { | 800 | do { |
801 | if (adapter->renegotiate) { | 801 | if (retry_count > IBMVNIC_MAX_QUEUES) { |
802 | adapter->renegotiate = false; | 802 | netdev_warn(netdev, "Login attempts exceeded\n"); |
803 | return -1; | ||
804 | } | ||
805 | |||
806 | adapter->init_done_rc = 0; | ||
807 | reinit_completion(&adapter->init_done); | ||
808 | rc = send_login(adapter); | ||
809 | if (rc) { | ||
810 | netdev_warn(netdev, "Unable to login\n"); | ||
811 | return rc; | ||
812 | } | ||
813 | |||
814 | if (!wait_for_completion_timeout(&adapter->init_done, | ||
815 | timeout)) { | ||
816 | netdev_warn(netdev, "Login timed out\n"); | ||
817 | return -1; | ||
818 | } | ||
819 | |||
820 | if (adapter->init_done_rc == PARTIALSUCCESS) { | ||
821 | retry_count++; | ||
803 | release_sub_crqs(adapter, 1); | 822 | release_sub_crqs(adapter, 1); |
804 | 823 | ||
824 | adapter->init_done_rc = 0; | ||
805 | reinit_completion(&adapter->init_done); | 825 | reinit_completion(&adapter->init_done); |
806 | send_cap_queries(adapter); | 826 | send_cap_queries(adapter); |
807 | if (!wait_for_completion_timeout(&adapter->init_done, | 827 | if (!wait_for_completion_timeout(&adapter->init_done, |
808 | timeout)) { | 828 | timeout)) { |
809 | dev_err(dev, "Capabilities query timeout\n"); | 829 | netdev_warn(netdev, |
830 | "Capabilities query timed out\n"); | ||
810 | return -1; | 831 | return -1; |
811 | } | 832 | } |
833 | |||
812 | rc = init_sub_crqs(adapter); | 834 | rc = init_sub_crqs(adapter); |
813 | if (rc) { | 835 | if (rc) { |
814 | dev_err(dev, | 836 | netdev_warn(netdev, |
815 | "Initialization of SCRQ's failed\n"); | 837 | "SCRQ initialization failed\n"); |
816 | return -1; | 838 | return -1; |
817 | } | 839 | } |
840 | |||
818 | rc = init_sub_crq_irqs(adapter); | 841 | rc = init_sub_crq_irqs(adapter); |
819 | if (rc) { | 842 | if (rc) { |
820 | dev_err(dev, | 843 | netdev_warn(netdev, |
821 | "Initialization of SCRQ's irqs failed\n"); | 844 | "SCRQ irq initialization failed\n"); |
822 | return -1; | 845 | return -1; |
823 | } | 846 | } |
824 | } | 847 | } else if (adapter->init_done_rc) { |
825 | 848 | netdev_warn(netdev, "Adapter login failed\n"); | |
826 | reinit_completion(&adapter->init_done); | ||
827 | rc = send_login(adapter); | ||
828 | if (rc) { | ||
829 | dev_err(dev, "Unable to attempt device login\n"); | ||
830 | return rc; | ||
831 | } else if (!wait_for_completion_timeout(&adapter->init_done, | ||
832 | timeout)) { | ||
833 | dev_err(dev, "Login timeout\n"); | ||
834 | return -1; | 849 | return -1; |
835 | } | 850 | } |
836 | } while (adapter->renegotiate); | 851 | } while (adapter->init_done_rc == PARTIALSUCCESS); |
837 | 852 | ||
838 | /* handle pending MAC address changes after successful login */ | 853 | /* handle pending MAC address changes after successful login */ |
839 | if (adapter->mac_change_pending) { | 854 | if (adapter->mac_change_pending) { |
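The reworked login loop above retries only when the completion code is PARTIALSUCCESS, caps the retries at IBMVNIC_MAX_QUEUES, and treats any other nonzero code as a hard failure. A small sketch of that bounded-retry shape; try_login(), MAX_RETRIES and the return-code values are stand-ins chosen for illustration.

#include <stdio.h>

#define MAX_RETRIES 16   /* stand-in for IBMVNIC_MAX_QUEUES */
#define PARTIAL      2   /* stand-in for PARTIALSUCCESS     */

/* Pretend login: 0 on success, PARTIAL if resources must be renegotiated,
 * any other value on a hard error. */
static int try_login(int attempt)
{
	return attempt < 3 ? PARTIAL : 0;
}

static int login(void)
{
	int retries = 0, rc;

	do {
		if (retries > MAX_RETRIES) {
			fprintf(stderr, "login attempts exceeded\n");
			return -1;
		}
		rc = try_login(retries);
		if (rc == PARTIAL) {
			retries++;
			/* release and re-query resources here, then retry */
		} else if (rc) {
			fprintf(stderr, "login failed\n");
			return -1;
		}
	} while (rc == PARTIAL);

	return 0;
}

int main(void)
{
	return login();
}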
@@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev) | |||
1034 | netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); | 1049 | netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); |
1035 | if (prev_state == VNIC_CLOSED) | 1050 | if (prev_state == VNIC_CLOSED) |
1036 | enable_irq(adapter->rx_scrq[i]->irq); | 1051 | enable_irq(adapter->rx_scrq[i]->irq); |
1037 | else | 1052 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); |
1038 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); | ||
1039 | } | 1053 | } |
1040 | 1054 | ||
1041 | for (i = 0; i < adapter->req_tx_queues; i++) { | 1055 | for (i = 0; i < adapter->req_tx_queues; i++) { |
1042 | netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); | 1056 | netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); |
1043 | if (prev_state == VNIC_CLOSED) | 1057 | if (prev_state == VNIC_CLOSED) |
1044 | enable_irq(adapter->tx_scrq[i]->irq); | 1058 | enable_irq(adapter->tx_scrq[i]->irq); |
1045 | else | 1059 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); |
1046 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); | ||
1047 | } | 1060 | } |
1048 | 1061 | ||
1049 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); | 1062 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); |
@@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) | |||
1184 | if (adapter->tx_scrq[i]->irq) { | 1197 | if (adapter->tx_scrq[i]->irq) { |
1185 | netdev_dbg(netdev, | 1198 | netdev_dbg(netdev, |
1186 | "Disabling tx_scrq[%d] irq\n", i); | 1199 | "Disabling tx_scrq[%d] irq\n", i); |
1200 | disable_scrq_irq(adapter, adapter->tx_scrq[i]); | ||
1187 | disable_irq(adapter->tx_scrq[i]->irq); | 1201 | disable_irq(adapter->tx_scrq[i]->irq); |
1188 | } | 1202 | } |
1189 | } | 1203 | } |
@@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) | |||
1193 | if (adapter->rx_scrq[i]->irq) { | 1207 | if (adapter->rx_scrq[i]->irq) { |
1194 | netdev_dbg(netdev, | 1208 | netdev_dbg(netdev, |
1195 | "Disabling rx_scrq[%d] irq\n", i); | 1209 | "Disabling rx_scrq[%d] irq\n", i); |
1210 | disable_scrq_irq(adapter, adapter->rx_scrq[i]); | ||
1196 | disable_irq(adapter->rx_scrq[i]->irq); | 1211 | disable_irq(adapter->rx_scrq[i]->irq); |
1197 | } | 1212 | } |
1198 | } | 1213 | } |
@@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1828 | for (i = 0; i < adapter->req_rx_queues; i++) | 1843 | for (i = 0; i < adapter->req_rx_queues; i++) |
1829 | napi_schedule(&adapter->napi[i]); | 1844 | napi_schedule(&adapter->napi[i]); |
1830 | 1845 | ||
1831 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) | 1846 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && |
1847 | adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) | ||
1832 | netdev_notify_peers(netdev); | 1848 | netdev_notify_peers(netdev); |
1833 | 1849 | ||
1834 | netif_carrier_on(netdev); | 1850 | netif_carrier_on(netdev); |
@@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, | |||
2601 | { | 2617 | { |
2602 | struct device *dev = &adapter->vdev->dev; | 2618 | struct device *dev = &adapter->vdev->dev; |
2603 | unsigned long rc; | 2619 | unsigned long rc; |
2620 | u64 val; | ||
2604 | 2621 | ||
2605 | if (scrq->hw_irq > 0x100000000ULL) { | 2622 | if (scrq->hw_irq > 0x100000000ULL) { |
2606 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); | 2623 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); |
2607 | return 1; | 2624 | return 1; |
2608 | } | 2625 | } |
2609 | 2626 | ||
2627 | val = (0xff000000) | scrq->hw_irq; | ||
2628 | rc = plpar_hcall_norets(H_EOI, val); | ||
2629 | if (rc) | ||
2630 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", | ||
2631 | val, rc); | ||
2632 | |||
2610 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, | 2633 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
2611 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); | 2634 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); |
2612 | if (rc) | 2635 | if (rc) |
@@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) | |||
3170 | struct vnic_login_client_data { | 3193 | struct vnic_login_client_data { |
3171 | u8 type; | 3194 | u8 type; |
3172 | __be16 len; | 3195 | __be16 len; |
3173 | char name; | 3196 | char name[]; |
3174 | } __packed; | 3197 | } __packed; |
3175 | 3198 | ||
3176 | static int vnic_client_data_len(struct ibmvnic_adapter *adapter) | 3199 | static int vnic_client_data_len(struct ibmvnic_adapter *adapter) |
@@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, | |||
3199 | vlcd->type = 1; | 3222 | vlcd->type = 1; |
3200 | len = strlen(os_name) + 1; | 3223 | len = strlen(os_name) + 1; |
3201 | vlcd->len = cpu_to_be16(len); | 3224 | vlcd->len = cpu_to_be16(len); |
3202 | strncpy(&vlcd->name, os_name, len); | 3225 | strncpy(vlcd->name, os_name, len); |
3203 | vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); | 3226 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
3204 | 3227 | ||
3205 | /* Type 2 - LPAR name */ | 3228 | /* Type 2 - LPAR name */ |
3206 | vlcd->type = 2; | 3229 | vlcd->type = 2; |
3207 | len = strlen(utsname()->nodename) + 1; | 3230 | len = strlen(utsname()->nodename) + 1; |
3208 | vlcd->len = cpu_to_be16(len); | 3231 | vlcd->len = cpu_to_be16(len); |
3209 | strncpy(&vlcd->name, utsname()->nodename, len); | 3232 | strncpy(vlcd->name, utsname()->nodename, len); |
3210 | vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); | 3233 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
3211 | 3234 | ||
3212 | /* Type 3 - device name */ | 3235 | /* Type 3 - device name */ |
3213 | vlcd->type = 3; | 3236 | vlcd->type = 3; |
3214 | len = strlen(adapter->netdev->name) + 1; | 3237 | len = strlen(adapter->netdev->name) + 1; |
3215 | vlcd->len = cpu_to_be16(len); | 3238 | vlcd->len = cpu_to_be16(len); |
3216 | strncpy(&vlcd->name, adapter->netdev->name, len); | 3239 | strncpy(vlcd->name, adapter->netdev->name, len); |
3217 | } | 3240 | } |
3218 | 3241 | ||
3219 | static int send_login(struct ibmvnic_adapter *adapter) | 3242 | static int send_login(struct ibmvnic_adapter *adapter) |
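Turning char name into a flexible array member makes the layout explicit: each client-data entry carries its string in place, and the next entry starts right after the current name. A standalone sketch of packing two such entries into one buffer; the struct below only mirrors the shape of vnic_login_client_data (the real structure stores the length big-endian) and everything else is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint8_t  type;
	uint16_t len;      /* length of name[], including the NUL */
	char     name[];   /* flexible array member: data follows in place */
} __attribute__((packed));

int main(void)
{
	char buf[64] = { 0 };
	struct tlv *t = (struct tlv *)buf;
	const char *os = "Linux";
	const char *node = "lpar-7";
	size_t len;

	/* First entry. */
	t->type = 1;
	len = strlen(os) + 1;
	t->len = (uint16_t)len;
	memcpy(t->name, os, len);

	/* Advance past the header *and* the in-place string. */
	t = (struct tlv *)(t->name + len);

	/* Second entry. */
	t->type = 2;
	len = strlen(node) + 1;
	t->len = (uint16_t)len;
	memcpy(t->name, node, len);

	printf("second entry starts at offset %zu\n",
	       (size_t)((char *)t - buf));
	return 0;
}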
@@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, | |||
3942 | * to resend the login buffer with fewer queues requested. | 3965 | * to resend the login buffer with fewer queues requested. |
3943 | */ | 3966 | */ |
3944 | if (login_rsp_crq->generic.rc.code) { | 3967 | if (login_rsp_crq->generic.rc.code) { |
3945 | adapter->renegotiate = true; | 3968 | adapter->init_done_rc = login_rsp_crq->generic.rc.code; |
3946 | complete(&adapter->init_done); | 3969 | complete(&adapter->init_done); |
3947 | return 0; | 3970 | return 0; |
3948 | } | 3971 | } |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 99c0b58c2c39..22391e8805f6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
@@ -1035,7 +1035,6 @@ struct ibmvnic_adapter { | |||
1035 | 1035 | ||
1036 | struct ibmvnic_sub_crq_queue **tx_scrq; | 1036 | struct ibmvnic_sub_crq_queue **tx_scrq; |
1037 | struct ibmvnic_sub_crq_queue **rx_scrq; | 1037 | struct ibmvnic_sub_crq_queue **rx_scrq; |
1038 | bool renegotiate; | ||
1039 | 1038 | ||
1040 | /* rx structs */ | 1039 | /* rx structs */ |
1041 | struct napi_struct *napi; | 1040 | struct napi_struct *napi; |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 54a038943c06..4202f9b5b966 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -663,7 +663,7 @@ enum mvpp2_tag_type { | |||
663 | #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) | 663 | #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) |
664 | #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ | 664 | #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ |
665 | MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) | 665 | MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) |
666 | #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) | 666 | #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) |
667 | #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) | 667 | #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) |
668 | #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) | 668 | #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) |
669 | #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) | 669 | #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) |
@@ -916,6 +916,8 @@ static struct { | |||
916 | 916 | ||
917 | #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) | 917 | #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) |
918 | 918 | ||
919 | #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) | ||
920 | |||
919 | /* Definitions */ | 921 | /* Definitions */ |
920 | 922 | ||
921 | /* Shared Packet Processor resources */ | 923 | /* Shared Packet Processor resources */ |
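Hoisting the mask into MVPP2_DESC_DMA_MASK also changes its width: GENMASK_ULL(40, 0) sets bits 0..40, i.e. 41 bits, while DMA_BIT_MASK(40) is the 40-bit mask that now feeds both the descriptor accessors and dma_set_mask(). The arithmetic can be checked with plain C; the macro bodies below are simplified restatements for comparison, not the kernel headers.

#include <inttypes.h>
#include <stdio.h>

/* Simplified equivalents of the kernel macros, for comparison only. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define DMA_BIT_MASK(n)   (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("GENMASK_ULL(40, 0) = 0x%016" PRIx64 "\n",
	       (uint64_t)GENMASK_ULL(40, 0));   /* 0x000001ffffffffff: 41 bits */
	printf("DMA_BIT_MASK(40)   = 0x%016" PRIx64 "\n",
	       (uint64_t)DMA_BIT_MASK(40));     /* 0x000000ffffffffff: 40 bits */
	return 0;
}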
@@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, | |||
1429 | if (port->priv->hw_version == MVPP21) | 1431 | if (port->priv->hw_version == MVPP21) |
1430 | return tx_desc->pp21.buf_dma_addr; | 1432 | return tx_desc->pp21.buf_dma_addr; |
1431 | else | 1433 | else |
1432 | return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); | 1434 | return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; |
1433 | } | 1435 | } |
1434 | 1436 | ||
1435 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, | 1437 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, |
@@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, | |||
1447 | } else { | 1449 | } else { |
1448 | u64 val = (u64)addr; | 1450 | u64 val = (u64)addr; |
1449 | 1451 | ||
1450 | tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); | 1452 | tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; |
1451 | tx_desc->pp22.buf_dma_addr_ptp |= val; | 1453 | tx_desc->pp22.buf_dma_addr_ptp |= val; |
1452 | tx_desc->pp22.packet_offset = offset; | 1454 | tx_desc->pp22.packet_offset = offset; |
1453 | } | 1455 | } |
@@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, | |||
1507 | if (port->priv->hw_version == MVPP21) | 1509 | if (port->priv->hw_version == MVPP21) |
1508 | return rx_desc->pp21.buf_dma_addr; | 1510 | return rx_desc->pp21.buf_dma_addr; |
1509 | else | 1511 | else |
1510 | return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); | 1512 | return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; |
1511 | } | 1513 | } |
1512 | 1514 | ||
1513 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, | 1515 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, |
@@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, | |||
1516 | if (port->priv->hw_version == MVPP21) | 1518 | if (port->priv->hw_version == MVPP21) |
1517 | return rx_desc->pp21.buf_cookie; | 1519 | return rx_desc->pp21.buf_cookie; |
1518 | else | 1520 | else |
1519 | return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); | 1521 | return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; |
1520 | } | 1522 | } |
1521 | 1523 | ||
1522 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, | 1524 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, |
@@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
8789 | } | 8791 | } |
8790 | 8792 | ||
8791 | if (priv->hw_version == MVPP22) { | 8793 | if (priv->hw_version == MVPP22) { |
8792 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); | 8794 | err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); |
8793 | if (err) | 8795 | if (err) |
8794 | goto err_mg_clk; | 8796 | goto err_mg_clk; |
8795 | /* Sadly, the BM pools all share the same register to | 8797 | /* Sadly, the BM pools all share the same register to |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 3735c09d2112..577659f332e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c | |||
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) | |||
258 | case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: | 258 | case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: |
259 | nfp_tunnel_keep_alive(app, skb); | 259 | nfp_tunnel_keep_alive(app, skb); |
260 | break; | 260 | break; |
261 | case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: | ||
262 | /* Acks from the NFP that the route is added - ignore. */ | ||
263 | break; | ||
264 | default: | 261 | default: |
265 | nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", | 262 | nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", |
266 | type); | 263 | type); |
@@ -275,18 +272,49 @@ out: | |||
275 | 272 | ||
276 | void nfp_flower_cmsg_process_rx(struct work_struct *work) | 273 | void nfp_flower_cmsg_process_rx(struct work_struct *work) |
277 | { | 274 | { |
275 | struct sk_buff_head cmsg_joined; | ||
278 | struct nfp_flower_priv *priv; | 276 | struct nfp_flower_priv *priv; |
279 | struct sk_buff *skb; | 277 | struct sk_buff *skb; |
280 | 278 | ||
281 | priv = container_of(work, struct nfp_flower_priv, cmsg_work); | 279 | priv = container_of(work, struct nfp_flower_priv, cmsg_work); |
280 | skb_queue_head_init(&cmsg_joined); | ||
281 | |||
282 | spin_lock_bh(&priv->cmsg_skbs_high.lock); | ||
283 | skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined); | ||
284 | spin_unlock_bh(&priv->cmsg_skbs_high.lock); | ||
282 | 285 | ||
283 | while ((skb = skb_dequeue(&priv->cmsg_skbs))) | 286 | spin_lock_bh(&priv->cmsg_skbs_low.lock); |
287 | skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined); | ||
288 | spin_unlock_bh(&priv->cmsg_skbs_low.lock); | ||
289 | |||
290 | while ((skb = __skb_dequeue(&cmsg_joined))) | ||
284 | nfp_flower_cmsg_process_one_rx(priv->app, skb); | 291 | nfp_flower_cmsg_process_one_rx(priv->app, skb); |
285 | } | 292 | } |
286 | 293 | ||
287 | void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | 294 | static void |
295 | nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) | ||
288 | { | 296 | { |
289 | struct nfp_flower_priv *priv = app->priv; | 297 | struct nfp_flower_priv *priv = app->priv; |
298 | struct sk_buff_head *skb_head; | ||
299 | |||
300 | if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || | ||
301 | type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) | ||
302 | skb_head = &priv->cmsg_skbs_high; | ||
303 | else | ||
304 | skb_head = &priv->cmsg_skbs_low; | ||
305 | |||
306 | if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) { | ||
307 | nfp_flower_cmsg_warn(app, "Dropping queued control messages\n"); | ||
308 | dev_kfree_skb_any(skb); | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | skb_queue_tail(skb_head, skb); | ||
313 | schedule_work(&priv->cmsg_work); | ||
314 | } | ||
315 | |||
316 | void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | ||
317 | { | ||
290 | struct nfp_flower_cmsg_hdr *cmsg_hdr; | 318 | struct nfp_flower_cmsg_hdr *cmsg_hdr; |
291 | 319 | ||
292 | cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); | 320 | cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); |
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | |||
306 | nfp_flower_process_mtu_ack(app, skb)) { | 334 | nfp_flower_process_mtu_ack(app, skb)) { |
307 | /* Handle MTU acks outside wq to prevent RTNL conflict. */ | 335 | /* Handle MTU acks outside wq to prevent RTNL conflict. */ |
308 | dev_consume_skb_any(skb); | 336 | dev_consume_skb_any(skb); |
337 | } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { | ||
338 | /* Acks from the NFP that the route is added - ignore. */ | ||
339 | dev_consume_skb_any(skb); | ||
309 | } else { | 340 | } else { |
310 | skb_queue_tail(&priv->cmsg_skbs, skb); | 341 | nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); |
311 | schedule_work(&priv->cmsg_work); | ||
312 | } | 342 | } |
313 | } | 343 | } |
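The control-message path above now keeps two queues, splices the high-priority one (PORT_REIFY and PORT_MOD) ahead of the low-priority one when the work runs, and drops new messages once a queue already holds NFP_FLOWER_WORKQ_MAX_SKBS entries. A self-contained sketch of that enqueue and drain policy over two bounded FIFOs; the types and the limit are placeholders.

#include <stdbool.h>
#include <stddef.h>

#define MAX_QUEUED 30000   /* stand-in for NFP_FLOWER_WORKQ_MAX_SKBS */

struct msg { struct msg *next; int type; };

struct fifo {
	struct msg *head, *tail;
	size_t len;
};

/* Returns false (drop) when the target queue is already full. */
static bool enqueue(struct fifo *high, struct fifo *low, struct msg *m,
		    bool is_high_prio)
{
	struct fifo *q = is_high_prio ? high : low;

	if (q->len >= MAX_QUEUED)
		return false;
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	q->len++;
	return true;
}

/* Drain order: everything high-priority first, then the rest. */
static struct msg *dequeue(struct fifo *high, struct fifo *low)
{
	struct fifo *q = high->head ? high : low;
	struct msg *m = q->head;

	if (!m)
		return NULL;
	q->head = m->next;
	if (!q->head)
		q->tail = NULL;
	q->len--;
	return m;
}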
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 96bc0e33980c..b6c0fd053a50 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
@@ -108,6 +108,8 @@ | |||
108 | #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) | 108 | #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) |
109 | #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) | 109 | #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) |
110 | 110 | ||
111 | #define NFP_FLOWER_WORKQ_MAX_SKBS 30000 | ||
112 | |||
111 | #define nfp_flower_cmsg_warn(app, fmt, args...) \ | 113 | #define nfp_flower_cmsg_warn(app, fmt, args...) \ |
112 | do { \ | 114 | do { \ |
113 | if (net_ratelimit()) \ | 115 | if (net_ratelimit()) \ |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6357e0720f43..ad02592a82b7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c | |||
@@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app) | |||
519 | 519 | ||
520 | app->priv = app_priv; | 520 | app->priv = app_priv; |
521 | app_priv->app = app; | 521 | app_priv->app = app; |
522 | skb_queue_head_init(&app_priv->cmsg_skbs); | 522 | skb_queue_head_init(&app_priv->cmsg_skbs_high); |
523 | skb_queue_head_init(&app_priv->cmsg_skbs_low); | ||
523 | INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); | 524 | INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); |
524 | init_waitqueue_head(&app_priv->reify_wait_queue); | 525 | init_waitqueue_head(&app_priv->reify_wait_queue); |
525 | 526 | ||
@@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app) | |||
549 | { | 550 | { |
550 | struct nfp_flower_priv *app_priv = app->priv; | 551 | struct nfp_flower_priv *app_priv = app->priv; |
551 | 552 | ||
552 | skb_queue_purge(&app_priv->cmsg_skbs); | 553 | skb_queue_purge(&app_priv->cmsg_skbs_high); |
554 | skb_queue_purge(&app_priv->cmsg_skbs_low); | ||
553 | flush_work(&app_priv->cmsg_work); | 555 | flush_work(&app_priv->cmsg_work); |
554 | 556 | ||
555 | nfp_flower_metadata_cleanup(app); | 557 | nfp_flower_metadata_cleanup(app); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e030b3ce4510..c67e1b54c614 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h | |||
@@ -107,7 +107,10 @@ struct nfp_mtu_conf { | |||
107 | * @mask_table: Hash table used to store masks | 107 | * @mask_table: Hash table used to store masks |
108 | * @flow_table: Hash table used to store flower rules | 108 | * @flow_table: Hash table used to store flower rules |
109 | * @cmsg_work: Workqueue for control messages processing | 109 | * @cmsg_work: Workqueue for control messages processing |
110 | * @cmsg_skbs: List of skbs for control message processing | 110 | * @cmsg_skbs_high: List of higher priority skbs for control message |
111 | * processing | ||
112 | * @cmsg_skbs_low: List of lower priority skbs for control message | ||
113 | * processing | ||
111 | * @nfp_mac_off_list: List of MAC addresses to offload | 114 | * @nfp_mac_off_list: List of MAC addresses to offload |
112 | * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs | 115 | * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs |
113 | * @nfp_ipv4_off_list: List of IPv4 addresses to offload | 116 | * @nfp_ipv4_off_list: List of IPv4 addresses to offload |
@@ -136,7 +139,8 @@ struct nfp_flower_priv { | |||
136 | DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); | 139 | DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); |
137 | DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); | 140 | DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); |
138 | struct work_struct cmsg_work; | 141 | struct work_struct cmsg_work; |
139 | struct sk_buff_head cmsg_skbs; | 142 | struct sk_buff_head cmsg_skbs_high; |
143 | struct sk_buff_head cmsg_skbs_low; | ||
140 | struct list_head nfp_mac_off_list; | 144 | struct list_head nfp_mac_off_list; |
141 | struct list_head nfp_mac_index_list; | 145 | struct list_head nfp_mac_index_list; |
142 | struct list_head nfp_ipv4_off_list; | 146 | struct list_head nfp_ipv4_off_list; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index f7b958181126..cb28ac03e4ca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | |||
@@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) | |||
211 | break; | 211 | break; |
212 | 212 | ||
213 | err = msleep_interruptible(timeout_ms); | 213 | err = msleep_interruptible(timeout_ms); |
214 | if (err != 0) | 214 | if (err != 0) { |
215 | nfp_info(mutex->cpp, | ||
216 | "interrupted waiting for NFP mutex\n"); | ||
215 | return -ERESTARTSYS; | 217 | return -ERESTARTSYS; |
218 | } | ||
216 | 219 | ||
217 | if (time_is_before_eq_jiffies(warn_at)) { | 220 | if (time_is_before_eq_jiffies(warn_at)) { |
218 | warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; | 221 | warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 99bb679a9801..2abee0fe3a7c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | |||
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, | |||
281 | if ((*reg & mask) == val) | 281 | if ((*reg & mask) == val) |
282 | return 0; | 282 | return 0; |
283 | 283 | ||
284 | if (msleep_interruptible(25)) | 284 | msleep(25); |
285 | return -ERESTARTSYS; | ||
286 | 285 | ||
287 | if (time_after(start_time, wait_until)) | 286 | if (time_after(start_time, wait_until)) |
288 | return -ETIMEDOUT; | 287 | return -ETIMEDOUT; |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index d33988570217..5f4e447c5dce 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
@@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
350 | 350 | ||
351 | real_dev = priv->real_dev; | 351 | real_dev = priv->real_dev; |
352 | 352 | ||
353 | if (!rmnet_is_real_dev_registered(real_dev)) | ||
354 | return -ENODEV; | ||
355 | |||
356 | if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) | 353 | if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) |
357 | goto nla_put_failure; | 354 | goto nla_put_failure; |
358 | 355 | ||
359 | port = rmnet_get_port_rtnl(real_dev); | 356 | if (rmnet_is_real_dev_registered(real_dev)) { |
357 | port = rmnet_get_port_rtnl(real_dev); | ||
358 | f.flags = port->data_format; | ||
359 | } else { | ||
360 | f.flags = 0; | ||
361 | } | ||
360 | 362 | ||
361 | f.flags = port->data_format; | ||
362 | f.mask = ~0; | 363 | f.mask = ~0; |
363 | 364 | ||
364 | if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) | 365 | if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50daad0a1482..83ce229f4eb7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -4776,8 +4776,7 @@ static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |||
4776 | goto out_unlock; | 4776 | goto out_unlock; |
4777 | } | 4777 | } |
4778 | 4778 | ||
4779 | if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, | 4779 | if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { |
4780 | flow_id, filter_idx)) { | ||
4781 | ret = false; | 4780 | ret = false; |
4782 | goto out_unlock; | 4781 | goto out_unlock; |
4783 | } | 4782 | } |
@@ -5265,7 +5264,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
5265 | ids = vlan->uc; | 5264 | ids = vlan->uc; |
5266 | } | 5265 | } |
5267 | 5266 | ||
5268 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | 5267 | filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; |
5269 | 5268 | ||
5270 | /* Insert/renew filters */ | 5269 | /* Insert/renew filters */ |
5271 | for (i = 0; i < addr_count; i++) { | 5270 | for (i = 0; i < addr_count; i++) { |
@@ -5334,7 +5333,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, | |||
5334 | int rc; | 5333 | int rc; |
5335 | u16 *id; | 5334 | u16 *id; |
5336 | 5335 | ||
5337 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | 5336 | filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; |
5338 | 5337 | ||
5339 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); | 5338 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
5340 | 5339 | ||
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4a19c7efdf8d..7174ef5e5c5e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -2912,7 +2912,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |||
2912 | if (test_bit(index, table->used_bitmap) && | 2912 | if (test_bit(index, table->used_bitmap) && |
2913 | table->spec[index].priority == EFX_FILTER_PRI_HINT && | 2913 | table->spec[index].priority == EFX_FILTER_PRI_HINT && |
2914 | rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, | 2914 | rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, |
2915 | flow_id, index)) { | 2915 | flow_id, 0)) { |
2916 | efx_farch_filter_table_clear_entry(efx, table, index); | 2916 | efx_farch_filter_table_clear_entry(efx, table, index); |
2917 | ret = true; | 2917 | ret = true; |
2918 | } | 2918 | } |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5e379a83c729..eea3808b3f25 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -733,6 +733,27 @@ struct efx_rss_context { | |||
733 | u32 rx_indir_table[128]; | 733 | u32 rx_indir_table[128]; |
734 | }; | 734 | }; |
735 | 735 | ||
736 | #ifdef CONFIG_RFS_ACCEL | ||
737 | /** | ||
738 | * struct efx_async_filter_insertion - Request to asynchronously insert a filter | ||
739 | * @net_dev: Reference to the netdevice | ||
740 | * @spec: The filter to insert | ||
741 | * @work: Workitem for this request | ||
742 | * @rxq_index: Identifies the channel for which this request was made | ||
743 | * @flow_id: Identifies the kernel-side flow for which this request was made | ||
744 | */ | ||
745 | struct efx_async_filter_insertion { | ||
746 | struct net_device *net_dev; | ||
747 | struct efx_filter_spec spec; | ||
748 | struct work_struct work; | ||
749 | u16 rxq_index; | ||
750 | u32 flow_id; | ||
751 | }; | ||
752 | |||
753 | /* Maximum number of ARFS workitems that may be in flight on an efx_nic */ | ||
754 | #define EFX_RPS_MAX_IN_FLIGHT 8 | ||
755 | #endif /* CONFIG_RFS_ACCEL */ | ||
756 | |||
736 | /** | 757 | /** |
737 | * struct efx_nic - an Efx NIC | 758 | * struct efx_nic - an Efx NIC |
738 | * @name: Device name (net device name or bus id before net device registered) | 759 | * @name: Device name (net device name or bus id before net device registered) |
@@ -850,6 +871,8 @@ struct efx_rss_context { | |||
850 | * @rps_expire_channel: Next channel to check for expiry | 871 | * @rps_expire_channel: Next channel to check for expiry |
851 | * @rps_expire_index: Next index to check for expiry in | 872 | * @rps_expire_index: Next index to check for expiry in |
852 | * @rps_expire_channel's @rps_flow_id | 873 | * @rps_expire_channel's @rps_flow_id |
874 | * @rps_slot_map: bitmap of in-flight entries in @rps_slot | ||
875 | * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() | ||
853 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. | 876 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. |
854 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. | 877 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. |
855 | * Decremented when the efx_flush_rx_queue() is called. | 878 | * Decremented when the efx_flush_rx_queue() is called. |
@@ -1004,6 +1027,8 @@ struct efx_nic { | |||
1004 | struct mutex rps_mutex; | 1027 | struct mutex rps_mutex; |
1005 | unsigned int rps_expire_channel; | 1028 | unsigned int rps_expire_channel; |
1006 | unsigned int rps_expire_index; | 1029 | unsigned int rps_expire_index; |
1030 | unsigned long rps_slot_map; | ||
1031 | struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; | ||
1007 | #endif | 1032 | #endif |
1008 | 1033 | ||
1009 | atomic_t active_queues; | 1034 | atomic_t active_queues; |
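The new rps_slot[] array and rps_slot_map bitmap replace per-request kmalloc(): efx_filter_rfs() claims a free slot with test_and_set_bit(), or reports -EBUSY when all EFX_RPS_MAX_IN_FLIGHT slots are busy, and efx_filter_rfs_work() releases the slot with clear_bit(). A small sketch of such a fixed slot pool, using GCC/Clang atomic builtins instead of the kernel bitops; slot_claim() and slot_release() are illustrative names.

#include <stdbool.h>

#define MAX_IN_FLIGHT 8   /* stand-in for EFX_RPS_MAX_IN_FLIGHT */

struct request { int rxq_index; unsigned int flow_id; };

static unsigned long slot_map;                 /* one bit per slot      */
static struct request slots[MAX_IN_FLIGHT];    /* preallocated requests */

/* Claim a free slot; returns its index, or -1 if all are busy so the
 * caller can fail with -EBUSY instead of allocating. */
static int slot_claim(void)
{
	for (int i = 0; i < MAX_IN_FLIGHT; i++) {
		if (!(__atomic_fetch_or(&slot_map, 1UL << i,
					__ATOMIC_ACQ_REL) & (1UL << i))) {
			slots[i].flow_id = 0;   /* reset for the new request */
			return i;
		}
	}
	return -1;
}

/* Release the slot once the deferred work has finished with it. */
static void slot_release(int i)
{
	__atomic_fetch_and(&slot_map, ~(1UL << i), __ATOMIC_ACQ_REL);
}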
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 95682831484e..9c593c661cbf 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -827,31 +827,16 @@ MODULE_PARM_DESC(rx_refill_threshold, | |||
827 | 827 | ||
828 | #ifdef CONFIG_RFS_ACCEL | 828 | #ifdef CONFIG_RFS_ACCEL |
829 | 829 | ||
830 | /** | ||
831 | * struct efx_async_filter_insertion - Request to asynchronously insert a filter | ||
832 | * @net_dev: Reference to the netdevice | ||
833 | * @spec: The filter to insert | ||
834 | * @work: Workitem for this request | ||
835 | * @rxq_index: Identifies the channel for which this request was made | ||
836 | * @flow_id: Identifies the kernel-side flow for which this request was made | ||
837 | */ | ||
838 | struct efx_async_filter_insertion { | ||
839 | struct net_device *net_dev; | ||
840 | struct efx_filter_spec spec; | ||
841 | struct work_struct work; | ||
842 | u16 rxq_index; | ||
843 | u32 flow_id; | ||
844 | }; | ||
845 | |||
846 | static void efx_filter_rfs_work(struct work_struct *data) | 830 | static void efx_filter_rfs_work(struct work_struct *data) |
847 | { | 831 | { |
848 | struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, | 832 | struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, |
849 | work); | 833 | work); |
850 | struct efx_nic *efx = netdev_priv(req->net_dev); | 834 | struct efx_nic *efx = netdev_priv(req->net_dev); |
851 | struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); | 835 | struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); |
836 | int slot_idx = req - efx->rps_slot; | ||
852 | int rc; | 837 | int rc; |
853 | 838 | ||
854 | rc = efx->type->filter_insert(efx, &req->spec, false); | 839 | rc = efx->type->filter_insert(efx, &req->spec, true); |
855 | if (rc >= 0) { | 840 | if (rc >= 0) { |
856 | /* Remember this so we can check whether to expire the filter | 841 | /* Remember this so we can check whether to expire the filter |
857 | * later. | 842 | * later. |
@@ -878,8 +863,8 @@ static void efx_filter_rfs_work(struct work_struct *data) | |||
878 | } | 863 | } |
879 | 864 | ||
880 | /* Release references */ | 865 | /* Release references */ |
866 | clear_bit(slot_idx, &efx->rps_slot_map); | ||
881 | dev_put(req->net_dev); | 867 | dev_put(req->net_dev); |
882 | kfree(req); | ||
883 | } | 868 | } |
884 | 869 | ||
885 | int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | 870 | int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, |
@@ -888,22 +873,36 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
888 | struct efx_nic *efx = netdev_priv(net_dev); | 873 | struct efx_nic *efx = netdev_priv(net_dev); |
889 | struct efx_async_filter_insertion *req; | 874 | struct efx_async_filter_insertion *req; |
890 | struct flow_keys fk; | 875 | struct flow_keys fk; |
876 | int slot_idx; | ||
877 | int rc; | ||
891 | 878 | ||
892 | if (flow_id == RPS_FLOW_ID_INVALID) | 879 | /* find a free slot */ |
893 | return -EINVAL; | 880 | for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) |
881 | if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) | ||
882 | break; | ||
883 | if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) | ||
884 | return -EBUSY; | ||
894 | 885 | ||
895 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) | 886 | if (flow_id == RPS_FLOW_ID_INVALID) { |
896 | return -EPROTONOSUPPORT; | 887 | rc = -EINVAL; |
888 | goto out_clear; | ||
889 | } | ||
897 | 890 | ||
898 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) | 891 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { |
899 | return -EPROTONOSUPPORT; | 892 | rc = -EPROTONOSUPPORT; |
900 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) | 893 | goto out_clear; |
901 | return -EPROTONOSUPPORT; | 894 | } |
902 | 895 | ||
903 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | 896 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { |
904 | if (!req) | 897 | rc = -EPROTONOSUPPORT; |
905 | return -ENOMEM; | 898 | goto out_clear; |
899 | } | ||
900 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { | ||
901 | rc = -EPROTONOSUPPORT; | ||
902 | goto out_clear; | ||
903 | } | ||
906 | 904 | ||
905 | req = efx->rps_slot + slot_idx; | ||
907 | efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, | 906 | efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, |
908 | efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, | 907 | efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, |
909 | rxq_index); | 908 | rxq_index); |
@@ -933,6 +932,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
933 | req->flow_id = flow_id; | 932 | req->flow_id = flow_id; |
934 | schedule_work(&req->work); | 933 | schedule_work(&req->work); |
935 | return 0; | 934 | return 0; |
935 | out_clear: | ||
936 | clear_bit(slot_idx, &efx->rps_slot_map); | ||
937 | return rc; | ||
936 | } | 938 | } |
937 | 939 | ||
938 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) | 940 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index c7bff596c665..dedd40613090 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h | |||
@@ -347,7 +347,7 @@ enum power_event { | |||
347 | #define MTL_RX_OVERFLOW_INT BIT(16) | 347 | #define MTL_RX_OVERFLOW_INT BIT(16) |
348 | 348 | ||
349 | /* Default operating mode of the MAC */ | 349 | /* Default operating mode of the MAC */ |
350 | #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ | 350 | #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ |
351 | GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) | 351 | GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) |
352 | 352 | ||
353 | /* To dump the core regs excluding the Address Registers */ | 353 | /* To dump the core regs excluding the Address Registers */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a3af92ebbca8..517b1f6736a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
@@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, | |||
31 | 31 | ||
32 | value |= GMAC_CORE_INIT; | 32 | value |= GMAC_CORE_INIT; |
33 | 33 | ||
34 | /* Clear ACS bit because Ethernet switch tagging formats such as | ||
35 | * Broadcom tags can look like invalid LLC/SNAP packets and cause the | ||
36 | * hardware to truncate packets on reception. | ||
37 | */ | ||
38 | if (netdev_uses_dsa(dev)) | ||
39 | value &= ~GMAC_CONFIG_ACS; | ||
40 | |||
41 | if (mtu > 1500) | 34 | if (mtu > 1500) |
42 | value |= GMAC_CONFIG_2K; | 35 | value |= GMAC_CONFIG_2K; |
43 | if (mtu > 2000) | 36 | if (mtu > 2000) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a16931ce39d..b65e2d144698 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3495 | 3495 | ||
3496 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 3496 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
3497 | * Type frames (LLC/LLC-SNAP) | 3497 | * Type frames (LLC/LLC-SNAP) |
3498 | * | ||
3499 | * llc_snap is never checked in GMAC >= 4, so this ACS | ||
3500 | * feature is always disabled and packets need to be | ||
3501 | * stripped manually. | ||
3498 | */ | 3502 | */ |
3499 | if (unlikely(status != llc_snap)) | 3503 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
3504 | unlikely(status != llc_snap)) | ||
3500 | frame_len -= ETH_FCS_LEN; | 3505 | frame_len -= ETH_FCS_LEN; |
3501 | 3506 | ||
3502 | if (netif_msg_rx_status(priv)) { | 3507 | if (netif_msg_rx_status(priv)) { |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 9cbb0c8a896a..7de88b33d5b9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3277 | 3277 | ||
3278 | err = netdev_upper_dev_link(real_dev, dev, extack); | 3278 | err = netdev_upper_dev_link(real_dev, dev, extack); |
3279 | if (err < 0) | 3279 | if (err < 0) |
3280 | goto put_dev; | 3280 | goto unregister; |
3281 | 3281 | ||
3282 | /* need to be already registered so that ->init has run and | 3282 | /* need to be already registered so that ->init has run and |
3283 | * the MAC addr is set | 3283 | * the MAC addr is set |
@@ -3316,8 +3316,7 @@ del_dev: | |||
3316 | macsec_del_dev(macsec); | 3316 | macsec_del_dev(macsec); |
3317 | unlink: | 3317 | unlink: |
3318 | netdev_upper_dev_unlink(real_dev, dev); | 3318 | netdev_upper_dev_unlink(real_dev, dev); |
3319 | put_dev: | 3319 | unregister: |
3320 | dev_put(real_dev); | ||
3321 | unregister_netdevice(dev); | 3320 | unregister_netdevice(dev); |
3322 | return err; | 3321 | return err; |
3323 | } | 3322 | } |
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..a97ac8c12c4c 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ethtool.h> | 20 | #include <linux/ethtool.h> |
21 | #include <linux/phy.h> | 21 | #include <linux/phy.h> |
22 | #include <linux/microchipphy.h> | 22 | #include <linux/microchipphy.h> |
23 | #include <linux/delay.h> | ||
23 | 24 | ||
24 | #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" | 25 | #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" |
25 | #define DRIVER_DESC "Microchip LAN88XX PHY driver" | 26 | #define DRIVER_DESC "Microchip LAN88XX PHY driver" |
@@ -30,6 +31,16 @@ struct lan88xx_priv { | |||
30 | __u32 wolopts; | 31 | __u32 wolopts; |
31 | }; | 32 | }; |
32 | 33 | ||
34 | static int lan88xx_read_page(struct phy_device *phydev) | ||
35 | { | ||
36 | return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS); | ||
37 | } | ||
38 | |||
39 | static int lan88xx_write_page(struct phy_device *phydev, int page) | ||
40 | { | ||
41 | return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); | ||
42 | } | ||
43 | |||
33 | static int lan88xx_phy_config_intr(struct phy_device *phydev) | 44 | static int lan88xx_phy_config_intr(struct phy_device *phydev) |
34 | { | 45 | { |
35 | int rc; | 46 | int rc; |
@@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev) | |||
66 | return 0; | 77 | return 0; |
67 | } | 78 | } |
68 | 79 | ||
80 | static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, | ||
81 | u32 data) | ||
82 | { | ||
83 | int val, save_page, ret = 0; | ||
84 | u16 buf; | ||
85 | |||
86 | /* Save current page */ | ||
87 | save_page = phy_save_page(phydev); | ||
88 | if (save_page < 0) { | ||
89 | pr_warn("Failed to get current page\n"); | ||
90 | goto err; | ||
91 | } | ||
92 | |||
93 | /* Switch to TR page */ | ||
94 | lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR); | ||
95 | |||
96 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, | ||
97 | (data & 0xFFFF)); | ||
98 | if (ret < 0) { | ||
99 | pr_warn("Failed to write TR low data\n"); | ||
100 | goto err; | ||
101 | } | ||
102 | |||
103 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, | ||
104 | (data & 0x00FF0000) >> 16); | ||
105 | if (ret < 0) { | ||
106 | pr_warn("Failed to write TR high data\n"); | ||
107 | goto err; | ||
108 | } | ||
109 | |||
110 | /* Config control bits [15:13] of register */ | ||
111 | buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */ | ||
112 | buf |= 0x8000; /* Set [15] to Packet transmit */ | ||
113 | |||
114 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); | ||
115 | if (ret < 0) { | ||
116 | pr_warn("Failed to write data in reg\n"); | ||
117 | goto err; | ||
118 | } | ||
119 | |||
120 | usleep_range(1000, 2000);/* Wait for Data to be written */ | ||
121 | val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR); | ||
122 | if (!(val & 0x8000)) | ||
123 | pr_warn("TR Register[0x%X] configuration failed\n", regaddr); | ||
124 | err: | ||
125 | return phy_restore_page(phydev, save_page, ret); | ||
126 | } | ||
127 | |||
128 | static void lan88xx_config_TR_regs(struct phy_device *phydev) | ||
129 | { | ||
130 | int err; | ||
131 | |||
132 | /* Get access to Channel 0x1, Node 0xF, Register 0x01. | ||
133 | * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf, | ||
134 | * MrvlTrFix1000Kp, MasterEnableTR bits. | ||
135 | */ | ||
136 | err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); | ||
137 | if (err < 0) | ||
138 | pr_warn("Failed to Set Register[0x0F82]\n"); | ||
139 | |||
140 | /* Get access to Channel b'10, Node b'1101, Register 0x06. | ||
141 | * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, | ||
142 | * SSTrKp1000Mas bits. | ||
143 | */ | ||
144 | err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); | ||
145 | if (err < 0) | ||
146 | pr_warn("Failed to Set Register[0x168C]\n"); | ||
147 | |||
148 | /* Get access to Channel b'10, Node b'1111, Register 0x11. | ||
149 | * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh | ||
150 | * bits | ||
151 | */ | ||
152 | err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); | ||
153 | if (err < 0) | ||
154 | pr_warn("Failed to Set Register[0x17A2]\n"); | ||
155 | |||
156 | /* Get access to Channel b'10, Node b'1101, Register 0x10. | ||
157 | * Write 24-bit value 0xEEFFDD to register. Setting | ||
158 | * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000, | ||
159 | * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits. | ||
160 | */ | ||
161 | err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); | ||
162 | if (err < 0) | ||
163 | pr_warn("Failed to Set Register[0x16A0]\n"); | ||
164 | |||
165 | /* Get access to Channel b'10, Node b'1101, Register 0x13. | ||
166 | * Write 24-bit value 0x071448 to register. Setting | ||
167 | * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits. | ||
168 | */ | ||
169 | err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); | ||
170 | if (err < 0) | ||
171 | pr_warn("Failed to Set Register[0x16A6]\n"); | ||
172 | |||
173 | /* Get access to Channel b'10, Node b'1101, Register 0x12. | ||
174 | * Write 24-bit value 0x13132F to register. Setting | ||
175 | * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits. | ||
176 | */ | ||
177 | err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); | ||
178 | if (err < 0) | ||
179 | pr_warn("Failed to Set Register[0x16A4]\n"); | ||
180 | |||
181 | /* Get access to Channel b'10, Node b'1101, Register 0x14. | ||
182 | * Write 24-bit value 0x0 to register. Setting eee_3level_delay, | ||
183 | * eee_TrKf_freeze_delay bits. | ||
184 | */ | ||
185 | err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); | ||
186 | if (err < 0) | ||
187 | pr_warn("Failed to Set Register[0x16A8]\n"); | ||
188 | |||
189 | /* Get access to Channel b'01, Node b'1111, Register 0x34. | ||
190 | * Write 24-bit value 0x91B06C to register. Setting | ||
191 | * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000, | ||
192 | * FastMseSearchUpdGain1000 bits. | ||
193 | */ | ||
194 | err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); | ||
195 | if (err < 0) | ||
196 | pr_warn("Failed to Set Register[0x0FE8]\n"); | ||
197 | |||
198 | /* Get access to Channel b'01, Node b'1111, Register 0x3E. | ||
199 | * Write 24-bit value 0xC0A028 to register. Setting | ||
200 | * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000, | ||
201 | * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits. | ||
202 | */ | ||
203 | err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); | ||
204 | if (err < 0) | ||
205 | pr_warn("Failed to Set Register[0x0FFC]\n"); | ||
206 | |||
207 | /* Get access to Channel b'01, Node b'1111, Register 0x35. | ||
208 | * Write 24-bit value 0x041600 to register. Setting | ||
209 | * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000, | ||
210 | * FastMsePhChangeDelay1000 bits. | ||
211 | */ | ||
212 | err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); | ||
213 | if (err < 0) | ||
214 | pr_warn("Failed to Set Register[0x0FEA]\n"); | ||
215 | |||
216 | /* Get access to Channel b'10, Node b'1101, Register 0x03. | ||
217 | * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. | ||
218 | */ | ||
219 | err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); | ||
220 | if (err < 0) | ||
221 | pr_warn("Failed to Set Register[0x1686]\n"); | ||
222 | } | ||
223 | |||
69 | static int lan88xx_probe(struct phy_device *phydev) | 224 | static int lan88xx_probe(struct phy_device *phydev) |
70 | { | 225 | { |
71 | struct device *dev = &phydev->mdio.dev; | 226 | struct device *dev = &phydev->mdio.dev; |
@@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev) | |||
132 | phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); | 287 | phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); |
133 | } | 288 | } |
134 | 289 | ||
290 | static int lan88xx_config_init(struct phy_device *phydev) | ||
291 | { | ||
292 | int val; | ||
293 | |||
294 | genphy_config_init(phydev); | ||
295 | /* Zerodetect delay enable */ | ||
296 | val = phy_read_mmd(phydev, MDIO_MMD_PCS, | ||
297 | PHY_ARDENNES_MMD_DEV_3_PHY_CFG); | ||
298 | val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_; | ||
299 | |||
300 | phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG, | ||
301 | val); | ||
302 | |||
303 | /* Config DSP registers */ | ||
304 | lan88xx_config_TR_regs(phydev); | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
135 | static int lan88xx_config_aneg(struct phy_device *phydev) | 309 | static int lan88xx_config_aneg(struct phy_device *phydev) |
136 | { | 310 | { |
137 | lan88xx_set_mdix(phydev); | 311 | lan88xx_set_mdix(phydev); |
@@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = { | |||
151 | .probe = lan88xx_probe, | 325 | .probe = lan88xx_probe, |
152 | .remove = lan88xx_remove, | 326 | .remove = lan88xx_remove, |
153 | 327 | ||
154 | .config_init = genphy_config_init, | 328 | .config_init = lan88xx_config_init, |
155 | .config_aneg = lan88xx_config_aneg, | 329 | .config_aneg = lan88xx_config_aneg, |
156 | 330 | ||
157 | .ack_interrupt = lan88xx_phy_ack_interrupt, | 331 | .ack_interrupt = lan88xx_phy_ack_interrupt, |
@@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = { | |||
160 | .suspend = lan88xx_suspend, | 334 | .suspend = lan88xx_suspend, |
161 | .resume = genphy_resume, | 335 | .resume = genphy_resume, |
162 | .set_wol = lan88xx_set_wol, | 336 | .set_wol = lan88xx_set_wol, |
337 | .read_page = lan88xx_read_page, | ||
338 | .write_page = lan88xx_write_page, | ||
163 | } }; | 339 | } }; |
164 | 340 | ||
165 | module_phy_driver(microchip_phy_driver); | 341 | module_phy_driver(microchip_phy_driver); |
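The new read_page/write_page callbacks let the TR helper above lean on phylib's paged-access helpers: phy_save_page() takes the MDIO bus lock and remembers the current page, the lock-free __phy_read()/__phy_write() accessors then do the work, and phy_restore_page() restores the page, drops the lock and propagates the first error. A condensed sketch of that pattern; the page-access register number 19 (LAN88XX_EXT_PAGE_ACCESS) and the page value are assumptions for illustration:

#include <linux/phy.h>

#define SKETCH_EXT_PAGE_ACCESS	19	/* assumed LAN88XX page register */

static int sketch_paged_write(struct phy_device *phydev, int page,
			      u32 regnum, u16 val)
{
	int oldpage, ret = 0;

	oldpage = phy_save_page(phydev);	/* takes the MDIO bus lock */
	if (oldpage < 0)
		goto out;			/* restore still unlocks */

	ret = __phy_write(phydev, SKETCH_EXT_PAGE_ACCESS, page);
	if (ret < 0)
		goto out;

	ret = __phy_write(phydev, regnum, val);
out:
	/* puts the original page back, unlocks, returns the first error */
	return phy_restore_page(phydev, oldpage, ret);
}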
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a6c6ce19eeee..acbe84967834 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, | |||
261 | } | 261 | } |
262 | } | 262 | } |
263 | 263 | ||
264 | static bool __team_option_inst_tmp_find(const struct list_head *opts, | ||
265 | const struct team_option_inst *needle) | ||
266 | { | ||
267 | struct team_option_inst *opt_inst; | ||
268 | |||
269 | list_for_each_entry(opt_inst, opts, tmp_list) | ||
270 | if (opt_inst == needle) | ||
271 | return true; | ||
272 | return false; | ||
273 | } | ||
274 | |||
264 | static int __team_options_register(struct team *team, | 275 | static int __team_options_register(struct team *team, |
265 | const struct team_option *option, | 276 | const struct team_option *option, |
266 | size_t option_count) | 277 | size_t option_count) |
@@ -2568,6 +2579,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2568 | if (err) | 2579 | if (err) |
2569 | goto team_put; | 2580 | goto team_put; |
2570 | opt_inst->changed = true; | 2581 | opt_inst->changed = true; |
2582 | |||
2583 | /* dumb/evil user-space can send us duplicate opt, | ||
2584 | * keep only the last one | ||
2585 | */ | ||
2586 | if (__team_option_inst_tmp_find(&opt_inst_list, | ||
2587 | opt_inst)) | ||
2588 | continue; | ||
2589 | |||
2571 | list_add(&opt_inst->tmp_list, &opt_inst_list); | 2590 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
2572 | } | 2591 | } |
2573 | if (!opt_found) { | 2592 | if (!opt_found) { |
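The team fix works because each option instance carries a tmp_list node: before queueing an instance for the change notification, the new helper walks the temporary list and bails out if the instance is already on it, so a userspace request that repeats the same option cannot link the same node twice and corrupt the list. A stand-alone sketch of the same dedup-before-add pattern with the generic list API (struct and names are illustrative):

#include <linux/list.h>
#include <linux/types.h>

struct sketch_opt {
	int value;
	struct list_head tmp_list;
};

/* Return true if @needle is already linked on @opts via tmp_list. */
static bool sketch_opt_on_list(const struct list_head *opts,
			       const struct sketch_opt *needle)
{
	struct sketch_opt *opt;

	list_for_each_entry(opt, opts, tmp_list)
		if (opt == needle)
			return true;
	return false;
}

/* Queue @opt at most once, matching the "keep only the last one" comment. */
static void sketch_opt_queue(struct list_head *opts, struct sketch_opt *opt)
{
	if (sketch_opt_on_list(opts, opt))
		return;
	list_add(&opt->tmp_list, opts);
}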
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 28583aa0c17d..ef33950a45d9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1102 | goto drop; | 1102 | goto drop; |
1103 | 1103 | ||
1104 | len = run_ebpf_filter(tun, skb, len); | 1104 | len = run_ebpf_filter(tun, skb, len); |
1105 | 1105 | if (len == 0 || pskb_trim(skb, len)) | |
1106 | /* Trim extra bytes since we may insert vlan proto & TCI | ||
1107 | * in tun_put_user(). | ||
1108 | */ | ||
1109 | len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; | ||
1110 | if (len <= 0 || pskb_trim(skb, len)) | ||
1111 | goto drop; | 1106 | goto drop; |
1112 | 1107 | ||
1113 | if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) | 1108 | if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) |
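The tun change collapses the VLAN special case into a single rule: run_ebpf_filter() hands back the length the filter wants to keep, 0 means drop, and anything else goes to pskb_trim(), which handles nonlinear skbs and only fails on allocation errors. A hedged sketch of that post-filter policy, with the drop decision returned to the caller instead of jumping to the driver's drop label:

#include <linux/skbuff.h>

/* Sketch: apply a filter-decided length; non-zero return means drop. */
static int sketch_apply_filter_len(struct sk_buff *skb, unsigned int len)
{
	/* len == 0: the filter asked for the packet to be dropped entirely */
	if (len == 0 || pskb_trim(skb, len))
		return -1;
	return 0;
}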
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ca066b785e9f..c853e7410f5a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = { | |||
1107 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ | 1107 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ |
1108 | {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ | 1108 | {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ |
1109 | {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ | 1109 | {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ |
1110 | {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ | ||
1110 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ | 1111 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ |
1111 | {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ | 1112 | {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ |
1112 | {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ | 1113 | {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b187ec7411e..770422e953f7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -147,6 +147,17 @@ struct receive_queue { | |||
147 | struct xdp_rxq_info xdp_rxq; | 147 | struct xdp_rxq_info xdp_rxq; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | /* Control VQ buffers: protected by the rtnl lock */ | ||
151 | struct control_buf { | ||
152 | struct virtio_net_ctrl_hdr hdr; | ||
153 | virtio_net_ctrl_ack status; | ||
154 | struct virtio_net_ctrl_mq mq; | ||
155 | u8 promisc; | ||
156 | u8 allmulti; | ||
157 | __virtio16 vid; | ||
158 | __virtio64 offloads; | ||
159 | }; | ||
160 | |||
150 | struct virtnet_info { | 161 | struct virtnet_info { |
151 | struct virtio_device *vdev; | 162 | struct virtio_device *vdev; |
152 | struct virtqueue *cvq; | 163 | struct virtqueue *cvq; |
@@ -192,14 +203,7 @@ struct virtnet_info { | |||
192 | struct hlist_node node; | 203 | struct hlist_node node; |
193 | struct hlist_node node_dead; | 204 | struct hlist_node node_dead; |
194 | 205 | ||
195 | /* Control VQ buffers: protected by the rtnl lock */ | 206 | struct control_buf *ctrl; |
196 | struct virtio_net_ctrl_hdr ctrl_hdr; | ||
197 | virtio_net_ctrl_ack ctrl_status; | ||
198 | struct virtio_net_ctrl_mq ctrl_mq; | ||
199 | u8 ctrl_promisc; | ||
200 | u8 ctrl_allmulti; | ||
201 | u16 ctrl_vid; | ||
202 | u64 ctrl_offloads; | ||
203 | 207 | ||
204 | /* Ethtool settings */ | 208 | /* Ethtool settings */ |
205 | u8 duplex; | 209 | u8 duplex; |
@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1269 | { | 1273 | { |
1270 | struct receive_queue *rq = | 1274 | struct receive_queue *rq = |
1271 | container_of(napi, struct receive_queue, napi); | 1275 | container_of(napi, struct receive_queue, napi); |
1272 | unsigned int received; | 1276 | struct virtnet_info *vi = rq->vq->vdev->priv; |
1277 | struct send_queue *sq; | ||
1278 | unsigned int received, qp; | ||
1273 | bool xdp_xmit = false; | 1279 | bool xdp_xmit = false; |
1274 | 1280 | ||
1275 | virtnet_poll_cleantx(rq); | 1281 | virtnet_poll_cleantx(rq); |
@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
1280 | if (received < budget) | 1286 | if (received < budget) |
1281 | virtqueue_napi_complete(napi, rq->vq, received); | 1287 | virtqueue_napi_complete(napi, rq->vq, received); |
1282 | 1288 | ||
1283 | if (xdp_xmit) | 1289 | if (xdp_xmit) { |
1290 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + | ||
1291 | smp_processor_id(); | ||
1292 | sq = &vi->sq[qp]; | ||
1293 | virtqueue_kick(sq->vq); | ||
1284 | xdp_do_flush_map(); | 1294 | xdp_do_flush_map(); |
1295 | } | ||
1285 | 1296 | ||
1286 | return received; | 1297 | return received; |
1287 | } | 1298 | } |
@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |||
1454 | /* Caller should know better */ | 1465 | /* Caller should know better */ |
1455 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); | 1466 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
1456 | 1467 | ||
1457 | vi->ctrl_status = ~0; | 1468 | vi->ctrl->status = ~0; |
1458 | vi->ctrl_hdr.class = class; | 1469 | vi->ctrl->hdr.class = class; |
1459 | vi->ctrl_hdr.cmd = cmd; | 1470 | vi->ctrl->hdr.cmd = cmd; |
1460 | /* Add header */ | 1471 | /* Add header */ |
1461 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); | 1472 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
1462 | sgs[out_num++] = &hdr; | 1473 | sgs[out_num++] = &hdr; |
1463 | 1474 | ||
1464 | if (out) | 1475 | if (out) |
1465 | sgs[out_num++] = out; | 1476 | sgs[out_num++] = out; |
1466 | 1477 | ||
1467 | /* Add return status. */ | 1478 | /* Add return status. */ |
1468 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); | 1479 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
1469 | sgs[out_num] = &stat; | 1480 | sgs[out_num] = &stat; |
1470 | 1481 | ||
1471 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); | 1482 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
1472 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); | 1483 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
1473 | 1484 | ||
1474 | if (unlikely(!virtqueue_kick(vi->cvq))) | 1485 | if (unlikely(!virtqueue_kick(vi->cvq))) |
1475 | return vi->ctrl_status == VIRTIO_NET_OK; | 1486 | return vi->ctrl->status == VIRTIO_NET_OK; |
1476 | 1487 | ||
1477 | /* Spin for a response, the kick causes an ioport write, trapping | 1488 | /* Spin for a response, the kick causes an ioport write, trapping |
1478 | * into the hypervisor, so the request should be handled immediately. | 1489 | * into the hypervisor, so the request should be handled immediately. |
@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |||
1481 | !virtqueue_is_broken(vi->cvq)) | 1492 | !virtqueue_is_broken(vi->cvq)) |
1482 | cpu_relax(); | 1493 | cpu_relax(); |
1483 | 1494 | ||
1484 | return vi->ctrl_status == VIRTIO_NET_OK; | 1495 | return vi->ctrl->status == VIRTIO_NET_OK; |
1485 | } | 1496 | } |
1486 | 1497 | ||
1487 | static int virtnet_set_mac_address(struct net_device *dev, void *p) | 1498 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) | |||
1593 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | 1604 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
1594 | return 0; | 1605 | return 0; |
1595 | 1606 | ||
1596 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); | 1607 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1597 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); | 1608 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
1598 | 1609 | ||
1599 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | 1610 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
1600 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { | 1611 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) | |||
1653 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) | 1664 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1654 | return; | 1665 | return; |
1655 | 1666 | ||
1656 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); | 1667 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
1657 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | 1668 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
1658 | 1669 | ||
1659 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); | 1670 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
1660 | 1671 | ||
1661 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1672 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1662 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) | 1673 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
1663 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", | 1674 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
1664 | vi->ctrl_promisc ? "en" : "dis"); | 1675 | vi->ctrl->promisc ? "en" : "dis"); |
1665 | 1676 | ||
1666 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); | 1677 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
1667 | 1678 | ||
1668 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1679 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1669 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) | 1680 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
1670 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", | 1681 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
1671 | vi->ctrl_allmulti ? "en" : "dis"); | 1682 | vi->ctrl->allmulti ? "en" : "dis"); |
1672 | 1683 | ||
1673 | uc_count = netdev_uc_count(dev); | 1684 | uc_count = netdev_uc_count(dev); |
1674 | mc_count = netdev_mc_count(dev); | 1685 | mc_count = netdev_mc_count(dev); |
@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, | |||
1714 | struct virtnet_info *vi = netdev_priv(dev); | 1725 | struct virtnet_info *vi = netdev_priv(dev); |
1715 | struct scatterlist sg; | 1726 | struct scatterlist sg; |
1716 | 1727 | ||
1717 | vi->ctrl_vid = vid; | 1728 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
1718 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | 1729 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
1719 | 1730 | ||
1720 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1731 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
1721 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) | 1732 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, | |||
1729 | struct virtnet_info *vi = netdev_priv(dev); | 1740 | struct virtnet_info *vi = netdev_priv(dev); |
1730 | struct scatterlist sg; | 1741 | struct scatterlist sg; |
1731 | 1742 | ||
1732 | vi->ctrl_vid = vid; | 1743 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
1733 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | 1744 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
1734 | 1745 | ||
1735 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1746 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
1736 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) | 1747 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) | |||
2126 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) | 2137 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
2127 | { | 2138 | { |
2128 | struct scatterlist sg; | 2139 | struct scatterlist sg; |
2129 | vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); | 2140 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
2130 | 2141 | ||
2131 | sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); | 2142 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
2132 | 2143 | ||
2133 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, | 2144 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
2134 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { | 2145 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) | |||
2351 | 2362 | ||
2352 | kfree(vi->rq); | 2363 | kfree(vi->rq); |
2353 | kfree(vi->sq); | 2364 | kfree(vi->sq); |
2365 | kfree(vi->ctrl); | ||
2354 | } | 2366 | } |
2355 | 2367 | ||
2356 | static void _free_receive_bufs(struct virtnet_info *vi) | 2368 | static void _free_receive_bufs(struct virtnet_info *vi) |
@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) | |||
2543 | { | 2555 | { |
2544 | int i; | 2556 | int i; |
2545 | 2557 | ||
2558 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); | ||
2559 | if (!vi->ctrl) | ||
2560 | goto err_ctrl; | ||
2546 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); | 2561 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
2547 | if (!vi->sq) | 2562 | if (!vi->sq) |
2548 | goto err_sq; | 2563 | goto err_sq; |
@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) | |||
2571 | err_rq: | 2586 | err_rq: |
2572 | kfree(vi->sq); | 2587 | kfree(vi->sq); |
2573 | err_sq: | 2588 | err_sq: |
2589 | kfree(vi->ctrl); | ||
2590 | err_ctrl: | ||
2574 | return -ENOMEM; | 2591 | return -ENOMEM; |
2575 | } | 2592 | } |
2576 | 2593 | ||
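The virtio_net rework gathers every control-virtqueue header, status byte and argument into one heap-allocated struct control_buf: vi->ctrl is kzalloc'ed in virtnet_alloc_queues(), kfree'd in virtnet_free_queues(), and the command path builds its scatterlists against those fields instead of members embedded in virtnet_info. A trimmed sketch of how a command header and status entry are prepared, assuming a struct with the same shape as the one added above:

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/virtio_net.h>

/* Reduced control_buf shape (sketch only). */
struct sketch_control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	u8 promisc;
};

/* Fill the header/status scatterlist entries against the heap buffer. */
static void sketch_fill_cmd(struct sketch_control_buf *ctrl,
			    struct scatterlist *hdr_sg,
			    struct scatterlist *stat_sg,
			    u8 class, u8 cmd)
{
	ctrl->status = ~0;		/* device overwrites with VIRTIO_NET_OK */
	ctrl->hdr.class = class;
	ctrl->hdr.cmd = cmd;
	sg_init_one(hdr_sg, &ctrl->hdr, sizeof(ctrl->hdr));
	sg_init_one(stat_sg, &ctrl->status, sizeof(ctrl->status));
}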
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e04937f44f33..9ebe2a689966 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, | |||
1218 | union { | 1218 | union { |
1219 | void *ptr; | 1219 | void *ptr; |
1220 | struct ethhdr *eth; | 1220 | struct ethhdr *eth; |
1221 | struct vlan_ethhdr *veth; | ||
1221 | struct iphdr *ipv4; | 1222 | struct iphdr *ipv4; |
1222 | struct ipv6hdr *ipv6; | 1223 | struct ipv6hdr *ipv6; |
1223 | struct tcphdr *tcp; | 1224 | struct tcphdr *tcp; |
@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, | |||
1228 | if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) | 1229 | if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) |
1229 | return 0; | 1230 | return 0; |
1230 | 1231 | ||
1232 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || | ||
1233 | skb->protocol == cpu_to_be16(ETH_P_8021AD)) | ||
1234 | hlen = sizeof(struct vlan_ethhdr); | ||
1235 | else | ||
1236 | hlen = sizeof(struct ethhdr); | ||
1237 | |||
1231 | hdr.eth = eth_hdr(skb); | 1238 | hdr.eth = eth_hdr(skb); |
1232 | if (gdesc->rcd.v4) { | 1239 | if (gdesc->rcd.v4) { |
1233 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); | 1240 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && |
1234 | hdr.ptr += sizeof(struct ethhdr); | 1241 | hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); |
1242 | hdr.ptr += hlen; | ||
1235 | BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); | 1243 | BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); |
1236 | hlen = hdr.ipv4->ihl << 2; | 1244 | hlen = hdr.ipv4->ihl << 2; |
1237 | hdr.ptr += hdr.ipv4->ihl << 2; | 1245 | hdr.ptr += hdr.ipv4->ihl << 2; |
1238 | } else if (gdesc->rcd.v6) { | 1246 | } else if (gdesc->rcd.v6) { |
1239 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); | 1247 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && |
1240 | hdr.ptr += sizeof(struct ethhdr); | 1248 | hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); |
1249 | hdr.ptr += hlen; | ||
1241 | /* Use an estimated value, since we also need to handle | 1250 | /* Use an estimated value, since we also need to handle |
1242 | * TSO case. | 1251 | * TSO case. |
1243 | */ | 1252 | */ |
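The vmxnet3 change makes the header-length walk VLAN-aware: if skb->protocol says the outer frame is 802.1Q or 802.1AD, the L2 header spans sizeof(struct vlan_ethhdr) (the tag adds 4 bytes), otherwise plain sizeof(struct ethhdr), and only then does the parser step to the IP header. A sketch of just that decision, assuming skb->protocol was already set by eth_type_trans():

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Sketch: L2 header length of a received frame, tag-aware. */
static unsigned int sketch_l2_hdr_len(const struct sk_buff *skb)
{
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
		return sizeof(struct vlan_ethhdr);	/* 18 bytes */
	return sizeof(struct ethhdr);			/* 14 bytes */
}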
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59ec34052a65..a3326463b71f 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig | |||
@@ -103,8 +103,7 @@ config NVDIMM_DAX | |||
103 | Select Y if unsure | 103 | Select Y if unsure |
104 | 104 | ||
105 | config OF_PMEM | 105 | config OF_PMEM |
106 | # FIXME: make tristate once OF_NUMA dependency removed | 106 | tristate "Device-tree support for persistent memory regions" |
107 | bool "Device-tree support for persistent memory regions" | ||
108 | depends on OF | 107 | depends on OF |
109 | default LIBNVDIMM | 108 | default LIBNVDIMM |
110 | help | 109 | help |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) | |||
88 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | 88 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) |
89 | { | 89 | { |
90 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | 90 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); |
91 | int rc = validate_dimm(ndd), cmd_rc = 0; | ||
91 | struct nd_cmd_get_config_data_hdr *cmd; | 92 | struct nd_cmd_get_config_data_hdr *cmd; |
92 | struct nvdimm_bus_descriptor *nd_desc; | 93 | struct nvdimm_bus_descriptor *nd_desc; |
93 | int rc = validate_dimm(ndd); | ||
94 | u32 max_cmd_size, config_size; | 94 | u32 max_cmd_size, config_size; |
95 | size_t offset; | 95 | size_t offset; |
96 | 96 | ||
@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | |||
124 | cmd->in_offset = offset; | 124 | cmd->in_offset = offset; |
125 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | 125 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), |
126 | ND_CMD_GET_CONFIG_DATA, cmd, | 126 | ND_CMD_GET_CONFIG_DATA, cmd, |
127 | cmd->in_length + sizeof(*cmd), NULL); | 127 | cmd->in_length + sizeof(*cmd), &cmd_rc); |
128 | if (rc || cmd->status) { | 128 | if (rc < 0) |
129 | rc = -ENXIO; | 129 | break; |
130 | if (cmd_rc < 0) { | ||
131 | rc = cmd_rc; | ||
130 | break; | 132 | break; |
131 | } | 133 | } |
132 | memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); | 134 | memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); |
@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | |||
140 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | 142 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, |
141 | void *buf, size_t len) | 143 | void *buf, size_t len) |
142 | { | 144 | { |
143 | int rc = validate_dimm(ndd); | ||
144 | size_t max_cmd_size, buf_offset; | 145 | size_t max_cmd_size, buf_offset; |
145 | struct nd_cmd_set_config_hdr *cmd; | 146 | struct nd_cmd_set_config_hdr *cmd; |
147 | int rc = validate_dimm(ndd), cmd_rc = 0; | ||
146 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | 148 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); |
147 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; | 149 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; |
148 | 150 | ||
@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | |||
164 | for (buf_offset = 0; len; len -= cmd->in_length, | 166 | for (buf_offset = 0; len; len -= cmd->in_length, |
165 | buf_offset += cmd->in_length) { | 167 | buf_offset += cmd->in_length) { |
166 | size_t cmd_size; | 168 | size_t cmd_size; |
167 | u32 *status; | ||
168 | 169 | ||
169 | cmd->in_offset = offset + buf_offset; | 170 | cmd->in_offset = offset + buf_offset; |
170 | cmd->in_length = min(max_cmd_size, len); | 171 | cmd->in_length = min(max_cmd_size, len); |
@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | |||
172 | 173 | ||
173 | /* status is output in the last 4-bytes of the command buffer */ | 174 | /* status is output in the last 4-bytes of the command buffer */ |
174 | cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); | 175 | cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); |
175 | status = ((void *) cmd) + cmd_size - sizeof(u32); | ||
176 | 176 | ||
177 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | 177 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), |
178 | ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); | 178 | ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); |
179 | if (rc || *status) { | 179 | if (rc < 0) |
180 | rc = rc ? rc : -ENXIO; | 180 | break; |
181 | if (cmd_rc < 0) { | ||
182 | rc = cmd_rc; | ||
181 | break; | 183 | break; |
182 | } | 184 | } |
183 | } | 185 | } |
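The dimm_devs.c change stops peeking at the raw status word in the command buffer and instead asks the ->ndctl() handler for a translated result through its last argument: a negative rc means the transport itself failed, while a negative cmd_rc is the firmware status already converted to an errno. A small sketch of that two-level check (the ndctl call itself is elided):

#include <linux/errno.h>

/* Sketch: fold transport and translated firmware status into one errno. */
static int sketch_check_ndctl_result(int rc, int cmd_rc)
{
	if (rc < 0)		/* bus/transport level failure */
		return rc;
	if (cmd_rc < 0)		/* firmware error, already an errno */
		return cmd_rc;
	return 0;		/* success: keep consuming the payload */
}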
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c | |||
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) | |||
67 | */ | 67 | */ |
68 | memset(&ndr_desc, 0, sizeof(ndr_desc)); | 68 | memset(&ndr_desc, 0, sizeof(ndr_desc)); |
69 | ndr_desc.attr_groups = region_attr_groups; | 69 | ndr_desc.attr_groups = region_attr_groups; |
70 | ndr_desc.numa_node = of_node_to_nid(np); | 70 | ndr_desc.numa_node = dev_to_node(&pdev->dev); |
71 | ndr_desc.res = &pdev->resource[i]; | 71 | ndr_desc.res = &pdev->resource[i]; |
72 | ndr_desc.of_node = np; | 72 | ndr_desc.of_node = np; |
73 | set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); | 73 | set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, | |||
740 | tx->callback = dma_xfer_callback; | 740 | tx->callback = dma_xfer_callback; |
741 | tx->callback_param = req; | 741 | tx->callback_param = req; |
742 | 742 | ||
743 | req->dmach = chan; | ||
744 | req->sync = sync; | ||
745 | req->status = DMA_IN_PROGRESS; | 743 | req->status = DMA_IN_PROGRESS; |
746 | init_completion(&req->req_comp); | ||
747 | kref_get(&req->refcount); | 744 | kref_get(&req->refcount); |
748 | 745 | ||
749 | cookie = dmaengine_submit(tx); | 746 | cookie = dmaengine_submit(tx); |
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, | |||
831 | if (!req) | 828 | if (!req) |
832 | return -ENOMEM; | 829 | return -ENOMEM; |
833 | 830 | ||
834 | kref_init(&req->refcount); | ||
835 | |||
836 | ret = get_dma_channel(priv); | 831 | ret = get_dma_channel(priv); |
837 | if (ret) { | 832 | if (ret) { |
838 | kfree(req); | 833 | kfree(req); |
839 | return ret; | 834 | return ret; |
840 | } | 835 | } |
836 | chan = priv->dmach; | ||
837 | |||
838 | kref_init(&req->refcount); | ||
839 | init_completion(&req->req_comp); | ||
840 | req->dir = dir; | ||
841 | req->filp = filp; | ||
842 | req->priv = priv; | ||
843 | req->dmach = chan; | ||
844 | req->sync = sync; | ||
841 | 845 | ||
842 | /* | 846 | /* |
843 | * If parameter loc_addr != NULL, we are transferring data from/to | 847 | * If parameter loc_addr != NULL, we are transferring data from/to |
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, | |||
925 | xfer->offset, xfer->length); | 929 | xfer->offset, xfer->length); |
926 | } | 930 | } |
927 | 931 | ||
928 | req->dir = dir; | ||
929 | req->filp = filp; | ||
930 | req->priv = priv; | ||
931 | chan = priv->dmach; | ||
932 | |||
933 | nents = dma_map_sg(chan->device->dev, | 932 | nents = dma_map_sg(chan->device->dev, |
934 | req->sgt.sgl, req->sgt.nents, dir); | 933 | req->sgt.sgl, req->sgt.nents, dir); |
935 | if (nents == 0) { | 934 | if (nents == 0) { |
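The rio_mport_cdev reshuffle initializes the request completely (kref_init(), init_completion(), dir/filp/priv/dmach/sync) right after the DMA channel is acquired, so every later error path can simply drop the reference and the submit path only bumps the refcount and flips the status. A sketch of that "fully initialize before the first reference can be dropped" ordering with a reduced request struct:

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct sketch_dma_req {
	struct kref refcount;
	struct completion done;
	void *chan;			/* stands in for struct dma_chan * */
};

static void sketch_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct sketch_dma_req, refcount));
}

/* Allocate and fully initialize the request before anyone can see it. */
static struct sketch_dma_req *sketch_req_alloc(void *chan)
{
	struct sketch_dma_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return NULL;
	kref_init(&req->refcount);	/* refcount == 1, owned by the caller */
	init_completion(&req->done);
	req->chan = chan;
	return req;
}

/* Error paths can now just drop the reference. */
static void sketch_req_fail(struct sketch_dma_req *req)
{
	kref_put(&req->refcount, sketch_req_release);
}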
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index f035c2f25d35..131f1989f6f3 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/io.h> | 27 | #include <asm/io.h> |
28 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
29 | #include <asm/vtoc.h> | 29 | #include <asm/vtoc.h> |
30 | #include <asm/diag.h> | ||
31 | 30 | ||
32 | #include "dasd_int.h" | 31 | #include "dasd_int.h" |
33 | #include "dasd_diag.h" | 32 | #include "dasd_diag.h" |
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 5f8d9ea69ebd..eceba3858cef 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c | |||
@@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized; | |||
18 | * Used to keep track of the size of the event masks. Qemu until version 2.11 | 18 | * Used to keep track of the size of the event masks. Qemu until version 2.11 |
19 | * only supports 4 and needs a workaround. | 19 | * only supports 4 and needs a workaround. |
20 | */ | 20 | */ |
21 | bool sclp_mask_compat_mode; | 21 | bool sclp_mask_compat_mode __section(.data); |
22 | 22 | ||
23 | void sclp_early_wait_irq(void) | 23 | void sclp_early_wait_irq(void) |
24 | { | 24 | { |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 50a313806dde..2ad6f12f3d49 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/hash.h> | 22 | #include <linux/hash.h> |
23 | #include <linux/hashtable.h> | 23 | #include <linux/hashtable.h> |
24 | #include <linux/string.h> | ||
25 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
26 | #include "qeth_core.h" | 25 | #include "qeth_core.h" |
27 | #include "qeth_l2.h" | 26 | #include "qeth_l2.h" |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3b0c8b8a7634..066b5c3aaae6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = { | |||
176 | 176 | ||
177 | static void __exit smsg_exit(void) | 177 | static void __exit smsg_exit(void) |
178 | { | 178 | { |
179 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 179 | cpcmd("SET SMSG OFF", NULL, 0, NULL); |
180 | device_unregister(smsg_dev); | 180 | device_unregister(smsg_dev); |
181 | iucv_unregister(&smsg_handler, 1); | 181 | iucv_unregister(&smsg_handler, 1); |
182 | driver_unregister(&smsg_driver); | 182 | driver_unregister(&smsg_driver); |
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c | |||
@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); | |||
55 | #define WDT_CTRL_WDT_INTR BIT(2) | 55 | #define WDT_CTRL_WDT_INTR BIT(2) |
56 | #define WDT_CTRL_RESET_SYSTEM BIT(1) | 56 | #define WDT_CTRL_RESET_SYSTEM BIT(1) |
57 | #define WDT_CTRL_ENABLE BIT(0) | 57 | #define WDT_CTRL_ENABLE BIT(0) |
58 | #define WDT_TIMEOUT_STATUS 0x10 | ||
59 | #define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) | ||
58 | 60 | ||
59 | /* | 61 | /* |
60 | * WDT_RESET_WIDTH controls the characteristics of the external pulse (if | 62 | * WDT_RESET_WIDTH controls the characteristics of the external pulse (if |
@@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) | |||
192 | struct device_node *np; | 194 | struct device_node *np; |
193 | const char *reset_type; | 195 | const char *reset_type; |
194 | u32 duration; | 196 | u32 duration; |
197 | u32 status; | ||
195 | int ret; | 198 | int ret; |
196 | 199 | ||
197 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); | 200 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); |
@@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) | |||
307 | writel(duration - 1, wdt->base + WDT_RESET_WIDTH); | 310 | writel(duration - 1, wdt->base + WDT_RESET_WIDTH); |
308 | } | 311 | } |
309 | 312 | ||
313 | status = readl(wdt->base + WDT_TIMEOUT_STATUS); | ||
314 | if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) | ||
315 | wdt->wdd.bootstatus = WDIOF_CARDRESET; | ||
316 | |||
310 | ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); | 317 | ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); |
311 | if (ret) { | 318 | if (ret) { |
312 | dev_err(&pdev->dev, "failed to register\n"); | 319 | dev_err(&pdev->dev, "failed to register\n"); |
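The aspeed_wdt addition samples the timeout-status register once at probe time and, when the "boot from secondary" bit is set, reports WDIOF_CARDRESET through wdd.bootstatus so userspace can tell a watchdog-triggered reboot from a clean one. A sketch of that probe-time check, reusing the offset and bit defined above:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/watchdog.h>

#define SKETCH_WDT_TIMEOUT_STATUS			0x10
#define SKETCH_WDT_TIMEOUT_STATUS_BOOT_SECONDARY	BIT(1)

/* Sketch: derive the watchdog bootstatus from the controller status reg. */
static void sketch_set_bootstatus(struct watchdog_device *wdd,
				  void __iomem *base)
{
	u32 status = readl(base + SKETCH_WDT_TIMEOUT_STATUS);

	if (status & SKETCH_WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
		wdd->bootstatus = WDIOF_CARDRESET;
}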
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c | |||
@@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, | |||
121 | } | 121 | } |
122 | 122 | ||
123 | static const struct watchdog_info rwdt_ident = { | 123 | static const struct watchdog_info rwdt_ident = { |
124 | .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, | 124 | .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | |
125 | WDIOF_CARDRESET, | ||
125 | .identity = "Renesas WDT Watchdog", | 126 | .identity = "Renesas WDT Watchdog", |
126 | }; | 127 | }; |
127 | 128 | ||
@@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) | |||
197 | return PTR_ERR(clk); | 198 | return PTR_ERR(clk); |
198 | 199 | ||
199 | pm_runtime_enable(&pdev->dev); | 200 | pm_runtime_enable(&pdev->dev); |
200 | |||
201 | pm_runtime_get_sync(&pdev->dev); | 201 | pm_runtime_get_sync(&pdev->dev); |
202 | priv->clk_rate = clk_get_rate(clk); | 202 | priv->clk_rate = clk_get_rate(clk); |
203 | priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & | ||
204 | RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; | ||
203 | pm_runtime_put(&pdev->dev); | 205 | pm_runtime_put(&pdev->dev); |
204 | 206 | ||
205 | if (!priv->clk_rate) { | 207 | if (!priv->clk_rate) { |
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c | |||
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
299 | if (sch311x_wdt_set_heartbeat(new_timeout)) | 299 | if (sch311x_wdt_set_heartbeat(new_timeout)) |
300 | return -EINVAL; | 300 | return -EINVAL; |
301 | sch311x_wdt_keepalive(); | 301 | sch311x_wdt_keepalive(); |
302 | /* Fall */ | 302 | /* Fall through */ |
303 | case WDIOC_GETTIMEOUT: | 303 | case WDIOC_GETTIMEOUT: |
304 | return put_user(timeout, p); | 304 | return put_user(timeout, p); |
305 | default: | 305 | default: |
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c | |||
@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
427 | return -EINVAL; | 427 | return -EINVAL; |
428 | 428 | ||
429 | wdt_keepalive(); | 429 | wdt_keepalive(); |
430 | /* Fall */ | 430 | /* Fall through */ |
431 | 431 | ||
432 | case WDIOC_GETTIMEOUT: | 432 | case WDIOC_GETTIMEOUT: |
433 | return put_user(timeout, uarg.i); | 433 | return put_user(timeout, uarg.i); |
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c | |||
@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, | |||
178 | timeout = new_timeout; | 178 | timeout = new_timeout; |
179 | wafwdt_stop(); | 179 | wafwdt_stop(); |
180 | wafwdt_start(); | 180 | wafwdt_start(); |
181 | /* Fall */ | 181 | /* Fall through */ |
182 | case WDIOC_GETTIMEOUT: | 182 | case WDIOC_GETTIMEOUT: |
183 | return put_user(timeout, p); | 183 | return put_user(timeout, p); |
184 | 184 | ||
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index 89d9744ece61..ed593d1042a6 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c | |||
@@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev) | |||
95 | struct xen_pcibk_config_quirk *quirk; | 95 | struct xen_pcibk_config_quirk *quirk; |
96 | int ret = 0; | 96 | int ret = 0; |
97 | 97 | ||
98 | quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); | 98 | quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); |
99 | if (!quirk) { | 99 | if (!quirk) { |
100 | ret = -ENOMEM; | 100 | ret = -ENOMEM; |
101 | goto out; | 101 | goto out; |
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9e480fdebe1f..59661db144e5 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
@@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) | |||
71 | 71 | ||
72 | dev_dbg(&dev->dev, "pcistub_device_alloc\n"); | 72 | dev_dbg(&dev->dev, "pcistub_device_alloc\n"); |
73 | 73 | ||
74 | psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); | 74 | psdev = kzalloc(sizeof(*psdev), GFP_KERNEL); |
75 | if (!psdev) | 75 | if (!psdev) |
76 | return NULL; | 76 | return NULL; |
77 | 77 | ||
@@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev) | |||
364 | * here and then to call kfree(pci_get_drvdata(psdev->dev)). | 364 | * here and then to call kfree(pci_get_drvdata(psdev->dev)). |
365 | */ | 365 | */ |
366 | dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") | 366 | dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") |
367 | + strlen(pci_name(dev)) + 1, GFP_ATOMIC); | 367 | + strlen(pci_name(dev)) + 1, GFP_KERNEL); |
368 | if (!dev_data) { | 368 | if (!dev_data) { |
369 | err = -ENOMEM; | 369 | err = -ENOMEM; |
370 | goto out; | 370 | goto out; |
@@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
577 | } | 577 | } |
578 | 578 | ||
579 | if (!match) { | 579 | if (!match) { |
580 | pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); | 580 | pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); |
581 | if (!pci_dev_id) { | 581 | if (!pci_dev_id) { |
582 | err = -ENOMEM; | 582 | err = -ENOMEM; |
583 | goto out; | 583 | goto out; |
@@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, | |||
1149 | } | 1149 | } |
1150 | dev = psdev->dev; | 1150 | dev = psdev->dev; |
1151 | 1151 | ||
1152 | field = kzalloc(sizeof(*field), GFP_ATOMIC); | 1152 | field = kzalloc(sizeof(*field), GFP_KERNEL); |
1153 | if (!field) { | 1153 | if (!field) { |
1154 | err = -ENOMEM; | 1154 | err = -ENOMEM; |
1155 | goto out; | 1155 | goto out; |
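The xen-pciback conversions all follow the same reasoning: these allocations happen in ordinary process context (probe, sysfs writes), where sleeping is allowed, so GFP_KERNEL is the right flag; it can reclaim memory and is far less likely to fail than GFP_ATOMIC, which should be reserved for callers holding a spinlock or running in interrupt context. A short, purely illustrative contrast:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_item {
	int id;
};

/* Process context, no locks held: a sleeping allocation is fine. */
static struct sketch_item *sketch_alloc_process_ctx(void)
{
	return kzalloc(sizeof(struct sketch_item), GFP_KERNEL);
}

/* Under a spinlock we must not sleep, so GFP_ATOMIC is the only option. */
static struct sketch_item *sketch_alloc_under_lock(spinlock_t *lock)
{
	struct sketch_item *item;

	spin_lock(lock);
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	spin_unlock(lock);
	return item;
}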
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0d6d9264d6a9..c3e201025ef0 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, | |||
403 | { | 403 | { |
404 | struct { | 404 | struct { |
405 | struct xsd_sockmsg hdr; | 405 | struct xsd_sockmsg hdr; |
406 | const char body[16]; | 406 | char body[16]; |
407 | } msg; | 407 | } msg; |
408 | int rc; | 408 | int rc; |
409 | 409 | ||
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, | |||
412 | msg.hdr.len = strlen(reply) + 1; | 412 | msg.hdr.len = strlen(reply) + 1; |
413 | if (msg.hdr.len > sizeof(msg.body)) | 413 | if (msg.hdr.len > sizeof(msg.body)) |
414 | return -E2BIG; | 414 | return -E2BIG; |
415 | memcpy(&msg.body, reply, msg.hdr.len); | ||
415 | 416 | ||
416 | mutex_lock(&u->reply_mutex); | 417 | mutex_lock(&u->reply_mutex); |
417 | rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); | 418 | rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); |
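The xenbus fix matters because the reply body used to be a const array that was never written, so queue_reply() pushed sizeof(hdr) + len bytes whose body part was whatever happened to be on the stack; dropping const and adding the bounded memcpy() makes the message self-contained before it is queued. A sketch of the bounded copy-then-queue preparation (the queueing itself is elided):

#include <linux/errno.h>
#include <linux/string.h>

struct sketch_msg {
	unsigned int len;
	char body[16];
};

/* Sketch: reject oversized replies, then copy the NUL-terminated string. */
static int sketch_fill_reply(struct sketch_msg *msg, const char *reply)
{
	msg->len = strlen(reply) + 1;		/* include the terminator */
	if (msg->len > sizeof(msg->body))
		return -E2BIG;
	memcpy(msg->body, reply, msg->len);
	return 0;
}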
diff --git a/fs/afs/server.c b/fs/afs/server.c index e23be63998a8..629c74986cff 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
@@ -428,8 +428,15 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list) | |||
428 | } | 428 | } |
429 | write_sequnlock(&net->fs_lock); | 429 | write_sequnlock(&net->fs_lock); |
430 | 430 | ||
431 | if (deleted) | 431 | if (deleted) { |
432 | write_seqlock(&net->fs_addr_lock); | ||
433 | if (!hlist_unhashed(&server->addr4_link)) | ||
434 | hlist_del_rcu(&server->addr4_link); | ||
435 | if (!hlist_unhashed(&server->addr6_link)) | ||
436 | hlist_del_rcu(&server->addr6_link); | ||
437 | write_sequnlock(&net->fs_addr_lock); | ||
432 | afs_destroy_server(net, server); | 438 | afs_destroy_server(net, server); |
439 | } | ||
433 | } | 440 | } |
434 | } | 441 | } |
435 | 442 | ||
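The afs change closes a window where a garbage-collected server could still be reachable through the per-address hash lists: before afs_destroy_server() runs, both address links are removed under the fs_addr_lock seqlock, with hlist_unhashed() guarding against entries that were never added. A sketch of that unhash-before-free pattern:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/seqlock.h>

struct sketch_server {
	struct hlist_node addr_link;
};

/* Sketch: unhash under the writer lock so lockless walkers can't find it. */
static void sketch_unhash_server(struct sketch_server *server,
				 seqlock_t *addr_lock)
{
	write_seqlock(addr_lock);
	if (!hlist_unhashed(&server->addr_link))
		hlist_del_rcu(&server->addr_link);
	write_sequnlock(addr_lock);
}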
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 82e8f6edfb48..b12e37f27530 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir, | |||
749 | 749 | ||
750 | autofs4_del_active(dentry); | 750 | autofs4_del_active(dentry); |
751 | 751 | ||
752 | inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); | 752 | inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); |
753 | if (!inode) | 753 | if (!inode) |
754 | return -ENOMEM; | 754 | return -ENOMEM; |
755 | d_add(dentry, inode); | 755 | d_add(dentry, inode); |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 41e04183e4ce..4ad6f669fe34 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, | |||
377 | } else | 377 | } else |
378 | map_addr = vm_mmap(filep, addr, size, prot, type, off); | 378 | map_addr = vm_mmap(filep, addr, size, prot, type, off); |
379 | 379 | ||
380 | if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) | 380 | if ((type & MAP_FIXED_NOREPLACE) && |
381 | pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", | 381 | PTR_ERR((void *)map_addr) == -EEXIST) |
382 | task_pid_nr(current), current->comm, | 382 | pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", |
383 | (void *)addr); | 383 | task_pid_nr(current), current->comm, (void *)addr); |
384 | 384 | ||
385 | return(map_addr); | 385 | return(map_addr); |
386 | } | 386 | } |
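The binfmt_elf tweak narrows the warning: with MAP_FIXED_NOREPLACE, vm_mmap() returns -EEXIST precisely when the requested range is already mapped, so the message should fire only for that errno, and it now prints the raw address with %px because the hashed %p output is useless for diagnosing a fixed-mapping clash. A sketch of the errno test, assuming map_addr carries the error the usual ERR_PTR way and the caller already checked for MAP_FIXED_NOREPLACE:

#include <linux/err.h>
#include <linux/printk.h>

/* Sketch: warn only when a MAP_FIXED_NOREPLACE request hit an existing VMA. */
static void sketch_warn_on_clash(unsigned long map_addr, unsigned long addr)
{
	if (PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("elf segment at %px requested but the memory is mapped already\n",
			(void *)addr);
}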
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5474ef14d6e6..2771cc56a622 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -459,6 +459,25 @@ struct btrfs_block_rsv { | |||
459 | unsigned short full; | 459 | unsigned short full; |
460 | unsigned short type; | 460 | unsigned short type; |
461 | unsigned short failfast; | 461 | unsigned short failfast; |
462 | |||
463 | /* | ||
464 | * Qgroup equivalent for @size @reserved | ||
465 | * | ||
466 | * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care | ||
467 | * about things like csum size nor how many tree blocks it will need to | ||
468 | * reserve. | ||
469 | * | ||
470 | * Qgroup cares more about net change of the extent usage. | ||
471 | * | ||
472 | * So for one newly inserted file extent, in worst case it will cause | ||
473 | * leaf split and level increase, nodesize for each file extent is | ||
474 | * already too much. | ||
475 | * | ||
476 | * In short, qgroup_size/reserved is the upper limit of possible needed | ||
477 | * qgroup metadata reservation. | ||
478 | */ | ||
479 | u64 qgroup_rsv_size; | ||
480 | u64 qgroup_rsv_reserved; | ||
462 | }; | 481 | }; |
463 | 482 | ||
464 | /* | 483 | /* |
@@ -714,6 +733,12 @@ struct btrfs_delayed_root; | |||
714 | */ | 733 | */ |
715 | #define BTRFS_FS_EXCL_OP 16 | 734 | #define BTRFS_FS_EXCL_OP 16 |
716 | 735 | ||
736 | /* | ||
737 | * Used to tell transaction_kthread that an immediate commit is needed, so | ||
738 | * it does not have to wait for commit_interval | ||
739 | */ | ||
740 | #define BTRFS_FS_NEED_ASYNC_COMMIT 17 | ||
741 | |||
717 | struct btrfs_fs_info { | 742 | struct btrfs_fs_info { |
718 | u8 fsid[BTRFS_FSID_SIZE]; | 743 | u8 fsid[BTRFS_FSID_SIZE]; |
719 | u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; | 744 | u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 06ec8ab6d9ba..a8d492dbd3e7 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, | |||
556 | dst_rsv = &fs_info->delayed_block_rsv; | 556 | dst_rsv = &fs_info->delayed_block_rsv; |
557 | 557 | ||
558 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); | 558 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); |
559 | |||
560 | /* | ||
561 | * Here we migrate space rsv from transaction rsv, since have already | ||
562 | * reserved space when starting a transaction. So no need to reserve | ||
563 | * qgroup space here. | ||
564 | */ | ||
559 | ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); | 565 | ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); |
560 | if (!ret) { | 566 | if (!ret) { |
561 | trace_btrfs_space_reservation(fs_info, "delayed_item", | 567 | trace_btrfs_space_reservation(fs_info, "delayed_item", |
@@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, | |||
577 | return; | 583 | return; |
578 | 584 | ||
579 | rsv = &fs_info->delayed_block_rsv; | 585 | rsv = &fs_info->delayed_block_rsv; |
580 | btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved); | 586 | /* |
587 | * Check btrfs_delayed_item_reserve_metadata() to see why we don't need | ||
588 | * to release/reserve qgroup space. | ||
589 | */ | ||
581 | trace_btrfs_space_reservation(fs_info, "delayed_item", | 590 | trace_btrfs_space_reservation(fs_info, "delayed_item", |
582 | item->key.objectid, item->bytes_reserved, | 591 | item->key.objectid, item->bytes_reserved, |
583 | 0); | 592 | 0); |
@@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata( | |||
602 | 611 | ||
603 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); | 612 | num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); |
604 | 613 | ||
605 | ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); | ||
606 | if (ret < 0) | ||
607 | return ret; | ||
608 | /* | 614 | /* |
609 | * btrfs_dirty_inode will update the inode under btrfs_join_transaction | 615 | * btrfs_dirty_inode will update the inode under btrfs_join_transaction |
610 | * which doesn't reserve space for speed. This is a problem since we | 616 | * which doesn't reserve space for speed. This is a problem since we |
@@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata( | |||
616 | */ | 622 | */ |
617 | if (!src_rsv || (!trans->bytes_reserved && | 623 | if (!src_rsv || (!trans->bytes_reserved && |
618 | src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { | 624 | src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { |
625 | ret = btrfs_qgroup_reserve_meta_prealloc(root, | ||
626 | fs_info->nodesize, true); | ||
627 | if (ret < 0) | ||
628 | return ret; | ||
619 | ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, | 629 | ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, |
620 | BTRFS_RESERVE_NO_FLUSH); | 630 | BTRFS_RESERVE_NO_FLUSH); |
621 | /* | 631 | /* |
@@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata( | |||
634 | "delayed_inode", | 644 | "delayed_inode", |
635 | btrfs_ino(inode), | 645 | btrfs_ino(inode), |
636 | num_bytes, 1); | 646 | num_bytes, 1); |
647 | } else { | ||
648 | btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize); | ||
637 | } | 649 | } |
638 | return ret; | 650 | return ret; |
639 | } | 651 | } |
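The delayed-inode change ties the qgroup reservation to the only branch that really consumes new space: when the bytes are not migrated from the transaction rsv, nodesize bytes of qgroup metadata are prealloc-reserved first, and the matching free runs on the path where that block reservation is not kept, so reserve and free stay strictly paired. A schematic sketch of that shape, with btrfs internals replaced by stand-in callbacks (reserve/release/add_rsv stand in for btrfs_qgroup_reserve_meta_prealloc(), btrfs_qgroup_free_meta_prealloc() and btrfs_block_rsv_add()):

#include <linux/types.h>

static int sketch_reserve_meta(bool migrate_from_trans,
			       int (*reserve)(u64 bytes),
			       void (*release)(u64 bytes),
			       int (*add_rsv)(u64 bytes),
			       u64 nodesize)
{
	int ret;

	if (migrate_from_trans)		/* space already accounted for */
		return add_rsv(nodesize);

	ret = reserve(nodesize);	/* qgroup prealloc first */
	if (ret < 0)
		return ret;

	ret = add_rsv(nodesize);
	if (ret < 0)
		release(nodesize);	/* keep reserve/free paired */
	return ret;
}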
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 9e98295de7ce..e1b0651686f7 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c | |||
@@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, | |||
540 | struct btrfs_delayed_ref_head *head_ref, | 540 | struct btrfs_delayed_ref_head *head_ref, |
541 | struct btrfs_qgroup_extent_record *qrecord, | 541 | struct btrfs_qgroup_extent_record *qrecord, |
542 | u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, | 542 | u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, |
543 | int action, int is_data, int *qrecord_inserted_ret, | 543 | int action, int is_data, int is_system, |
544 | int *qrecord_inserted_ret, | ||
544 | int *old_ref_mod, int *new_ref_mod) | 545 | int *old_ref_mod, int *new_ref_mod) |
546 | |||
545 | { | 547 | { |
546 | struct btrfs_delayed_ref_head *existing; | 548 | struct btrfs_delayed_ref_head *existing; |
547 | struct btrfs_delayed_ref_root *delayed_refs; | 549 | struct btrfs_delayed_ref_root *delayed_refs; |
@@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, | |||
585 | head_ref->ref_mod = count_mod; | 587 | head_ref->ref_mod = count_mod; |
586 | head_ref->must_insert_reserved = must_insert_reserved; | 588 | head_ref->must_insert_reserved = must_insert_reserved; |
587 | head_ref->is_data = is_data; | 589 | head_ref->is_data = is_data; |
590 | head_ref->is_system = is_system; | ||
588 | head_ref->ref_tree = RB_ROOT; | 591 | head_ref->ref_tree = RB_ROOT; |
589 | INIT_LIST_HEAD(&head_ref->ref_add_list); | 592 | INIT_LIST_HEAD(&head_ref->ref_add_list); |
590 | RB_CLEAR_NODE(&head_ref->href_node); | 593 | RB_CLEAR_NODE(&head_ref->href_node); |
@@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, | |||
772 | struct btrfs_delayed_ref_root *delayed_refs; | 775 | struct btrfs_delayed_ref_root *delayed_refs; |
773 | struct btrfs_qgroup_extent_record *record = NULL; | 776 | struct btrfs_qgroup_extent_record *record = NULL; |
774 | int qrecord_inserted; | 777 | int qrecord_inserted; |
778 | int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID); | ||
775 | 779 | ||
776 | BUG_ON(extent_op && extent_op->is_data); | 780 | BUG_ON(extent_op && extent_op->is_data); |
777 | ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); | 781 | ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); |
@@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, | |||
800 | */ | 804 | */ |
801 | head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, | 805 | head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, |
802 | bytenr, num_bytes, 0, 0, action, 0, | 806 | bytenr, num_bytes, 0, 0, action, 0, |
803 | &qrecord_inserted, old_ref_mod, | 807 | is_system, &qrecord_inserted, |
804 | new_ref_mod); | 808 | old_ref_mod, new_ref_mod); |
805 | 809 | ||
806 | add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, | 810 | add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, |
807 | num_bytes, parent, ref_root, level, action); | 811 | num_bytes, parent, ref_root, level, action); |
@@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, | |||
868 | */ | 872 | */ |
869 | head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, | 873 | head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, |
870 | bytenr, num_bytes, ref_root, reserved, | 874 | bytenr, num_bytes, ref_root, reserved, |
871 | action, 1, &qrecord_inserted, | 875 | action, 1, 0, &qrecord_inserted, |
872 | old_ref_mod, new_ref_mod); | 876 | old_ref_mod, new_ref_mod); |
873 | 877 | ||
874 | add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, | 878 | add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, |
@@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, | |||
898 | delayed_refs = &trans->transaction->delayed_refs; | 902 | delayed_refs = &trans->transaction->delayed_refs; |
899 | spin_lock(&delayed_refs->lock); | 903 | spin_lock(&delayed_refs->lock); |
900 | 904 | ||
905 | /* | ||
906 | * extent_ops just modify the flags of an extent and they don't result | ||
907 | * in ref count changes, hence it's safe to pass false/0 for is_system | ||
908 | * argument | ||
909 | */ | ||
901 | add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, | 910 | add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, |
902 | num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, | 911 | num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, |
903 | extent_op->is_data, NULL, NULL, NULL); | 912 | extent_op->is_data, 0, NULL, NULL, NULL); |
904 | 913 | ||
905 | spin_unlock(&delayed_refs->lock); | 914 | spin_unlock(&delayed_refs->lock); |
906 | return 0; | 915 | return 0; |
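The delayed-ref change records at head creation whether the ref belongs to the chunk (system) tree, and extent-tree.c later maps that bit straight to a space-info type instead of looking up a block group that may already be gone. A small user-space sketch of that classification; the constants mirror the btrfs definitions but are only illustrative here:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_TREE_OBJECTID   3ULL
#define BLOCK_GROUP_DATA      (1ULL << 0)
#define BLOCK_GROUP_SYSTEM    (1ULL << 1)
#define BLOCK_GROUP_METADATA  (1ULL << 2)

struct ref_head {
	unsigned int is_data:1;
	unsigned int is_system:1;	/* the new bit added by the patch */
};

/* What add_delayed_tree_ref() now records when the head is created. */
static void init_head(struct ref_head *h, uint64_t ref_root, int is_data)
{
	h->is_data = is_data;
	h->is_system = (ref_root == CHUNK_TREE_OBJECTID);
}

/* What cleanup_ref_head() does with it: pick the space-info flags directly. */
static uint64_t head_space_flags(const struct ref_head *h)
{
	if (h->is_data)
		return BLOCK_GROUP_DATA;
	if (h->is_system)
		return BLOCK_GROUP_SYSTEM;
	return BLOCK_GROUP_METADATA;
}

int main(void)
{
	struct ref_head h;

	init_head(&h, CHUNK_TREE_OBJECTID, 0);
	printf("flags=0x%llx\n", (unsigned long long)head_space_flags(&h)); /* 0x2 */
	return 0;
}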
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 741869dbc316..7f00db50bd24 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
@@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head { | |||
127 | */ | 127 | */ |
128 | unsigned int must_insert_reserved:1; | 128 | unsigned int must_insert_reserved:1; |
129 | unsigned int is_data:1; | 129 | unsigned int is_data:1; |
130 | unsigned int is_system:1; | ||
130 | unsigned int processing:1; | 131 | unsigned int processing:1; |
131 | }; | 132 | }; |
132 | 133 | ||
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4ac8b1d21baf..60caa68c3618 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg) | |||
1824 | 1824 | ||
1825 | now = get_seconds(); | 1825 | now = get_seconds(); |
1826 | if (cur->state < TRANS_STATE_BLOCKED && | 1826 | if (cur->state < TRANS_STATE_BLOCKED && |
1827 | !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && | ||
1827 | (now < cur->start_time || | 1828 | (now < cur->start_time || |
1828 | now - cur->start_time < fs_info->commit_interval)) { | 1829 | now - cur->start_time < fs_info->commit_interval)) { |
1829 | spin_unlock(&fs_info->trans_lock); | 1830 | spin_unlock(&fs_info->trans_lock); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 75cfb80d2551..e2f16b68fcbf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2601,13 +2601,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, | |||
2601 | trace_run_delayed_ref_head(fs_info, head, 0); | 2601 | trace_run_delayed_ref_head(fs_info, head, 0); |
2602 | 2602 | ||
2603 | if (head->total_ref_mod < 0) { | 2603 | if (head->total_ref_mod < 0) { |
2604 | struct btrfs_block_group_cache *cache; | 2604 | struct btrfs_space_info *space_info; |
2605 | u64 flags; | ||
2605 | 2606 | ||
2606 | cache = btrfs_lookup_block_group(fs_info, head->bytenr); | 2607 | if (head->is_data) |
2607 | ASSERT(cache); | 2608 | flags = BTRFS_BLOCK_GROUP_DATA; |
2608 | percpu_counter_add(&cache->space_info->total_bytes_pinned, | 2609 | else if (head->is_system) |
2610 | flags = BTRFS_BLOCK_GROUP_SYSTEM; | ||
2611 | else | ||
2612 | flags = BTRFS_BLOCK_GROUP_METADATA; | ||
2613 | space_info = __find_space_info(fs_info, flags); | ||
2614 | ASSERT(space_info); | ||
2615 | percpu_counter_add(&space_info->total_bytes_pinned, | ||
2609 | -head->num_bytes); | 2616 | -head->num_bytes); |
2610 | btrfs_put_block_group(cache); | ||
2611 | 2617 | ||
2612 | if (head->is_data) { | 2618 | if (head->is_data) { |
2613 | spin_lock(&delayed_refs->lock); | 2619 | spin_lock(&delayed_refs->lock); |
@@ -5559,14 +5565,18 @@ again: | |||
5559 | 5565 | ||
5560 | static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, | 5566 | static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, |
5561 | struct btrfs_block_rsv *block_rsv, | 5567 | struct btrfs_block_rsv *block_rsv, |
5562 | struct btrfs_block_rsv *dest, u64 num_bytes) | 5568 | struct btrfs_block_rsv *dest, u64 num_bytes, |
5569 | u64 *qgroup_to_release_ret) | ||
5563 | { | 5570 | { |
5564 | struct btrfs_space_info *space_info = block_rsv->space_info; | 5571 | struct btrfs_space_info *space_info = block_rsv->space_info; |
5572 | u64 qgroup_to_release = 0; | ||
5565 | u64 ret; | 5573 | u64 ret; |
5566 | 5574 | ||
5567 | spin_lock(&block_rsv->lock); | 5575 | spin_lock(&block_rsv->lock); |
5568 | if (num_bytes == (u64)-1) | 5576 | if (num_bytes == (u64)-1) { |
5569 | num_bytes = block_rsv->size; | 5577 | num_bytes = block_rsv->size; |
5578 | qgroup_to_release = block_rsv->qgroup_rsv_size; | ||
5579 | } | ||
5570 | block_rsv->size -= num_bytes; | 5580 | block_rsv->size -= num_bytes; |
5571 | if (block_rsv->reserved >= block_rsv->size) { | 5581 | if (block_rsv->reserved >= block_rsv->size) { |
5572 | num_bytes = block_rsv->reserved - block_rsv->size; | 5582 | num_bytes = block_rsv->reserved - block_rsv->size; |
@@ -5575,6 +5585,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, | |||
5575 | } else { | 5585 | } else { |
5576 | num_bytes = 0; | 5586 | num_bytes = 0; |
5577 | } | 5587 | } |
5588 | if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { | ||
5589 | qgroup_to_release = block_rsv->qgroup_rsv_reserved - | ||
5590 | block_rsv->qgroup_rsv_size; | ||
5591 | block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; | ||
5592 | } else { | ||
5593 | qgroup_to_release = 0; | ||
5594 | } | ||
5578 | spin_unlock(&block_rsv->lock); | 5595 | spin_unlock(&block_rsv->lock); |
5579 | 5596 | ||
5580 | ret = num_bytes; | 5597 | ret = num_bytes; |
@@ -5597,6 +5614,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, | |||
5597 | space_info_add_old_bytes(fs_info, space_info, | 5614 | space_info_add_old_bytes(fs_info, space_info, |
5598 | num_bytes); | 5615 | num_bytes); |
5599 | } | 5616 | } |
5617 | if (qgroup_to_release_ret) | ||
5618 | *qgroup_to_release_ret = qgroup_to_release; | ||
5600 | return ret; | 5619 | return ret; |
5601 | } | 5620 | } |
5602 | 5621 | ||
@@ -5738,17 +5757,21 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, | |||
5738 | struct btrfs_root *root = inode->root; | 5757 | struct btrfs_root *root = inode->root; |
5739 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; | 5758 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; |
5740 | u64 num_bytes = 0; | 5759 | u64 num_bytes = 0; |
5760 | u64 qgroup_num_bytes = 0; | ||
5741 | int ret = -ENOSPC; | 5761 | int ret = -ENOSPC; |
5742 | 5762 | ||
5743 | spin_lock(&block_rsv->lock); | 5763 | spin_lock(&block_rsv->lock); |
5744 | if (block_rsv->reserved < block_rsv->size) | 5764 | if (block_rsv->reserved < block_rsv->size) |
5745 | num_bytes = block_rsv->size - block_rsv->reserved; | 5765 | num_bytes = block_rsv->size - block_rsv->reserved; |
5766 | if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size) | ||
5767 | qgroup_num_bytes = block_rsv->qgroup_rsv_size - | ||
5768 | block_rsv->qgroup_rsv_reserved; | ||
5746 | spin_unlock(&block_rsv->lock); | 5769 | spin_unlock(&block_rsv->lock); |
5747 | 5770 | ||
5748 | if (num_bytes == 0) | 5771 | if (num_bytes == 0) |
5749 | return 0; | 5772 | return 0; |
5750 | 5773 | ||
5751 | ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); | 5774 | ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true); |
5752 | if (ret) | 5775 | if (ret) |
5753 | return ret; | 5776 | return ret; |
5754 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); | 5777 | ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); |
@@ -5756,7 +5779,13 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, | |||
5756 | block_rsv_add_bytes(block_rsv, num_bytes, 0); | 5779 | block_rsv_add_bytes(block_rsv, num_bytes, 0); |
5757 | trace_btrfs_space_reservation(root->fs_info, "delalloc", | 5780 | trace_btrfs_space_reservation(root->fs_info, "delalloc", |
5758 | btrfs_ino(inode), num_bytes, 1); | 5781 | btrfs_ino(inode), num_bytes, 1); |
5759 | } | 5782 | |
5783 | /* Don't forget to increase qgroup_rsv_reserved */ | ||
5784 | spin_lock(&block_rsv->lock); | ||
5785 | block_rsv->qgroup_rsv_reserved += qgroup_num_bytes; | ||
5786 | spin_unlock(&block_rsv->lock); | ||
5787 | } else | ||
5788 | btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); | ||
5760 | return ret; | 5789 | return ret; |
5761 | } | 5790 | } |
5762 | 5791 | ||
@@ -5777,20 +5806,23 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free) | |||
5777 | struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; | 5806 | struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; |
5778 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; | 5807 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; |
5779 | u64 released = 0; | 5808 | u64 released = 0; |
5809 | u64 qgroup_to_release = 0; | ||
5780 | 5810 | ||
5781 | /* | 5811 | /* |
5782 | * Since we statically set the block_rsv->size we just want to say we | 5812 | * Since we statically set the block_rsv->size we just want to say we |
5783 | * are releasing 0 bytes, and then we'll just get the reservation over | 5813 | * are releasing 0 bytes, and then we'll just get the reservation over |
5784 | * the size free'd. | 5814 | * the size free'd. |
5785 | */ | 5815 | */ |
5786 | released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0); | 5816 | released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0, |
5817 | &qgroup_to_release); | ||
5787 | if (released > 0) | 5818 | if (released > 0) |
5788 | trace_btrfs_space_reservation(fs_info, "delalloc", | 5819 | trace_btrfs_space_reservation(fs_info, "delalloc", |
5789 | btrfs_ino(inode), released, 0); | 5820 | btrfs_ino(inode), released, 0); |
5790 | if (qgroup_free) | 5821 | if (qgroup_free) |
5791 | btrfs_qgroup_free_meta_prealloc(inode->root, released); | 5822 | btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release); |
5792 | else | 5823 | else |
5793 | btrfs_qgroup_convert_reserved_meta(inode->root, released); | 5824 | btrfs_qgroup_convert_reserved_meta(inode->root, |
5825 | qgroup_to_release); | ||
5794 | } | 5826 | } |
5795 | 5827 | ||
5796 | void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, | 5828 | void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, |
@@ -5802,7 +5834,7 @@ void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, | |||
5802 | if (global_rsv == block_rsv || | 5834 | if (global_rsv == block_rsv || |
5803 | block_rsv->space_info != global_rsv->space_info) | 5835 | block_rsv->space_info != global_rsv->space_info) |
5804 | global_rsv = NULL; | 5836 | global_rsv = NULL; |
5805 | block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes); | 5837 | block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL); |
5806 | } | 5838 | } |
5807 | 5839 | ||
5808 | static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | 5840 | static void update_global_block_rsv(struct btrfs_fs_info *fs_info) |
@@ -5882,7 +5914,7 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
5882 | static void release_global_block_rsv(struct btrfs_fs_info *fs_info) | 5914 | static void release_global_block_rsv(struct btrfs_fs_info *fs_info) |
5883 | { | 5915 | { |
5884 | block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, | 5916 | block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, |
5885 | (u64)-1); | 5917 | (u64)-1, NULL); |
5886 | WARN_ON(fs_info->trans_block_rsv.size > 0); | 5918 | WARN_ON(fs_info->trans_block_rsv.size > 0); |
5887 | WARN_ON(fs_info->trans_block_rsv.reserved > 0); | 5919 | WARN_ON(fs_info->trans_block_rsv.reserved > 0); |
5888 | WARN_ON(fs_info->chunk_block_rsv.size > 0); | 5920 | WARN_ON(fs_info->chunk_block_rsv.size > 0); |
@@ -5906,7 +5938,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) | |||
5906 | WARN_ON_ONCE(!list_empty(&trans->new_bgs)); | 5938 | WARN_ON_ONCE(!list_empty(&trans->new_bgs)); |
5907 | 5939 | ||
5908 | block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, | 5940 | block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, |
5909 | trans->chunk_bytes_reserved); | 5941 | trans->chunk_bytes_reserved, NULL); |
5910 | trans->chunk_bytes_reserved = 0; | 5942 | trans->chunk_bytes_reserved = 0; |
5911 | } | 5943 | } |
5912 | 5944 | ||
@@ -6011,6 +6043,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, | |||
6011 | { | 6043 | { |
6012 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; | 6044 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; |
6013 | u64 reserve_size = 0; | 6045 | u64 reserve_size = 0; |
6046 | u64 qgroup_rsv_size = 0; | ||
6014 | u64 csum_leaves; | 6047 | u64 csum_leaves; |
6015 | unsigned outstanding_extents; | 6048 | unsigned outstanding_extents; |
6016 | 6049 | ||
@@ -6023,9 +6056,17 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, | |||
6023 | inode->csum_bytes); | 6056 | inode->csum_bytes); |
6024 | reserve_size += btrfs_calc_trans_metadata_size(fs_info, | 6057 | reserve_size += btrfs_calc_trans_metadata_size(fs_info, |
6025 | csum_leaves); | 6058 | csum_leaves); |
6059 | /* | ||
6060 | * For qgroup rsv, the calculation is very simple: | ||
6061 | * account one nodesize for each outstanding extent | ||
6062 | * | ||
6063 | * This is overestimating in most cases. | ||
6064 | */ | ||
6065 | qgroup_rsv_size = outstanding_extents * fs_info->nodesize; | ||
6026 | 6066 | ||
6027 | spin_lock(&block_rsv->lock); | 6067 | spin_lock(&block_rsv->lock); |
6028 | block_rsv->size = reserve_size; | 6068 | block_rsv->size = reserve_size; |
6069 | block_rsv->qgroup_rsv_size = qgroup_rsv_size; | ||
6029 | spin_unlock(&block_rsv->lock); | 6070 | spin_unlock(&block_rsv->lock); |
6030 | } | 6071 | } |
6031 | 6072 | ||
@@ -8403,7 +8444,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, | |||
8403 | struct btrfs_block_rsv *block_rsv, u32 blocksize) | 8444 | struct btrfs_block_rsv *block_rsv, u32 blocksize) |
8404 | { | 8445 | { |
8405 | block_rsv_add_bytes(block_rsv, blocksize, 0); | 8446 | block_rsv_add_bytes(block_rsv, blocksize, 0); |
8406 | block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); | 8447 | block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL); |
8407 | } | 8448 | } |
8408 | 8449 | ||
8409 | /* | 8450 | /* |
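The extent-tree hunks thread a separate qgroup size/reserved pair through the block rsv so that releases report how much qgroup prealloc can be dropped, rather than reusing the metadata byte count (the inode's qgroup_rsv_size is recomputed as outstanding_extents * nodesize). A user-space sketch of the release arithmetic; field names follow the patch, the numbers in main() are made up:

#include <stdio.h>
#include <stdint.h>

struct block_rsv {
	uint64_t size, reserved;
	uint64_t qgroup_rsv_size, qgroup_rsv_reserved;
};

/* Shrink the rsv and report, separately, how many metadata bytes and how
 * many qgroup bytes fall free -- the core of block_rsv_release_bytes(). */
static uint64_t release_bytes(struct block_rsv *rsv, uint64_t num_bytes,
			      uint64_t *qgroup_to_release)
{
	rsv->size -= num_bytes;

	if (rsv->reserved >= rsv->size) {
		num_bytes = rsv->reserved - rsv->size;
		rsv->reserved = rsv->size;
	} else {
		num_bytes = 0;
	}

	if (rsv->qgroup_rsv_reserved >= rsv->qgroup_rsv_size) {
		*qgroup_to_release = rsv->qgroup_rsv_reserved -
				     rsv->qgroup_rsv_size;
		rsv->qgroup_rsv_reserved = rsv->qgroup_rsv_size;
	} else {
		*qgroup_to_release = 0;
	}
	return num_bytes;
}

int main(void)
{
	/* One outstanding extent went away, so both sizes were recomputed
	 * smaller than what is currently reserved. */
	struct block_rsv rsv = {
		.size = 65536, .reserved = 131072,
		.qgroup_rsv_size = 16384, .qgroup_rsv_reserved = 32768,
	};
	uint64_t qgroup;
	uint64_t freed = release_bytes(&rsv, 0, &qgroup);

	printf("meta freed %llu, qgroup freed %llu\n",
	       (unsigned long long)freed, (unsigned long long)qgroup);
	/* meta freed 65536, qgroup freed 16384 */
	return 0;
}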
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0167a9c97c9c..f660ba1e5e58 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1748,7 +1748,7 @@ again: | |||
1748 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | 1748 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, |
1749 | lockstart, lockend, &cached_state); | 1749 | lockstart, lockend, &cached_state); |
1750 | btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, | 1750 | btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, |
1751 | (ret != 0)); | 1751 | true); |
1752 | if (ret) { | 1752 | if (ret) { |
1753 | btrfs_drop_pages(pages, num_pages); | 1753 | btrfs_drop_pages(pages, num_pages); |
1754 | break; | 1754 | break; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e064c49c9a9a..d241285a0d2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/uio.h> | 31 | #include <linux/uio.h> |
32 | #include <linux/magic.h> | 32 | #include <linux/magic.h> |
33 | #include <linux/iversion.h> | 33 | #include <linux/iversion.h> |
34 | #include <asm/unaligned.h> | ||
34 | #include "ctree.h" | 35 | #include "ctree.h" |
35 | #include "disk-io.h" | 36 | #include "disk-io.h" |
36 | #include "transaction.h" | 37 | #include "transaction.h" |
@@ -5905,11 +5906,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) | |||
5905 | struct dir_entry *entry = addr; | 5906 | struct dir_entry *entry = addr; |
5906 | char *name = (char *)(entry + 1); | 5907 | char *name = (char *)(entry + 1); |
5907 | 5908 | ||
5908 | ctx->pos = entry->offset; | 5909 | ctx->pos = get_unaligned(&entry->offset); |
5909 | if (!dir_emit(ctx, name, entry->name_len, entry->ino, | 5910 | if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), |
5910 | entry->type)) | 5911 | get_unaligned(&entry->ino), |
5912 | get_unaligned(&entry->type))) | ||
5911 | return 1; | 5913 | return 1; |
5912 | addr += sizeof(struct dir_entry) + entry->name_len; | 5914 | addr += sizeof(struct dir_entry) + |
5915 | get_unaligned(&entry->name_len); | ||
5913 | ctx->pos++; | 5916 | ctx->pos++; |
5914 | } | 5917 | } |
5915 | return 0; | 5918 | return 0; |
@@ -5999,14 +6002,15 @@ again: | |||
5999 | } | 6002 | } |
6000 | 6003 | ||
6001 | entry = addr; | 6004 | entry = addr; |
6002 | entry->name_len = name_len; | 6005 | put_unaligned(name_len, &entry->name_len); |
6003 | name_ptr = (char *)(entry + 1); | 6006 | name_ptr = (char *)(entry + 1); |
6004 | read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), | 6007 | read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), |
6005 | name_len); | 6008 | name_len); |
6006 | entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; | 6009 | put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)], |
6010 | &entry->type); | ||
6007 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | 6011 | btrfs_dir_item_key_to_cpu(leaf, di, &location); |
6008 | entry->ino = location.objectid; | 6012 | put_unaligned(location.objectid, &entry->ino); |
6009 | entry->offset = found_key.offset; | 6013 | put_unaligned(found_key.offset, &entry->offset); |
6010 | entries++; | 6014 | entries++; |
6011 | addr += sizeof(struct dir_entry) + name_len; | 6015 | addr += sizeof(struct dir_entry) + name_len; |
6012 | total_len += sizeof(struct dir_entry) + name_len; | 6016 | total_len += sizeof(struct dir_entry) + name_len; |
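Because readdir entries are packed back to back in a byte buffer (each struct dir_entry immediately followed by its name), the struct fields can land on unaligned addresses, which is why the inode.c hunks wrap every access in get_unaligned()/put_unaligned(). A user-space sketch of the same idea using memcpy, the portable equivalent; the field layout below is illustrative, not the kernel's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dir_entry {
	uint64_t ino;
	uint64_t offset;
	uint32_t name_len;
	uint8_t  type;
};

int main(void)
{
	unsigned char buf[256];
	unsigned char *p = buf + 1;		/* deliberately misaligned */
	const char *name = "example";
	uint32_t name_len = (uint32_t)strlen(name);
	uint64_t ino = 257;
	uint32_t len;
	uint64_t inum;

	/* Store fields the way the patched readdir path does with
	 * put_unaligned(): byte copies, never a plain misaligned store. */
	memcpy(p + offsetof(struct dir_entry, name_len), &name_len, sizeof(name_len));
	memcpy(p + offsetof(struct dir_entry, ino), &ino, sizeof(ino));
	memcpy(p + sizeof(struct dir_entry), name, name_len);

	/* Read them back the way the patched btrfs_filldir() does with
	 * get_unaligned(). */
	memcpy(&len, p + offsetof(struct dir_entry, name_len), sizeof(len));
	memcpy(&inum, p + offsetof(struct dir_entry, ino), sizeof(inum));
	printf("ino %llu name %.*s\n", (unsigned long long)inum,
	       (int)len, (const char *)(p + sizeof(struct dir_entry)));
	return 0;
}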
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 124276bba8cf..21a831d3d087 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c | |||
@@ -189,9 +189,10 @@ void btrfs_print_leaf(struct extent_buffer *l) | |||
189 | fs_info = l->fs_info; | 189 | fs_info = l->fs_info; |
190 | nr = btrfs_header_nritems(l); | 190 | nr = btrfs_header_nritems(l); |
191 | 191 | ||
192 | btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d", | 192 | btrfs_info(fs_info, |
193 | btrfs_header_bytenr(l), nr, | 193 | "leaf %llu gen %llu total ptrs %d free space %d owner %llu", |
194 | btrfs_leaf_free_space(fs_info, l)); | 194 | btrfs_header_bytenr(l), btrfs_header_generation(l), nr, |
195 | btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l)); | ||
195 | for (i = 0 ; i < nr ; i++) { | 196 | for (i = 0 ; i < nr ; i++) { |
196 | item = btrfs_item_nr(i); | 197 | item = btrfs_item_nr(i); |
197 | btrfs_item_key_to_cpu(l, &key, i); | 198 | btrfs_item_key_to_cpu(l, &key, i); |
@@ -325,7 +326,7 @@ void btrfs_print_leaf(struct extent_buffer *l) | |||
325 | } | 326 | } |
326 | } | 327 | } |
327 | 328 | ||
328 | void btrfs_print_tree(struct extent_buffer *c) | 329 | void btrfs_print_tree(struct extent_buffer *c, bool follow) |
329 | { | 330 | { |
330 | struct btrfs_fs_info *fs_info; | 331 | struct btrfs_fs_info *fs_info; |
331 | int i; u32 nr; | 332 | int i; u32 nr; |
@@ -342,15 +343,19 @@ void btrfs_print_tree(struct extent_buffer *c) | |||
342 | return; | 343 | return; |
343 | } | 344 | } |
344 | btrfs_info(fs_info, | 345 | btrfs_info(fs_info, |
345 | "node %llu level %d total ptrs %d free spc %u", | 346 | "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu", |
346 | btrfs_header_bytenr(c), level, nr, | 347 | btrfs_header_bytenr(c), level, btrfs_header_generation(c), |
347 | (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr); | 348 | nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr, |
349 | btrfs_header_owner(c)); | ||
348 | for (i = 0; i < nr; i++) { | 350 | for (i = 0; i < nr; i++) { |
349 | btrfs_node_key_to_cpu(c, &key, i); | 351 | btrfs_node_key_to_cpu(c, &key, i); |
350 | pr_info("\tkey %d (%llu %u %llu) block %llu\n", | 352 | pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n", |
351 | i, key.objectid, key.type, key.offset, | 353 | i, key.objectid, key.type, key.offset, |
352 | btrfs_node_blockptr(c, i)); | 354 | btrfs_node_blockptr(c, i), |
355 | btrfs_node_ptr_generation(c, i)); | ||
353 | } | 356 | } |
357 | if (!follow) | ||
358 | return; | ||
354 | for (i = 0; i < nr; i++) { | 359 | for (i = 0; i < nr; i++) { |
355 | struct btrfs_key first_key; | 360 | struct btrfs_key first_key; |
356 | struct extent_buffer *next; | 361 | struct extent_buffer *next; |
@@ -372,7 +377,7 @@ void btrfs_print_tree(struct extent_buffer *c) | |||
372 | if (btrfs_header_level(next) != | 377 | if (btrfs_header_level(next) != |
373 | level - 1) | 378 | level - 1) |
374 | BUG(); | 379 | BUG(); |
375 | btrfs_print_tree(next); | 380 | btrfs_print_tree(next, follow); |
376 | free_extent_buffer(next); | 381 | free_extent_buffer(next); |
377 | } | 382 | } |
378 | } | 383 | } |
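btrfs_print_tree() gains a follow flag so callers can dump a single node without walking every child. A toy user-space version of that pattern over an invented n-ary tree type, just to show the guard:

#include <stdbool.h>
#include <stdio.h>

struct node {
	int level;
	long long bytenr;
	int nr;				/* number of children */
	struct node *child[4];
};

/* Print one node's header and pointers; recurse only if asked, mirroring
 * the new "follow" argument. */
static void print_tree(const struct node *c, bool follow)
{
	printf("node %lld level %d total ptrs %d\n", c->bytenr, c->level, c->nr);
	for (int i = 0; i < c->nr; i++)
		printf("\tptr %d -> block %lld\n", i, c->child[i]->bytenr);
	if (!follow)
		return;
	for (int i = 0; i < c->nr; i++)
		print_tree(c->child[i], follow);
}

int main(void)
{
	struct node leaf1 = { .level = 0, .bytenr = 200, .nr = 0 };
	struct node leaf2 = { .level = 0, .bytenr = 300, .nr = 0 };
	struct node root  = { .level = 1, .bytenr = 100, .nr = 2,
			      .child = { &leaf1, &leaf2 } };

	print_tree(&root, false);	/* just the root node */
	print_tree(&root, true);	/* full dump */
	return 0;
}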
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h index 4a98481688f4..e6bb38fd75ad 100644 --- a/fs/btrfs/print-tree.h +++ b/fs/btrfs/print-tree.h | |||
@@ -7,6 +7,6 @@ | |||
7 | #define BTRFS_PRINT_TREE_H | 7 | #define BTRFS_PRINT_TREE_H |
8 | 8 | ||
9 | void btrfs_print_leaf(struct extent_buffer *l); | 9 | void btrfs_print_leaf(struct extent_buffer *l); |
10 | void btrfs_print_tree(struct extent_buffer *c); | 10 | void btrfs_print_tree(struct extent_buffer *c, bool follow); |
11 | 11 | ||
12 | #endif | 12 | #endif |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 09c7e4fd550f..9fb758d5077a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/workqueue.h> | 12 | #include <linux/workqueue.h> |
13 | #include <linux/btrfs.h> | 13 | #include <linux/btrfs.h> |
14 | #include <linux/sizes.h> | ||
14 | 15 | ||
15 | #include "ctree.h" | 16 | #include "ctree.h" |
16 | #include "transaction.h" | 17 | #include "transaction.h" |
@@ -2375,8 +2376,21 @@ out: | |||
2375 | return ret; | 2376 | return ret; |
2376 | } | 2377 | } |
2377 | 2378 | ||
2378 | static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) | 2379 | /* |
2380 | * Two limits to commit transaction in advance. | ||
2381 | * | ||
2382 | * For RATIO, it will be 1/RATIO of the remaining limit | ||
2383 | * (excluding data and prealloc meta) as threshold. | ||
2384 | * For SIZE, it will be in byte unit as threshold. | ||
2385 | */ | ||
2386 | #define QGROUP_PERTRANS_RATIO 32 | ||
2387 | #define QGROUP_PERTRANS_SIZE SZ_32M | ||
2388 | static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, | ||
2389 | const struct btrfs_qgroup *qg, u64 num_bytes) | ||
2379 | { | 2390 | { |
2391 | u64 limit; | ||
2392 | u64 threshold; | ||
2393 | |||
2380 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && | 2394 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && |
2381 | qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) | 2395 | qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) |
2382 | return false; | 2396 | return false; |
@@ -2385,6 +2399,31 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) | |||
2385 | qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) | 2399 | qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) |
2386 | return false; | 2400 | return false; |
2387 | 2401 | ||
2402 | /* | ||
2403 | * Even if we passed the check, it's better to check if reservation | ||
2404 | * for meta_pertrans is pushing us near limit. | ||
2405 | * If there is too much pertrans reservation or it's near the limit, | ||
2406 | * let's try commit transaction to free some, using transaction_kthread | ||
2407 | */ | ||
2408 | if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | | ||
2409 | BTRFS_QGROUP_LIMIT_MAX_EXCL))) { | ||
2410 | if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) | ||
2411 | limit = qg->max_excl; | ||
2412 | else | ||
2413 | limit = qg->max_rfer; | ||
2414 | threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] - | ||
2415 | qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) / | ||
2416 | QGROUP_PERTRANS_RATIO; | ||
2417 | threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE); | ||
2418 | |||
2419 | /* | ||
2420 | * Use transaction_kthread to commit transaction, so we no | ||
2421 | * longer need to bother nested transaction nor lock context. | ||
2422 | */ | ||
2423 | if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold) | ||
2424 | btrfs_commit_transaction_locksafe(fs_info); | ||
2425 | } | ||
2426 | |||
2388 | return true; | 2427 | return true; |
2389 | } | 2428 | } |
2390 | 2429 | ||
@@ -2434,7 +2473,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, | |||
2434 | 2473 | ||
2435 | qg = unode_aux_to_qgroup(unode); | 2474 | qg = unode_aux_to_qgroup(unode); |
2436 | 2475 | ||
2437 | if (enforce && !qgroup_check_limits(qg, num_bytes)) { | 2476 | if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { |
2438 | ret = -EDQUOT; | 2477 | ret = -EDQUOT; |
2439 | goto out; | 2478 | goto out; |
2440 | } | 2479 | } |
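The qgroup_check_limits() addition computes an early-commit threshold: 1/32 of the remaining limit (after subtracting data and prealloc meta reservations), capped at 32 MiB, and asks for an async commit once the per-transaction meta reservation exceeds it. The same arithmetic in user-space form, with arbitrary numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PERTRANS_RATIO 32
#define PERTRANS_SIZE  (32ULL * 1024 * 1024)	/* SZ_32M */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* True when the pertrans reservation is large enough that the kernel would
 * kick transaction_kthread for an early commit. */
static bool should_commit_early(uint64_t limit, uint64_t rsv_data,
				uint64_t rsv_meta_prealloc,
				uint64_t rsv_meta_pertrans)
{
	uint64_t threshold = (limit - rsv_data - rsv_meta_prealloc) /
			     PERTRANS_RATIO;

	threshold = min_u64(threshold, PERTRANS_SIZE);
	return rsv_meta_pertrans > threshold;
}

int main(void)
{
	uint64_t mib = 1024 * 1024;

	/* 1 GiB limit, 600 MiB data + 100 MiB prealloc meta reserved:
	 * threshold = 324 MiB / 32, roughly 10.1 MiB. */
	printf("%d\n", should_commit_early(1024 * mib, 600 * mib,
					   100 * mib, 16 * mib)); /* 1 */
	printf("%d\n", should_commit_early(1024 * mib, 600 * mib,
					   100 * mib, 4 * mib));  /* 0 */
	return 0;
}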
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 63fdcab64b01..c944b4769e3c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -2267,6 +2267,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) | |||
2267 | */ | 2267 | */ |
2268 | cur_trans->state = TRANS_STATE_COMPLETED; | 2268 | cur_trans->state = TRANS_STATE_COMPLETED; |
2269 | wake_up(&cur_trans->commit_wait); | 2269 | wake_up(&cur_trans->commit_wait); |
2270 | clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); | ||
2270 | 2271 | ||
2271 | spin_lock(&fs_info->trans_lock); | 2272 | spin_lock(&fs_info->trans_lock); |
2272 | list_del_init(&cur_trans->list); | 2273 | list_del_init(&cur_trans->list); |
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index c88fccd80bc5..d8c0826bc2c7 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -199,6 +199,20 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); | |||
199 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans); | 199 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans); |
200 | int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | 200 | int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, |
201 | int wait_for_unblock); | 201 | int wait_for_unblock); |
202 | |||
203 | /* | ||
204 | * Try to commit transaction asynchronously, so this is safe to call | ||
205 | * even holding a spinlock. | ||
206 | * | ||
207 | * It's done by informing transaction_kthread to commit transaction without | ||
208 | * waiting for commit interval. | ||
209 | */ | ||
210 | static inline void btrfs_commit_transaction_locksafe( | ||
211 | struct btrfs_fs_info *fs_info) | ||
212 | { | ||
213 | set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); | ||
214 | wake_up_process(fs_info->transaction_kthread); | ||
215 | } | ||
202 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); | 216 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); |
203 | int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); | 217 | int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); |
204 | void btrfs_throttle(struct btrfs_fs_info *fs_info); | 218 | void btrfs_throttle(struct btrfs_fs_info *fs_info); |
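btrfs_commit_transaction_locksafe() never blocks: it only sets BTRFS_FS_NEED_ASYNC_COMMIT and wakes transaction_kthread, which then skips the commit-interval check (the disk-io.c hunk); the bit is cleared when the commit completes (the transaction.c hunk). A stripped-down pthread sketch of that request flag pattern, polling where the real kthread sleeps and is woken; build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool need_async_commit = false;
static atomic_bool stop = false;

/* Stand-in for transaction_kthread: normally commits on a timer, but an
 * asserted flag overrides the interval check. */
static void *transaction_kthread(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		if (atomic_load(&need_async_commit)) {
			printf("committing transaction early\n");
			atomic_store(&need_async_commit, false); /* cleared at commit end */
		}
		usleep(10000);
	}
	return NULL;
}

/* Stand-in for btrfs_commit_transaction_locksafe(): safe to call while
 * holding a spinlock because it only flips a flag and wakes the worker. */
static void commit_transaction_locksafe(void)
{
	atomic_store(&need_async_commit, true);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, transaction_kthread, NULL);
	commit_transaction_locksafe();
	usleep(100000);
	atomic_store(&stop, true);
	pthread_join(tid, NULL);
	return 0;
}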
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 8bf60250309e..ae056927080d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -669,13 +669,15 @@ void ceph_fill_file_time(struct inode *inode, int issued, | |||
669 | CEPH_CAP_FILE_BUFFER| | 669 | CEPH_CAP_FILE_BUFFER| |
670 | CEPH_CAP_AUTH_EXCL| | 670 | CEPH_CAP_AUTH_EXCL| |
671 | CEPH_CAP_XATTR_EXCL)) { | 671 | CEPH_CAP_XATTR_EXCL)) { |
672 | if (timespec_compare(ctime, &inode->i_ctime) > 0) { | 672 | if (ci->i_version == 0 || |
673 | timespec_compare(ctime, &inode->i_ctime) > 0) { | ||
673 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", | 674 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", |
674 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, | 675 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, |
675 | ctime->tv_sec, ctime->tv_nsec); | 676 | ctime->tv_sec, ctime->tv_nsec); |
676 | inode->i_ctime = *ctime; | 677 | inode->i_ctime = *ctime; |
677 | } | 678 | } |
678 | if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { | 679 | if (ci->i_version == 0 || |
680 | ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { | ||
679 | /* the MDS did a utimes() */ | 681 | /* the MDS did a utimes() */ |
680 | dout("mtime %ld.%09ld -> %ld.%09ld " | 682 | dout("mtime %ld.%09ld -> %ld.%09ld " |
681 | "tw %d -> %d\n", | 683 | "tw %d -> %d\n", |
@@ -795,7 +797,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page, | |||
795 | new_issued = ~issued & le32_to_cpu(info->cap.caps); | 797 | new_issued = ~issued & le32_to_cpu(info->cap.caps); |
796 | 798 | ||
797 | /* update inode */ | 799 | /* update inode */ |
798 | ci->i_version = le64_to_cpu(info->version); | ||
799 | inode->i_rdev = le32_to_cpu(info->rdev); | 800 | inode->i_rdev = le32_to_cpu(info->rdev); |
800 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; | 801 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; |
801 | 802 | ||
@@ -868,6 +869,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page, | |||
868 | xattr_blob = NULL; | 869 | xattr_blob = NULL; |
869 | } | 870 | } |
870 | 871 | ||
872 | /* finally update i_version */ | ||
873 | ci->i_version = le64_to_cpu(info->version); | ||
874 | |||
871 | inode->i_mapping->a_ops = &ceph_aops; | 875 | inode->i_mapping->a_ops = &ceph_aops; |
872 | 876 | ||
873 | switch (inode->i_mode & S_IFMT) { | 877 | switch (inode->i_mode & S_IFMT) { |
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index fe5567655662..0e74690d11bc 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h | |||
@@ -54,7 +54,7 @@ do { \ | |||
54 | pr_debug_ ## ratefunc("%s: " \ | 54 | pr_debug_ ## ratefunc("%s: " \ |
55 | fmt, __FILE__, ##__VA_ARGS__); \ | 55 | fmt, __FILE__, ##__VA_ARGS__); \ |
56 | } else if ((type) & VFS) { \ | 56 | } else if ((type) & VFS) { \ |
57 | pr_err_ ## ratefunc("CuIFS VFS: " \ | 57 | pr_err_ ## ratefunc("CIFS VFS: " \ |
58 | fmt, ##__VA_ARGS__); \ | 58 | fmt, ##__VA_ARGS__); \ |
59 | } else if ((type) & NOISY && (NOISY != 0)) { \ | 59 | } else if ((type) & NOISY && (NOISY != 0)) { \ |
60 | pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \ | 60 | pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \ |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 81ba6e0d88d8..925844343038 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
684 | goto mknod_out; | 684 | goto mknod_out; |
685 | } | 685 | } |
686 | 686 | ||
687 | if (!S_ISCHR(mode) && !S_ISBLK(mode)) | ||
688 | goto mknod_out; | ||
689 | |||
687 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) | 690 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) |
688 | goto mknod_out; | 691 | goto mknod_out; |
689 | 692 | ||
@@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
692 | 695 | ||
693 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); | 696 | buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); |
694 | if (buf == NULL) { | 697 | if (buf == NULL) { |
695 | kfree(full_path); | ||
696 | rc = -ENOMEM; | 698 | rc = -ENOMEM; |
697 | free_xid(xid); | 699 | goto mknod_out; |
698 | return rc; | ||
699 | } | 700 | } |
700 | 701 | ||
701 | if (backup_cred(cifs_sb)) | 702 | if (backup_cred(cifs_sb)) |
@@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, | |||
742 | pdev->minor = cpu_to_le64(MINOR(device_number)); | 743 | pdev->minor = cpu_to_le64(MINOR(device_number)); |
743 | rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, | 744 | rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, |
744 | &bytes_written, iov, 1); | 745 | &bytes_written, iov, 1); |
745 | } /* else if (S_ISFIFO) */ | 746 | } |
746 | tcon->ses->server->ops->close(xid, tcon, &fid); | 747 | tcon->ses->server->ops->close(xid, tcon, &fid); |
747 | d_drop(direntry); | 748 | d_drop(direntry); |
748 | 749 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4bcd4e838b47..23fd430fe74a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3462,7 +3462,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) | |||
3462 | * If the page is mmap'ed into a process' page tables, then we need to make | 3462 | * If the page is mmap'ed into a process' page tables, then we need to make |
3463 | * sure that it doesn't change while being written back. | 3463 | * sure that it doesn't change while being written back. |
3464 | */ | 3464 | */ |
3465 | static int | 3465 | static vm_fault_t |
3466 | cifs_page_mkwrite(struct vm_fault *vmf) | 3466 | cifs_page_mkwrite(struct vm_fault *vmf) |
3467 | { | 3467 | { |
3468 | struct page *page = vmf->page; | 3468 | struct page *page = vmf->page; |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b4ae932ea134..38ebf3f357d2 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -1452,7 +1452,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
1452 | struct cifs_open_parms oparms; | 1452 | struct cifs_open_parms oparms; |
1453 | struct cifs_fid fid; | 1453 | struct cifs_fid fid; |
1454 | struct kvec err_iov = {NULL, 0}; | 1454 | struct kvec err_iov = {NULL, 0}; |
1455 | struct smb2_err_rsp *err_buf = NULL; | 1455 | struct smb2_err_rsp *err_buf; |
1456 | struct smb2_symlink_err_rsp *symlink; | 1456 | struct smb2_symlink_err_rsp *symlink; |
1457 | unsigned int sub_len; | 1457 | unsigned int sub_len; |
1458 | unsigned int sub_offset; | 1458 | unsigned int sub_offset; |
@@ -1476,7 +1476,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, | |||
1476 | 1476 | ||
1477 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); | 1477 | rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); |
1478 | 1478 | ||
1479 | if (!rc || !err_buf) { | 1479 | if (!rc || !err_iov.iov_base) { |
1480 | kfree(utf16_path); | 1480 | kfree(utf16_path); |
1481 | return -ENOENT; | 1481 | return -ENOENT; |
1482 | } | 1482 | } |
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 5008af546dd1..87817ddcc096 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c | |||
@@ -1028,7 +1028,7 @@ static int smbd_post_send(struct smbd_connection *info, | |||
1028 | for (i = 0; i < request->num_sge; i++) { | 1028 | for (i = 0; i < request->num_sge; i++) { |
1029 | log_rdma_send(INFO, | 1029 | log_rdma_send(INFO, |
1030 | "rdma_request sge[%d] addr=%llu length=%u\n", | 1030 | "rdma_request sge[%d] addr=%llu length=%u\n", |
1031 | i, request->sge[0].addr, request->sge[0].length); | 1031 | i, request->sge[i].addr, request->sge[i].length); |
1032 | ib_dma_sync_single_for_device( | 1032 | ib_dma_sync_single_for_device( |
1033 | info->id->device, | 1033 | info->id->device, |
1034 | request->sge[i].addr, | 1034 | request->sge[i].addr, |
@@ -2139,6 +2139,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) | |||
2139 | goto done; | 2139 | goto done; |
2140 | } | 2140 | } |
2141 | 2141 | ||
2142 | cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen); | ||
2143 | for (i = 0; i < rqst->rq_nvec-1; i++) | ||
2144 | dump_smb(iov[i].iov_base, iov[i].iov_len); | ||
2145 | |||
2142 | remaining_data_length = buflen; | 2146 | remaining_data_length = buflen; |
2143 | 2147 | ||
2144 | log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " | 2148 | log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " |
@@ -2194,6 +2198,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) | |||
2194 | goto done; | 2198 | goto done; |
2195 | } | 2199 | } |
2196 | i++; | 2200 | i++; |
2201 | if (i == rqst->rq_nvec) | ||
2202 | break; | ||
2197 | } | 2203 | } |
2198 | start = i; | 2204 | start = i; |
2199 | buflen = 0; | 2205 | buflen = 0; |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 846ca150d52e..4dd842f72846 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -1997,6 +1997,16 @@ out: | |||
1997 | return rc; | 1997 | return rc; |
1998 | } | 1998 | } |
1999 | 1999 | ||
2000 | static bool is_dot_dotdot(const char *name, size_t name_size) | ||
2001 | { | ||
2002 | if (name_size == 1 && name[0] == '.') | ||
2003 | return true; | ||
2004 | else if (name_size == 2 && name[0] == '.' && name[1] == '.') | ||
2005 | return true; | ||
2006 | |||
2007 | return false; | ||
2008 | } | ||
2009 | |||
2000 | /** | 2010 | /** |
2001 | * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext | 2011 | * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext |
2002 | * @plaintext_name: The plaintext name | 2012 | * @plaintext_name: The plaintext name |
@@ -2021,13 +2031,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, | |||
2021 | size_t packet_size; | 2031 | size_t packet_size; |
2022 | int rc = 0; | 2032 | int rc = 0; |
2023 | 2033 | ||
2024 | if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) | 2034 | if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && |
2025 | && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) | 2035 | !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) { |
2026 | && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) | 2036 | if (is_dot_dotdot(name, name_size)) { |
2027 | && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, | 2037 | rc = ecryptfs_copy_filename(plaintext_name, |
2028 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { | 2038 | plaintext_name_size, |
2029 | const char *orig_name = name; | 2039 | name, name_size); |
2030 | size_t orig_name_size = name_size; | 2040 | goto out; |
2041 | } | ||
2042 | |||
2043 | if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE || | ||
2044 | strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, | ||
2045 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) { | ||
2046 | rc = -EINVAL; | ||
2047 | goto out; | ||
2048 | } | ||
2031 | 2049 | ||
2032 | name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; | 2050 | name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; |
2033 | name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; | 2051 | name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; |
@@ -2047,12 +2065,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, | |||
2047 | decoded_name, | 2065 | decoded_name, |
2048 | decoded_name_size); | 2066 | decoded_name_size); |
2049 | if (rc) { | 2067 | if (rc) { |
2050 | printk(KERN_INFO "%s: Could not parse tag 70 packet " | 2068 | ecryptfs_printk(KERN_DEBUG, |
2051 | "from filename; copying through filename " | 2069 | "%s: Could not parse tag 70 packet from filename\n", |
2052 | "as-is\n", __func__); | 2070 | __func__); |
2053 | rc = ecryptfs_copy_filename(plaintext_name, | ||
2054 | plaintext_name_size, | ||
2055 | orig_name, orig_name_size); | ||
2056 | goto out_free; | 2071 | goto out_free; |
2057 | } | 2072 | } |
2058 | } else { | 2073 | } else { |
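The new is_dot_dotdot() helper lets the decode path pass "." and ".." through untouched before insisting on the encrypted-filename prefix. It is small enough to reproduce verbatim in a user-space test:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool is_dot_dotdot(const char *name, size_t name_size)
{
	if (name_size == 1 && name[0] == '.')
		return true;
	else if (name_size == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

int main(void)
{
	assert(is_dot_dotdot(".", 1));
	assert(is_dot_dotdot("..", 2));
	assert(!is_dot_dotdot(".hidden", strlen(".hidden")));
	assert(!is_dot_dotdot("...", 3));
	printf("ok\n");
	return 0;
}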
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c74ed3ca3372..b76a9853325e 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name, | |||
82 | buf->sb, lower_name, | 82 | buf->sb, lower_name, |
83 | lower_namelen); | 83 | lower_namelen); |
84 | if (rc) { | 84 | if (rc) { |
85 | printk(KERN_ERR "%s: Error attempting to decode and decrypt " | 85 | if (rc != -EINVAL) { |
86 | "filename [%s]; rc = [%d]\n", __func__, lower_name, | 86 | ecryptfs_printk(KERN_DEBUG, |
87 | rc); | 87 | "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n", |
88 | goto out; | 88 | __func__, lower_name, rc); |
89 | return rc; | ||
90 | } | ||
91 | |||
92 | /* Mask -EINVAL errors as these are most likely due a plaintext | ||
93 | * filename present in the lower filesystem despite filename | ||
94 | * encryption being enabled. One unavoidable example would be | ||
95 | * the "lost+found" dentry in the root directory of an Ext4 | ||
96 | * filesystem. | ||
97 | */ | ||
98 | return 0; | ||
89 | } | 99 | } |
100 | |||
90 | buf->caller->pos = buf->ctx.pos; | 101 | buf->caller->pos = buf->ctx.pos; |
91 | rc = !dir_emit(buf->caller, name, name_size, ino, d_type); | 102 | rc = !dir_emit(buf->caller, name, name_size, ino, d_type); |
92 | kfree(name); | 103 | kfree(name); |
93 | if (!rc) | 104 | if (!rc) |
94 | buf->entries_written++; | 105 | buf->entries_written++; |
95 | out: | 106 | |
96 | return rc; | 107 | return rc; |
97 | } | 108 | } |
98 | 109 | ||
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 847904aa63a9..97d17eaeba07 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -395,8 +395,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
395 | 395 | ||
396 | mount_crypt_stat = &ecryptfs_superblock_to_private( | 396 | mount_crypt_stat = &ecryptfs_superblock_to_private( |
397 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 397 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
398 | if (mount_crypt_stat | 398 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { |
399 | && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { | ||
400 | rc = ecryptfs_encrypt_and_encode_filename( | 399 | rc = ecryptfs_encrypt_and_encode_filename( |
401 | &encrypted_and_encoded_name, &len, | 400 | &encrypted_and_encoded_name, &len, |
402 | mount_crypt_stat, name, len); | 401 | mount_crypt_stat, name, len); |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c89a58cfc991..e74fe84d0886 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -1880,7 +1880,7 @@ find_next_matching_auth_tok: | |||
1880 | candidate_auth_tok = &auth_tok_list_item->auth_tok; | 1880 | candidate_auth_tok = &auth_tok_list_item->auth_tok; |
1881 | if (unlikely(ecryptfs_verbosity > 0)) { | 1881 | if (unlikely(ecryptfs_verbosity > 0)) { |
1882 | ecryptfs_printk(KERN_DEBUG, | 1882 | ecryptfs_printk(KERN_DEBUG, |
1883 | "Considering cadidate auth tok:\n"); | 1883 | "Considering candidate auth tok:\n"); |
1884 | ecryptfs_dump_auth_tok(candidate_auth_tok); | 1884 | ecryptfs_dump_auth_tok(candidate_auth_tok); |
1885 | } | 1885 | } |
1886 | rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, | 1886 | rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, |
diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 09640220fda8..047c327a6b23 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c | |||
@@ -88,11 +88,11 @@ out_unlock: | |||
88 | * The default page_lock and i_size verification done by non-DAX fault paths | 88 | * The default page_lock and i_size verification done by non-DAX fault paths |
89 | * is sufficient because ext2 doesn't support hole punching. | 89 | * is sufficient because ext2 doesn't support hole punching. |
90 | */ | 90 | */ |
91 | static int ext2_dax_fault(struct vm_fault *vmf) | 91 | static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) |
92 | { | 92 | { |
93 | struct inode *inode = file_inode(vmf->vma->vm_file); | 93 | struct inode *inode = file_inode(vmf->vma->vm_file); |
94 | struct ext2_inode_info *ei = EXT2_I(inode); | 94 | struct ext2_inode_info *ei = EXT2_I(inode); |
95 | int ret; | 95 | vm_fault_t ret; |
96 | 96 | ||
97 | if (vmf->flags & FAULT_FLAG_WRITE) { | 97 | if (vmf->flags & FAULT_FLAG_WRITE) { |
98 | sb_start_pagefault(inode->i_sb); | 98 | sb_start_pagefault(inode->i_sb); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4b12ba70a895..47d7c151fcba 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits) | |||
745 | */ | 745 | */ |
746 | if (inode && inode_to_wb_is_valid(inode)) { | 746 | if (inode && inode_to_wb_is_valid(inode)) { |
747 | struct bdi_writeback *wb; | 747 | struct bdi_writeback *wb; |
748 | bool locked, congested; | 748 | struct wb_lock_cookie lock_cookie = {}; |
749 | bool congested; | ||
749 | 750 | ||
750 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 751 | wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); |
751 | congested = wb_congested(wb, cong_bits); | 752 | congested = wb_congested(wb, cong_bits); |
752 | unlocked_inode_to_wb_end(inode, locked); | 753 | unlocked_inode_to_wb_end(inode, &lock_cookie); |
753 | return congested; | 754 | return congested; |
754 | } | 755 | } |
755 | 756 | ||
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 9bb2fe35799d..10205ececc27 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/bio.h> | 21 | #include <linux/bio.h> |
22 | 22 | ||
23 | #include <linux/slab.h> | ||
23 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
24 | #include <linux/zlib.h> | 25 | #include <linux/zlib.h> |
25 | 26 | ||
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, | |||
59 | >> bufshift; | 60 | >> bufshift; |
60 | int haveblocks; | 61 | int haveblocks; |
61 | blkcnt_t blocknum; | 62 | blkcnt_t blocknum; |
62 | struct buffer_head *bhs[needblocks + 1]; | 63 | struct buffer_head **bhs; |
63 | int curbh, curpage; | 64 | int curbh, curpage; |
64 | 65 | ||
65 | if (block_size > deflateBound(1UL << zisofs_block_shift)) { | 66 | if (block_size > deflateBound(1UL << zisofs_block_shift)) { |
@@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, | |||
80 | 81 | ||
81 | /* Because zlib is not thread-safe, do all the I/O at the top. */ | 82 | /* Because zlib is not thread-safe, do all the I/O at the top. */ |
82 | blocknum = block_start >> bufshift; | 83 | blocknum = block_start >> bufshift; |
83 | memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); | 84 | bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL); |
85 | if (!bhs) { | ||
86 | *errp = -ENOMEM; | ||
87 | return 0; | ||
88 | } | ||
84 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); | 89 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); |
85 | ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); | 90 | ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); |
86 | 91 | ||
@@ -190,6 +195,7 @@ z_eio: | |||
190 | b_eio: | 195 | b_eio: |
191 | for (i = 0; i < haveblocks; i++) | 196 | for (i = 0; i < haveblocks; i++) |
192 | brelse(bhs[i]); | 197 | brelse(bhs[i]); |
198 | kfree(bhs); | ||
193 | return stream.total_out; | 199 | return stream.total_out; |
194 | } | 200 | } |
195 | 201 | ||
@@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
305 | unsigned int zisofs_pages_per_cblock = | 311 | unsigned int zisofs_pages_per_cblock = |
306 | PAGE_SHIFT <= zisofs_block_shift ? | 312 | PAGE_SHIFT <= zisofs_block_shift ? |
307 | (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; | 313 | (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; |
308 | struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; | 314 | struct page **pages; |
309 | pgoff_t index = page->index, end_index; | 315 | pgoff_t index = page->index, end_index; |
310 | 316 | ||
311 | end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 317 | end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
@@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
330 | full_page = 0; | 336 | full_page = 0; |
331 | pcount = 1; | 337 | pcount = 1; |
332 | } | 338 | } |
339 | pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), | ||
340 | sizeof(*pages), GFP_KERNEL); | ||
341 | if (!pages) { | ||
342 | unlock_page(page); | ||
343 | return -ENOMEM; | ||
344 | } | ||
333 | pages[full_page] = page; | 345 | pages[full_page] = page; |
334 | 346 | ||
335 | for (i = 0; i < pcount; i++, index++) { | 347 | for (i = 0; i < pcount; i++, index++) { |
@@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
357 | } | 369 | } |
358 | 370 | ||
359 | /* At this point, err contains 0 or -EIO depending on the "critical" page */ | 371 | /* At this point, err contains 0 or -EIO depending on the "critical" page */ |
372 | kfree(pages); | ||
360 | return err; | 373 | return err; |
361 | } | 374 | } |
362 | 375 | ||
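Both isofs hunks replace variable-length arrays on the kernel stack with heap allocations sized at run time, plus the error check and kfree() that come with them. A user-space sketch of the same conversion, with calloc standing in for kcalloc:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer_head;	/* opaque here; only the pointer array matters */

/* Before: "struct buffer_head *bhs[needblocks + 1];" on the stack.
 * After: one zeroed heap allocation that must be checked and freed. */
static int process_blocks(unsigned int needblocks)
{
	struct buffer_head **bhs;

	bhs = calloc(needblocks + 1, sizeof(*bhs));
	if (!bhs)
		return -ENOMEM;

	/* ... fill bhs[] and do the real work here ... */

	free(bhs);
	return 0;
}

int main(void)
{
	printf("%d\n", process_blocks(16));	/* 0 on success */
	return 0;
}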
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bc258a4402f6..ec3fba7d492f 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt) | |||
394 | break; | 394 | break; |
395 | #ifdef CONFIG_JOLIET | 395 | #ifdef CONFIG_JOLIET |
396 | case Opt_iocharset: | 396 | case Opt_iocharset: |
397 | kfree(popt->iocharset); | ||
397 | popt->iocharset = match_strdup(&args[0]); | 398 | popt->iocharset = match_strdup(&args[0]); |
399 | if (!popt->iocharset) | ||
400 | return 0; | ||
398 | break; | 401 | break; |
399 | #endif | 402 | #endif |
400 | case Opt_map_a: | 403 | case Opt_map_a: |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f60dee7faf03..87bdf0f4cba1 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
342 | static void jffs2_kill_sb(struct super_block *sb) | 342 | static void jffs2_kill_sb(struct super_block *sb) |
343 | { | 343 | { |
344 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | 344 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); |
345 | if (!sb_rdonly(sb)) | 345 | if (c && !sb_rdonly(sb)) |
346 | jffs2_stop_garbage_collect_thread(c); | 346 | jffs2_stop_garbage_collect_thread(c); |
347 | kill_mtd_super(sb); | 347 | kill_mtd_super(sb); |
348 | kfree(c); | 348 | kfree(c); |
diff --git a/fs/namespace.c b/fs/namespace.c index e398f32d7541..5f75969adff1 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
1089 | goto out_free; | 1089 | goto out_free; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); | 1092 | mnt->mnt.mnt_flags = old->mnt.mnt_flags; |
1093 | mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); | ||
1093 | /* Don't allow unprivileged users to change mount flags */ | 1094 | /* Don't allow unprivileged users to change mount flags */ |
1094 | if (flag & CL_UNPRIVILEGED) { | 1095 | if (flag & CL_UNPRIVILEGED) { |
1095 | mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; | 1096 | mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; |
@@ -2814,7 +2815,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, | |||
2814 | mnt_flags |= MNT_NODIRATIME; | 2815 | mnt_flags |= MNT_NODIRATIME; |
2815 | if (flags & MS_STRICTATIME) | 2816 | if (flags & MS_STRICTATIME) |
2816 | mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); | 2817 | mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); |
2817 | if (flags & SB_RDONLY) | 2818 | if (flags & MS_RDONLY) |
2818 | mnt_flags |= MNT_READONLY; | 2819 | mnt_flags |= MNT_READONLY; |
2819 | 2820 | ||
2820 | /* The default atime for remount is preservation */ | 2821 | /* The default atime for remount is preservation */ |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index d51e1bb781cf..d94e8031fe5f 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
92 | u32 event_mask, | 92 | u32 event_mask, |
93 | const void *data, int data_type) | 93 | const void *data, int data_type) |
94 | { | 94 | { |
95 | __u32 marks_mask, marks_ignored_mask; | 95 | __u32 marks_mask = 0, marks_ignored_mask = 0; |
96 | const struct path *path = data; | 96 | const struct path *path = data; |
97 | 97 | ||
98 | pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" | 98 | pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" |
@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
108 | !d_can_lookup(path->dentry)) | 108 | !d_can_lookup(path->dentry)) |
109 | return false; | 109 | return false; |
110 | 110 | ||
111 | if (inode_mark && vfsmnt_mark) { | 111 | /* |
112 | marks_mask = (vfsmnt_mark->mask | inode_mark->mask); | 112 | * if the event is for a child and this inode doesn't care about |
113 | marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); | 113 | * events on the child, don't send it! |
114 | } else if (inode_mark) { | 114 | */ |
115 | /* | 115 | if (inode_mark && |
116 | * if the event is for a child and this inode doesn't care about | 116 | (!(event_mask & FS_EVENT_ON_CHILD) || |
117 | * events on the child, don't send it! | 117 | (inode_mark->mask & FS_EVENT_ON_CHILD))) { |
118 | */ | 118 | marks_mask |= inode_mark->mask; |
119 | if ((event_mask & FS_EVENT_ON_CHILD) && | 119 | marks_ignored_mask |= inode_mark->ignored_mask; |
120 | !(inode_mark->mask & FS_EVENT_ON_CHILD)) | 120 | } |
121 | return false; | 121 | |
122 | marks_mask = inode_mark->mask; | 122 | if (vfsmnt_mark) { |
123 | marks_ignored_mask = inode_mark->ignored_mask; | 123 | marks_mask |= vfsmnt_mark->mask; |
124 | } else if (vfsmnt_mark) { | 124 | marks_ignored_mask |= vfsmnt_mark->ignored_mask; |
125 | marks_mask = vfsmnt_mark->mask; | ||
126 | marks_ignored_mask = vfsmnt_mark->ignored_mask; | ||
127 | } else { | ||
128 | BUG(); | ||
129 | } | 125 | } |
130 | 126 | ||
131 | if (d_is_dir(path->dentry) && | 127 | if (d_is_dir(path->dentry) && |
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 219b269c737e..613ec7e5a465 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c | |||
@@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell, | |||
192 | struct fsnotify_iter_info *iter_info) | 192 | struct fsnotify_iter_info *iter_info) |
193 | { | 193 | { |
194 | struct fsnotify_group *group = NULL; | 194 | struct fsnotify_group *group = NULL; |
195 | __u32 inode_test_mask = 0; | 195 | __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); |
196 | __u32 vfsmount_test_mask = 0; | 196 | __u32 marks_mask = 0; |
197 | __u32 marks_ignored_mask = 0; | ||
197 | 198 | ||
198 | if (unlikely(!inode_mark && !vfsmount_mark)) { | 199 | if (unlikely(!inode_mark && !vfsmount_mark)) { |
199 | BUG(); | 200 | BUG(); |
@@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell, | |||
213 | /* does the inode mark tell us to do something? */ | 214 | /* does the inode mark tell us to do something? */ |
214 | if (inode_mark) { | 215 | if (inode_mark) { |
215 | group = inode_mark->group; | 216 | group = inode_mark->group; |
216 | inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); | 217 | marks_mask |= inode_mark->mask; |
217 | inode_test_mask &= inode_mark->mask; | 218 | marks_ignored_mask |= inode_mark->ignored_mask; |
218 | inode_test_mask &= ~inode_mark->ignored_mask; | ||
219 | } | 219 | } |
220 | 220 | ||
221 | /* does the vfsmount_mark tell us to do something? */ | 221 | /* does the vfsmount_mark tell us to do something? */ |
222 | if (vfsmount_mark) { | 222 | if (vfsmount_mark) { |
223 | vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); | ||
224 | group = vfsmount_mark->group; | 223 | group = vfsmount_mark->group; |
225 | vfsmount_test_mask &= vfsmount_mark->mask; | 224 | marks_mask |= vfsmount_mark->mask; |
226 | vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; | 225 | marks_ignored_mask |= vfsmount_mark->ignored_mask; |
227 | if (inode_mark) | ||
228 | vfsmount_test_mask &= ~inode_mark->ignored_mask; | ||
229 | } | 226 | } |
230 | 227 | ||
231 | pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" | 228 | pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" |
232 | " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" | 229 | " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" |
233 | " data=%p data_is=%d cookie=%d\n", | 230 | " data=%p data_is=%d cookie=%d\n", |
234 | __func__, group, to_tell, mask, inode_mark, | 231 | __func__, group, to_tell, mask, inode_mark, vfsmount_mark, |
235 | inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, | 232 | marks_mask, marks_ignored_mask, data, |
236 | data_is, cookie); | 233 | data_is, cookie); |
237 | 234 | ||
238 | if (!inode_test_mask && !vfsmount_test_mask) | 235 | if (!(test_mask & marks_mask & ~marks_ignored_mask)) |
239 | return 0; | 236 | return 0; |
240 | 237 | ||
241 | return group->ops->handle_event(group, to_tell, inode_mark, | 238 | return group->ops->handle_event(group, to_tell, inode_mark, |
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 3ae5fdba0225..10796d3fe27d 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c | |||
@@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb) | |||
579 | /* provided sb cleanup */ | 579 | /* provided sb cleanup */ |
580 | kill_anon_super(sb); | 580 | kill_anon_super(sb); |
581 | 581 | ||
582 | if (!ORANGEFS_SB(sb)) { | ||
583 | mutex_lock(&orangefs_request_mutex); | ||
584 | mutex_unlock(&orangefs_request_mutex); | ||
585 | return; | ||
586 | } | ||
582 | /* | 587 | /* |
583 | * issue the unmount to userspace to tell it to remove the | 588 | * issue the unmount to userspace to tell it to remove the |
584 | * dynamic mount info it has for this superblock | 589 | * dynamic mount info it has for this superblock |
diff --git a/fs/proc/base.c b/fs/proc/base.c index eafa39a3a88c..1b2ede6abcdf 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode, | |||
1693 | kuid_t uid; | 1693 | kuid_t uid; |
1694 | kgid_t gid; | 1694 | kgid_t gid; |
1695 | 1695 | ||
1696 | if (unlikely(task->flags & PF_KTHREAD)) { | ||
1697 | *ruid = GLOBAL_ROOT_UID; | ||
1698 | *rgid = GLOBAL_ROOT_GID; | ||
1699 | return; | ||
1700 | } | ||
1701 | |||
1696 | /* Default to the tasks effective ownership */ | 1702 | /* Default to the tasks effective ownership */ |
1697 | rcu_read_lock(); | 1703 | rcu_read_lock(); |
1698 | cred = __task_cred(task); | 1704 | cred = __task_cred(task); |
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index a000d7547479..b572cc865b92 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c | |||
@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v) | |||
24 | LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), | 24 | LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), |
25 | LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), | 25 | LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), |
26 | nr_running(), nr_threads, | 26 | nr_running(), nr_threads, |
27 | idr_get_cursor(&task_active_pid_ns(current)->idr)); | 27 | idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); |
28 | return 0; | 28 | return 0; |
29 | } | 29 | } |
30 | 30 | ||
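The trailing field of /proc/loadavg traditionally reports the most recently allocated PID; after the PID allocator moved to an IDR, the cursor holds the next PID to be handed out, so printing it directly was off by one, hence the "- 1". A small userspace check of that field, assuming the usual "avg1 avg5 avg15 running/total last_pid" layout:

    #include <stdio.h>

    int main(void)
    {
            double a1, a5, a15;
            int running, total, last_pid;
            FILE *f = fopen("/proc/loadavg", "r");

            if (!f)
                    return 1;
            /* e.g. "0.20 0.18 0.12 1/80 11206" - the trailing number is the
             * most recently allocated PID again after this fix. */
            if (fscanf(f, "%lf %lf %lf %d/%d %d",
                       &a1, &a5, &a15, &running, &total, &last_pid) == 6)
                    printf("last allocated pid: %d\n", last_pid);
            fclose(f);
            return 0;
    }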
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 65ae54659833..c486ad4b43f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, | |||
1310 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | 1310 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
1311 | else if (is_swap_pmd(pmd)) { | 1311 | else if (is_swap_pmd(pmd)) { |
1312 | swp_entry_t entry = pmd_to_swp_entry(pmd); | 1312 | swp_entry_t entry = pmd_to_swp_entry(pmd); |
1313 | unsigned long offset = swp_offset(entry); | ||
1313 | 1314 | ||
1315 | offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; | ||
1314 | frame = swp_type(entry) | | 1316 | frame = swp_type(entry) | |
1315 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); | 1317 | (offset << MAX_SWAPFILES_SHIFT); |
1316 | flags |= PM_SWAP; | 1318 | flags |= PM_SWAP; |
1317 | if (pmd_swp_soft_dirty(pmd)) | 1319 | if (pmd_swp_soft_dirty(pmd)) |
1318 | flags |= PM_SOFT_DIRTY; | 1320 | flags |= PM_SOFT_DIRTY; |
@@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, | |||
1332 | break; | 1334 | break; |
1333 | if (pm->show_pfn && (flags & PM_PRESENT)) | 1335 | if (pm->show_pfn && (flags & PM_PRESENT)) |
1334 | frame++; | 1336 | frame++; |
1337 | else if (flags & PM_SWAP) | ||
1338 | frame += (1 << MAX_SWAPFILES_SHIFT); | ||
1335 | } | 1339 | } |
1336 | spin_unlock(ptl); | 1340 | spin_unlock(ptl); |
1337 | return err; | 1341 | return err; |
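For swap entries the pagemap "frame" field packs the swap type in the low bits and the swap offset above them; previously every page of a swapped-out (migration-entry) THP reported the offset of the head page. The hunk adds the page's position inside the PMD to the offset and, in the loop below it, advances the frame by one offset step per page, mirroring the frame++ done for present pages. A worked sketch of the encoding; MAX_SWAPFILES_SHIFT = 5 and a 2 MiB PMD are assumptions that match current x86-64 kernels rather than anything stated here.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SWAPFILES_SHIFT 5                /* assumed, matches current kernels */
    #define PAGE_SHIFT          12
    #define PMD_SIZE            (1UL << 21)      /* 2 MiB transparent huge page */

    int main(void)
    {
            unsigned long swp_type = 1, swp_offset = 0x1000;
            unsigned long pmd_start = 0x7f0000000000UL, addr;

            /* First four pagemap entries of a swapped-out THP: the encoded
             * offset now advances page by page instead of repeating. */
            for (addr = pmd_start; addr < pmd_start + 4 * (1UL << PAGE_SHIFT);
                 addr += 1UL << PAGE_SHIFT) {
                    unsigned long off = swp_offset +
                            ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT);
                    unsigned long frame = swp_type | (off << MAX_SWAPFILES_SHIFT);

                    printf("addr %#lx -> frame %#lx\n", addr, frame);
            }
            return 0;
    }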
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 020c597ef9b6..d88231e3b2be 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2966,7 +2966,7 @@ static int __init dquot_init(void) | |||
2966 | NULL); | 2966 | NULL); |
2967 | 2967 | ||
2968 | order = 0; | 2968 | order = 0; |
2969 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); | 2969 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); |
2970 | if (!dquot_hash) | 2970 | if (!dquot_hash) |
2971 | panic("Cannot create dquot hash table"); | 2971 | panic("Cannot create dquot hash table"); |
2972 | 2972 | ||
diff --git a/fs/super.c b/fs/super.c index 5fa9a8d8d865..122c402049a2 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -167,6 +167,7 @@ static void destroy_unused_super(struct super_block *s) | |||
167 | security_sb_free(s); | 167 | security_sb_free(s); |
168 | put_user_ns(s->s_user_ns); | 168 | put_user_ns(s->s_user_ns); |
169 | kfree(s->s_subtype); | 169 | kfree(s->s_subtype); |
170 | free_prealloced_shrinker(&s->s_shrink); | ||
170 | /* no delays needed */ | 171 | /* no delays needed */ |
171 | destroy_super_work(&s->destroy_work); | 172 | destroy_super_work(&s->destroy_work); |
172 | } | 173 | } |
@@ -252,6 +253,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, | |||
252 | s->s_shrink.count_objects = super_cache_count; | 253 | s->s_shrink.count_objects = super_cache_count; |
253 | s->s_shrink.batch = 1024; | 254 | s->s_shrink.batch = 1024; |
254 | s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; | 255 | s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; |
256 | if (prealloc_shrinker(&s->s_shrink)) | ||
257 | goto fail; | ||
255 | return s; | 258 | return s; |
256 | 259 | ||
257 | fail: | 260 | fail: |
@@ -518,11 +521,7 @@ retry: | |||
518 | hlist_add_head(&s->s_instances, &type->fs_supers); | 521 | hlist_add_head(&s->s_instances, &type->fs_supers); |
519 | spin_unlock(&sb_lock); | 522 | spin_unlock(&sb_lock); |
520 | get_filesystem(type); | 523 | get_filesystem(type); |
521 | err = register_shrinker(&s->s_shrink); | 524 | register_shrinker_prepared(&s->s_shrink); |
522 | if (err) { | ||
523 | deactivate_locked_super(s); | ||
524 | s = ERR_PTR(err); | ||
525 | } | ||
526 | return s; | 525 | return s; |
527 | } | 526 | } |
528 | 527 | ||
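Together with the shrinker.h hunk further down, this splits shrinker registration in two: the per-node state is preallocated while alloc_super() can still fail gracefully, the shrinker is only made visible once the superblock is fully set up, and destroy_unused_super() releases the preallocated state for superblocks that never got that far. A sketch of the same pattern for a made-up object; my_obj and the stub callbacks are hypothetical, and the shrinker calls are the ones declared in the header change below.

    #include <linux/shrinker.h>

    struct my_obj {                               /* hypothetical */
            struct shrinker shrink;
    };

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
            return 0;                             /* stub: nothing to reclaim */
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            return SHRINK_STOP;                   /* stub */
    }

    static int my_obj_init(struct my_obj *o)
    {
            o->shrink.count_objects = my_count;
            o->shrink.scan_objects = my_scan;
            o->shrink.seeks = DEFAULT_SEEKS;
            /* Allocate shrinker state early, where failure is easy to unwind. */
            return prealloc_shrinker(&o->shrink);
    }

    static void my_obj_publish(struct my_obj *o)
    {
            /* Cannot fail: only links the already-prepared shrinker. */
            register_shrinker_prepared(&o->shrink);
    }

    static void my_obj_destroy_unused(struct my_obj *o)
    {
            /* Object was never published, so only drop the preallocated state. */
            free_prealloced_shrinker(&o->shrink);
    }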
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index f897e55f2cd0..16a8ad21b77e 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c | |||
@@ -28,6 +28,9 @@ | |||
28 | 28 | ||
29 | #include "udf_sb.h" | 29 | #include "udf_sb.h" |
30 | 30 | ||
31 | #define SURROGATE_MASK 0xfffff800 | ||
32 | #define SURROGATE_PAIR 0x0000d800 | ||
33 | |||
31 | static int udf_uni2char_utf8(wchar_t uni, | 34 | static int udf_uni2char_utf8(wchar_t uni, |
32 | unsigned char *out, | 35 | unsigned char *out, |
33 | int boundlen) | 36 | int boundlen) |
@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni, | |||
37 | if (boundlen <= 0) | 40 | if (boundlen <= 0) |
38 | return -ENAMETOOLONG; | 41 | return -ENAMETOOLONG; |
39 | 42 | ||
43 | if ((uni & SURROGATE_MASK) == SURROGATE_PAIR) | ||
44 | return -EINVAL; | ||
45 | |||
40 | if (uni < 0x80) { | 46 | if (uni < 0x80) { |
41 | out[u_len++] = (unsigned char)uni; | 47 | out[u_len++] = (unsigned char)uni; |
42 | } else if (uni < 0x800) { | 48 | } else if (uni < 0x800) { |
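The new check rejects UTF-16 surrogate code points (U+D800..U+DFFF), which are not characters on their own and must never be emitted as UTF-8: clearing the low 11 bits leaves exactly 0xd800 for that range and for nothing else. A small standalone check of the mask arithmetic:

    #include <assert.h>
    #include <stdbool.h>

    #define SURROGATE_MASK 0xfffff800
    #define SURROGATE_PAIR 0x0000d800

    static bool is_surrogate(unsigned int uni)
    {
            /* Bits 10..0 are the surrogate payload; everything above must be
             * exactly 0xd800 for the code point to fall in U+D800..U+DFFF. */
            return (uni & SURROGATE_MASK) == SURROGATE_PAIR;
    }

    int main(void)
    {
            assert(!is_surrogate(0xd7ff));   /* last code point before the range */
            assert(is_surrogate(0xd800));    /* high surrogate start */
            assert(is_surrogate(0xdfff));    /* low surrogate end */
            assert(!is_surrogate(0xe000));   /* first code point after the range */
            return 0;
    }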
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 8feb0c866ee0..1624e2be485c 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h | |||
@@ -55,22 +55,21 @@ enum { | |||
55 | /* From drivers/edac/ghes_edac.c */ | 55 | /* From drivers/edac/ghes_edac.c */ |
56 | 56 | ||
57 | #ifdef CONFIG_EDAC_GHES | 57 | #ifdef CONFIG_EDAC_GHES |
58 | void ghes_edac_report_mem_error(struct ghes *ghes, int sev, | 58 | void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); |
59 | struct cper_sec_mem_err *mem_err); | ||
60 | 59 | ||
61 | int ghes_edac_register(struct ghes *ghes, struct device *dev); | 60 | int ghes_edac_register(struct ghes *ghes, struct device *dev); |
62 | 61 | ||
63 | void ghes_edac_unregister(struct ghes *ghes); | 62 | void ghes_edac_unregister(struct ghes *ghes); |
64 | 63 | ||
65 | #else | 64 | #else |
66 | static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev, | 65 | static inline void ghes_edac_report_mem_error(int sev, |
67 | struct cper_sec_mem_err *mem_err) | 66 | struct cper_sec_mem_err *mem_err) |
68 | { | 67 | { |
69 | } | 68 | } |
70 | 69 | ||
71 | static inline int ghes_edac_register(struct ghes *ghes, struct device *dev) | 70 | static inline int ghes_edac_register(struct ghes *ghes, struct device *dev) |
72 | { | 71 | { |
73 | return 0; | 72 | return -ENODEV; |
74 | } | 73 | } |
75 | 74 | ||
76 | static inline void ghes_edac_unregister(struct ghes *ghes) | 75 | static inline void ghes_edac_unregister(struct ghes *ghes) |
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index 562fa7df2637..98e63d870139 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define DRM_HDCP_RI_LEN 2 | 19 | #define DRM_HDCP_RI_LEN 2 |
20 | #define DRM_HDCP_V_PRIME_PART_LEN 4 | 20 | #define DRM_HDCP_V_PRIME_PART_LEN 4 |
21 | #define DRM_HDCP_V_PRIME_NUM_PARTS 5 | 21 | #define DRM_HDCP_V_PRIME_NUM_PARTS 5 |
22 | #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) | 22 | #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) |
23 | #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) | 23 | #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) |
24 | #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) | 24 | #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) |
25 | 25 | ||
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index bfe86b54f6c1..0bd432a4d7bd 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) | |||
223 | set_wb_congested(bdi->wb.congested, sync); | 223 | set_wb_congested(bdi->wb.congested, sync); |
224 | } | 224 | } |
225 | 225 | ||
226 | struct wb_lock_cookie { | ||
227 | bool locked; | ||
228 | unsigned long flags; | ||
229 | }; | ||
230 | |||
226 | #ifdef CONFIG_CGROUP_WRITEBACK | 231 | #ifdef CONFIG_CGROUP_WRITEBACK |
227 | 232 | ||
228 | /** | 233 | /** |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f6be4b0b6c18..72ca0f3d39f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) | |||
347 | /** | 347 | /** |
348 | * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction | 348 | * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction |
349 | * @inode: target inode | 349 | * @inode: target inode |
350 | * @lockedp: temp bool output param, to be passed to the end function | 350 | * @cookie: output param, to be passed to the end function |
351 | * | 351 | * |
352 | * The caller wants to access the wb associated with @inode but isn't | 352 | * The caller wants to access the wb associated with @inode but isn't |
353 | * holding inode->i_lock, the i_pages lock or wb->list_lock. This | 353 | * holding inode->i_lock, the i_pages lock or wb->list_lock. This |
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) | |||
355 | * association doesn't change until the transaction is finished with | 355 | * association doesn't change until the transaction is finished with |
356 | * unlocked_inode_to_wb_end(). | 356 | * unlocked_inode_to_wb_end(). |
357 | * | 357 | * |
358 | * The caller must call unlocked_inode_to_wb_end() with *@lockdep | 358 | * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and |
359 | * afterwards and can't sleep during transaction. IRQ may or may not be | 359 | * can't sleep during the transaction. IRQs may or may not be disabled on |
360 | * disabled on return. | 360 | * return. |
361 | */ | 361 | */ |
362 | static inline struct bdi_writeback * | 362 | static inline struct bdi_writeback * |
363 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | 363 | unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) |
364 | { | 364 | { |
365 | rcu_read_lock(); | 365 | rcu_read_lock(); |
366 | 366 | ||
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | |||
368 | * Paired with store_release in inode_switch_wb_work_fn() and | 368 | * Paired with store_release in inode_switch_wb_work_fn() and |
369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. | 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. |
370 | */ | 370 | */ |
371 | *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; | 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; |
372 | 372 | ||
373 | if (unlikely(*lockedp)) | 373 | if (unlikely(cookie->locked)) |
374 | xa_lock_irq(&inode->i_mapping->i_pages); | 374 | xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); |
375 | 375 | ||
376 | /* | 376 | /* |
377 | * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages | 377 | * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages |
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | |||
383 | /** | 383 | /** |
384 | * unlocked_inode_to_wb_end - end inode wb access transaction | 384 | * unlocked_inode_to_wb_end - end inode wb access transaction |
385 | * @inode: target inode | 385 | * @inode: target inode |
386 | * @locked: *@lockedp from unlocked_inode_to_wb_begin() | 386 | * @cookie: @cookie from unlocked_inode_to_wb_begin() |
387 | */ | 387 | */ |
388 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | 388 | static inline void unlocked_inode_to_wb_end(struct inode *inode, |
389 | struct wb_lock_cookie *cookie) | ||
389 | { | 390 | { |
390 | if (unlikely(locked)) | 391 | if (unlikely(cookie->locked)) |
391 | xa_unlock_irq(&inode->i_mapping->i_pages); | 392 | xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); |
392 | 393 | ||
393 | rcu_read_unlock(); | 394 | rcu_read_unlock(); |
394 | } | 395 | } |
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) | |||
435 | } | 436 | } |
436 | 437 | ||
437 | static inline struct bdi_writeback * | 438 | static inline struct bdi_writeback * |
438 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | 439 | unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) |
439 | { | 440 | { |
440 | return inode_to_wb(inode); | 441 | return inode_to_wb(inode); |
441 | } | 442 | } |
442 | 443 | ||
443 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | 444 | static inline void unlocked_inode_to_wb_end(struct inode *inode, |
445 | struct wb_lock_cookie *cookie) | ||
444 | { | 446 | { |
445 | } | 447 | } |
446 | 448 | ||
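Callers now pass a struct wb_lock_cookie instead of a bare bool so the begin helper can use xa_lock_irqsave() and hand the saved IRQ flags to the end helper. A minimal sketch of the updated caller pattern, with do_something_with() standing in for whatever the caller does with the wb:

    #include <linux/backing-dev.h>

    static void touch_inode_wb(struct inode *inode)
    {
            struct wb_lock_cookie cookie = {};
            struct bdi_writeback *wb;

            wb = unlocked_inode_to_wb_begin(inode, &cookie);
            do_something_with(wb);        /* hypothetical; must not sleep here */
            unlocked_inode_to_wb_end(inode, &cookie);
    }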
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ceb96ecab96e..7d98e263e048 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -25,6 +25,9 @@ | |||
25 | #define __SANITIZE_ADDRESS__ | 25 | #define __SANITIZE_ADDRESS__ |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #undef __no_sanitize_address | ||
29 | #define __no_sanitize_address __attribute__((no_sanitize("address"))) | ||
30 | |||
28 | /* Clang doesn't have a way to turn it off per-function, yet. */ | 31 | /* Clang doesn't have a way to turn it off per-function, yet. */ |
29 | #ifdef __noretpoline | 32 | #ifdef __noretpoline |
30 | #undef __noretpoline | 33 | #undef __noretpoline |
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h | |||
@@ -1,18 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #ifndef _LINUX_CORESIGHT_PMU_H | 7 | #ifndef _LINUX_CORESIGHT_PMU_H |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 9f1edb92c97e..e0c95c9f1e29 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
@@ -248,7 +248,7 @@ struct fsnotify_mark { | |||
248 | /* Group this mark is for. Set on mark creation, stable until last ref | 248 | /* Group this mark is for. Set on mark creation, stable until last ref |
249 | * is dropped */ | 249 | * is dropped */ |
250 | struct fsnotify_group *group; | 250 | struct fsnotify_group *group; |
251 | /* List of marks by group->i_fsnotify_marks. Also reused for queueing | 251 | /* List of marks by group->marks_list. Also reused for queueing |
252 | * mark into destroy_list when it's waiting for the end of SRCU period | 252 | * mark into destroy_list when it's waiting for the end of SRCU period |
253 | * before it can be freed. [group->mark_mutex] */ | 253 | * before it can be freed. [group->mark_mutex] */ |
254 | struct list_head g_list; | 254 | struct list_head g_list; |
diff --git a/include/linux/hid.h b/include/linux/hid.h index 8da3e1f48195..26240a22978a 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -516,6 +516,12 @@ enum hid_type { | |||
516 | HID_TYPE_USBNONE | 516 | HID_TYPE_USBNONE |
517 | }; | 517 | }; |
518 | 518 | ||
519 | enum hid_battery_status { | ||
520 | HID_BATTERY_UNKNOWN = 0, | ||
521 | HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ | ||
522 | HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ | ||
523 | }; | ||
524 | |||
519 | struct hid_driver; | 525 | struct hid_driver; |
520 | struct hid_ll_driver; | 526 | struct hid_ll_driver; |
521 | 527 | ||
@@ -558,7 +564,8 @@ struct hid_device { /* device report descriptor */ | |||
558 | __s32 battery_max; | 564 | __s32 battery_max; |
559 | __s32 battery_report_type; | 565 | __s32 battery_report_type; |
560 | __s32 battery_report_id; | 566 | __s32 battery_report_id; |
561 | bool battery_reported; | 567 | enum hid_battery_status battery_status; |
568 | bool battery_avoid_query; | ||
562 | #endif | 569 | #endif |
563 | 570 | ||
564 | unsigned int status; /* see STAT flags above */ | 571 | unsigned int status; /* see STAT flags above */ |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d11f41d5269f..78a5a90b4267 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) | |||
663 | * Returns true if the skb is tagged with multiple vlan headers, regardless | 663 | * Returns true if the skb is tagged with multiple vlan headers, regardless |
664 | * of whether it is hardware accelerated or not. | 664 | * of whether it is hardware accelerated or not. |
665 | */ | 665 | */ |
666 | static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | 666 | static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) |
667 | { | 667 | { |
668 | __be16 protocol = skb->protocol; | 668 | __be16 protocol = skb->protocol; |
669 | 669 | ||
@@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | |||
673 | if (likely(!eth_type_vlan(protocol))) | 673 | if (likely(!eth_type_vlan(protocol))) |
674 | return false; | 674 | return false; |
675 | 675 | ||
676 | if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) | ||
677 | return false; | ||
678 | |||
676 | veh = (struct vlan_ethhdr *)skb->data; | 679 | veh = (struct vlan_ethhdr *)skb->data; |
677 | protocol = veh->h_vlan_encapsulated_proto; | 680 | protocol = veh->h_vlan_encapsulated_proto; |
678 | } | 681 | } |
@@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | |||
690 | * | 693 | * |
691 | * Returns features without unsafe ones if the skb has multiple tags. | 694 | * Returns features without unsafe ones if the skb has multiple tags. |
692 | */ | 695 | */ |
693 | static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, | 696 | static inline netdev_features_t vlan_features_check(struct sk_buff *skb, |
694 | netdev_features_t features) | 697 | netdev_features_t features) |
695 | { | 698 | { |
696 | if (skb_vlan_tagged_multi(skb)) { | 699 | if (skb_vlan_tagged_multi(skb)) { |
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 4754f01c1abb..aec44b1d9582 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h | |||
@@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void) | |||
186 | IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); | 186 | IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); |
187 | } | 187 | } |
188 | 188 | ||
189 | typedef int (*klp_shadow_ctor_t)(void *obj, | ||
190 | void *shadow_data, | ||
191 | void *ctor_data); | ||
192 | typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); | ||
193 | |||
189 | void *klp_shadow_get(void *obj, unsigned long id); | 194 | void *klp_shadow_get(void *obj, unsigned long id); |
190 | void *klp_shadow_alloc(void *obj, unsigned long id, void *data, | 195 | void *klp_shadow_alloc(void *obj, unsigned long id, |
191 | size_t size, gfp_t gfp_flags); | 196 | size_t size, gfp_t gfp_flags, |
192 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 197 | klp_shadow_ctor_t ctor, void *ctor_data); |
193 | size_t size, gfp_t gfp_flags); | 198 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, |
194 | void klp_shadow_free(void *obj, unsigned long id); | 199 | size_t size, gfp_t gfp_flags, |
195 | void klp_shadow_free_all(unsigned long id); | 200 | klp_shadow_ctor_t ctor, void *ctor_data); |
201 | void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); | ||
202 | void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); | ||
196 | 203 | ||
197 | #else /* !CONFIG_LIVEPATCH */ | 204 | #else /* !CONFIG_LIVEPATCH */ |
198 | 205 | ||
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8f9c90379732 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h | |||
@@ -70,4 +70,12 @@ | |||
70 | #define LAN88XX_MMD3_CHIP_ID (32877) | 70 | #define LAN88XX_MMD3_CHIP_ID (32877) |
71 | #define LAN88XX_MMD3_CHIP_REV (32878) | 71 | #define LAN88XX_MMD3_CHIP_REV (32878) |
72 | 72 | ||
73 | /* DSP registers */ | ||
74 | #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) | ||
75 | #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) | ||
76 | #define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) | ||
77 | #define LAN88XX_EXT_PAGE_TR_CR 16 | ||
78 | #define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 | ||
79 | #define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 | ||
80 | |||
73 | #endif /* _MICROCHIPPHY_H */ | 81 | #endif /* _MICROCHIPPHY_H */ |
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 388ff2936a87..6794490f25b2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h | |||
@@ -75,6 +75,9 @@ struct shrinker { | |||
75 | #define SHRINKER_NUMA_AWARE (1 << 0) | 75 | #define SHRINKER_NUMA_AWARE (1 << 0) |
76 | #define SHRINKER_MEMCG_AWARE (1 << 1) | 76 | #define SHRINKER_MEMCG_AWARE (1 << 1) |
77 | 77 | ||
78 | extern int register_shrinker(struct shrinker *); | 78 | extern int prealloc_shrinker(struct shrinker *shrinker); |
79 | extern void unregister_shrinker(struct shrinker *); | 79 | extern void register_shrinker_prepared(struct shrinker *shrinker); |
80 | extern int register_shrinker(struct shrinker *shrinker); | ||
81 | extern void unregister_shrinker(struct shrinker *shrinker); | ||
82 | extern void free_prealloced_shrinker(struct shrinker *shrinker); | ||
80 | #endif | 83 | #endif |
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 0494db3fd9e8..13770cfe33ad 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h | |||
@@ -62,7 +62,7 @@ struct ts_config | |||
62 | int flags; | 62 | int flags; |
63 | 63 | ||
64 | /** | 64 | /** |
65 | * get_next_block - fetch next block of data | 65 | * @get_next_block: fetch next block of data |
66 | * @consumed: number of bytes consumed by the caller | 66 | * @consumed: number of bytes consumed by the caller |
67 | * @dst: destination buffer | 67 | * @dst: destination buffer |
68 | * @conf: search configuration | 68 | * @conf: search configuration |
@@ -79,7 +79,7 @@ struct ts_config | |||
79 | struct ts_state *state); | 79 | struct ts_state *state); |
80 | 80 | ||
81 | /** | 81 | /** |
82 | * finish - finalize/clean a series of get_next_block() calls | 82 | * @finish: finalize/clean a series of get_next_block() calls |
83 | * @conf: search configuration | 83 | * @conf: search configuration |
84 | * @state: search state | 84 | * @state: search state |
85 | * | 85 | * |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a9..cf2862bd134a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -43,11 +43,7 @@ enum { | |||
43 | #define THREAD_ALIGN THREAD_SIZE | 43 | #define THREAD_ALIGN THREAD_SIZE |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) | 46 | #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) |
47 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) | ||
48 | #else | ||
49 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) | ||
50 | #endif | ||
51 | 47 | ||
52 | /* | 48 | /* |
53 | * flag set/clear/test wrappers | 49 | * flag set/clear/test wrappers |
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index af4114d5dc17..3616b4becb59 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h | |||
@@ -9,9 +9,6 @@ | |||
9 | extern void do_gettimeofday(struct timeval *tv); | 9 | extern void do_gettimeofday(struct timeval *tv); |
10 | unsigned long get_seconds(void); | 10 | unsigned long get_seconds(void); |
11 | 11 | ||
12 | /* does not take xtime_lock */ | ||
13 | struct timespec __current_kernel_time(void); | ||
14 | |||
15 | static inline struct timespec current_kernel_time(void) | 12 | static inline struct timespec current_kernel_time(void) |
16 | { | 13 | { |
17 | struct timespec64 now = current_kernel_time64(); | 14 | struct timespec64 now = current_kernel_time64(); |
diff --git a/include/linux/timer.h b/include/linux/timer.h index 2448f9cc48a3..7b066fd38248 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -8,8 +8,6 @@ | |||
8 | #include <linux/debugobjects.h> | 8 | #include <linux/debugobjects.h> |
9 | #include <linux/stringify.h> | 9 | #include <linux/stringify.h> |
10 | 10 | ||
11 | struct tvec_base; | ||
12 | |||
13 | struct timer_list { | 11 | struct timer_list { |
14 | /* | 12 | /* |
15 | * All fields that change during normal runtime grouped to the | 13 | * All fields that change during normal runtime grouped to the |
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h | |||
@@ -650,11 +650,23 @@ struct perf_event_mmap_page { | |||
650 | #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) | 650 | #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) |
651 | #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) | 651 | #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) |
652 | /* | 652 | /* |
653 | * Indicates that the content of PERF_SAMPLE_IP points to | 653 | * These PERF_RECORD_MISC_* flags below are safely reused |
654 | * the actual instruction that triggered the event. See also | 654 | * for the following events: |
655 | * perf_event_attr::precise_ip. | 655 | * |
656 | * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events | ||
657 | * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events | ||
658 | * | ||
659 | * | ||
660 | * PERF_RECORD_MISC_EXACT_IP: | ||
661 | * Indicates that the content of PERF_SAMPLE_IP points to | ||
662 | * the actual instruction that triggered the event. See also | ||
663 | * perf_event_attr::precise_ip. | ||
664 | * | ||
665 | * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: | ||
666 | * Indicates that thread was preempted in TASK_RUNNING state. | ||
656 | */ | 667 | */ |
657 | #define PERF_RECORD_MISC_EXACT_IP (1 << 14) | 668 | #define PERF_RECORD_MISC_EXACT_IP (1 << 14) |
669 | #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) | ||
658 | /* | 670 | /* |
659 | * Reserve the last bit to indicate some extended misc field | 671 | * Reserve the last bit to indicate some extended misc field |
660 | */ | 672 | */ |
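Because bit 14 is now shared, its meaning depends on the record type: EXACT_IP on samples, SWITCH_OUT_PREEMPT on context-switch records. A small tool-side sketch of how a consumer might interpret it (record_type is the perf_event_header type field, misc its misc field):

    #include <stdbool.h>
    #include <linux/perf_event.h>

    static bool switched_out_by_preemption(__u32 record_type, __u16 misc)
    {
            /* Bit 14 means EXACT_IP on PERF_RECORD_SAMPLE, so only trust it
             * on the switch records it was redefined for. */
            if (record_type != PERF_RECORD_SWITCH &&
                record_type != PERF_RECORD_SWITCH_CPU_WIDE)
                    return false;

            return (misc & PERF_RECORD_MISC_SWITCH_OUT) &&
                   (misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
    }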
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h index c34f4490d025..26ee91300e3e 100644 --- a/include/uapi/linux/random.h +++ b/include/uapi/linux/random.h | |||
@@ -35,6 +35,9 @@ | |||
35 | /* Clear the entropy pool and associated counters. (Superuser only.) */ | 35 | /* Clear the entropy pool and associated counters. (Superuser only.) */ |
36 | #define RNDCLEARPOOL _IO( 'R', 0x06 ) | 36 | #define RNDCLEARPOOL _IO( 'R', 0x06 ) |
37 | 37 | ||
38 | /* Reseed CRNG. (Superuser only.) */ | ||
39 | #define RNDRESEEDCRNG _IO( 'R', 0x07 ) | ||
40 | |||
38 | struct rand_pool_info { | 41 | struct rand_pool_info { |
39 | int entropy_count; | 42 | int entropy_count; |
40 | int buf_size; | 43 | int buf_size; |
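A minimal user of the new ioctl; it needs root (the comment above says superuser only) and a kernel/libc header set that already carries RNDRESEEDCRNG:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/random.h>

    int main(void)
    {
            int fd = open("/dev/random", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/random");
                    return 1;
            }
            /* Ask the kernel to reseed its CRNG right now. */
            if (ioctl(fd, RNDRESEEDCRNG) < 0) {
                    perror("RNDRESEEDCRNG");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }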
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h index 5c918276835e..78bb5d9f8d83 100644 --- a/include/xen/interface/io/sndif.h +++ b/include/xen/interface/io/sndif.h | |||
@@ -38,6 +38,13 @@ | |||
38 | 38 | ||
39 | /* | 39 | /* |
40 | ****************************************************************************** | 40 | ****************************************************************************** |
41 | * Protocol version | ||
42 | ****************************************************************************** | ||
43 | */ | ||
44 | #define XENSND_PROTOCOL_VERSION 2 | ||
45 | |||
46 | /* | ||
47 | ****************************************************************************** | ||
41 | * Feature and Parameter Negotiation | 48 | * Feature and Parameter Negotiation |
42 | ****************************************************************************** | 49 | ****************************************************************************** |
43 | * | 50 | * |
@@ -106,6 +113,8 @@ | |||
106 | * | 113 | * |
107 | * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" | 114 | * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" |
108 | * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" | 115 | * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" |
116 | * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386" | ||
117 | * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215" | ||
109 | * | 118 | * |
110 | *------------------------------ Stream 1, capture ---------------------------- | 119 | *------------------------------ Stream 1, capture ---------------------------- |
111 | * | 120 | * |
@@ -115,6 +124,8 @@ | |||
115 | * | 124 | * |
116 | * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" | 125 | * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" |
117 | * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" | 126 | * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" |
127 | * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384" | ||
128 | * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213" | ||
118 | * | 129 | * |
119 | *------------------------------- PCM device 1 -------------------------------- | 130 | *------------------------------- PCM device 1 -------------------------------- |
120 | * | 131 | * |
@@ -128,6 +139,8 @@ | |||
128 | * | 139 | * |
129 | * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" | 140 | * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" |
130 | * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" | 141 | * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" |
142 | * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387" | ||
143 | * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351" | ||
131 | * | 144 | * |
132 | *------------------------------- PCM device 2 -------------------------------- | 145 | *------------------------------- PCM device 2 -------------------------------- |
133 | * | 146 | * |
@@ -140,6 +153,8 @@ | |||
140 | * | 153 | * |
141 | * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" | 154 | * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" |
142 | * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" | 155 | * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" |
156 | * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389" | ||
157 | * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452" | ||
143 | * | 158 | * |
144 | ****************************************************************************** | 159 | ****************************************************************************** |
145 | * Backend XenBus Nodes | 160 | * Backend XenBus Nodes |
@@ -285,6 +300,23 @@ | |||
285 | * The Xen grant reference granting permission for the backend to map | 300 | * The Xen grant reference granting permission for the backend to map |
286 | * a sole page in a single page sized ring buffer. | 301 | * a sole page in a single page sized ring buffer. |
287 | * | 302 | * |
303 | *--------------------- Stream Event Transport Parameters --------------------- | ||
304 | * | ||
305 | * This communication path is used to deliver asynchronous events from backend | ||
306 | * to frontend, set up per stream. | ||
307 | * | ||
308 | * evt-event-channel | ||
309 | * Values: <uint32_t> | ||
310 | * | ||
311 | * The identifier of the Xen event channel used to signal activity | ||
312 | * in the ring buffer. | ||
313 | * | ||
314 | * evt-ring-ref | ||
315 | * Values: <uint32_t> | ||
316 | * | ||
317 | * The Xen grant reference granting permission for the backend to map | ||
318 | * a sole page in a single page sized ring buffer. | ||
319 | * | ||
288 | ****************************************************************************** | 320 | ****************************************************************************** |
289 | * STATE DIAGRAMS | 321 | * STATE DIAGRAMS |
290 | ****************************************************************************** | 322 | ****************************************************************************** |
@@ -432,6 +464,20 @@ | |||
432 | #define XENSND_OP_GET_VOLUME 5 | 464 | #define XENSND_OP_GET_VOLUME 5 |
433 | #define XENSND_OP_MUTE 6 | 465 | #define XENSND_OP_MUTE 6 |
434 | #define XENSND_OP_UNMUTE 7 | 466 | #define XENSND_OP_UNMUTE 7 |
467 | #define XENSND_OP_TRIGGER 8 | ||
468 | #define XENSND_OP_HW_PARAM_QUERY 9 | ||
469 | |||
470 | #define XENSND_OP_TRIGGER_START 0 | ||
471 | #define XENSND_OP_TRIGGER_PAUSE 1 | ||
472 | #define XENSND_OP_TRIGGER_STOP 2 | ||
473 | #define XENSND_OP_TRIGGER_RESUME 3 | ||
474 | |||
475 | /* | ||
476 | ****************************************************************************** | ||
477 | * EVENT CODES | ||
478 | ****************************************************************************** | ||
479 | */ | ||
480 | #define XENSND_EVT_CUR_POS 0 | ||
435 | 481 | ||
436 | /* | 482 | /* |
437 | ****************************************************************************** | 483 | ****************************************************************************** |
@@ -448,6 +494,8 @@ | |||
448 | #define XENSND_FIELD_VCARD_LONG_NAME "long-name" | 494 | #define XENSND_FIELD_VCARD_LONG_NAME "long-name" |
449 | #define XENSND_FIELD_RING_REF "ring-ref" | 495 | #define XENSND_FIELD_RING_REF "ring-ref" |
450 | #define XENSND_FIELD_EVT_CHNL "event-channel" | 496 | #define XENSND_FIELD_EVT_CHNL "event-channel" |
497 | #define XENSND_FIELD_EVT_RING_REF "evt-ring-ref" | ||
498 | #define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel" | ||
451 | #define XENSND_FIELD_DEVICE_NAME "name" | 499 | #define XENSND_FIELD_DEVICE_NAME "name" |
452 | #define XENSND_FIELD_TYPE "type" | 500 | #define XENSND_FIELD_TYPE "type" |
453 | #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" | 501 | #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" |
@@ -526,7 +574,7 @@ | |||
526 | * | 574 | * |
527 | *---------------------------------- Requests --------------------------------- | 575 | *---------------------------------- Requests --------------------------------- |
528 | * | 576 | * |
529 | * All request packets have the same length (32 octets) | 577 | * All request packets have the same length (64 octets) |
530 | * All request packets have common header: | 578 | * All request packets have common header: |
531 | * 0 1 2 3 octet | 579 | * 0 1 2 3 octet |
532 | * +----------------+----------------+----------------+----------------+ | 580 | * +----------------+----------------+----------------+----------------+ |
@@ -559,11 +607,13 @@ | |||
559 | * +----------------+----------------+----------------+----------------+ | 607 | * +----------------+----------------+----------------+----------------+ |
560 | * | gref_directory | 24 | 608 | * | gref_directory | 24 |
561 | * +----------------+----------------+----------------+----------------+ | 609 | * +----------------+----------------+----------------+----------------+ |
562 | * | reserved | 28 | 610 | * | period_sz | 28 |
611 | * +----------------+----------------+----------------+----------------+ | ||
612 | * | reserved | 32 | ||
563 | * +----------------+----------------+----------------+----------------+ | 613 | * +----------------+----------------+----------------+----------------+ |
564 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 614 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
565 | * +----------------+----------------+----------------+----------------+ | 615 | * +----------------+----------------+----------------+----------------+ |
566 | * | reserved | 32 | 616 | * | reserved | 64 |
567 | * +----------------+----------------+----------------+----------------+ | 617 | * +----------------+----------------+----------------+----------------+ |
568 | * | 618 | * |
569 | * pcm_rate - uint32_t, stream data rate, Hz | 619 | * pcm_rate - uint32_t, stream data rate, Hz |
@@ -571,6 +621,14 @@ | |||
571 | * pcm_channels - uint8_t, number of channels of this stream, | 621 | * pcm_channels - uint8_t, number of channels of this stream, |
572 | * [channels-min; channels-max] | 622 | * [channels-min; channels-max] |
573 | * buffer_sz - uint32_t, buffer size to be allocated, octets | 623 | * buffer_sz - uint32_t, buffer size to be allocated, octets |
624 | * period_sz - uint32_t, event period size, octets | ||
625 | * This is the requested value of the period at which frontend would | ||
626 | * like to receive XENSND_EVT_CUR_POS notifications from the backend when | ||
627 | * stream position advances during playback/capture. | ||
628 | * It shows how many octets are expected to be played/captured before | ||
629 | * sending such an event. | ||
630 | * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend. | ||
631 | * | ||
574 | * gref_directory - grant_ref_t, a reference to the first shared page | 632 | * gref_directory - grant_ref_t, a reference to the first shared page |
575 | * describing shared buffer references. At least one page exists. If shared | 633 | * describing shared buffer references. At least one page exists. If shared |
576 | * buffer size (buffer_sz) exceeds what can be addressed by this single page, | 634 | * buffer size (buffer_sz) exceeds what can be addressed by this single page, |
@@ -585,6 +643,7 @@ struct xensnd_open_req { | |||
585 | uint16_t reserved; | 643 | uint16_t reserved; |
586 | uint32_t buffer_sz; | 644 | uint32_t buffer_sz; |
587 | grant_ref_t gref_directory; | 645 | grant_ref_t gref_directory; |
646 | uint32_t period_sz; | ||
588 | }; | 647 | }; |
589 | 648 | ||
590 | /* | 649 | /* |
@@ -632,7 +691,7 @@ struct xensnd_page_directory { | |||
632 | * +----------------+----------------+----------------+----------------+ | 691 | * +----------------+----------------+----------------+----------------+ |
633 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 692 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
634 | * +----------------+----------------+----------------+----------------+ | 693 | * +----------------+----------------+----------------+----------------+ |
635 | * | reserved | 32 | 694 | * | reserved | 64 |
636 | * +----------------+----------------+----------------+----------------+ | 695 | * +----------------+----------------+----------------+----------------+ |
637 | * | 696 | * |
638 | * Request read/write - used for read (for capture) or write (for playback): | 697 | * Request read/write - used for read (for capture) or write (for playback): |
@@ -650,7 +709,7 @@ struct xensnd_page_directory { | |||
650 | * +----------------+----------------+----------------+----------------+ | 709 | * +----------------+----------------+----------------+----------------+ |
651 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 710 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
652 | * +----------------+----------------+----------------+----------------+ | 711 | * +----------------+----------------+----------------+----------------+ |
653 | * | reserved | 32 | 712 | * | reserved | 64 |
654 | * +----------------+----------------+----------------+----------------+ | 713 | * +----------------+----------------+----------------+----------------+ |
655 | * | 714 | * |
656 | * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write | 715 | * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write |
@@ -673,9 +732,11 @@ struct xensnd_rw_req { | |||
673 | * +----------------+----------------+----------------+----------------+ | 732 | * +----------------+----------------+----------------+----------------+ |
674 | * | length | 16 | 733 | * | length | 16 |
675 | * +----------------+----------------+----------------+----------------+ | 734 | * +----------------+----------------+----------------+----------------+ |
735 | * | reserved | 20 | ||
736 | * +----------------+----------------+----------------+----------------+ | ||
676 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 737 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
677 | * +----------------+----------------+----------------+----------------+ | 738 | * +----------------+----------------+----------------+----------------+ |
678 | * | reserved | 32 | 739 | * | reserved | 64 |
679 | * +----------------+----------------+----------------+----------------+ | 740 | * +----------------+----------------+----------------+----------------+ |
680 | * | 741 | * |
681 | * operation - XENSND_OP_SET_VOLUME for volume set | 742 | * operation - XENSND_OP_SET_VOLUME for volume set |
@@ -713,9 +774,11 @@ struct xensnd_rw_req { | |||
713 | * +----------------+----------------+----------------+----------------+ | 774 | * +----------------+----------------+----------------+----------------+ |
714 | * | length | 16 | 775 | * | length | 16 |
715 | * +----------------+----------------+----------------+----------------+ | 776 | * +----------------+----------------+----------------+----------------+ |
777 | * | reserved | 20 | ||
778 | * +----------------+----------------+----------------+----------------+ | ||
716 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 779 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
717 | * +----------------+----------------+----------------+----------------+ | 780 | * +----------------+----------------+----------------+----------------+ |
718 | * | reserved | 32 | 781 | * | reserved | 64 |
719 | * +----------------+----------------+----------------+----------------+ | 782 | * +----------------+----------------+----------------+----------------+ |
720 | * | 783 | * |
721 | * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute | 784 | * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute |
@@ -743,32 +806,213 @@ struct xensnd_rw_req { | |||
743 | * | 806 | * |
744 | * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, | 807 | * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, |
745 | * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. | 808 | * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. |
809 | * | ||
810 | * Request stream running state change - trigger PCM stream running state | ||
811 | * to start, stop, pause or resume: | ||
812 | * | ||
813 | * 0 1 2 3 octet | ||
814 | * +----------------+----------------+----------------+----------------+ | ||
815 | * | id | _OP_TRIGGER | reserved | 4 | ||
816 | * +----------------+----------------+----------------+----------------+ | ||
817 | * | reserved | 8 | ||
818 | * +----------------+----------------+----------------+----------------+ | ||
819 | * | type | reserved | 12 | ||
820 | * +----------------+----------------+----------------+----------------+ | ||
821 | * | reserved | 16 | ||
822 | * +----------------+----------------+----------------+----------------+ | ||
823 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
824 | * +----------------+----------------+----------------+----------------+ | ||
825 | * | reserved | 64 | ||
826 | * +----------------+----------------+----------------+----------------+ | ||
827 | * | ||
828 | * type - uint8_t, XENSND_OP_TRIGGER_XXX value | ||
746 | */ | 829 | */ |
747 | 830 | ||
831 | struct xensnd_trigger_req { | ||
832 | uint8_t type; | ||
833 | }; | ||
834 | |||
748 | /* | 835 | /* |
749 | *---------------------------------- Responses -------------------------------- | 836 | * Request stream parameter ranges: request intervals and |
837 | * masks of supported ranges for stream configuration values. | ||
750 | * | 838 | * |
751 | * All response packets have the same length (32 octets) | 839 | * Sound device configuration for a particular stream is a limited subset |
840 | * of the multidimensional configuration available on XenStore, e.g. | ||
841 | * once the frame rate has been selected, a limited supported range of | ||
842 | * sample rates becomes available (which might be the same set configured | ||
843 | * on XenStore or less). For example, selecting a 96kHz sample rate may limit | ||
844 | * the number of channels available for such a configuration from 4 to 2, etc. | ||
845 | * Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce configuration | ||
846 | * space making it possible to iteratively get the final stream configuration, | ||
847 | * used in XENSND_OP_OPEN request. | ||
848 | * | ||
849 | * See response format for this request. | ||
752 | * | 850 | * |
753 | * Response for all requests: | ||
754 | * 0 1 2 3 octet | 851 | * 0 1 2 3 octet |
755 | * +----------------+----------------+----------------+----------------+ | 852 | * +----------------+----------------+----------------+----------------+ |
756 | * | id | operation | reserved | 4 | 853 | * | id | _HW_PARAM_QUERY| reserved | 4 |
757 | * +----------------+----------------+----------------+----------------+ | 854 | * +----------------+----------------+----------------+----------------+ |
758 | * | status | 8 | 855 | * | reserved | 8 |
856 | * +----------------+----------------+----------------+----------------+ | ||
857 | * | formats mask low 32-bit | 12 | ||
858 | * +----------------+----------------+----------------+----------------+ | ||
859 | * | formats mask high 32-bit | 16 | ||
759 | * +----------------+----------------+----------------+----------------+ | 860 | * +----------------+----------------+----------------+----------------+ |
760 | * | reserved | 12 | 861 | * | min rate | 20 |
862 | * +----------------+----------------+----------------+----------------+ | ||
863 | * | max rate | 24 | ||
864 | * +----------------+----------------+----------------+----------------+ | ||
865 | * | min channels | 28 | ||
866 | * +----------------+----------------+----------------+----------------+ | ||
867 | * | max channels | 32 | ||
868 | * +----------------+----------------+----------------+----------------+ | ||
869 | * | min buffer frames | 36 | ||
870 | * +----------------+----------------+----------------+----------------+ | ||
871 | * | max buffer frames | 40 | ||
872 | * +----------------+----------------+----------------+----------------+ | ||
873 | * | min period frames | 44 | ||
874 | * +----------------+----------------+----------------+----------------+ | ||
875 | * | max period frames | 48 | ||
876 | * +----------------+----------------+----------------+----------------+ | ||
877 | * | reserved | 52 | ||
761 | * +----------------+----------------+----------------+----------------+ | 878 | * +----------------+----------------+----------------+----------------+ |
762 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 879 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
763 | * +----------------+----------------+----------------+----------------+ | 880 | * +----------------+----------------+----------------+----------------+ |
764 | * | reserved | 32 | 881 | * | reserved | 64 |
882 | * +----------------+----------------+----------------+----------------+ | ||
883 | * | ||
884 | * formats - uint64_t, bit mask representing values of the parameter | ||
885 | * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values | ||
886 | * | ||
887 | * For interval parameters: | ||
888 | * min - uint32_t, minimum value of the parameter | ||
889 | * max - uint32_t, maximum value of the parameter | ||
890 | * | ||
891 | * Frame is defined as a product of the number of channels by the | ||
892 | * number of octets per one sample. | ||
893 | */ | ||
894 | |||
895 | struct xensnd_query_hw_param { | ||
896 | uint64_t formats; | ||
897 | struct { | ||
898 | uint32_t min; | ||
899 | uint32_t max; | ||
900 | } rates; | ||
901 | struct { | ||
902 | uint32_t min; | ||
903 | uint32_t max; | ||
904 | } channels; | ||
905 | struct { | ||
906 | uint32_t min; | ||
907 | uint32_t max; | ||
908 | } buffer; | ||
909 | struct { | ||
910 | uint32_t min; | ||
911 | uint32_t max; | ||
912 | } period; | ||
913 | }; | ||
914 | |||
915 | /* | ||
916 | *---------------------------------- Responses -------------------------------- | ||
917 | * | ||
918 | * All response packets have the same length (64 octets) | ||
919 | * | ||
920 | * All response packets have common header: | ||
921 | * 0 1 2 3 octet | ||
922 | * +----------------+----------------+----------------+----------------+ | ||
923 | * | id | operation | reserved | 4 | ||
924 | * +----------------+----------------+----------------+----------------+ | ||
925 | * | status | 8 | ||
765 | * +----------------+----------------+----------------+----------------+ | 926 | * +----------------+----------------+----------------+----------------+ |
766 | * | 927 | * |
767 | * id - uint16_t, copied from the request | 928 | * id - uint16_t, copied from the request |
768 | * operation - uint8_t, XENSND_OP_* - copied from request | 929 | * operation - uint8_t, XENSND_OP_* - copied from request |
769 | * status - int32_t, response status, zero on success and -XEN_EXX on failure | 930 | * status - int32_t, response status, zero on success and -XEN_EXX on failure |
931 | * | ||
932 | * | ||
933 | * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY: | ||
934 | * 0 1 2 3 octet | ||
935 | * +----------------+----------------+----------------+----------------+ | ||
936 | * | id | operation | reserved | 4 | ||
937 | * +----------------+----------------+----------------+----------------+ | ||
938 | * | status | 8 | ||
939 | * +----------------+----------------+----------------+----------------+ | ||
940 | * | formats mask low 32-bit | 12 | ||
941 | * +----------------+----------------+----------------+----------------+ | ||
942 | * | formats mask high 32-bit | 16 | ||
943 | * +----------------+----------------+----------------+----------------+ | ||
944 | * | min rate | 20 | ||
945 | * +----------------+----------------+----------------+----------------+ | ||
946 | * | max rate | 24 | ||
947 | * +----------------+----------------+----------------+----------------+ | ||
948 | * | min channels | 28 | ||
949 | * +----------------+----------------+----------------+----------------+ | ||
950 | * | max channels | 32 | ||
951 | * +----------------+----------------+----------------+----------------+ | ||
952 | * | min buffer frames | 36 | ||
953 | * +----------------+----------------+----------------+----------------+ | ||
954 | * | max buffer frames | 40 | ||
955 | * +----------------+----------------+----------------+----------------+ | ||
956 | * | min period frames | 44 | ||
957 | * +----------------+----------------+----------------+----------------+ | ||
958 | * | max period frames | 48 | ||
959 | * +----------------+----------------+----------------+----------------+ | ||
960 | * | reserved | 52 | ||
961 | * +----------------+----------------+----------------+----------------+ | ||
962 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
963 | * +----------------+----------------+----------------+----------------+ | ||
964 | * | reserved | 64 | ||
965 | * +----------------+----------------+----------------+----------------+ | ||
966 | * | ||
967 | * Meaning of the values in this response is the same as for | ||
968 | * XENSND_OP_HW_PARAM_QUERY request. | ||
969 | */ | ||
970 | |||
971 | /* | ||
972 | *----------------------------------- Events ---------------------------------- | ||
973 | * | ||
974 | * Events are sent via shared page allocated by the front and propagated by | ||
975 | * evt-event-channel/evt-ring-ref XenStore entries | ||
976 | * All event packets have the same length (64 octets) | ||
977 | * All event packets have common header: | ||
978 | * 0 1 2 3 octet | ||
979 | * +----------------+----------------+----------------+----------------+ | ||
980 | * | id | type | reserved | 4 | ||
981 | * +----------------+----------------+----------------+----------------+ | ||
982 | * | reserved | 8 | ||
983 | * +----------------+----------------+----------------+----------------+ | ||
984 | * | ||
985 | * id - uint16_t, event id, may be used by front | ||
986 | * type - uint8_t, type of the event | ||
987 | * | ||
988 | * | ||
989 | * Current stream position - event from back to front when stream's | ||
990 | * playback/capture position has advanced: | ||
991 | * 0 1 2 3 octet | ||
992 | * +----------------+----------------+----------------+----------------+ | ||
993 | * | id | _EVT_CUR_POS | reserved | 4 | ||
994 | * +----------------+----------------+----------------+----------------+ | ||
995 | * | reserved | 8 | ||
996 | * +----------------+----------------+----------------+----------------+ | ||
997 | * | position low 32-bit | 12 | ||
998 | * +----------------+----------------+----------------+----------------+ | ||
999 | * | position high 32-bit | 16 | ||
1000 | * +----------------+----------------+----------------+----------------+ | ||
1001 | * | reserved | 20 | ||
1002 | * +----------------+----------------+----------------+----------------+ | ||
1003 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
1004 | * +----------------+----------------+----------------+----------------+ | ||
1005 | * | reserved | 64 | ||
1006 | * +----------------+----------------+----------------+----------------+ | ||
1007 | * | ||
1008 | * position - current value of the stream's playback/capture position, in octets | ||
1009 | * | ||
770 | */ | 1010 | */ |
771 | 1011 | ||
1012 | struct xensnd_cur_pos_evt { | ||
1013 | uint64_t position; | ||
1014 | }; | ||
1015 | |||
772 | struct xensnd_req { | 1016 | struct xensnd_req { |
773 | uint16_t id; | 1017 | uint16_t id; |
774 | uint8_t operation; | 1018 | uint8_t operation; |
@@ -776,7 +1020,9 @@ struct xensnd_req { | |||
776 | union { | 1020 | union { |
777 | struct xensnd_open_req open; | 1021 | struct xensnd_open_req open; |
778 | struct xensnd_rw_req rw; | 1022 | struct xensnd_rw_req rw; |
779 | uint8_t reserved[24]; | 1023 | struct xensnd_trigger_req trigger; |
1024 | struct xensnd_query_hw_param hw_param; | ||
1025 | uint8_t reserved[56]; | ||
780 | } op; | 1026 | } op; |
781 | }; | 1027 | }; |
782 | 1028 | ||
@@ -785,9 +1031,53 @@ struct xensnd_resp { | |||
785 | uint8_t operation; | 1031 | uint8_t operation; |
786 | uint8_t reserved; | 1032 | uint8_t reserved; |
787 | int32_t status; | 1033 | int32_t status; |
788 | uint8_t reserved1[24]; | 1034 | union { |
1035 | struct xensnd_query_hw_param hw_param; | ||
1036 | uint8_t reserved1[56]; | ||
1037 | } resp; | ||
1038 | }; | ||
1039 | |||
1040 | struct xensnd_evt { | ||
1041 | uint16_t id; | ||
1042 | uint8_t type; | ||
1043 | uint8_t reserved[5]; | ||
1044 | union { | ||
1045 | struct xensnd_cur_pos_evt cur_pos; | ||
1046 | uint8_t reserved[56]; | ||
1047 | } op; | ||
789 | }; | 1048 | }; |
790 | 1049 | ||
791 | DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); | 1050 | DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); |
792 | 1051 | ||
1052 | /* | ||
1053 | ****************************************************************************** | ||
1054 | * Back to front events delivery | ||
1055 | ****************************************************************************** | ||
1056 | * In order to deliver asynchronous events from back to front, a shared page | ||
1057 | * is allocated by the front and its granted reference is propagated to the | ||
1058 | * back via the XenStore entries (evt-ring-ref/evt-event-channel). | ||
1059 | * This page has a common header used by both front and back to synchronize | ||
1060 | * access to and control of the event ring buffer, with the back being the | ||
1061 | * producer of the events and the front being the consumer. The rest of the | ||
1062 | * page after the header is used for event packets. | ||
1063 | * | ||
1064 | * Upon reception of events, the front may confirm reception of each event, | ||
1065 | * of a group of events, or of none. | ||
1066 | */ | ||
1067 | |||
1068 | struct xensnd_event_page { | ||
1069 | uint32_t in_cons; | ||
1070 | uint32_t in_prod; | ||
1071 | uint8_t reserved[56]; | ||
1072 | }; | ||
1073 | |||
1074 | #define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE | ||
1075 | #define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page)) | ||
1076 | #define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS) | ||
1077 | #define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt)) | ||
1078 | #define XENSND_IN_RING(page) \ | ||
1079 | ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS)) | ||
1080 | #define XENSND_IN_RING_REF(page, idx) \ | ||
1081 | (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN]) | ||
1082 | |||
793 | #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ | 1083 | #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ |
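To make the ring macros above concrete, here is a minimal sketch of how a frontend might drain the event page from its evt-event-channel handler. It assumes the XENSND_EVT_CUR_POS type constant defined earlier in this header and a made-up handle_cur_pos() helper; the real frontend driver is not part of this patch.

#include <asm/barrier.h>
#include <xen/interface/io/sndif.h>

/* Hypothetical helper: a real driver would feed this into its ALSA pointer accounting. */
static void handle_cur_pos(uint64_t position)
{
}

static void drain_event_ring(struct xensnd_event_page *page)
{
	uint32_t cons, prod;

	prod = page->in_prod;
	virt_rmb();			/* see ring contents up to in_prod */
	if (prod == page->in_cons)
		return;			/* nothing new from the back */

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *e = &XENSND_IN_RING_REF(page, cons);

		if (e->type == XENSND_EVT_CUR_POS)
			handle_cur_pos(e->op.cur_pos.position);
	}

	page->in_cons = cons;		/* confirm reception of the whole group */
	virt_wmb();			/* make the new consumer index visible to the back */
}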
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 772a43fea825..c187aa3df3c8 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c | |||
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack) | |||
119 | goto exit; | 119 | goto exit; |
120 | } | 120 | } |
121 | 121 | ||
122 | if (count > 1) { | 122 | /* |
123 | /* If the allocation failed, give up */ | 123 | * If requesting per event more than the global cap, |
124 | if (!callchain_cpus_entries) | 124 | * return a different error to help userspace figure |
125 | err = -ENOMEM; | 125 | * this out. |
126 | /* | 126 | * |
127 | * If requesting per event more than the global cap, | 127 | * And also do it here so that we have &callchain_mutex held. |
128 | * return a different error to help userspace figure | 128 | */ |
129 | * this out. | 129 | if (event_max_stack > sysctl_perf_event_max_stack) { |
130 | * | 130 | err = -EOVERFLOW; |
131 | * And also do it here so that we have &callchain_mutex held. | ||
132 | */ | ||
133 | if (event_max_stack > sysctl_perf_event_max_stack) | ||
134 | err = -EOVERFLOW; | ||
135 | goto exit; | 131 | goto exit; |
136 | } | 132 | } |
137 | 133 | ||
138 | err = alloc_callchain_buffers(); | 134 | if (count == 1) |
135 | err = alloc_callchain_buffers(); | ||
139 | exit: | 136 | exit: |
140 | if (err) | 137 | if (err) |
141 | atomic_dec(&nr_callchain_events); | 138 | atomic_dec(&nr_callchain_events); |
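With this change, a caller that asks for a deeper callchain than the kernel.perf_event_max_stack sysctl allows gets -EOVERFLOW rather than a generic failure. A hedged userspace sketch of how that can be told apart from an allocation error; the depth value and error handling are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_with_callchain(struct perf_event_attr *attr, unsigned int depth)
{
	attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
	attr->sample_max_stack = depth;

	int fd = syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
	if (fd < 0 && errno == EOVERFLOW)
		fprintf(stderr,
			"depth %u exceeds kernel.perf_event_max_stack, retry with a smaller value\n",
			depth);
	return fd;
}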
diff --git a/kernel/events/core.c b/kernel/events/core.c index 2d5fe26551f8..67612ce359ad 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task, | |||
7587 | }, | 7587 | }, |
7588 | }; | 7588 | }; |
7589 | 7589 | ||
7590 | if (!sched_in && task->state == TASK_RUNNING) | ||
7591 | switch_event.event_id.header.misc |= | ||
7592 | PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; | ||
7593 | |||
7590 | perf_iterate_sb(perf_event_switch_output, | 7594 | perf_iterate_sb(perf_event_switch_output, |
7591 | &switch_event, | 7595 | &switch_event, |
7592 | NULL); | 7596 | NULL); |
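The hunk above marks a switch-out record as a preemption when the outgoing task is still TASK_RUNNING. A consumer of PERF_RECORD_SWITCH records can then distinguish preemption from voluntary switches; a minimal sketch, assuming the record header layout from uapi/linux/perf_event.h.

#include <stdbool.h>
#include <stdio.h>
#include <linux/perf_event.h>

static void handle_switch_record(const struct perf_event_header *hdr)
{
	if (hdr->type != PERF_RECORD_SWITCH &&
	    hdr->type != PERF_RECORD_SWITCH_CPU_WIDE)
		return;

	bool out = hdr->misc & PERF_RECORD_MISC_SWITCH_OUT;
	bool preempt = hdr->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;

	if (out)
		printf("switch out (%s)\n", preempt ? "preempted" : "voluntary/blocked");
	else
		printf("switch in\n");
}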
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, | |||
10205 | * __u16 sample size limit. | 10209 | * __u16 sample size limit. |
10206 | */ | 10210 | */ |
10207 | if (attr->sample_stack_user >= USHRT_MAX) | 10211 | if (attr->sample_stack_user >= USHRT_MAX) |
10208 | ret = -EINVAL; | 10212 | return -EINVAL; |
10209 | else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) | 10213 | else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) |
10210 | ret = -EINVAL; | 10214 | return -EINVAL; |
10211 | } | 10215 | } |
10212 | 10216 | ||
10213 | if (!attr->sample_max_stack) | 10217 | if (!attr->sample_max_stack) |
diff --git a/kernel/fork.c b/kernel/fork.c index 242c8c93d285..a5d21c42acfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
216 | if (!s) | 216 | if (!s) |
217 | continue; | 217 | continue; |
218 | 218 | ||
219 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
220 | /* Clear stale pointers from reused stack. */ | 219 | /* Clear stale pointers from reused stack. */ |
221 | memset(s->addr, 0, THREAD_SIZE); | 220 | memset(s->addr, 0, THREAD_SIZE); |
222 | #endif | 221 | |
223 | tsk->stack_vm_area = s; | 222 | tsk->stack_vm_area = s; |
224 | return s->addr; | 223 | return s->addr; |
225 | } | 224 | } |
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c index fdac27588d60..83958c814439 100644 --- a/kernel/livepatch/shadow.c +++ b/kernel/livepatch/shadow.c | |||
@@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id) | |||
113 | } | 113 | } |
114 | EXPORT_SYMBOL_GPL(klp_shadow_get); | 114 | EXPORT_SYMBOL_GPL(klp_shadow_get); |
115 | 115 | ||
116 | static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 116 | static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, |
117 | size_t size, gfp_t gfp_flags, bool warn_on_exist) | 117 | size_t size, gfp_t gfp_flags, |
118 | klp_shadow_ctor_t ctor, void *ctor_data, | ||
119 | bool warn_on_exist) | ||
118 | { | 120 | { |
119 | struct klp_shadow *new_shadow; | 121 | struct klp_shadow *new_shadow; |
120 | void *shadow_data; | 122 | void *shadow_data; |
@@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | |||
125 | if (shadow_data) | 127 | if (shadow_data) |
126 | goto exists; | 128 | goto exists; |
127 | 129 | ||
128 | /* Allocate a new shadow variable for use inside the lock below */ | 130 | /* |
131 | * Allocate a new shadow variable. Fill it with zeroes by default. | ||
132 | * More complex setting can be done by @ctor function. But it is | ||
133 | * called only when the buffer is really used (under klp_shadow_lock). | ||
134 | */ | ||
129 | new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); | 135 | new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); |
130 | if (!new_shadow) | 136 | if (!new_shadow) |
131 | return NULL; | 137 | return NULL; |
132 | 138 | ||
133 | new_shadow->obj = obj; | ||
134 | new_shadow->id = id; | ||
135 | |||
136 | /* Initialize the shadow variable if data provided */ | ||
137 | if (data) | ||
138 | memcpy(new_shadow->data, data, size); | ||
139 | |||
140 | /* Look for <obj, id> again under the lock */ | 139 | /* Look for <obj, id> again under the lock */ |
141 | spin_lock_irqsave(&klp_shadow_lock, flags); | 140 | spin_lock_irqsave(&klp_shadow_lock, flags); |
142 | shadow_data = klp_shadow_get(obj, id); | 141 | shadow_data = klp_shadow_get(obj, id); |
@@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | |||
150 | goto exists; | 149 | goto exists; |
151 | } | 150 | } |
152 | 151 | ||
152 | new_shadow->obj = obj; | ||
153 | new_shadow->id = id; | ||
154 | |||
155 | if (ctor) { | ||
156 | int err; | ||
157 | |||
158 | err = ctor(obj, new_shadow->data, ctor_data); | ||
159 | if (err) { | ||
160 | spin_unlock_irqrestore(&klp_shadow_lock, flags); | ||
161 | kfree(new_shadow); | ||
162 | pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n", | ||
163 | obj, id, err); | ||
164 | return NULL; | ||
165 | } | ||
166 | } | ||
167 | |||
153 | /* No <obj, id> found, so attach the newly allocated one */ | 168 | /* No <obj, id> found, so attach the newly allocated one */ |
154 | hash_add_rcu(klp_shadow_hash, &new_shadow->node, | 169 | hash_add_rcu(klp_shadow_hash, &new_shadow->node, |
155 | (unsigned long)new_shadow->obj); | 170 | (unsigned long)new_shadow->obj); |
@@ -170,26 +185,32 @@ exists: | |||
170 | * klp_shadow_alloc() - allocate and add a new shadow variable | 185 | * klp_shadow_alloc() - allocate and add a new shadow variable |
171 | * @obj: pointer to parent object | 186 | * @obj: pointer to parent object |
172 | * @id: data identifier | 187 | * @id: data identifier |
173 | * @data: pointer to data to attach to parent | ||
174 | * @size: size of attached data | 188 | * @size: size of attached data |
175 | * @gfp_flags: GFP mask for allocation | 189 | * @gfp_flags: GFP mask for allocation |
190 | * @ctor: custom constructor to initialize the shadow data (optional) | ||
191 | * @ctor_data: pointer to any data needed by @ctor (optional) | ||
192 | * | ||
193 | * Allocates @size bytes for new shadow variable data using @gfp_flags. | ||
194 | * The data are zeroed by default. They are further initialized by @ctor | ||
195 | * function if it is not NULL. The new shadow variable is then added | ||
196 | * to the global hashtable. | ||
176 | * | 197 | * |
177 | * Allocates @size bytes for new shadow variable data using @gfp_flags | 198 | * If an existing <obj, id> shadow variable can be found, this routine will |
178 | * and copies @size bytes from @data into the new shadow variable's own | 199 | * issue a WARN, exit early and return NULL. |
179 | * data space. If @data is NULL, @size bytes are still allocated, but | ||
180 | * no copy is performed. The new shadow variable is then added to the | ||
181 | * global hashtable. | ||
182 | * | 200 | * |
183 | * If an existing <obj, id> shadow variable can be found, this routine | 201 | * This function guarantees that the constructor function is called only when |
184 | * will issue a WARN, exit early and return NULL. | 202 | * the variable did not exist before. The cost is that @ctor is called |
203 | * in atomic context under a spin lock. | ||
185 | * | 204 | * |
186 | * Return: the shadow variable data element, NULL on duplicate or | 205 | * Return: the shadow variable data element, NULL on duplicate or |
187 | * failure. | 206 | * failure. |
188 | */ | 207 | */ |
189 | void *klp_shadow_alloc(void *obj, unsigned long id, void *data, | 208 | void *klp_shadow_alloc(void *obj, unsigned long id, |
190 | size_t size, gfp_t gfp_flags) | 209 | size_t size, gfp_t gfp_flags, |
210 | klp_shadow_ctor_t ctor, void *ctor_data) | ||
191 | { | 211 | { |
192 | return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); | 212 | return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, |
213 | ctor, ctor_data, true); | ||
193 | } | 214 | } |
194 | EXPORT_SYMBOL_GPL(klp_shadow_alloc); | 215 | EXPORT_SYMBOL_GPL(klp_shadow_alloc); |
195 | 216 | ||
@@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc); | |||
197 | * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable | 218 | * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable |
198 | * @obj: pointer to parent object | 219 | * @obj: pointer to parent object |
199 | * @id: data identifier | 220 | * @id: data identifier |
200 | * @data: pointer to data to attach to parent | ||
201 | * @size: size of attached data | 221 | * @size: size of attached data |
202 | * @gfp_flags: GFP mask for allocation | 222 | * @gfp_flags: GFP mask for allocation |
223 | * @ctor: custom constructor to initialize the shadow data (optional) | ||
224 | * @ctor_data: pointer to any data needed by @ctor (optional) | ||
203 | * | 225 | * |
204 | * Returns a pointer to existing shadow data if an <obj, id> shadow | 226 | * Returns a pointer to existing shadow data if an <obj, id> shadow |
205 | * variable is already present. Otherwise, it creates a new shadow | 227 | * variable is already present. Otherwise, it creates a new shadow |
206 | * variable like klp_shadow_alloc(). | 228 | * variable like klp_shadow_alloc(). |
207 | * | 229 | * |
208 | * This function guarantees that only one shadow variable exists with | 230 | * This function guarantees that only one shadow variable exists with the given |
209 | * the given @id for the given @obj. It also guarantees that the shadow | 231 | * @id for the given @obj. It also guarantees that the constructor function |
210 | * variable will be initialized by the given @data only when it did not | 232 | * will be called only when the variable did not exist before. The cost is |
211 | * exist before. | 233 | * that @ctor is called in atomic context under a spin lock. |
212 | * | 234 | * |
213 | * Return: the shadow variable data element, NULL on failure. | 235 | * Return: the shadow variable data element, NULL on failure. |
214 | */ | 236 | */ |
215 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 237 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, |
216 | size_t size, gfp_t gfp_flags) | 238 | size_t size, gfp_t gfp_flags, |
239 | klp_shadow_ctor_t ctor, void *ctor_data) | ||
217 | { | 240 | { |
218 | return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); | 241 | return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, |
242 | ctor, ctor_data, false); | ||
219 | } | 243 | } |
220 | EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); | 244 | EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); |
221 | 245 | ||
246 | static void klp_shadow_free_struct(struct klp_shadow *shadow, | ||
247 | klp_shadow_dtor_t dtor) | ||
248 | { | ||
249 | hash_del_rcu(&shadow->node); | ||
250 | if (dtor) | ||
251 | dtor(shadow->obj, shadow->data); | ||
252 | kfree_rcu(shadow, rcu_head); | ||
253 | } | ||
254 | |||
222 | /** | 255 | /** |
223 | * klp_shadow_free() - detach and free a <obj, id> shadow variable | 256 | * klp_shadow_free() - detach and free a <obj, id> shadow variable |
224 | * @obj: pointer to parent object | 257 | * @obj: pointer to parent object |
225 | * @id: data identifier | 258 | * @id: data identifier |
259 | * @dtor: custom callback that can be used to unregister the variable | ||
260 | * and/or free data that the shadow variable points to (optional) | ||
226 | * | 261 | * |
227 | * This function releases the memory for this <obj, id> shadow variable | 262 | * This function releases the memory for this <obj, id> shadow variable |
228 | * instance, callers should stop referencing it accordingly. | 263 | * instance, callers should stop referencing it accordingly. |
229 | */ | 264 | */ |
230 | void klp_shadow_free(void *obj, unsigned long id) | 265 | void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) |
231 | { | 266 | { |
232 | struct klp_shadow *shadow; | 267 | struct klp_shadow *shadow; |
233 | unsigned long flags; | 268 | unsigned long flags; |
@@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id) | |||
239 | (unsigned long)obj) { | 274 | (unsigned long)obj) { |
240 | 275 | ||
241 | if (klp_shadow_match(shadow, obj, id)) { | 276 | if (klp_shadow_match(shadow, obj, id)) { |
242 | hash_del_rcu(&shadow->node); | 277 | klp_shadow_free_struct(shadow, dtor); |
243 | kfree_rcu(shadow, rcu_head); | ||
244 | break; | 278 | break; |
245 | } | 279 | } |
246 | } | 280 | } |
@@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free); | |||
252 | /** | 286 | /** |
253 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables | 287 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables |
254 | * @id: data identifier | 288 | * @id: data identifier |
289 | * @dtor: custom callback that can be used to unregister the variable | ||
290 | * and/or free data that the shadow variable points to (optional) | ||
255 | * | 291 | * |
256 | * This function releases the memory for all <*, id> shadow variable | 292 | * This function releases the memory for all <*, id> shadow variable |
257 | * instances, callers should stop referencing them accordingly. | 293 | * instances, callers should stop referencing them accordingly. |
258 | */ | 294 | */ |
259 | void klp_shadow_free_all(unsigned long id) | 295 | void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) |
260 | { | 296 | { |
261 | struct klp_shadow *shadow; | 297 | struct klp_shadow *shadow; |
262 | unsigned long flags; | 298 | unsigned long flags; |
@@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id) | |||
266 | 302 | ||
267 | /* Delete all <*, id> from hash */ | 303 | /* Delete all <*, id> from hash */ |
268 | hash_for_each(klp_shadow_hash, i, shadow, node) { | 304 | hash_for_each(klp_shadow_hash, i, shadow, node) { |
269 | if (klp_shadow_match(shadow, shadow->obj, id)) { | 305 | if (klp_shadow_match(shadow, shadow->obj, id)) |
270 | hash_del_rcu(&shadow->node); | 306 | klp_shadow_free_struct(shadow, dtor); |
271 | kfree_rcu(shadow, rcu_head); | ||
272 | } | ||
273 | } | 307 | } |
274 | 308 | ||
275 | spin_unlock_irqrestore(&klp_shadow_lock, flags); | 309 | spin_unlock_irqrestore(&klp_shadow_lock, flags); |
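Taken together, the shadow-variable API now moves one-time initialization into a constructor that runs under klp_shadow_lock and teardown into a destructor passed at free time. A hedged usage sketch for a livepatch module follows; the object, id and payload are invented for illustration and only the function signatures come from this patch.

#include <linux/livepatch.h>
#include <linux/slab.h>

#define SHADOW_LEAK_ID	1	/* arbitrary id chosen by the livepatch */

struct shadow_leak {
	void *leaked_buf;
};

/* Runs under klp_shadow_lock (atomic context): must not sleep or allocate. */
static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	struct shadow_leak *sl = shadow_data;

	sl->leaked_buf = ctor_data;	/* remember the buffer handed in by the caller */
	return 0;			/* non-zero aborts klp_shadow_alloc() */
}

static void shadow_leak_dtor(void *obj, void *shadow_data)
{
	struct shadow_leak *sl = shadow_data;

	kfree(sl->leaked_buf);		/* release whatever the shadow variable was tracking */
}

static void track_leak(void *obj, void *buf)
{
	klp_shadow_alloc(obj, SHADOW_LEAK_ID, sizeof(struct shadow_leak),
			 GFP_KERNEL, shadow_leak_ctor, buf);
}

static void untrack_leak(void *obj)
{
	klp_shadow_free(obj, SHADOW_LEAK_ID, shadow_leak_dtor);
}

klp_shadow_get_or_alloc() takes the same ctor/ctor_data arguments and is the variant to use when several code paths may race to create the same <obj, id> pair.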
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 2541bd89f20e..5a6251ac6f7a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, | |||
1205 | u64 *newval, u64 *oldval) | 1205 | u64 *newval, u64 *oldval) |
1206 | { | 1206 | { |
1207 | u64 now; | 1207 | u64 now; |
1208 | int ret; | ||
1208 | 1209 | ||
1209 | WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); | 1210 | WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); |
1211 | ret = cpu_timer_sample_group(clock_idx, tsk, &now); | ||
1210 | 1212 | ||
1211 | if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) { | 1213 | if (oldval && ret != -EINVAL) { |
1212 | /* | 1214 | /* |
1213 | * We are setting itimer. The *oldval is absolute and we update | 1215 | * We are setting itimer. The *oldval is absolute and we update |
1214 | * it to be relative, *newval argument is relative and we update | 1216 | * it to be relative, *newval argument is relative and we update |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index c1f518e7aa80..6fe615d57ebb 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) | |||
82 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || | 82 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || |
83 | !tick_device_is_functional(dev)) { | 83 | !tick_device_is_functional(dev)) { |
84 | 84 | ||
85 | printk(KERN_INFO "Clockevents: " | 85 | pr_info("Clockevents: could not switch to one-shot mode:"); |
86 | "could not switch to one-shot mode:"); | ||
87 | if (!dev) { | 86 | if (!dev) { |
88 | printk(" no tick device\n"); | 87 | pr_cont(" no tick device\n"); |
89 | } else { | 88 | } else { |
90 | if (!tick_device_is_functional(dev)) | 89 | if (!tick_device_is_functional(dev)) |
91 | printk(" %s is not functional.\n", dev->name); | 90 | pr_cont(" %s is not functional.\n", dev->name); |
92 | else | 91 | else |
93 | printk(" %s does not support one-shot mode.\n", | 92 | pr_cont(" %s does not support one-shot mode.\n", |
94 | dev->name); | 93 | dev->name); |
95 | } | 94 | } |
96 | return -EINVAL; | 95 | return -EINVAL; |
97 | } | 96 | } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ca90219a1e73..dcf7f20fcd12 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -2139,13 +2139,6 @@ unsigned long get_seconds(void) | |||
2139 | } | 2139 | } |
2140 | EXPORT_SYMBOL(get_seconds); | 2140 | EXPORT_SYMBOL(get_seconds); |
2141 | 2141 | ||
2142 | struct timespec __current_kernel_time(void) | ||
2143 | { | ||
2144 | struct timekeeper *tk = &tk_core.timekeeper; | ||
2145 | |||
2146 | return timespec64_to_timespec(tk_xtime(tk)); | ||
2147 | } | ||
2148 | |||
2149 | struct timespec64 current_kernel_time64(void) | 2142 | struct timespec64 current_kernel_time64(void) |
2150 | { | 2143 | { |
2151 | struct timekeeper *tk = &tk_core.timekeeper; | 2144 | struct timekeeper *tk = &tk_core.timekeeper; |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1cd3fb4d70f8..02aed76e0978 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) | |||
512 | if (ret == 0) | 512 | if (ret == 0) |
513 | tk->tp.flags |= TP_FLAG_REGISTERED; | 513 | tk->tp.flags |= TP_FLAG_REGISTERED; |
514 | else { | 514 | else { |
515 | pr_warn("Could not insert probe at %s+%lu: %d\n", | ||
516 | trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); | ||
517 | if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { | 515 | if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { |
518 | pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); | 516 | pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); |
519 | ret = 0; | 517 | ret = 0; |
diff --git a/lib/textsearch.c b/lib/textsearch.c index 0b79908dfe89..5939549c0e7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c | |||
@@ -10,7 +10,10 @@ | |||
10 | * Pablo Neira Ayuso <pablo@netfilter.org> | 10 | * Pablo Neira Ayuso <pablo@netfilter.org> |
11 | * | 11 | * |
12 | * ========================================================================== | 12 | * ========================================================================== |
13 | * | 13 | */ |
14 | |||
15 | /** | ||
16 | * DOC: ts_intro | ||
14 | * INTRODUCTION | 17 | * INTRODUCTION |
15 | * | 18 | * |
16 | * The textsearch infrastructure provides text searching facilities for | 19 | * The textsearch infrastructure provides text searching facilities for |
@@ -19,7 +22,9 @@ | |||
19 | * | 22 | * |
20 | * ARCHITECTURE | 23 | * ARCHITECTURE |
21 | * | 24 | * |
22 | * User | 25 | * .. code-block:: none |
26 | * | ||
27 | * User | ||
23 | * +----------------+ | 28 | * +----------------+ |
24 | * | finish()|<--------------(6)-----------------+ | 29 | * | finish()|<--------------(6)-----------------+ |
25 | * |get_next_block()|<--------------(5)---------------+ | | 30 | * |get_next_block()|<--------------(5)---------------+ | |
@@ -33,21 +38,21 @@ | |||
33 | * | (3)|----->| find()/next() |-----------+ | | 38 | * | (3)|----->| find()/next() |-----------+ | |
34 | * | (7)|----->| destroy() |----------------------+ | 39 | * | (7)|----->| destroy() |----------------------+ |
35 | * +----------------+ +---------------+ | 40 | * +----------------+ +---------------+ |
36 | * | 41 | * |
37 | * (1) User configures a search by calling _prepare() specifying the | 42 | * (1) User configures a search by calling textsearch_prepare() specifying |
38 | * search parameters such as the pattern and algorithm name. | 43 | * the search parameters such as the pattern and algorithm name. |
39 | * (2) Core requests the algorithm to allocate and initialize a search | 44 | * (2) Core requests the algorithm to allocate and initialize a search |
40 | * configuration according to the specified parameters. | 45 | * configuration according to the specified parameters. |
41 | * (3) User starts the search(es) by calling _find() or _next() to | 46 | * (3) User starts the search(es) by calling textsearch_find() or |
42 | * fetch subsequent occurrences. A state variable is provided | 47 | * textsearch_next() to fetch subsequent occurrences. A state variable |
43 | * to the algorithm to store persistent variables. | 48 | * is provided to the algorithm to store persistent variables. |
44 | * (4) Core eventually resets the search offset and forwards the find() | 49 | * (4) Core eventually resets the search offset and forwards the find() |
45 | * request to the algorithm. | 50 | * request to the algorithm. |
46 | * (5) Algorithm calls get_next_block() provided by the user continuously | 51 | * (5) Algorithm calls get_next_block() provided by the user continuously |
47 | * to fetch the data to be searched in block by block. | 52 | * to fetch the data to be searched in block by block. |
48 | * (6) Algorithm invokes finish() after the last call to get_next_block | 53 | * (6) Algorithm invokes finish() after the last call to get_next_block |
49 | * to clean up any leftovers from get_next_block. (Optional) | 54 | * to clean up any leftovers from get_next_block. (Optional) |
50 | * (7) User destroys the configuration by calling _destroy(). | 55 | * (7) User destroys the configuration by calling textsearch_destroy(). |
51 | * (8) Core notifies the algorithm to destroy algorithm specific | 56 | * (8) Core notifies the algorithm to destroy algorithm specific |
52 | * allocations. (Optional) | 57 | * allocations. (Optional) |
53 | * | 58 | * |
@@ -62,9 +67,10 @@ | |||
62 | * amount of times and even in parallel as long as a separate struct | 67 | * amount of times and even in parallel as long as a separate struct |
63 | * ts_state variable is provided to every instance. | 68 | * ts_state variable is provided to every instance. |
64 | * | 69 | * |
65 | * The actual search is performed by either calling textsearch_find_- | 70 | * The actual search is performed by either calling |
66 | * continuous() for linear data or by providing an own get_next_block() | 71 | * textsearch_find_continuous() for linear data or by providing |
67 | * implementation and calling textsearch_find(). Both functions return | 72 | * an own get_next_block() implementation and |
73 | * calling textsearch_find(). Both functions return | ||
68 | * the position of the first occurrence of the pattern or UINT_MAX if | 74 | * the position of the first occurrence of the pattern or UINT_MAX if |
69 | * no match was found. Subsequent occurrences can be found by calling | 75 | * no match was found. Subsequent occurrences can be found by calling |
70 | * textsearch_next() regardless of the linearity of the data. | 76 | * textsearch_next() regardless of the linearity of the data. |
@@ -72,7 +78,7 @@ | |||
72 | * Once you're done using a configuration it must be given back via | 78 | * Once you're done using a configuration it must be given back via |
73 | * textsearch_destroy. | 79 | * textsearch_destroy. |
74 | * | 80 | * |
75 | * EXAMPLE | 81 | * EXAMPLE:: |
76 | * | 82 | * |
77 | * int pos; | 83 | * int pos; |
78 | * struct ts_config *conf; | 84 | * struct ts_config *conf; |
@@ -87,13 +93,13 @@ | |||
87 | * goto errout; | 93 | * goto errout; |
88 | * } | 94 | * } |
89 | * | 95 | * |
90 | * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); | 96 | * pos = textsearch_find_continuous(conf, \&state, example, strlen(example)); |
91 | * if (pos != UINT_MAX) | 97 | * if (pos != UINT_MAX) |
92 | * panic("Oh my god, dancing chickens at %d\n", pos); | 98 | * panic("Oh my god, dancing chickens at \%d\n", pos); |
93 | * | 99 | * |
94 | * textsearch_destroy(conf); | 100 | * textsearch_destroy(conf); |
95 | * ========================================================================== | ||
96 | */ | 101 | */ |
102 | /* ========================================================================== */ | ||
97 | 103 | ||
98 | #include <linux/module.h> | 104 | #include <linux/module.h> |
99 | #include <linux/types.h> | 105 | #include <linux/types.h> |
@@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst, | |||
225 | * | 231 | * |
226 | * Returns the position of first occurrence of the pattern or | 232 | * Returns the position of first occurrence of the pattern or |
227 | * %UINT_MAX if no occurrence was found. | 233 | * %UINT_MAX if no occurrence was found. |
228 | */ | 234 | */ |
229 | unsigned int textsearch_find_continuous(struct ts_config *conf, | 235 | unsigned int textsearch_find_continuous(struct ts_config *conf, |
230 | struct ts_state *state, | 236 | struct ts_state *state, |
231 | const void *data, unsigned int len) | 237 | const void *data, unsigned int len) |
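To complement the kernel-doc EXAMPLE above, which appears only partially in this hunk, here is a self-contained sketch of the prepare/find/next/destroy cycle over linear data; the algorithm name, pattern and haystack are arbitrary choices, not part of the patch.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/textsearch.h>

static unsigned int count_matches(const char *haystack)
{
	static const char pattern[] = "needle";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos, matches = 0;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return 0;

	/* Linear data: the core supplies get_next_block()/finish() for us. */
	for (pos = textsearch_find_continuous(conf, &state,
					      haystack, strlen(haystack));
	     pos != UINT_MAX;
	     pos = textsearch_next(conf, &state))
		matches++;

	textsearch_destroy(conf);
	return matches;
}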
diff --git a/mm/filemap.c b/mm/filemap.c index 9276bdb2343c..0604cb02e6f3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | |||
786 | VM_BUG_ON_PAGE(!PageLocked(new), new); | 786 | VM_BUG_ON_PAGE(!PageLocked(new), new); |
787 | VM_BUG_ON_PAGE(new->mapping, new); | 787 | VM_BUG_ON_PAGE(new->mapping, new); |
788 | 788 | ||
789 | error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); | 789 | error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); |
790 | if (!error) { | 790 | if (!error) { |
791 | struct address_space *mapping = old->mapping; | 791 | struct address_space *mapping = old->mapping; |
792 | void (*freepage)(struct page *); | 792 | void (*freepage)(struct page *); |
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page, | |||
842 | return error; | 842 | return error; |
843 | } | 843 | } |
844 | 844 | ||
845 | error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); | 845 | error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); |
846 | if (error) { | 846 | if (error) { |
847 | if (!huge) | 847 | if (!huge) |
848 | mem_cgroup_cancel_charge(page, memcg, false); | 848 | mem_cgroup_cancel_charge(page, memcg, false); |
@@ -1585,8 +1585,7 @@ no_page: | |||
1585 | if (fgp_flags & FGP_ACCESSED) | 1585 | if (fgp_flags & FGP_ACCESSED) |
1586 | __SetPageReferenced(page); | 1586 | __SetPageReferenced(page); |
1587 | 1587 | ||
1588 | err = add_to_page_cache_lru(page, mapping, offset, | 1588 | err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); |
1589 | gfp_mask & GFP_RECLAIM_MASK); | ||
1590 | if (unlikely(err)) { | 1589 | if (unlikely(err)) { |
1591 | put_page(page); | 1590 | put_page(page); |
1592 | page = NULL; | 1591 | page = NULL; |
@@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) | |||
2387 | if (!page) | 2386 | if (!page) |
2388 | return -ENOMEM; | 2387 | return -ENOMEM; |
2389 | 2388 | ||
2390 | ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); | 2389 | ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); |
2391 | if (ret == 0) | 2390 | if (ret == 0) |
2392 | ret = mapping->a_ops->readpage(file, page); | 2391 | ret = mapping->a_ops->readpage(file, page); |
2393 | else if (ret == -EEXIST) | 2392 | else if (ret == -EEXIST) |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 14ed6ee5e02f..a3a1815f8e11 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) | |||
2925 | pmde = maybe_pmd_mkwrite(pmde, vma); | 2925 | pmde = maybe_pmd_mkwrite(pmde, vma); |
2926 | 2926 | ||
2927 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); | 2927 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); |
2928 | page_add_anon_rmap(new, vma, mmun_start, true); | 2928 | if (PageAnon(new)) |
2929 | page_add_anon_rmap(new, vma, mmun_start, true); | ||
2930 | else | ||
2931 | page_add_file_rmap(new, true); | ||
2929 | set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); | 2932 | set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); |
2930 | if (vma->vm_flags & VM_LOCKED) | 2933 | if (vma->vm_flags & VM_LOCKED) |
2931 | mlock_vma_page(new); | 2934 | mlock_vma_page(new); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e074f7c637aa..2bd3df3d101a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, | |||
2192 | { | 2192 | { |
2193 | struct memcg_kmem_cache_create_work *cw; | 2193 | struct memcg_kmem_cache_create_work *cw; |
2194 | 2194 | ||
2195 | cw = kmalloc(sizeof(*cw), GFP_NOWAIT); | 2195 | cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); |
2196 | if (!cw) | 2196 | if (!cw) |
2197 | return; | 2197 | return; |
2198 | 2198 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index f65dd69e1fd1..568433023831 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
472 | pslot = radix_tree_lookup_slot(&mapping->i_pages, | 472 | pslot = radix_tree_lookup_slot(&mapping->i_pages, |
473 | page_index(page)); | 473 | page_index(page)); |
474 | 474 | ||
475 | expected_count += 1 + page_has_private(page); | 475 | expected_count += hpage_nr_pages(page) + page_has_private(page); |
476 | if (page_count(page) != expected_count || | 476 | if (page_count(page) != expected_count || |
477 | radix_tree_deref_slot_protected(pslot, | 477 | radix_tree_deref_slot_protected(pslot, |
478 | &mapping->i_pages.xa_lock) != page) { | 478 | &mapping->i_pages.xa_lock) != page) { |
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
505 | */ | 505 | */ |
506 | newpage->index = page->index; | 506 | newpage->index = page->index; |
507 | newpage->mapping = page->mapping; | 507 | newpage->mapping = page->mapping; |
508 | get_page(newpage); /* add cache reference */ | 508 | page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ |
509 | if (PageSwapBacked(page)) { | 509 | if (PageSwapBacked(page)) { |
510 | __SetPageSwapBacked(newpage); | 510 | __SetPageSwapBacked(newpage); |
511 | if (PageSwapCache(page)) { | 511 | if (PageSwapCache(page)) { |
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
524 | } | 524 | } |
525 | 525 | ||
526 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); | 526 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); |
527 | if (PageTransHuge(page)) { | ||
528 | int i; | ||
529 | int index = page_index(page); | ||
530 | |||
531 | for (i = 0; i < HPAGE_PMD_NR; i++) { | ||
532 | pslot = radix_tree_lookup_slot(&mapping->i_pages, | ||
533 | index + i); | ||
534 | radix_tree_replace_slot(&mapping->i_pages, pslot, | ||
535 | newpage + i); | ||
536 | } | ||
537 | } else { | ||
538 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); | ||
539 | } | ||
527 | 540 | ||
528 | /* | 541 | /* |
529 | * Drop cache reference from old page by unfreezing | 542 | * Drop cache reference from old page by unfreezing |
530 | * to one less reference. | 543 | * to one less reference. |
531 | * We know this isn't the last reference. | 544 | * We know this isn't the last reference. |
532 | */ | 545 | */ |
533 | page_ref_unfreeze(page, expected_count - 1); | 546 | page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); |
534 | 547 | ||
535 | xa_unlock(&mapping->i_pages); | 548 | xa_unlock(&mapping->i_pages); |
536 | /* Leave irq disabled to prevent preemption while updating stats */ | 549 | /* Leave irq disabled to prevent preemption while updating stats */ |
@@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, | |||
1622 | current_node = NUMA_NO_NODE; | 1635 | current_node = NUMA_NO_NODE; |
1623 | } | 1636 | } |
1624 | out_flush: | 1637 | out_flush: |
1638 | if (list_empty(&pagelist)) | ||
1639 | return err; | ||
1640 | |||
1625 | /* Make sure we do not overwrite the existing error */ | 1641 | /* Make sure we do not overwrite the existing error */ |
1626 | err1 = do_move_pages_to_node(mm, &pagelist, current_node); | 1642 | err1 = do_move_pages_to_node(mm, &pagelist, current_node); |
1627 | if (!err1) | 1643 | if (!err1) |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5c1a3279e63f..337c6afb3345 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page) | |||
2502 | if (mapping && mapping_cap_account_dirty(mapping)) { | 2502 | if (mapping && mapping_cap_account_dirty(mapping)) { |
2503 | struct inode *inode = mapping->host; | 2503 | struct inode *inode = mapping->host; |
2504 | struct bdi_writeback *wb; | 2504 | struct bdi_writeback *wb; |
2505 | bool locked; | 2505 | struct wb_lock_cookie cookie = {}; |
2506 | 2506 | ||
2507 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2507 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
2508 | current->nr_dirtied--; | 2508 | current->nr_dirtied--; |
2509 | dec_node_page_state(page, NR_DIRTIED); | 2509 | dec_node_page_state(page, NR_DIRTIED); |
2510 | dec_wb_stat(wb, WB_DIRTIED); | 2510 | dec_wb_stat(wb, WB_DIRTIED); |
2511 | unlocked_inode_to_wb_end(inode, locked); | 2511 | unlocked_inode_to_wb_end(inode, &cookie); |
2512 | } | 2512 | } |
2513 | } | 2513 | } |
2514 | EXPORT_SYMBOL(account_page_redirty); | 2514 | EXPORT_SYMBOL(account_page_redirty); |
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page) | |||
2614 | if (mapping_cap_account_dirty(mapping)) { | 2614 | if (mapping_cap_account_dirty(mapping)) { |
2615 | struct inode *inode = mapping->host; | 2615 | struct inode *inode = mapping->host; |
2616 | struct bdi_writeback *wb; | 2616 | struct bdi_writeback *wb; |
2617 | bool locked; | 2617 | struct wb_lock_cookie cookie = {}; |
2618 | 2618 | ||
2619 | lock_page_memcg(page); | 2619 | lock_page_memcg(page); |
2620 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2620 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
2621 | 2621 | ||
2622 | if (TestClearPageDirty(page)) | 2622 | if (TestClearPageDirty(page)) |
2623 | account_page_cleaned(page, mapping, wb); | 2623 | account_page_cleaned(page, mapping, wb); |
2624 | 2624 | ||
2625 | unlocked_inode_to_wb_end(inode, locked); | 2625 | unlocked_inode_to_wb_end(inode, &cookie); |
2626 | unlock_page_memcg(page); | 2626 | unlock_page_memcg(page); |
2627 | } else { | 2627 | } else { |
2628 | ClearPageDirty(page); | 2628 | ClearPageDirty(page); |
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page) | |||
2654 | if (mapping && mapping_cap_account_dirty(mapping)) { | 2654 | if (mapping && mapping_cap_account_dirty(mapping)) { |
2655 | struct inode *inode = mapping->host; | 2655 | struct inode *inode = mapping->host; |
2656 | struct bdi_writeback *wb; | 2656 | struct bdi_writeback *wb; |
2657 | bool locked; | 2657 | struct wb_lock_cookie cookie = {}; |
2658 | 2658 | ||
2659 | /* | 2659 | /* |
2660 | * Yes, Virginia, this is indeed insane. | 2660 | * Yes, Virginia, this is indeed insane. |
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page) | |||
2691 | * always locked coming in here, so we get the desired | 2691 | * always locked coming in here, so we get the desired |
2692 | * exclusion. | 2692 | * exclusion. |
2693 | */ | 2693 | */ |
2694 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2694 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
2695 | if (TestClearPageDirty(page)) { | 2695 | if (TestClearPageDirty(page)) { |
2696 | dec_lruvec_page_state(page, NR_FILE_DIRTY); | 2696 | dec_lruvec_page_state(page, NR_FILE_DIRTY); |
2697 | dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); | 2697 | dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); |
2698 | dec_wb_stat(wb, WB_RECLAIMABLE); | 2698 | dec_wb_stat(wb, WB_RECLAIMABLE); |
2699 | ret = 1; | 2699 | ret = 1; |
2700 | } | 2700 | } |
2701 | unlocked_inode_to_wb_end(inode, locked); | 2701 | unlocked_inode_to_wb_end(inode, &cookie); |
2702 | return ret; | 2702 | return ret; |
2703 | } | 2703 | } |
2704 | return TestClearPageDirty(page); | 2704 | return TestClearPageDirty(page); |
@@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1374 | if (!pvmw.pte && (flags & TTU_MIGRATION)) { | 1374 | if (!pvmw.pte && (flags & TTU_MIGRATION)) { |
1375 | VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); | 1375 | VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); |
1376 | 1376 | ||
1377 | if (!PageAnon(page)) | ||
1378 | continue; | ||
1379 | |||
1380 | set_pmd_migration_entry(&pvmw, page); | 1377 | set_pmd_migration_entry(&pvmw, page); |
1381 | continue; | 1378 | continue; |
1382 | } | 1379 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b920ce3ae02..9b697323a88c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -303,7 +303,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone | |||
303 | /* | 303 | /* |
304 | * Add a shrinker callback to be called from the vm. | 304 | * Add a shrinker callback to be called from the vm. |
305 | */ | 305 | */ |
306 | int register_shrinker(struct shrinker *shrinker) | 306 | int prealloc_shrinker(struct shrinker *shrinker) |
307 | { | 307 | { |
308 | size_t size = sizeof(*shrinker->nr_deferred); | 308 | size_t size = sizeof(*shrinker->nr_deferred); |
309 | 309 | ||
@@ -313,10 +313,29 @@ int register_shrinker(struct shrinker *shrinker) | |||
313 | shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); | 313 | shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); |
314 | if (!shrinker->nr_deferred) | 314 | if (!shrinker->nr_deferred) |
315 | return -ENOMEM; | 315 | return -ENOMEM; |
316 | return 0; | ||
317 | } | ||
318 | |||
319 | void free_prealloced_shrinker(struct shrinker *shrinker) | ||
320 | { | ||
321 | kfree(shrinker->nr_deferred); | ||
322 | shrinker->nr_deferred = NULL; | ||
323 | } | ||
316 | 324 | ||
325 | void register_shrinker_prepared(struct shrinker *shrinker) | ||
326 | { | ||
317 | down_write(&shrinker_rwsem); | 327 | down_write(&shrinker_rwsem); |
318 | list_add_tail(&shrinker->list, &shrinker_list); | 328 | list_add_tail(&shrinker->list, &shrinker_list); |
319 | up_write(&shrinker_rwsem); | 329 | up_write(&shrinker_rwsem); |
330 | } | ||
331 | |||
332 | int register_shrinker(struct shrinker *shrinker) | ||
333 | { | ||
334 | int err = prealloc_shrinker(shrinker); | ||
335 | |||
336 | if (err) | ||
337 | return err; | ||
338 | register_shrinker_prepared(shrinker); | ||
320 | return 0; | 339 | return 0; |
321 | } | 340 | } |
322 | EXPORT_SYMBOL(register_shrinker); | 341 | EXPORT_SYMBOL(register_shrinker); |
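The split lets a caller reserve the shrinker's deferred counters early, finish the rest of its setup, and only then expose the shrinker to reclaim, or back out cleanly if setup fails. A hedged sketch of the intended call pattern; the callbacks and my_other_setup() are placeholders, only the three new helpers come from this patch.

#include <linux/shrinker.h>

static unsigned long my_count_objects(struct shrinker *s,
				      struct shrink_control *sc)
{
	return 0;			/* stub: nothing reclaimable in this sketch */
}

static unsigned long my_scan_objects(struct shrinker *s,
				     struct shrink_control *sc)
{
	return SHRINK_STOP;		/* stub */
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count_objects,
	.scan_objects	= my_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int my_other_setup(void)
{
	return 0;			/* placeholder for the rest of the init path */
}

static int my_subsystem_init(void)
{
	int err;

	err = prealloc_shrinker(&my_shrinker);		/* allocates nr_deferred only */
	if (err)
		return err;

	err = my_other_setup();
	if (err) {
		free_prealloced_shrinker(&my_shrinker);	/* never became visible */
		return err;
	}

	register_shrinker_prepared(&my_shrinker);	/* now reachable by reclaim */
	return 0;
}

register_shrinker() itself is now just these two steps back to back, so existing callers keep working unchanged.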
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 53ecda10b790..13e2ae6be620 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
@@ -174,7 +174,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, | |||
174 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : | 174 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : |
175 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : | 175 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : |
176 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? | 176 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? |
177 | "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); | 177 | "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND"); |
178 | 178 | ||
179 | 179 | ||
180 | 180 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 969462ebb296..af0558b00c6c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2969,7 +2969,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb, | |||
2969 | } | 2969 | } |
2970 | EXPORT_SYMBOL(passthru_features_check); | 2970 | EXPORT_SYMBOL(passthru_features_check); |
2971 | 2971 | ||
2972 | static netdev_features_t dflt_features_check(const struct sk_buff *skb, | 2972 | static netdev_features_t dflt_features_check(struct sk_buff *skb, |
2973 | struct net_device *dev, | 2973 | struct net_device *dev, |
2974 | netdev_features_t features) | 2974 | netdev_features_t features) |
2975 | { | 2975 | { |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index e3e6a3e2ca22..d884d8f5f0e5 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -839,7 +839,7 @@ void dev_mc_flush(struct net_device *dev) | |||
839 | EXPORT_SYMBOL(dev_mc_flush); | 839 | EXPORT_SYMBOL(dev_mc_flush); |
840 | 840 | ||
841 | /** | 841 | /** |
842 | * dev_mc_flush - Init multicast address list | 842 | * dev_mc_init - Init multicast address list |
843 | * @dev: device | 843 | * @dev: device |
844 | * | 844 | * |
845 | * Init multicast address list. | 845 | * Init multicast address list. |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7b7a14abba28..ce519861be59 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t); | |||
55 | static void __neigh_notify(struct neighbour *n, int type, int flags, | 55 | static void __neigh_notify(struct neighbour *n, int type, int flags, |
56 | u32 pid); | 56 | u32 pid); |
57 | static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); | 57 | static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); |
58 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); | 58 | static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
59 | struct net_device *dev); | ||
59 | 60 | ||
60 | #ifdef CONFIG_PROC_FS | 61 | #ifdef CONFIG_PROC_FS |
61 | static const struct file_operations neigh_stat_seq_fops; | 62 | static const struct file_operations neigh_stat_seq_fops; |
@@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |||
291 | { | 292 | { |
292 | write_lock_bh(&tbl->lock); | 293 | write_lock_bh(&tbl->lock); |
293 | neigh_flush_dev(tbl, dev); | 294 | neigh_flush_dev(tbl, dev); |
294 | pneigh_ifdown(tbl, dev); | 295 | pneigh_ifdown_and_unlock(tbl, dev); |
295 | write_unlock_bh(&tbl->lock); | ||
296 | 296 | ||
297 | del_timer_sync(&tbl->proxy_timer); | 297 | del_timer_sync(&tbl->proxy_timer); |
298 | pneigh_queue_purge(&tbl->proxy_queue); | 298 | pneigh_queue_purge(&tbl->proxy_queue); |
@@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, | |||
681 | return -ENOENT; | 681 | return -ENOENT; |
682 | } | 682 | } |
683 | 683 | ||
684 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | 684 | static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
685 | struct net_device *dev) | ||
685 | { | 686 | { |
686 | struct pneigh_entry *n, **np; | 687 | struct pneigh_entry *n, **np, *freelist = NULL; |
687 | u32 h; | 688 | u32 h; |
688 | 689 | ||
689 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | 690 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { |
@@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |||
691 | while ((n = *np) != NULL) { | 692 | while ((n = *np) != NULL) { |
692 | if (!dev || n->dev == dev) { | 693 | if (!dev || n->dev == dev) { |
693 | *np = n->next; | 694 | *np = n->next; |
694 | if (tbl->pdestructor) | 695 | n->next = freelist; |
695 | tbl->pdestructor(n); | 696 | freelist = n; |
696 | if (n->dev) | ||
697 | dev_put(n->dev); | ||
698 | kfree(n); | ||
699 | continue; | 697 | continue; |
700 | } | 698 | } |
701 | np = &n->next; | 699 | np = &n->next; |
702 | } | 700 | } |
703 | } | 701 | } |
702 | write_unlock_bh(&tbl->lock); | ||
703 | while ((n = freelist)) { | ||
704 | freelist = n->next; | ||
705 | n->next = NULL; | ||
706 | if (tbl->pdestructor) | ||
707 | tbl->pdestructor(n); | ||
708 | if (n->dev) | ||
709 | dev_put(n->dev); | ||
710 | kfree(n); | ||
711 | } | ||
704 | return -ENOENT; | 712 | return -ENOENT; |
705 | } | 713 | } |
706 | 714 | ||
@@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2323 | 2331 | ||
2324 | err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); | 2332 | err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); |
2325 | if (!err) { | 2333 | if (!err) { |
2326 | if (tb[NDA_IFINDEX]) | 2334 | if (tb[NDA_IFINDEX]) { |
2335 | if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) | ||
2336 | return -EINVAL; | ||
2327 | filter_idx = nla_get_u32(tb[NDA_IFINDEX]); | 2337 | filter_idx = nla_get_u32(tb[NDA_IFINDEX]); |
2328 | 2338 | } | |
2329 | if (tb[NDA_MASTER]) | 2339 | if (tb[NDA_MASTER]) { |
2340 | if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) | ||
2341 | return -EINVAL; | ||
2330 | filter_master_idx = nla_get_u32(tb[NDA_MASTER]); | 2342 | filter_master_idx = nla_get_u32(tb[NDA_MASTER]); |
2331 | 2343 | } | |
2332 | if (filter_idx || filter_master_idx) | 2344 | if (filter_idx || filter_master_idx) |
2333 | flags |= NLM_F_DUMP_FILTERED; | 2345 | flags |= NLM_F_DUMP_FILTERED; |
2334 | } | 2346 | } |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 8396705deffc..40c851693f77 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
@@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) | |||
91 | 91 | ||
92 | next_opt = memchr(opt, '#', end - opt) ?: end; | 92 | next_opt = memchr(opt, '#', end - opt) ?: end; |
93 | opt_len = next_opt - opt; | 93 | opt_len = next_opt - opt; |
94 | if (!opt_len) { | 94 | if (opt_len <= 0 || opt_len > 128) { |
95 | printk(KERN_WARNING | 95 | pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", |
96 | "Empty option to dns_resolver key\n"); | 96 | opt_len); |
97 | return -EINVAL; | 97 | return -EINVAL; |
98 | } | 98 | } |
99 | 99 | ||
@@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | bad_option_value: | 129 | bad_option_value: |
130 | printk(KERN_WARNING | 130 | pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", |
131 | "Option '%*.*s' to dns_resolver key:" | 131 | opt_nlen, opt_nlen, opt); |
132 | " bad/missing value\n", | ||
133 | opt_nlen, opt_nlen, opt); | ||
134 | return -EINVAL; | 132 | return -EINVAL; |
135 | } while (opt = next_opt + 1, opt < end); | 133 | } while (opt = next_opt + 1, opt < end); |
136 | } | 134 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4c11b810a447..83c73bab2c3d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -1109,6 +1109,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, | |||
1109 | struct ip_options_rcu *opt; | 1109 | struct ip_options_rcu *opt; |
1110 | struct rtable *rt; | 1110 | struct rtable *rt; |
1111 | 1111 | ||
1112 | rt = *rtp; | ||
1113 | if (unlikely(!rt)) | ||
1114 | return -EFAULT; | ||
1115 | |||
1112 | /* | 1116 | /* |
1113 | * setup for corking. | 1117 | * setup for corking. |
1114 | */ | 1118 | */ |
@@ -1124,9 +1128,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, | |||
1124 | cork->flags |= IPCORK_OPT; | 1128 | cork->flags |= IPCORK_OPT; |
1125 | cork->addr = ipc->addr; | 1129 | cork->addr = ipc->addr; |
1126 | } | 1130 | } |
1127 | rt = *rtp; | 1131 | |
1128 | if (unlikely(!rt)) | ||
1129 | return -EFAULT; | ||
1130 | /* | 1132 | /* |
1131 | * We steal reference to this route, caller should not release it | 1133 | * We steal reference to this route, caller should not release it |
1132 | */ | 1134 | */ |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bccc4c270087..9ce1c726185e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2368,6 +2368,7 @@ void tcp_write_queue_purge(struct sock *sk) | |||
2368 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); | 2368 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); |
2369 | sk_mem_reclaim(sk); | 2369 | sk_mem_reclaim(sk); |
2370 | tcp_clear_all_retrans_hints(tcp_sk(sk)); | 2370 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2371 | tcp_sk(sk)->packets_out = 0; | ||
2371 | } | 2372 | } |
2372 | 2373 | ||
2373 | int tcp_disconnect(struct sock *sk, int flags) | 2374 | int tcp_disconnect(struct sock *sk, int flags) |
@@ -2417,7 +2418,6 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2417 | icsk->icsk_backoff = 0; | 2418 | icsk->icsk_backoff = 0; |
2418 | tp->snd_cwnd = 2; | 2419 | tp->snd_cwnd = 2; |
2419 | icsk->icsk_probes_out = 0; | 2420 | icsk->icsk_probes_out = 0; |
2420 | tp->packets_out = 0; | ||
2421 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 2421 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
2422 | tp->snd_cwnd_cnt = 0; | 2422 | tp->snd_cwnd_cnt = 0; |
2423 | tp->window_clamp = 0; | 2423 | tp->window_clamp = 0; |
@@ -2813,8 +2813,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2813 | #ifdef CONFIG_TCP_MD5SIG | 2813 | #ifdef CONFIG_TCP_MD5SIG |
2814 | case TCP_MD5SIG: | 2814 | case TCP_MD5SIG: |
2815 | case TCP_MD5SIG_EXT: | 2815 | case TCP_MD5SIG_EXT: |
2816 | /* Read the IP->Key mappings from userspace */ | 2816 | if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) |
2817 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); | 2817 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
2818 | else | ||
2819 | err = -EINVAL; | ||
2818 | break; | 2820 | break; |
2819 | #endif | 2821 | #endif |
2820 | case TCP_USER_TIMEOUT: | 2822 | case TCP_USER_TIMEOUT: |
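The TCP_MD5SIG hunk above means key management is only accepted while the socket is in CLOSE or LISTEN state, so applications must install keys before connect() or accept(). A hedged userspace sketch; struct tcp_md5sig fields are taken from the TCP uapi header, and the peer and key values are placeholders.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Install an MD5 key while the socket is still CLOSED, i.e. before connect(). */
static int install_md5_key(int fd, const struct sockaddr_in *peer,
			   const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* With this change the call fails with EINVAL on an established socket. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}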
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0fbd3ee26165..40261cb68e83 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) | |||
183 | } | 183 | } |
184 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get); | 184 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get); |
185 | 185 | ||
186 | struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) | ||
187 | { | ||
188 | const struct l2tp_net *pn = l2tp_pernet(net); | ||
189 | struct l2tp_tunnel *tunnel; | ||
190 | int count = 0; | ||
191 | |||
192 | rcu_read_lock_bh(); | ||
193 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
194 | if (++count > nth) { | ||
195 | l2tp_tunnel_inc_refcount(tunnel); | ||
196 | rcu_read_unlock_bh(); | ||
197 | return tunnel; | ||
198 | } | ||
199 | } | ||
200 | rcu_read_unlock_bh(); | ||
201 | |||
202 | return NULL; | ||
203 | } | ||
204 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); | ||
205 | |||
186 | /* Lookup a session. A new reference is held on the returned session. */ | 206 | /* Lookup a session. A new reference is held on the returned session. */ |
187 | struct l2tp_session *l2tp_session_get(const struct net *net, | 207 | struct l2tp_session *l2tp_session_get(const struct net *net, |
188 | struct l2tp_tunnel *tunnel, | 208 | struct l2tp_tunnel *tunnel, |
@@ -335,26 +355,6 @@ err_tlock: | |||
335 | } | 355 | } |
336 | EXPORT_SYMBOL_GPL(l2tp_session_register); | 356 | EXPORT_SYMBOL_GPL(l2tp_session_register); |
337 | 357 | ||
338 | struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) | ||
339 | { | ||
340 | struct l2tp_net *pn = l2tp_pernet(net); | ||
341 | struct l2tp_tunnel *tunnel; | ||
342 | int count = 0; | ||
343 | |||
344 | rcu_read_lock_bh(); | ||
345 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
346 | if (++count > nth) { | ||
347 | rcu_read_unlock_bh(); | ||
348 | return tunnel; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | rcu_read_unlock_bh(); | ||
353 | |||
354 | return NULL; | ||
355 | } | ||
356 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); | ||
357 | |||
358 | /***************************************************************************** | 358 | /***************************************************************************** |
359 | * Receive data handling | 359 | * Receive data handling |
360 | *****************************************************************************/ | 360 | *****************************************************************************/ |
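
l2tp_tunnel_get_nth() differs from the removed l2tp_tunnel_find_nth() in that it bumps the tunnel refcount before leaving the RCU read section, so the pointer it returns stays valid after the lock is dropped and the caller becomes responsible for dropping the reference. Below is a hedged user-space analog of that shape, with a mutex standing in for the RCU read side; every name in it is hypothetical.

/* User-space analog of "return the nth element with a reference held";
 * the mutex stands in for the RCU read section and all names are made up. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tunnel {
	int id;
	int refcnt;              /* protected by list_lock in this sketch */
	struct tunnel *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tunnel *tunnel_list;

static struct tunnel *tunnel_get_nth(int nth)
{
	struct tunnel *t;
	int count = 0;

	pthread_mutex_lock(&list_lock);
	for (t = tunnel_list; t; t = t->next) {
		if (++count > nth) {
			t->refcnt++;              /* reference handed to caller */
			pthread_mutex_unlock(&list_lock);
			return t;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

static void tunnel_put(struct tunnel *t)
{
	pthread_mutex_lock(&list_lock);
	if (--t->refcnt == 0) {
		printf("freeing tunnel %d\n", t->id);
		free(t);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct tunnel *t = calloc(1, sizeof(*t));

	t->id = 1;
	t->refcnt = 1;            /* reference owned by the list itself */
	tunnel_list = t;

	struct tunnel *got = tunnel_get_nth(0);
	if (got) {
		printf("got tunnel %d\n", got->id);
		tunnel_put(got);      /* caller must drop its reference */
	}
	return 0;
}
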
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index ba33cbec71eb..c199020f8a8a 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) | |||
212 | } | 212 | } |
213 | 213 | ||
214 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); | 214 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); |
215 | struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); | ||
216 | |||
215 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 217 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
216 | 218 | ||
217 | struct l2tp_session *l2tp_session_get(const struct net *net, | 219 | struct l2tp_session *l2tp_session_get(const struct net *net, |
@@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net, | |||
220 | struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); | 222 | struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); |
221 | struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, | 223 | struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, |
222 | const char *ifname); | 224 | const char *ifname); |
223 | struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); | ||
224 | 225 | ||
225 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, | 226 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, |
226 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, | 227 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, |
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 72e713da4733..b8f9d45bfeb1 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data { | |||
47 | 47 | ||
48 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) | 48 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) |
49 | { | 49 | { |
50 | pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); | 50 | /* Drop reference taken during previous invocation */ |
51 | if (pd->tunnel) | ||
52 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
53 | |||
54 | pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx); | ||
51 | pd->tunnel_idx++; | 55 | pd->tunnel_idx++; |
52 | } | 56 | } |
53 | 57 | ||
@@ -96,7 +100,14 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
96 | 100 | ||
97 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) | 101 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) |
98 | { | 102 | { |
99 | /* nothing to do */ | 103 | struct l2tp_dfs_seq_data *pd = v; |
104 | |||
105 | if (!pd || pd == SEQ_START_TOKEN) | ||
106 | return; | ||
107 | |||
108 | /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ | ||
109 | if (pd->tunnel) | ||
110 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
100 | } | 111 | } |
101 | 112 | ||
102 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | 113 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) |
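
The debugfs seq hooks above now follow a "hold exactly one reference across the iteration" discipline: each l2tp_dfs_next_tunnel() call drops the reference taken by the previous call, and l2tp_dfs_seq_stop() drops whatever reference is left when iteration ends. A single-threaded user-space sketch of that cursor pattern follows; all names are hypothetical.

/* Single-threaded sketch of the "one reference held across iteration"
 * discipline used by the seq_file hooks above; names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct obj { int id; int refcnt; };

static struct obj *table[3];

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

static struct obj *obj_get_nth(int nth)
{
	if (nth < 0 || nth >= 3 || !table[nth])
		return NULL;
	table[nth]->refcnt++;            /* reference for the caller */
	return table[nth];
}

struct cursor { struct obj *cur; int idx; };

static void cursor_next(struct cursor *c)
{
	obj_put(c->cur);                 /* drop ref taken by previous step */
	c->cur = obj_get_nth(c->idx++);
}

static void cursor_stop(struct cursor *c)
{
	obj_put(c->cur);                 /* drop ref left by the last step */
	c->cur = NULL;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		table[i] = calloc(1, sizeof(struct obj));
		table[i]->id = i;
		table[i]->refcnt = 1;    /* table's own reference */
	}

	struct cursor c = { .cur = NULL, .idx = 0 };
	for (cursor_next(&c); c.cur; cursor_next(&c))
		printf("visiting %d\n", c.cur->id);
	cursor_stop(&c);                 /* safe even when cur is NULL */
	return 0;
}
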
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index b05dbd9ffcb2..6616c9fd292f 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback | |||
487 | struct net *net = sock_net(skb->sk); | 487 | struct net *net = sock_net(skb->sk); |
488 | 488 | ||
489 | for (;;) { | 489 | for (;;) { |
490 | tunnel = l2tp_tunnel_find_nth(net, ti); | 490 | tunnel = l2tp_tunnel_get_nth(net, ti); |
491 | if (tunnel == NULL) | 491 | if (tunnel == NULL) |
492 | goto out; | 492 | goto out; |
493 | 493 | ||
494 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, | 494 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, |
495 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 495 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
496 | tunnel, L2TP_CMD_TUNNEL_GET) < 0) | 496 | tunnel, L2TP_CMD_TUNNEL_GET) < 0) { |
497 | l2tp_tunnel_dec_refcount(tunnel); | ||
497 | goto out; | 498 | goto out; |
499 | } | ||
500 | l2tp_tunnel_dec_refcount(tunnel); | ||
498 | 501 | ||
499 | ti++; | 502 | ti++; |
500 | } | 503 | } |
@@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
848 | 851 | ||
849 | for (;;) { | 852 | for (;;) { |
850 | if (tunnel == NULL) { | 853 | if (tunnel == NULL) { |
851 | tunnel = l2tp_tunnel_find_nth(net, ti); | 854 | tunnel = l2tp_tunnel_get_nth(net, ti); |
852 | if (tunnel == NULL) | 855 | if (tunnel == NULL) |
853 | goto out; | 856 | goto out; |
854 | } | 857 | } |
@@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
856 | session = l2tp_session_get_nth(tunnel, si); | 859 | session = l2tp_session_get_nth(tunnel, si); |
857 | if (session == NULL) { | 860 | if (session == NULL) { |
858 | ti++; | 861 | ti++; |
862 | l2tp_tunnel_dec_refcount(tunnel); | ||
859 | tunnel = NULL; | 863 | tunnel = NULL; |
860 | si = 0; | 864 | si = 0; |
861 | continue; | 865 | continue; |
@@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
865 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 869 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
866 | session, L2TP_CMD_SESSION_GET) < 0) { | 870 | session, L2TP_CMD_SESSION_GET) < 0) { |
867 | l2tp_session_dec_refcount(session); | 871 | l2tp_session_dec_refcount(session); |
872 | l2tp_tunnel_dec_refcount(tunnel); | ||
868 | break; | 873 | break; |
869 | } | 874 | } |
870 | l2tp_session_dec_refcount(session); | 875 | l2tp_session_dec_refcount(session); |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 896bbca9bdaa..7d0c963680e6 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -1551,16 +1551,19 @@ struct pppol2tp_seq_data { | |||
1551 | 1551 | ||
1552 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) | 1552 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) |
1553 | { | 1553 | { |
1554 | /* Drop reference taken during previous invocation */ | ||
1555 | if (pd->tunnel) | ||
1556 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
1557 | |||
1554 | for (;;) { | 1558 | for (;;) { |
1555 | pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); | 1559 | pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx); |
1556 | pd->tunnel_idx++; | 1560 | pd->tunnel_idx++; |
1557 | 1561 | ||
1558 | if (pd->tunnel == NULL) | 1562 | /* Only accept L2TPv2 tunnels */ |
1559 | break; | 1563 | if (!pd->tunnel || pd->tunnel->version == 2) |
1564 | return; | ||
1560 | 1565 | ||
1561 | /* Ignore L2TPv3 tunnels */ | 1566 | l2tp_tunnel_dec_refcount(pd->tunnel); |
1562 | if (pd->tunnel->version < 3) | ||
1563 | break; | ||
1564 | } | 1567 | } |
1565 | } | 1568 | } |
1566 | 1569 | ||
@@ -1609,7 +1612,14 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
1609 | 1612 | ||
1610 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) | 1613 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) |
1611 | { | 1614 | { |
1612 | /* nothing to do */ | 1615 | struct pppol2tp_seq_data *pd = v; |
1616 | |||
1617 | if (!pd || pd == SEQ_START_TOKEN) | ||
1618 | return; | ||
1619 | |||
1620 | /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ | ||
1621 | if (pd->tunnel) | ||
1622 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
1613 | } | 1623 | } |
1614 | 1624 | ||
1615 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | 1625 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 01dcc0823d1f..6d29b2b94e84 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -189,6 +189,7 @@ static int llc_ui_release(struct socket *sock) | |||
189 | { | 189 | { |
190 | struct sock *sk = sock->sk; | 190 | struct sock *sk = sock->sk; |
191 | struct llc_sock *llc; | 191 | struct llc_sock *llc; |
192 | struct llc_sap *sap; | ||
192 | 193 | ||
193 | if (unlikely(sk == NULL)) | 194 | if (unlikely(sk == NULL)) |
194 | goto out; | 195 | goto out; |
@@ -199,9 +200,15 @@ static int llc_ui_release(struct socket *sock) | |||
199 | llc->laddr.lsap, llc->daddr.lsap); | 200 | llc->laddr.lsap, llc->daddr.lsap); |
200 | if (!llc_send_disc(sk)) | 201 | if (!llc_send_disc(sk)) |
201 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); | 202 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); |
203 | sap = llc->sap; | ||
204 | /* Hold this for release_sock(), so that llc_backlog_rcv() could still | ||
205 | * use it. | ||
206 | */ | ||
207 | llc_sap_hold(sap); | ||
202 | if (!sock_flag(sk, SOCK_ZAPPED)) | 208 | if (!sock_flag(sk, SOCK_ZAPPED)) |
203 | llc_sap_remove_socket(llc->sap, sk); | 209 | llc_sap_remove_socket(llc->sap, sk); |
204 | release_sock(sk); | 210 | release_sock(sk); |
211 | llc_sap_put(sap); | ||
205 | if (llc->dev) | 212 | if (llc->dev) |
206 | dev_put(llc->dev); | 213 | dev_put(llc->dev); |
207 | sock_put(sk); | 214 | sock_put(sk); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 616cb9c18f88..c31b0687396a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3008,6 +3008,7 @@ static int packet_release(struct socket *sock) | |||
3008 | 3008 | ||
3009 | packet_flush_mclist(sk); | 3009 | packet_flush_mclist(sk); |
3010 | 3010 | ||
3011 | lock_sock(sk); | ||
3011 | if (po->rx_ring.pg_vec) { | 3012 | if (po->rx_ring.pg_vec) { |
3012 | memset(&req_u, 0, sizeof(req_u)); | 3013 | memset(&req_u, 0, sizeof(req_u)); |
3013 | packet_set_ring(sk, &req_u, 1, 0); | 3014 | packet_set_ring(sk, &req_u, 1, 0); |
@@ -3017,6 +3018,7 @@ static int packet_release(struct socket *sock) | |||
3017 | memset(&req_u, 0, sizeof(req_u)); | 3018 | memset(&req_u, 0, sizeof(req_u)); |
3018 | packet_set_ring(sk, &req_u, 1, 1); | 3019 | packet_set_ring(sk, &req_u, 1, 1); |
3019 | } | 3020 | } |
3021 | release_sock(sk); | ||
3020 | 3022 | ||
3021 | f = fanout_release(sk); | 3023 | f = fanout_release(sk); |
3022 | 3024 | ||
@@ -3643,6 +3645,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
3643 | union tpacket_req_u req_u; | 3645 | union tpacket_req_u req_u; |
3644 | int len; | 3646 | int len; |
3645 | 3647 | ||
3648 | lock_sock(sk); | ||
3646 | switch (po->tp_version) { | 3649 | switch (po->tp_version) { |
3647 | case TPACKET_V1: | 3650 | case TPACKET_V1: |
3648 | case TPACKET_V2: | 3651 | case TPACKET_V2: |
@@ -3653,12 +3656,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
3653 | len = sizeof(req_u.req3); | 3656 | len = sizeof(req_u.req3); |
3654 | break; | 3657 | break; |
3655 | } | 3658 | } |
3656 | if (optlen < len) | 3659 | if (optlen < len) { |
3657 | return -EINVAL; | 3660 | ret = -EINVAL; |
3658 | if (copy_from_user(&req_u.req, optval, len)) | 3661 | } else { |
3659 | return -EFAULT; | 3662 | if (copy_from_user(&req_u.req, optval, len)) |
3660 | return packet_set_ring(sk, &req_u, 0, | 3663 | ret = -EFAULT; |
3661 | optname == PACKET_TX_RING); | 3664 | else |
3665 | ret = packet_set_ring(sk, &req_u, 0, | ||
3666 | optname == PACKET_TX_RING); | ||
3667 | } | ||
3668 | release_sock(sk); | ||
3669 | return ret; | ||
3662 | } | 3670 | } |
3663 | case PACKET_COPY_THRESH: | 3671 | case PACKET_COPY_THRESH: |
3664 | { | 3672 | { |
@@ -4208,8 +4216,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4208 | /* Added to avoid minimal code churn */ | 4216 | /* Added to avoid minimal code churn */ |
4209 | struct tpacket_req *req = &req_u->req; | 4217 | struct tpacket_req *req = &req_u->req; |
4210 | 4218 | ||
4211 | lock_sock(sk); | ||
4212 | |||
4213 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; | 4219 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
4214 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | 4220 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
4215 | 4221 | ||
@@ -4347,7 +4353,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4347 | if (pg_vec) | 4353 | if (pg_vec) |
4348 | free_pg_vec(pg_vec, order, req->tp_block_nr); | 4354 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
4349 | out: | 4355 | out: |
4350 | release_sock(sk); | ||
4351 | return err; | 4356 | return err; |
4352 | } | 4357 | } |
4353 | 4358 | ||
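
The af_packet change moves lock_sock()/release_sock() out of packet_set_ring() and into its callers, so the option length check, the copy from userspace and the ring update all happen under one critical section. A hedged sketch of the "helper assumes the caller holds the lock" convention follows, with hypothetical names and a pthread mutex in place of the socket lock.

/* Sketch of moving locking from a helper into its callers so the whole
 * check-and-update sequence is atomic; names are hypothetical. */
#include <pthread.h>
#include <stdio.h>

struct ring {
	pthread_mutex_t lock;
	int size;
};

/* Caller must hold ring->lock; the helper no longer locks internally,
 * so it can be combined with other updates under one critical section. */
static int ring_resize_locked(struct ring *r, int new_size)
{
	if (new_size < 0)
		return -1;
	r->size = new_size;
	return 0;
}

static int ring_setopt(struct ring *r, int new_size)
{
	int ret;

	pthread_mutex_lock(&r->lock);
	ret = ring_resize_locked(r, new_size);  /* validated and applied atomically */
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .size = 0 };

	printf("resize ok: %d\n", ring_setopt(&r, 16));
	printf("size now: %d\n", r.size);
	return 0;
}
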
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index b33e5aeb4c06..2aa07b547b16 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c | |||
@@ -1135,3 +1135,4 @@ module_exit(qrtr_proto_fini); | |||
1135 | 1135 | ||
1136 | MODULE_DESCRIPTION("Qualcomm IPC-router driver"); | 1136 | MODULE_DESCRIPTION("Qualcomm IPC-router driver"); |
1137 | MODULE_LICENSE("GPL v2"); | 1137 | MODULE_LICENSE("GPL v2"); |
1138 | MODULE_ALIAS_NETPROTO(PF_QIPCRTR); | ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 31083b5035ec..2e3f7b75a8ec 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -556,46 +556,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, | |||
556 | addr->v6.sin6_scope_id = 0; | 556 | addr->v6.sin6_scope_id = 0; |
557 | } | 557 | } |
558 | 558 | ||
559 | /* Compare addresses exactly. | 559 | static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, |
560 | * v4-mapped-v6 is also in consideration. | 560 | const union sctp_addr *addr2) |
561 | */ | ||
562 | static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | ||
563 | const union sctp_addr *addr2) | ||
564 | { | 561 | { |
565 | if (addr1->sa.sa_family != addr2->sa.sa_family) { | 562 | if (addr1->sa.sa_family != addr2->sa.sa_family) { |
566 | if (addr1->sa.sa_family == AF_INET && | 563 | if (addr1->sa.sa_family == AF_INET && |
567 | addr2->sa.sa_family == AF_INET6 && | 564 | addr2->sa.sa_family == AF_INET6 && |
568 | ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { | 565 | ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && |
569 | if (addr2->v6.sin6_port == addr1->v4.sin_port && | 566 | addr2->v6.sin6_addr.s6_addr32[3] == |
570 | addr2->v6.sin6_addr.s6_addr32[3] == | 567 | addr1->v4.sin_addr.s_addr) |
571 | addr1->v4.sin_addr.s_addr) | 568 | return 1; |
572 | return 1; | 569 | |
573 | } | ||
574 | if (addr2->sa.sa_family == AF_INET && | 570 | if (addr2->sa.sa_family == AF_INET && |
575 | addr1->sa.sa_family == AF_INET6 && | 571 | addr1->sa.sa_family == AF_INET6 && |
576 | ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { | 572 | ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && |
577 | if (addr1->v6.sin6_port == addr2->v4.sin_port && | 573 | addr1->v6.sin6_addr.s6_addr32[3] == |
578 | addr1->v6.sin6_addr.s6_addr32[3] == | 574 | addr2->v4.sin_addr.s_addr) |
579 | addr2->v4.sin_addr.s_addr) | 575 | return 1; |
580 | return 1; | 576 | |
581 | } | ||
582 | return 0; | 577 | return 0; |
583 | } | 578 | } |
584 | if (addr1->v6.sin6_port != addr2->v6.sin6_port) | 579 | |
585 | return 0; | ||
586 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) | 580 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) |
587 | return 0; | 581 | return 0; |
582 | |||
588 | /* If this is a linklocal address, compare the scope_id. */ | 583 | /* If this is a linklocal address, compare the scope_id. */ |
589 | if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { | 584 | if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && |
590 | if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && | 585 | addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && |
591 | (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { | 586 | addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) |
592 | return 0; | 587 | return 0; |
593 | } | ||
594 | } | ||
595 | 588 | ||
596 | return 1; | 589 | return 1; |
597 | } | 590 | } |
598 | 591 | ||
592 | /* Compare addresses exactly. | ||
593 | * v4-mapped-v6 is also in consideration. | ||
594 | */ | ||
595 | static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | ||
596 | const union sctp_addr *addr2) | ||
597 | { | ||
598 | return __sctp_v6_cmp_addr(addr1, addr2) && | ||
599 | addr1->v6.sin6_port == addr2->v6.sin6_port; | ||
600 | } | ||
601 | |||
599 | /* Initialize addr struct to INADDR_ANY. */ | 602 | /* Initialize addr struct to INADDR_ANY. */ |
600 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) | 603 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) |
601 | { | 604 | { |
@@ -875,8 +878,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
875 | const union sctp_addr *addr2, | 878 | const union sctp_addr *addr2, |
876 | struct sctp_sock *opt) | 879 | struct sctp_sock *opt) |
877 | { | 880 | { |
878 | struct sctp_af *af1, *af2; | ||
879 | struct sock *sk = sctp_opt2sk(opt); | 881 | struct sock *sk = sctp_opt2sk(opt); |
882 | struct sctp_af *af1, *af2; | ||
880 | 883 | ||
881 | af1 = sctp_get_af_specific(addr1->sa.sa_family); | 884 | af1 = sctp_get_af_specific(addr1->sa.sa_family); |
882 | af2 = sctp_get_af_specific(addr2->sa.sa_family); | 885 | af2 = sctp_get_af_specific(addr2->sa.sa_family); |
@@ -892,10 +895,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
892 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) | 895 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) |
893 | return 1; | 896 | return 1; |
894 | 897 | ||
895 | if (addr1->sa.sa_family != addr2->sa.sa_family) | 898 | return __sctp_v6_cmp_addr(addr1, addr2); |
896 | return 0; | ||
897 | |||
898 | return af1->cmp_addr(addr1, addr2); | ||
899 | } | 899 | } |
900 | 900 | ||
901 | /* Verify that the provided sockaddr looks bindable. Common verification, | 901 | /* Verify that the provided sockaddr looks bindable. Common verification, |
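
The SCTP change splits the comparison into __sctp_v6_cmp_addr(), which matches only the addresses (including the v4-mapped and link-local scope cases), and sctp_v6_cmp_addr(), which additionally requires equal ports; sctp_inet6_cmp_addr() can then reuse the port-less variant. A simplified, runnable user-space sketch of the same split for plain sockaddr_in6 follows; it deliberately omits the v4-mapped and scope-id handling, and both helper names are made up.

/* Sketch of splitting "same address" from "same address and port" for
 * IPv6 sockaddrs; simplified, no v4-mapped or scope-id handling. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static int addr_eq(const struct sockaddr_in6 *a, const struct sockaddr_in6 *b)
{
	return memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)) == 0;
}

static int addr_port_eq(const struct sockaddr_in6 *a,
			const struct sockaddr_in6 *b)
{
	return addr_eq(a, b) && a->sin6_port == b->sin6_port;
}

int main(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6 };
	struct sockaddr_in6 b = { .sin6_family = AF_INET6 };

	inet_pton(AF_INET6, "2001:db8::1", &a.sin6_addr);
	inet_pton(AF_INET6, "2001:db8::1", &b.sin6_addr);
	a.sin6_port = htons(5000);
	b.sin6_port = htons(6000);

	printf("same addr: %d\n", addr_eq(&a, &b));           /* 1 */
	printf("same addr+port: %d\n", addr_port_eq(&a, &b)); /* 0 */
	return 0;
}
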
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5f8046c62d90..f5d4b69dbabc 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -1259,14 +1259,12 @@ static int smc_shutdown(struct socket *sock, int how) | |||
1259 | rc = smc_close_shutdown_write(smc); | 1259 | rc = smc_close_shutdown_write(smc); |
1260 | break; | 1260 | break; |
1261 | case SHUT_RD: | 1261 | case SHUT_RD: |
1262 | if (sk->sk_state == SMC_LISTEN) | 1262 | rc = 0; |
1263 | rc = smc_close_active(smc); | 1263 | /* nothing more to do because peer is not involved */ |
1264 | else | ||
1265 | rc = 0; | ||
1266 | /* nothing more to do because peer is not involved */ | ||
1267 | break; | 1264 | break; |
1268 | } | 1265 | } |
1269 | rc1 = kernel_sock_shutdown(smc->clcsock, how); | 1266 | if (smc->clcsock) |
1267 | rc1 = kernel_sock_shutdown(smc->clcsock, how); | ||
1270 | /* map sock_shutdown_cmd constants to sk_shutdown value range */ | 1268 | /* map sock_shutdown_cmd constants to sk_shutdown value range */ |
1271 | sk->sk_shutdown |= how + 1; | 1269 | sk->sk_shutdown |= how + 1; |
1272 | 1270 | ||
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index b9283ce5cd85..805b139756db 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
@@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
296 | strp_start_timer(strp, timeo); | 296 | strp_start_timer(strp, timeo); |
297 | } | 297 | } |
298 | 298 | ||
299 | stm->accum_len += cand_len; | ||
299 | strp->need_bytes = stm->strp.full_len - | 300 | strp->need_bytes = stm->strp.full_len - |
300 | stm->accum_len; | 301 | stm->accum_len; |
301 | stm->accum_len += cand_len; | ||
302 | stm->early_eaten = cand_len; | 302 | stm->early_eaten = cand_len; |
303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); | 303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); |
304 | desc->count = 0; /* Stop reading socket */ | 304 | desc->count = 0; /* Stop reading socket */ |
@@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
321 | /* Hurray, we have a new message! */ | 321 | /* Hurray, we have a new message! */ |
322 | cancel_delayed_work(&strp->msg_timer_work); | 322 | cancel_delayed_work(&strp->msg_timer_work); |
323 | strp->skb_head = NULL; | 323 | strp->skb_head = NULL; |
324 | strp->need_bytes = 0; | ||
324 | STRP_STATS_INCR(strp->stats.msgs); | 325 | STRP_STATS_INCR(strp->stats.msgs); |
325 | 326 | ||
326 | /* Give skb to upper layer */ | 327 | /* Give skb to upper layer */ |
@@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp) | |||
410 | return; | 411 | return; |
411 | 412 | ||
412 | if (strp->need_bytes) { | 413 | if (strp->need_bytes) { |
413 | if (strp_peek_len(strp) >= strp->need_bytes) | 414 | if (strp_peek_len(strp) < strp->need_bytes) |
414 | strp->need_bytes = 0; | ||
415 | else | ||
416 | return; | 415 | return; |
417 | } | 416 | } |
418 | 417 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 0f08934b2cea..c81ef5e6c981 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry) | |||
1375 | struct dentry *clnt_dir = pipe_dentry->d_parent; | 1375 | struct dentry *clnt_dir = pipe_dentry->d_parent; |
1376 | struct dentry *gssd_dir = clnt_dir->d_parent; | 1376 | struct dentry *gssd_dir = clnt_dir->d_parent; |
1377 | 1377 | ||
1378 | dget(pipe_dentry); | ||
1378 | __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); | 1379 | __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); |
1379 | __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); | 1380 | __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); |
1380 | __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); | 1381 | __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); |
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 32dc33a94bc7..5453e564da82 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c | |||
@@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, | |||
777 | 777 | ||
778 | ret = tipc_bearer_get_name(net, bearer_name, bearer_id); | 778 | ret = tipc_bearer_get_name(net, bearer_name, bearer_id); |
779 | if (ret || !mon) | 779 | if (ret || !mon) |
780 | return -EINVAL; | 780 | return 0; |
781 | 781 | ||
782 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 782 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
783 | NLM_F_MULTI, TIPC_NL_MON_GET); | 783 | NLM_F_MULTI, TIPC_NL_MON_GET); |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index b1fe20972aa9..dd1c4fa2eb78 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
@@ -241,7 +241,8 @@ err: | |||
241 | static struct publication *tipc_service_remove_publ(struct net *net, | 241 | static struct publication *tipc_service_remove_publ(struct net *net, |
242 | struct tipc_service *sc, | 242 | struct tipc_service *sc, |
243 | u32 lower, u32 upper, | 243 | u32 lower, u32 upper, |
244 | u32 node, u32 key) | 244 | u32 node, u32 key, |
245 | struct service_range **rng) | ||
245 | { | 246 | { |
246 | struct tipc_subscription *sub, *tmp; | 247 | struct tipc_subscription *sub, *tmp; |
247 | struct service_range *sr; | 248 | struct service_range *sr; |
@@ -275,19 +276,15 @@ static struct publication *tipc_service_remove_publ(struct net *net, | |||
275 | 276 | ||
276 | list_del(&p->all_publ); | 277 | list_del(&p->all_publ); |
277 | list_del(&p->local_publ); | 278 | list_del(&p->local_publ); |
278 | 279 | if (list_empty(&sr->all_publ)) | |
279 | /* Remove service range item if this was its last publication */ | ||
280 | if (list_empty(&sr->all_publ)) { | ||
281 | last = true; | 280 | last = true; |
282 | rb_erase(&sr->tree_node, &sc->ranges); | ||
283 | kfree(sr); | ||
284 | } | ||
285 | 281 | ||
286 | /* Notify any waiting subscriptions */ | 282 | /* Notify any waiting subscriptions */ |
287 | list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { | 283 | list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { |
288 | tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, | 284 | tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, |
289 | p->port, p->node, p->scope, last); | 285 | p->port, p->node, p->scope, last); |
290 | } | 286 | } |
287 | *rng = sr; | ||
291 | return p; | 288 | return p; |
292 | } | 289 | } |
293 | 290 | ||
@@ -379,13 +376,20 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, | |||
379 | u32 node, u32 key) | 376 | u32 node, u32 key) |
380 | { | 377 | { |
381 | struct tipc_service *sc = tipc_service_find(net, type); | 378 | struct tipc_service *sc = tipc_service_find(net, type); |
379 | struct service_range *sr = NULL; | ||
382 | struct publication *p = NULL; | 380 | struct publication *p = NULL; |
383 | 381 | ||
384 | if (!sc) | 382 | if (!sc) |
385 | return NULL; | 383 | return NULL; |
386 | 384 | ||
387 | spin_lock_bh(&sc->lock); | 385 | spin_lock_bh(&sc->lock); |
388 | p = tipc_service_remove_publ(net, sc, lower, upper, node, key); | 386 | p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr); |
387 | |||
388 | /* Remove service range item if this was its last publication */ | ||
389 | if (sr && list_empty(&sr->all_publ)) { | ||
390 | rb_erase(&sr->tree_node, &sc->ranges); | ||
391 | kfree(sr); | ||
392 | } | ||
389 | 393 | ||
390 | /* Delete service item if there are no more publications and subscriptions */ | 394 | /* Delete service item if there are no more publications and subscriptions */ |
391 | if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { | 395 | if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { |
@@ -665,13 +669,14 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, | |||
665 | /** | 669 | /** |
666 | * tipc_nametbl_subscribe - add a subscription object to the name table | 670 | * tipc_nametbl_subscribe - add a subscription object to the name table |
667 | */ | 671 | */ |
668 | void tipc_nametbl_subscribe(struct tipc_subscription *sub) | 672 | bool tipc_nametbl_subscribe(struct tipc_subscription *sub) |
669 | { | 673 | { |
670 | struct name_table *nt = tipc_name_table(sub->net); | 674 | struct name_table *nt = tipc_name_table(sub->net); |
671 | struct tipc_net *tn = tipc_net(sub->net); | 675 | struct tipc_net *tn = tipc_net(sub->net); |
672 | struct tipc_subscr *s = &sub->evt.s; | 676 | struct tipc_subscr *s = &sub->evt.s; |
673 | u32 type = tipc_sub_read(s, seq.type); | 677 | u32 type = tipc_sub_read(s, seq.type); |
674 | struct tipc_service *sc; | 678 | struct tipc_service *sc; |
679 | bool res = true; | ||
675 | 680 | ||
676 | spin_lock_bh(&tn->nametbl_lock); | 681 | spin_lock_bh(&tn->nametbl_lock); |
677 | sc = tipc_service_find(sub->net, type); | 682 | sc = tipc_service_find(sub->net, type); |
@@ -685,8 +690,10 @@ void tipc_nametbl_subscribe(struct tipc_subscription *sub) | |||
685 | pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, | 690 | pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, |
686 | tipc_sub_read(s, seq.lower), | 691 | tipc_sub_read(s, seq.lower), |
687 | tipc_sub_read(s, seq.upper)); | 692 | tipc_sub_read(s, seq.upper)); |
693 | res = false; | ||
688 | } | 694 | } |
689 | spin_unlock_bh(&tn->nametbl_lock); | 695 | spin_unlock_bh(&tn->nametbl_lock); |
696 | return res; | ||
690 | } | 697 | } |
691 | 698 | ||
692 | /** | 699 | /** |
@@ -744,16 +751,17 @@ int tipc_nametbl_init(struct net *net) | |||
744 | static void tipc_service_delete(struct net *net, struct tipc_service *sc) | 751 | static void tipc_service_delete(struct net *net, struct tipc_service *sc) |
745 | { | 752 | { |
746 | struct service_range *sr, *tmpr; | 753 | struct service_range *sr, *tmpr; |
747 | struct publication *p, *tmpb; | 754 | struct publication *p, *tmp; |
748 | 755 | ||
749 | spin_lock_bh(&sc->lock); | 756 | spin_lock_bh(&sc->lock); |
750 | rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { | 757 | rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { |
751 | list_for_each_entry_safe(p, tmpb, | 758 | list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) { |
752 | &sr->all_publ, all_publ) { | ||
753 | tipc_service_remove_publ(net, sc, p->lower, p->upper, | 759 | tipc_service_remove_publ(net, sc, p->lower, p->upper, |
754 | p->node, p->key); | 760 | p->node, p->key, &sr); |
755 | kfree_rcu(p, rcu); | 761 | kfree_rcu(p, rcu); |
756 | } | 762 | } |
763 | rb_erase(&sr->tree_node, &sc->ranges); | ||
764 | kfree(sr); | ||
757 | } | 765 | } |
758 | hlist_del_init_rcu(&sc->service_list); | 766 | hlist_del_init_rcu(&sc->service_list); |
759 | spin_unlock_bh(&sc->lock); | 767 | spin_unlock_bh(&sc->lock); |
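
tipc_service_remove_publ() now reports the affected service_range through an output parameter instead of erasing and freeing it itself, so tipc_nametbl_remove_publ() frees an emptied range directly while tipc_service_delete() defers the rb_erase()/kfree() until after it has walked all publications of that range. A small hedged sketch of the "removal helper reports, caller frees" split follows; the names are hypothetical.

/* Sketch of letting the caller decide when to free the container that a
 * removal helper touched; names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct range {
	int nr_items;
};

/* Remove one item; report which range it lived in, but do not free the
 * range here so a caller iterating many ranges can batch the cleanup. */
static void remove_item(struct range *r, struct range **out)
{
	if (r->nr_items > 0)
		r->nr_items--;
	*out = r;
}

int main(void)
{
	struct range *r = calloc(1, sizeof(*r));
	struct range *touched = NULL;

	r->nr_items = 1;
	remove_item(r, &touched);

	/* Caller owns the "is it empty now?" decision. */
	if (touched && touched->nr_items == 0) {
		printf("range empty, freeing\n");
		free(touched);
	}
	return 0;
}
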
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 4b14fc28d9e2..0febba41da86 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
@@ -126,7 +126,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, | |||
126 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, | 126 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, |
127 | u32 lower, u32 upper, | 127 | u32 lower, u32 upper, |
128 | u32 node, u32 key); | 128 | u32 node, u32 key); |
129 | void tipc_nametbl_subscribe(struct tipc_subscription *s); | 129 | bool tipc_nametbl_subscribe(struct tipc_subscription *s); |
130 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); | 130 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); |
131 | int tipc_nametbl_init(struct net *net); | 131 | int tipc_nametbl_init(struct net *net); |
132 | void tipc_nametbl_stop(struct net *net); | 132 | void tipc_nametbl_stop(struct net *net); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 856f9e97ea29..4fbaa0464405 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -252,6 +252,8 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
252 | u64 *w0 = (u64 *)&node_id[0]; | 252 | u64 *w0 = (u64 *)&node_id[0]; |
253 | u64 *w1 = (u64 *)&node_id[8]; | 253 | u64 *w1 = (u64 *)&node_id[8]; |
254 | 254 | ||
255 | if (!attrs[TIPC_NLA_NET_NODEID_W1]) | ||
256 | return -EINVAL; | ||
255 | *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); | 257 | *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); |
256 | *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); | 258 | *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); |
257 | tipc_net_init(net, node_id, 0); | 259 | tipc_net_init(net, node_id, 0); |
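
__tipc_nl_net_set() now rejects the request when TIPC_NLA_NET_NODEID_W1 is missing, since both halves of the 128-bit node identity are read unconditionally; the matching policy entries are added in netlink.c just below. A hedged sketch of validating every required attribute slot before dereferencing it follows, with hypothetical names standing in for the parsed netlink attribute table.

/* Sketch of rejecting a request when a required attribute is missing,
 * instead of dereferencing a NULL slot; names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

enum { ATTR_NODEID, ATTR_NODEID_W1, ATTR_MAX };

static int set_node_id(const uint64_t *attrs[ATTR_MAX], uint64_t out[2])
{
	/* Both halves are mandatory; bail out before touching either. */
	if (!attrs[ATTR_NODEID] || !attrs[ATTR_NODEID_W1])
		return -1;

	out[0] = *attrs[ATTR_NODEID];
	out[1] = *attrs[ATTR_NODEID_W1];
	return 0;
}

int main(void)
{
	uint64_t w0 = 0x1234, out[2];
	const uint64_t *attrs[ATTR_MAX] = { [ATTR_NODEID] = &w0 };

	/* Second half missing -> request rejected, no NULL dereference. */
	printf("result: %d\n", set_node_id(attrs, out));  /* -1 */
	return 0;
}
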
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index b76f13f6fea1..6ff2254088f6 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -79,7 +79,10 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | |||
79 | 79 | ||
80 | const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { | 80 | const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { |
81 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, | 81 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, |
82 | [TIPC_NLA_NET_ID] = { .type = NLA_U32 } | 82 | [TIPC_NLA_NET_ID] = { .type = NLA_U32 }, |
83 | [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }, | ||
84 | [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 }, | ||
85 | [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 }, | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { | 88 | const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { |
diff --git a/net/tipc/node.c b/net/tipc/node.c index c77dd2f3c589..6f98b56dd48e 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -2232,8 +2232,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) | |||
2232 | struct net *net = sock_net(skb->sk); | 2232 | struct net *net = sock_net(skb->sk); |
2233 | u32 prev_bearer = cb->args[0]; | 2233 | u32 prev_bearer = cb->args[0]; |
2234 | struct tipc_nl_msg msg; | 2234 | struct tipc_nl_msg msg; |
2235 | int bearer_id; | ||
2235 | int err; | 2236 | int err; |
2236 | int i; | ||
2237 | 2237 | ||
2238 | if (prev_bearer == MAX_BEARERS) | 2238 | if (prev_bearer == MAX_BEARERS) |
2239 | return 0; | 2239 | return 0; |
@@ -2243,16 +2243,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) | |||
2243 | msg.seq = cb->nlh->nlmsg_seq; | 2243 | msg.seq = cb->nlh->nlmsg_seq; |
2244 | 2244 | ||
2245 | rtnl_lock(); | 2245 | rtnl_lock(); |
2246 | for (i = prev_bearer; i < MAX_BEARERS; i++) { | 2246 | for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { |
2247 | prev_bearer = i; | ||
2248 | err = __tipc_nl_add_monitor(net, &msg, prev_bearer); | 2247 | err = __tipc_nl_add_monitor(net, &msg, prev_bearer); |
2249 | if (err) | 2248 | if (err) |
2250 | goto out; | 2249 | break; |
2251 | } | 2250 | } |
2252 | |||
2253 | out: | ||
2254 | rtnl_unlock(); | 2251 | rtnl_unlock(); |
2255 | cb->args[0] = prev_bearer; | 2252 | cb->args[0] = bearer_id; |
2256 | 2253 | ||
2257 | return skb->len; | 2254 | return skb->len; |
2258 | } | 2255 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1fd1c8b5ce03..252a52ae0893 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1278,7 +1278,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) | |||
1278 | struct tipc_msg *hdr = &tsk->phdr; | 1278 | struct tipc_msg *hdr = &tsk->phdr; |
1279 | struct tipc_name_seq *seq; | 1279 | struct tipc_name_seq *seq; |
1280 | struct sk_buff_head pkts; | 1280 | struct sk_buff_head pkts; |
1281 | u32 dnode, dport; | 1281 | u32 dport, dnode = 0; |
1282 | u32 type, inst; | 1282 | u32 type, inst; |
1283 | int mtu, rc; | 1283 | int mtu, rc; |
1284 | 1284 | ||
@@ -1348,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) | |||
1348 | msg_set_destnode(hdr, dnode); | 1348 | msg_set_destnode(hdr, dnode); |
1349 | msg_set_destport(hdr, dest->addr.id.ref); | 1349 | msg_set_destport(hdr, dest->addr.id.ref); |
1350 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); | 1350 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); |
1351 | } else { | ||
1352 | return -EINVAL; | ||
1351 | } | 1353 | } |
1352 | 1354 | ||
1353 | /* Block or return if destination link is congested */ | 1355 | /* Block or return if destination link is congested */ |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index b7d80bc5f4ab..f340e53da625 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -153,7 +153,10 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net, | |||
153 | memcpy(&sub->evt.s, s, sizeof(*s)); | 153 | memcpy(&sub->evt.s, s, sizeof(*s)); |
154 | spin_lock_init(&sub->lock); | 154 | spin_lock_init(&sub->lock); |
155 | kref_init(&sub->kref); | 155 | kref_init(&sub->kref); |
156 | tipc_nametbl_subscribe(sub); | 156 | if (!tipc_nametbl_subscribe(sub)) { |
157 | kfree(sub); | ||
158 | return NULL; | ||
159 | } | ||
157 | timer_setup(&sub->timer, tipc_sub_timeout, 0); | 160 | timer_setup(&sub->timer, tipc_sub_timeout, 0); |
158 | timeout = tipc_sub_read(&sub->evt.s, timeout); | 161 | timeout = tipc_sub_read(&sub->evt.s, timeout); |
159 | if (timeout != TIPC_WAIT_FOREVER) | 162 | if (timeout != TIPC_WAIT_FOREVER) |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4dc766b03f00..71e79597f940 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include <net/strparser.h> | 41 | #include <net/strparser.h> |
42 | #include <net/tls.h> | 42 | #include <net/tls.h> |
43 | 43 | ||
44 | #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE | ||
45 | |||
44 | static int tls_do_decryption(struct sock *sk, | 46 | static int tls_do_decryption(struct sock *sk, |
45 | struct scatterlist *sgin, | 47 | struct scatterlist *sgin, |
46 | struct scatterlist *sgout, | 48 | struct scatterlist *sgout, |
@@ -673,7 +675,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
673 | { | 675 | { |
674 | struct tls_context *tls_ctx = tls_get_ctx(sk); | 676 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
675 | struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); | 677 | struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); |
676 | char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size]; | 678 | char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; |
677 | struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; | 679 | struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; |
678 | struct scatterlist *sgin = &sgin_arr[0]; | 680 | struct scatterlist *sgin = &sgin_arr[0]; |
679 | struct strp_msg *rxm = strp_msg(skb); | 681 | struct strp_msg *rxm = strp_msg(skb); |
@@ -1094,6 +1096,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
1094 | goto free_priv; | 1096 | goto free_priv; |
1095 | } | 1097 | } |
1096 | 1098 | ||
1099 | /* Sanity-check the IV size for stack allocations. */ | ||
1100 | if (iv_size > MAX_IV_SIZE) { | ||
1101 | rc = -EINVAL; | ||
1102 | goto free_priv; | ||
1103 | } | ||
1104 | |||
1097 | cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; | 1105 | cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; |
1098 | cctx->tag_size = tag_size; | 1106 | cctx->tag_size = tag_size; |
1099 | cctx->overhead_size = cctx->prepend_size + cctx->tag_size; | 1107 | cctx->overhead_size = cctx->prepend_size + cctx->tag_size; |
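
The TLS change replaces a stack array sized by tls_ctx->rx.iv_size with a fixed MAX_IV_SIZE buffer and rejects any configuration whose IV would not fit, so the stack usage is bounded at compile time. A hedged user-space sketch of that pattern follows; the sizes and the function name are made up for illustration.

/* Sketch of bounding a stack buffer by a compile-time maximum and
 * validating the runtime size against it; names and sizes are made up. */
#include <stdio.h>
#include <string.h>

#define MAX_IV_SIZE 8
#define SALT_SIZE   4

static int build_nonce(const unsigned char *iv, size_t iv_size)
{
	unsigned char nonce[SALT_SIZE + MAX_IV_SIZE];

	/* Reject sizes the fixed buffer cannot hold instead of using a VLA. */
	if (iv_size > MAX_IV_SIZE)
		return -1;

	memset(nonce, 0, sizeof(nonce));
	memcpy(nonce + SALT_SIZE, iv, iv_size);
	printf("nonce built, %zu IV bytes\n", iv_size);
	return 0;
}

int main(void)
{
	unsigned char iv[8] = { 0 };

	printf("ok: %d\n", build_nonce(iv, sizeof(iv)));  /*  0 */
	printf("too big: %d\n", build_nonce(iv, 16));     /* -1 */
	return 0;
}
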
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index aac9b8f6552e..c1076c19b858 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void) | |||
2018 | } | 2018 | } |
2019 | EXPORT_SYMBOL_GPL(vsock_core_get_transport); | 2019 | EXPORT_SYMBOL_GPL(vsock_core_get_transport); |
2020 | 2020 | ||
2021 | static void __exit vsock_exit(void) | ||
2022 | { | ||
2023 | /* Do nothing. This function makes this module removable. */ | ||
2024 | } | ||
2025 | |||
2021 | module_init(vsock_init_tables); | 2026 | module_init(vsock_init_tables); |
2027 | module_exit(vsock_exit); | ||
2022 | 2028 | ||
2023 | MODULE_AUTHOR("VMware, Inc."); | 2029 | MODULE_AUTHOR("VMware, Inc."); |
2024 | MODULE_DESCRIPTION("VMware Virtual Socket Family"); | 2030 | MODULE_DESCRIPTION("VMware Virtual Socket Family"); |
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 830c55514f9f..49b13553eaae 100644 --- a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c | |||
@@ -56,6 +56,21 @@ struct dummy { | |||
56 | unsigned long jiffies_expire; | 56 | unsigned long jiffies_expire; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | /* | ||
60 | * The constructor makes more sense together with klp_shadow_get_or_alloc(). | ||
61 | * In this example, it would be safe to assign the pointer also to the shadow | ||
62 | * variable returned by klp_shadow_alloc(). But we wanted to show the more | ||
63 | * complicated use of the API. | ||
64 | */ | ||
65 | static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data) | ||
66 | { | ||
67 | void **shadow_leak = shadow_data; | ||
68 | void *leak = ctor_data; | ||
69 | |||
70 | *shadow_leak = leak; | ||
71 | return 0; | ||
72 | } | ||
73 | |||
59 | struct dummy *livepatch_fix1_dummy_alloc(void) | 74 | struct dummy *livepatch_fix1_dummy_alloc(void) |
60 | { | 75 | { |
61 | struct dummy *d; | 76 | struct dummy *d; |
@@ -74,7 +89,8 @@ struct dummy *livepatch_fix1_dummy_alloc(void) | |||
74 | * pointer to handle resource release. | 89 | * pointer to handle resource release. |
75 | */ | 90 | */ |
76 | leak = kzalloc(sizeof(int), GFP_KERNEL); | 91 | leak = kzalloc(sizeof(int), GFP_KERNEL); |
77 | klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL); | 92 | klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL, |
93 | shadow_leak_ctor, leak); | ||
78 | 94 | ||
79 | pr_info("%s: dummy @ %p, expires @ %lx\n", | 95 | pr_info("%s: dummy @ %p, expires @ %lx\n", |
80 | __func__, d, d->jiffies_expire); | 96 | __func__, d, d->jiffies_expire); |
@@ -82,9 +98,19 @@ struct dummy *livepatch_fix1_dummy_alloc(void) | |||
82 | return d; | 98 | return d; |
83 | } | 99 | } |
84 | 100 | ||
101 | static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data) | ||
102 | { | ||
103 | void *d = obj; | ||
104 | void **shadow_leak = shadow_data; | ||
105 | |||
106 | kfree(*shadow_leak); | ||
107 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
108 | __func__, d, *shadow_leak); | ||
109 | } | ||
110 | |||
85 | void livepatch_fix1_dummy_free(struct dummy *d) | 111 | void livepatch_fix1_dummy_free(struct dummy *d) |
86 | { | 112 | { |
87 | void **shadow_leak, *leak; | 113 | void **shadow_leak; |
88 | 114 | ||
89 | /* | 115 | /* |
90 | * Patch: fetch the saved SV_LEAK shadow variable, detach and | 116 | * Patch: fetch the saved SV_LEAK shadow variable, detach and |
@@ -93,15 +119,10 @@ void livepatch_fix1_dummy_free(struct dummy *d) | |||
93 | * was loaded.) | 119 | * was loaded.) |
94 | */ | 120 | */ |
95 | shadow_leak = klp_shadow_get(d, SV_LEAK); | 121 | shadow_leak = klp_shadow_get(d, SV_LEAK); |
96 | if (shadow_leak) { | 122 | if (shadow_leak) |
97 | leak = *shadow_leak; | 123 | klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor); |
98 | klp_shadow_free(d, SV_LEAK); | 124 | else |
99 | kfree(leak); | ||
100 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
101 | __func__, d, leak); | ||
102 | } else { | ||
103 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); | 125 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); |
104 | } | ||
105 | 126 | ||
106 | kfree(d); | 127 | kfree(d); |
107 | } | 128 | } |
@@ -147,7 +168,7 @@ static int livepatch_shadow_fix1_init(void) | |||
147 | static void livepatch_shadow_fix1_exit(void) | 168 | static void livepatch_shadow_fix1_exit(void) |
148 | { | 169 | { |
149 | /* Cleanup any existing SV_LEAK shadow variables */ | 170 | /* Cleanup any existing SV_LEAK shadow variables */ |
150 | klp_shadow_free_all(SV_LEAK); | 171 | klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor); |
151 | 172 | ||
152 | WARN_ON(klp_unregister_patch(&patch)); | 173 | WARN_ON(klp_unregister_patch(&patch)); |
153 | } | 174 | } |
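
With the updated API used above, klp_shadow_alloc()/klp_shadow_get_or_alloc() take a constructor callback plus its argument, and klp_shadow_free()/klp_shadow_free_all() take a destructor, so the shadow data is initialized and torn down in one place. A hedged user-space sketch of a metadata registry with the same ctor/dtor hooks follows; it is an analogy with hypothetical names, not the livepatch implementation.

/* User-space sketch of attaching metadata to an object through
 * constructor/destructor callbacks; all names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

typedef int  (*ctor_fn)(void *obj, void *shadow, void *ctor_data);
typedef void (*dtor_fn)(void *obj, void *shadow);

struct shadow {
	void *obj;
	void *data;
};

static struct shadow *shadow_alloc(void *obj, size_t size,
				   ctor_fn ctor, void *ctor_data)
{
	struct shadow *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->obj = obj;
	s->data = calloc(1, size);
	if (!s->data || (ctor && ctor(obj, s->data, ctor_data))) {
		free(s->data);
		free(s);
		return NULL;
	}
	return s;
}

static void shadow_free(struct shadow *s, dtor_fn dtor)
{
	if (dtor)
		dtor(s->obj, s->data);   /* release whatever the data refers to */
	free(s->data);
	free(s);
}

static int leak_ctor(void *obj, void *shadow, void *ctor_data)
{
	(void)obj;
	*(void **)shadow = ctor_data;    /* remember the otherwise-leaked block */
	return 0;
}

static void leak_dtor(void *obj, void *shadow)
{
	printf("object %p: freeing tracked block %p\n", obj, *(void **)shadow);
	free(*(void **)shadow);
}

int main(void)
{
	int dummy;
	void *leak = malloc(32);
	struct shadow *s = shadow_alloc(&dummy, sizeof(void *), leak_ctor, leak);

	if (s)
		shadow_free(s, leak_dtor);
	else
		free(leak);
	return 0;
}
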
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c index ff9948f0ec00..b34c7bf83356 100644 --- a/samples/livepatch/livepatch-shadow-fix2.c +++ b/samples/livepatch/livepatch-shadow-fix2.c | |||
@@ -53,39 +53,42 @@ struct dummy { | |||
53 | bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) | 53 | bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) |
54 | { | 54 | { |
55 | int *shadow_count; | 55 | int *shadow_count; |
56 | int count; | ||
57 | 56 | ||
58 | /* | 57 | /* |
59 | * Patch: handle in-flight dummy structures, if they do not | 58 | * Patch: handle in-flight dummy structures, if they do not |
60 | * already have a SV_COUNTER shadow variable, then attach a | 59 | * already have a SV_COUNTER shadow variable, then attach a |
61 | * new one. | 60 | * new one. |
62 | */ | 61 | */ |
63 | count = 0; | ||
64 | shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, | 62 | shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, |
65 | &count, sizeof(count), | 63 | sizeof(*shadow_count), GFP_NOWAIT, |
66 | GFP_NOWAIT); | 64 | NULL, NULL); |
67 | if (shadow_count) | 65 | if (shadow_count) |
68 | *shadow_count += 1; | 66 | *shadow_count += 1; |
69 | 67 | ||
70 | return time_after(jiffies, d->jiffies_expire); | 68 | return time_after(jiffies, d->jiffies_expire); |
71 | } | 69 | } |
72 | 70 | ||
71 | static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data) | ||
72 | { | ||
73 | void *d = obj; | ||
74 | void **shadow_leak = shadow_data; | ||
75 | |||
76 | kfree(*shadow_leak); | ||
77 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
78 | __func__, d, *shadow_leak); | ||
79 | } | ||
80 | |||
73 | void livepatch_fix2_dummy_free(struct dummy *d) | 81 | void livepatch_fix2_dummy_free(struct dummy *d) |
74 | { | 82 | { |
75 | void **shadow_leak, *leak; | 83 | void **shadow_leak; |
76 | int *shadow_count; | 84 | int *shadow_count; |
77 | 85 | ||
78 | /* Patch: copy the memory leak patch from the fix1 module. */ | 86 | /* Patch: copy the memory leak patch from the fix1 module. */ |
79 | shadow_leak = klp_shadow_get(d, SV_LEAK); | 87 | shadow_leak = klp_shadow_get(d, SV_LEAK); |
80 | if (shadow_leak) { | 88 | if (shadow_leak) |
81 | leak = *shadow_leak; | 89 | klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor); |
82 | klp_shadow_free(d, SV_LEAK); | 90 | else |
83 | kfree(leak); | ||
84 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
85 | __func__, d, leak); | ||
86 | } else { | ||
87 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); | 91 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); |
88 | } | ||
89 | 92 | ||
90 | /* | 93 | /* |
91 | * Patch: fetch the SV_COUNTER shadow variable and display | 94 | * Patch: fetch the SV_COUNTER shadow variable and display |
@@ -95,7 +98,7 @@ void livepatch_fix2_dummy_free(struct dummy *d) | |||
95 | if (shadow_count) { | 98 | if (shadow_count) { |
96 | pr_info("%s: dummy @ %p, check counter = %d\n", | 99 | pr_info("%s: dummy @ %p, check counter = %d\n", |
97 | __func__, d, *shadow_count); | 100 | __func__, d, *shadow_count); |
98 | klp_shadow_free(d, SV_COUNTER); | 101 | klp_shadow_free(d, SV_COUNTER, NULL); |
99 | } | 102 | } |
100 | 103 | ||
101 | kfree(d); | 104 | kfree(d); |
@@ -142,7 +145,7 @@ static int livepatch_shadow_fix2_init(void) | |||
142 | static void livepatch_shadow_fix2_exit(void) | 145 | static void livepatch_shadow_fix2_exit(void) |
143 | { | 146 | { |
144 | /* Cleanup any existing SV_COUNTER shadow variables */ | 147 | /* Cleanup any existing SV_COUNTER shadow variables */ |
145 | klp_shadow_free_all(SV_COUNTER); | 148 | klp_shadow_free_all(SV_COUNTER, NULL); |
146 | 149 | ||
147 | WARN_ON(klp_unregister_patch(&patch)); | 150 | WARN_ON(klp_unregister_patch(&patch)); |
148 | } | 151 | } |
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c index f69764d7cdd7..e30e30ba6e39 100644 --- a/sound/core/rawmidi_compat.c +++ b/sound/core/rawmidi_compat.c | |||
@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, | |||
36 | struct snd_rawmidi_params params; | 36 | struct snd_rawmidi_params params; |
37 | unsigned int val; | 37 | unsigned int val; |
38 | 38 | ||
39 | if (rfile->output == NULL) | ||
40 | return -EINVAL; | ||
41 | if (get_user(params.stream, &src->stream) || | 39 | if (get_user(params.stream, &src->stream) || |
42 | get_user(params.buffer_size, &src->buffer_size) || | 40 | get_user(params.buffer_size, &src->buffer_size) || |
43 | get_user(params.avail_min, &src->avail_min) || | 41 | get_user(params.avail_min, &src->avail_min) || |
@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, | |||
46 | params.no_active_sensing = val; | 44 | params.no_active_sensing = val; |
47 | switch (params.stream) { | 45 | switch (params.stream) { |
48 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 46 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
47 | if (!rfile->output) | ||
48 | return -EINVAL; | ||
49 | return snd_rawmidi_output_params(rfile->output, ¶ms); | 49 | return snd_rawmidi_output_params(rfile->output, ¶ms); |
50 | case SNDRV_RAWMIDI_STREAM_INPUT: | 50 | case SNDRV_RAWMIDI_STREAM_INPUT: |
51 | if (!rfile->input) | ||
52 | return -EINVAL; | ||
51 | return snd_rawmidi_input_params(rfile->input, ¶ms); | 53 | return snd_rawmidi_input_params(rfile->input, ¶ms); |
52 | } | 54 | } |
53 | return -EINVAL; | 55 | return -EINVAL; |
@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, | |||
67 | int err; | 69 | int err; |
68 | struct snd_rawmidi_status status; | 70 | struct snd_rawmidi_status status; |
69 | 71 | ||
70 | if (rfile->output == NULL) | ||
71 | return -EINVAL; | ||
72 | if (get_user(status.stream, &src->stream)) | 72 | if (get_user(status.stream, &src->stream)) |
73 | return -EFAULT; | 73 | return -EFAULT; |
74 | 74 | ||
75 | switch (status.stream) { | 75 | switch (status.stream) { |
76 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 76 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
77 | if (!rfile->output) | ||
78 | return -EINVAL; | ||
77 | err = snd_rawmidi_output_status(rfile->output, &status); | 79 | err = snd_rawmidi_output_status(rfile->output, &status); |
78 | break; | 80 | break; |
79 | case SNDRV_RAWMIDI_STREAM_INPUT: | 81 | case SNDRV_RAWMIDI_STREAM_INPUT: |
82 | if (!rfile->input) | ||
83 | return -EINVAL; | ||
80 | err = snd_rawmidi_input_status(rfile->input, &status); | 84 | err = snd_rawmidi_input_status(rfile->input, &status); |
81 | break; | 85 | break; |
82 | default: | 86 | default: |
@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, | |||
112 | int err; | 116 | int err; |
113 | struct snd_rawmidi_status status; | 117 | struct snd_rawmidi_status status; |
114 | 118 | ||
115 | if (rfile->output == NULL) | ||
116 | return -EINVAL; | ||
117 | if (get_user(status.stream, &src->stream)) | 119 | if (get_user(status.stream, &src->stream)) |
118 | return -EFAULT; | 120 | return -EFAULT; |
119 | 121 | ||
120 | switch (status.stream) { | 122 | switch (status.stream) { |
121 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 123 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
124 | if (!rfile->output) | ||
125 | return -EINVAL; | ||
122 | err = snd_rawmidi_output_status(rfile->output, &status); | 126 | err = snd_rawmidi_output_status(rfile->output, &status); |
123 | break; | 127 | break; |
124 | case SNDRV_RAWMIDI_STREAM_INPUT: | 128 | case SNDRV_RAWMIDI_STREAM_INPUT: |
129 | if (!rfile->input) | ||
130 | return -EINVAL; | ||
125 | err = snd_rawmidi_input_status(rfile->input, &status); | 131 | err = snd_rawmidi_input_status(rfile->input, &status); |
126 | break; | 132 | break; |
127 | default: | 133 | default: |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7a111a1b5836..b0c8c79848a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1647,7 +1647,8 @@ static void azx_check_snoop_available(struct azx *chip) | |||
1647 | */ | 1647 | */ |
1648 | u8 val; | 1648 | u8 val; |
1649 | pci_read_config_byte(chip->pci, 0x42, &val); | 1649 | pci_read_config_byte(chip->pci, 0x42, &val); |
1650 | if (!(val & 0x80) && chip->pci->revision == 0x30) | 1650 | if (!(val & 0x80) && (chip->pci->revision == 0x30 || |
1651 | chip->pci->revision == 0x20)) | ||
1651 | snoop = false; | 1652 | snoop = false; |
1652 | } | 1653 | } |
1653 | 1654 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index aef1f52db7d9..fc77bf7a1544 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -6370,6 +6370,8 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6370 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | 6370 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ |
6371 | { } | 6371 | { } |
6372 | }, | 6372 | }, |
6373 | .chained = true, | ||
6374 | .chain_id = ALC269_FIXUP_HEADSET_MIC | ||
6373 | }, | 6375 | }, |
6374 | }; | 6376 | }; |
6375 | 6377 | ||
@@ -6573,6 +6575,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6573 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6575 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6574 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6576 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6575 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6577 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6578 | SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | ||
6576 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6579 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6577 | SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6580 | SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6578 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), | 6581 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), |
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c index 6d7cde56a355..e2cf55c53ea8 100644 --- a/sound/usb/line6/midi.c +++ b/sound/usb/line6/midi.c | |||
@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data, | |||
125 | } | 125 | } |
126 | 126 | ||
127 | usb_fill_int_urb(urb, line6->usbdev, | 127 | usb_fill_int_urb(urb, line6->usbdev, |
128 | usb_sndbulkpipe(line6->usbdev, | 128 | usb_sndintpipe(line6->usbdev, |
129 | line6->properties->ep_ctrl_w), | 129 | line6->properties->ep_ctrl_w), |
130 | transfer_buffer, length, midi_sent, line6, | 130 | transfer_buffer, length, midi_sent, line6, |
131 | line6->interval); | 131 | line6->interval); |
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6edd177bb1c7..2ba95d6fe852 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h | |||
@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot { | |||
135 | #define KVM_REG_ARM_CRM_SHIFT 7 | 135 | #define KVM_REG_ARM_CRM_SHIFT 7 |
136 | #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 | 136 | #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 |
137 | #define KVM_REG_ARM_32_CRN_SHIFT 11 | 137 | #define KVM_REG_ARM_32_CRN_SHIFT 11 |
138 | /* | ||
139 | * For KVM currently all guest registers are nonsecure, but we reserve a bit | ||
140 | * in the encoding to distinguish secure from nonsecure for AArch32 system | ||
141 | * registers that are banked by security. This is 1 for the secure banked | ||
142 | * register, and 0 for the nonsecure banked register or if the register is | ||
143 | * not banked by security. | ||
144 | */ | ||
145 | #define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 | ||
146 | #define KVM_REG_ARM_SECURE_SHIFT 28 | ||
138 | 147 | ||
139 | #define ARM_CP15_REG_SHIFT_MASK(x,n) \ | 148 | #define ARM_CP15_REG_SHIFT_MASK(x,n) \ |
140 | (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) | 149 | (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) |
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fb3a6de7440b..6847d85400a8 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h | |||
@@ -53,12 +53,6 @@ | |||
53 | # define NEED_MOVBE 0 | 53 | # define NEED_MOVBE 0 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_X86_5LEVEL | ||
57 | # define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) | ||
58 | #else | ||
59 | # define NEED_LA57 0 | ||
60 | #endif | ||
61 | |||
62 | #ifdef CONFIG_X86_64 | 56 | #ifdef CONFIG_X86_64 |
63 | #ifdef CONFIG_PARAVIRT | 57 | #ifdef CONFIG_PARAVIRT |
64 | /* Paravirtualized systems may not have PSE or PGE available */ | 58 | /* Paravirtualized systems may not have PSE or PGE available */ |
@@ -104,7 +98,7 @@ | |||
104 | #define REQUIRED_MASK13 0 | 98 | #define REQUIRED_MASK13 0 |
105 | #define REQUIRED_MASK14 0 | 99 | #define REQUIRED_MASK14 0 |
106 | #define REQUIRED_MASK15 0 | 100 | #define REQUIRED_MASK15 0 |
107 | #define REQUIRED_MASK16 (NEED_LA57) | 101 | #define REQUIRED_MASK16 0 |
108 | #define REQUIRED_MASK17 0 | 102 | #define REQUIRED_MASK17 0 |
109 | #define REQUIRED_MASK18 0 | 103 | #define REQUIRED_MASK18 0 |
110 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) | 104 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) |
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index f3a960488eae..c535c2fdea13 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h | |||
@@ -354,8 +354,25 @@ struct kvm_xcrs { | |||
354 | __u64 padding[16]; | 354 | __u64 padding[16]; |
355 | }; | 355 | }; |
356 | 356 | ||
357 | /* definition of registers in kvm_run */ | 357 | #define KVM_SYNC_X86_REGS (1UL << 0) |
358 | #define KVM_SYNC_X86_SREGS (1UL << 1) | ||
359 | #define KVM_SYNC_X86_EVENTS (1UL << 2) | ||
360 | |||
361 | #define KVM_SYNC_X86_VALID_FIELDS \ | ||
362 | (KVM_SYNC_X86_REGS| \ | ||
363 | KVM_SYNC_X86_SREGS| \ | ||
364 | KVM_SYNC_X86_EVENTS) | ||
365 | |||
366 | /* kvm_sync_regs struct included by kvm_run struct */ | ||
358 | struct kvm_sync_regs { | 367 | struct kvm_sync_regs { |
368 | /* Members of this structure are potentially malicious. | ||
369 | * Care must be taken by code reading, esp. interpreting, | ||
370 | * data fields from them inside KVM to prevent TOCTOU and | ||
371 | * double-fetch types of vulnerabilities. | ||
372 | */ | ||
373 | struct kvm_regs regs; | ||
374 | struct kvm_sregs sregs; | ||
375 | struct kvm_vcpu_events events; | ||
359 | }; | 376 | }; |
360 | 377 | ||
361 | #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) | 378 | #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) |
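With the kvm_sync_regs members now defined, userspace can ask KVM to mirror register state into the shared kvm_run page instead of issuing separate KVM_GET_REGS calls. A minimal sketch, assuming KVM_CAP_SYNC_REGS is available and that vcpu_fd and the mmap'ed run area already exist (illustrative names):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: read the guest RIP from the synced register area in the
     * mmap'ed kvm_run page instead of issuing a KVM_GET_REGS ioctl. */
    static int run_and_read_rip(int vcpu_fd, struct kvm_run *run, __u64 *rip)
    {
            run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;

            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    return -1;

            *rip = run->s.regs.regs.rip;    /* struct kvm_regs inside kvm_sync_regs */
            return 0;
    }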
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 04e32f965ad7..1827c2f973f9 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h | |||
@@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
151 | * required ordering. | 151 | * required ordering. |
152 | */ | 152 | */ |
153 | 153 | ||
154 | #define READ_ONCE(x) \ | 154 | #define READ_ONCE(x) \ |
155 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | 155 | ({ \ |
156 | 156 | union { typeof(x) __val; char __c[1]; } __u = \ | |
157 | #define WRITE_ONCE(x, val) \ | 157 | { .__c = { 0 } }; \ |
158 | ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | 158 | __read_once_size(&(x), __u.__c, sizeof(x)); \ |
159 | __u.__val; \ | ||
160 | }) | ||
161 | |||
162 | #define WRITE_ONCE(x, val) \ | ||
163 | ({ \ | ||
164 | union { typeof(x) __val; char __c[1]; } __u = \ | ||
165 | { .__val = (val) }; \ | ||
166 | __write_once_size(&(x), __u.__c, sizeof(x)); \ | ||
167 | __u.__val; \ | ||
168 | }) | ||
159 | 169 | ||
160 | 170 | ||
161 | #ifndef __fallthrough | 171 | #ifndef __fallthrough |
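The reworked READ_ONCE()/WRITE_ONCE() keep the same semantics: exactly one access of the given size per use, which stops the compiler from caching or tearing the value. A minimal usage sketch against the tools copy of linux/compiler.h, with an illustrative flag shared between threads:

    #include <linux/compiler.h>     /* the tools/include copy shown above */

    static int done;                /* flag shared with another thread */

    /* Sketch: one real load/store per access, so the compiler can neither
     * cache the flag in a register nor tear the update. */
    static void wait_for_done(void)
    {
            while (!READ_ONCE(done))
                    ;               /* spin; real code would pause or poll */
    }

    static void signal_done(void)
    {
            WRITE_ONCE(done, 1);
    }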
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/tools/include/linux/coresight-pmu.h +++ b/tools/include/linux/coresight-pmu.h | |||
@@ -1,18 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #ifndef _LINUX_CORESIGHT_PMU_H | 7 | #ifndef _LINUX_CORESIGHT_PMU_H |
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index f8b134f5608f..e7ee32861d51 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h | |||
@@ -27,6 +27,9 @@ | |||
27 | # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ | 27 | # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | /* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ | ||
31 | #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ | ||
32 | |||
30 | /* | 33 | /* |
31 | * Flags for mlock | 34 | * Flags for mlock |
32 | */ | 35 | */ |
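MAP_FIXED_NOREPLACE maps at the requested address like MAP_FIXED, but fails with EEXIST instead of silently unmapping whatever was already there. A small userspace sketch, assuming a kernel that honours the flag (older kernels ignore unknown mmap flags, so callers should still verify the returned address):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000    /* value from the hunk above */
    #endif

    int main(void)
    {
            void *hint = (void *)0x10000000UL;      /* illustrative address */
            void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                           -1, 0);

            if (p == MAP_FAILED && errno == EEXIST)
                    printf("range already mapped, nothing was clobbered\n");
            else if (p != MAP_FAILED && p != hint)
                    printf("flag not honoured by this kernel, got %p\n", p);
            else if (p != MAP_FAILED)
                    printf("mapped exactly at %p\n", p);
            return 0;
    }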
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d07465023a2..c5ec89732a8d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -864,6 +864,7 @@ enum bpf_func_id { | |||
864 | /* BPF_FUNC_skb_set_tunnel_key flags. */ | 864 | /* BPF_FUNC_skb_set_tunnel_key flags. */ |
865 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) | 865 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) |
866 | #define BPF_F_DONT_FRAGMENT (1ULL << 2) | 866 | #define BPF_F_DONT_FRAGMENT (1ULL << 2) |
867 | #define BPF_F_SEQ_NUMBER (1ULL << 3) | ||
867 | 868 | ||
868 | /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and | 869 | /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and |
869 | * BPF_FUNC_perf_event_read_value flags. | 870 | * BPF_FUNC_perf_event_read_value flags. |
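BPF_F_SEQ_NUMBER is a new flag for bpf_skb_set_tunnel_key(), asking the device to generate sequence numbers on encapsulation (e.g. GRE/ERSPAN). A hedged sketch of a tc BPF program fragment passing it; the section name, addresses and the bpf/bpf_helpers.h include are illustrative conventions, not mandated by this header:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>    /* assumed libbpf helper declarations */

    SEC("tc")
    int set_tunnel(struct __sk_buff *skb)
    {
            struct bpf_tunnel_key key = {
                    .remote_ipv4 = 0x0a000001,      /* 10.0.0.1, illustrative */
                    .tunnel_id   = 42,
            };

            /* Ask the device to add sequence numbers on encapsulation. */
            bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
            return 0;       /* TC_ACT_OK */
    }

    char _license[] SEC("license") = "GPL";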
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 6d9447700e18..68699f654118 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h | |||
@@ -941,4 +941,43 @@ enum { | |||
941 | IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ | 941 | IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ |
942 | }; | 942 | }; |
943 | 943 | ||
944 | /* tun section */ | ||
945 | |||
946 | enum { | ||
947 | IFLA_TUN_UNSPEC, | ||
948 | IFLA_TUN_OWNER, | ||
949 | IFLA_TUN_GROUP, | ||
950 | IFLA_TUN_TYPE, | ||
951 | IFLA_TUN_PI, | ||
952 | IFLA_TUN_VNET_HDR, | ||
953 | IFLA_TUN_PERSIST, | ||
954 | IFLA_TUN_MULTI_QUEUE, | ||
955 | IFLA_TUN_NUM_QUEUES, | ||
956 | IFLA_TUN_NUM_DISABLED_QUEUES, | ||
957 | __IFLA_TUN_MAX, | ||
958 | }; | ||
959 | |||
960 | #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) | ||
961 | |||
962 | /* rmnet section */ | ||
963 | |||
964 | #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) | ||
965 | #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) | ||
966 | #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) | ||
967 | #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) | ||
968 | |||
969 | enum { | ||
970 | IFLA_RMNET_UNSPEC, | ||
971 | IFLA_RMNET_MUX_ID, | ||
972 | IFLA_RMNET_FLAGS, | ||
973 | __IFLA_RMNET_MAX, | ||
974 | }; | ||
975 | |||
976 | #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) | ||
977 | |||
978 | struct ifla_rmnet_flags { | ||
979 | __u32 flags; | ||
980 | __u32 mask; | ||
981 | }; | ||
982 | |||
944 | #endif /* _UAPI_LINUX_IF_LINK_H */ | 983 | #endif /* _UAPI_LINUX_IF_LINK_H */ |
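The new IFLA_RMNET_FLAGS attribute carries a flags/mask pair, so userspace can flip individual RMNET_FLAGS_* bits without disturbing the rest. A minimal sketch of building the attribute payload (the netlink message plumbing is omitted, and the chosen bits are only an example):

    #include <linux/if_link.h>

    /* Sketch: enable ingress deaggregation and ingress MAP checksum offload,
     * leaving every other bit as the driver currently has it.  Only the bits
     * set in .mask are changed; .flags supplies their new values. */
    static struct ifla_rmnet_flags rmnet_enable_deagg_csum(void)
    {
            struct ifla_rmnet_flags f = {
                    .flags = RMNET_FLAGS_INGRESS_DEAGGREGATION |
                             RMNET_FLAGS_INGRESS_MAP_CKSUMV4,
                    .mask  = RMNET_FLAGS_INGRESS_DEAGGREGATION |
                             RMNET_FLAGS_INGRESS_MAP_CKSUMV4,
            };
            return f;       /* sent as the IFLA_RMNET_FLAGS attribute payload */
    }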
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 6b89f87db200..1065006c9bf5 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h | |||
@@ -396,6 +396,10 @@ struct kvm_run { | |||
396 | char padding[256]; | 396 | char padding[256]; |
397 | }; | 397 | }; |
398 | 398 | ||
399 | /* 2048 is the size of the char array used to bound/pad the size | ||
400 | * of the union that holds sync regs. | ||
401 | */ | ||
402 | #define SYNC_REGS_SIZE_BYTES 2048 | ||
399 | /* | 403 | /* |
400 | * shared registers between kvm and userspace. | 404 | * shared registers between kvm and userspace. |
401 | * kvm_valid_regs specifies the register classes set by the host | 405 | * kvm_valid_regs specifies the register classes set by the host |
@@ -407,7 +411,7 @@ struct kvm_run { | |||
407 | __u64 kvm_dirty_regs; | 411 | __u64 kvm_dirty_regs; |
408 | union { | 412 | union { |
409 | struct kvm_sync_regs regs; | 413 | struct kvm_sync_regs regs; |
410 | char padding[2048]; | 414 | char padding[SYNC_REGS_SIZE_BYTES]; |
411 | } s; | 415 | } s; |
412 | }; | 416 | }; |
413 | 417 | ||
@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt { | |||
936 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 | 940 | #define KVM_CAP_PPC_GET_CPU_CHAR 151 |
937 | #define KVM_CAP_S390_BPB 152 | 941 | #define KVM_CAP_S390_BPB 152 |
938 | #define KVM_CAP_GET_MSR_FEATURES 153 | 942 | #define KVM_CAP_GET_MSR_FEATURES 153 |
943 | #define KVM_CAP_HYPERV_EVENTFD 154 | ||
939 | 944 | ||
940 | #ifdef KVM_CAP_IRQ_ROUTING | 945 | #ifdef KVM_CAP_IRQ_ROUTING |
941 | 946 | ||
@@ -1375,6 +1380,10 @@ struct kvm_enc_region { | |||
1375 | #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) | 1380 | #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) |
1376 | #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) | 1381 | #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) |
1377 | 1382 | ||
1383 | /* Available with KVM_CAP_HYPERV_EVENTFD */ | ||
1384 | #define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) | ||
1385 | |||
1386 | |||
1378 | /* Secure Encrypted Virtualization command */ | 1387 | /* Secure Encrypted Virtualization command */ |
1379 | enum sev_cmd_id { | 1388 | enum sev_cmd_id { |
1380 | /* Guest initialization commands */ | 1389 | /* Guest initialization commands */ |
@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry { | |||
1515 | #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) | 1524 | #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) |
1516 | #define KVM_ARM_DEV_PMU (1 << 2) | 1525 | #define KVM_ARM_DEV_PMU (1 << 2) |
1517 | 1526 | ||
1527 | struct kvm_hyperv_eventfd { | ||
1528 | __u32 conn_id; | ||
1529 | __s32 fd; | ||
1530 | __u32 flags; | ||
1531 | __u32 padding[3]; | ||
1532 | }; | ||
1533 | |||
1534 | #define KVM_HYPERV_CONN_ID_MASK 0x00ffffff | ||
1535 | #define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) | ||
1536 | |||
1518 | #endif /* __LINUX_KVM_H */ | 1537 | #endif /* __LINUX_KVM_H */ |
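KVM_HYPERV_EVENTFD ties an eventfd to a Hyper-V connection id so guest HvSignalEvent hypercalls can wake userspace. A minimal sketch of assigning one, assuming vm_fd is an existing VM descriptor and efd an eventfd created elsewhere (illustrative names):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: signal 'efd' whenever the guest issues HvSignalEvent for conn_id. */
    static int hyperv_eventfd_assign(int vm_fd, __u32 conn_id, int efd)
    {
            struct kvm_hyperv_eventfd hve = {
                    .conn_id = conn_id & KVM_HYPERV_CONN_ID_MASK,
                    .fd      = efd,
                    .flags   = 0,   /* KVM_HYPERV_EVENTFD_DEASSIGN would detach it */
            };

            if (!ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_EVENTFD))
                    return -1;      /* capability not present */

            return ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hve);
    }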
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h | |||
@@ -650,11 +650,23 @@ struct perf_event_mmap_page { | |||
650 | #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) | 650 | #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) |
651 | #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) | 651 | #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) |
652 | /* | 652 | /* |
653 | * Indicates that the content of PERF_SAMPLE_IP points to | 653 | * These PERF_RECORD_MISC_* flags below are safely reused |
654 | * the actual instruction that triggered the event. See also | 654 | * for the following events: |
655 | * perf_event_attr::precise_ip. | 655 | * |
656 | * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events | ||
657 | * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events | ||
658 | * | ||
659 | * | ||
660 | * PERF_RECORD_MISC_EXACT_IP: | ||
661 | * Indicates that the content of PERF_SAMPLE_IP points to | ||
662 | * the actual instruction that triggered the event. See also | ||
663 | * perf_event_attr::precise_ip. | ||
664 | * | ||
665 | * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: | ||
666 | * Indicates that thread was preempted in TASK_RUNNING state. | ||
656 | */ | 667 | */ |
657 | #define PERF_RECORD_MISC_EXACT_IP (1 << 14) | 668 | #define PERF_RECORD_MISC_EXACT_IP (1 << 14) |
669 | #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) | ||
658 | /* | 670 | /* |
659 | * Reserve the last bit to indicate some extended misc field | 671 | * Reserve the last bit to indicate some extended misc field |
660 | */ | 672 | */ |
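Because bit 14 is now shared between PERF_RECORD_MISC_EXACT_IP (samples) and PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (context-switch records), a consumer has to check the record type before interpreting it. A small sketch of that check:

    #include <stdbool.h>
    #include <linux/perf_event.h>

    /* Sketch: bit 14 of header->misc means different things depending on
     * the record type, so test the type first. */
    static bool switched_out_preempted(const struct perf_event_header *hdr)
    {
            if (hdr->type != PERF_RECORD_SWITCH &&
                hdr->type != PERF_RECORD_SWITCH_CPU_WIDE)
                    return false;

            return (hdr->misc & PERF_RECORD_MISC_SWITCH_OUT) &&
                   (hdr->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
    }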
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 07d61583fd02..ed0a120d4f08 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h | |||
@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t; | |||
242 | #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ | 242 | #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ |
243 | #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ | 243 | #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ |
244 | #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE | 244 | #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE |
245 | #define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8 | ||
245 | 246 | ||
246 | #ifdef SNDRV_LITTLE_ENDIAN | 247 | #ifdef SNDRV_LITTLE_ENDIAN |
247 | #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE | 248 | #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE |
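SNDRV_PCM_FORMAT_FIRST pairs with the existing SNDRV_PCM_FORMAT_LAST so code can bound a scan of the format space. A minimal userspace sketch that walks the numeric range (a few codes inside it are unused, so real code would still validate each one):

    #include <stdio.h>
    #include <sound/asound.h>

    /* Sketch: walk the numeric range of PCM format codes bounded by
     * FIRST/LAST.  In userspace the __bitwise annotation compiles away,
     * so a plain int loop variable is fine. */
    static void list_pcm_format_codes(void)
    {
            int fmt;

            for (fmt = (int)SNDRV_PCM_FORMAT_FIRST;
                 fmt <= (int)SNDRV_PCM_FORMAT_LAST; fmt++)
                    printf("format code %d\n", fmt);
    }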
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index f6a1babcbac4..cb7154eccbdc 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c | |||
@@ -433,7 +433,7 @@ match: | |||
433 | 433 | ||
434 | if (ambiguous_option) { | 434 | if (ambiguous_option) { |
435 | fprintf(stderr, | 435 | fprintf(stderr, |
436 | " Error: Ambiguous option: %s (could be --%s%s or --%s%s)", | 436 | " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n", |
437 | arg, | 437 | arg, |
438 | (ambiguous_flags & OPT_UNSET) ? "no-" : "", | 438 | (ambiguous_flags & OPT_UNSET) ? "no-" : "", |
439 | ambiguous_option->long_name, | 439 | ambiguous_option->long_name, |
@@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options) | |||
458 | return; | 458 | return; |
459 | 459 | ||
460 | if (strstarts(arg, "no-")) { | 460 | if (strstarts(arg, "no-")) { |
461 | fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); | 461 | fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); |
462 | exit(129); | 462 | exit(129); |
463 | } | 463 | } |
464 | 464 | ||
@@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options) | |||
466 | if (!options->long_name) | 466 | if (!options->long_name) |
467 | continue; | 467 | continue; |
468 | if (strstarts(options->long_name, arg)) { | 468 | if (strstarts(options->long_name, arg)) { |
469 | fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); | 469 | fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); |
470 | exit(129); | 470 | exit(129); |
471 | } | 471 | } |
472 | } | 472 | } |
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile | |||
@@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ | |||
31 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ | 31 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ |
32 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include | 32 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include |
33 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed | 33 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed |
34 | CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) | 34 | CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) |
35 | LDFLAGS += -lelf $(LIBSUBCMD) | 35 | LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) |
36 | 36 | ||
37 | # Allow old libelf to be used: | 37 | # Allow old libelf to be used: |
38 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) | 38 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) |
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 5b4fff3adc4b..32f4a898e3f2 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt | |||
@@ -334,6 +334,11 @@ annotate.*:: | |||
334 | 334 | ||
335 | 99.93 │ mov %eax,%eax | 335 | 99.93 │ mov %eax,%eax |
336 | 336 | ||
337 | annotate.offset_level:: | ||
338 | Default is '1', meaning that only jump targets will have their offsets shown | ||
339 | right beside the instruction. When set to '2', 'call' instructions will also have | ||
340 | their offsets shown; '3' or higher shows offsets for all instructions. | ||
341 | |||
337 | hist.*:: | 342 | hist.*:: |
338 | hist.percentage:: | 343 | hist.percentage:: |
339 | This option control the way to calculate overhead of filtered entries - | 344 | This option control the way to calculate overhead of filtered entries - |
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index b0211410969b..8806ed5f3802 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt | |||
@@ -67,6 +67,9 @@ OPTIONS | |||
67 | --phys-data:: | 67 | --phys-data:: |
68 | Record/Report sample physical addresses | 68 | Record/Report sample physical addresses |
69 | 69 | ||
70 | In addition, all 'perf report' options are valid for the report subcommand, | ||
71 | and all 'perf record' options for the record subcommand. | ||
72 | |||
70 | SEE ALSO | 73 | SEE ALSO |
71 | -------- | 74 | -------- |
72 | linkperf:perf-record[1], linkperf:perf-report[1] | 75 | linkperf:perf-record[1], linkperf:perf-report[1] |
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index bb33601a823b..63f938b887dd 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt | |||
@@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist' | |||
104 | kallsyms pathname | 104 | kallsyms pathname |
105 | 105 | ||
106 | -g:: | 106 | -g:: |
107 | --no-call-graph:: | 107 | --call-graph:: |
108 | Do not display call chains if present. | 108 | Display call chains if present (default on). |
109 | 109 | ||
110 | --max-stack:: | 110 | --max-stack:: |
111 | Maximum number of functions to display in backtrace, default 5. | 111 | Maximum number of functions to display in backtrace, default 5. |
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 36ec0257f8d3..afdafe2110a1 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -228,14 +228,15 @@ OPTIONS | |||
228 | For sample events it's possible to display misc field with -F +misc option, | 228 | For sample events it's possible to display misc field with -F +misc option, |
229 | following letters are displayed for each bit: | 229 | following letters are displayed for each bit: |
230 | 230 | ||
231 | PERF_RECORD_MISC_KERNEL K | 231 | PERF_RECORD_MISC_KERNEL K |
232 | PERF_RECORD_MISC_USER U | 232 | PERF_RECORD_MISC_USER U |
233 | PERF_RECORD_MISC_HYPERVISOR H | 233 | PERF_RECORD_MISC_HYPERVISOR H |
234 | PERF_RECORD_MISC_GUEST_KERNEL G | 234 | PERF_RECORD_MISC_GUEST_KERNEL G |
235 | PERF_RECORD_MISC_GUEST_USER g | 235 | PERF_RECORD_MISC_GUEST_USER g |
236 | PERF_RECORD_MISC_MMAP_DATA* M | 236 | PERF_RECORD_MISC_MMAP_DATA* M |
237 | PERF_RECORD_MISC_COMM_EXEC E | 237 | PERF_RECORD_MISC_COMM_EXEC E |
238 | PERF_RECORD_MISC_SWITCH_OUT S | 238 | PERF_RECORD_MISC_SWITCH_OUT S |
239 | PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp | ||
239 | 240 | ||
240 | $ perf script -F +misc ... | 241 | $ perf script -F +misc ... |
241 | sched-messaging 1414 K 28690.636582: 4590 cycles ... | 242 | sched-messaging 1414 K 28690.636582: 4590 cycles ... |
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index f15b306be183..e6c3b4e555c2 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt | |||
@@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m | |||
153 | 153 | ||
154 | -I msecs:: | 154 | -I msecs:: |
155 | --interval-print msecs:: | 155 | --interval-print msecs:: |
156 | Print count deltas every N milliseconds (minimum: 10ms) | 156 | Print count deltas every N milliseconds (minimum: 1ms) |
157 | The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. | 157 | The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. |
158 | example: 'perf stat -I 1000 -e cycles -a sleep 5' | 158 | example: 'perf stat -I 1000 -e cycles -a sleep 5' |
159 | 159 | ||
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index c7abd83a8e19..ae7dc46e8f8a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config | |||
@@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0) | |||
68 | endif | 68 | endif |
69 | 69 | ||
70 | ifneq ($(NO_SYSCALL_TABLE),1) | 70 | ifneq ($(NO_SYSCALL_TABLE),1) |
71 | CFLAGS += -DHAVE_SYSCALL_TABLE | 71 | CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT |
72 | endif | 72 | endif |
73 | 73 | ||
74 | # So far there's only x86 and arm libdw unwind support merged in perf. | 74 | # So far there's only x86 and arm libdw unwind support merged in perf. |
@@ -847,7 +847,7 @@ ifndef NO_JVMTI | |||
847 | ifeq ($(feature-jvmti), 1) | 847 | ifeq ($(feature-jvmti), 1) |
848 | $(call detected_var,JDIR) | 848 | $(call detected_var,JDIR) |
849 | else | 849 | else |
850 | $(warning No openjdk development package found, please install JDK package) | 850 | $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel) |
851 | NO_JVMTI := 1 | 851 | NO_JVMTI := 1 |
852 | endif | 852 | endif |
853 | endif | 853 | endif |
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h new file mode 100644 index 000000000000..90ec4c8cb880 --- /dev/null +++ b/tools/perf/arch/arm/include/arch-tests.h | |||
@@ -0,0 +1,12 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef ARCH_TESTS_H | ||
3 | #define ARCH_TESTS_H | ||
4 | |||
5 | #ifdef HAVE_DWARF_UNWIND_SUPPORT | ||
6 | struct thread; | ||
7 | struct perf_sample; | ||
8 | #endif | ||
9 | |||
10 | extern struct test arch_tests[]; | ||
11 | |||
12 | #endif | ||
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build index b30eff9bcc83..883c57ff0c08 100644 --- a/tools/perf/arch/arm/tests/Build +++ b/tools/perf/arch/arm/tests/Build | |||
@@ -1,2 +1,4 @@ | |||
1 | libperf-y += regs_load.o | 1 | libperf-y += regs_load.o |
2 | libperf-y += dwarf-unwind.o | 2 | libperf-y += dwarf-unwind.o |
3 | |||
4 | libperf-y += arch-tests.o | ||
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c new file mode 100644 index 000000000000..5b1543c98022 --- /dev/null +++ b/tools/perf/arch/arm/tests/arch-tests.c | |||
@@ -0,0 +1,16 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <string.h> | ||
3 | #include "tests/tests.h" | ||
4 | #include "arch-tests.h" | ||
5 | |||
6 | struct test arch_tests[] = { | ||
7 | #ifdef HAVE_DWARF_UNWIND_SUPPORT | ||
8 | { | ||
9 | .desc = "DWARF unwind", | ||
10 | .func = test__dwarf_unwind, | ||
11 | }, | ||
12 | #endif | ||
13 | { | ||
14 | .func = NULL, | ||
15 | }, | ||
16 | }; | ||
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index fa639e3e52ac..1ce6bdbda561 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c | |||
@@ -1,18 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #include <stdbool.h> | 7 | #include <stdbool.h> |
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 5c655ad4621e..2f595cd73da6 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c | |||
@@ -1,18 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #include <api/fs/fs.h> | 7 | #include <api/fs/fs.h> |
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h index 5256741be549..1a12e64f5127 100644 --- a/tools/perf/arch/arm/util/cs-etm.h +++ b/tools/perf/arch/arm/util/cs-etm.h | |||
@@ -1,18 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #ifndef INCLUDE__PERF_CS_ETM_H__ | 7 | #ifndef INCLUDE__PERF_CS_ETM_H__ |
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c index ac4dffc807b8..e047571e6080 100644 --- a/tools/perf/arch/arm/util/pmu.c +++ b/tools/perf/arch/arm/util/pmu.c | |||
@@ -1,18 +1,7 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #include <string.h> | 7 | #include <string.h> |
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index d74eaa7aa927..1a38e78117ce 100644 --- a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile | |||
@@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') | |||
21 | $(header): $(sys)/syscall_64.tbl $(systbl) | 21 | $(header): $(sys)/syscall_64.tbl $(systbl) |
22 | @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ | 22 | @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ |
23 | (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ | 23 | (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ |
24 | || echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true | 24 | || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true |
25 | $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ | 25 | $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ |
26 | 26 | ||
27 | clean:: | 27 | clean:: |
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c index 5bd1ba8c0282..44f5aba78210 100644 --- a/tools/perf/arch/x86/annotate/instructions.c +++ b/tools/perf/arch/x86/annotate/instructions.c | |||
@@ -1,21 +1,43 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | static struct ins x86__instructions[] = { | 2 | static struct ins x86__instructions[] = { |
3 | { .name = "adc", .ops = &mov_ops, }, | ||
4 | { .name = "adcb", .ops = &mov_ops, }, | ||
5 | { .name = "adcl", .ops = &mov_ops, }, | ||
3 | { .name = "add", .ops = &mov_ops, }, | 6 | { .name = "add", .ops = &mov_ops, }, |
4 | { .name = "addl", .ops = &mov_ops, }, | 7 | { .name = "addl", .ops = &mov_ops, }, |
5 | { .name = "addq", .ops = &mov_ops, }, | 8 | { .name = "addq", .ops = &mov_ops, }, |
9 | { .name = "addsd", .ops = &mov_ops, }, | ||
6 | { .name = "addw", .ops = &mov_ops, }, | 10 | { .name = "addw", .ops = &mov_ops, }, |
7 | { .name = "and", .ops = &mov_ops, }, | 11 | { .name = "and", .ops = &mov_ops, }, |
12 | { .name = "andb", .ops = &mov_ops, }, | ||
13 | { .name = "andl", .ops = &mov_ops, }, | ||
14 | { .name = "andpd", .ops = &mov_ops, }, | ||
15 | { .name = "andps", .ops = &mov_ops, }, | ||
16 | { .name = "andq", .ops = &mov_ops, }, | ||
17 | { .name = "andw", .ops = &mov_ops, }, | ||
18 | { .name = "bsr", .ops = &mov_ops, }, | ||
19 | { .name = "bt", .ops = &mov_ops, }, | ||
20 | { .name = "btr", .ops = &mov_ops, }, | ||
8 | { .name = "bts", .ops = &mov_ops, }, | 21 | { .name = "bts", .ops = &mov_ops, }, |
22 | { .name = "btsq", .ops = &mov_ops, }, | ||
9 | { .name = "call", .ops = &call_ops, }, | 23 | { .name = "call", .ops = &call_ops, }, |
10 | { .name = "callq", .ops = &call_ops, }, | 24 | { .name = "callq", .ops = &call_ops, }, |
25 | { .name = "cmovbe", .ops = &mov_ops, }, | ||
26 | { .name = "cmove", .ops = &mov_ops, }, | ||
27 | { .name = "cmovae", .ops = &mov_ops, }, | ||
11 | { .name = "cmp", .ops = &mov_ops, }, | 28 | { .name = "cmp", .ops = &mov_ops, }, |
12 | { .name = "cmpb", .ops = &mov_ops, }, | 29 | { .name = "cmpb", .ops = &mov_ops, }, |
13 | { .name = "cmpl", .ops = &mov_ops, }, | 30 | { .name = "cmpl", .ops = &mov_ops, }, |
14 | { .name = "cmpq", .ops = &mov_ops, }, | 31 | { .name = "cmpq", .ops = &mov_ops, }, |
15 | { .name = "cmpw", .ops = &mov_ops, }, | 32 | { .name = "cmpw", .ops = &mov_ops, }, |
16 | { .name = "cmpxch", .ops = &mov_ops, }, | 33 | { .name = "cmpxch", .ops = &mov_ops, }, |
34 | { .name = "cmpxchg", .ops = &mov_ops, }, | ||
35 | { .name = "cs", .ops = &mov_ops, }, | ||
17 | { .name = "dec", .ops = &dec_ops, }, | 36 | { .name = "dec", .ops = &dec_ops, }, |
18 | { .name = "decl", .ops = &dec_ops, }, | 37 | { .name = "decl", .ops = &dec_ops, }, |
38 | { .name = "divsd", .ops = &mov_ops, }, | ||
39 | { .name = "divss", .ops = &mov_ops, }, | ||
40 | { .name = "gs", .ops = &mov_ops, }, | ||
19 | { .name = "imul", .ops = &mov_ops, }, | 41 | { .name = "imul", .ops = &mov_ops, }, |
20 | { .name = "inc", .ops = &dec_ops, }, | 42 | { .name = "inc", .ops = &dec_ops, }, |
21 | { .name = "incl", .ops = &dec_ops, }, | 43 | { .name = "incl", .ops = &dec_ops, }, |
@@ -57,25 +79,68 @@ static struct ins x86__instructions[] = { | |||
57 | { .name = "lea", .ops = &mov_ops, }, | 79 | { .name = "lea", .ops = &mov_ops, }, |
58 | { .name = "lock", .ops = &lock_ops, }, | 80 | { .name = "lock", .ops = &lock_ops, }, |
59 | { .name = "mov", .ops = &mov_ops, }, | 81 | { .name = "mov", .ops = &mov_ops, }, |
82 | { .name = "movapd", .ops = &mov_ops, }, | ||
83 | { .name = "movaps", .ops = &mov_ops, }, | ||
60 | { .name = "movb", .ops = &mov_ops, }, | 84 | { .name = "movb", .ops = &mov_ops, }, |
61 | { .name = "movdqa", .ops = &mov_ops, }, | 85 | { .name = "movdqa", .ops = &mov_ops, }, |
86 | { .name = "movdqu", .ops = &mov_ops, }, | ||
62 | { .name = "movl", .ops = &mov_ops, }, | 87 | { .name = "movl", .ops = &mov_ops, }, |
63 | { .name = "movq", .ops = &mov_ops, }, | 88 | { .name = "movq", .ops = &mov_ops, }, |
89 | { .name = "movsd", .ops = &mov_ops, }, | ||
64 | { .name = "movslq", .ops = &mov_ops, }, | 90 | { .name = "movslq", .ops = &mov_ops, }, |
91 | { .name = "movss", .ops = &mov_ops, }, | ||
92 | { .name = "movupd", .ops = &mov_ops, }, | ||
93 | { .name = "movups", .ops = &mov_ops, }, | ||
94 | { .name = "movw", .ops = &mov_ops, }, | ||
65 | { .name = "movzbl", .ops = &mov_ops, }, | 95 | { .name = "movzbl", .ops = &mov_ops, }, |
66 | { .name = "movzwl", .ops = &mov_ops, }, | 96 | { .name = "movzwl", .ops = &mov_ops, }, |
97 | { .name = "mulsd", .ops = &mov_ops, }, | ||
98 | { .name = "mulss", .ops = &mov_ops, }, | ||
67 | { .name = "nop", .ops = &nop_ops, }, | 99 | { .name = "nop", .ops = &nop_ops, }, |
68 | { .name = "nopl", .ops = &nop_ops, }, | 100 | { .name = "nopl", .ops = &nop_ops, }, |
69 | { .name = "nopw", .ops = &nop_ops, }, | 101 | { .name = "nopw", .ops = &nop_ops, }, |
70 | { .name = "or", .ops = &mov_ops, }, | 102 | { .name = "or", .ops = &mov_ops, }, |
103 | { .name = "orb", .ops = &mov_ops, }, | ||
71 | { .name = "orl", .ops = &mov_ops, }, | 104 | { .name = "orl", .ops = &mov_ops, }, |
105 | { .name = "orps", .ops = &mov_ops, }, | ||
106 | { .name = "orq", .ops = &mov_ops, }, | ||
107 | { .name = "pand", .ops = &mov_ops, }, | ||
108 | { .name = "paddq", .ops = &mov_ops, }, | ||
109 | { .name = "pcmpeqb", .ops = &mov_ops, }, | ||
110 | { .name = "por", .ops = &mov_ops, }, | ||
111 | { .name = "rclb", .ops = &mov_ops, }, | ||
112 | { .name = "rcll", .ops = &mov_ops, }, | ||
113 | { .name = "retq", .ops = &ret_ops, }, | ||
114 | { .name = "sbb", .ops = &mov_ops, }, | ||
115 | { .name = "sbbl", .ops = &mov_ops, }, | ||
116 | { .name = "sete", .ops = &mov_ops, }, | ||
117 | { .name = "sub", .ops = &mov_ops, }, | ||
118 | { .name = "subl", .ops = &mov_ops, }, | ||
119 | { .name = "subq", .ops = &mov_ops, }, | ||
120 | { .name = "subsd", .ops = &mov_ops, }, | ||
121 | { .name = "subw", .ops = &mov_ops, }, | ||
72 | { .name = "test", .ops = &mov_ops, }, | 122 | { .name = "test", .ops = &mov_ops, }, |
73 | { .name = "testb", .ops = &mov_ops, }, | 123 | { .name = "testb", .ops = &mov_ops, }, |
74 | { .name = "testl", .ops = &mov_ops, }, | 124 | { .name = "testl", .ops = &mov_ops, }, |
125 | { .name = "ucomisd", .ops = &mov_ops, }, | ||
126 | { .name = "ucomiss", .ops = &mov_ops, }, | ||
127 | { .name = "vaddsd", .ops = &mov_ops, }, | ||
128 | { .name = "vandpd", .ops = &mov_ops, }, | ||
129 | { .name = "vmovdqa", .ops = &mov_ops, }, | ||
130 | { .name = "vmovq", .ops = &mov_ops, }, | ||
131 | { .name = "vmovsd", .ops = &mov_ops, }, | ||
132 | { .name = "vmulsd", .ops = &mov_ops, }, | ||
133 | { .name = "vorpd", .ops = &mov_ops, }, | ||
134 | { .name = "vsubsd", .ops = &mov_ops, }, | ||
135 | { .name = "vucomisd", .ops = &mov_ops, }, | ||
75 | { .name = "xadd", .ops = &mov_ops, }, | 136 | { .name = "xadd", .ops = &mov_ops, }, |
76 | { .name = "xbeginl", .ops = &jump_ops, }, | 137 | { .name = "xbeginl", .ops = &jump_ops, }, |
77 | { .name = "xbeginq", .ops = &jump_ops, }, | 138 | { .name = "xbeginq", .ops = &jump_ops, }, |
78 | { .name = "retq", .ops = &ret_ops, }, | 139 | { .name = "xchg", .ops = &mov_ops, }, |
140 | { .name = "xor", .ops = &mov_ops, }, | ||
141 | { .name = "xorb", .ops = &mov_ops, }, | ||
142 | { .name = "xorpd", .ops = &mov_ops, }, | ||
143 | { .name = "xorps", .ops = &mov_ops, }, | ||
79 | }; | 144 | }; |
80 | 145 | ||
81 | static bool x86__ins_is_fused(struct arch *arch, const char *ins1, | 146 | static bool x86__ins_is_fused(struct arch *arch, const char *ins1, |
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 5aef183e2f85..4dfe42666d0c 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | |||
@@ -4,379 +4,383 @@ | |||
4 | # The format is: | 4 | # The format is: |
5 | # <number> <abi> <name> <entry point> | 5 | # <number> <abi> <name> <entry point> |
6 | # | 6 | # |
7 | # The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls | ||
8 | # | ||
7 | # The abi is "common", "64" or "x32" for this file. | 9 | # The abi is "common", "64" or "x32" for this file. |
8 | # | 10 | # |
9 | 0 common read sys_read | 11 | 0 common read __x64_sys_read |
10 | 1 common write sys_write | 12 | 1 common write __x64_sys_write |
11 | 2 common open sys_open | 13 | 2 common open __x64_sys_open |
12 | 3 common close sys_close | 14 | 3 common close __x64_sys_close |
13 | 4 common stat sys_newstat | 15 | 4 common stat __x64_sys_newstat |
14 | 5 common fstat sys_newfstat | 16 | 5 common fstat __x64_sys_newfstat |
15 | 6 common lstat sys_newlstat | 17 | 6 common lstat __x64_sys_newlstat |
16 | 7 common poll sys_poll | 18 | 7 common poll __x64_sys_poll |
17 | 8 common lseek sys_lseek | 19 | 8 common lseek __x64_sys_lseek |
18 | 9 common mmap sys_mmap | 20 | 9 common mmap __x64_sys_mmap |
19 | 10 common mprotect sys_mprotect | 21 | 10 common mprotect __x64_sys_mprotect |
20 | 11 common munmap sys_munmap | 22 | 11 common munmap __x64_sys_munmap |
21 | 12 common brk sys_brk | 23 | 12 common brk __x64_sys_brk |
22 | 13 64 rt_sigaction sys_rt_sigaction | 24 | 13 64 rt_sigaction __x64_sys_rt_sigaction |
23 | 14 common rt_sigprocmask sys_rt_sigprocmask | 25 | 14 common rt_sigprocmask __x64_sys_rt_sigprocmask |
24 | 15 64 rt_sigreturn sys_rt_sigreturn/ptregs | 26 | 15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs |
25 | 16 64 ioctl sys_ioctl | 27 | 16 64 ioctl __x64_sys_ioctl |
26 | 17 common pread64 sys_pread64 | 28 | 17 common pread64 __x64_sys_pread64 |
27 | 18 common pwrite64 sys_pwrite64 | 29 | 18 common pwrite64 __x64_sys_pwrite64 |
28 | 19 64 readv sys_readv | 30 | 19 64 readv __x64_sys_readv |
29 | 20 64 writev sys_writev | 31 | 20 64 writev __x64_sys_writev |
30 | 21 common access sys_access | 32 | 21 common access __x64_sys_access |
31 | 22 common pipe sys_pipe | 33 | 22 common pipe __x64_sys_pipe |
32 | 23 common select sys_select | 34 | 23 common select __x64_sys_select |
33 | 24 common sched_yield sys_sched_yield | 35 | 24 common sched_yield __x64_sys_sched_yield |
34 | 25 common mremap sys_mremap | 36 | 25 common mremap __x64_sys_mremap |
35 | 26 common msync sys_msync | 37 | 26 common msync __x64_sys_msync |
36 | 27 common mincore sys_mincore | 38 | 27 common mincore __x64_sys_mincore |
37 | 28 common madvise sys_madvise | 39 | 28 common madvise __x64_sys_madvise |
38 | 29 common shmget sys_shmget | 40 | 29 common shmget __x64_sys_shmget |
39 | 30 common shmat sys_shmat | 41 | 30 common shmat __x64_sys_shmat |
40 | 31 common shmctl sys_shmctl | 42 | 31 common shmctl __x64_sys_shmctl |
41 | 32 common dup sys_dup | 43 | 32 common dup __x64_sys_dup |
42 | 33 common dup2 sys_dup2 | 44 | 33 common dup2 __x64_sys_dup2 |
43 | 34 common pause sys_pause | 45 | 34 common pause __x64_sys_pause |
44 | 35 common nanosleep sys_nanosleep | 46 | 35 common nanosleep __x64_sys_nanosleep |
45 | 36 common getitimer sys_getitimer | 47 | 36 common getitimer __x64_sys_getitimer |
46 | 37 common alarm sys_alarm | 48 | 37 common alarm __x64_sys_alarm |
47 | 38 common setitimer sys_setitimer | 49 | 38 common setitimer __x64_sys_setitimer |
48 | 39 common getpid sys_getpid | 50 | 39 common getpid __x64_sys_getpid |
49 | 40 common sendfile sys_sendfile64 | 51 | 40 common sendfile __x64_sys_sendfile64 |
50 | 41 common socket sys_socket | 52 | 41 common socket __x64_sys_socket |
51 | 42 common connect sys_connect | 53 | 42 common connect __x64_sys_connect |
52 | 43 common accept sys_accept | 54 | 43 common accept __x64_sys_accept |
53 | 44 common sendto sys_sendto | 55 | 44 common sendto __x64_sys_sendto |
54 | 45 64 recvfrom sys_recvfrom | 56 | 45 64 recvfrom __x64_sys_recvfrom |
55 | 46 64 sendmsg sys_sendmsg | 57 | 46 64 sendmsg __x64_sys_sendmsg |
56 | 47 64 recvmsg sys_recvmsg | 58 | 47 64 recvmsg __x64_sys_recvmsg |
57 | 48 common shutdown sys_shutdown | 59 | 48 common shutdown __x64_sys_shutdown |
58 | 49 common bind sys_bind | 60 | 49 common bind __x64_sys_bind |
59 | 50 common listen sys_listen | 61 | 50 common listen __x64_sys_listen |
60 | 51 common getsockname sys_getsockname | 62 | 51 common getsockname __x64_sys_getsockname |
61 | 52 common getpeername sys_getpeername | 63 | 52 common getpeername __x64_sys_getpeername |
62 | 53 common socketpair sys_socketpair | 64 | 53 common socketpair __x64_sys_socketpair |
63 | 54 64 setsockopt sys_setsockopt | 65 | 54 64 setsockopt __x64_sys_setsockopt |
64 | 55 64 getsockopt sys_getsockopt | 66 | 55 64 getsockopt __x64_sys_getsockopt |
65 | 56 common clone sys_clone/ptregs | 67 | 56 common clone __x64_sys_clone/ptregs |
66 | 57 common fork sys_fork/ptregs | 68 | 57 common fork __x64_sys_fork/ptregs |
67 | 58 common vfork sys_vfork/ptregs | 69 | 58 common vfork __x64_sys_vfork/ptregs |
68 | 59 64 execve sys_execve/ptregs | 70 | 59 64 execve __x64_sys_execve/ptregs |
69 | 60 common exit sys_exit | 71 | 60 common exit __x64_sys_exit |
70 | 61 common wait4 sys_wait4 | 72 | 61 common wait4 __x64_sys_wait4 |
71 | 62 common kill sys_kill | 73 | 62 common kill __x64_sys_kill |
72 | 63 common uname sys_newuname | 74 | 63 common uname __x64_sys_newuname |
73 | 64 common semget sys_semget | 75 | 64 common semget __x64_sys_semget |
74 | 65 common semop sys_semop | 76 | 65 common semop __x64_sys_semop |
75 | 66 common semctl sys_semctl | 77 | 66 common semctl __x64_sys_semctl |
76 | 67 common shmdt sys_shmdt | 78 | 67 common shmdt __x64_sys_shmdt |
77 | 68 common msgget sys_msgget | 79 | 68 common msgget __x64_sys_msgget |
78 | 69 common msgsnd sys_msgsnd | 80 | 69 common msgsnd __x64_sys_msgsnd |
79 | 70 common msgrcv sys_msgrcv | 81 | 70 common msgrcv __x64_sys_msgrcv |
80 | 71 common msgctl sys_msgctl | 82 | 71 common msgctl __x64_sys_msgctl |
81 | 72 common fcntl sys_fcntl | 83 | 72 common fcntl __x64_sys_fcntl |
82 | 73 common flock sys_flock | 84 | 73 common flock __x64_sys_flock |
83 | 74 common fsync sys_fsync | 85 | 74 common fsync __x64_sys_fsync |
84 | 75 common fdatasync sys_fdatasync | 86 | 75 common fdatasync __x64_sys_fdatasync |
85 | 76 common truncate sys_truncate | 87 | 76 common truncate __x64_sys_truncate |
86 | 77 common ftruncate sys_ftruncate | 88 | 77 common ftruncate __x64_sys_ftruncate |
87 | 78 common getdents sys_getdents | 89 | 78 common getdents __x64_sys_getdents |
88 | 79 common getcwd sys_getcwd | 90 | 79 common getcwd __x64_sys_getcwd |
89 | 80 common chdir sys_chdir | 91 | 80 common chdir __x64_sys_chdir |
90 | 81 common fchdir sys_fchdir | 92 | 81 common fchdir __x64_sys_fchdir |
91 | 82 common rename sys_rename | 93 | 82 common rename __x64_sys_rename |
92 | 83 common mkdir sys_mkdir | 94 | 83 common mkdir __x64_sys_mkdir |
93 | 84 common rmdir sys_rmdir | 95 | 84 common rmdir __x64_sys_rmdir |
94 | 85 common creat sys_creat | 96 | 85 common creat __x64_sys_creat |
95 | 86 common link sys_link | 97 | 86 common link __x64_sys_link |
96 | 87 common unlink sys_unlink | 98 | 87 common unlink __x64_sys_unlink |
97 | 88 common symlink sys_symlink | 99 | 88 common symlink __x64_sys_symlink |
98 | 89 common readlink sys_readlink | 100 | 89 common readlink __x64_sys_readlink |
99 | 90 common chmod sys_chmod | 101 | 90 common chmod __x64_sys_chmod |
100 | 91 common fchmod sys_fchmod | 102 | 91 common fchmod __x64_sys_fchmod |
101 | 92 common chown sys_chown | 103 | 92 common chown __x64_sys_chown |
102 | 93 common fchown sys_fchown | 104 | 93 common fchown __x64_sys_fchown |
103 | 94 common lchown sys_lchown | 105 | 94 common lchown __x64_sys_lchown |
104 | 95 common umask sys_umask | 106 | 95 common umask __x64_sys_umask |
105 | 96 common gettimeofday sys_gettimeofday | 107 | 96 common gettimeofday __x64_sys_gettimeofday |
106 | 97 common getrlimit sys_getrlimit | 108 | 97 common getrlimit __x64_sys_getrlimit |
107 | 98 common getrusage sys_getrusage | 109 | 98 common getrusage __x64_sys_getrusage |
108 | 99 common sysinfo sys_sysinfo | 110 | 99 common sysinfo __x64_sys_sysinfo |
109 | 100 common times sys_times | 111 | 100 common times __x64_sys_times |
110 | 101 64 ptrace sys_ptrace | 112 | 101 64 ptrace __x64_sys_ptrace |
111 | 102 common getuid sys_getuid | 113 | 102 common getuid __x64_sys_getuid |
112 | 103 common syslog sys_syslog | 114 | 103 common syslog __x64_sys_syslog |
113 | 104 common getgid sys_getgid | 115 | 104 common getgid __x64_sys_getgid |
114 | 105 common setuid sys_setuid | 116 | 105 common setuid __x64_sys_setuid |
115 | 106 common setgid sys_setgid | 117 | 106 common setgid __x64_sys_setgid |
116 | 107 common geteuid sys_geteuid | 118 | 107 common geteuid __x64_sys_geteuid |
117 | 108 common getegid sys_getegid | 119 | 108 common getegid __x64_sys_getegid |
118 | 109 common setpgid sys_setpgid | 120 | 109 common setpgid __x64_sys_setpgid |
119 | 110 common getppid sys_getppid | 121 | 110 common getppid __x64_sys_getppid |
120 | 111 common getpgrp sys_getpgrp | 122 | 111 common getpgrp __x64_sys_getpgrp |
121 | 112 common setsid sys_setsid | 123 | 112 common setsid __x64_sys_setsid |
122 | 113 common setreuid sys_setreuid | 124 | 113 common setreuid __x64_sys_setreuid |
123 | 114 common setregid sys_setregid | 125 | 114 common setregid __x64_sys_setregid |
124 | 115 common getgroups sys_getgroups | 126 | 115 common getgroups __x64_sys_getgroups |
125 | 116 common setgroups sys_setgroups | 127 | 116 common setgroups __x64_sys_setgroups |
126 | 117 common setresuid sys_setresuid | 128 | 117 common setresuid __x64_sys_setresuid |
127 | 118 common getresuid sys_getresuid | 129 | 118 common getresuid __x64_sys_getresuid |
128 | 119 common setresgid sys_setresgid | 130 | 119 common setresgid __x64_sys_setresgid |
129 | 120 common getresgid sys_getresgid | 131 | 120 common getresgid __x64_sys_getresgid |
130 | 121 common getpgid sys_getpgid | 132 | 121 common getpgid __x64_sys_getpgid |
131 | 122 common setfsuid sys_setfsuid | 133 | 122 common setfsuid __x64_sys_setfsuid |
132 | 123 common setfsgid sys_setfsgid | 134 | 123 common setfsgid __x64_sys_setfsgid |
133 | 124 common getsid sys_getsid | 135 | 124 common getsid __x64_sys_getsid |
134 | 125 common capget sys_capget | 136 | 125 common capget __x64_sys_capget |
135 | 126 common capset sys_capset | 137 | 126 common capset __x64_sys_capset |
136 | 127 64 rt_sigpending sys_rt_sigpending | 138 | 127 64 rt_sigpending __x64_sys_rt_sigpending |
137 | 128 64 rt_sigtimedwait sys_rt_sigtimedwait | 139 | 128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait |
138 | 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo | 140 | 129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo |
139 | 130 common rt_sigsuspend sys_rt_sigsuspend | 141 | 130 common rt_sigsuspend __x64_sys_rt_sigsuspend |
140 | 131 64 sigaltstack sys_sigaltstack | 142 | 131 64 sigaltstack __x64_sys_sigaltstack |
141 | 132 common utime sys_utime | 143 | 132 common utime __x64_sys_utime |
142 | 133 common mknod sys_mknod | 144 | 133 common mknod __x64_sys_mknod |
143 | 134 64 uselib | 145 | 134 64 uselib |
144 | 135 common personality sys_personality | 146 | 135 common personality __x64_sys_personality |
145 | 136 common ustat sys_ustat | 147 | 136 common ustat __x64_sys_ustat |
146 | 137 common statfs sys_statfs | 148 | 137 common statfs __x64_sys_statfs |
147 | 138 common fstatfs sys_fstatfs | 149 | 138 common fstatfs __x64_sys_fstatfs |
148 | 139 common sysfs sys_sysfs | 150 | 139 common sysfs __x64_sys_sysfs |
149 | 140 common getpriority sys_getpriority | 151 | 140 common getpriority __x64_sys_getpriority |
150 | 141 common setpriority sys_setpriority | 152 | 141 common setpriority __x64_sys_setpriority |
151 | 142 common sched_setparam sys_sched_setparam | 153 | 142 common sched_setparam __x64_sys_sched_setparam |
152 | 143 common sched_getparam sys_sched_getparam | 154 | 143 common sched_getparam __x64_sys_sched_getparam |
153 | 144 common sched_setscheduler sys_sched_setscheduler | 155 | 144 common sched_setscheduler __x64_sys_sched_setscheduler |
154 | 145 common sched_getscheduler sys_sched_getscheduler | 156 | 145 common sched_getscheduler __x64_sys_sched_getscheduler |
155 | 146 common sched_get_priority_max sys_sched_get_priority_max | 157 | 146 common sched_get_priority_max __x64_sys_sched_get_priority_max |
156 | 147 common sched_get_priority_min sys_sched_get_priority_min | 158 | 147 common sched_get_priority_min __x64_sys_sched_get_priority_min |
157 | 148 common sched_rr_get_interval sys_sched_rr_get_interval | 159 | 148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval |
158 | 149 common mlock sys_mlock | 160 | 149 common mlock __x64_sys_mlock |
159 | 150 common munlock sys_munlock | 161 | 150 common munlock __x64_sys_munlock |
160 | 151 common mlockall sys_mlockall | 162 | 151 common mlockall __x64_sys_mlockall |
161 | 152 common munlockall sys_munlockall | 163 | 152 common munlockall __x64_sys_munlockall |
162 | 153 common vhangup sys_vhangup | 164 | 153 common vhangup __x64_sys_vhangup |
163 | 154 common modify_ldt sys_modify_ldt | 165 | 154 common modify_ldt __x64_sys_modify_ldt |
164 | 155 common pivot_root sys_pivot_root | 166 | 155 common pivot_root __x64_sys_pivot_root |
165 | 156 64 _sysctl sys_sysctl | 167 | 156 64 _sysctl __x64_sys_sysctl |
166 | 157 common prctl sys_prctl | 168 | 157 common prctl __x64_sys_prctl |
167 | 158 common arch_prctl sys_arch_prctl | 169 | 158 common arch_prctl __x64_sys_arch_prctl |
168 | 159 common adjtimex sys_adjtimex | 170 | 159 common adjtimex __x64_sys_adjtimex |
169 | 160 common setrlimit sys_setrlimit | 171 | 160 common setrlimit __x64_sys_setrlimit |
170 | 161 common chroot sys_chroot | 172 | 161 common chroot __x64_sys_chroot |
171 | 162 common sync sys_sync | 173 | 162 common sync __x64_sys_sync |
172 | 163 common acct sys_acct | 174 | 163 common acct __x64_sys_acct |
173 | 164 common settimeofday sys_settimeofday | 175 | 164 common settimeofday __x64_sys_settimeofday |
174 | 165 common mount sys_mount | 176 | 165 common mount __x64_sys_mount |
175 | 166 common umount2 sys_umount | 177 | 166 common umount2 __x64_sys_umount |
176 | 167 common swapon sys_swapon | 178 | 167 common swapon __x64_sys_swapon |
177 | 168 common swapoff sys_swapoff | 179 | 168 common swapoff __x64_sys_swapoff |
178 | 169 common reboot sys_reboot | 180 | 169 common reboot __x64_sys_reboot |
179 | 170 common sethostname sys_sethostname | 181 | 170 common sethostname __x64_sys_sethostname |
180 | 171 common setdomainname sys_setdomainname | 182 | 171 common setdomainname __x64_sys_setdomainname |
181 | 172 common iopl sys_iopl/ptregs | 183 | 172 common iopl __x64_sys_iopl/ptregs |
182 | 173 common ioperm sys_ioperm | 184 | 173 common ioperm __x64_sys_ioperm |
183 | 174 64 create_module | 185 | 174 64 create_module |
184 | 175 common init_module sys_init_module | 186 | 175 common init_module __x64_sys_init_module |
185 | 176 common delete_module sys_delete_module | 187 | 176 common delete_module __x64_sys_delete_module |
186 | 177 64 get_kernel_syms | 188 | 177 64 get_kernel_syms |
187 | 178 64 query_module | 189 | 178 64 query_module |
188 | 179 common quotactl sys_quotactl | 190 | 179 common quotactl __x64_sys_quotactl |
189 | 180 64 nfsservctl | 191 | 180 64 nfsservctl |
190 | 181 common getpmsg | 192 | 181 common getpmsg |
191 | 182 common putpmsg | 193 | 182 common putpmsg |
192 | 183 common afs_syscall | 194 | 183 common afs_syscall |
193 | 184 common tuxcall | 195 | 184 common tuxcall |
194 | 185 common security | 196 | 185 common security |
195 | 186 common gettid sys_gettid | 197 | 186 common gettid __x64_sys_gettid |
196 | 187 common readahead sys_readahead | 198 | 187 common readahead __x64_sys_readahead |
197 | 188 common setxattr sys_setxattr | 199 | 188 common setxattr __x64_sys_setxattr |
198 | 189 common lsetxattr sys_lsetxattr | 200 | 189 common lsetxattr __x64_sys_lsetxattr |
199 | 190 common fsetxattr sys_fsetxattr | 201 | 190 common fsetxattr __x64_sys_fsetxattr |
200 | 191 common getxattr sys_getxattr | 202 | 191 common getxattr __x64_sys_getxattr |
201 | 192 common lgetxattr sys_lgetxattr | 203 | 192 common lgetxattr __x64_sys_lgetxattr |
202 | 193 common fgetxattr sys_fgetxattr | 204 | 193 common fgetxattr __x64_sys_fgetxattr |
203 | 194 common listxattr sys_listxattr | 205 | 194 common listxattr __x64_sys_listxattr |
204 | 195 common llistxattr sys_llistxattr | 206 | 195 common llistxattr __x64_sys_llistxattr |
205 | 196 common flistxattr sys_flistxattr | 207 | 196 common flistxattr __x64_sys_flistxattr |
206 | 197 common removexattr sys_removexattr | 208 | 197 common removexattr __x64_sys_removexattr |
207 | 198 common lremovexattr sys_lremovexattr | 209 | 198 common lremovexattr __x64_sys_lremovexattr |
208 | 199 common fremovexattr sys_fremovexattr | 210 | 199 common fremovexattr __x64_sys_fremovexattr |
209 | 200 common tkill sys_tkill | 211 | 200 common tkill __x64_sys_tkill |
210 | 201 common time sys_time | 212 | 201 common time __x64_sys_time |
211 | 202 common futex sys_futex | 213 | 202 common futex __x64_sys_futex |
212 | 203 common sched_setaffinity sys_sched_setaffinity | 214 | 203 common sched_setaffinity __x64_sys_sched_setaffinity |
213 | 204 common sched_getaffinity sys_sched_getaffinity | 215 | 204 common sched_getaffinity __x64_sys_sched_getaffinity |
214 | 205 64 set_thread_area | 216 | 205 64 set_thread_area |
215 | 206 64 io_setup sys_io_setup | 217 | 206 64 io_setup __x64_sys_io_setup |
216 | 207 common io_destroy sys_io_destroy | 218 | 207 common io_destroy __x64_sys_io_destroy |
217 | 208 common io_getevents sys_io_getevents | 219 | 208 common io_getevents __x64_sys_io_getevents |
218 | 209 64 io_submit sys_io_submit | 220 | 209 64 io_submit __x64_sys_io_submit |
219 | 210 common io_cancel sys_io_cancel | 221 | 210 common io_cancel __x64_sys_io_cancel |
220 | 211 64 get_thread_area | 222 | 211 64 get_thread_area |
221 | 212 common lookup_dcookie sys_lookup_dcookie | 223 | 212 common lookup_dcookie __x64_sys_lookup_dcookie |
222 | 213 common epoll_create sys_epoll_create | 224 | 213 common epoll_create __x64_sys_epoll_create |
223 | 214 64 epoll_ctl_old | 225 | 214 64 epoll_ctl_old |
224 | 215 64 epoll_wait_old | 226 | 215 64 epoll_wait_old |
225 | 216 common remap_file_pages sys_remap_file_pages | 227 | 216 common remap_file_pages __x64_sys_remap_file_pages |
226 | 217 common getdents64 sys_getdents64 | 228 | 217 common getdents64 __x64_sys_getdents64 |
227 | 218 common set_tid_address sys_set_tid_address | 229 | 218 common set_tid_address __x64_sys_set_tid_address |
228 | 219 common restart_syscall sys_restart_syscall | 230 | 219 common restart_syscall __x64_sys_restart_syscall |
229 | 220 common semtimedop sys_semtimedop | 231 | 220 common semtimedop __x64_sys_semtimedop |
230 | 221 common fadvise64 sys_fadvise64 | 232 | 221 common fadvise64 __x64_sys_fadvise64 |
231 | 222 64 timer_create sys_timer_create | 233 | 222 64 timer_create __x64_sys_timer_create |
232 | 223 common timer_settime sys_timer_settime | 234 | 223 common timer_settime __x64_sys_timer_settime |
233 | 224 common timer_gettime sys_timer_gettime | 235 | 224 common timer_gettime __x64_sys_timer_gettime |
234 | 225 common timer_getoverrun sys_timer_getoverrun | 236 | 225 common timer_getoverrun __x64_sys_timer_getoverrun |
235 | 226 common timer_delete sys_timer_delete | 237 | 226 common timer_delete __x64_sys_timer_delete |
236 | 227 common clock_settime sys_clock_settime | 238 | 227 common clock_settime __x64_sys_clock_settime |
237 | 228 common clock_gettime sys_clock_gettime | 239 | 228 common clock_gettime __x64_sys_clock_gettime |
238 | 229 common clock_getres sys_clock_getres | 240 | 229 common clock_getres __x64_sys_clock_getres |
239 | 230 common clock_nanosleep sys_clock_nanosleep | 241 | 230 common clock_nanosleep __x64_sys_clock_nanosleep |
240 | 231 common exit_group sys_exit_group | 242 | 231 common exit_group __x64_sys_exit_group |
241 | 232 common epoll_wait sys_epoll_wait | 243 | 232 common epoll_wait __x64_sys_epoll_wait |
242 | 233 common epoll_ctl sys_epoll_ctl | 244 | 233 common epoll_ctl __x64_sys_epoll_ctl |
243 | 234 common tgkill sys_tgkill | 245 | 234 common tgkill __x64_sys_tgkill |
244 | 235 common utimes sys_utimes | 246 | 235 common utimes __x64_sys_utimes |
245 | 236 64 vserver | 247 | 236 64 vserver |
246 | 237 common mbind sys_mbind | 248 | 237 common mbind __x64_sys_mbind |
247 | 238 common set_mempolicy sys_set_mempolicy | 249 | 238 common set_mempolicy __x64_sys_set_mempolicy |
248 | 239 common get_mempolicy sys_get_mempolicy | 250 | 239 common get_mempolicy __x64_sys_get_mempolicy |
249 | 240 common mq_open sys_mq_open | 251 | 240 common mq_open __x64_sys_mq_open |
250 | 241 common mq_unlink sys_mq_unlink | 252 | 241 common mq_unlink __x64_sys_mq_unlink |
251 | 242 common mq_timedsend sys_mq_timedsend | 253 | 242 common mq_timedsend __x64_sys_mq_timedsend |
252 | 243 common mq_timedreceive sys_mq_timedreceive | 254 | 243 common mq_timedreceive __x64_sys_mq_timedreceive |
253 | 244 64 mq_notify sys_mq_notify | 255 | 244 64 mq_notify __x64_sys_mq_notify |
254 | 245 common mq_getsetattr sys_mq_getsetattr | 256 | 245 common mq_getsetattr __x64_sys_mq_getsetattr |
255 | 246 64 kexec_load sys_kexec_load | 257 | 246 64 kexec_load __x64_sys_kexec_load |
256 | 247 64 waitid sys_waitid | 258 | 247 64 waitid __x64_sys_waitid |
257 | 248 common add_key sys_add_key | 259 | 248 common add_key __x64_sys_add_key |
258 | 249 common request_key sys_request_key | 260 | 249 common request_key __x64_sys_request_key |
259 | 250 common keyctl sys_keyctl | 261 | 250 common keyctl __x64_sys_keyctl |
260 | 251 common ioprio_set sys_ioprio_set | 262 | 251 common ioprio_set __x64_sys_ioprio_set |
261 | 252 common ioprio_get sys_ioprio_get | 263 | 252 common ioprio_get __x64_sys_ioprio_get |
262 | 253 common inotify_init sys_inotify_init | 264 | 253 common inotify_init __x64_sys_inotify_init |
263 | 254 common inotify_add_watch sys_inotify_add_watch | 265 | 254 common inotify_add_watch __x64_sys_inotify_add_watch |
264 | 255 common inotify_rm_watch sys_inotify_rm_watch | 266 | 255 common inotify_rm_watch __x64_sys_inotify_rm_watch |
265 | 256 common migrate_pages sys_migrate_pages | 267 | 256 common migrate_pages __x64_sys_migrate_pages |
266 | 257 common openat sys_openat | 268 | 257 common openat __x64_sys_openat |
267 | 258 common mkdirat sys_mkdirat | 269 | 258 common mkdirat __x64_sys_mkdirat |
268 | 259 common mknodat sys_mknodat | 270 | 259 common mknodat __x64_sys_mknodat |
269 | 260 common fchownat sys_fchownat | 271 | 260 common fchownat __x64_sys_fchownat |
270 | 261 common futimesat sys_futimesat | 272 | 261 common futimesat __x64_sys_futimesat |
271 | 262 common newfstatat sys_newfstatat | 273 | 262 common newfstatat __x64_sys_newfstatat |
272 | 263 common unlinkat sys_unlinkat | 274 | 263 common unlinkat __x64_sys_unlinkat |
273 | 264 common renameat sys_renameat | 275 | 264 common renameat __x64_sys_renameat |
274 | 265 common linkat sys_linkat | 276 | 265 common linkat __x64_sys_linkat |
275 | 266 common symlinkat sys_symlinkat | 277 | 266 common symlinkat __x64_sys_symlinkat |
276 | 267 common readlinkat sys_readlinkat | 278 | 267 common readlinkat __x64_sys_readlinkat |
277 | 268 common fchmodat sys_fchmodat | 279 | 268 common fchmodat __x64_sys_fchmodat |
278 | 269 common faccessat sys_faccessat | 280 | 269 common faccessat __x64_sys_faccessat |
279 | 270 common pselect6 sys_pselect6 | 281 | 270 common pselect6 __x64_sys_pselect6 |
280 | 271 common ppoll sys_ppoll | 282 | 271 common ppoll __x64_sys_ppoll |
281 | 272 common unshare sys_unshare | 283 | 272 common unshare __x64_sys_unshare |
282 | 273 64 set_robust_list sys_set_robust_list | 284 | 273 64 set_robust_list __x64_sys_set_robust_list |
283 | 274 64 get_robust_list sys_get_robust_list | 285 | 274 64 get_robust_list __x64_sys_get_robust_list |
284 | 275 common splice sys_splice | 286 | 275 common splice __x64_sys_splice |
285 | 276 common tee sys_tee | 287 | 276 common tee __x64_sys_tee |
286 | 277 common sync_file_range sys_sync_file_range | 288 | 277 common sync_file_range __x64_sys_sync_file_range |
287 | 278 64 vmsplice sys_vmsplice | 289 | 278 64 vmsplice __x64_sys_vmsplice |
288 | 279 64 move_pages sys_move_pages | 290 | 279 64 move_pages __x64_sys_move_pages |
289 | 280 common utimensat sys_utimensat | 291 | 280 common utimensat __x64_sys_utimensat |
290 | 281 common epoll_pwait sys_epoll_pwait | 292 | 281 common epoll_pwait __x64_sys_epoll_pwait |
291 | 282 common signalfd sys_signalfd | 293 | 282 common signalfd __x64_sys_signalfd |
292 | 283 common timerfd_create sys_timerfd_create | 294 | 283 common timerfd_create __x64_sys_timerfd_create |
293 | 284 common eventfd sys_eventfd | 295 | 284 common eventfd __x64_sys_eventfd |
294 | 285 common fallocate sys_fallocate | 296 | 285 common fallocate __x64_sys_fallocate |
295 | 286 common timerfd_settime sys_timerfd_settime | 297 | 286 common timerfd_settime __x64_sys_timerfd_settime |
296 | 287 common timerfd_gettime sys_timerfd_gettime | 298 | 287 common timerfd_gettime __x64_sys_timerfd_gettime |
297 | 288 common accept4 sys_accept4 | 299 | 288 common accept4 __x64_sys_accept4 |
298 | 289 common signalfd4 sys_signalfd4 | 300 | 289 common signalfd4 __x64_sys_signalfd4 |
299 | 290 common eventfd2 sys_eventfd2 | 301 | 290 common eventfd2 __x64_sys_eventfd2 |
300 | 291 common epoll_create1 sys_epoll_create1 | 302 | 291 common epoll_create1 __x64_sys_epoll_create1 |
301 | 292 common dup3 sys_dup3 | 303 | 292 common dup3 __x64_sys_dup3 |
302 | 293 common pipe2 sys_pipe2 | 304 | 293 common pipe2 __x64_sys_pipe2 |
303 | 294 common inotify_init1 sys_inotify_init1 | 305 | 294 common inotify_init1 __x64_sys_inotify_init1 |
304 | 295 64 preadv sys_preadv | 306 | 295 64 preadv __x64_sys_preadv |
305 | 296 64 pwritev sys_pwritev | 307 | 296 64 pwritev __x64_sys_pwritev |
306 | 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo | 308 | 297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo |
307 | 298 common perf_event_open sys_perf_event_open | 309 | 298 common perf_event_open __x64_sys_perf_event_open |
308 | 299 64 recvmmsg sys_recvmmsg | 310 | 299 64 recvmmsg __x64_sys_recvmmsg |
309 | 300 common fanotify_init sys_fanotify_init | 311 | 300 common fanotify_init __x64_sys_fanotify_init |
310 | 301 common fanotify_mark sys_fanotify_mark | 312 | 301 common fanotify_mark __x64_sys_fanotify_mark |
311 | 302 common prlimit64 sys_prlimit64 | 313 | 302 common prlimit64 __x64_sys_prlimit64 |
312 | 303 common name_to_handle_at sys_name_to_handle_at | 314 | 303 common name_to_handle_at __x64_sys_name_to_handle_at |
313 | 304 common open_by_handle_at sys_open_by_handle_at | 315 | 304 common open_by_handle_at __x64_sys_open_by_handle_at |
314 | 305 common clock_adjtime sys_clock_adjtime | 316 | 305 common clock_adjtime __x64_sys_clock_adjtime |
315 | 306 common syncfs sys_syncfs | 317 | 306 common syncfs __x64_sys_syncfs |
316 | 307 64 sendmmsg sys_sendmmsg | 318 | 307 64 sendmmsg __x64_sys_sendmmsg |
317 | 308 common setns sys_setns | 319 | 308 common setns __x64_sys_setns |
318 | 309 common getcpu sys_getcpu | 320 | 309 common getcpu __x64_sys_getcpu |
319 | 310 64 process_vm_readv sys_process_vm_readv | 321 | 310 64 process_vm_readv __x64_sys_process_vm_readv |
320 | 311 64 process_vm_writev sys_process_vm_writev | 322 | 311 64 process_vm_writev __x64_sys_process_vm_writev |
321 | 312 common kcmp sys_kcmp | 323 | 312 common kcmp __x64_sys_kcmp |
322 | 313 common finit_module sys_finit_module | 324 | 313 common finit_module __x64_sys_finit_module |
323 | 314 common sched_setattr sys_sched_setattr | 325 | 314 common sched_setattr __x64_sys_sched_setattr |
324 | 315 common sched_getattr sys_sched_getattr | 326 | 315 common sched_getattr __x64_sys_sched_getattr |
325 | 316 common renameat2 sys_renameat2 | 327 | 316 common renameat2 __x64_sys_renameat2 |
326 | 317 common seccomp sys_seccomp | 328 | 317 common seccomp __x64_sys_seccomp |
327 | 318 common getrandom sys_getrandom | 329 | 318 common getrandom __x64_sys_getrandom |
328 | 319 common memfd_create sys_memfd_create | 330 | 319 common memfd_create __x64_sys_memfd_create |
329 | 320 common kexec_file_load sys_kexec_file_load | 331 | 320 common kexec_file_load __x64_sys_kexec_file_load |
330 | 321 common bpf sys_bpf | 332 | 321 common bpf __x64_sys_bpf |
331 | 322 64 execveat sys_execveat/ptregs | 333 | 322 64 execveat __x64_sys_execveat/ptregs |
332 | 323 common userfaultfd sys_userfaultfd | 334 | 323 common userfaultfd __x64_sys_userfaultfd |
333 | 324 common membarrier sys_membarrier | 335 | 324 common membarrier __x64_sys_membarrier |
334 | 325 common mlock2 sys_mlock2 | 336 | 325 common mlock2 __x64_sys_mlock2 |
335 | 326 common copy_file_range sys_copy_file_range | 337 | 326 common copy_file_range __x64_sys_copy_file_range |
336 | 327 64 preadv2 sys_preadv2 | 338 | 327 64 preadv2 __x64_sys_preadv2 |
337 | 328 64 pwritev2 sys_pwritev2 | 339 | 328 64 pwritev2 __x64_sys_pwritev2 |
338 | 329 common pkey_mprotect sys_pkey_mprotect | 340 | 329 common pkey_mprotect __x64_sys_pkey_mprotect |
339 | 330 common pkey_alloc sys_pkey_alloc | 341 | 330 common pkey_alloc __x64_sys_pkey_alloc |
340 | 331 common pkey_free sys_pkey_free | 342 | 331 common pkey_free __x64_sys_pkey_free |
341 | 332 common statx sys_statx | 343 | 332 common statx __x64_sys_statx |
342 | 344 | ||
343 | # | 345 | # |
344 | # x32-specific system call numbers start at 512 to avoid cache impact | 346 | # x32-specific system call numbers start at 512 to avoid cache impact |
345 | # for native 64-bit operation. | 347 | # for native 64-bit operation. The __x32_compat_sys stubs are created |
348 | # on-the-fly for compat_sys_*() compatibility system calls if X86_X32 | ||
349 | # is defined. | ||
346 | # | 350 | # |
347 | 512 x32 rt_sigaction compat_sys_rt_sigaction | 351 | 512 x32 rt_sigaction __x32_compat_sys_rt_sigaction |
348 | 513 x32 rt_sigreturn sys32_x32_rt_sigreturn | 352 | 513 x32 rt_sigreturn sys32_x32_rt_sigreturn |
349 | 514 x32 ioctl compat_sys_ioctl | 353 | 514 x32 ioctl __x32_compat_sys_ioctl |
350 | 515 x32 readv compat_sys_readv | 354 | 515 x32 readv __x32_compat_sys_readv |
351 | 516 x32 writev compat_sys_writev | 355 | 516 x32 writev __x32_compat_sys_writev |
352 | 517 x32 recvfrom compat_sys_recvfrom | 356 | 517 x32 recvfrom __x32_compat_sys_recvfrom |
353 | 518 x32 sendmsg compat_sys_sendmsg | 357 | 518 x32 sendmsg __x32_compat_sys_sendmsg |
354 | 519 x32 recvmsg compat_sys_recvmsg | 358 | 519 x32 recvmsg __x32_compat_sys_recvmsg |
355 | 520 x32 execve compat_sys_execve/ptregs | 359 | 520 x32 execve __x32_compat_sys_execve/ptregs |
356 | 521 x32 ptrace compat_sys_ptrace | 360 | 521 x32 ptrace __x32_compat_sys_ptrace |
357 | 522 x32 rt_sigpending compat_sys_rt_sigpending | 361 | 522 x32 rt_sigpending __x32_compat_sys_rt_sigpending |
358 | 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait | 362 | 523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait |
359 | 524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo | 363 | 524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo |
360 | 525 x32 sigaltstack compat_sys_sigaltstack | 364 | 525 x32 sigaltstack __x32_compat_sys_sigaltstack |
361 | 526 x32 timer_create compat_sys_timer_create | 365 | 526 x32 timer_create __x32_compat_sys_timer_create |
362 | 527 x32 mq_notify compat_sys_mq_notify | 366 | 527 x32 mq_notify __x32_compat_sys_mq_notify |
363 | 528 x32 kexec_load compat_sys_kexec_load | 367 | 528 x32 kexec_load __x32_compat_sys_kexec_load |
364 | 529 x32 waitid compat_sys_waitid | 368 | 529 x32 waitid __x32_compat_sys_waitid |
365 | 530 x32 set_robust_list compat_sys_set_robust_list | 369 | 530 x32 set_robust_list __x32_compat_sys_set_robust_list |
366 | 531 x32 get_robust_list compat_sys_get_robust_list | 370 | 531 x32 get_robust_list __x32_compat_sys_get_robust_list |
367 | 532 x32 vmsplice compat_sys_vmsplice | 371 | 532 x32 vmsplice __x32_compat_sys_vmsplice |
368 | 533 x32 move_pages compat_sys_move_pages | 372 | 533 x32 move_pages __x32_compat_sys_move_pages |
369 | 534 x32 preadv compat_sys_preadv64 | 373 | 534 x32 preadv __x32_compat_sys_preadv64 |
370 | 535 x32 pwritev compat_sys_pwritev64 | 374 | 535 x32 pwritev __x32_compat_sys_pwritev64 |
371 | 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | 375 | 536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo |
372 | 537 x32 recvmmsg compat_sys_recvmmsg | 376 | 537 x32 recvmmsg __x32_compat_sys_recvmmsg |
373 | 538 x32 sendmmsg compat_sys_sendmmsg | 377 | 538 x32 sendmmsg __x32_compat_sys_sendmmsg |
374 | 539 x32 process_vm_readv compat_sys_process_vm_readv | 378 | 539 x32 process_vm_readv __x32_compat_sys_process_vm_readv |
375 | 540 x32 process_vm_writev compat_sys_process_vm_writev | 379 | 540 x32 process_vm_writev __x32_compat_sys_process_vm_writev |
376 | 541 x32 setsockopt compat_sys_setsockopt | 380 | 541 x32 setsockopt __x32_compat_sys_setsockopt |
377 | 542 x32 getsockopt compat_sys_getsockopt | 381 | 542 x32 getsockopt __x32_compat_sys_getsockopt |
378 | 543 x32 io_setup compat_sys_io_setup | 382 | 543 x32 io_setup __x32_compat_sys_io_setup |
379 | 544 x32 io_submit compat_sys_io_submit | 383 | 544 x32 io_submit __x32_compat_sys_io_submit |
380 | 545 x32 execveat compat_sys_execveat/ptregs | 384 | 545 x32 execveat __x32_compat_sys_execveat/ptregs |
381 | 546 x32 preadv2 compat_sys_preadv64v2 | 385 | 546 x32 preadv2 __x32_compat_sys_preadv64v2 |
382 | 547 x32 pwritev2 compat_sys_pwritev64v2 | 386 | 547 x32 pwritev2 __x32_compat_sys_pwritev64v2 |
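For context on the table above: the last column now names the arch-prefixed entry point that the x86-64 syscall machinery dispatches to, so common/64 rows point at __x64_sys_*() stubs while x32 rows point at __x32_compat_sys_*() wrappers (generated only when X86_X32 is enabled, per the comment). A rough, illustrative sketch of how one row ends up in the dispatch table; the real array in arch/x86/entry/syscall_64.c is filled from a generated header, not written out by hand like this:

  /* Illustrative only -- names and layout simplified. */
  asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max + 1] = {
          [0 ... __NR_syscall_max] = &sys_ni_syscall,  /* default: -ENOSYS */
          [186] = __x64_sys_gettid,  /* "186  common  gettid  __x64_sys_gettid" */
          [200] = __x64_sys_tkill,   /* "200  common  tkill   __x64_sys_tkill"  */
          /* ...one initializer per row of the table above... */
  };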
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 4aca13f23b9d..1c41b4eaf73c 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c | |||
@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv) | |||
439 | #ifdef HAVE_LIBELF_SUPPORT | 439 | #ifdef HAVE_LIBELF_SUPPORT |
440 | "probe", | 440 | "probe", |
441 | #endif | 441 | #endif |
442 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) | 442 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) |
443 | "trace", | 443 | "trace", |
444 | #endif | 444 | #endif |
445 | NULL }; | 445 | NULL }; |
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 506564651cda..57393e94d156 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c | |||
@@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) | |||
83 | }; | 83 | }; |
84 | 84 | ||
85 | argc = parse_options(argc, argv, options, record_mem_usage, | 85 | argc = parse_options(argc, argv, options, record_mem_usage, |
86 | PARSE_OPT_STOP_AT_NON_OPTION); | 86 | PARSE_OPT_KEEP_UNKNOWN); |
87 | 87 | ||
88 | rec_argc = argc + 9; /* max number of arguments */ | 88 | rec_argc = argc + 9; /* max number of arguments */ |
89 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | 89 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); |
@@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv) | |||
436 | } | 436 | } |
437 | 437 | ||
438 | argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, | 438 | argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, |
439 | mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); | 439 | mem_usage, PARSE_OPT_KEEP_UNKNOWN); |
440 | 440 | ||
441 | if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) | 441 | if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) |
442 | usage_with_options(mem_usage, mem_options); | 442 | usage_with_options(mem_usage, mem_options); |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 313c42423393..e0a9845b6cbc 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample, | |||
657 | break; | 657 | break; |
658 | case PERF_RECORD_SWITCH: | 658 | case PERF_RECORD_SWITCH: |
659 | case PERF_RECORD_SWITCH_CPU_WIDE: | 659 | case PERF_RECORD_SWITCH_CPU_WIDE: |
660 | if (has(SWITCH_OUT)) | 660 | if (has(SWITCH_OUT)) { |
661 | ret += fprintf(fp, "S"); | 661 | ret += fprintf(fp, "S"); |
662 | if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) | ||
663 | ret += fprintf(fp, "p"); | ||
664 | } | ||
662 | default: | 665 | default: |
663 | break; | 666 | break; |
664 | } | 667 | } |
@@ -2801,11 +2804,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array) | |||
2801 | for_each_lang(scripts_path, scripts_dir, lang_dirent) { | 2804 | for_each_lang(scripts_path, scripts_dir, lang_dirent) { |
2802 | scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, | 2805 | scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, |
2803 | lang_dirent->d_name); | 2806 | lang_dirent->d_name); |
2804 | #ifdef NO_LIBPERL | 2807 | #ifndef HAVE_LIBPERL_SUPPORT |
2805 | if (strstr(lang_path, "perl")) | 2808 | if (strstr(lang_path, "perl")) |
2806 | continue; | 2809 | continue; |
2807 | #endif | 2810 | #endif |
2808 | #ifdef NO_LIBPYTHON | 2811 | #ifndef HAVE_LIBPYTHON_SUPPORT |
2809 | if (strstr(lang_path, "python")) | 2812 | if (strstr(lang_path, "python")) |
2810 | continue; | 2813 | continue; |
2811 | #endif | 2814 | #endif |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f5c454855908..147a27e8c937 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -1943,7 +1943,8 @@ static const struct option stat_options[] = { | |||
1943 | OPT_STRING(0, "post", &post_cmd, "command", | 1943 | OPT_STRING(0, "post", &post_cmd, "command", |
1944 | "command to run after to the measured command"), | 1944 | "command to run after to the measured command"), |
1945 | OPT_UINTEGER('I', "interval-print", &stat_config.interval, | 1945 | OPT_UINTEGER('I', "interval-print", &stat_config.interval, |
1946 | "print counts at regular interval in ms (>= 10)"), | 1946 | "print counts at regular interval in ms " |
1947 | "(overhead is possible for values <= 100ms)"), | ||
1947 | OPT_INTEGER(0, "interval-count", &stat_config.times, | 1948 | OPT_INTEGER(0, "interval-count", &stat_config.times, |
1948 | "print counts for fixed number of times"), | 1949 | "print counts for fixed number of times"), |
1949 | OPT_UINTEGER(0, "timeout", &stat_config.timeout, | 1950 | OPT_UINTEGER(0, "timeout", &stat_config.timeout, |
@@ -2923,17 +2924,6 @@ int cmd_stat(int argc, const char **argv) | |||
2923 | } | 2924 | } |
2924 | } | 2925 | } |
2925 | 2926 | ||
2926 | if (interval && interval < 100) { | ||
2927 | if (interval < 10) { | ||
2928 | pr_err("print interval must be >= 10ms\n"); | ||
2929 | parse_options_usage(stat_usage, stat_options, "I", 1); | ||
2930 | goto out; | ||
2931 | } else | ||
2932 | pr_warning("print interval < 100ms. " | ||
2933 | "The overhead percentage could be high in some cases. " | ||
2934 | "Please proceed with caution.\n"); | ||
2935 | } | ||
2936 | |||
2937 | if (stat_config.times && interval) | 2927 | if (stat_config.times && interval) |
2938 | interval_count = true; | 2928 | interval_count = true; |
2939 | else if (stat_config.times && !interval) { | 2929 | else if (stat_config.times && !interval) { |
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c index 2abe3910d6b6..50df168be326 100644 --- a/tools/perf/builtin-version.c +++ b/tools/perf/builtin-version.c | |||
@@ -60,7 +60,10 @@ static void library_status(void) | |||
60 | STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); | 60 | STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); |
61 | STATUS(HAVE_GLIBC_SUPPORT, glibc); | 61 | STATUS(HAVE_GLIBC_SUPPORT, glibc); |
62 | STATUS(HAVE_GTK2_SUPPORT, gtk2); | 62 | STATUS(HAVE_GTK2_SUPPORT, gtk2); |
63 | #ifndef HAVE_SYSCALL_TABLE_SUPPORT | ||
63 | STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); | 64 | STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); |
65 | #endif | ||
66 | STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table); | ||
64 | STATUS(HAVE_LIBBFD_SUPPORT, libbfd); | 67 | STATUS(HAVE_LIBBFD_SUPPORT, libbfd); |
65 | STATUS(HAVE_LIBELF_SUPPORT, libelf); | 68 | STATUS(HAVE_LIBELF_SUPPORT, libelf); |
66 | STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); | 69 | STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); |
diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 1659029d03fc..20a08cb32332 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c | |||
@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = { | |||
73 | { "lock", cmd_lock, 0 }, | 73 | { "lock", cmd_lock, 0 }, |
74 | { "kvm", cmd_kvm, 0 }, | 74 | { "kvm", cmd_kvm, 0 }, |
75 | { "test", cmd_test, 0 }, | 75 | { "test", cmd_test, 0 }, |
76 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) | 76 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) |
77 | { "trace", cmd_trace, 0 }, | 77 | { "trace", cmd_trace, 0 }, |
78 | #endif | 78 | #endif |
79 | { "inject", cmd_inject, 0 }, | 79 | { "inject", cmd_inject, 0 }, |
@@ -491,7 +491,7 @@ int main(int argc, const char **argv) | |||
491 | argv[0] = cmd; | 491 | argv[0] = cmd; |
492 | } | 492 | } |
493 | if (strstarts(cmd, "trace")) { | 493 | if (strstarts(cmd, "trace")) { |
494 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) | 494 | #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) |
495 | setup_path(); | 495 | setup_path(); |
496 | argv[0] = "trace"; | 496 | argv[0] = "trace"; |
497 | return cmd_trace(argc, argv); | 497 | return cmd_trace(argc, argv); |
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c index e4123c1b0e88..1ca5106df5f1 100644 --- a/tools/perf/tests/bpf-script-example.c +++ b/tools/perf/tests/bpf-script-example.c | |||
@@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = { | |||
31 | .max_entries = 1, | 31 | .max_entries = 1, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | SEC("func=SyS_epoll_pwait") | 34 | SEC("func=do_epoll_wait") |
35 | int bpf_func__SyS_epoll_pwait(void *ctx) | 35 | int bpf_func__SyS_epoll_pwait(void *ctx) |
36 | { | 36 | { |
37 | int ind =0; | 37 | int ind =0; |
diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c index 3626924740d8..ff3ec8337f0a 100644 --- a/tools/perf/tests/bpf-script-test-kbuild.c +++ b/tools/perf/tests/bpf-script-test-kbuild.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #define SEC(NAME) __attribute__((section(NAME), used)) | 9 | #define SEC(NAME) __attribute__((section(NAME), used)) |
10 | 10 | ||
11 | #include <uapi/linux/fs.h> | 11 | #include <uapi/linux/fs.h> |
12 | #include <uapi/asm/ptrace.h> | ||
13 | 12 | ||
14 | SEC("func=vfs_llseek") | 13 | SEC("func=vfs_llseek") |
15 | int bpf_func__vfs_llseek(void *ctx) | 14 | int bpf_func__vfs_llseek(void *ctx) |
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 625f5a6772af..cac8f8889bc3 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c | |||
@@ -118,6 +118,7 @@ static struct test generic_tests[] = { | |||
118 | { | 118 | { |
119 | .desc = "Breakpoint accounting", | 119 | .desc = "Breakpoint accounting", |
120 | .func = test__bp_accounting, | 120 | .func = test__bp_accounting, |
121 | .is_supported = test__bp_signal_is_supported, | ||
121 | }, | 122 | }, |
122 | { | 123 | { |
123 | .desc = "Number of exit events of a simple workload", | 124 | .desc = "Number of exit events of a simple workload", |
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index bb8e6bcb0d96..0919b0793e5b 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c | |||
@@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse | |||
75 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); | 75 | snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); |
76 | evsels[i] = perf_evsel__newtp("syscalls", name); | 76 | evsels[i] = perf_evsel__newtp("syscalls", name); |
77 | if (IS_ERR(evsels[i])) { | 77 | if (IS_ERR(evsels[i])) { |
78 | pr_debug("perf_evsel__new\n"); | 78 | pr_debug("perf_evsel__new(%s)\n", name); |
79 | goto out_delete_evlist; | 79 | goto out_delete_evlist; |
80 | } | 80 | } |
81 | 81 | ||
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index 417e3ecfe9d7..9f68077b241b 100644 --- a/tools/perf/trace/beauty/mmap.c +++ b/tools/perf/trace/beauty/mmap.c | |||
@@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, | |||
54 | P_MMAP_FLAG(EXECUTABLE); | 54 | P_MMAP_FLAG(EXECUTABLE); |
55 | P_MMAP_FLAG(FILE); | 55 | P_MMAP_FLAG(FILE); |
56 | P_MMAP_FLAG(FIXED); | 56 | P_MMAP_FLAG(FIXED); |
57 | #ifdef MAP_FIXED_NOREPLACE | ||
58 | P_MMAP_FLAG(FIXED_NOREPLACE); | ||
59 | #endif | ||
57 | P_MMAP_FLAG(GROWSDOWN); | 60 | P_MMAP_FLAG(GROWSDOWN); |
58 | P_MMAP_FLAG(HUGETLB); | 61 | P_MMAP_FLAG(HUGETLB); |
59 | P_MMAP_FLAG(LOCKED); | 62 | P_MMAP_FLAG(LOCKED); |
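The new entry is guarded because P_MMAP_FLAG() pastes the MAP_##n token at preprocessing time, and MAP_FIXED_NOREPLACE only exists in v4.17-era uapi headers; without the #ifdef this file would fail to build against older headers. Roughly what the helper macro looks like (it is defined earlier in trace/beauty/mmap.c; exact formatting may differ):

  /* Print the flag's name and clear its bit so any leftover unknown bits
   * can be dumped separately afterwards. */
  #define P_MMAP_FLAG(n) \
          if (flags & MAP_##n) { \
                  printed += scnprintf(bf + printed, size - printed, \
                                       "%s%s", printed ? "|" : "", #n); \
                  flags &= ~MAP_##n; \
          }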
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 12c099a87f8b..3781d74088a7 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser, | |||
692 | "J Toggle showing number of jump sources on targets\n" | 692 | "J Toggle showing number of jump sources on targets\n" |
693 | "n Search next string\n" | 693 | "n Search next string\n" |
694 | "o Toggle disassembler output/simplified view\n" | 694 | "o Toggle disassembler output/simplified view\n" |
695 | "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" | ||
695 | "s Toggle source code view\n" | 696 | "s Toggle source code view\n" |
696 | "t Circulate percent, total period, samples view\n" | 697 | "t Circulate percent, total period, samples view\n" |
697 | "/ Search string\n" | 698 | "/ Search string\n" |
@@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser, | |||
719 | notes->options->use_offset = !notes->options->use_offset; | 720 | notes->options->use_offset = !notes->options->use_offset; |
720 | annotation__update_column_widths(notes); | 721 | annotation__update_column_widths(notes); |
721 | continue; | 722 | continue; |
723 | case 'O': | ||
724 | if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) | ||
725 | notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; | ||
726 | continue; | ||
722 | case 'j': | 727 | case 'j': |
723 | notes->options->jump_arrows = !notes->options->jump_arrows; | 728 | notes->options->jump_arrows = !notes->options->jump_arrows; |
724 | continue; | 729 | continue; |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 0eec06c105c6..e5f247247daa 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
2714 | "h/?/F1 Show this window\n" \ | 2714 | "h/?/F1 Show this window\n" \ |
2715 | "UP/DOWN/PGUP\n" \ | 2715 | "UP/DOWN/PGUP\n" \ |
2716 | "PGDN/SPACE Navigate\n" \ | 2716 | "PGDN/SPACE Navigate\n" \ |
2717 | "q/ESC/CTRL+C Exit browser\n\n" \ | 2717 | "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \ |
2718 | "For multiple event sessions:\n\n" \ | 2718 | "For multiple event sessions:\n\n" \ |
2719 | "TAB/UNTAB Switch events\n\n" \ | 2719 | "TAB/UNTAB Switch events\n\n" \ |
2720 | "For symbolic views (--sort has sym):\n\n" \ | 2720 | "For symbolic views (--sort has sym):\n\n" \ |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index fbad8dfbb186..536ee148bff8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -46,6 +46,7 @@ | |||
46 | struct annotation_options annotation__default_options = { | 46 | struct annotation_options annotation__default_options = { |
47 | .use_offset = true, | 47 | .use_offset = true, |
48 | .jump_arrows = true, | 48 | .jump_arrows = true, |
49 | .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, | ||
49 | }; | 50 | }; |
50 | 51 | ||
51 | const char *disassembler_style; | 52 | const char *disassembler_style; |
@@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati | |||
2512 | if (!notes->options->use_offset) { | 2513 | if (!notes->options->use_offset) { |
2513 | printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); | 2514 | printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); |
2514 | } else { | 2515 | } else { |
2515 | if (al->jump_sources) { | 2516 | if (al->jump_sources && |
2517 | notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { | ||
2516 | if (notes->options->show_nr_jumps) { | 2518 | if (notes->options->show_nr_jumps) { |
2517 | int prev; | 2519 | int prev; |
2518 | printed = scnprintf(bf, sizeof(bf), "%*d ", | 2520 | printed = scnprintf(bf, sizeof(bf), "%*d ", |
@@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati | |||
2523 | obj__printf(obj, bf); | 2525 | obj__printf(obj, bf); |
2524 | obj__set_color(obj, prev); | 2526 | obj__set_color(obj, prev); |
2525 | } | 2527 | } |
2526 | 2528 | print_addr: | |
2527 | printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", | 2529 | printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", |
2528 | notes->widths.target, addr); | 2530 | notes->widths.target, addr); |
2531 | } else if (ins__is_call(&disasm_line(al)->ins) && | ||
2532 | notes->options->offset_level >= ANNOTATION__OFFSET_CALL) { | ||
2533 | goto print_addr; | ||
2534 | } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { | ||
2535 | goto print_addr; | ||
2529 | } else { | 2536 | } else { |
2530 | printed = scnprintf(bf, sizeof(bf), "%-*s ", | 2537 | printed = scnprintf(bf, sizeof(bf), "%-*s ", |
2531 | notes->widths.addr, " "); | 2538 | notes->widths.addr, " "); |
@@ -2642,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes, | |||
2642 | */ | 2649 | */ |
2643 | static struct annotation_config { | 2650 | static struct annotation_config { |
2644 | const char *name; | 2651 | const char *name; |
2645 | bool *value; | 2652 | void *value; |
2646 | } annotation__configs[] = { | 2653 | } annotation__configs[] = { |
2647 | ANNOTATION__CFG(hide_src_code), | 2654 | ANNOTATION__CFG(hide_src_code), |
2648 | ANNOTATION__CFG(jump_arrows), | 2655 | ANNOTATION__CFG(jump_arrows), |
2656 | ANNOTATION__CFG(offset_level), | ||
2649 | ANNOTATION__CFG(show_linenr), | 2657 | ANNOTATION__CFG(show_linenr), |
2650 | ANNOTATION__CFG(show_nr_jumps), | 2658 | ANNOTATION__CFG(show_nr_jumps), |
2651 | ANNOTATION__CFG(show_nr_samples), | 2659 | ANNOTATION__CFG(show_nr_samples), |
@@ -2677,8 +2685,16 @@ static int annotation__config(const char *var, const char *value, | |||
2677 | 2685 | ||
2678 | if (cfg == NULL) | 2686 | if (cfg == NULL) |
2679 | pr_debug("%s variable unknown, ignoring...", var); | 2687 | pr_debug("%s variable unknown, ignoring...", var); |
2680 | else | 2688 | else if (strcmp(var, "annotate.offset_level") == 0) { |
2681 | *cfg->value = perf_config_bool(name, value); | 2689 | perf_config_int(cfg->value, name, value); |
2690 | |||
2691 | if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL) | ||
2692 | *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL; | ||
2693 | else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL) | ||
2694 | *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL; | ||
2695 | } else { | ||
2696 | *(bool *)cfg->value = perf_config_bool(name, value); | ||
2697 | } | ||
2682 | return 0; | 2698 | return 0; |
2683 | } | 2699 | } |
2684 | 2700 | ||
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index db8d09bea07e..f28a9e43421d 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h | |||
@@ -70,8 +70,17 @@ struct annotation_options { | |||
70 | show_nr_jumps, | 70 | show_nr_jumps, |
71 | show_nr_samples, | 71 | show_nr_samples, |
72 | show_total_period; | 72 | show_total_period; |
73 | u8 offset_level; | ||
73 | }; | 74 | }; |
74 | 75 | ||
76 | enum { | ||
77 | ANNOTATION__OFFSET_JUMP_TARGETS = 1, | ||
78 | ANNOTATION__OFFSET_CALL, | ||
79 | ANNOTATION__MAX_OFFSET_LEVEL, | ||
80 | }; | ||
81 | |||
82 | #define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS | ||
83 | |||
75 | extern struct annotation_options annotation__default_options; | 84 | extern struct annotation_options annotation__default_options; |
76 | 85 | ||
77 | struct annotation; | 86 | struct annotation; |
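Taken together, the annotate changes introduce a three-step "offset level": 1 shows raw addresses only on jump targets, 2 also on call instructions, 3 on every line. The TUI's new 'O' key cycles through the levels, and the value can be set as annotate.offset_level in the perf config, with out-of-range values clamped to the enum bounds above. A condensed restatement of the gating logic from the __annotation_line__write() hunk:

  /* Condensed, illustrative restatement of the hunk above. */
  static bool annotation__show_addr(const struct annotation_options *opts,
                                    const struct annotation_line *al, bool is_call)
  {
          if (al->jump_sources)
                  return opts->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS;
          if (is_call)
                  return opts->offset_level >= ANNOTATION__OFFSET_CALL;
          return opts->offset_level == ANNOTATION__MAX_OFFSET_LEVEL;
  }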
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 640af88331b4..c8b98fa22997 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | |||
@@ -1,6 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SPDX-License-Identifier: GPL-2.0 | ||
3 | * | ||
4 | * Copyright(C) 2015-2018 Linaro Limited. | 3 | * Copyright(C) 2015-2018 Linaro Limited. |
5 | * | 4 | * |
6 | * Author: Tor Jeremiassen <tor@ti.com> | 5 | * Author: Tor Jeremiassen <tor@ti.com> |
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 1b0d422373be..40020b1ca54f 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c | |||
@@ -1,6 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * SPDX-License-Identifier: GPL-2.0 | ||
3 | * | ||
4 | * Copyright(C) 2015-2018 Linaro Limited. | 3 | * Copyright(C) 2015-2018 Linaro Limited. |
5 | * | 4 | * |
6 | * Author: Tor Jeremiassen <tor@ti.com> | 5 | * Author: Tor Jeremiassen <tor@ti.com> |
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 5864d5dca616..37f8d48179ca 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h | |||
@@ -1,18 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | 3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. |
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | 4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | 5 | */ |
17 | 6 | ||
18 | #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ | 7 | #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index f0a6cbd033cc..98ff3a6a3d50 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) | |||
1421 | size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) | 1421 | size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) |
1422 | { | 1422 | { |
1423 | bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; | 1423 | bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; |
1424 | const char *in_out = out ? "OUT" : "IN "; | 1424 | const char *in_out = !out ? "IN " : |
1425 | !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ? | ||
1426 | "OUT " : "OUT preempt"; | ||
1425 | 1427 | ||
1426 | if (event->header.type == PERF_RECORD_SWITCH) | 1428 | if (event->header.type == PERF_RECORD_SWITCH) |
1427 | return fprintf(fp, " %s\n", in_out); | 1429 | return fprintf(fp, " %s\n", in_out); |
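This hunk and the earlier builtin-script.c one expose the new PERF_RECORD_MISC_SWITCH_OUT_PREEMPT bit: perf script appends a 'p' to its 'S' marker, and the raw event dump prints "OUT preempt" instead of "OUT". A compact restatement of the decoding:

  /* Compact restatement of the two hunks above (illustrative). */
  static const char *switch_misc_str(__u16 misc)
  {
          if (!(misc & PERF_RECORD_MISC_SWITCH_OUT))
                  return "IN ";
          return (misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
                 "OUT preempt" : "OUT ";
  }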
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1ac8d9236efd..3e87486c28fe 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -2870,8 +2870,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, | |||
2870 | #if defined(__i386__) || defined(__x86_64__) | 2870 | #if defined(__i386__) || defined(__x86_64__) |
2871 | if (evsel->attr.type == PERF_TYPE_HARDWARE) | 2871 | if (evsel->attr.type == PERF_TYPE_HARDWARE) |
2872 | return scnprintf(msg, size, "%s", | 2872 | return scnprintf(msg, size, "%s", |
2873 | "No hardware sampling interrupt available.\n" | 2873 | "No hardware sampling interrupt available.\n"); |
2874 | "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); | ||
2875 | #endif | 2874 | #endif |
2876 | break; | 2875 | break; |
2877 | case EBUSY: | 2876 | case EBUSY: |
@@ -2894,8 +2893,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, | |||
2894 | 2893 | ||
2895 | return scnprintf(msg, size, | 2894 | return scnprintf(msg, size, |
2896 | "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" | 2895 | "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" |
2897 | "/bin/dmesg may provide additional information.\n" | 2896 | "/bin/dmesg | grep -i perf may provide additional information.\n", |
2898 | "No CONFIG_PERF_EVENTS=y kernel support configured?", | ||
2899 | err, str_error_r(err, sbuf, sizeof(sbuf)), | 2897 | err, str_error_r(err, sbuf, sizeof(sbuf)), |
2900 | perf_evsel__name(evsel)); | 2898 | perf_evsel__name(evsel)); |
2901 | } | 2899 | } |
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh index ff17920a5ebc..c3cef36d4176 100755 --- a/tools/perf/util/generate-cmdlist.sh +++ b/tools/perf/util/generate-cmdlist.sh | |||
@@ -38,7 +38,7 @@ do | |||
38 | done | 38 | done |
39 | echo "#endif /* HAVE_LIBELF_SUPPORT */" | 39 | echo "#endif /* HAVE_LIBELF_SUPPORT */" |
40 | 40 | ||
41 | echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" | 41 | echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)" |
42 | sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | | 42 | sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | |
43 | sort | | 43 | sort | |
44 | while read cmd | 44 | while read cmd |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 121df1683c36..a8bff2178fbc 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) | |||
1320 | 1320 | ||
1321 | dir = opendir(path); | 1321 | dir = opendir(path); |
1322 | if (!dir) { | 1322 | if (!dir) { |
1323 | pr_warning("failed: can't open node sysfs data\n"); | 1323 | pr_debug2("%s: could't read %s, does this arch have topology information?\n", |
1324 | __func__, path); | ||
1324 | return -1; | 1325 | return -1; |
1325 | } | 1326 | } |
1326 | 1327 | ||
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 064bdcb7bd78..61a5e5027338 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
@@ -562,6 +562,12 @@ static int is_pmu_core(const char *name) | |||
562 | if (stat(path, &st) == 0) | 562 | if (stat(path, &st) == 0) |
563 | return 1; | 563 | return 1; |
564 | 564 | ||
565 | /* Look for cpu sysfs (specific to s390) */ | ||
566 | scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s", | ||
567 | sysfs, name); | ||
568 | if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5)) | ||
569 | return 1; | ||
570 | |||
565 | return 0; | 571 | return 0; |
566 | } | 572 | } |
567 | 573 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 62b2dd2253eb..1466814ebada 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void) | |||
2091 | 2091 | ||
2092 | int symbol__annotation_init(void) | 2092 | int symbol__annotation_init(void) |
2093 | { | 2093 | { |
2094 | if (symbol_conf.init_annotation) | ||
2095 | return 0; | ||
2096 | |||
2094 | if (symbol_conf.initialized) { | 2097 | if (symbol_conf.initialized) { |
2095 | pr_err("Annotation needs to be init before symbol__init()\n"); | 2098 | pr_err("Annotation needs to be init before symbol__init()\n"); |
2096 | return -1; | 2099 | return -1; |
2097 | } | 2100 | } |
2098 | 2101 | ||
2099 | if (symbol_conf.init_annotation) { | ||
2100 | pr_warning("Annotation being initialized multiple times\n"); | ||
2101 | return 0; | ||
2102 | } | ||
2103 | |||
2104 | symbol_conf.priv_size += sizeof(struct annotation); | 2102 | symbol_conf.priv_size += sizeof(struct annotation); |
2105 | symbol_conf.init_annotation = true; | 2103 | symbol_conf.init_annotation = true; |
2106 | return 0; | 2104 | return 0; |
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 895122d638dd..0ee7f568d60c 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <stdlib.h> | 17 | #include <stdlib.h> |
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | 19 | ||
20 | #ifdef HAVE_SYSCALL_TABLE | 20 | #ifdef HAVE_SYSCALL_TABLE_SUPPORT |
21 | #include <string.h> | 21 | #include <string.h> |
22 | #include "string2.h" | 22 | #include "string2.h" |
23 | #include "util.h" | 23 | #include "util.h" |
@@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g | |||
139 | return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); | 139 | return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); |
140 | } | 140 | } |
141 | 141 | ||
142 | #else /* HAVE_SYSCALL_TABLE */ | 142 | #else /* HAVE_SYSCALL_TABLE_SUPPORT */ |
143 | 143 | ||
144 | #include <libaudit.h> | 144 | #include <libaudit.h> |
145 | 145 | ||
@@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g | |||
176 | { | 176 | { |
177 | return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); | 177 | return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); |
178 | } | 178 | } |
179 | #endif /* HAVE_SYSCALL_TABLE */ | 179 | #endif /* HAVE_SYSCALL_TABLE_SUPPORT */ |
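With HAVE_SYSCALL_TABLE_SUPPORT defined, the same syscalltbl API is backed by the generated per-arch table instead of libaudit, so callers do not change. A hedged usage sketch, error handling omitted; the declarations live in tools/perf/util/syscalltbl.h:

  #include <stdio.h>
  #include "util/syscalltbl.h"   /* path within the perf source tree */

  static void syscalltbl_example(void)
  {
          struct syscalltbl *tbl = syscalltbl__new();
          int id = syscalltbl__id(tbl, "openat");   /* name -> number for this arch */

          printf("openat is syscall %d (%s)\n", id, syscalltbl__name(tbl, id));
          syscalltbl__delete(tbl);
  }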
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 0ac9077f62a2..b1e5c3a2b8e3 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | #ifdef NO_LIBPYTHON | 101 | #ifndef HAVE_LIBPYTHON_SUPPORT |
102 | void setup_python_scripting(void) | 102 | void setup_python_scripting(void) |
103 | { | 103 | { |
104 | register_python_scripting(&python_scripting_unsupported_ops); | 104 | register_python_scripting(&python_scripting_unsupported_ops); |
@@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) | |||
161 | } | 161 | } |
162 | } | 162 | } |
163 | 163 | ||
164 | #ifdef NO_LIBPERL | 164 | #ifndef HAVE_LIBPERL_SUPPORT |
165 | void setup_perl_scripting(void) | 165 | void setup_perl_scripting(void) |
166 | { | 166 | { |
167 | register_perl_scripting(&perl_scripting_unsupported_ops); | 167 | register_perl_scripting(&perl_scripting_unsupported_ops); |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index cb166be4918d..4ea385be528f 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -138,6 +138,7 @@ static u32 handle[] = { | |||
138 | }; | 138 | }; |
139 | 139 | ||
140 | static unsigned long dimm_fail_cmd_flags[NUM_DCR]; | 140 | static unsigned long dimm_fail_cmd_flags[NUM_DCR]; |
141 | static int dimm_fail_cmd_code[NUM_DCR]; | ||
141 | 142 | ||
142 | struct nfit_test_fw { | 143 | struct nfit_test_fw { |
143 | enum intel_fw_update_state state; | 144 | enum intel_fw_update_state state; |
@@ -892,8 +893,11 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) | |||
892 | if (i >= ARRAY_SIZE(handle)) | 893 | if (i >= ARRAY_SIZE(handle)) |
893 | return -ENXIO; | 894 | return -ENXIO; |
894 | 895 | ||
895 | if ((1 << func) & dimm_fail_cmd_flags[i]) | 896 | if ((1 << func) & dimm_fail_cmd_flags[i]) { |
897 | if (dimm_fail_cmd_code[i]) | ||
898 | return dimm_fail_cmd_code[i]; | ||
896 | return -EIO; | 899 | return -EIO; |
900 | } | ||
897 | 901 | ||
898 | return i; | 902 | return i; |
899 | } | 903 | } |
@@ -1162,12 +1166,12 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state) | |||
1162 | 1166 | ||
1163 | static void put_dimms(void *data) | 1167 | static void put_dimms(void *data) |
1164 | { | 1168 | { |
1165 | struct device **dimm_dev = data; | 1169 | struct nfit_test *t = data; |
1166 | int i; | 1170 | int i; |
1167 | 1171 | ||
1168 | for (i = 0; i < NUM_DCR; i++) | 1172 | for (i = 0; i < t->num_dcr; i++) |
1169 | if (dimm_dev[i]) | 1173 | if (t->dimm_dev[i]) |
1170 | device_unregister(dimm_dev[i]); | 1174 | device_unregister(t->dimm_dev[i]); |
1171 | } | 1175 | } |
1172 | 1176 | ||
1173 | static struct class *nfit_test_dimm; | 1177 | static struct class *nfit_test_dimm; |
@@ -1176,13 +1180,11 @@ static int dimm_name_to_id(struct device *dev) | |||
1176 | { | 1180 | { |
1177 | int dimm; | 1181 | int dimm; |
1178 | 1182 | ||
1179 | if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 | 1183 | if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) |
1180 | || dimm >= NUM_DCR || dimm < 0) | ||
1181 | return -ENXIO; | 1184 | return -ENXIO; |
1182 | return dimm; | 1185 | return dimm; |
1183 | } | 1186 | } |
1184 | 1187 | ||
1185 | |||
1186 | static ssize_t handle_show(struct device *dev, struct device_attribute *attr, | 1188 | static ssize_t handle_show(struct device *dev, struct device_attribute *attr, |
1187 | char *buf) | 1189 | char *buf) |
1188 | { | 1190 | { |
@@ -1191,7 +1193,7 @@ static ssize_t handle_show(struct device *dev, struct device_attribute *attr, | |||
1191 | if (dimm < 0) | 1193 | if (dimm < 0) |
1192 | return dimm; | 1194 | return dimm; |
1193 | 1195 | ||
1194 | return sprintf(buf, "%#x", handle[dimm]); | 1196 | return sprintf(buf, "%#x\n", handle[dimm]); |
1195 | } | 1197 | } |
1196 | DEVICE_ATTR_RO(handle); | 1198 | DEVICE_ATTR_RO(handle); |
1197 | 1199 | ||
@@ -1225,8 +1227,39 @@ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, | |||
1225 | } | 1227 | } |
1226 | static DEVICE_ATTR_RW(fail_cmd); | 1228 | static DEVICE_ATTR_RW(fail_cmd); |
1227 | 1229 | ||
1230 | static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, | ||
1231 | char *buf) | ||
1232 | { | ||
1233 | int dimm = dimm_name_to_id(dev); | ||
1234 | |||
1235 | if (dimm < 0) | ||
1236 | return dimm; | ||
1237 | |||
1238 | return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); | ||
1239 | } | ||
1240 | |||
1241 | static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr, | ||
1242 | const char *buf, size_t size) | ||
1243 | { | ||
1244 | int dimm = dimm_name_to_id(dev); | ||
1245 | unsigned long val; | ||
1246 | ssize_t rc; | ||
1247 | |||
1248 | if (dimm < 0) | ||
1249 | return dimm; | ||
1250 | |||
1251 | rc = kstrtol(buf, 0, &val); | ||
1252 | if (rc) | ||
1253 | return rc; | ||
1254 | |||
1255 | dimm_fail_cmd_code[dimm] = val; | ||
1256 | return size; | ||
1257 | } | ||
1258 | static DEVICE_ATTR_RW(fail_cmd_code); | ||
1259 | |||
1228 | static struct attribute *nfit_test_dimm_attributes[] = { | 1260 | static struct attribute *nfit_test_dimm_attributes[] = { |
1229 | &dev_attr_fail_cmd.attr, | 1261 | &dev_attr_fail_cmd.attr, |
1262 | &dev_attr_fail_cmd_code.attr, | ||
1230 | &dev_attr_handle.attr, | 1263 | &dev_attr_handle.attr, |
1231 | NULL, | 1264 | NULL, |
1232 | }; | 1265 | }; |
@@ -1240,6 +1273,23 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { | |||
1240 | NULL, | 1273 | NULL, |
1241 | }; | 1274 | }; |
1242 | 1275 | ||
1276 | static int nfit_test_dimm_init(struct nfit_test *t) | ||
1277 | { | ||
1278 | int i; | ||
1279 | |||
1280 | if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) | ||
1281 | return -ENOMEM; | ||
1282 | for (i = 0; i < t->num_dcr; i++) { | ||
1283 | t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, | ||
1284 | &t->pdev.dev, 0, NULL, | ||
1285 | nfit_test_dimm_attribute_groups, | ||
1286 | "test_dimm%d", i + t->dcr_idx); | ||
1287 | if (!t->dimm_dev[i]) | ||
1288 | return -ENOMEM; | ||
1289 | } | ||
1290 | return 0; | ||
1291 | } | ||
1292 | |||
1243 | static void smart_init(struct nfit_test *t) | 1293 | static void smart_init(struct nfit_test *t) |
1244 | { | 1294 | { |
1245 | int i; | 1295 | int i; |
@@ -1335,17 +1385,8 @@ static int nfit_test0_alloc(struct nfit_test *t) | |||
1335 | if (!t->_fit) | 1385 | if (!t->_fit) |
1336 | return -ENOMEM; | 1386 | return -ENOMEM; |
1337 | 1387 | ||
1338 | if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) | 1388 | if (nfit_test_dimm_init(t)) |
1339 | return -ENOMEM; | 1389 | return -ENOMEM; |
1340 | for (i = 0; i < NUM_DCR; i++) { | ||
1341 | t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, | ||
1342 | &t->pdev.dev, 0, NULL, | ||
1343 | nfit_test_dimm_attribute_groups, | ||
1344 | "test_dimm%d", i); | ||
1345 | if (!t->dimm_dev[i]) | ||
1346 | return -ENOMEM; | ||
1347 | } | ||
1348 | |||
1349 | smart_init(t); | 1390 | smart_init(t); |
1350 | return ars_state_init(&t->pdev.dev, &t->ars_state); | 1391 | return ars_state_init(&t->pdev.dev, &t->ars_state); |
1351 | } | 1392 | } |
@@ -1377,6 +1418,8 @@ static int nfit_test1_alloc(struct nfit_test *t) | |||
1377 | if (!t->spa_set[1]) | 1418 | if (!t->spa_set[1]) |
1378 | return -ENOMEM; | 1419 | return -ENOMEM; |
1379 | 1420 | ||
1421 | if (nfit_test_dimm_init(t)) | ||
1422 | return -ENOMEM; | ||
1380 | smart_init(t); | 1423 | smart_init(t); |
1381 | return ars_state_init(&t->pdev.dev, &t->ars_state); | 1424 | return ars_state_init(&t->pdev.dev, &t->ars_state); |
1382 | } | 1425 | } |
@@ -2222,6 +2265,9 @@ static void nfit_test1_setup(struct nfit_test *t) | |||
2222 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); | 2265 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); |
2223 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); | 2266 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); |
2224 | set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); | 2267 | set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); |
2268 | set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); | ||
2269 | set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
2270 | set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
2225 | } | 2271 | } |
2226 | 2272 | ||
2227 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, | 2273 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, |
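The new fail_cmd_code attribute lets a test choose the error returned when a DIMM command is failed via fail_cmd, instead of the hard-coded -EIO. A hedged usage sketch; the /sys/class/nfit_test_dimm/test_dimmN path is an assumption inferred from the class and "test_dimm%d" device names above, not something spelled out in the diff:

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Ask nfit_test DIMM 'dimm' to fail flagged commands with 'code' (e.g. -EACCES).
   * The sysfs path is assumed; adjust to wherever the test_dimm devices appear. */
  static int set_fail_cmd_code(int dimm, int code)
  {
          char path[128], buf[16];
          int fd, len, ret = 0;

          snprintf(path, sizeof(path),
                   "/sys/class/nfit_test_dimm/test_dimm%d/fail_cmd_code", dimm);
          fd = open(path, O_WRONLY);
          if (fd < 0)
                  return -1;
          len = snprintf(buf, sizeof(buf), "%d", code);
          if (write(fd, buf, len) != len)
                  ret = -1;
          close(fd);
          return ret;
  }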
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile index 4e6d09fb166f..5c7d7001ad37 100644 --- a/tools/testing/selftests/filesystems/Makefile +++ b/tools/testing/selftests/filesystems/Makefile | |||
@@ -1,8 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | TEST_PROGS := dnotify_test devpts_pts | ||
3 | all: $(TEST_PROGS) | ||
4 | 2 | ||
5 | include ../lib.mk | 3 | TEST_GEN_PROGS := devpts_pts |
4 | TEST_GEN_PROGS_EXTENDED := dnotify_test | ||
6 | 5 | ||
7 | clean: | 6 | include ../lib.mk |
8 | rm -fr $(TEST_PROGS) | ||
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index dc44de904797..2ddcc96ae456 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
@@ -4,17 +4,18 @@ top_srcdir = ../../../../ | |||
4 | UNAME_M := $(shell uname -m) | 4 | UNAME_M := $(shell uname -m) |
5 | 5 | ||
6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c | 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c |
7 | LIBKVM_x86_64 = lib/x86.c | 7 | LIBKVM_x86_64 = lib/x86.c lib/vmx.c |
8 | 8 | ||
9 | TEST_GEN_PROGS_x86_64 = set_sregs_test | 9 | TEST_GEN_PROGS_x86_64 = set_sregs_test |
10 | TEST_GEN_PROGS_x86_64 += sync_regs_test | 10 | TEST_GEN_PROGS_x86_64 += sync_regs_test |
11 | TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test | ||
11 | 12 | ||
12 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) | 13 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) |
13 | LIBKVM += $(LIBKVM_$(UNAME_M)) | 14 | LIBKVM += $(LIBKVM_$(UNAME_M)) |
14 | 15 | ||
15 | INSTALL_HDR_PATH = $(top_srcdir)/usr | 16 | INSTALL_HDR_PATH = $(top_srcdir)/usr |
16 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 17 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
17 | CFLAGS += -O2 -g -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) | 18 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) |
18 | 19 | ||
19 | # After inclusion, $(OUTPUT) is defined and | 20 | # After inclusion, $(OUTPUT) is defined and |
20 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 21 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 57974ad46373..637b7017b6ee 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h | |||
@@ -112,24 +112,27 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, | |||
112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, | 112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, |
113 | vm_paddr_t paddr_min, uint32_t memslot); | 113 | vm_paddr_t paddr_min, uint32_t memslot); |
114 | 114 | ||
115 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); | 115 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void); |
116 | void vcpu_set_cpuid( | 116 | void vcpu_set_cpuid( |
117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); | 117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); |
118 | 118 | ||
119 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void); | ||
120 | struct kvm_cpuid_entry2 * | 119 | struct kvm_cpuid_entry2 * |
121 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 120 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); |
122 | uint32_t index); | ||
123 | 121 | ||
124 | static inline struct kvm_cpuid_entry2 * | 122 | static inline struct kvm_cpuid_entry2 * |
125 | find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) | 123 | kvm_get_supported_cpuid_entry(uint32_t function) |
126 | { | 124 | { |
127 | return find_cpuid_index_entry(cpuid, function, 0); | 125 | return kvm_get_supported_cpuid_index(function, 0); |
128 | } | 126 | } |
129 | 127 | ||
130 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); | 128 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); |
131 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); | 129 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); |
132 | 130 | ||
131 | typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, | ||
132 | vm_paddr_t vmxon_paddr, | ||
133 | vm_vaddr_t vmcs_vaddr, | ||
134 | vm_paddr_t vmcs_paddr); | ||
135 | |||
133 | struct kvm_userspace_memory_region * | 136 | struct kvm_userspace_memory_region * |
134 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, | 137 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, |
135 | uint64_t end); | 138 | uint64_t end); |
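Editorial note on the kvm_util.h hunk above: the caller-allocated kvm_cpuid2 workflow is replaced by helpers that hand back KVM's supported CPUID directly. A minimal usage sketch, assuming only the declarations above; example_enable_vmx_cpuid() and EXAMPLE_VCPU_ID are hypothetical names, and the ECX bit 5 test corresponds to the CPUID_VMX definition added in vmx.h below:

    /* Sketch: fetch KVM's supported CPUID once and hand it to a vCPU. */
    #include "kvm_util.h"

    #define EXAMPLE_VCPU_ID 0

    static void example_enable_vmx_cpuid(struct kvm_vm *vm)
    {
        struct kvm_cpuid_entry2 *entry;

        /* Leaf 1, subleaf 0: ECX bit 5 advertises VMX. */
        entry = kvm_get_supported_cpuid_entry(1);
        if (!(entry->ecx & (1 << 5)))
            return; /* host/KVM does not expose VMX */

        /* Mirror everything KVM supports into the guest's CPUID. */
        vcpu_set_cpuid(vm, EXAMPLE_VCPU_ID, kvm_get_supported_cpuid());
    }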
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h new file mode 100644 index 000000000000..6ed8499807fd --- /dev/null +++ b/tools/testing/selftests/kvm/include/vmx.h | |||
@@ -0,0 +1,494 @@ | |||
1 | /* | ||
2 | * tools/testing/selftests/kvm/include/vmx.h | ||
3 | * | ||
4 | * Copyright (C) 2018, Google LLC. | ||
5 | * | ||
6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef SELFTEST_KVM_VMX_H | ||
11 | #define SELFTEST_KVM_VMX_H | ||
12 | |||
13 | #include <stdint.h> | ||
14 | #include "x86.h" | ||
15 | |||
16 | #define CPUID_VMX_BIT 5 | ||
17 | |||
18 | #define CPUID_VMX (1 << 5) | ||
19 | |||
20 | /* | ||
21 | * Definitions of Primary Processor-Based VM-Execution Controls. | ||
22 | */ | ||
23 | #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 | ||
24 | #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 | ||
25 | #define CPU_BASED_HLT_EXITING 0x00000080 | ||
26 | #define CPU_BASED_INVLPG_EXITING 0x00000200 | ||
27 | #define CPU_BASED_MWAIT_EXITING 0x00000400 | ||
28 | #define CPU_BASED_RDPMC_EXITING 0x00000800 | ||
29 | #define CPU_BASED_RDTSC_EXITING 0x00001000 | ||
30 | #define CPU_BASED_CR3_LOAD_EXITING 0x00008000 | ||
31 | #define CPU_BASED_CR3_STORE_EXITING 0x00010000 | ||
32 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 | ||
33 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 | ||
34 | #define CPU_BASED_TPR_SHADOW 0x00200000 | ||
35 | #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 | ||
36 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 | ||
37 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 | ||
38 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 | ||
39 | #define CPU_BASED_MONITOR_TRAP 0x08000000 | ||
40 | #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 | ||
41 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | ||
42 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | ||
43 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | ||
44 | |||
45 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 | ||
46 | |||
47 | /* | ||
48 | * Definitions of Secondary Processor-Based VM-Execution Controls. | ||
49 | */ | ||
50 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | ||
51 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | ||
52 | #define SECONDARY_EXEC_DESC 0x00000004 | ||
53 | #define SECONDARY_EXEC_RDTSCP 0x00000008 | ||
54 | #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 | ||
55 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | ||
56 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | ||
57 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | ||
58 | #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 | ||
59 | #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 | ||
60 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
61 | #define SECONDARY_EXEC_RDRAND_EXITING 0x00000800 | ||
62 | #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 | ||
63 | #define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 | ||
64 | #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 | ||
65 | #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 | ||
66 | #define SECONDARY_EXEC_ENABLE_PML 0x00020000 | ||
67 | #define SECONDARY_EPT_VE 0x00040000 | ||
68 | #define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000 | ||
69 | #define SECONDARY_EXEC_TSC_SCALING 0x02000000 | ||
70 | |||
71 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | ||
72 | #define PIN_BASED_NMI_EXITING 0x00000008 | ||
73 | #define PIN_BASED_VIRTUAL_NMIS 0x00000020 | ||
74 | #define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 | ||
75 | #define PIN_BASED_POSTED_INTR 0x00000080 | ||
76 | |||
77 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 | ||
78 | |||
79 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 | ||
80 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | ||
81 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 | ||
82 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | ||
83 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 | ||
84 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 | ||
85 | #define VM_EXIT_SAVE_IA32_EFER 0x00100000 | ||
86 | #define VM_EXIT_LOAD_IA32_EFER 0x00200000 | ||
87 | #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 | ||
88 | |||
89 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff | ||
90 | |||
91 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 | ||
92 | #define VM_ENTRY_IA32E_MODE 0x00000200 | ||
93 | #define VM_ENTRY_SMM 0x00000400 | ||
94 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | ||
95 | #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 | ||
96 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 | ||
97 | #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 | ||
98 | |||
99 | #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff | ||
100 | |||
101 | #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f | ||
102 | #define VMX_MISC_SAVE_EFER_LMA 0x00000020 | ||
103 | |||
104 | #define EXIT_REASON_FAILED_VMENTRY 0x80000000 | ||
105 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
106 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
107 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
108 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
109 | #define EXIT_REASON_NMI_WINDOW 8 | ||
110 | #define EXIT_REASON_TASK_SWITCH 9 | ||
111 | #define EXIT_REASON_CPUID 10 | ||
112 | #define EXIT_REASON_HLT 12 | ||
113 | #define EXIT_REASON_INVD 13 | ||
114 | #define EXIT_REASON_INVLPG 14 | ||
115 | #define EXIT_REASON_RDPMC 15 | ||
116 | #define EXIT_REASON_RDTSC 16 | ||
117 | #define EXIT_REASON_VMCALL 18 | ||
118 | #define EXIT_REASON_VMCLEAR 19 | ||
119 | #define EXIT_REASON_VMLAUNCH 20 | ||
120 | #define EXIT_REASON_VMPTRLD 21 | ||
121 | #define EXIT_REASON_VMPTRST 22 | ||
122 | #define EXIT_REASON_VMREAD 23 | ||
123 | #define EXIT_REASON_VMRESUME 24 | ||
124 | #define EXIT_REASON_VMWRITE 25 | ||
125 | #define EXIT_REASON_VMOFF 26 | ||
126 | #define EXIT_REASON_VMON 27 | ||
127 | #define EXIT_REASON_CR_ACCESS 28 | ||
128 | #define EXIT_REASON_DR_ACCESS 29 | ||
129 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
130 | #define EXIT_REASON_MSR_READ 31 | ||
131 | #define EXIT_REASON_MSR_WRITE 32 | ||
132 | #define EXIT_REASON_INVALID_STATE 33 | ||
133 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
134 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
135 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
136 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
137 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
138 | #define EXIT_REASON_APIC_ACCESS 44 | ||
139 | #define EXIT_REASON_EOI_INDUCED 45 | ||
140 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
141 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
142 | #define EXIT_REASON_INVEPT 50 | ||
143 | #define EXIT_REASON_RDTSCP 51 | ||
144 | #define EXIT_REASON_PREEMPTION_TIMER 52 | ||
145 | #define EXIT_REASON_INVVPID 53 | ||
146 | #define EXIT_REASON_WBINVD 54 | ||
147 | #define EXIT_REASON_XSETBV 55 | ||
148 | #define EXIT_REASON_APIC_WRITE 56 | ||
149 | #define EXIT_REASON_INVPCID 58 | ||
150 | #define EXIT_REASON_PML_FULL 62 | ||
151 | #define EXIT_REASON_XSAVES 63 | ||
152 | #define EXIT_REASON_XRSTORS 64 | ||
153 | #define LAST_EXIT_REASON 64 | ||
154 | |||
155 | enum vmcs_field { | ||
156 | VIRTUAL_PROCESSOR_ID = 0x00000000, | ||
157 | POSTED_INTR_NV = 0x00000002, | ||
158 | GUEST_ES_SELECTOR = 0x00000800, | ||
159 | GUEST_CS_SELECTOR = 0x00000802, | ||
160 | GUEST_SS_SELECTOR = 0x00000804, | ||
161 | GUEST_DS_SELECTOR = 0x00000806, | ||
162 | GUEST_FS_SELECTOR = 0x00000808, | ||
163 | GUEST_GS_SELECTOR = 0x0000080a, | ||
164 | GUEST_LDTR_SELECTOR = 0x0000080c, | ||
165 | GUEST_TR_SELECTOR = 0x0000080e, | ||
166 | GUEST_INTR_STATUS = 0x00000810, | ||
167 | GUEST_PML_INDEX = 0x00000812, | ||
168 | HOST_ES_SELECTOR = 0x00000c00, | ||
169 | HOST_CS_SELECTOR = 0x00000c02, | ||
170 | HOST_SS_SELECTOR = 0x00000c04, | ||
171 | HOST_DS_SELECTOR = 0x00000c06, | ||
172 | HOST_FS_SELECTOR = 0x00000c08, | ||
173 | HOST_GS_SELECTOR = 0x00000c0a, | ||
174 | HOST_TR_SELECTOR = 0x00000c0c, | ||
175 | IO_BITMAP_A = 0x00002000, | ||
176 | IO_BITMAP_A_HIGH = 0x00002001, | ||
177 | IO_BITMAP_B = 0x00002002, | ||
178 | IO_BITMAP_B_HIGH = 0x00002003, | ||
179 | MSR_BITMAP = 0x00002004, | ||
180 | MSR_BITMAP_HIGH = 0x00002005, | ||
181 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, | ||
182 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, | ||
183 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, | ||
184 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, | ||
185 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, | ||
186 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, | ||
187 | PML_ADDRESS = 0x0000200e, | ||
188 | PML_ADDRESS_HIGH = 0x0000200f, | ||
189 | TSC_OFFSET = 0x00002010, | ||
190 | TSC_OFFSET_HIGH = 0x00002011, | ||
191 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, | ||
192 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, | ||
193 | APIC_ACCESS_ADDR = 0x00002014, | ||
194 | APIC_ACCESS_ADDR_HIGH = 0x00002015, | ||
195 | POSTED_INTR_DESC_ADDR = 0x00002016, | ||
196 | POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, | ||
197 | EPT_POINTER = 0x0000201a, | ||
198 | EPT_POINTER_HIGH = 0x0000201b, | ||
199 | EOI_EXIT_BITMAP0 = 0x0000201c, | ||
200 | EOI_EXIT_BITMAP0_HIGH = 0x0000201d, | ||
201 | EOI_EXIT_BITMAP1 = 0x0000201e, | ||
202 | EOI_EXIT_BITMAP1_HIGH = 0x0000201f, | ||
203 | EOI_EXIT_BITMAP2 = 0x00002020, | ||
204 | EOI_EXIT_BITMAP2_HIGH = 0x00002021, | ||
205 | EOI_EXIT_BITMAP3 = 0x00002022, | ||
206 | EOI_EXIT_BITMAP3_HIGH = 0x00002023, | ||
207 | VMREAD_BITMAP = 0x00002026, | ||
208 | VMREAD_BITMAP_HIGH = 0x00002027, | ||
209 | VMWRITE_BITMAP = 0x00002028, | ||
210 | VMWRITE_BITMAP_HIGH = 0x00002029, | ||
211 | XSS_EXIT_BITMAP = 0x0000202C, | ||
212 | XSS_EXIT_BITMAP_HIGH = 0x0000202D, | ||
213 | TSC_MULTIPLIER = 0x00002032, | ||
214 | TSC_MULTIPLIER_HIGH = 0x00002033, | ||
215 | GUEST_PHYSICAL_ADDRESS = 0x00002400, | ||
216 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, | ||
217 | VMCS_LINK_POINTER = 0x00002800, | ||
218 | VMCS_LINK_POINTER_HIGH = 0x00002801, | ||
219 | GUEST_IA32_DEBUGCTL = 0x00002802, | ||
220 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | ||
221 | GUEST_IA32_PAT = 0x00002804, | ||
222 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
223 | GUEST_IA32_EFER = 0x00002806, | ||
224 | GUEST_IA32_EFER_HIGH = 0x00002807, | ||
225 | GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, | ||
226 | GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, | ||
227 | GUEST_PDPTR0 = 0x0000280a, | ||
228 | GUEST_PDPTR0_HIGH = 0x0000280b, | ||
229 | GUEST_PDPTR1 = 0x0000280c, | ||
230 | GUEST_PDPTR1_HIGH = 0x0000280d, | ||
231 | GUEST_PDPTR2 = 0x0000280e, | ||
232 | GUEST_PDPTR2_HIGH = 0x0000280f, | ||
233 | GUEST_PDPTR3 = 0x00002810, | ||
234 | GUEST_PDPTR3_HIGH = 0x00002811, | ||
235 | GUEST_BNDCFGS = 0x00002812, | ||
236 | GUEST_BNDCFGS_HIGH = 0x00002813, | ||
237 | HOST_IA32_PAT = 0x00002c00, | ||
238 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
239 | HOST_IA32_EFER = 0x00002c02, | ||
240 | HOST_IA32_EFER_HIGH = 0x00002c03, | ||
241 | HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, | ||
242 | HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, | ||
243 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | ||
244 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | ||
245 | EXCEPTION_BITMAP = 0x00004004, | ||
246 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, | ||
247 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, | ||
248 | CR3_TARGET_COUNT = 0x0000400a, | ||
249 | VM_EXIT_CONTROLS = 0x0000400c, | ||
250 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, | ||
251 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, | ||
252 | VM_ENTRY_CONTROLS = 0x00004012, | ||
253 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, | ||
254 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, | ||
255 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, | ||
256 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | ||
257 | TPR_THRESHOLD = 0x0000401c, | ||
258 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | ||
259 | PLE_GAP = 0x00004020, | ||
260 | PLE_WINDOW = 0x00004022, | ||
261 | VM_INSTRUCTION_ERROR = 0x00004400, | ||
262 | VM_EXIT_REASON = 0x00004402, | ||
263 | VM_EXIT_INTR_INFO = 0x00004404, | ||
264 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, | ||
265 | IDT_VECTORING_INFO_FIELD = 0x00004408, | ||
266 | IDT_VECTORING_ERROR_CODE = 0x0000440a, | ||
267 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, | ||
268 | VMX_INSTRUCTION_INFO = 0x0000440e, | ||
269 | GUEST_ES_LIMIT = 0x00004800, | ||
270 | GUEST_CS_LIMIT = 0x00004802, | ||
271 | GUEST_SS_LIMIT = 0x00004804, | ||
272 | GUEST_DS_LIMIT = 0x00004806, | ||
273 | GUEST_FS_LIMIT = 0x00004808, | ||
274 | GUEST_GS_LIMIT = 0x0000480a, | ||
275 | GUEST_LDTR_LIMIT = 0x0000480c, | ||
276 | GUEST_TR_LIMIT = 0x0000480e, | ||
277 | GUEST_GDTR_LIMIT = 0x00004810, | ||
278 | GUEST_IDTR_LIMIT = 0x00004812, | ||
279 | GUEST_ES_AR_BYTES = 0x00004814, | ||
280 | GUEST_CS_AR_BYTES = 0x00004816, | ||
281 | GUEST_SS_AR_BYTES = 0x00004818, | ||
282 | GUEST_DS_AR_BYTES = 0x0000481a, | ||
283 | GUEST_FS_AR_BYTES = 0x0000481c, | ||
284 | GUEST_GS_AR_BYTES = 0x0000481e, | ||
285 | GUEST_LDTR_AR_BYTES = 0x00004820, | ||
286 | GUEST_TR_AR_BYTES = 0x00004822, | ||
287 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, | ||
288 | GUEST_ACTIVITY_STATE = 0x00004826, | ||
289 | GUEST_SYSENTER_CS = 0x0000482A, | ||
290 | VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, | ||
291 | HOST_IA32_SYSENTER_CS = 0x00004c00, | ||
292 | CR0_GUEST_HOST_MASK = 0x00006000, | ||
293 | CR4_GUEST_HOST_MASK = 0x00006002, | ||
294 | CR0_READ_SHADOW = 0x00006004, | ||
295 | CR4_READ_SHADOW = 0x00006006, | ||
296 | CR3_TARGET_VALUE0 = 0x00006008, | ||
297 | CR3_TARGET_VALUE1 = 0x0000600a, | ||
298 | CR3_TARGET_VALUE2 = 0x0000600c, | ||
299 | CR3_TARGET_VALUE3 = 0x0000600e, | ||
300 | EXIT_QUALIFICATION = 0x00006400, | ||
301 | GUEST_LINEAR_ADDRESS = 0x0000640a, | ||
302 | GUEST_CR0 = 0x00006800, | ||
303 | GUEST_CR3 = 0x00006802, | ||
304 | GUEST_CR4 = 0x00006804, | ||
305 | GUEST_ES_BASE = 0x00006806, | ||
306 | GUEST_CS_BASE = 0x00006808, | ||
307 | GUEST_SS_BASE = 0x0000680a, | ||
308 | GUEST_DS_BASE = 0x0000680c, | ||
309 | GUEST_FS_BASE = 0x0000680e, | ||
310 | GUEST_GS_BASE = 0x00006810, | ||
311 | GUEST_LDTR_BASE = 0x00006812, | ||
312 | GUEST_TR_BASE = 0x00006814, | ||
313 | GUEST_GDTR_BASE = 0x00006816, | ||
314 | GUEST_IDTR_BASE = 0x00006818, | ||
315 | GUEST_DR7 = 0x0000681a, | ||
316 | GUEST_RSP = 0x0000681c, | ||
317 | GUEST_RIP = 0x0000681e, | ||
318 | GUEST_RFLAGS = 0x00006820, | ||
319 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, | ||
320 | GUEST_SYSENTER_ESP = 0x00006824, | ||
321 | GUEST_SYSENTER_EIP = 0x00006826, | ||
322 | HOST_CR0 = 0x00006c00, | ||
323 | HOST_CR3 = 0x00006c02, | ||
324 | HOST_CR4 = 0x00006c04, | ||
325 | HOST_FS_BASE = 0x00006c06, | ||
326 | HOST_GS_BASE = 0x00006c08, | ||
327 | HOST_TR_BASE = 0x00006c0a, | ||
328 | HOST_GDTR_BASE = 0x00006c0c, | ||
329 | HOST_IDTR_BASE = 0x00006c0e, | ||
330 | HOST_IA32_SYSENTER_ESP = 0x00006c10, | ||
331 | HOST_IA32_SYSENTER_EIP = 0x00006c12, | ||
332 | HOST_RSP = 0x00006c14, | ||
333 | HOST_RIP = 0x00006c16, | ||
334 | }; | ||
335 | |||
336 | struct vmx_msr_entry { | ||
337 | uint32_t index; | ||
338 | uint32_t reserved; | ||
339 | uint64_t value; | ||
340 | } __attribute__ ((aligned(16))); | ||
341 | |||
342 | static inline int vmxon(uint64_t phys) | ||
343 | { | ||
344 | uint8_t ret; | ||
345 | |||
346 | __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" | ||
347 | : [ret]"=rm"(ret) | ||
348 | : [pa]"m"(phys) | ||
349 | : "cc", "memory"); | ||
350 | |||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static inline void vmxoff(void) | ||
355 | { | ||
356 | __asm__ __volatile__("vmxoff"); | ||
357 | } | ||
358 | |||
359 | static inline int vmclear(uint64_t vmcs_pa) | ||
360 | { | ||
361 | uint8_t ret; | ||
362 | |||
363 | __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" | ||
364 | : [ret]"=rm"(ret) | ||
365 | : [pa]"m"(vmcs_pa) | ||
366 | : "cc", "memory"); | ||
367 | |||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | static inline int vmptrld(uint64_t vmcs_pa) | ||
372 | { | ||
373 | uint8_t ret; | ||
374 | |||
375 | __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" | ||
376 | : [ret]"=rm"(ret) | ||
377 | : [pa]"m"(vmcs_pa) | ||
378 | : "cc", "memory"); | ||
379 | |||
380 | return ret; | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * No guest state (e.g. GPRs) is established by this vmlaunch. | ||
385 | */ | ||
386 | static inline int vmlaunch(void) | ||
387 | { | ||
388 | int ret; | ||
389 | |||
390 | __asm__ __volatile__("push %%rbp;" | ||
391 | "push %%rcx;" | ||
392 | "push %%rdx;" | ||
393 | "push %%rsi;" | ||
394 | "push %%rdi;" | ||
395 | "push $0;" | ||
396 | "vmwrite %%rsp, %[host_rsp];" | ||
397 | "lea 1f(%%rip), %%rax;" | ||
398 | "vmwrite %%rax, %[host_rip];" | ||
399 | "vmlaunch;" | ||
400 | "incq (%%rsp);" | ||
401 | "1: pop %%rax;" | ||
402 | "pop %%rdi;" | ||
403 | "pop %%rsi;" | ||
404 | "pop %%rdx;" | ||
405 | "pop %%rcx;" | ||
406 | "pop %%rbp;" | ||
407 | : [ret]"=&a"(ret) | ||
408 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
409 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
410 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
411 | "r11", "r12", "r13", "r14", "r15"); | ||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * No guest state (e.g. GPRs) is established by this vmresume. | ||
417 | */ | ||
418 | static inline int vmresume(void) | ||
419 | { | ||
420 | int ret; | ||
421 | |||
422 | __asm__ __volatile__("push %%rbp;" | ||
423 | "push %%rcx;" | ||
424 | "push %%rdx;" | ||
425 | "push %%rsi;" | ||
426 | "push %%rdi;" | ||
427 | "push $0;" | ||
428 | "vmwrite %%rsp, %[host_rsp];" | ||
429 | "lea 1f(%%rip), %%rax;" | ||
430 | "vmwrite %%rax, %[host_rip];" | ||
431 | "vmresume;" | ||
432 | "incq (%%rsp);" | ||
433 | "1: pop %%rax;" | ||
434 | "pop %%rdi;" | ||
435 | "pop %%rsi;" | ||
436 | "pop %%rdx;" | ||
437 | "pop %%rcx;" | ||
438 | "pop %%rbp;" | ||
439 | : [ret]"=&a"(ret) | ||
440 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
441 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
442 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
443 | "r11", "r12", "r13", "r14", "r15"); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | static inline int vmread(uint64_t encoding, uint64_t *value) | ||
448 | { | ||
449 | uint64_t tmp; | ||
450 | uint8_t ret; | ||
451 | |||
452 | __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" | ||
453 | : [value]"=rm"(tmp), [ret]"=rm"(ret) | ||
454 | : [encoding]"r"(encoding) | ||
455 | : "cc", "memory"); | ||
456 | |||
457 | *value = tmp; | ||
458 | return ret; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * A wrapper around vmread that ignores errors and returns zero if the | ||
463 | * vmread instruction fails. | ||
464 | */ | ||
465 | static inline uint64_t vmreadz(uint64_t encoding) | ||
466 | { | ||
467 | uint64_t value = 0; | ||
468 | vmread(encoding, &value); | ||
469 | return value; | ||
470 | } | ||
471 | |||
472 | static inline int vmwrite(uint64_t encoding, uint64_t value) | ||
473 | { | ||
474 | uint8_t ret; | ||
475 | |||
476 | __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" | ||
477 | : [ret]"=rm"(ret) | ||
478 | : [value]"rm"(value), [encoding]"r"(encoding) | ||
479 | : "cc", "memory"); | ||
480 | |||
481 | return ret; | ||
482 | } | ||
483 | |||
484 | static inline uint32_t vmcs_revision(void) | ||
485 | { | ||
486 | return rdmsr(MSR_IA32_VMX_BASIC); | ||
487 | } | ||
488 | |||
489 | void prepare_for_vmx_operation(void); | ||
490 | void prepare_vmcs(void *guest_rip, void *guest_rsp); | ||
491 | struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid, | ||
492 | vmx_guest_code_t guest_code); | ||
493 | |||
494 | #endif /* !SELFTEST_KVM_VMX_H */ | ||
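One non-obvious piece of the header above is how vmlaunch()/vmresume() report status: each pushes a zero onto the stack, writes HOST_RSP/HOST_RIP so that a VM exit resumes at the "1:" label with RSP pointing at that slot, and increments the slot only on the fall-through path. A clean VM exit therefore returns 0, while a failed VMLAUNCH/VMRESUME returns non-zero. A short sketch of the intended calling pattern, assuming only the helpers declared above (enter_guest() is a hypothetical wrapper, not part of the patch):

    #include <stdbool.h>
    #include "vmx.h"

    static void enter_guest(bool launched)
    {
        /* 0: the CPU came back via a VM exit; non-zero: the instruction failed. */
        int fail = launched ? vmresume() : vmlaunch();

        if (fail) {
            /* The VMCS records why the instruction itself failed. */
            uint64_t insn_err = vmreadz(VM_INSTRUCTION_ERROR);
            (void)insn_err; /* a real test would assert/report here */
            return;
        }

        /* Back from the guest: dispatch on the basic exit reason. */
        switch (vmreadz(VM_EXIT_REASON)) {
        case EXIT_REASON_VMCALL:
            /* the guest executed VMCALL */
            break;
        default:
            break;
        }
    }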
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 7ca1bb40c498..2cedfda181d4 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
@@ -378,7 +378,7 @@ int kvm_memcmp_hva_gva(void *hva, | |||
378 | * complicated. This function uses a reasonable default length for | 378 | * complicated. This function uses a reasonable default length for |
379 | * the array and performs the appropriate allocation. | 379 | * the array and performs the appropriate allocation. |
380 | */ | 380 | */ |
381 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | 381 | static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) |
382 | { | 382 | { |
383 | struct kvm_cpuid2 *cpuid; | 383 | struct kvm_cpuid2 *cpuid; |
384 | int nent = 100; | 384 | int nent = 100; |
@@ -402,17 +402,21 @@ struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | |||
402 | * Input Args: None | 402 | * Input Args: None |
403 | * | 403 | * |
404 | * Output Args: | 404 | * Output Args: |
405 | * cpuid - The supported KVM CPUID | ||
406 | * | 405 | * |
407 | * Return: void | 406 | * Return: The supported KVM CPUID |
408 | * | 407 | * |
409 | * Get the guest CPUID supported by KVM. | 408 | * Get the guest CPUID supported by KVM. |
410 | */ | 409 | */ |
411 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | 410 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void) |
412 | { | 411 | { |
412 | static struct kvm_cpuid2 *cpuid; | ||
413 | int ret; | 413 | int ret; |
414 | int kvm_fd; | 414 | int kvm_fd; |
415 | 415 | ||
416 | if (cpuid) | ||
417 | return cpuid; | ||
418 | |||
419 | cpuid = allocate_kvm_cpuid2(); | ||
416 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | 420 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); |
417 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 421 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", |
418 | KVM_DEV_PATH, kvm_fd, errno); | 422 | KVM_DEV_PATH, kvm_fd, errno); |
@@ -422,6 +426,7 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
422 | ret, errno); | 426 | ret, errno); |
423 | 427 | ||
424 | close(kvm_fd); | 428 | close(kvm_fd); |
429 | return cpuid; | ||
425 | } | 430 | } |
426 | 431 | ||
427 | /* Locate a cpuid entry. | 432 | /* Locate a cpuid entry. |
@@ -435,12 +440,13 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
435 | * Return: A pointer to the cpuid entry. Never returns NULL. | 440 | * Return: A pointer to the cpuid entry. Never returns NULL. |
436 | */ | 441 | */ |
437 | struct kvm_cpuid_entry2 * | 442 | struct kvm_cpuid_entry2 * |
438 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 443 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) |
439 | uint32_t index) | ||
440 | { | 444 | { |
445 | struct kvm_cpuid2 *cpuid; | ||
441 | struct kvm_cpuid_entry2 *entry = NULL; | 446 | struct kvm_cpuid_entry2 *entry = NULL; |
442 | int i; | 447 | int i; |
443 | 448 | ||
449 | cpuid = kvm_get_supported_cpuid(); | ||
444 | for (i = 0; i < cpuid->nent; i++) { | 450 | for (i = 0; i < cpuid->nent; i++) { |
445 | if (cpuid->entries[i].function == function && | 451 | if (cpuid->entries[i].function == function && |
446 | cpuid->entries[i].index == index) { | 452 | cpuid->entries[i].index == index) { |
@@ -1435,7 +1441,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, | |||
1435 | sparsebit_idx_t pg; | 1441 | sparsebit_idx_t pg; |
1436 | 1442 | ||
1437 | TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " | 1443 | TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " |
1438 | "not divisable by page size.\n" | 1444 | "not divisible by page size.\n" |
1439 | " paddr_min: 0x%lx page_size: 0x%x", | 1445 | " paddr_min: 0x%lx page_size: 0x%x", |
1440 | paddr_min, vm->page_size); | 1446 | paddr_min, vm->page_size); |
1441 | 1447 | ||
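A note on the kvm_util.c change above: kvm_get_supported_cpuid() now caches the KVM_GET_SUPPORTED_CPUID result in a function-local static, so every caller (including kvm_get_supported_cpuid_index()) shares one allocation and one ioctl; callers should treat the returned structure as read-only and never free it. A tiny illustration of that property, assuming the library above (example_cpuid_is_cached() is a hypothetical check, not part of the patch):

    #include <assert.h>
    #include "kvm_util.h"

    static void example_cpuid_is_cached(void)
    {
        struct kvm_cpuid2 *a = kvm_get_supported_cpuid();
        struct kvm_cpuid2 *b = kvm_get_supported_cpuid();

        /* Same pointer both times: only the first call issues the ioctl. */
        assert(a == b);
    }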
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 0c5cf3e0cb6f..b132bc95d183 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c | |||
@@ -121,7 +121,7 @@ | |||
121 | * avoided by moving the setting of the nodes mask bits into | 121 | * avoided by moving the setting of the nodes mask bits into |
122 | * the previous nodes num_after setting. | 122 | * the previous nodes num_after setting. |
123 | * | 123 | * |
124 | * + Node starting index is evenly divisable by the number of bits | 124 | * + Node starting index is evenly divisible by the number of bits |
125 | * within a nodes mask member. | 125 | * within a nodes mask member. |
126 | * | 126 | * |
127 | * + Nodes never represent a range of bits that wrap around the | 127 | * + Nodes never represent a range of bits that wrap around the |
@@ -1741,7 +1741,7 @@ void sparsebit_validate_internal(struct sparsebit *s) | |||
1741 | 1741 | ||
1742 | /* Validate node index is divisible by the mask size */ | 1742 | /* Validate node index is divisible by the mask size */ |
1743 | if (nodep->idx % MASK_BITS) { | 1743 | if (nodep->idx % MASK_BITS) { |
1744 | fprintf(stderr, "Node index not divisable by " | 1744 | fprintf(stderr, "Node index not divisible by " |
1745 | "mask size,\n" | 1745 | "mask size,\n" |
1746 | " nodep: %p nodep->idx: 0x%lx " | 1746 | " nodep: %p nodep->idx: 0x%lx " |
1747 | "MASK_BITS: %lu\n", | 1747 | "MASK_BITS: %lu\n", |
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/vmx.c new file mode 100644 index 000000000000..0231bc0aae7b --- /dev/null +++ b/tools/testing/selftests/kvm/lib/vmx.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * tools/testing/selftests/kvm/lib/vmx.c | ||
3 | * | ||
4 | * Copyright (C) 2018, Google LLC. | ||
5 | * | ||
6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
7 | */ | ||
8 | |||
9 | #define _GNU_SOURCE /* for program_invocation_name */ | ||
10 | |||
11 | #include "test_util.h" | ||
12 | #include "kvm_util.h" | ||
13 | #include "x86.h" | ||
14 | #include "vmx.h" | ||
15 | |||
16 | /* Create a default VM for VMX tests. | ||
17 | * | ||
18 | * Input Args: | ||
19 | * vcpuid - The id of the single VCPU to add to the VM. | ||
20 | * guest_code - The vCPU's entry point | ||
21 | * | ||
22 | * Output Args: None | ||
23 | * | ||
24 | * Return: | ||
25 | * Pointer to opaque structure that describes the created VM. | ||
26 | */ | ||
27 | struct kvm_vm * | ||
28 | vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code) | ||
29 | { | ||
30 | struct kvm_cpuid2 *cpuid; | ||
31 | struct kvm_vm *vm; | ||
32 | vm_vaddr_t vmxon_vaddr; | ||
33 | vm_paddr_t vmxon_paddr; | ||
34 | vm_vaddr_t vmcs_vaddr; | ||
35 | vm_paddr_t vmcs_paddr; | ||
36 | |||
37 | vm = vm_create_default(vcpuid, (void *) guest_code); | ||
38 | |||
39 | /* Enable nesting in CPUID */ | ||
40 | vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); | ||
41 | |||
42 | /* Set up a region of guest memory for the VMXON region. */ | ||
43 | vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
44 | vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr); | ||
45 | |||
46 | /* Set up a region of guest memory for a VMCS. */ | ||
47 | vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
48 | vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr); | ||
49 | |||
50 | vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr, | ||
51 | vmcs_paddr); | ||
52 | |||
53 | return vm; | ||
54 | } | ||
55 | |||
56 | void prepare_for_vmx_operation(void) | ||
57 | { | ||
58 | uint64_t feature_control; | ||
59 | uint64_t required; | ||
60 | unsigned long cr0; | ||
61 | unsigned long cr4; | ||
62 | |||
63 | /* | ||
64 | * Ensure bits in CR0 and CR4 are valid in VMX operation: | ||
65 | * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. | ||
66 | * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx. | ||
67 | */ | ||
68 | __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory"); | ||
69 | cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1); | ||
70 | cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0); | ||
71 | __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory"); | ||
72 | |||
73 | __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory"); | ||
74 | cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1); | ||
75 | cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0); | ||
76 | /* Enable VMX operation */ | ||
77 | cr4 |= X86_CR4_VMXE; | ||
78 | __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory"); | ||
79 | |||
80 | /* | ||
81 | * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: | ||
82 | * Bit 0: Lock bit. If clear, VMXON causes a #GP. | ||
83 | * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON | ||
84 | * outside of SMX causes a #GP. | ||
85 | */ | ||
86 | required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; | ||
87 | required |= FEATURE_CONTROL_LOCKED; | ||
88 | feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); | ||
89 | if ((feature_control & required) != required) | ||
90 | wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Initialize the control fields to the most basic settings possible. | ||
95 | */ | ||
96 | static inline void init_vmcs_control_fields(void) | ||
97 | { | ||
98 | vmwrite(VIRTUAL_PROCESSOR_ID, 0); | ||
99 | vmwrite(POSTED_INTR_NV, 0); | ||
100 | |||
101 | vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS)); | ||
102 | vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS)); | ||
103 | vmwrite(EXCEPTION_BITMAP, 0); | ||
104 | vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); | ||
105 | vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ | ||
106 | vmwrite(CR3_TARGET_COUNT, 0); | ||
107 | vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) | | ||
108 | VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */ | ||
109 | vmwrite(VM_EXIT_MSR_STORE_COUNT, 0); | ||
110 | vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0); | ||
111 | vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) | | ||
112 | VM_ENTRY_IA32E_MODE); /* 64-bit guest */ | ||
113 | vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0); | ||
114 | vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0); | ||
115 | vmwrite(TPR_THRESHOLD, 0); | ||
116 | vmwrite(SECONDARY_VM_EXEC_CONTROL, 0); | ||
117 | |||
118 | vmwrite(CR0_GUEST_HOST_MASK, 0); | ||
119 | vmwrite(CR4_GUEST_HOST_MASK, 0); | ||
120 | vmwrite(CR0_READ_SHADOW, get_cr0()); | ||
121 | vmwrite(CR4_READ_SHADOW, get_cr4()); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Initialize the host state fields based on the current host state, with | ||
126 | * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch | ||
127 | * or vmresume. | ||
128 | */ | ||
129 | static inline void init_vmcs_host_state(void) | ||
130 | { | ||
131 | uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); | ||
132 | |||
133 | vmwrite(HOST_ES_SELECTOR, get_es()); | ||
134 | vmwrite(HOST_CS_SELECTOR, get_cs()); | ||
135 | vmwrite(HOST_SS_SELECTOR, get_ss()); | ||
136 | vmwrite(HOST_DS_SELECTOR, get_ds()); | ||
137 | vmwrite(HOST_FS_SELECTOR, get_fs()); | ||
138 | vmwrite(HOST_GS_SELECTOR, get_gs()); | ||
139 | vmwrite(HOST_TR_SELECTOR, get_tr()); | ||
140 | |||
141 | if (exit_controls & VM_EXIT_LOAD_IA32_PAT) | ||
142 | vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT)); | ||
143 | if (exit_controls & VM_EXIT_LOAD_IA32_EFER) | ||
144 | vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER)); | ||
145 | if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) | ||
146 | vmwrite(HOST_IA32_PERF_GLOBAL_CTRL, | ||
147 | rdmsr(MSR_CORE_PERF_GLOBAL_CTRL)); | ||
148 | |||
149 | vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS)); | ||
150 | |||
151 | vmwrite(HOST_CR0, get_cr0()); | ||
152 | vmwrite(HOST_CR3, get_cr3()); | ||
153 | vmwrite(HOST_CR4, get_cr4()); | ||
154 | vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE)); | ||
155 | vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE)); | ||
156 | vmwrite(HOST_TR_BASE, | ||
157 | get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr()))); | ||
158 | vmwrite(HOST_GDTR_BASE, get_gdt_base()); | ||
159 | vmwrite(HOST_IDTR_BASE, get_idt_base()); | ||
160 | vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP)); | ||
161 | vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP)); | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Initialize the guest state fields essentially as a clone of | ||
166 | * the host state fields. Some host state fields have fixed | ||
167 | * values, and we set the corresponding guest state fields accordingly. | ||
168 | */ | ||
169 | static inline void init_vmcs_guest_state(void *rip, void *rsp) | ||
170 | { | ||
171 | vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR)); | ||
172 | vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR)); | ||
173 | vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR)); | ||
174 | vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR)); | ||
175 | vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR)); | ||
176 | vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR)); | ||
177 | vmwrite(GUEST_LDTR_SELECTOR, 0); | ||
178 | vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR)); | ||
179 | vmwrite(GUEST_INTR_STATUS, 0); | ||
180 | vmwrite(GUEST_PML_INDEX, 0); | ||
181 | |||
182 | vmwrite(VMCS_LINK_POINTER, -1ll); | ||
183 | vmwrite(GUEST_IA32_DEBUGCTL, 0); | ||
184 | vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT)); | ||
185 | vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER)); | ||
186 | vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL, | ||
187 | vmreadz(HOST_IA32_PERF_GLOBAL_CTRL)); | ||
188 | |||
189 | vmwrite(GUEST_ES_LIMIT, -1); | ||
190 | vmwrite(GUEST_CS_LIMIT, -1); | ||
191 | vmwrite(GUEST_SS_LIMIT, -1); | ||
192 | vmwrite(GUEST_DS_LIMIT, -1); | ||
193 | vmwrite(GUEST_FS_LIMIT, -1); | ||
194 | vmwrite(GUEST_GS_LIMIT, -1); | ||
195 | vmwrite(GUEST_LDTR_LIMIT, -1); | ||
196 | vmwrite(GUEST_TR_LIMIT, 0x67); | ||
197 | vmwrite(GUEST_GDTR_LIMIT, 0xffff); | ||
198 | vmwrite(GUEST_IDTR_LIMIT, 0xffff); | ||
199 | vmwrite(GUEST_ES_AR_BYTES, | ||
200 | vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
201 | vmwrite(GUEST_CS_AR_BYTES, 0xa09b); | ||
202 | vmwrite(GUEST_SS_AR_BYTES, 0xc093); | ||
203 | vmwrite(GUEST_DS_AR_BYTES, | ||
204 | vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
205 | vmwrite(GUEST_FS_AR_BYTES, | ||
206 | vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
207 | vmwrite(GUEST_GS_AR_BYTES, | ||
208 | vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
209 | vmwrite(GUEST_LDTR_AR_BYTES, 0x10000); | ||
210 | vmwrite(GUEST_TR_AR_BYTES, 0x8b); | ||
211 | vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); | ||
212 | vmwrite(GUEST_ACTIVITY_STATE, 0); | ||
213 | vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS)); | ||
214 | vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0); | ||
215 | |||
216 | vmwrite(GUEST_CR0, vmreadz(HOST_CR0)); | ||
217 | vmwrite(GUEST_CR3, vmreadz(HOST_CR3)); | ||
218 | vmwrite(GUEST_CR4, vmreadz(HOST_CR4)); | ||
219 | vmwrite(GUEST_ES_BASE, 0); | ||
220 | vmwrite(GUEST_CS_BASE, 0); | ||
221 | vmwrite(GUEST_SS_BASE, 0); | ||
222 | vmwrite(GUEST_DS_BASE, 0); | ||
223 | vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE)); | ||
224 | vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE)); | ||
225 | vmwrite(GUEST_LDTR_BASE, 0); | ||
226 | vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE)); | ||
227 | vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); | ||
228 | vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); | ||
229 | vmwrite(GUEST_DR7, 0x400); | ||
230 | vmwrite(GUEST_RSP, (uint64_t)rsp); | ||
231 | vmwrite(GUEST_RIP, (uint64_t)rip); | ||
232 | vmwrite(GUEST_RFLAGS, 2); | ||
233 | vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); | ||
234 | vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); | ||
235 | vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP)); | ||
236 | } | ||
237 | |||
238 | void prepare_vmcs(void *guest_rip, void *guest_rsp) | ||
239 | { | ||
240 | init_vmcs_control_fields(); | ||
241 | init_vmcs_host_state(); | ||
242 | init_vmcs_guest_state(guest_rip, guest_rsp); | ||
243 | } | ||
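The library above deliberately stops short of executing VMXON or loading a VMCS; that sequencing is left to guest code. A sketch of the expected ordering under those assumptions (example_enter_vmx_root() and its arguments are placeholders; the real flow lives in vmx_tsc_adjust_test.c below):

    #include "vmx.h"

    static void example_enter_vmx_root(uint64_t vmxon_gva, uint64_t vmxon_gpa,
                                       uint64_t vmcs_gva, uint64_t vmcs_gpa)
    {
        /* Fix up CR0/CR4 and IA32_FEATURE_CONTROL first. */
        prepare_for_vmx_operation();

        /* Both regions must begin with the VMCS revision identifier. */
        *(uint32_t *)vmxon_gva = vmcs_revision();
        *(uint32_t *)vmcs_gva = vmcs_revision();

        if (vmxon(vmxon_gpa))
            return; /* VMXON faulted or failed */
        if (vmclear(vmcs_gpa) || vmptrld(vmcs_gpa))
            return; /* could not make the VMCS current */

        /* Only now are the control/host/guest fields initialized. */
        /* prepare_vmcs(guest_rip, guest_rsp); ... vmlaunch(); */
    }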
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c new file mode 100644 index 000000000000..8f7f62093add --- /dev/null +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | ||
3 | * | ||
4 | * Copyright (C) 2018, Google LLC. | ||
5 | * | ||
6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
7 | * | ||
8 | * | ||
9 | * IA32_TSC_ADJUST test | ||
10 | * | ||
11 | * According to the SDM, "if an execution of WRMSR to the | ||
12 | * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC, | ||
13 | * the logical processor also adds (or subtracts) value X from the | ||
14 | * IA32_TSC_ADJUST MSR." | ||
15 | * | ||
16 | * Note that when L1 doesn't intercept writes to IA32_TSC, a | ||
17 | * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC | ||
18 | * value. | ||
19 | * | ||
20 | * This test verifies that this unusual case is handled correctly. | ||
21 | */ | ||
22 | |||
23 | #include "test_util.h" | ||
24 | #include "kvm_util.h" | ||
25 | #include "x86.h" | ||
26 | #include "vmx.h" | ||
27 | |||
28 | #include <string.h> | ||
29 | #include <sys/ioctl.h> | ||
30 | |||
31 | #ifndef MSR_IA32_TSC_ADJUST | ||
32 | #define MSR_IA32_TSC_ADJUST 0x3b | ||
33 | #endif | ||
34 | |||
35 | #define PAGE_SIZE 4096 | ||
36 | #define VCPU_ID 5 | ||
37 | |||
38 | #define TSC_ADJUST_VALUE (1ll << 32) | ||
39 | #define TSC_OFFSET_VALUE -(1ll << 48) | ||
40 | |||
41 | enum { | ||
42 | PORT_ABORT = 0x1000, | ||
43 | PORT_REPORT, | ||
44 | PORT_DONE, | ||
45 | }; | ||
46 | |||
47 | struct vmx_page { | ||
48 | vm_vaddr_t virt; | ||
49 | vm_paddr_t phys; | ||
50 | }; | ||
51 | |||
52 | enum { | ||
53 | VMXON_PAGE = 0, | ||
54 | VMCS_PAGE, | ||
55 | MSR_BITMAP_PAGE, | ||
56 | |||
57 | NUM_VMX_PAGES, | ||
58 | }; | ||
59 | |||
60 | struct kvm_single_msr { | ||
61 | struct kvm_msrs header; | ||
62 | struct kvm_msr_entry entry; | ||
63 | } __attribute__((packed)); | ||
64 | |||
65 | /* The virtual machine object. */ | ||
66 | static struct kvm_vm *vm; | ||
67 | |||
68 | /* Array of vmx_page descriptors that is shared with the guest. */ | ||
69 | struct vmx_page *vmx_pages; | ||
70 | |||
71 | #define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg)) | ||
72 | static void do_exit_to_l0(uint16_t port, unsigned long arg) | ||
73 | { | ||
74 | __asm__ __volatile__("in %[port], %%al" | ||
75 | : | ||
76 | : [port]"d"(port), "D"(arg) | ||
77 | : "rax"); | ||
78 | } | ||
79 | |||
80 | |||
81 | #define GUEST_ASSERT(_condition) do { \ | ||
82 | if (!(_condition)) \ | ||
83 | exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \ | ||
84 | } while (0) | ||
85 | |||
86 | static void check_ia32_tsc_adjust(int64_t max) | ||
87 | { | ||
88 | int64_t adjust; | ||
89 | |||
90 | adjust = rdmsr(MSR_IA32_TSC_ADJUST); | ||
91 | exit_to_l0(PORT_REPORT, adjust); | ||
92 | GUEST_ASSERT(adjust <= max); | ||
93 | } | ||
94 | |||
95 | static void l2_guest_code(void) | ||
96 | { | ||
97 | uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; | ||
98 | |||
99 | wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); | ||
100 | check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); | ||
101 | |||
102 | /* Exit to L1 */ | ||
103 | __asm__ __volatile__("vmcall"); | ||
104 | } | ||
105 | |||
106 | static void l1_guest_code(struct vmx_page *vmx_pages) | ||
107 | { | ||
108 | #define L2_GUEST_STACK_SIZE 64 | ||
109 | unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; | ||
110 | uint32_t control; | ||
111 | uintptr_t save_cr3; | ||
112 | |||
113 | GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE); | ||
114 | wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE); | ||
115 | check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); | ||
116 | |||
117 | prepare_for_vmx_operation(); | ||
118 | |||
119 | /* Enter VMX root operation. */ | ||
120 | *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision(); | ||
121 | GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys)); | ||
122 | |||
123 | /* Load a VMCS. */ | ||
124 | *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision(); | ||
125 | GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys)); | ||
126 | GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys)); | ||
127 | |||
128 | /* Prepare the VMCS for L2 execution. */ | ||
129 | prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); | ||
130 | control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); | ||
131 | control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING; | ||
132 | vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); | ||
133 | vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys); | ||
134 | vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE); | ||
135 | |||
136 | /* Jump into L2. First, test failure to load guest CR3. */ | ||
137 | save_cr3 = vmreadz(GUEST_CR3); | ||
138 | vmwrite(GUEST_CR3, -1ull); | ||
139 | GUEST_ASSERT(!vmlaunch()); | ||
140 | GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == | ||
141 | (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE)); | ||
142 | check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); | ||
143 | vmwrite(GUEST_CR3, save_cr3); | ||
144 | |||
145 | GUEST_ASSERT(!vmlaunch()); | ||
146 | GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); | ||
147 | |||
148 | check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); | ||
149 | |||
150 | exit_to_l0(PORT_DONE, 0); | ||
151 | } | ||
152 | |||
153 | static void allocate_vmx_page(struct vmx_page *page) | ||
154 | { | ||
155 | vm_vaddr_t virt; | ||
156 | |||
157 | virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0); | ||
158 | memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE); | ||
159 | |||
160 | page->virt = virt; | ||
161 | page->phys = addr_gva2gpa(vm, virt); | ||
162 | } | ||
163 | |||
164 | static vm_vaddr_t allocate_vmx_pages(void) | ||
165 | { | ||
166 | vm_vaddr_t vmx_pages_vaddr; | ||
167 | int i; | ||
168 | |||
169 | vmx_pages_vaddr = vm_vaddr_alloc( | ||
170 | vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0); | ||
171 | |||
172 | vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr); | ||
173 | |||
174 | for (i = 0; i < NUM_VMX_PAGES; i++) | ||
175 | allocate_vmx_page(&vmx_pages[i]); | ||
176 | |||
177 | return vmx_pages_vaddr; | ||
178 | } | ||
179 | |||
180 | void report(int64_t val) | ||
181 | { | ||
182 | printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", | ||
183 | val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); | ||
184 | } | ||
185 | |||
186 | int main(int argc, char *argv[]) | ||
187 | { | ||
188 | vm_vaddr_t vmx_pages_vaddr; | ||
189 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | ||
190 | |||
191 | if (!(entry->ecx & CPUID_VMX)) { | ||
192 | printf("nested VMX not enabled, skipping test"); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); | ||
197 | |||
198 | /* Allocate VMX pages and shared descriptors (vmx_pages). */ | ||
199 | vmx_pages_vaddr = allocate_vmx_pages(); | ||
200 | vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr); | ||
201 | |||
202 | for (;;) { | ||
203 | volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); | ||
204 | struct kvm_regs regs; | ||
205 | |||
206 | vcpu_run(vm, VCPU_ID); | ||
207 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | ||
208 | "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n", | ||
209 | run->exit_reason, | ||
210 | exit_reason_str(run->exit_reason)); | ||
211 | |||
212 | vcpu_regs_get(vm, VCPU_ID, &regs); | ||
213 | |||
214 | switch (run->io.port) { | ||
215 | case PORT_ABORT: | ||
216 | TEST_ASSERT(false, "%s", (const char *) regs.rdi); | ||
217 | /* NOT REACHED */ | ||
218 | case PORT_REPORT: | ||
219 | report(regs.rdi); | ||
220 | break; | ||
221 | case PORT_DONE: | ||
222 | goto done; | ||
223 | default: | ||
224 | TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); | ||
225 | } | ||
226 | } | ||
227 | |||
228 | done: | ||
229 | kvm_vm_free(vm); | ||
230 | return 0; | ||
231 | } | ||
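A quick worked check of the bounds the test asserts (my reading of the code above, offered as an illustration rather than part of the patch): L1 writes IA32_TSC to rdtsc() - TSC_ADJUST_VALUE, so IA32_TSC_ADJUST drops to roughly -1 * TSC_ADJUST_VALUE; L2 then reconstructs L1's TSC by undoing TSC_OFFSET_VALUE and writes l1_tsc - TSC_ADJUST_VALUE, and because the write is not intercepted it lands on L1's TSC, taking IA32_TSC_ADJUST to roughly -2 * TSC_ADJUST_VALUE. The "adjust <= max" comparisons leave slack for the cycles that elapse between rdtsc and wrmsr. The same arithmetic as a standalone snippet:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        const int64_t tsc_adjust_value = 1ll << 32; /* matches TSC_ADJUST_VALUE */

        /* After L1's wrmsr(IA32_TSC, rdtsc() - TSC_ADJUST_VALUE): */
        int64_t after_l1 = -1 * tsc_adjust_value;
        /* After L2's wrmsr(IA32_TSC, l1_tsc - TSC_ADJUST_VALUE), which lands on
         * L1's TSC because the write is not intercepted: */
        int64_t after_l2 = after_l1 - tsc_adjust_value;

        printf("expected upper bounds: %" PRId64 " then %" PRId64 "\n",
               after_l1, after_l2);
        return 0;
    }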
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 785fc18a16b4..8f1e13d2e547 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
@@ -5,7 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g | |||
5 | CFLAGS += -I../../../../usr/include/ | 5 | CFLAGS += -I../../../../usr/include/ |
6 | 6 | ||
7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh | 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh |
8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh | 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh |
9 | TEST_GEN_FILES = socket | 9 | TEST_GEN_FILES = socket |
10 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy | 10 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy |
11 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 11 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |