211 files changed, 4422 insertions, 1618 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index ff335f8aeb39..92f30006adae 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -136,6 +136,19 @@ Sorting
 .. kernel-doc:: lib/list_sort.c
    :export:
 
+Text Searching
+--------------
+
+.. kernel-doc:: lib/textsearch.c
+   :doc: ts_intro
+
+.. kernel-doc:: lib/textsearch.c
+   :export:
+
+.. kernel-doc:: include/linux/textsearch.h
+   :functions: textsearch_find textsearch_next \
+               textsearch_get_pattern textsearch_get_pattern_len
+
 UUID/GUID
 ---------
 
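The hunk above wires the textsearch API into the core-api manual. For orientation, here is a minimal kernel-side usage sketch modelled on the ts_intro documentation in lib/textsearch.c; the function name is illustrative and error handling is trimmed:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/textsearch.h>

static int ts_demo(const char *pattern, const void *data, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	/* Compile the pattern for the chosen algorithm ("kmp" here). */
	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* Scan one contiguous buffer; UINT_MAX means "no match". */
	pos = textsearch_find_continuous(conf, &state, data, len);
	if (pos != UINT_MAX)
		pr_info("pattern found at offset %u\n", pos);

	textsearch_destroy(conf);
	return 0;
}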
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
index 1b596fd38dc4..b957acff57aa 100644
--- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
@@ -49,19 +49,6 @@ on the SoC (only first trip points defined in DT will be configured):
    - samsung,exynos5433-tmu: 8
    - samsung,exynos7-tmu: 8
 
-Following properties are mandatory (depending on SoC):
-- samsung,tmu_gain: Gain value for internal TMU operation.
-- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
-- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
-- samsung,tmu_efuse_value: Default level of temperature - it is needed when
-			   in factory fusing produced wrong value
-- samsung,tmu_min_efuse_value: Minimum temperature fused value
-- samsung,tmu_max_efuse_value: Maximum temperature fused value
-- samsung,tmu_first_point_trim: First point trimming value
-- samsung,tmu_second_point_trim: Second point trimming value
-- samsung,tmu_default_temp_offset: Default temperature offset
-- samsung,tmu_cal_type: Callibration type
-
 ** Optional properties:
 
 - vtmu-supply: This entry is optional and provides the regulator node supplying
@@ -78,7 +65,7 @@ Example 1):
 		clocks = <&clock 383>;
 		clock-names = "tmu_apbif";
 		vtmu-supply = <&tmu_regulator_node>;
-		#include "exynos4412-tmu-sensor-conf.dtsi"
+		#thermal-sensor-cells = <0>;
 	};
 
 Example 2):
@@ -89,7 +76,7 @@ Example 2):
 		interrupts = <0 58 0>;
 		clocks = <&clock 21>;
 		clock-names = "tmu_apbif";
-		#include "exynos5440-tmu-sensor-conf.dtsi"
+		#thermal-sensor-cells = <0>;
 	};
 
 Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@@ -99,7 +86,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
 		interrupts = <0 184 0>;
 		clocks = <&clock 318>, <&clock 318>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
-		#include "exynos4412-tmu-sensor-conf.dtsi"
+		#thermal-sensor-cells = <0>;
 	};
 
 	tmu_cpu3: tmu@1006c000 {
@@ -108,7 +95,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
 		interrupts = <0 185 0>;
 		clocks = <&clock 318>, <&clock 319>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
-		#include "exynos4412-tmu-sensor-conf.dtsi"
+		#thermal-sensor-cells = <0>;
 	};
 
 	tmu_gpu: tmu@100a0000 {
@@ -117,7 +104,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
 		interrupts = <0 215 0>;
 		clocks = <&clock 319>, <&clock 318>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
-		#include "exynos4412-tmu-sensor-conf.dtsi"
+		#thermal-sensor-cells = <0>;
 	};
 
 Note: For multi-instance tmu each instance should have an alias correctly
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 1719d47a5e2f..cc553f0952c5 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,8 +55,7 @@ of heat dissipation). For example a fan's cooling states correspond to
 the different fan speeds possible. Cooling states are referred to by
 single unsigned integers, where larger numbers mean greater heat
 dissipation. The precise set of cooling states associated with a device
-(as referred to by the cooling-min-level and cooling-max-level
-properties) should be defined in a particular device's binding.
+should be defined in a particular device's binding.
 For more examples of cooling devices, refer to the example sections below.
 
 Required properties:
@@ -69,15 +68,6 @@ Required properties:
 	See Cooling device maps section below for more details
 	on how consumers refer to cooling devices.
 
-Optional properties:
-- cooling-min-level:	An integer indicating the smallest
-  Type: unsigned	cooling state accepted. Typically 0.
-  Size: one cell
-
-- cooling-max-level:	An integer indicating the largest
-  Type: unsigned	cooling state accepted.
-  Size: one cell
-
 * Trip points
 
 The trip node is a node to describe a point in the temperature domain
@@ -226,8 +216,6 @@ cpus {
 			396000	950000
 			198000	850000
 		>;
-		cooling-min-level = <0>;
-		cooling-max-level = <3>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 	...
@@ -241,8 +229,6 @@ cpus {
 	 */
 	fan0: fan@48 {
 		...
-		cooling-min-level = <0>;
-		cooling-max-level = <9>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 };
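With cooling-min-level and cooling-max-level gone from the generic binding, the valid range of cooling states is reported by the cooling device driver itself rather than by DT. A hedged sketch of how that is usually done through thermal_cooling_device_ops (the demo_fan driver and its ten states are invented for illustration):

#include <linux/thermal.h>

/* hypothetical fan driver private data: states 0..9 = ten fan speeds */
struct demo_fan {
	unsigned long cur_state;
};

static int demo_fan_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 9;			/* formerly cooling-max-level */
	return 0;
}

static int demo_fan_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct demo_fan *fan = cdev->devdata;

	*state = fan->cur_state;
	return 0;
}

static int demo_fan_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct demo_fan *fan = cdev->devdata;

	fan->cur_state = state;		/* program the fan speed here */
	return 0;
}

static const struct thermal_cooling_device_ops demo_fan_cooling_ops = {
	.get_max_state = demo_fan_get_max_state,
	.get_cur_state = demo_fan_get_cur_state,
	.set_cur_state = demo_fan_set_cur_state,
};

The ops structure would then be passed to the cooling device registration call together with the fan's device_node.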
diff --git a/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
new file mode 100644
index 000000000000..ea22dfe485be
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
@@ -0,0 +1,21 @@
+Nuvoton NPCM7xx timer
+
+Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit
+timer counters.
+
+Required properties:
+- compatible      : "nuvoton,npcm750-timer" for Poleg NPCM750.
+- reg             : Offset and length of the register set for the device.
+- interrupts      : Contain the timer interrupt with flags for
+                    falling edge.
+- clocks          : phandle of timer reference clock (usually a 25 MHz clock).
+
+Example:
+
+timer@f0008000 {
+    compatible = "nuvoton,npcm750-timer";
+    interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+    reg = <0xf0008000 0x50>;
+    clocks = <&clk NPCM7XX_CLK_TIMER>;
+};
+
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
index b4aa7ddb5b13..f82087b220f4 100644
--- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
@@ -15,7 +15,7 @@ Required properties:
 - interrupts : Should be the clock event device interrupt.
 - clocks : The clocks provided by the SoC to drive the timer, must contain
 	   an entry for each entry in clock-names.
-- clock-names : Must include the following entries: "igp" and "per".
+- clock-names : Must include the following entries: "ipg" and "per".
 
 Example:
 tpm5: tpm@40260000 {
diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.txt
index 89c66634d600..ecc09a7be5dd 100644
--- a/Documentation/livepatch/shadow-vars.txt
+++ b/Documentation/livepatch/shadow-vars.txt
@@ -34,9 +34,13 @@ meta-data and shadow-data:
   - data[] - storage for shadow data
 
 It is important to note that the klp_shadow_alloc() and
-klp_shadow_get_or_alloc() calls, described below, store a *copy* of the
-data that the functions are provided. Callers should provide whatever
-mutual exclusion is required of the shadow data.
+klp_shadow_get_or_alloc() are zeroing the variable by default.
+They also allow to call a custom constructor function when a non-zero
+value is needed. Callers should provide whatever mutual exclusion
+is required.
+
+Note that the constructor is called under klp_shadow_lock spinlock. It allows
+to do actions that can be done only once when a new variable is allocated.
 
 * klp_shadow_get() - retrieve a shadow variable data pointer
   - search hashtable for <obj, id> pair
@@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data.
     - WARN and return NULL
   - if <obj, id> doesn't already exist
     - allocate a new shadow variable
-    - copy data into the new shadow variable
+    - initialize the variable using a custom constructor and data when provided
     - add <obj, id> to the global hashtable
 
 * klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
@@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data.
     - return existing shadow variable
   - if <obj, id> doesn't already exist
     - allocate a new shadow variable
-    - copy data into the new shadow variable
+    - initialize the variable using a custom constructor and data when provided
     - add <obj, id> pair to the global hashtable
 
 * klp_shadow_free() - detach and free a <obj, id> shadow variable
   - find and remove a <obj, id> reference from global hashtable
-  - if found, free shadow variable
+  - if found
+    - call destructor function if defined
+    - free shadow variable
 
 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
   - find and remove any <*, id> references from global hashtable
-  - if found, free shadow variable
+  - if found
+    - call destructor function if defined
+    - free shadow variable
 
 
 2. Use cases
@@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
 
 	/* Attach a corresponding shadow variable, then initialize it */
-	ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp);
+	ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp,
+				   NULL, NULL);
 	if (!ps_lock)
 		goto shadow_fail;
 	spin_lock_init(ps_lock);
@@ -131,7 +140,7 @@ variable:
 
 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 {
-	klp_shadow_free(sta, PS_LOCK);
+	klp_shadow_free(sta, PS_LOCK, NULL);
 	kfree(sta);
 	...
 
@@ -148,16 +157,24 @@ shadow variables to parents already in-flight.
 For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
 inside ieee80211_sta_ps_deliver_wakeup():
 
+int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
+{
+	spinlock_t *lock = shadow_data;
+
+	spin_lock_init(lock);
+	return 0;
+}
+
 #define PS_LOCK 1
 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
 {
-	DEFINE_SPINLOCK(ps_lock_fallback);
 	spinlock_t *ps_lock;
 
 	/* sync with ieee80211_tx_h_unicast_ps_buf */
 	ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK,
-			&ps_lock_fallback, sizeof(ps_lock_fallback),
-			GFP_ATOMIC);
+					  sizeof(*ps_lock), GFP_ATOMIC,
+					  ps_lock_shadow_ctor, NULL);
+
 	if (ps_lock)
 		spin_lock(ps_lock);
 	...
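The reworked interface is symmetric: the free path takes a destructor, just as the allocation path takes a constructor. A small illustrative fragment (ps_lock_shadow_dtor and unpatch_all are invented names, not part of the patch above):

/* Invoked once for every freed <obj, PS_LOCK> pair. */
static void ps_lock_shadow_dtor(void *obj, void *shadow_data)
{
	/*
	 * Nothing to tear down for a plain spinlock; release any
	 * memory or references the shadow data owned here.
	 */
}

static void unpatch_all(void)
{
	/* Detach and free every <*, PS_LOCK> shadow variable in one go. */
	klp_shadow_free_all(PS_LOCK, ps_lock_shadow_dtor);
}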
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index a4508ec1816b..fd55c7de9991 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -169,7 +169,7 @@ access to BPF code as well.
 BPF engine and instruction set
 ------------------------------
 
-Under tools/net/ there's a small helper tool called bpf_asm which can
+Under tools/bpf/ there's a small helper tool called bpf_asm which can
 be used to write low-level filters for example scenarios mentioned in the
 previous section. Asm-like syntax mentioned here has been implemented in
 bpf_asm and will be used for further explanations (instead of dealing with
@@ -359,7 +359,7 @@ $ ./bpf_asm -c foo
 In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF
 filters that might not be obvious at first, it's good to test filters before
 attaching to a live system. For that purpose, there's a small tool called
-bpf_dbg under tools/net/ in the kernel source directory. This debugger allows
+bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows
 for testing BPF filters against given pcap files, single stepping through the
 BPF code on the pcap's packets and to do BPF machine register dumps.
 
@@ -483,7 +483,7 @@ Example output from dmesg:
 [ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00
 [ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3
 
-In the kernel source tree under tools/net/, there's bpf_jit_disasm for
+In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for
 generating disassembly out of the kernel log's hexdump:
 
 # ./bpf_jit_disasm
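For readers without the tools at hand, this is roughly what such a low-level filter looks like once assembled and attached from userspace. A hedged sketch; the opcode array mirrors the classic "accept only IPv4" example used elsewhere in filter.txt and is not the output of any particular tool run:

#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	/* ldh [12]; if ethertype == 0x800 accept (ret 0xffff), else drop */
	struct sock_filter code[] = {
		{ 0x28, 0, 0, 0x0000000c },
		{ 0x15, 0, 1, 0x00000800 },
		{ 0x06, 0, 0, 0x0000ffff },
		{ 0x06, 0, 0, 0x00000000 },
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("attach classic BPF filter");
		return 1;
	}
	/* fd now only delivers IPv4 frames */
	return 0;
}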
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5dc1a040a2f1..b583a73cf95f 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1390,26 +1390,26 @@ mld_qrv - INTEGER
 	Default: 2 (as specified by RFC3810 9.1)
 	Minimum: 1 (as specified by RFC6636 4.5)
 
-max_dst_opts_cnt - INTEGER
+max_dst_opts_number - INTEGER
 	Maximum number of non-padding TLVs allowed in a Destination
 	options extension header. If this value is less than zero
 	then unknown options are disallowed and the number of known
 	TLVs allowed is the absolute value of this number.
 	Default: 8
 
-max_hbh_opts_cnt - INTEGER
+max_hbh_opts_number - INTEGER
 	Maximum number of non-padding TLVs allowed in a Hop-by-Hop
 	options extension header. If this value is less than zero
 	then unknown options are disallowed and the number of known
 	TLVs allowed is the absolute value of this number.
 	Default: 8
 
-max dst_opts_len - INTEGER
+max_dst_opts_length - INTEGER
 	Maximum length allowed for a Destination options extension
 	header.
 	Default: INT_MAX (unlimited)
 
-max hbh_opts_len - INTEGER
+max_hbh_length - INTEGER
 	Maximum length allowed for a Hop-by-Hop options extension
 	header.
 	Default: INT_MAX (unlimited)
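Scripts and tools that tune these limits need to follow the renames. A minimal sketch, assuming the knobs appear under /proc/sys/net/ipv6/ like the other IPv6 settings in this file (verify the exact path on your kernel):

#include <stdio.h>

/* Write a value to one sysctl file; returns 0 on success. */
static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* previously named max_dst_opts_cnt */
	return write_sysctl("/proc/sys/net/ipv6/max_dst_opts_number", "8");
}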
diff --git a/MAINTAINERS b/MAINTAINERS
index 0a1410d5a621..92be777d060a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1373,7 +1373,8 @@ F:	arch/arm/mach-ebsa110/
 F:	drivers/net/ethernet/amd/am79c961a.*
 
 ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
-M:	Uwe Kleine-König <kernel@pengutronix.de>
+M:	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 N:	efm32
@@ -1401,7 +1402,8 @@ F:	arch/arm/mach-footbridge/
 
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:	Shawn Guo <shawnguo@kernel.org>
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 R:	Fabio Estevam <fabio.estevam@nxp.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -1416,7 +1418,8 @@ F:	include/soc/imx/
 
 ARM/FREESCALE VYBRID ARM ARCHITECTURE
 M:	Shawn Guo <shawnguo@kernel.org>
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 R:	Stefan Agner <stefan@agner.ch>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -4245,6 +4248,9 @@ F:	include/trace/events/fs_dax.h
 
 DEVICE DIRECT ACCESS (DAX)
 M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
 L:	linux-nvdimm@lists.01.org
 S:	Supported
 F:	drivers/dax/
@@ -5652,7 +5658,8 @@ F:	drivers/net/ethernet/freescale/fec.h
 F:	Documentation/devicetree/bindings/net/fsl-fec.txt
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 L:	linux-fbdev@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -5784,6 +5791,14 @@ F:	fs/crypto/
 F:	include/linux/fscrypt*.h
 F:	Documentation/filesystems/fscrypt.rst
 
+FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
+M:	Jan Kara <jack@suse.cz>
+R:	Amir Goldstein <amir73il@gmail.com>
+L:	linux-fsdevel@vger.kernel.org
+S:	Maintained
+F:	fs/notify/
+F:	include/linux/fsnotify*.h
+
 FUJITSU LAPTOP EXTRAS
 M:	Jonathan Woithe <jwoithe@just42.net>
 L:	platform-driver-x86@vger.kernel.org
@@ -6256,7 +6271,7 @@ S:	Odd Fixes
 F:	drivers/media/usb/hdpvr/
 
 HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
-M:	Jimmy Vance <jimmy.vance@hpe.com>
+M:	Jerry Hoemann <jerry.hoemann@hpe.com>
 S:	Supported
 F:	Documentation/watchdog/hpwdt.txt
 F:	drivers/watchdog/hpwdt.c
@@ -8048,6 +8063,9 @@ F:	tools/lib/lockdep/
 
 LIBNVDIMM BLK: MMIO-APERTURE DRIVER
 M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8056,6 +8074,9 @@ F:	drivers/nvdimm/region_devs.c
 
 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8063,6 +8084,9 @@ F:	drivers/nvdimm/btt*
 
 LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
 M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8078,6 +8102,9 @@ F:	Documentation/devicetree/bindings/pmem/pmem-region.txt
 
 LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
 M:	Dan Williams <dan.j.williams@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@@ -9765,6 +9792,7 @@ F:	include/uapi/linux/net_namespace.h
 F:	tools/testing/selftests/net/
 F:	lib/net_utils.c
 F:	lib/random32.c
+F:	Documentation/networking/
 
 NETWORKING [IPSEC]
 M:	Steffen Klassert <steffen.klassert@secunet.com>
@@ -12816,7 +12844,8 @@ F:	include/linux/siphash.h
 
 SIOX
 M:	Gavin Schenk <g.schenk@eckelmann.de>
-M:	Uwe Kleine-König <kernel@pengutronix.de>
+M:	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 S:	Supported
 F:	drivers/siox/*
 F:	include/trace/events/siox.h
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ba964da31a25..1cb2749a72bf 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -366,7 +366,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
 	}
 
 	/* Force signals we don't understand to SIGKILL */
-	if (WARN_ON(signal != SIGKILL ||
+	if (WARN_ON(signal != SIGKILL &&
 		    siginfo_layout(signal, code) != SIL_FAULT)) {
 		signal = SIGKILL;
 	}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index dabfc1ecda3d..12145874c02b 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -204,7 +204,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
-			   pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
 				   (void *)mod_shadow_start);
@@ -224,7 +224,7 @@ void __init kasan_init(void)
 
 		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
 				   (unsigned long)kasan_mem_to_shadow(end),
-				   pfn_to_nid(virt_to_pfn(start)));
+				   early_pfn_to_nid(virt_to_pfn(start)));
 	}
 
 	/*
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 1bd105428f61..65af3f6ba81c 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -51,6 +51,8 @@
 		ranges = <0x02000000 0 0x40000000
 			  0x40000000 0 0x40000000>;
 
+		bus-range = <0x00 0xff>;
+
 		interrupt-map-mask = <0 0 0 7>;
 		interrupt-map = <0 0 0 1 &pci0_intc 1>,
 				<0 0 0 2 &pci0_intc 2>,
@@ -79,6 +81,8 @@
 		ranges = <0x02000000 0 0x20000000
 			  0x20000000 0 0x20000000>;
 
+		bus-range = <0x00 0xff>;
+
 		interrupt-map-mask = <0 0 0 7>;
 		interrupt-map = <0 0 0 1 &pci1_intc 1>,
 				<0 0 0 2 &pci1_intc 2>,
@@ -107,6 +111,8 @@
 		ranges = <0x02000000 0 0x16000000
 			  0x16000000 0 0x100000>;
 
+		bus-range = <0x00 0xff>;
+
 		interrupt-map-mask = <0 0 0 7>;
 		interrupt-map = <0 0 0 1 &pci2_intc 1>,
 				<0 0 0 2 &pci2_intc 2>,
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 0cbf3af37eca..a7d0b836f2f7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
 #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
 #define war_io_reorder_wmb()		wmb()
 #else
-#define war_io_reorder_wmb()		do { } while (0)
+#define war_io_reorder_wmb()		barrier()
 #endif
 
 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
 		BUG();							\
 	}								\
 									\
+	/* prevent prefetching of coherent DMA data prematurely */	\
+	rmb();								\
 	return pfx##ioswab##bwlq(__mem, __val);				\
 }
 
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b71306947290..06629011a434 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
 	__kernel_size_t res;
 
+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
 	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	} else {
 		might_fault();
 		__asm__ __volatile__(
@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	}
 
 	return res;
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index a1456664d6c2..f7327979a8f8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -219,7 +219,7 @@
 1:	PTR_ADDIU	a0, 1			/* fill bytewise */
 	R10KCBARRIER(0(ra))
 	bne		t1, a0, 1b
-	sb		a1, -1(a0)
+	 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
 
 2:	jr		ra			/* done */
 	move		a2, zero
@@ -252,13 +252,18 @@
 	PTR_L		t0, TI_TASK($28)
 	andi		a2, STORMASK
 	LONG_L		t0, THREAD_BUADDR(t0)
-	LONG_ADDU	a2, t1
+	LONG_ADDU	a2, a0
 	jr		ra
 	LONG_SUBU	a2, t0
 
 .Llast_fixup\@:
 	jr		ra
-	andi		v1, a2, STORMASK
+	nop
+
+.Lsmall_fixup\@:
+	PTR_SUBU	a2, t1, a0
+	jr		ra
+	 PTR_ADDIU	a2, 1
 
 	.endm
 
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index eafd06ab59ef..e5de34d00b1a 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_PA11)	+= pci-dma.o
 obj-$(CONFIG_PCI)	+= pci.o
 obj-$(CONFIG_MODULES)	+= module.o
-obj-$(CONFIG_64BIT)	+= binfmt_elf32.o sys_parisc32.o signal32.o
+obj-$(CONFIG_64BIT)	+= sys_parisc32.o signal32.o
 obj-$(CONFIG_STACKTRACE)+= stacktrace.o
 obj-$(CONFIG_AUDIT)	+= audit.o
 obj64-$(CONFIG_AUDIT)	+= compat_audit.o
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 2d4956e97aa9..ee5a67d57aab 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
 	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
 
 	/* PCI Command: 0x4 */
-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 
 	/* Check the PCIe link is ready */
 	eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 79d005445c6c..e734f6e45abc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	lbz	r0,HSTATE_HWTHREAD_STATE(r13)
 	cmpwi	r0,KVM_HWTHREAD_IN_KERNEL
-	beq	1f
+	beq	0f
 	li	r0,KVM_HWTHREAD_IN_KERNEL
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 	/* Order setting hwthread_state vs. testing hwthread_req */
 	sync
-	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+0:	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
 	cmpwi	r0,0
 	beq	1f
 	b	kvm_start_guest
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 44c30dd38067..b78f142a4148 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void)
 		return;
 
 	l1d_size = ppc64_caches.l1d.size;
+
+	/*
+	 * If there is no d-cache-size property in the device tree, l1d_size
+	 * could be zero. That leads to the loop in the asm wrapping around to
+	 * 2^64-1, and then walking off the end of the fallback area and
+	 * eventually causing a page fault which is fatal. Just default to
+	 * something vaguely sane.
+	 */
+	if (!l1d_size)
+		l1d_size = (64 * 1024);
+
 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
 
 	/*
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 35f80ab7cbd8..288fe4f0db4e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
 		unsigned int *target = (unsigned int *)branch_target(src);
 
 		/* Branch within the section doesn't need translating */
-		if (target < alt_start || target >= alt_end) {
+		if (target < alt_start || target > alt_end) {
 			instr = translate_branch(dest, src);
 			if (!instr)
 				return 1;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9033c8194eda..ccc421503363 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 		LOAD_INT(c), LOAD_FRAC(c),
 		count_active_contexts(),
 		atomic_read(&nr_spu_contexts),
-		idr_get_cursor(&task_active_pid_ns(current)->idr));
+		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 	return 0;
 }
 
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index d22aeb0b69e1..b48454be5b98 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
 	if (xive_pool_vps == XIVE_INVALID_VP)
 		return;
 
+	/* Check if pool VP already active, if it is, pull it */
+	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
+		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
+
 	/* Enable the pool VP */
 	vp = xive_pool_vps + cpu;
 	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 9fdff3fe1a42..e63940bb57cd 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE)	+= appldata/
 obj-y				+= net/
 obj-$(CONFIG_PCI)		+= pci/
 obj-$(CONFIG_NUMA)		+= numa/
+obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 32a0d5b958bf..199ac3e4da1d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -47,10 +47,6 @@ config PGSTE
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
-config KEXEC
-	def_bool y
-	select KEXEC_CORE
-
 config AUDIT_ARCH
 	def_bool y
 
@@ -290,12 +286,12 @@ config MARCH_Z13
 	  older machines.
 
 config MARCH_Z14
-	bool "IBM z14"
+	bool "IBM z14 ZR1 and z14"
 	select HAVE_MARCH_Z14_FEATURES
 	help
-	  Select this to enable optimizations for IBM z14 (3906 series).
-	  The kernel will be slightly faster but will not work on older
-	  machines.
+	  Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
+	  and 3906 series). The kernel will be slightly faster but will not
+	  work on older machines.
 
 endchoice
 
@@ -525,6 +521,26 @@ source kernel/Kconfig.preempt
 
 source kernel/Kconfig.hz
 
+config KEXEC
+	def_bool y
+	select KEXEC_CORE
+
+config KEXEC_FILE
+	bool "kexec file based system call"
+	select KEXEC_CORE
+	select BUILD_BIN2C
+	depends on CRYPTO
+	depends on CRYPTO_SHA256
+	depends on CRYPTO_SHA256_S390
+	help
+	  Enable the kexec file based system call. In contrast to the normal
+	  kexec system call this system call takes file descriptors for the
+	  kernel and initramfs as arguments.
+
+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool y
+	depends on KEXEC_FILE
+
 config ARCH_RANDOM
 	def_bool y
 	prompt "s390 architectural random number generation API"
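The KEXEC_FILE help text above refers to the kexec_file_load() system call. A hedged userspace sketch of invoking it directly through syscall(2), assuming the libc headers define SYS_kexec_file_load; the /boot paths are placeholders:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const char *cmdline = "root=/dev/ram0";
	int kernel_fd = open("/boot/image", O_RDONLY);
	int initrd_fd = open("/boot/initrd", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open kernel/initrd");
		return 1;
	}

	/* file descriptors instead of in-memory segments; length includes NUL */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	return 0;
}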
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index da9dad35c28e..d1fa37fcce83 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -3,12 +3,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-COMPILE_VERSION := __linux_compile_version_id__`hostname | \
-			tr -c '[0-9A-Za-z]' '_'`__`date | \
-			tr -c '[0-9A-Za-z]' '_'`_t
-
-ccflags-y  := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
-
 targets := image
 targets += bzImage
 subdir- := compressed
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index ae06b9b4c02f..2088cc140629 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
 sizes.h
 vmlinux
 vmlinux.lds
+vmlinux.bin.full
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/debug_defconfig
index 5af8458951cf..6176fe9795ca 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -24,13 +24,13 @@ CONFIG_CPUSETS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_OPENVSWITCH=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
-CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
@@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=m
 CONFIG_ENCLOSURE_SERVICES=m
 CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
@@ -461,6 +461,7 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+CONFIG_DRM=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
@@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
-CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_PKCS7_MESSAGE_PARSER=y
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_RANDOM32_SELFTEST=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig deleted file mode 100644 index d52eafe57ae8..000000000000 --- a/arch/s390/configs/gcov_defconfig +++ /dev/null | |||
| @@ -1,661 +0,0 @@ | |||
| 1 | CONFIG_SYSVIPC=y | ||
| 2 | CONFIG_POSIX_MQUEUE=y | ||
| 3 | CONFIG_AUDIT=y | ||
| 4 | CONFIG_NO_HZ_IDLE=y | ||
| 5 | CONFIG_HIGH_RES_TIMERS=y | ||
| 6 | CONFIG_BSD_PROCESS_ACCT=y | ||
| 7 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
| 8 | CONFIG_TASKSTATS=y | ||
| 9 | CONFIG_TASK_DELAY_ACCT=y | ||
| 10 | CONFIG_TASK_XACCT=y | ||
| 11 | CONFIG_TASK_IO_ACCOUNTING=y | ||
| 12 | CONFIG_IKCONFIG=y | ||
| 13 | CONFIG_IKCONFIG_PROC=y | ||
| 14 | CONFIG_NUMA_BALANCING=y | ||
| 15 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set | ||
| 16 | CONFIG_MEMCG=y | ||
| 17 | CONFIG_MEMCG_SWAP=y | ||
| 18 | CONFIG_BLK_CGROUP=y | ||
| 19 | CONFIG_CFS_BANDWIDTH=y | ||
| 20 | CONFIG_RT_GROUP_SCHED=y | ||
| 21 | CONFIG_CGROUP_PIDS=y | ||
| 22 | CONFIG_CGROUP_FREEZER=y | ||
| 23 | CONFIG_CGROUP_HUGETLB=y | ||
| 24 | CONFIG_CPUSETS=y | ||
| 25 | CONFIG_CGROUP_DEVICE=y | ||
| 26 | CONFIG_CGROUP_CPUACCT=y | ||
| 27 | CONFIG_CGROUP_PERF=y | ||
| 28 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 29 | CONFIG_NAMESPACES=y | ||
| 30 | CONFIG_USER_NS=y | ||
| 31 | CONFIG_SCHED_AUTOGROUP=y | ||
| 32 | CONFIG_BLK_DEV_INITRD=y | ||
| 33 | CONFIG_EXPERT=y | ||
| 34 | # CONFIG_SYSFS_SYSCALL is not set | ||
| 35 | CONFIG_BPF_SYSCALL=y | ||
| 36 | CONFIG_USERFAULTFD=y | ||
| 37 | # CONFIG_COMPAT_BRK is not set | ||
| 38 | CONFIG_PROFILING=y | ||
| 39 | CONFIG_OPROFILE=m | ||
| 40 | CONFIG_KPROBES=y | ||
| 41 | CONFIG_JUMP_LABEL=y | ||
| 42 | CONFIG_GCOV_KERNEL=y | ||
| 43 | CONFIG_GCOV_PROFILE_ALL=y | ||
| 44 | CONFIG_MODULES=y | ||
| 45 | CONFIG_MODULE_FORCE_LOAD=y | ||
| 46 | CONFIG_MODULE_UNLOAD=y | ||
| 47 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
| 48 | CONFIG_MODVERSIONS=y | ||
| 49 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
| 50 | CONFIG_BLK_DEV_INTEGRITY=y | ||
| 51 | CONFIG_BLK_DEV_THROTTLING=y | ||
| 52 | CONFIG_BLK_WBT=y | ||
| 53 | CONFIG_BLK_WBT_SQ=y | ||
| 54 | CONFIG_PARTITION_ADVANCED=y | ||
| 55 | CONFIG_IBM_PARTITION=y | ||
| 56 | CONFIG_BSD_DISKLABEL=y | ||
| 57 | CONFIG_MINIX_SUBPARTITION=y | ||
| 58 | CONFIG_SOLARIS_X86_PARTITION=y | ||
| 59 | CONFIG_UNIXWARE_DISKLABEL=y | ||
| 60 | CONFIG_CFQ_GROUP_IOSCHED=y | ||
| 61 | CONFIG_DEFAULT_DEADLINE=y | ||
| 62 | CONFIG_LIVEPATCH=y | ||
| 63 | CONFIG_TUNE_ZEC12=y | ||
| 64 | CONFIG_NR_CPUS=512 | ||
| 65 | CONFIG_NUMA=y | ||
| 66 | CONFIG_HZ_100=y | ||
| 67 | CONFIG_MEMORY_HOTPLUG=y | ||
| 68 | CONFIG_MEMORY_HOTREMOVE=y | ||
| 69 | CONFIG_KSM=y | ||
| 70 | CONFIG_TRANSPARENT_HUGEPAGE=y | ||
| 71 | CONFIG_CLEANCACHE=y | ||
| 72 | CONFIG_FRONTSWAP=y | ||
| 73 | CONFIG_MEM_SOFT_DIRTY=y | ||
| 74 | CONFIG_ZSWAP=y | ||
| 75 | CONFIG_ZBUD=m | ||
| 76 | CONFIG_ZSMALLOC=m | ||
| 77 | CONFIG_ZSMALLOC_STAT=y | ||
| 78 | CONFIG_DEFERRED_STRUCT_PAGE_INIT=y | ||
| 79 | CONFIG_IDLE_PAGE_TRACKING=y | ||
| 80 | CONFIG_PCI=y | ||
| 81 | CONFIG_HOTPLUG_PCI=y | ||
| 82 | CONFIG_HOTPLUG_PCI_S390=y | ||
| 83 | CONFIG_CHSC_SCH=y | ||
| 84 | CONFIG_CRASH_DUMP=y | ||
| 85 | CONFIG_BINFMT_MISC=m | ||
| 86 | CONFIG_HIBERNATION=y | ||
| 87 | CONFIG_NET=y | ||
| 88 | CONFIG_PACKET=y | ||
| 89 | CONFIG_PACKET_DIAG=m | ||
| 90 | CONFIG_UNIX=y | ||
| 91 | CONFIG_UNIX_DIAG=m | ||
| 92 | CONFIG_XFRM_USER=m | ||
| 93 | CONFIG_NET_KEY=m | ||
| 94 | CONFIG_SMC=m | ||
| 95 | CONFIG_SMC_DIAG=m | ||
| 96 | CONFIG_INET=y | ||
| 97 | CONFIG_IP_MULTICAST=y | ||
| 98 | CONFIG_IP_ADVANCED_ROUTER=y | ||
| 99 | CONFIG_IP_MULTIPLE_TABLES=y | ||
| 100 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
| 101 | CONFIG_IP_ROUTE_VERBOSE=y | ||
| 102 | CONFIG_NET_IPIP=m | ||
| 103 | CONFIG_NET_IPGRE_DEMUX=m | ||
| 104 | CONFIG_NET_IPGRE=m | ||
| 105 | CONFIG_NET_IPGRE_BROADCAST=y | ||
| 106 | CONFIG_IP_MROUTE=y | ||
| 107 | CONFIG_IP_MROUTE_MULTIPLE_TABLES=y | ||
| 108 | CONFIG_IP_PIMSM_V1=y | ||
| 109 | CONFIG_IP_PIMSM_V2=y | ||
| 110 | CONFIG_SYN_COOKIES=y | ||
| 111 | CONFIG_NET_IPVTI=m | ||
| 112 | CONFIG_INET_AH=m | ||
| 113 | CONFIG_INET_ESP=m | ||
| 114 | CONFIG_INET_IPCOMP=m | ||
| 115 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | ||
| 116 | CONFIG_INET_XFRM_MODE_TUNNEL=m | ||
| 117 | CONFIG_INET_XFRM_MODE_BEET=m | ||
| 118 | CONFIG_INET_DIAG=m | ||
| 119 | CONFIG_INET_UDP_DIAG=m | ||
| 120 | CONFIG_TCP_CONG_ADVANCED=y | ||
| 121 | CONFIG_TCP_CONG_HSTCP=m | ||
| 122 | CONFIG_TCP_CONG_HYBLA=m | ||
| 123 | CONFIG_TCP_CONG_SCALABLE=m | ||
| 124 | CONFIG_TCP_CONG_LP=m | ||
| 125 | CONFIG_TCP_CONG_VENO=m | ||
| 126 | CONFIG_TCP_CONG_YEAH=m | ||
| 127 | CONFIG_TCP_CONG_ILLINOIS=m | ||
| 128 | CONFIG_IPV6_ROUTER_PREF=y | ||
| 129 | CONFIG_INET6_AH=m | ||
| 130 | CONFIG_INET6_ESP=m | ||
| 131 | CONFIG_INET6_IPCOMP=m | ||
| 132 | CONFIG_IPV6_MIP6=m | ||
| 133 | CONFIG_INET6_XFRM_MODE_TRANSPORT=m | ||
| 134 | CONFIG_INET6_XFRM_MODE_TUNNEL=m | ||
| 135 | CONFIG_INET6_XFRM_MODE_BEET=m | ||
| 136 | CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m | ||
| 137 | CONFIG_IPV6_VTI=m | ||
| 138 | CONFIG_IPV6_SIT=m | ||
| 139 | CONFIG_IPV6_GRE=m | ||
| 140 | CONFIG_IPV6_MULTIPLE_TABLES=y | ||
| 141 | CONFIG_IPV6_SUBTREES=y | ||
| 142 | CONFIG_NETFILTER=y | ||
| 143 | CONFIG_NF_CONNTRACK=m | ||
| 144 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
| 145 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
| 146 | CONFIG_NF_CONNTRACK_TIMEOUT=y | ||
| 147 | CONFIG_NF_CONNTRACK_TIMESTAMP=y | ||
| 148 | CONFIG_NF_CONNTRACK_AMANDA=m | ||
| 149 | CONFIG_NF_CONNTRACK_FTP=m | ||
| 150 | CONFIG_NF_CONNTRACK_H323=m | ||
| 151 | CONFIG_NF_CONNTRACK_IRC=m | ||
| 152 | CONFIG_NF_CONNTRACK_NETBIOS_NS=m | ||
| 153 | CONFIG_NF_CONNTRACK_SNMP=m | ||
| 154 | CONFIG_NF_CONNTRACK_PPTP=m | ||
| 155 | CONFIG_NF_CONNTRACK_SANE=m | ||
| 156 | CONFIG_NF_CONNTRACK_SIP=m | ||
| 157 | CONFIG_NF_CONNTRACK_TFTP=m | ||
| 158 | CONFIG_NF_CT_NETLINK=m | ||
| 159 | CONFIG_NF_CT_NETLINK_TIMEOUT=m | ||
| 160 | CONFIG_NF_TABLES=m | ||
| 161 | CONFIG_NFT_EXTHDR=m | ||
| 162 | CONFIG_NFT_META=m | ||
| 163 | CONFIG_NFT_CT=m | ||
| 164 | CONFIG_NFT_COUNTER=m | ||
| 165 | CONFIG_NFT_LOG=m | ||
| 166 | CONFIG_NFT_LIMIT=m | ||
| 167 | CONFIG_NFT_NAT=m | ||
| 168 | CONFIG_NFT_COMPAT=m | ||
| 169 | CONFIG_NFT_HASH=m | ||
| 170 | CONFIG_NETFILTER_XT_SET=m | ||
| 171 | CONFIG_NETFILTER_XT_TARGET_AUDIT=m | ||
| 172 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | ||
| 173 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | ||
| 174 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=m | ||
| 175 | CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m | ||
| 176 | CONFIG_NETFILTER_XT_TARGET_CT=m | ||
| 177 | CONFIG_NETFILTER_XT_TARGET_DSCP=m | ||
| 178 | CONFIG_NETFILTER_XT_TARGET_HMARK=m | ||
| 179 | CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m | ||
| 180 | CONFIG_NETFILTER_XT_TARGET_LOG=m | ||
| 181 | CONFIG_NETFILTER_XT_TARGET_MARK=m | ||
| 182 | CONFIG_NETFILTER_XT_TARGET_NFLOG=m | ||
| 183 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | ||
| 184 | CONFIG_NETFILTER_XT_TARGET_TEE=m | ||
| 185 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
| 186 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | ||
| 187 | CONFIG_NETFILTER_XT_TARGET_SECMARK=m | ||
| 188 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | ||
| 189 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | ||
| 190 | CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m | ||
| 191 | CONFIG_NETFILTER_XT_MATCH_BPF=m | ||
| 192 | CONFIG_NETFILTER_XT_MATCH_CLUSTER=m | ||
| 193 | CONFIG_NETFILTER_XT_MATCH_COMMENT=m | ||
| 194 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m | ||
| 195 | CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m | ||
| 196 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m | ||
| 197 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=m | ||
| 198 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m | ||
| 199 | CONFIG_NETFILTER_XT_MATCH_CPU=m | ||
| 200 | CONFIG_NETFILTER_XT_MATCH_DCCP=m | ||
| 201 | CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m | ||
| 202 | CONFIG_NETFILTER_XT_MATCH_DSCP=m | ||
| 203 | CONFIG_NETFILTER_XT_MATCH_ESP=m | ||
| 204 | CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m | ||
| 205 | CONFIG_NETFILTER_XT_MATCH_HELPER=m | ||
| 206 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=m | ||
| 207 | CONFIG_NETFILTER_XT_MATCH_IPVS=m | ||
| 208 | CONFIG_NETFILTER_XT_MATCH_LENGTH=m | ||
| 209 | CONFIG_NETFILTER_XT_MATCH_LIMIT=m | ||
| 210 | CONFIG_NETFILTER_XT_MATCH_MAC=m | ||
| 211 | CONFIG_NETFILTER_XT_MATCH_MARK=m | ||
| 212 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m | ||
| 213 | CONFIG_NETFILTER_XT_MATCH_NFACCT=m | ||
| 214 | CONFIG_NETFILTER_XT_MATCH_OSF=m | ||
| 215 | CONFIG_NETFILTER_XT_MATCH_OWNER=m | ||
| 216 | CONFIG_NETFILTER_XT_MATCH_POLICY=m | ||
| 217 | CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m | ||
| 218 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m | ||
| 219 | CONFIG_NETFILTER_XT_MATCH_QUOTA=m | ||
| 220 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | ||
| 221 | CONFIG_NETFILTER_XT_MATCH_REALM=m | ||
| 222 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | ||
| 223 | CONFIG_NETFILTER_XT_MATCH_STATE=m | ||
| 224 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | ||
| 225 | CONFIG_NETFILTER_XT_MATCH_STRING=m | ||
| 226 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=m | ||
| 227 | CONFIG_NETFILTER_XT_MATCH_TIME=m | ||
| 228 | CONFIG_NETFILTER_XT_MATCH_U32=m | ||
| 229 | CONFIG_IP_SET=m | ||
| 230 | CONFIG_IP_SET_BITMAP_IP=m | ||
| 231 | CONFIG_IP_SET_BITMAP_IPMAC=m | ||
| 232 | CONFIG_IP_SET_BITMAP_PORT=m | ||
| 233 | CONFIG_IP_SET_HASH_IP=m | ||
| 234 | CONFIG_IP_SET_HASH_IPPORT=m | ||
| 235 | CONFIG_IP_SET_HASH_IPPORTIP=m | ||
| 236 | CONFIG_IP_SET_HASH_IPPORTNET=m | ||
| 237 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
| 238 | CONFIG_IP_SET_HASH_NET=m | ||
| 239 | CONFIG_IP_SET_HASH_NETNET=m | ||
| 240 | CONFIG_IP_SET_HASH_NETPORT=m | ||
| 241 | CONFIG_IP_SET_HASH_NETIFACE=m | ||
| 242 | CONFIG_IP_SET_LIST_SET=m | ||
| 243 | CONFIG_IP_VS=m | ||
| 244 | CONFIG_IP_VS_PROTO_TCP=y | ||
| 245 | CONFIG_IP_VS_PROTO_UDP=y | ||
| 246 | CONFIG_IP_VS_PROTO_ESP=y | ||
| 247 | CONFIG_IP_VS_PROTO_AH=y | ||
| 248 | CONFIG_IP_VS_RR=m | ||
| 249 | CONFIG_IP_VS_WRR=m | ||
| 250 | CONFIG_IP_VS_LC=m | ||
| 251 | CONFIG_IP_VS_WLC=m | ||
| 252 | CONFIG_IP_VS_LBLC=m | ||
| 253 | CONFIG_IP_VS_LBLCR=m | ||
| 254 | CONFIG_IP_VS_DH=m | ||
| 255 | CONFIG_IP_VS_SH=m | ||
| 256 | CONFIG_IP_VS_SED=m | ||
| 257 | CONFIG_IP_VS_NQ=m | ||
| 258 | CONFIG_IP_VS_FTP=m | ||
| 259 | CONFIG_IP_VS_PE_SIP=m | ||
| 260 | CONFIG_NF_CONNTRACK_IPV4=m | ||
| 261 | CONFIG_NF_TABLES_IPV4=m | ||
| 262 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
| 263 | CONFIG_NF_TABLES_ARP=m | ||
| 264 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
| 265 | CONFIG_IP_NF_IPTABLES=m | ||
| 266 | CONFIG_IP_NF_MATCH_AH=m | ||
| 267 | CONFIG_IP_NF_MATCH_ECN=m | ||
| 268 | CONFIG_IP_NF_MATCH_RPFILTER=m | ||
| 269 | CONFIG_IP_NF_MATCH_TTL=m | ||
| 270 | CONFIG_IP_NF_FILTER=m | ||
| 271 | CONFIG_IP_NF_TARGET_REJECT=m | ||
| 272 | CONFIG_IP_NF_NAT=m | ||
| 273 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
| 274 | CONFIG_IP_NF_MANGLE=m | ||
| 275 | CONFIG_IP_NF_TARGET_CLUSTERIP=m | ||
| 276 | CONFIG_IP_NF_TARGET_ECN=m | ||
| 277 | CONFIG_IP_NF_TARGET_TTL=m | ||
| 278 | CONFIG_IP_NF_RAW=m | ||
| 279 | CONFIG_IP_NF_SECURITY=m | ||
| 280 | CONFIG_IP_NF_ARPTABLES=m | ||
| 281 | CONFIG_IP_NF_ARPFILTER=m | ||
| 282 | CONFIG_IP_NF_ARP_MANGLE=m | ||
| 283 | CONFIG_NF_CONNTRACK_IPV6=m | ||
| 284 | CONFIG_NF_TABLES_IPV6=m | ||
| 285 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
| 286 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
| 287 | CONFIG_IP6_NF_IPTABLES=m | ||
| 288 | CONFIG_IP6_NF_MATCH_AH=m | ||
| 289 | CONFIG_IP6_NF_MATCH_EUI64=m | ||
| 290 | CONFIG_IP6_NF_MATCH_FRAG=m | ||
| 291 | CONFIG_IP6_NF_MATCH_OPTS=m | ||
| 292 | CONFIG_IP6_NF_MATCH_HL=m | ||
| 293 | CONFIG_IP6_NF_MATCH_IPV6HEADER=m | ||
| 294 | CONFIG_IP6_NF_MATCH_MH=m | ||
| 295 | CONFIG_IP6_NF_MATCH_RPFILTER=m | ||
| 296 | CONFIG_IP6_NF_MATCH_RT=m | ||
| 297 | CONFIG_IP6_NF_TARGET_HL=m | ||
| 298 | CONFIG_IP6_NF_FILTER=m | ||
| 299 | CONFIG_IP6_NF_TARGET_REJECT=m | ||
| 300 | CONFIG_IP6_NF_MANGLE=m | ||
| 301 | CONFIG_IP6_NF_RAW=m | ||
| 302 | CONFIG_IP6_NF_SECURITY=m | ||
| 303 | CONFIG_IP6_NF_NAT=m | ||
| 304 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | ||
| 305 | CONFIG_NF_TABLES_BRIDGE=m | ||
| 306 | CONFIG_NET_SCTPPROBE=m | ||
| 307 | CONFIG_RDS=m | ||
| 308 | CONFIG_RDS_RDMA=m | ||
| 309 | CONFIG_RDS_TCP=m | ||
| 310 | CONFIG_L2TP=m | ||
| 311 | CONFIG_L2TP_DEBUGFS=m | ||
| 312 | CONFIG_L2TP_V3=y | ||
| 313 | CONFIG_L2TP_IP=m | ||
| 314 | CONFIG_L2TP_ETH=m | ||
| 315 | CONFIG_BRIDGE=m | ||
| 316 | CONFIG_VLAN_8021Q=m | ||
| 317 | CONFIG_VLAN_8021Q_GVRP=y | ||
| 318 | CONFIG_NET_SCHED=y | ||
| 319 | CONFIG_NET_SCH_CBQ=m | ||
| 320 | CONFIG_NET_SCH_HTB=m | ||
| 321 | CONFIG_NET_SCH_HFSC=m | ||
| 322 | CONFIG_NET_SCH_PRIO=m | ||
| 323 | CONFIG_NET_SCH_MULTIQ=m | ||
| 324 | CONFIG_NET_SCH_RED=m | ||
| 325 | CONFIG_NET_SCH_SFB=m | ||
| 326 | CONFIG_NET_SCH_SFQ=m | ||
| 327 | CONFIG_NET_SCH_TEQL=m | ||
| 328 | CONFIG_NET_SCH_TBF=m | ||
| 329 | CONFIG_NET_SCH_GRED=m | ||
| 330 | CONFIG_NET_SCH_DSMARK=m | ||
| 331 | CONFIG_NET_SCH_NETEM=m | ||
| 332 | CONFIG_NET_SCH_DRR=m | ||
| 333 | CONFIG_NET_SCH_MQPRIO=m | ||
| 334 | CONFIG_NET_SCH_CHOKE=m | ||
| 335 | CONFIG_NET_SCH_QFQ=m | ||
| 336 | CONFIG_NET_SCH_CODEL=m | ||
| 337 | CONFIG_NET_SCH_FQ_CODEL=m | ||
| 338 | CONFIG_NET_SCH_INGRESS=m | ||
| 339 | CONFIG_NET_SCH_PLUG=m | ||
| 340 | CONFIG_NET_CLS_BASIC=m | ||
| 341 | CONFIG_NET_CLS_TCINDEX=m | ||
| 342 | CONFIG_NET_CLS_ROUTE4=m | ||
| 343 | CONFIG_NET_CLS_FW=m | ||
| 344 | CONFIG_NET_CLS_U32=m | ||
| 345 | CONFIG_CLS_U32_PERF=y | ||
| 346 | CONFIG_CLS_U32_MARK=y | ||
| 347 | CONFIG_NET_CLS_RSVP=m | ||
| 348 | CONFIG_NET_CLS_RSVP6=m | ||
| 349 | CONFIG_NET_CLS_FLOW=m | ||
| 350 | CONFIG_NET_CLS_CGROUP=y | ||
| 351 | CONFIG_NET_CLS_BPF=m | ||
| 352 | CONFIG_NET_CLS_ACT=y | ||
| 353 | CONFIG_NET_ACT_POLICE=m | ||
| 354 | CONFIG_NET_ACT_GACT=m | ||
| 355 | CONFIG_GACT_PROB=y | ||
| 356 | CONFIG_NET_ACT_MIRRED=m | ||
| 357 | CONFIG_NET_ACT_IPT=m | ||
| 358 | CONFIG_NET_ACT_NAT=m | ||
| 359 | CONFIG_NET_ACT_PEDIT=m | ||
| 360 | CONFIG_NET_ACT_SIMP=m | ||
| 361 | CONFIG_NET_ACT_SKBEDIT=m | ||
| 362 | CONFIG_NET_ACT_CSUM=m | ||
| 363 | CONFIG_DNS_RESOLVER=y | ||
| 364 | CONFIG_NETLINK_DIAG=m | ||
| 365 | CONFIG_CGROUP_NET_PRIO=y | ||
| 366 | CONFIG_BPF_JIT=y | ||
| 367 | CONFIG_NET_PKTGEN=m | ||
| 368 | CONFIG_NET_TCPPROBE=m | ||
| 369 | CONFIG_DEVTMPFS=y | ||
| 370 | CONFIG_DMA_CMA=y | ||
| 371 | CONFIG_CMA_SIZE_MBYTES=0 | ||
| 372 | CONFIG_CONNECTOR=y | ||
| 373 | CONFIG_ZRAM=m | ||
| 374 | CONFIG_BLK_DEV_LOOP=m | ||
| 375 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
| 376 | CONFIG_BLK_DEV_DRBD=m | ||
| 377 | CONFIG_BLK_DEV_NBD=m | ||
| 378 | CONFIG_BLK_DEV_RAM=y | ||
| 379 | CONFIG_BLK_DEV_RAM_SIZE=32768 | ||
| 380 | CONFIG_BLK_DEV_RAM_DAX=y | ||
| 381 | CONFIG_VIRTIO_BLK=y | ||
| 382 | CONFIG_ENCLOSURE_SERVICES=m | ||
| 383 | CONFIG_GENWQE=m | ||
| 384 | CONFIG_RAID_ATTRS=m | ||
| 385 | CONFIG_SCSI=y | ||
| 386 | CONFIG_BLK_DEV_SD=y | ||
| 387 | CONFIG_CHR_DEV_ST=m | ||
| 388 | CONFIG_CHR_DEV_OSST=m | ||
| 389 | CONFIG_BLK_DEV_SR=m | ||
| 390 | CONFIG_CHR_DEV_SG=y | ||
| 391 | CONFIG_CHR_DEV_SCH=m | ||
| 392 | CONFIG_SCSI_ENCLOSURE=m | ||
| 393 | CONFIG_SCSI_CONSTANTS=y | ||
| 394 | CONFIG_SCSI_LOGGING=y | ||
| 395 | CONFIG_SCSI_SPI_ATTRS=m | ||
| 396 | CONFIG_SCSI_FC_ATTRS=y | ||
| 397 | CONFIG_SCSI_SAS_LIBSAS=m | ||
| 398 | CONFIG_SCSI_SRP_ATTRS=m | ||
| 399 | CONFIG_ISCSI_TCP=m | ||
| 400 | CONFIG_SCSI_DEBUG=m | ||
| 401 | CONFIG_ZFCP=y | ||
| 402 | CONFIG_SCSI_VIRTIO=m | ||
| 403 | CONFIG_SCSI_DH=y | ||
| 404 | CONFIG_SCSI_DH_RDAC=m | ||
| 405 | CONFIG_SCSI_DH_HP_SW=m | ||
| 406 | CONFIG_SCSI_DH_EMC=m | ||
| 407 | CONFIG_SCSI_DH_ALUA=m | ||
| 408 | CONFIG_SCSI_OSD_INITIATOR=m | ||
| 409 | CONFIG_SCSI_OSD_ULD=m | ||
| 410 | CONFIG_MD=y | ||
| 411 | CONFIG_BLK_DEV_MD=y | ||
| 412 | CONFIG_MD_LINEAR=m | ||
| 413 | CONFIG_MD_MULTIPATH=m | ||
| 414 | CONFIG_MD_FAULTY=m | ||
| 415 | CONFIG_BLK_DEV_DM=m | ||
| 416 | CONFIG_DM_CRYPT=m | ||
| 417 | CONFIG_DM_SNAPSHOT=m | ||
| 418 | CONFIG_DM_THIN_PROVISIONING=m | ||
| 419 | CONFIG_DM_MIRROR=m | ||
| 420 | CONFIG_DM_LOG_USERSPACE=m | ||
| 421 | CONFIG_DM_RAID=m | ||
| 422 | CONFIG_DM_ZERO=m | ||
| 423 | CONFIG_DM_MULTIPATH=m | ||
| 424 | CONFIG_DM_MULTIPATH_QL=m | ||
| 425 | CONFIG_DM_MULTIPATH_ST=m | ||
| 426 | CONFIG_DM_DELAY=m | ||
| 427 | CONFIG_DM_UEVENT=y | ||
| 428 | CONFIG_DM_FLAKEY=m | ||
| 429 | CONFIG_DM_VERITY=m | ||
| 430 | CONFIG_DM_SWITCH=m | ||
| 431 | CONFIG_NETDEVICES=y | ||
| 432 | CONFIG_BONDING=m | ||
| 433 | CONFIG_DUMMY=m | ||
| 434 | CONFIG_EQUALIZER=m | ||
| 435 | CONFIG_IFB=m | ||
| 436 | CONFIG_MACVLAN=m | ||
| 437 | CONFIG_MACVTAP=m | ||
| 438 | CONFIG_VXLAN=m | ||
| 439 | CONFIG_TUN=m | ||
| 440 | CONFIG_VETH=m | ||
| 441 | CONFIG_VIRTIO_NET=m | ||
| 442 | CONFIG_NLMON=m | ||
| 443 | # CONFIG_NET_VENDOR_ARC is not set | ||
| 444 | # CONFIG_NET_VENDOR_CHELSIO is not set | ||
| 445 | # CONFIG_NET_VENDOR_INTEL is not set | ||
| 446 | # CONFIG_NET_VENDOR_MARVELL is not set | ||
| 447 | CONFIG_MLX4_EN=m | ||
| 448 | CONFIG_MLX5_CORE=m | ||
| 449 | CONFIG_MLX5_CORE_EN=y | ||
| 450 | # CONFIG_NET_VENDOR_NATSEMI is not set | ||
| 451 | CONFIG_PPP=m | ||
| 452 | CONFIG_PPP_BSDCOMP=m | ||
| 453 | CONFIG_PPP_DEFLATE=m | ||
| 454 | CONFIG_PPP_MPPE=m | ||
| 455 | CONFIG_PPPOE=m | ||
| 456 | CONFIG_PPTP=m | ||
| 457 | CONFIG_PPPOL2TP=m | ||
| 458 | CONFIG_PPP_ASYNC=m | ||
| 459 | CONFIG_PPP_SYNC_TTY=m | ||
| 460 | # CONFIG_INPUT_KEYBOARD is not set | ||
| 461 | # CONFIG_INPUT_MOUSE is not set | ||
| 462 | # CONFIG_SERIO is not set | ||
| 463 | CONFIG_LEGACY_PTY_COUNT=0 | ||
| 464 | CONFIG_HW_RANDOM_VIRTIO=m | ||
| 465 | CONFIG_RAW_DRIVER=m | ||
| 466 | CONFIG_HANGCHECK_TIMER=m | ||
| 467 | CONFIG_TN3270_FS=y | ||
| 468 | # CONFIG_HWMON is not set | ||
| 469 | CONFIG_WATCHDOG=y | ||
| 470 | CONFIG_WATCHDOG_NOWAYOUT=y | ||
| 471 | CONFIG_SOFT_WATCHDOG=m | ||
| 472 | CONFIG_DIAG288_WATCHDOG=m | ||
| 473 | # CONFIG_HID is not set | ||
| 474 | # CONFIG_USB_SUPPORT is not set | ||
| 475 | CONFIG_INFINIBAND=m | ||
| 476 | CONFIG_INFINIBAND_USER_ACCESS=m | ||
| 477 | CONFIG_MLX4_INFINIBAND=m | ||
| 478 | CONFIG_MLX5_INFINIBAND=m | ||
| 479 | CONFIG_VFIO=m | ||
| 480 | CONFIG_VFIO_PCI=m | ||
| 481 | CONFIG_VIRTIO_BALLOON=m | ||
| 482 | CONFIG_EXT4_FS=y | ||
| 483 | CONFIG_EXT4_FS_POSIX_ACL=y | ||
| 484 | CONFIG_EXT4_FS_SECURITY=y | ||
| 485 | CONFIG_EXT4_ENCRYPTION=y | ||
| 486 | CONFIG_JBD2_DEBUG=y | ||
| 487 | CONFIG_JFS_FS=m | ||
| 488 | CONFIG_JFS_POSIX_ACL=y | ||
| 489 | CONFIG_JFS_SECURITY=y | ||
| 490 | CONFIG_JFS_STATISTICS=y | ||
| 491 | CONFIG_XFS_FS=y | ||
| 492 | CONFIG_XFS_QUOTA=y | ||
| 493 | CONFIG_XFS_POSIX_ACL=y | ||
| 494 | CONFIG_XFS_RT=y | ||
| 495 | CONFIG_GFS2_FS=m | ||
| 496 | CONFIG_GFS2_FS_LOCKING_DLM=y | ||
| 497 | CONFIG_OCFS2_FS=m | ||
| 498 | CONFIG_BTRFS_FS=y | ||
| 499 | CONFIG_BTRFS_FS_POSIX_ACL=y | ||
| 500 | CONFIG_NILFS2_FS=m | ||
| 501 | CONFIG_FS_DAX=y | ||
| 502 | CONFIG_EXPORTFS_BLOCK_OPS=y | ||
| 503 | CONFIG_FANOTIFY=y | ||
| 504 | CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y | ||
| 505 | CONFIG_QUOTA_NETLINK_INTERFACE=y | ||
| 506 | CONFIG_QFMT_V1=m | ||
| 507 | CONFIG_QFMT_V2=m | ||
| 508 | CONFIG_AUTOFS4_FS=m | ||
| 509 | CONFIG_FUSE_FS=y | ||
| 510 | CONFIG_CUSE=m | ||
| 511 | CONFIG_OVERLAY_FS=m | ||
| 512 | CONFIG_OVERLAY_FS_REDIRECT_DIR=y | ||
| 513 | CONFIG_FSCACHE=m | ||
| 514 | CONFIG_CACHEFILES=m | ||
| 515 | CONFIG_ISO9660_FS=y | ||
| 516 | CONFIG_JOLIET=y | ||
| 517 | CONFIG_ZISOFS=y | ||
| 518 | CONFIG_UDF_FS=m | ||
| 519 | CONFIG_MSDOS_FS=m | ||
| 520 | CONFIG_VFAT_FS=m | ||
| 521 | CONFIG_NTFS_FS=m | ||
| 522 | CONFIG_NTFS_RW=y | ||
| 523 | CONFIG_PROC_KCORE=y | ||
| 524 | CONFIG_TMPFS=y | ||
| 525 | CONFIG_TMPFS_POSIX_ACL=y | ||
| 526 | CONFIG_HUGETLBFS=y | ||
| 527 | CONFIG_CONFIGFS_FS=m | ||
| 528 | CONFIG_ECRYPT_FS=m | ||
| 529 | CONFIG_CRAMFS=m | ||
| 530 | CONFIG_SQUASHFS=m | ||
| 531 | CONFIG_SQUASHFS_XATTR=y | ||
| 532 | CONFIG_SQUASHFS_LZO=y | ||
| 533 | CONFIG_SQUASHFS_XZ=y | ||
| 534 | CONFIG_ROMFS_FS=m | ||
| 535 | CONFIG_NFS_FS=m | ||
| 536 | CONFIG_NFS_V3_ACL=y | ||
| 537 | CONFIG_NFS_V4=m | ||
| 538 | CONFIG_NFS_SWAP=y | ||
| 539 | CONFIG_NFSD=m | ||
| 540 | CONFIG_NFSD_V3_ACL=y | ||
| 541 | CONFIG_NFSD_V4=y | ||
| 542 | CONFIG_NFSD_V4_SECURITY_LABEL=y | ||
| 543 | CONFIG_CIFS=m | ||
| 544 | CONFIG_CIFS_STATS=y | ||
| 545 | CONFIG_CIFS_STATS2=y | ||
| 546 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
| 547 | CONFIG_CIFS_UPCALL=y | ||
| 548 | CONFIG_CIFS_XATTR=y | ||
| 549 | CONFIG_CIFS_POSIX=y | ||
| 550 | # CONFIG_CIFS_DEBUG is not set | ||
| 551 | CONFIG_CIFS_DFS_UPCALL=y | ||
| 552 | CONFIG_NLS_DEFAULT="utf8" | ||
| 553 | CONFIG_NLS_CODEPAGE_437=m | ||
| 554 | CONFIG_NLS_CODEPAGE_850=m | ||
| 555 | CONFIG_NLS_ASCII=m | ||
| 556 | CONFIG_NLS_ISO8859_1=m | ||
| 557 | CONFIG_NLS_ISO8859_15=m | ||
| 558 | CONFIG_NLS_UTF8=m | ||
| 559 | CONFIG_DLM=m | ||
| 560 | CONFIG_PRINTK_TIME=y | ||
| 561 | CONFIG_DEBUG_INFO=y | ||
| 562 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 563 | CONFIG_GDB_SCRIPTS=y | ||
| 564 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
| 565 | CONFIG_FRAME_WARN=1024 | ||
| 566 | CONFIG_UNUSED_SYMBOLS=y | ||
| 567 | CONFIG_MAGIC_SYSRQ=y | ||
| 568 | CONFIG_DEBUG_MEMORY_INIT=y | ||
| 569 | CONFIG_PANIC_ON_OOPS=y | ||
| 570 | CONFIG_RCU_TORTURE_TEST=m | ||
| 571 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | ||
| 572 | CONFIG_LATENCYTOP=y | ||
| 573 | CONFIG_SCHED_TRACER=y | ||
| 574 | CONFIG_FTRACE_SYSCALLS=y | ||
| 575 | CONFIG_STACK_TRACER=y | ||
| 576 | CONFIG_BLK_DEV_IO_TRACE=y | ||
| 577 | CONFIG_FUNCTION_PROFILER=y | ||
| 578 | CONFIG_HIST_TRIGGERS=y | ||
| 579 | CONFIG_LKDTM=m | ||
| 580 | CONFIG_PERCPU_TEST=m | ||
| 581 | CONFIG_ATOMIC64_SELFTEST=y | ||
| 582 | CONFIG_TEST_BPF=m | ||
| 583 | CONFIG_BUG_ON_DATA_CORRUPTION=y | ||
| 584 | CONFIG_S390_PTDUMP=y | ||
| 585 | CONFIG_PERSISTENT_KEYRINGS=y | ||
| 586 | CONFIG_BIG_KEYS=y | ||
| 587 | CONFIG_ENCRYPTED_KEYS=m | ||
| 588 | CONFIG_SECURITY=y | ||
| 589 | CONFIG_SECURITY_NETWORK=y | ||
| 590 | CONFIG_SECURITY_SELINUX=y | ||
| 591 | CONFIG_SECURITY_SELINUX_BOOTPARAM=y | ||
| 592 | CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 | ||
| 593 | CONFIG_SECURITY_SELINUX_DISABLE=y | ||
| 594 | CONFIG_INTEGRITY_SIGNATURE=y | ||
| 595 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | ||
| 596 | CONFIG_IMA=y | ||
| 597 | CONFIG_IMA_WRITE_POLICY=y | ||
| 598 | CONFIG_IMA_APPRAISE=y | ||
| 599 | CONFIG_CRYPTO_DH=m | ||
| 600 | CONFIG_CRYPTO_ECDH=m | ||
| 601 | CONFIG_CRYPTO_USER=m | ||
| 602 | # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set | ||
| 603 | CONFIG_CRYPTO_PCRYPT=m | ||
| 604 | CONFIG_CRYPTO_CRYPTD=m | ||
| 605 | CONFIG_CRYPTO_MCRYPTD=m | ||
| 606 | CONFIG_CRYPTO_TEST=m | ||
| 607 | CONFIG_CRYPTO_CHACHA20POLY1305=m | ||
| 608 | CONFIG_CRYPTO_LRW=m | ||
| 609 | CONFIG_CRYPTO_PCBC=m | ||
| 610 | CONFIG_CRYPTO_KEYWRAP=m | ||
| 611 | CONFIG_CRYPTO_XCBC=m | ||
| 612 | CONFIG_CRYPTO_VMAC=m | ||
| 613 | CONFIG_CRYPTO_CRC32=m | ||
| 614 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
| 615 | CONFIG_CRYPTO_RMD128=m | ||
| 616 | CONFIG_CRYPTO_RMD160=m | ||
| 617 | CONFIG_CRYPTO_RMD256=m | ||
| 618 | CONFIG_CRYPTO_RMD320=m | ||
| 619 | CONFIG_CRYPTO_SHA512=m | ||
| 620 | CONFIG_CRYPTO_SHA3=m | ||
| 621 | CONFIG_CRYPTO_TGR192=m | ||
| 622 | CONFIG_CRYPTO_WP512=m | ||
| 623 | CONFIG_CRYPTO_AES_TI=m | ||
| 624 | CONFIG_CRYPTO_ANUBIS=m | ||
| 625 | CONFIG_CRYPTO_BLOWFISH=m | ||
| 626 | CONFIG_CRYPTO_CAMELLIA=m | ||
| 627 | CONFIG_CRYPTO_CAST5=m | ||
| 628 | CONFIG_CRYPTO_CAST6=m | ||
| 629 | CONFIG_CRYPTO_FCRYPT=m | ||
| 630 | CONFIG_CRYPTO_KHAZAD=m | ||
| 631 | CONFIG_CRYPTO_SALSA20=m | ||
| 632 | CONFIG_CRYPTO_SEED=m | ||
| 633 | CONFIG_CRYPTO_SERPENT=m | ||
| 634 | CONFIG_CRYPTO_TEA=m | ||
| 635 | CONFIG_CRYPTO_TWOFISH=m | ||
| 636 | CONFIG_CRYPTO_842=m | ||
| 637 | CONFIG_CRYPTO_LZ4=m | ||
| 638 | CONFIG_CRYPTO_LZ4HC=m | ||
| 639 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
| 640 | CONFIG_CRYPTO_USER_API_HASH=m | ||
| 641 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | ||
| 642 | CONFIG_CRYPTO_USER_API_RNG=m | ||
| 643 | CONFIG_CRYPTO_USER_API_AEAD=m | ||
| 644 | CONFIG_ZCRYPT=m | ||
| 645 | CONFIG_PKEY=m | ||
| 646 | CONFIG_CRYPTO_PAES_S390=m | ||
| 647 | CONFIG_CRYPTO_SHA1_S390=m | ||
| 648 | CONFIG_CRYPTO_SHA256_S390=m | ||
| 649 | CONFIG_CRYPTO_SHA512_S390=m | ||
| 650 | CONFIG_CRYPTO_DES_S390=m | ||
| 651 | CONFIG_CRYPTO_AES_S390=m | ||
| 652 | CONFIG_CRYPTO_GHASH_S390=m | ||
| 653 | CONFIG_CRYPTO_CRC32_S390=y | ||
| 654 | CONFIG_CRC7=m | ||
| 655 | CONFIG_CRC8=m | ||
| 656 | CONFIG_CORDIC=m | ||
| 657 | CONFIG_CMM=m | ||
| 658 | CONFIG_APPLDATA_BASE=y | ||
| 659 | CONFIG_KVM=m | ||
| 660 | CONFIG_KVM_S390_UCONTROL=y | ||
| 661 | CONFIG_VHOST_NET=m | ||
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 20ed149e1137..c105bcc6d7a6 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -25,13 +25,13 @@ CONFIG_CPUSETS=y | |||
| 25 | CONFIG_CGROUP_DEVICE=y | 25 | CONFIG_CGROUP_DEVICE=y |
| 26 | CONFIG_CGROUP_CPUACCT=y | 26 | CONFIG_CGROUP_CPUACCT=y |
| 27 | CONFIG_CGROUP_PERF=y | 27 | CONFIG_CGROUP_PERF=y |
| 28 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 29 | CONFIG_NAMESPACES=y | 28 | CONFIG_NAMESPACES=y |
| 30 | CONFIG_USER_NS=y | 29 | CONFIG_USER_NS=y |
| 31 | CONFIG_SCHED_AUTOGROUP=y | 30 | CONFIG_SCHED_AUTOGROUP=y |
| 32 | CONFIG_BLK_DEV_INITRD=y | 31 | CONFIG_BLK_DEV_INITRD=y |
| 33 | CONFIG_EXPERT=y | 32 | CONFIG_EXPERT=y |
| 34 | # CONFIG_SYSFS_SYSCALL is not set | 33 | # CONFIG_SYSFS_SYSCALL is not set |
| 34 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 35 | CONFIG_BPF_SYSCALL=y | 35 | CONFIG_BPF_SYSCALL=y |
| 36 | CONFIG_USERFAULTFD=y | 36 | CONFIG_USERFAULTFD=y |
| 37 | # CONFIG_COMPAT_BRK is not set | 37 | # CONFIG_COMPAT_BRK is not set |
| @@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y | |||
| 45 | CONFIG_MODULE_FORCE_UNLOAD=y | 45 | CONFIG_MODULE_FORCE_UNLOAD=y |
| 46 | CONFIG_MODVERSIONS=y | 46 | CONFIG_MODVERSIONS=y |
| 47 | CONFIG_MODULE_SRCVERSION_ALL=y | 47 | CONFIG_MODULE_SRCVERSION_ALL=y |
| 48 | CONFIG_MODULE_SIG=y | ||
| 49 | CONFIG_MODULE_SIG_SHA256=y | ||
| 48 | CONFIG_BLK_DEV_INTEGRITY=y | 50 | CONFIG_BLK_DEV_INTEGRITY=y |
| 49 | CONFIG_BLK_DEV_THROTTLING=y | 51 | CONFIG_BLK_DEV_THROTTLING=y |
| 50 | CONFIG_BLK_WBT=y | 52 | CONFIG_BLK_WBT=y |
| @@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y | |||
| 62 | CONFIG_NR_CPUS=512 | 64 | CONFIG_NR_CPUS=512 |
| 63 | CONFIG_NUMA=y | 65 | CONFIG_NUMA=y |
| 64 | CONFIG_HZ_100=y | 66 | CONFIG_HZ_100=y |
| 67 | CONFIG_KEXEC_FILE=y | ||
| 65 | CONFIG_MEMORY_HOTPLUG=y | 68 | CONFIG_MEMORY_HOTPLUG=y |
| 66 | CONFIG_MEMORY_HOTREMOVE=y | 69 | CONFIG_MEMORY_HOTREMOVE=y |
| 67 | CONFIG_KSM=y | 70 | CONFIG_KSM=y |
| @@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m | |||
| 301 | CONFIG_IP6_NF_NAT=m | 304 | CONFIG_IP6_NF_NAT=m |
| 302 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 305 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
| 303 | CONFIG_NF_TABLES_BRIDGE=m | 306 | CONFIG_NF_TABLES_BRIDGE=m |
| 304 | CONFIG_NET_SCTPPROBE=m | ||
| 305 | CONFIG_RDS=m | 307 | CONFIG_RDS=m |
| 306 | CONFIG_RDS_RDMA=m | 308 | CONFIG_RDS_RDMA=m |
| 307 | CONFIG_RDS_TCP=m | 309 | CONFIG_RDS_TCP=m |
| @@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m | |||
| 359 | CONFIG_NET_ACT_SKBEDIT=m | 361 | CONFIG_NET_ACT_SKBEDIT=m |
| 360 | CONFIG_NET_ACT_CSUM=m | 362 | CONFIG_NET_ACT_CSUM=m |
| 361 | CONFIG_DNS_RESOLVER=y | 363 | CONFIG_DNS_RESOLVER=y |
| 364 | CONFIG_OPENVSWITCH=m | ||
| 362 | CONFIG_NETLINK_DIAG=m | 365 | CONFIG_NETLINK_DIAG=m |
| 363 | CONFIG_CGROUP_NET_PRIO=y | 366 | CONFIG_CGROUP_NET_PRIO=y |
| 364 | CONFIG_BPF_JIT=y | 367 | CONFIG_BPF_JIT=y |
| 365 | CONFIG_NET_PKTGEN=m | 368 | CONFIG_NET_PKTGEN=m |
| 366 | CONFIG_NET_TCPPROBE=m | ||
| 367 | CONFIG_DEVTMPFS=y | 369 | CONFIG_DEVTMPFS=y |
| 368 | CONFIG_DMA_CMA=y | 370 | CONFIG_DMA_CMA=y |
| 369 | CONFIG_CMA_SIZE_MBYTES=0 | 371 | CONFIG_CMA_SIZE_MBYTES=0 |
| @@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m | |||
| 375 | CONFIG_BLK_DEV_NBD=m | 377 | CONFIG_BLK_DEV_NBD=m |
| 376 | CONFIG_BLK_DEV_RAM=y | 378 | CONFIG_BLK_DEV_RAM=y |
| 377 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 379 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
| 378 | CONFIG_BLK_DEV_RAM_DAX=y | ||
| 379 | CONFIG_VIRTIO_BLK=y | 380 | CONFIG_VIRTIO_BLK=y |
| 381 | CONFIG_BLK_DEV_RBD=m | ||
| 382 | CONFIG_BLK_DEV_NVME=m | ||
| 380 | CONFIG_ENCLOSURE_SERVICES=m | 383 | CONFIG_ENCLOSURE_SERVICES=m |
| 381 | CONFIG_GENWQE=m | 384 | CONFIG_GENWQE=m |
| 382 | CONFIG_RAID_ATTRS=m | 385 | CONFIG_RAID_ATTRS=m |
| @@ -455,6 +458,7 @@ CONFIG_PPTP=m | |||
| 455 | CONFIG_PPPOL2TP=m | 458 | CONFIG_PPPOL2TP=m |
| 456 | CONFIG_PPP_ASYNC=m | 459 | CONFIG_PPP_ASYNC=m |
| 457 | CONFIG_PPP_SYNC_TTY=m | 460 | CONFIG_PPP_SYNC_TTY=m |
| 461 | CONFIG_INPUT_EVDEV=y | ||
| 458 | # CONFIG_INPUT_KEYBOARD is not set | 462 | # CONFIG_INPUT_KEYBOARD is not set |
| 459 | # CONFIG_INPUT_MOUSE is not set | 463 | # CONFIG_INPUT_MOUSE is not set |
| 460 | # CONFIG_SERIO is not set | 464 | # CONFIG_SERIO is not set |
| @@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y | |||
| 468 | CONFIG_WATCHDOG_NOWAYOUT=y | 472 | CONFIG_WATCHDOG_NOWAYOUT=y |
| 469 | CONFIG_SOFT_WATCHDOG=m | 473 | CONFIG_SOFT_WATCHDOG=m |
| 470 | CONFIG_DIAG288_WATCHDOG=m | 474 | CONFIG_DIAG288_WATCHDOG=m |
| 475 | CONFIG_DRM=y | ||
| 476 | CONFIG_DRM_VIRTIO_GPU=y | ||
| 477 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
| 471 | # CONFIG_HID is not set | 478 | # CONFIG_HID is not set |
| 472 | # CONFIG_USB_SUPPORT is not set | 479 | # CONFIG_USB_SUPPORT is not set |
| 473 | CONFIG_INFINIBAND=m | 480 | CONFIG_INFINIBAND=m |
| @@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m | |||
| 476 | CONFIG_MLX5_INFINIBAND=m | 483 | CONFIG_MLX5_INFINIBAND=m |
| 477 | CONFIG_VFIO=m | 484 | CONFIG_VFIO=m |
| 478 | CONFIG_VFIO_PCI=m | 485 | CONFIG_VFIO_PCI=m |
| 486 | CONFIG_VIRTIO_PCI=m | ||
| 479 | CONFIG_VIRTIO_BALLOON=m | 487 | CONFIG_VIRTIO_BALLOON=m |
| 488 | CONFIG_VIRTIO_INPUT=y | ||
| 480 | CONFIG_EXT4_FS=y | 489 | CONFIG_EXT4_FS=y |
| 481 | CONFIG_EXT4_FS_POSIX_ACL=y | 490 | CONFIG_EXT4_FS_POSIX_ACL=y |
| 482 | CONFIG_EXT4_FS_SECURITY=y | 491 | CONFIG_EXT4_FS_SECURITY=y |
| @@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m | |||
| 507 | CONFIG_FUSE_FS=y | 516 | CONFIG_FUSE_FS=y |
| 508 | CONFIG_CUSE=m | 517 | CONFIG_CUSE=m |
| 509 | CONFIG_OVERLAY_FS=m | 518 | CONFIG_OVERLAY_FS=m |
| 510 | CONFIG_OVERLAY_FS_REDIRECT_DIR=y | ||
| 511 | CONFIG_FSCACHE=m | 519 | CONFIG_FSCACHE=m |
| 512 | CONFIG_CACHEFILES=m | 520 | CONFIG_CACHEFILES=m |
| 513 | CONFIG_ISO9660_FS=y | 521 | CONFIG_ISO9660_FS=y |
| @@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y | |||
| 592 | CONFIG_INTEGRITY_SIGNATURE=y | 600 | CONFIG_INTEGRITY_SIGNATURE=y |
| 593 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y | 601 | CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y |
| 594 | CONFIG_IMA=y | 602 | CONFIG_IMA=y |
| 603 | CONFIG_IMA_DEFAULT_HASH_SHA256=y | ||
| 595 | CONFIG_IMA_WRITE_POLICY=y | 604 | CONFIG_IMA_WRITE_POLICY=y |
| 596 | CONFIG_IMA_APPRAISE=y | 605 | CONFIG_IMA_APPRAISE=y |
| 606 | CONFIG_CRYPTO_FIPS=y | ||
| 597 | CONFIG_CRYPTO_DH=m | 607 | CONFIG_CRYPTO_DH=m |
| 598 | CONFIG_CRYPTO_ECDH=m | 608 | CONFIG_CRYPTO_ECDH=m |
| 599 | CONFIG_CRYPTO_USER=m | 609 | CONFIG_CRYPTO_USER=m |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 46a3178d8bc6..f40600eb1762 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y | |||
| 8 | CONFIG_TASK_DELAY_ACCT=y | 8 | CONFIG_TASK_DELAY_ACCT=y |
| 9 | CONFIG_TASK_XACCT=y | 9 | CONFIG_TASK_XACCT=y |
| 10 | CONFIG_TASK_IO_ACCOUNTING=y | 10 | CONFIG_TASK_IO_ACCOUNTING=y |
| 11 | # CONFIG_CPU_ISOLATION is not set | ||
| 11 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
| 12 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
| 13 | CONFIG_CGROUPS=y | 14 | CONFIG_CGROUPS=y |
| @@ -23,12 +24,12 @@ CONFIG_CPUSETS=y | |||
| 23 | CONFIG_CGROUP_DEVICE=y | 24 | CONFIG_CGROUP_DEVICE=y |
| 24 | CONFIG_CGROUP_CPUACCT=y | 25 | CONFIG_CGROUP_CPUACCT=y |
| 25 | CONFIG_CGROUP_PERF=y | 26 | CONFIG_CGROUP_PERF=y |
| 26 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 27 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
| 28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
| 29 | CONFIG_BLK_DEV_INITRD=y | 29 | CONFIG_BLK_DEV_INITRD=y |
| 30 | CONFIG_EXPERT=y | 30 | CONFIG_EXPERT=y |
| 31 | # CONFIG_SYSFS_SYSCALL is not set | 31 | # CONFIG_SYSFS_SYSCALL is not set |
| 32 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 32 | CONFIG_BPF_SYSCALL=y | 33 | CONFIG_BPF_SYSCALL=y |
| 33 | CONFIG_USERFAULTFD=y | 34 | CONFIG_USERFAULTFD=y |
| 34 | # CONFIG_COMPAT_BRK is not set | 35 | # CONFIG_COMPAT_BRK is not set |
| @@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y | |||
| 47 | CONFIG_NR_CPUS=256 | 48 | CONFIG_NR_CPUS=256 |
| 48 | CONFIG_NUMA=y | 49 | CONFIG_NUMA=y |
| 49 | CONFIG_HZ_100=y | 50 | CONFIG_HZ_100=y |
| 51 | CONFIG_KEXEC_FILE=y | ||
| 50 | CONFIG_MEMORY_HOTPLUG=y | 52 | CONFIG_MEMORY_HOTPLUG=y |
| 51 | CONFIG_MEMORY_HOTREMOVE=y | 53 | CONFIG_MEMORY_HOTREMOVE=y |
| 52 | CONFIG_KSM=y | 54 | CONFIG_KSM=y |
| @@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m | |||
| 129 | CONFIG_TUN=m | 131 | CONFIG_TUN=m |
| 130 | CONFIG_VIRTIO_NET=y | 132 | CONFIG_VIRTIO_NET=y |
| 131 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 133 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 134 | # CONFIG_NET_VENDOR_CORTINA is not set | ||
| 132 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 135 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 136 | # CONFIG_NET_VENDOR_SOCIONEXT is not set | ||
| 133 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | 137 | # CONFIG_NET_VENDOR_SYNOPSYS is not set |
| 134 | # CONFIG_INPUT is not set | 138 | # CONFIG_INPUT is not set |
| 135 | # CONFIG_SERIO is not set | 139 | # CONFIG_SERIO is not set |
| 140 | # CONFIG_VT is not set | ||
| 136 | CONFIG_DEVKMEM=y | 141 | CONFIG_DEVKMEM=y |
| 137 | CONFIG_RAW_DRIVER=m | 142 | CONFIG_RAW_DRIVER=m |
| 138 | CONFIG_VIRTIO_BALLOON=y | 143 | CONFIG_VIRTIO_BALLOON=y |
| @@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | |||
| 177 | CONFIG_STACK_TRACER=y | 182 | CONFIG_STACK_TRACER=y |
| 178 | CONFIG_BLK_DEV_IO_TRACE=y | 183 | CONFIG_BLK_DEV_IO_TRACE=y |
| 179 | CONFIG_FUNCTION_PROFILER=y | 184 | CONFIG_FUNCTION_PROFILER=y |
| 180 | CONFIG_KPROBES_SANITY_TEST=y | 185 | # CONFIG_RUNTIME_TESTING_MENU is not set |
| 181 | CONFIG_S390_PTDUMP=y | 186 | CONFIG_S390_PTDUMP=y |
| 182 | CONFIG_CRYPTO_CRYPTD=m | 187 | CONFIG_CRYPTO_CRYPTD=m |
| 188 | CONFIG_CRYPTO_AUTHENC=m | ||
| 183 | CONFIG_CRYPTO_TEST=m | 189 | CONFIG_CRYPTO_TEST=m |
| 184 | CONFIG_CRYPTO_CCM=m | 190 | CONFIG_CRYPTO_CCM=m |
| 185 | CONFIG_CRYPTO_GCM=m | 191 | CONFIG_CRYPTO_GCM=m |
| 186 | CONFIG_CRYPTO_CBC=y | 192 | CONFIG_CRYPTO_CBC=y |
| 193 | CONFIG_CRYPTO_CFB=m | ||
| 187 | CONFIG_CRYPTO_CTS=m | 194 | CONFIG_CRYPTO_CTS=m |
| 188 | CONFIG_CRYPTO_LRW=m | 195 | CONFIG_CRYPTO_LRW=m |
| 189 | CONFIG_CRYPTO_PCBC=m | 196 | CONFIG_CRYPTO_PCBC=m |
| @@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m | |||
| 213 | CONFIG_CRYPTO_SALSA20=m | 220 | CONFIG_CRYPTO_SALSA20=m |
| 214 | CONFIG_CRYPTO_SEED=m | 221 | CONFIG_CRYPTO_SEED=m |
| 215 | CONFIG_CRYPTO_SERPENT=m | 222 | CONFIG_CRYPTO_SERPENT=m |
| 223 | CONFIG_CRYPTO_SM4=m | ||
| 224 | CONFIG_CRYPTO_SPECK=m | ||
| 216 | CONFIG_CRYPTO_TEA=m | 225 | CONFIG_CRYPTO_TEA=m |
| 217 | CONFIG_CRYPTO_TWOFISH=m | 226 | CONFIG_CRYPTO_TWOFISH=m |
| 218 | CONFIG_CRYPTO_DEFLATE=m | 227 | CONFIG_CRYPTO_DEFLATE=m |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 43bbe63e2992..06b513d192b9 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
| @@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb) | |||
| 320 | 320 | ||
| 321 | if (sb->s_root) | 321 | if (sb->s_root) |
| 322 | hypfs_delete_tree(sb->s_root); | 322 | hypfs_delete_tree(sb->s_root); |
| 323 | if (sb_info->update_file) | 323 | if (sb_info && sb_info->update_file) |
| 324 | hypfs_remove(sb_info->update_file); | 324 | hypfs_remove(sb_info->update_file); |
| 325 | kfree(sb->s_fs_info); | 325 | kfree(sb->s_fs_info); |
| 326 | sb->s_fs_info = NULL; | 326 | sb->s_fs_info = NULL; |
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 1d708a419326..825dd0f7f221 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h | |||
| @@ -46,4 +46,27 @@ | |||
| 46 | static inline void crash_setup_regs(struct pt_regs *newregs, | 46 | static inline void crash_setup_regs(struct pt_regs *newregs, |
| 47 | struct pt_regs *oldregs) { } | 47 | struct pt_regs *oldregs) { } |
| 48 | 48 | ||
| 49 | struct kimage; | ||
| 50 | struct s390_load_data { | ||
| 51 | /* Pointer to the kernel buffer. Used to register cmdline etc. */ | ||
| 52 | void *kernel_buf; | ||
| 53 | |||
| 54 | /* Total size of loaded segments in memory. Used as an offset. */ | ||
| 55 | size_t memsz; | ||
| 56 | |||
| 57 | /* Load address of initrd. Used to register INITRD_START in kernel. */ | ||
| 58 | unsigned long initrd_load_addr; | ||
| 59 | }; | ||
| 60 | |||
| 61 | int kexec_file_add_purgatory(struct kimage *image, | ||
| 62 | struct s390_load_data *data); | ||
| 63 | int kexec_file_add_initrd(struct kimage *image, | ||
| 64 | struct s390_load_data *data, | ||
| 65 | char *initrd, unsigned long initrd_len); | ||
| 66 | int *kexec_file_update_kernel(struct kimage *image, | ||
| 67 | struct s390_load_data *data); | ||
| 68 | |||
| 69 | extern const struct kexec_file_ops s390_kexec_image_ops; | ||
| 70 | extern const struct kexec_file_ops s390_kexec_elf_ops; | ||
| 71 | |||
| 49 | #endif /*_S390_KEXEC_H */ | 72 | #endif /*_S390_KEXEC_H */ |
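For orientation, here is a minimal, hedged sketch of how a file loader is expected to thread struct s390_load_data through the helpers declared above. It condenses the flow of the image and ELF loaders added later in this series; the function name is hypothetical and the loader-specific kernel setup is elided.

    /* Condensed, illustrative flow only; a real loader first adds the
     * kernel segments itself (consuming kernel/kernel_len) and fills in
     * data.kernel_buf and data.memsz before the steps below.
     */
    static void *example_file_load(struct kimage *image,
    			       char *kernel, unsigned long kernel_len,
    			       char *initrd, unsigned long initrd_len)
    {
    	struct s390_load_data data = {0};
    	int ret;
    
    	/* loader-specific: add kernel segments, set data.kernel_buf/memsz */
    
    	if (initrd) {			/* place the initrd behind the kernel */
    		ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
    		if (ret)
    			return ERR_PTR(ret);
    	}
    
    	ret = kexec_file_add_purgatory(image, &data);	/* entry point + digest */
    	if (ret)
    		return ERR_PTR(ret);
    
    	/* patch cmdline, oldmem and initrd location into the kernel buffer */
    	return kexec_file_update_kernel(image, &data);
    }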
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h new file mode 100644 index 000000000000..e297bcfc476f --- /dev/null +++ b/arch/s390/include/asm/purgatory.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright IBM Corp. 2018 | ||
| 4 | * | ||
| 5 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _S390_PURGATORY_H_ | ||
| 9 | #define _S390_PURGATORY_H_ | ||
| 10 | #ifndef __ASSEMBLY__ | ||
| 11 | |||
| 12 | #include <linux/purgatory.h> | ||
| 13 | |||
| 14 | int verify_sha256_digest(void); | ||
| 15 | |||
| 16 | #endif /* __ASSEMBLY__ */ | ||
| 17 | #endif /* _S390_PURGATORY_H_ */ | ||
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 124154fdfc97..9c30ebe046f3 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | 2 | /* |
| 3 | * S390 version | 3 | * S390 version |
| 4 | * Copyright IBM Corp. 1999, 2010 | 4 | * Copyright IBM Corp. 1999, 2017 |
| 5 | */ | 5 | */ |
| 6 | #ifndef _ASM_S390_SETUP_H | 6 | #ifndef _ASM_S390_SETUP_H |
| 7 | #define _ASM_S390_SETUP_H | 7 | #define _ASM_S390_SETUP_H |
| @@ -37,17 +37,31 @@ | |||
| 37 | #define LPP_MAGIC _BITUL(31) | 37 | #define LPP_MAGIC _BITUL(31) |
| 38 | #define LPP_PID_MASK _AC(0xffffffff, UL) | 38 | #define LPP_PID_MASK _AC(0xffffffff, UL) |
| 39 | 39 | ||
| 40 | /* Offsets to entry points in kernel/head.S */ | ||
| 41 | |||
| 42 | #define STARTUP_NORMAL_OFFSET 0x10000 | ||
| 43 | #define STARTUP_KDUMP_OFFSET 0x10010 | ||
| 44 | |||
| 45 | /* Offsets to parameters in kernel/head.S */ | ||
| 46 | |||
| 47 | #define IPL_DEVICE_OFFSET 0x10400 | ||
| 48 | #define INITRD_START_OFFSET 0x10408 | ||
| 49 | #define INITRD_SIZE_OFFSET 0x10410 | ||
| 50 | #define OLDMEM_BASE_OFFSET 0x10418 | ||
| 51 | #define OLDMEM_SIZE_OFFSET 0x10420 | ||
| 52 | #define COMMAND_LINE_OFFSET 0x10480 | ||
| 53 | |||
| 40 | #ifndef __ASSEMBLY__ | 54 | #ifndef __ASSEMBLY__ |
| 41 | 55 | ||
| 42 | #include <asm/lowcore.h> | 56 | #include <asm/lowcore.h> |
| 43 | #include <asm/types.h> | 57 | #include <asm/types.h> |
| 44 | 58 | ||
| 45 | #define IPL_DEVICE (*(unsigned long *) (0x10400)) | 59 | #define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET)) |
| 46 | #define INITRD_START (*(unsigned long *) (0x10408)) | 60 | #define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET)) |
| 47 | #define INITRD_SIZE (*(unsigned long *) (0x10410)) | 61 | #define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET)) |
| 48 | #define OLDMEM_BASE (*(unsigned long *) (0x10418)) | 62 | #define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET)) |
| 49 | #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) | 63 | #define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) |
| 50 | #define COMMAND_LINE ((char *) (0x10480)) | 64 | #define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) |
| 51 | 65 | ||
| 52 | extern int memory_end_set; | 66 | extern int memory_end_set; |
| 53 | extern unsigned long memory_end; | 67 | extern unsigned long memory_end; |
| @@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void); | |||
| 121 | 135 | ||
| 122 | #else /* __ASSEMBLY__ */ | 136 | #else /* __ASSEMBLY__ */ |
| 123 | 137 | ||
| 124 | #define IPL_DEVICE 0x10400 | 138 | #define IPL_DEVICE (IPL_DEVICE_OFFSET) |
| 125 | #define INITRD_START 0x10408 | 139 | #define INITRD_START (INITRD_START_OFFSET) |
| 126 | #define INITRD_SIZE 0x10410 | 140 | #define INITRD_SIZE (INITRD_SIZE_OFFSET) |
| 127 | #define OLDMEM_BASE 0x10418 | 141 | #define OLDMEM_BASE (OLDMEM_BASE_OFFSET) |
| 128 | #define OLDMEM_SIZE 0x10420 | 142 | #define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET) |
| 129 | #define COMMAND_LINE 0x10480 | 143 | #define COMMAND_LINE (COMMAND_LINE_OFFSET) |
| 130 | 144 | ||
| 131 | #endif /* __ASSEMBLY__ */ | 145 | #endif /* __ASSEMBLY__ */ |
| 132 | #endif /* _ASM_S390_SETUP_H */ | 146 | #endif /* _ASM_S390_SETUP_H */ |
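As a small illustration of what the named offsets buy over the old bare constants, the pattern used by the kexec_file code further down in this series can be written as follows; kernel_buf and the two values are placeholders owned by the caller.

    /* Patch boot parameters into a kernel image that has been read into
     * kernel_buf; the offsets are relative to the start of the image and
     * match the parameter area in kernel/head.S.
     */
    static void set_initrd_parms(void *kernel_buf,
    			     unsigned long initrd_addr,
    			     unsigned long initrd_size)
    {
    	unsigned long *loc;
    
    	loc = (unsigned long *)(kernel_buf + INITRD_START_OFFSET);
    	*loc = initrd_addr;
    
    	loc = (unsigned long *)(kernel_buf + INITRD_SIZE_OFFSET);
    	*loc = initrd_size;
    }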
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index c57f9d28d894..9a14a611ed82 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h | |||
| @@ -97,22 +97,31 @@ typedef unsigned long sigset_t; | |||
| 97 | #include <asm-generic/signal-defs.h> | 97 | #include <asm-generic/signal-defs.h> |
| 98 | 98 | ||
| 99 | #ifndef __KERNEL__ | 99 | #ifndef __KERNEL__ |
| 100 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
| 101 | 100 | ||
| 101 | /* | ||
| 102 | * There are two system calls in regard to sigaction, sys_rt_sigaction | ||
| 103 | * and sys_sigaction. Internally the kernel uses the struct old_sigaction | ||
| 104 | * for the older sys_sigaction system call, and the kernel version of the | ||
| 105 | * struct sigaction for the newer sys_rt_sigaction. | ||
| 106 | * | ||
| 107 | * The uapi definition for struct sigaction has made a strange distinction | ||
| 108 | * between 31-bit and 64-bit in the past. For 64-bit the uapi structure | ||
| 109 | * looks like the kernel struct sigaction, but for 31-bit it used to | ||
| 110 | * look like the kernel struct old_sigaction. That practically made the | ||
| 111 | * structure unusable for either system call. To get around this problem | ||
| 112 | * the glibc always had its own definitions for the sigaction structures. | ||
| 113 | * | ||
| 114 | * The current struct sigaction uapi definition below is suitable for the | ||
| 115 | * sys_rt_sigaction system call only. | ||
| 116 | */ | ||
| 102 | struct sigaction { | 117 | struct sigaction { |
| 103 | union { | 118 | union { |
| 104 | __sighandler_t _sa_handler; | 119 | __sighandler_t _sa_handler; |
| 105 | void (*_sa_sigaction)(int, struct siginfo *, void *); | 120 | void (*_sa_sigaction)(int, struct siginfo *, void *); |
| 106 | } _u; | 121 | } _u; |
| 107 | #ifndef __s390x__ /* lovely */ | ||
| 108 | sigset_t sa_mask; | ||
| 109 | unsigned long sa_flags; | ||
| 110 | void (*sa_restorer)(void); | ||
| 111 | #else /* __s390x__ */ | ||
| 112 | unsigned long sa_flags; | 122 | unsigned long sa_flags; |
| 113 | void (*sa_restorer)(void); | 123 | void (*sa_restorer)(void); |
| 114 | sigset_t sa_mask; | 124 | sigset_t sa_mask; |
| 115 | #endif /* __s390x__ */ | ||
| 116 | }; | 125 | }; |
| 117 | 126 | ||
| 118 | #define sa_handler _u._sa_handler | 127 | #define sa_handler _u._sa_handler |
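For context, portable userspace does not touch this uapi struct directly; it goes through glibc's own struct sigaction, which, as the comment notes, glibc has always defined itself. A standard usage sketch, independent of the s390 uapi layout:

    #include <signal.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>
    
    static void handler(int sig, siginfo_t *info, void *ucontext)
    {
    	/* only async-signal-safe work belongs here */
    	(void)sig; (void)info; (void)ucontext;
    }
    
    int main(void)
    {
    	struct sigaction sa;
    
    	memset(&sa, 0, sizeof(sa));
    	sa.sa_sigaction = handler;
    	sa.sa_flags = SA_SIGINFO;	/* use the three-argument handler */
    	sigemptyset(&sa.sa_mask);
    
    	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
    		perror("sigaction");
    		return 1;
    	}
    	pause();	/* wait for SIGUSR1 */
    	return 0;
    }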
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b06a6f79c1ec..84ea6225efb4 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
| @@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | |||
| 82 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 82 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
| 83 | obj-$(CONFIG_UPROBES) += uprobes.o | 83 | obj-$(CONFIG_UPROBES) += uprobes.o |
| 84 | 84 | ||
| 85 | obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o | ||
| 86 | obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o | ||
| 87 | |||
| 85 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o | 88 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o |
| 86 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o | 89 | obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o |
| 87 | 90 | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index cfe2c45c5180..eb2a5c0443cd 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/kbuild.h> | 10 | #include <linux/kbuild.h> |
| 11 | #include <linux/kvm_host.h> | 11 | #include <linux/kvm_host.h> |
| 12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
| 13 | #include <linux/purgatory.h> | ||
| 13 | #include <asm/idle.h> | 14 | #include <asm/idle.h> |
| 14 | #include <asm/vdso.h> | 15 | #include <asm/vdso.h> |
| 15 | #include <asm/pgtable.h> | 16 | #include <asm/pgtable.h> |
| @@ -204,5 +205,9 @@ int main(void) | |||
| 204 | OFFSET(__GMAP_ASCE, gmap, asce); | 205 | OFFSET(__GMAP_ASCE, gmap, asce); |
| 205 | OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); | 206 | OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); |
| 206 | OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); | 207 | OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); |
| 208 | /* kexec_sha_region */ | ||
| 209 | OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); | ||
| 210 | OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len); | ||
| 211 | DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region)); | ||
| 207 | return 0; | 212 | return 0; |
| 208 | } | 213 | } |
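The three kexec_sha_region offsets are meant for the purgatory, which verifies a SHA-256 digest over the loaded segments before jumping to the new kernel. The purgatory source is not part of this excerpt; purely as a hedged sketch, with the symbol names purgatory_sha_regions and purgatory_sha256_digest and the sha256 helpers assumed to be available in the purgatory environment, the C side of such a check would look roughly like:

    /* Sketch only: walk the recorded regions, hash them, and compare the
     * result against the digest computed at kexec_file load time.
     */
    int verify_sha256_digest(void)
    {
    	struct kexec_sha_region *ptr, *end;
    	u8 digest[SHA256_DIGEST_SIZE];
    	struct sha256_state sctx;
    
    	sha256_init(&sctx);
    	end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
    
    	for (ptr = purgatory_sha_regions; ptr < end; ptr++)
    		sha256_update(&sctx, (u8 *)ptr->start, ptr->len);
    
    	sha256_final(&sctx, digest);
    
    	return memcmp(digest, purgatory_sha256_digest, sizeof(digest)) ? 1 : 0;
    }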
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 11e9d8b5c1b0..607c5e9fba3d 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c | |||
| @@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, | |||
| 182 | COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); | 182 | COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); |
| 183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); | 183 | COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); |
| 184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); | 184 | COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); |
| 185 | COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) | ||
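With the system call wired up (including the compat path above), loading a kernel from userspace boils down to passing two file descriptors and a command line. A hedged userspace sketch follows; the file paths and command line are examples only, and real tooling such as kexec-tools does considerably more setup and error handling.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    
    int main(void)
    {
    	const char cmdline[] = "root=/dev/dasda1";
    	int kernel_fd, initrd_fd;
    	long ret;
    
    	kernel_fd = open("/boot/image", O_RDONLY);
    	initrd_fd = open("/boot/initrd", O_RDONLY);
    	if (kernel_fd < 0 || initrd_fd < 0) {
    		perror("open");
    		return 1;
    	}
    
    	/* cmdline_len must include the terminating NUL byte; flags = 0 */
    	ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
    		      sizeof(cmdline), cmdline, 0UL);
    	if (ret < 0) {
    		perror("kexec_file_load");
    		return 1;
    	}
    
    	/* the loaded kernel is started later, e.g. via kexec -e */
    	return 0;
    }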
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c new file mode 100644 index 000000000000..5a286b012043 --- /dev/null +++ b/arch/s390/kernel/kexec_elf.c | |||
| @@ -0,0 +1,147 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * ELF loader for kexec_file_load system call. | ||
| 4 | * | ||
| 5 | * Copyright IBM Corp. 2018 | ||
| 6 | * | ||
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/errno.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/kexec.h> | ||
| 13 | #include <asm/setup.h> | ||
| 14 | |||
| 15 | static int kexec_file_add_elf_kernel(struct kimage *image, | ||
| 16 | struct s390_load_data *data, | ||
| 17 | char *kernel, unsigned long kernel_len) | ||
| 18 | { | ||
| 19 | struct kexec_buf buf; | ||
| 20 | const Elf_Ehdr *ehdr; | ||
| 21 | const Elf_Phdr *phdr; | ||
| 22 | int i, ret; | ||
| 23 | |||
| 24 | ehdr = (Elf_Ehdr *)kernel; | ||
| 25 | buf.image = image; | ||
| 26 | |||
| 27 | phdr = (void *)ehdr + ehdr->e_phoff; | ||
| 28 | for (i = 0; i < ehdr->e_phnum; i++, phdr++) { | ||
| 29 | if (phdr->p_type != PT_LOAD) | ||
| 30 | continue; | ||
| 31 | |||
| 32 | buf.buffer = kernel + phdr->p_offset; | ||
| 33 | buf.bufsz = phdr->p_filesz; | ||
| 34 | |||
| 35 | buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); | ||
| 36 | buf.memsz = phdr->p_memsz; | ||
| 37 | |||
| 38 | if (phdr->p_paddr == 0) { | ||
| 39 | data->kernel_buf = buf.buffer; | ||
| 40 | data->memsz += STARTUP_NORMAL_OFFSET; | ||
| 41 | |||
| 42 | buf.buffer += STARTUP_NORMAL_OFFSET; | ||
| 43 | buf.bufsz -= STARTUP_NORMAL_OFFSET; | ||
| 44 | |||
| 45 | buf.mem += STARTUP_NORMAL_OFFSET; | ||
| 46 | buf.memsz -= STARTUP_NORMAL_OFFSET; | ||
| 47 | } | ||
| 48 | |||
| 49 | if (image->type == KEXEC_TYPE_CRASH) | ||
| 50 | buf.mem += crashk_res.start; | ||
| 51 | |||
| 52 | ret = kexec_add_buffer(&buf); | ||
| 53 | if (ret) | ||
| 54 | return ret; | ||
| 55 | |||
| 56 | data->memsz += buf.memsz; | ||
| 57 | } | ||
| 58 | |||
| 59 | return 0; | ||
| 60 | } | ||
| 61 | |||
| 62 | static void *s390_elf_load(struct kimage *image, | ||
| 63 | char *kernel, unsigned long kernel_len, | ||
| 64 | char *initrd, unsigned long initrd_len, | ||
| 65 | char *cmdline, unsigned long cmdline_len) | ||
| 66 | { | ||
| 67 | struct s390_load_data data = {0}; | ||
| 68 | const Elf_Ehdr *ehdr; | ||
| 69 | const Elf_Phdr *phdr; | ||
| 70 | size_t size; | ||
| 71 | int i, ret; | ||
| 72 | |||
| 73 | /* image->fops->probe already checked for valid ELF magic number. */ | ||
| 74 | ehdr = (Elf_Ehdr *)kernel; | ||
| 75 | |||
| 76 | if (ehdr->e_type != ET_EXEC || | ||
| 77 | ehdr->e_ident[EI_CLASS] != ELFCLASS64 || | ||
| 78 | !elf_check_arch(ehdr)) | ||
| 79 | return ERR_PTR(-EINVAL); | ||
| 80 | |||
| 81 | if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr)) | ||
| 82 | return ERR_PTR(-EINVAL); | ||
| 83 | |||
| 84 | size = ehdr->e_ehsize + ehdr->e_phoff; | ||
| 85 | size += ehdr->e_phentsize * ehdr->e_phnum; | ||
| 86 | if (size > kernel_len) | ||
| 87 | return ERR_PTR(-EINVAL); | ||
| 88 | |||
| 89 | phdr = (void *)ehdr + ehdr->e_phoff; | ||
| 90 | size = ALIGN(size, phdr->p_align); | ||
| 91 | for (i = 0; i < ehdr->e_phnum; i++, phdr++) { | ||
| 92 | if (phdr->p_type == PT_INTERP) | ||
| 93 | return ERR_PTR(-EINVAL); | ||
| 94 | |||
| 95 | if (phdr->p_offset > kernel_len) | ||
| 96 | return ERR_PTR(-EINVAL); | ||
| 97 | |||
| 98 | size += ALIGN(phdr->p_filesz, phdr->p_align); | ||
| 99 | } | ||
| 100 | |||
| 101 | if (size > kernel_len) | ||
| 102 | return ERR_PTR(-EINVAL); | ||
| 103 | |||
| 104 | ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len); | ||
| 105 | if (ret) | ||
| 106 | return ERR_PTR(ret); | ||
| 107 | |||
| 108 | if (!data.memsz) | ||
| 109 | return ERR_PTR(-EINVAL); | ||
| 110 | |||
| 111 | if (initrd) { | ||
| 112 | ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); | ||
| 113 | if (ret) | ||
| 114 | return ERR_PTR(ret); | ||
| 115 | } | ||
| 116 | |||
| 117 | ret = kexec_file_add_purgatory(image, &data); | ||
| 118 | if (ret) | ||
| 119 | return ERR_PTR(ret); | ||
| 120 | |||
| 121 | return kexec_file_update_kernel(image, &data); | ||
| 122 | } | ||
| 123 | |||
| 124 | static int s390_elf_probe(const char *buf, unsigned long len) | ||
| 125 | { | ||
| 126 | const Elf_Ehdr *ehdr; | ||
| 127 | |||
| 128 | if (len < sizeof(Elf_Ehdr)) | ||
| 129 | return -ENOEXEC; | ||
| 130 | |||
| 131 | ehdr = (Elf_Ehdr *)buf; | ||
| 132 | |||
| 133 | /* Only check the ELF magic number here and do the full validity check | ||
| 134 | * in the loader. Any additional check that fails here would make the | ||
| 135 | * erroneous ELF file fall through to the image loader, which accepts | ||
| 136 | * whatever it gets, most likely causing behavior the user did not intend. | ||
| 137 | */ | ||
| 138 | if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) | ||
| 139 | return -ENOEXEC; | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | const struct kexec_file_ops s390_kexec_elf_ops = { | ||
| 145 | .probe = s390_elf_probe, | ||
| 146 | .load = s390_elf_load, | ||
| 147 | }; | ||
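The probe above deliberately accepts anything with a valid ELF magic and defers real validation to the loader. The same criterion, reproduced as a small standalone userspace check for illustration (the path is an example):

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    
    /* Accept a file as an ELF candidate purely on its magic number,
     * mirroring s390_elf_probe(); everything else is the loader's job.
     */
    static int looks_like_elf(const char *path)
    {
    	unsigned char ident[EI_NIDENT];
    	int fd = open(path, O_RDONLY);
    
    	if (fd < 0)
    		return 0;
    	if (read(fd, ident, sizeof(ident)) != sizeof(ident)) {
    		close(fd);
    		return 0;
    	}
    	close(fd);
    	return memcmp(ident, ELFMAG, SELFMAG) == 0;
    }
    
    int main(void)
    {
    	printf("/boot/image: %s\n",
    	       looks_like_elf("/boot/image") ? "ELF candidate" : "not ELF");
    	return 0;
    }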
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c new file mode 100644 index 000000000000..3800852595e8 --- /dev/null +++ b/arch/s390/kernel/kexec_image.c | |||
| @@ -0,0 +1,76 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Image loader for kexec_file_load system call. | ||
| 4 | * | ||
| 5 | * Copyright IBM Corp. 2018 | ||
| 6 | * | ||
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/errno.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/kexec.h> | ||
| 13 | #include <asm/setup.h> | ||
| 14 | |||
| 15 | static int kexec_file_add_image_kernel(struct kimage *image, | ||
| 16 | struct s390_load_data *data, | ||
| 17 | char *kernel, unsigned long kernel_len) | ||
| 18 | { | ||
| 19 | struct kexec_buf buf; | ||
| 20 | int ret; | ||
| 21 | |||
| 22 | buf.image = image; | ||
| 23 | |||
| 24 | buf.buffer = kernel + STARTUP_NORMAL_OFFSET; | ||
| 25 | buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET; | ||
| 26 | |||
| 27 | buf.mem = STARTUP_NORMAL_OFFSET; | ||
| 28 | if (image->type == KEXEC_TYPE_CRASH) | ||
| 29 | buf.mem += crashk_res.start; | ||
| 30 | buf.memsz = buf.bufsz; | ||
| 31 | |||
| 32 | ret = kexec_add_buffer(&buf); | ||
| 33 | |||
| 34 | data->kernel_buf = kernel; | ||
| 35 | data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET; | ||
| 36 | |||
| 37 | return ret; | ||
| 38 | } | ||
| 39 | |||
| 40 | static void *s390_image_load(struct kimage *image, | ||
| 41 | char *kernel, unsigned long kernel_len, | ||
| 42 | char *initrd, unsigned long initrd_len, | ||
| 43 | char *cmdline, unsigned long cmdline_len) | ||
| 44 | { | ||
| 45 | struct s390_load_data data = {0}; | ||
| 46 | int ret; | ||
| 47 | |||
| 48 | ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len); | ||
| 49 | if (ret) | ||
| 50 | return ERR_PTR(ret); | ||
| 51 | |||
| 52 | if (initrd) { | ||
| 53 | ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); | ||
| 54 | if (ret) | ||
| 55 | return ERR_PTR(ret); | ||
| 56 | } | ||
| 57 | |||
| 58 | ret = kexec_file_add_purgatory(image, &data); | ||
| 59 | if (ret) | ||
| 60 | return ERR_PTR(ret); | ||
| 61 | |||
| 62 | return kexec_file_update_kernel(image, &data); | ||
| 63 | } | ||
| 64 | |||
| 65 | static int s390_image_probe(const char *buf, unsigned long len) | ||
| 66 | { | ||
| 67 | /* A raw image cannot be validated reliably, so accept whatever the | ||
| 68 | * user passes in. | ||
| 69 | */ | ||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | const struct kexec_file_ops s390_kexec_image_ops = { | ||
| 74 | .probe = s390_image_probe, | ||
| 75 | .load = s390_image_load, | ||
| 76 | }; | ||
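To make the offset bookkeeping in kexec_file_add_image_kernel() concrete, a worked example with illustrative numbers (normal kexec, so crashk_res.start is not added):

    kernel_len   = 0x01000000                  /* 16 MiB raw image            */
    buf.buffer   = kernel + 0x10000            /* skip the head.S parameter   */
    buf.bufsz    = 0x01000000 - 0x10000        /*   area, STARTUP_NORMAL_     */
    buf.mem      = 0x10000                     /*   OFFSET = 64 KiB           */
    buf.memsz    = buf.bufsz
    data->memsz += buf.memsz + 0x10000         /* = 0x01000000, so the initrd */
                                               /*   and purgatory are placed  */
                                               /*   above the whole image     */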
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..f413f57f8d20 --- /dev/null +++ b/arch/s390/kernel/machine_kexec_file.c | |||
| @@ -0,0 +1,245 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * s390 code for kexec_file_load system call | ||
| 4 | * | ||
| 5 | * Copyright IBM Corp. 2018 | ||
| 6 | * | ||
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/elf.h> | ||
| 11 | #include <linux/kexec.h> | ||
| 12 | #include <asm/setup.h> | ||
| 13 | |||
| 14 | const struct kexec_file_ops * const kexec_file_loaders[] = { | ||
| 15 | &s390_kexec_elf_ops, | ||
| 16 | &s390_kexec_image_ops, | ||
| 17 | NULL, | ||
| 18 | }; | ||
| 19 | |||
| 20 | int *kexec_file_update_kernel(struct kimage *image, | ||
| 21 | struct s390_load_data *data) | ||
| 22 | { | ||
| 23 | unsigned long *loc; | ||
| 24 | |||
| 25 | if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) | ||
| 26 | return ERR_PTR(-EINVAL); | ||
| 27 | |||
| 28 | if (image->cmdline_buf_len) | ||
| 29 | memcpy(data->kernel_buf + COMMAND_LINE_OFFSET, | ||
| 30 | image->cmdline_buf, image->cmdline_buf_len); | ||
| 31 | |||
| 32 | if (image->type == KEXEC_TYPE_CRASH) { | ||
| 33 | loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET); | ||
| 34 | *loc = crashk_res.start; | ||
| 35 | |||
| 36 | loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET); | ||
| 37 | *loc = crashk_res.end - crashk_res.start + 1; | ||
| 38 | } | ||
| 39 | |||
| 40 | if (image->initrd_buf) { | ||
| 41 | loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET); | ||
| 42 | *loc = data->initrd_load_addr; | ||
| 43 | |||
| 44 | loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET); | ||
| 45 | *loc = image->initrd_buf_len; | ||
| 46 | } | ||
| 47 | |||
| 48 | return NULL; | ||
| 49 | } | ||
| 50 | |||
| 51 | static int kexec_file_update_purgatory(struct kimage *image) | ||
| 52 | { | ||
| 53 | u64 entry, type; | ||
| 54 | int ret; | ||
| 55 | |||
| 56 | if (image->type == KEXEC_TYPE_CRASH) { | ||
| 57 | entry = STARTUP_KDUMP_OFFSET; | ||
| 58 | type = KEXEC_TYPE_CRASH; | ||
| 59 | } else { | ||
| 60 | entry = STARTUP_NORMAL_OFFSET; | ||
| 61 | type = KEXEC_TYPE_DEFAULT; | ||
| 62 | } | ||
| 63 | |||
| 64 | ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, | ||
| 65 | sizeof(entry), false); | ||
| 66 | if (ret) | ||
| 67 | return ret; | ||
| 68 | |||
| 69 | ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, | ||
| 70 | sizeof(type), false); | ||
| 71 | if (ret) | ||
| 72 | return ret; | ||
| 73 | |||
| 74 | if (image->type == KEXEC_TYPE_CRASH) { | ||
| 75 | u64 crash_size; | ||
| 76 | |||
| 77 | ret = kexec_purgatory_get_set_symbol(image, "crash_start", | ||
| 78 | &crashk_res.start, | ||
| 79 | sizeof(crashk_res.start), | ||
| 80 | false); | ||
| 81 | if (ret) | ||
| 82 | return ret; | ||
| 83 | |||
| 84 | crash_size = crashk_res.end - crashk_res.start + 1; | ||
| 85 | ret = kexec_purgatory_get_set_symbol(image, "crash_size", | ||
| 86 | &crash_size, | ||
| 87 | sizeof(crash_size), | ||
| 88 | false); | ||
| 89 | } | ||
| 90 | return ret; | ||
| 91 | } | ||
| 92 | |||
| 93 | int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data) | ||
| 94 | { | ||
| 95 | struct kexec_buf buf; | ||
| 96 | int ret; | ||
| 97 | |||
| 98 | buf.image = image; | ||
| 99 | |||
| 100 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); | ||
| 101 | buf.mem = data->memsz; | ||
| 102 | if (image->type == KEXEC_TYPE_CRASH) | ||
| 103 | buf.mem += crashk_res.start; | ||
| 104 | |||
| 105 | ret = kexec_load_purgatory(image, &buf); | ||
| 106 | if (ret) | ||
| 107 | return ret; | ||
| 108 | |||
| 109 | ret = kexec_file_update_purgatory(image); | ||
| 110 | return ret; | ||
| 111 | } | ||
| 112 | |||
| 113 | int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data, | ||
| 114 | char *initrd, unsigned long initrd_len) | ||
| 115 | { | ||
| 116 | struct kexec_buf buf; | ||
| 117 | int ret; | ||
| 118 | |||
| 119 | buf.image = image; | ||
| 120 | |||
| 121 | buf.buffer = initrd; | ||
| 122 | buf.bufsz = initrd_len; | ||
| 123 | |||
| 124 | data->memsz = ALIGN(data->memsz, PAGE_SIZE); | ||
| 125 | buf.mem = data->memsz; | ||
| 126 | if (image->type == KEXEC_TYPE_CRASH) | ||
| 127 | buf.mem += crashk_res.start; | ||
| 128 | buf.memsz = buf.bufsz; | ||
| 129 | |||
| 130 | data->initrd_load_addr = buf.mem; | ||
| 131 | data->memsz += buf.memsz; | ||
| 132 | |||
| 133 | ret = kexec_add_buffer(&buf); | ||
| 134 | return ret; | ||
| 135 | } | ||
| 136 | |||
| 137 | /* | ||
| 138 | * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole | ||
| 139 | * and provide kbuf->mem by hand. | ||
| 140 | */ | ||
| 141 | int arch_kexec_walk_mem(struct kexec_buf *kbuf, | ||
| 142 | int (*func)(struct resource *, void *)) | ||
| 143 | { | ||
| 144 | return 1; | ||
| 145 | } | ||
| 146 | |||
| 147 | int arch_kexec_apply_relocations_add(struct purgatory_info *pi, | ||
| 148 | Elf_Shdr *section, | ||
| 149 | const Elf_Shdr *relsec, | ||
| 150 | const Elf_Shdr *symtab) | ||
| 151 | { | ||
| 152 | Elf_Rela *relas; | ||
| 153 | int i; | ||
| 154 | |||
| 155 | relas = (void *)pi->ehdr + relsec->sh_offset; | ||
| 156 | |||
| 157 | for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { | ||
| 158 | const Elf_Sym *sym; /* symbol to relocate */ | ||
| 159 | unsigned long addr; /* final location after relocation */ | ||
| 160 | unsigned long val; /* relocated symbol value */ | ||
| 161 | void *loc; /* tmp location to modify */ | ||
| 162 | |||
| 163 | sym = (void *)pi->ehdr + symtab->sh_offset; | ||
| 164 | sym += ELF64_R_SYM(relas[i].r_info); | ||
| 165 | |||
| 166 | if (sym->st_shndx == SHN_UNDEF) | ||
| 167 | return -ENOEXEC; | ||
| 168 | |||
| 169 | if (sym->st_shndx == SHN_COMMON) | ||
| 170 | return -ENOEXEC; | ||
| 171 | |||
| 172 | if (sym->st_shndx >= pi->ehdr->e_shnum && | ||
| 173 | sym->st_shndx != SHN_ABS) | ||
| 174 | return -ENOEXEC; | ||
| 175 | |||
| 176 | loc = pi->purgatory_buf; | ||
| 177 | loc += section->sh_offset; | ||
| 178 | loc += relas[i].r_offset; | ||
| 179 | |||
| 180 | val = sym->st_value; | ||
| 181 | if (sym->st_shndx != SHN_ABS) | ||
| 182 | val += pi->sechdrs[sym->st_shndx].sh_addr; | ||
| 183 | val += relas[i].r_addend; | ||
| 184 | |||
| 185 | addr = section->sh_addr + relas[i].r_offset; | ||
| 186 | |||
| 187 | switch (ELF64_R_TYPE(relas[i].r_info)) { | ||
| 188 | case R_390_8: /* Direct 8 bit. */ | ||
| 189 | *(u8 *)loc = val; | ||
| 190 | break; | ||
| 191 | case R_390_12: /* Direct 12 bit. */ | ||
| 192 | *(u16 *)loc &= 0xf000; | ||
| 193 | *(u16 *)loc |= val & 0xfff; | ||
| 194 | break; | ||
| 195 | case R_390_16: /* Direct 16 bit. */ | ||
| 196 | *(u16 *)loc = val; | ||
| 197 | break; | ||
| 198 | case R_390_20: /* Direct 20 bit. */ | ||
| 199 | *(u32 *)loc &= 0xf00000ff; | ||
| 200 | *(u32 *)loc |= (val & 0xfff) << 16; /* DL */ | ||
| 201 | *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */ | ||
| 202 | break; | ||
| 203 | case R_390_32: /* Direct 32 bit. */ | ||
| 204 | *(u32 *)loc = val; | ||
| 205 | break; | ||
| 206 | case R_390_64: /* Direct 64 bit. */ | ||
| 207 | *(u64 *)loc = val; | ||
| 208 | break; | ||
| 209 | case R_390_PC16: /* PC relative 16 bit. */ | ||
| 210 | *(u16 *)loc = (val - addr); | ||
| 211 | break; | ||
| 212 | case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ | ||
| 213 | *(u16 *)loc = (val - addr) >> 1; | ||
| 214 | break; | ||
| 215 | case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ | ||
| 216 | *(u32 *)loc = (val - addr) >> 1; | ||
| 217 | break; | ||
| 218 | case R_390_PC32: /* PC relative 32 bit. */ | ||
| 219 | *(u32 *)loc = (val - addr); | ||
| 220 | break; | ||
| 221 | case R_390_PC64: /* PC relative 64 bit. */ | ||
| 222 | *(u64 *)loc = (val - addr); | ||
| 223 | break; | ||
| 224 | default: | ||
| 225 | break; | ||
| 226 | } | ||
| 227 | } | ||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, | ||
| 232 | unsigned long buf_len) | ||
| 233 | { | ||
| 234 | /* A kernel must be at least large enough to contain head.S. During | ||
| 235 | * load, memory in head.S will be accessed, e.g. to register the next | ||
| 236 | * command line. If the next kernel were smaller, the current kernel | ||
| 237 | * would panic at load. | ||
| 238 | * | ||
| 239 | * 0x11000 = sizeof(head.S) | ||
| 240 | */ | ||
| 241 | if (buf_len < 0x11000) | ||
| 242 | return -ENOEXEC; | ||
| 243 | |||
| 244 | return kexec_image_probe_default(image, buf, buf_len); | ||
| 245 | } | ||
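The relocation switch in arch_kexec_apply_relocations_add() boils down to a little address arithmetic per RELA entry. Below is a minimal userspace sketch of that arithmetic for the R_390_PC32DBL case only; every numeric value is invented for illustration.

	/* Sketch of one RELA entry being applied, R_390_PC32DBL only. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t st_value = 0x120;    /* symbol value from the symtab */
		uint64_t sym_shdr = 0x10000;  /* sh_addr of the symbol's section */
		uint64_t r_addend = 0;        /* addend from the RELA entry */
		uint64_t sec_addr = 0x14000;  /* sh_addr of the section being patched */
		uint64_t r_offset = 0x40;     /* offset of the field inside it */

		uint64_t val  = st_value + sym_shdr + r_addend; /* resolved address */
		uint64_t addr = sec_addr + r_offset;            /* place being patched */

		/* PC relative, shifted by 1 (halfword granularity). */
		uint32_t field = (uint32_t)((val - addr) >> 1);

		printf("stored field: %#x\n", field);
		return 0;
	}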
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index f236ce8757e8..46d49a11663f 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
| 3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
| 4 | #include <linux/cpu.h> | ||
| 4 | #include <asm/nospec-branch.h> | 5 | #include <asm/nospec-branch.h> |
| 5 | 6 | ||
| 6 | static int __init nobp_setup_early(char *str) | 7 | static int __init nobp_setup_early(char *str) |
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index c5bc3f209652..5ee27dc9a10c 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c | |||
| @@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void) | |||
| 583 | model = cpumcf_z13_pmu_event_attr; | 583 | model = cpumcf_z13_pmu_event_attr; |
| 584 | break; | 584 | break; |
| 585 | case 0x3906: | 585 | case 0x3906: |
| 586 | case 0x3907: | ||
| 586 | model = cpumcf_z14_pmu_event_attr; | 587 | model = cpumcf_z14_pmu_event_attr; |
| 587 | break; | 588 | break; |
| 588 | default: | 589 | default: |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index fc3b4aa185cc..d82a9ec64ea9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -821,6 +821,7 @@ static int __init setup_hwcaps(void) | |||
| 821 | strcpy(elf_platform, "z13"); | 821 | strcpy(elf_platform, "z13"); |
| 822 | break; | 822 | break; |
| 823 | case 0x3906: | 823 | case 0x3906: |
| 824 | case 0x3907: | ||
| 824 | strcpy(elf_platform, "z14"); | 825 | strcpy(elf_platform, "z14"); |
| 825 | break; | 826 | break; |
| 826 | } | 827 | } |
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index b38d48464368..8b210ead7956 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl | |||
| @@ -388,3 +388,4 @@ | |||
| 388 | 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage | 388 | 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage |
| 389 | 379 common statx sys_statx compat_sys_statx | 389 | 379 common statx sys_statx compat_sys_statx |
| 390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi | 390 | 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi |
| 391 | 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load | ||
diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore new file mode 100644 index 000000000000..e9e66f178a6d --- /dev/null +++ b/arch/s390/purgatory/.gitignore | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | kexec-purgatory.c | ||
| 2 | purgatory.ro | ||
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile new file mode 100644 index 000000000000..e9525bc1b4a6 --- /dev/null +++ b/arch/s390/purgatory/Makefile | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | OBJECT_FILES_NON_STANDARD := y | ||
| 4 | |||
| 5 | purgatory-y := head.o purgatory.o string.o sha256.o mem.o | ||
| 6 | |||
| 7 | targets += $(purgatory-y) purgatory.ro kexec-purgatory.c | ||
| 8 | PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) | ||
| 9 | |||
| 10 | $(obj)/sha256.o: $(srctree)/lib/sha256.c | ||
| 11 | $(call if_changed_rule,cc_o_c) | ||
| 12 | |||
| 13 | $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S | ||
| 14 | $(call if_changed_rule,as_o_S) | ||
| 15 | |||
| 16 | $(obj)/string.o: $(srctree)/arch/s390/lib/string.c | ||
| 17 | $(call if_changed_rule,cc_o_c) | ||
| 18 | |||
| 19 | LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib | ||
| 20 | LDFLAGS_purgatory.ro += -z nodefaultlib | ||
| 21 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes | ||
| 22 | KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare | ||
| 23 | KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding | ||
| 24 | KBUILD_CFLAGS += -c -MD -Os -m64 | ||
| 25 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | ||
| 26 | |||
| 27 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE | ||
| 28 | $(call if_changed,ld) | ||
| 29 | |||
| 30 | CMD_BIN2C = $(objtree)/scripts/basic/bin2c | ||
| 31 | quiet_cmd_bin2c = BIN2C $@ | ||
| 32 | cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ | ||
| 33 | |||
| 34 | $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE | ||
| 35 | $(call if_changed,bin2c) | ||
| 36 | |||
| 37 | obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o | ||
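The BIN2C step wraps the linked purgatory.ro into a C array that the kernel image carries and the generic kexec_file code refers to by name. The sketch below shows only the assumed shape of the generated kexec-purgatory.c; the exact byte escaping and the way the size constant is emitted are whatever scripts/basic/bin2c actually produces.

	/* Assumed shape of the generated file, not a verbatim copy. */
	#include <linux/types.h>

	const char kexec_purgatory[] =
		"\x7f\x45\x4c\x46\x02\x02\x01\x00"  /* purgatory.ro, byte for byte */
		/* ... remaining bytes of the binary ... */
		;

	const size_t kexec_purgatory_size = sizeof(kexec_purgatory) - 1;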
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S new file mode 100644 index 000000000000..660c96a05a9b --- /dev/null +++ b/arch/s390/purgatory/head.S | |||
| @@ -0,0 +1,279 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Purgatory setup code | ||
| 4 | * | ||
| 5 | * Copyright IBM Corp. 2018 | ||
| 6 | * | ||
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/linkage.h> | ||
| 11 | #include <asm/asm-offsets.h> | ||
| 12 | #include <asm/page.h> | ||
| 13 | #include <asm/sigp.h> | ||
| 14 | |||
| 15 | /* The purgatory is the code running between two kernels. Its main purpose | ||
| 16 | * is to verify that the next kernel was not corrupted after load and to | ||
| 17 | * start it. | ||
| 18 | * | ||
| 19 | * If the next kernel is a crash kernel there are some peculiarities to | ||
| 20 | * consider: | ||
| 21 | * | ||
| 22 | * First the purgatory is called twice. Once only to verify the | ||
| 23 | * sha digest. So if the crash kernel got corrupted the old kernel can try | ||
| 24 | * to trigger a stand-alone dumper. And once to actually load the crash kernel. | ||
| 25 | * | ||
| 26 | * Second the purgatory also has to swap the crash memory region with its | ||
| 27 | * destination at address 0. As the purgatory is part of crash memory this | ||
| 28 | * requires some finesse. The tactic here is that the purgatory first copies | ||
| 29 | * itself to the end of the destination and then swaps the rest of the | ||
| 30 | * memory running from there. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #define bufsz purgatory_end-stack | ||
| 34 | |||
| 35 | .macro MEMCPY dst,src,len | ||
| 36 | lgr %r0,\dst | ||
| 37 | lgr %r1,\len | ||
| 38 | lgr %r2,\src | ||
| 39 | lgr %r3,\len | ||
| 40 | |||
| 41 | 20: mvcle %r0,%r2,0 | ||
| 42 | jo 20b | ||
| 43 | .endm | ||
| 44 | |||
| 45 | .macro MEMSWAP dst,src,buf,len | ||
| 46 | 10: cghi \len,bufsz | ||
| 47 | jh 11f | ||
| 48 | lgr %r4,\len | ||
| 49 | j 12f | ||
| 50 | 11: lghi %r4,bufsz | ||
| 51 | |||
| 52 | 12: MEMCPY \buf,\dst,%r4 | ||
| 53 | MEMCPY \dst,\src,%r4 | ||
| 54 | MEMCPY \src,\buf,%r4 | ||
| 55 | |||
| 56 | agr \dst,%r4 | ||
| 57 | agr \src,%r4 | ||
| 58 | sgr \len,%r4 | ||
| 59 | |||
| 60 | cghi \len,0 | ||
| 61 | jh 10b | ||
| 62 | .endm | ||
| 63 | |||
| 64 | .macro START_NEXT_KERNEL base | ||
| 65 | lg %r4,kernel_entry-\base(%r13) | ||
| 66 | lg %r5,load_psw_mask-\base(%r13) | ||
| 67 | ogr %r4,%r5 | ||
| 68 | stg %r4,0(%r0) | ||
| 69 | |||
| 70 | xgr %r0,%r0 | ||
| 71 | diag %r0,%r0,0x308 | ||
| 72 | .endm | ||
| 73 | |||
| 74 | .text | ||
| 75 | .align PAGE_SIZE | ||
| 76 | ENTRY(purgatory_start) | ||
| 77 | /* The purgatory might be called after a diag308 so better set | ||
| 78 | * architecture and addressing mode. | ||
| 79 | */ | ||
| 80 | lhi %r1,1 | ||
| 81 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | ||
| 82 | sam64 | ||
| 83 | |||
| 84 | larl %r5,gprregs | ||
| 85 | stmg %r6,%r15,0(%r5) | ||
| 86 | |||
| 87 | basr %r13,0 | ||
| 88 | .base_crash: | ||
| 89 | |||
| 90 | /* Setup stack */ | ||
| 91 | larl %r15,purgatory_end | ||
| 92 | aghi %r15,-160 | ||
| 93 | |||
| 94 | /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called | ||
| 95 | * directly with a flag passed in %r2 indicating whether the purgatory shall do | ||
| 96 | * checksum verification only (%r2 = 0 -> verification only). | ||
| 97 | * | ||
| 98 | * Check now and preserve over C function call by storing in | ||
| 99 | * %r10 with | ||
| 100 | * 1 -> checksum verification only | ||
| 101 | * 0 -> load new kernel | ||
| 102 | */ | ||
| 103 | lghi %r10,0 | ||
| 104 | lg %r11,kernel_type-.base_crash(%r13) | ||
| 105 | cghi %r11,1 /* KEXEC_TYPE_CRASH */ | ||
| 106 | jne .do_checksum_verification | ||
| 107 | cghi %r2,0 /* checksum verification only */ | ||
| 108 | jne .do_checksum_verification | ||
| 109 | lghi %r10,1 | ||
| 110 | |||
| 111 | .do_checksum_verification: | ||
| 112 | brasl %r14,verify_sha256_digest | ||
| 113 | |||
| 114 | cghi %r10,1 /* checksum verification only */ | ||
| 115 | je .return_old_kernel | ||
| 116 | cghi %r2,0 /* checksum match */ | ||
| 117 | jne .disabled_wait | ||
| 118 | |||
| 119 | /* If the next kernel is a crash kernel the purgatory has to swap | ||
| 120 | * the mem regions first. | ||
| 121 | */ | ||
| 122 | cghi %r11,1 /* KEXEC_TYPE_CRASH */ | ||
| 123 | je .start_crash_kernel | ||
| 124 | |||
| 125 | /* start normal kernel */ | ||
| 126 | START_NEXT_KERNEL .base_crash | ||
| 127 | |||
| 128 | .return_old_kernel: | ||
| 129 | lmg %r6,%r15,gprregs-.base_crash(%r13) | ||
| 130 | br %r14 | ||
| 131 | |||
| 132 | .disabled_wait: | ||
| 133 | lpswe disabled_wait_psw-.base_crash(%r13) | ||
| 134 | |||
| 135 | .start_crash_kernel: | ||
| 136 | /* Location of purgatory_start in crash memory */ | ||
| 137 | lgr %r8,%r13 | ||
| 138 | aghi %r8,-(.base_crash-purgatory_start) | ||
| 139 | |||
| 140 | /* Destination for this code i.e. end of memory to be swapped. */ | ||
| 141 | lg %r9,crash_size-.base_crash(%r13) | ||
| 142 | aghi %r9,-(purgatory_end-purgatory_start) | ||
| 143 | |||
| 144 | /* Destination in crash memory, i.e. same as r9 but in crash memory. */ | ||
| 145 | lg %r10,crash_start-.base_crash(%r13) | ||
| 146 | agr %r10,%r9 | ||
| 147 | |||
| 148 | /* Buffer location (in crash memory) and size. As the purgatory is | ||
| 149 | * behind the point of no return it can re-use the stack as buffer. | ||
| 150 | */ | ||
| 151 | lghi %r11,bufsz | ||
| 152 | larl %r12,stack | ||
| 153 | |||
| 154 | MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ | ||
| 155 | MEMCPY %r9,%r8,%r11 /* self -> dst */ | ||
| 156 | |||
| 157 | /* Jump to new location. */ | ||
| 158 | lgr %r7,%r9 | ||
| 159 | aghi %r7,.jump_to_dst-purgatory_start | ||
| 160 | br %r7 | ||
| 161 | |||
| 162 | .jump_to_dst: | ||
| 163 | basr %r13,0 | ||
| 164 | .base_dst: | ||
| 165 | |||
| 166 | /* clear buffer */ | ||
| 167 | MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ | ||
| 168 | |||
| 169 | /* Load new buffer location after jump */ | ||
| 170 | larl %r7,stack | ||
| 171 | aghi %r10,stack-purgatory_start | ||
| 172 | MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ | ||
| 173 | |||
| 174 | /* Now the code is set up to run from its designated location. Start | ||
| 175 | * swapping the rest of crash memory now. | ||
| 176 | * | ||
| 177 | * The registers will be used as follows: | ||
| 178 | * | ||
| 179 | * %r0-%r4 reserved for macros defined above | ||
| 180 | * %r5-%r6 tmp registers | ||
| 181 | * %r7 pointer to current struct sha region | ||
| 182 | * %r8 index to iterate over all sha regions | ||
| 183 | * %r9 pointer in crash memory | ||
| 184 | * %r10 pointer in old kernel | ||
| 185 | * %r11 total size (still) to be moved | ||
| 186 | * %r12 pointer to buffer | ||
| 187 | */ | ||
| 188 | lgr %r12,%r7 | ||
| 189 | lgr %r11,%r9 | ||
| 190 | lghi %r10,0 | ||
| 191 | lg %r9,crash_start-.base_dst(%r13) | ||
| 192 | lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ | ||
| 193 | larl %r7,purgatory_sha_regions | ||
| 194 | |||
| 195 | j .loop_first | ||
| 196 | |||
| 197 | /* Loop over all purgatory_sha_regions. */ | ||
| 198 | .loop_next: | ||
| 199 | aghi %r8,-1 | ||
| 200 | cghi %r8,0 | ||
| 201 | je .loop_out | ||
| 202 | |||
| 203 | aghi %r7,__KEXEC_SHA_REGION_SIZE | ||
| 204 | |||
| 205 | .loop_first: | ||
| 206 | lg %r5,__KEXEC_SHA_REGION_START(%r7) | ||
| 207 | cghi %r5,0 | ||
| 208 | je .loop_next | ||
| 209 | |||
| 210 | /* Copy [end last sha region, start current sha region) */ | ||
| 211 | /* Note: kexec_sha_region->start points in crash memory */ | ||
| 212 | sgr %r5,%r9 | ||
| 213 | MEMCPY %r9,%r10,%r5 | ||
| 214 | |||
| 215 | agr %r9,%r5 | ||
| 216 | agr %r10,%r5 | ||
| 217 | sgr %r11,%r5 | ||
| 218 | |||
| 219 | /* Swap sha region */ | ||
| 220 | lg %r6,__KEXEC_SHA_REGION_LEN(%r7) | ||
| 221 | MEMSWAP %r9,%r10,%r12,%r6 | ||
| 222 | sg %r11,__KEXEC_SHA_REGION_LEN(%r7) | ||
| 223 | j .loop_next | ||
| 224 | |||
| 225 | .loop_out: | ||
| 226 | /* Copy rest of crash memory */ | ||
| 227 | MEMCPY %r9,%r10,%r11 | ||
| 228 | |||
| 229 | /* start crash kernel */ | ||
| 230 | START_NEXT_KERNEL .base_dst | ||
| 231 | |||
| 232 | |||
| 233 | load_psw_mask: | ||
| 234 | .long 0x00080000,0x80000000 | ||
| 235 | |||
| 236 | .align 8 | ||
| 237 | disabled_wait_psw: | ||
| 238 | .quad 0x0002000180000000 | ||
| 239 | .quad 0x0000000000000000 + .do_checksum_verification | ||
| 240 | |||
| 241 | gprregs: | ||
| 242 | .rept 10 | ||
| 243 | .quad 0 | ||
| 244 | .endr | ||
| 245 | |||
| 246 | purgatory_sha256_digest: | ||
| 247 | .global purgatory_sha256_digest | ||
| 248 | .rept 32 /* SHA256_DIGEST_SIZE */ | ||
| 249 | .byte 0 | ||
| 250 | .endr | ||
| 251 | |||
| 252 | purgatory_sha_regions: | ||
| 253 | .global purgatory_sha_regions | ||
| 254 | .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */ | ||
| 255 | .byte 0 | ||
| 256 | .endr | ||
| 257 | |||
| 258 | kernel_entry: | ||
| 259 | .global kernel_entry | ||
| 260 | .quad 0 | ||
| 261 | |||
| 262 | kernel_type: | ||
| 263 | .global kernel_type | ||
| 264 | .quad 0 | ||
| 265 | |||
| 266 | crash_start: | ||
| 267 | .global crash_start | ||
| 268 | .quad 0 | ||
| 269 | |||
| 270 | crash_size: | ||
| 271 | .global crash_size | ||
| 272 | .quad 0 | ||
| 273 | |||
| 274 | .align PAGE_SIZE | ||
| 275 | stack: | ||
| 276 | /* The buffer to move this code must be as big as the code. */ | ||
| 277 | .skip stack-purgatory_start | ||
| 278 | .align PAGE_SIZE | ||
| 279 | purgatory_end: | ||
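For readers who do not speak s390 assembly, the MEMSWAP macro above amounts to swapping two memory regions through a small bounce buffer, chunk by chunk. The standalone C below is a sketch of that idea only; the real implementation is the mvcle-based macro in head.S.

	/* C sketch of MEMSWAP: swap two regions via a limited bounce buffer. */
	#include <stdio.h>
	#include <string.h>

	static void memswap(unsigned char *dst, unsigned char *src,
			    unsigned char *buf, size_t bufsz, size_t len)
	{
		while (len) {
			size_t chunk = len > bufsz ? bufsz : len;

			memcpy(buf, dst, chunk);  /* dst -> buf */
			memcpy(dst, src, chunk);  /* src -> dst */
			memcpy(src, buf, chunk);  /* buf -> src */

			dst += chunk;
			src += chunk;
			len -= chunk;
		}
	}

	int main(void)
	{
		unsigned char a[8] = "AAAAAAA", b[8] = "BBBBBBB", buf[3];

		memswap(a, b, buf, sizeof(buf), sizeof(a));
		printf("a=%s b=%s\n", a, b);  /* prints a=BBBBBBB b=AAAAAAA */
		return 0;
	}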
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c new file mode 100644 index 000000000000..4e2beb3c29b7 --- /dev/null +++ b/arch/s390/purgatory/purgatory.c | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Purgatory code running between two kernels. | ||
| 4 | * | ||
| 5 | * Copyright IBM Corp. 2018 | ||
| 6 | * | ||
| 7 | * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/kexec.h> | ||
| 11 | #include <linux/sha256.h> | ||
| 12 | #include <linux/string.h> | ||
| 13 | #include <asm/purgatory.h> | ||
| 14 | |||
| 15 | struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; | ||
| 16 | u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; | ||
| 17 | |||
| 18 | u64 kernel_entry; | ||
| 19 | u64 kernel_type; | ||
| 20 | |||
| 21 | u64 crash_start; | ||
| 22 | u64 crash_size; | ||
| 23 | |||
| 24 | int verify_sha256_digest(void) | ||
| 25 | { | ||
| 26 | struct kexec_sha_region *ptr, *end; | ||
| 27 | u8 digest[SHA256_DIGEST_SIZE]; | ||
| 28 | struct sha256_state sctx; | ||
| 29 | |||
| 30 | sha256_init(&sctx); | ||
| 31 | end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); | ||
| 32 | |||
| 33 | for (ptr = purgatory_sha_regions; ptr < end; ptr++) | ||
| 34 | sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); | ||
| 35 | |||
| 36 | sha256_final(&sctx, digest); | ||
| 37 | |||
| 38 | if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) | ||
| 39 | return 1; | ||
| 40 | |||
| 41 | return 0; | ||
| 42 | } | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 949c977bc4c9..c25775fad4ed 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -1013,6 +1013,7 @@ struct kvm_x86_ops { | |||
| 1013 | 1013 | ||
| 1014 | bool (*has_wbinvd_exit)(void); | 1014 | bool (*has_wbinvd_exit)(void); |
| 1015 | 1015 | ||
| 1016 | u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); | ||
| 1016 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); | 1017 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); |
| 1017 | 1018 | ||
| 1018 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); | 1019 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); |
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 3182908b7e6c..7326078eaa7a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
| @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
| 398 | * little bit simple | 398 | * little bit simple |
| 399 | */ | 399 | */ |
| 400 | efi_map_sz = efi_get_runtime_map_size(); | 400 | efi_map_sz = efi_get_runtime_map_size(); |
| 401 | efi_map_sz = ALIGN(efi_map_sz, 16); | ||
| 402 | params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + | 401 | params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + |
| 403 | MAX_ELFCOREHDR_STR_LEN; | 402 | MAX_ELFCOREHDR_STR_LEN; |
| 404 | params_cmdline_sz = ALIGN(params_cmdline_sz, 16); | 403 | params_cmdline_sz = ALIGN(params_cmdline_sz, 16); |
| 405 | kbuf.bufsz = params_cmdline_sz + efi_map_sz + | 404 | kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + |
| 406 | sizeof(struct setup_data) + | 405 | sizeof(struct setup_data) + |
| 407 | sizeof(struct efi_setup_data); | 406 | sizeof(struct efi_setup_data); |
| 408 | 407 | ||
| @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, | |||
| 410 | if (!params) | 409 | if (!params) |
| 411 | return ERR_PTR(-ENOMEM); | 410 | return ERR_PTR(-ENOMEM); |
| 412 | efi_map_offset = params_cmdline_sz; | 411 | efi_map_offset = params_cmdline_sz; |
| 413 | efi_setup_data_offset = efi_map_offset + efi_map_sz; | 412 | efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); |
| 414 | 413 | ||
| 415 | /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ | 414 | /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ |
| 416 | setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; | 415 | setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index d41d896481b8..c9b14020f4dd 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
| @@ -166,7 +166,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
| 166 | */ | 166 | */ |
| 167 | pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); | 167 | pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); |
| 168 | /* Filter out unsuppored __PAGE_KERNEL* bits: */ | 168 | /* Filter out unsuppored __PAGE_KERNEL* bits: */ |
| 169 | pgprot_val(pte_prot) |= __supported_pte_mask; | 169 | pgprot_val(pte_prot) &= __supported_pte_mask; |
| 170 | pte = pfn_pte(pfn, pte_prot); | 170 | pte = pfn_pte(pfn, pte_prot); |
| 171 | set_pte_at(mm, va, ptep, pte); | 171 | set_pte_at(mm, va, ptep, pte); |
| 172 | pte_unmap_unlock(ptep, ptl); | 172 | pte_unmap_unlock(ptep, ptl); |
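The one-character ldt.c change above matters: filtering protection bits against __supported_pte_mask has to be an AND, because OR would switch on every bit the CPU supports instead of dropping the ones it does not. A toy demonstration with made-up bit values (the real masks live in arch/x86):

	/* Toy values only, to show why the mask must be ANDed in. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long wanted    = 0x8000000000000063UL; /* prot bits incl. an NX-like high bit */
		unsigned long supported = 0x0000000000000fffUL; /* pretend the CPU lacks that bit */

		unsigned long with_or  = wanted | supported; /* turns on every supported bit: wrong */
		unsigned long with_and = wanted & supported; /* keeps only the supported bits: right */

		printf("|= gives %#lx, &= gives %#lx\n", with_or, with_and);
		return 0;
	}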
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b58787daf9f8..1fc05e428aba 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) | |||
| 1423 | seg->base = 0; | 1423 | seg->base = 0; |
| 1424 | } | 1424 | } |
| 1425 | 1425 | ||
| 1426 | static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) | ||
| 1427 | { | ||
| 1428 | struct vcpu_svm *svm = to_svm(vcpu); | ||
| 1429 | |||
| 1430 | if (is_guest_mode(vcpu)) | ||
| 1431 | return svm->nested.hsave->control.tsc_offset; | ||
| 1432 | |||
| 1433 | return vcpu->arch.tsc_offset; | ||
| 1434 | } | ||
| 1435 | |||
| 1426 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 1436 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| 1427 | { | 1437 | { |
| 1428 | struct vcpu_svm *svm = to_svm(vcpu); | 1438 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1429 | u64 g_tsc_offset = 0; | 1439 | u64 g_tsc_offset = 0; |
| 1430 | 1440 | ||
| 1431 | if (is_guest_mode(vcpu)) { | 1441 | if (is_guest_mode(vcpu)) { |
| 1442 | /* Write L1's TSC offset. */ | ||
| 1432 | g_tsc_offset = svm->vmcb->control.tsc_offset - | 1443 | g_tsc_offset = svm->vmcb->control.tsc_offset - |
| 1433 | svm->nested.hsave->control.tsc_offset; | 1444 | svm->nested.hsave->control.tsc_offset; |
| 1434 | svm->nested.hsave->control.tsc_offset = offset; | 1445 | svm->nested.hsave->control.tsc_offset = offset; |
| @@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) | |||
| 3322 | /* Restore the original control entries */ | 3333 | /* Restore the original control entries */ |
| 3323 | copy_vmcb_control_area(vmcb, hsave); | 3334 | copy_vmcb_control_area(vmcb, hsave); |
| 3324 | 3335 | ||
| 3336 | svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; | ||
| 3325 | kvm_clear_exception_queue(&svm->vcpu); | 3337 | kvm_clear_exception_queue(&svm->vcpu); |
| 3326 | kvm_clear_interrupt_queue(&svm->vcpu); | 3338 | kvm_clear_interrupt_queue(&svm->vcpu); |
| 3327 | 3339 | ||
| @@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, | |||
| 3482 | /* We don't want to see VMMCALLs from a nested guest */ | 3494 | /* We don't want to see VMMCALLs from a nested guest */ |
| 3483 | clr_intercept(svm, INTERCEPT_VMMCALL); | 3495 | clr_intercept(svm, INTERCEPT_VMMCALL); |
| 3484 | 3496 | ||
| 3497 | svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; | ||
| 3498 | svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; | ||
| 3499 | |||
| 3485 | svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; | 3500 | svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; |
| 3486 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; | 3501 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; |
| 3487 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; | 3502 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; |
| 3488 | svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; | ||
| 3489 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; | 3503 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; |
| 3490 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; | 3504 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; |
| 3491 | 3505 | ||
| @@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 4035 | struct vcpu_svm *svm = to_svm(vcpu); | 4049 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4036 | 4050 | ||
| 4037 | switch (msr_info->index) { | 4051 | switch (msr_info->index) { |
| 4038 | case MSR_IA32_TSC: { | ||
| 4039 | msr_info->data = svm->vmcb->control.tsc_offset + | ||
| 4040 | kvm_scale_tsc(vcpu, rdtsc()); | ||
| 4041 | |||
| 4042 | break; | ||
| 4043 | } | ||
| 4044 | case MSR_STAR: | 4052 | case MSR_STAR: |
| 4045 | msr_info->data = svm->vmcb->save.star; | 4053 | msr_info->data = svm->vmcb->save.star; |
| 4046 | break; | 4054 | break; |
| @@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
| 4193 | svm->vmcb->save.g_pat = data; | 4201 | svm->vmcb->save.g_pat = data; |
| 4194 | mark_dirty(svm->vmcb, VMCB_NPT); | 4202 | mark_dirty(svm->vmcb, VMCB_NPT); |
| 4195 | break; | 4203 | break; |
| 4196 | case MSR_IA32_TSC: | ||
| 4197 | kvm_write_tsc(vcpu, msr); | ||
| 4198 | break; | ||
| 4199 | case MSR_IA32_SPEC_CTRL: | 4204 | case MSR_IA32_SPEC_CTRL: |
| 4200 | if (!msr->host_initiated && | 4205 | if (!msr->host_initiated && |
| 4201 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) | 4206 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) |
| @@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
| 5265 | } | 5270 | } |
| 5266 | 5271 | ||
| 5267 | if (!ret && svm) { | 5272 | if (!ret && svm) { |
| 5268 | trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, | 5273 | trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, |
| 5269 | host_irq, e->gsi, | 5274 | e->gsi, vcpu_info.vector, |
| 5270 | vcpu_info.vector, | ||
| 5271 | vcpu_info.pi_desc_addr, set); | 5275 | vcpu_info.pi_desc_addr, set); |
| 5272 | } | 5276 | } |
| 5273 | 5277 | ||
| @@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
| 7102 | 7106 | ||
| 7103 | .has_wbinvd_exit = svm_has_wbinvd_exit, | 7107 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
| 7104 | 7108 | ||
| 7109 | .read_l1_tsc_offset = svm_read_l1_tsc_offset, | ||
| 7105 | .write_tsc_offset = svm_write_tsc_offset, | 7110 | .write_tsc_offset = svm_write_tsc_offset, |
| 7106 | 7111 | ||
| 7107 | .set_tdp_cr3 = set_tdp_cr3, | 7112 | .set_tdp_cr3 = set_tdp_cr3, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aafcc9881e88..aa66ccd6ed6c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
| 2880 | vmx_update_msr_bitmap(&vmx->vcpu); | 2880 | vmx_update_msr_bitmap(&vmx->vcpu); |
| 2881 | } | 2881 | } |
| 2882 | 2882 | ||
| 2883 | /* | 2883 | static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) |
| 2884 | * reads and returns guest's timestamp counter "register" | ||
| 2885 | * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset | ||
| 2886 | * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 | ||
| 2887 | */ | ||
| 2888 | static u64 guest_read_tsc(struct kvm_vcpu *vcpu) | ||
| 2889 | { | 2884 | { |
| 2890 | u64 host_tsc, tsc_offset; | 2885 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
| 2891 | 2886 | ||
| 2892 | host_tsc = rdtsc(); | 2887 | if (is_guest_mode(vcpu) && |
| 2893 | tsc_offset = vmcs_read64(TSC_OFFSET); | 2888 | (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) |
| 2894 | return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; | 2889 | return vcpu->arch.tsc_offset - vmcs12->tsc_offset; |
| 2890 | |||
| 2891 | return vcpu->arch.tsc_offset; | ||
| 2895 | } | 2892 | } |
| 2896 | 2893 | ||
| 2897 | /* | 2894 | /* |
| @@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 3524 | #endif | 3521 | #endif |
| 3525 | case MSR_EFER: | 3522 | case MSR_EFER: |
| 3526 | return kvm_get_msr_common(vcpu, msr_info); | 3523 | return kvm_get_msr_common(vcpu, msr_info); |
| 3527 | case MSR_IA32_TSC: | ||
| 3528 | msr_info->data = guest_read_tsc(vcpu); | ||
| 3529 | break; | ||
| 3530 | case MSR_IA32_SPEC_CTRL: | 3524 | case MSR_IA32_SPEC_CTRL: |
| 3531 | if (!msr_info->host_initiated && | 3525 | if (!msr_info->host_initiated && |
| 3532 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | 3526 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && |
| @@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 3646 | return 1; | 3640 | return 1; |
| 3647 | vmcs_write64(GUEST_BNDCFGS, data); | 3641 | vmcs_write64(GUEST_BNDCFGS, data); |
| 3648 | break; | 3642 | break; |
| 3649 | case MSR_IA32_TSC: | ||
| 3650 | kvm_write_tsc(vcpu, msr_info); | ||
| 3651 | break; | ||
| 3652 | case MSR_IA32_SPEC_CTRL: | 3643 | case MSR_IA32_SPEC_CTRL: |
| 3653 | if (!msr_info->host_initiated && | 3644 | if (!msr_info->host_initiated && |
| 3654 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | 3645 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && |
| @@ -10608,6 +10599,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, | |||
| 10608 | return true; | 10599 | return true; |
| 10609 | } | 10600 | } |
| 10610 | 10601 | ||
| 10602 | static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, | ||
| 10603 | struct vmcs12 *vmcs12) | ||
| 10604 | { | ||
| 10605 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && | ||
| 10606 | !page_address_valid(vcpu, vmcs12->apic_access_addr)) | ||
| 10607 | return -EINVAL; | ||
| 10608 | else | ||
| 10609 | return 0; | ||
| 10610 | } | ||
| 10611 | |||
| 10611 | static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, | 10612 | static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, |
| 10612 | struct vmcs12 *vmcs12) | 10613 | struct vmcs12 *vmcs12) |
| 10613 | { | 10614 | { |
| @@ -11176,11 +11177,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
| 11176 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); | 11177 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); |
| 11177 | } | 11178 | } |
| 11178 | 11179 | ||
| 11179 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | 11180 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
| 11180 | vmcs_write64(TSC_OFFSET, | 11181 | |
| 11181 | vcpu->arch.tsc_offset + vmcs12->tsc_offset); | ||
| 11182 | else | ||
| 11183 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); | ||
| 11184 | if (kvm_has_tsc_control) | 11182 | if (kvm_has_tsc_control) |
| 11185 | decache_tsc_multiplier(vmx); | 11183 | decache_tsc_multiplier(vmx); |
| 11186 | 11184 | ||
| @@ -11299,6 +11297,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
| 11299 | if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) | 11297 | if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) |
| 11300 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 11298 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
| 11301 | 11299 | ||
| 11300 | if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) | ||
| 11301 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | ||
| 11302 | |||
| 11302 | if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) | 11303 | if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) |
| 11303 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; | 11304 | return VMXERR_ENTRY_INVALID_CONTROL_FIELD; |
| 11304 | 11305 | ||
| @@ -11420,6 +11421,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
| 11420 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 11421 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
| 11421 | u32 msr_entry_idx; | 11422 | u32 msr_entry_idx; |
| 11422 | u32 exit_qual; | 11423 | u32 exit_qual; |
| 11424 | int r; | ||
| 11423 | 11425 | ||
| 11424 | enter_guest_mode(vcpu); | 11426 | enter_guest_mode(vcpu); |
| 11425 | 11427 | ||
| @@ -11429,26 +11431,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
| 11429 | vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); | 11431 | vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); |
| 11430 | vmx_segment_cache_clear(vmx); | 11432 | vmx_segment_cache_clear(vmx); |
| 11431 | 11433 | ||
| 11432 | if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { | 11434 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) |
| 11433 | leave_guest_mode(vcpu); | 11435 | vcpu->arch.tsc_offset += vmcs12->tsc_offset; |
| 11434 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | 11436 | |
| 11435 | nested_vmx_entry_failure(vcpu, vmcs12, | 11437 | r = EXIT_REASON_INVALID_STATE; |
| 11436 | EXIT_REASON_INVALID_STATE, exit_qual); | 11438 | if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) |
| 11437 | return 1; | 11439 | goto fail; |
| 11438 | } | ||
| 11439 | 11440 | ||
| 11440 | nested_get_vmcs12_pages(vcpu, vmcs12); | 11441 | nested_get_vmcs12_pages(vcpu, vmcs12); |
| 11441 | 11442 | ||
| 11443 | r = EXIT_REASON_MSR_LOAD_FAIL; | ||
| 11442 | msr_entry_idx = nested_vmx_load_msr(vcpu, | 11444 | msr_entry_idx = nested_vmx_load_msr(vcpu, |
| 11443 | vmcs12->vm_entry_msr_load_addr, | 11445 | vmcs12->vm_entry_msr_load_addr, |
| 11444 | vmcs12->vm_entry_msr_load_count); | 11446 | vmcs12->vm_entry_msr_load_count); |
| 11445 | if (msr_entry_idx) { | 11447 | if (msr_entry_idx) |
| 11446 | leave_guest_mode(vcpu); | 11448 | goto fail; |
| 11447 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | ||
| 11448 | nested_vmx_entry_failure(vcpu, vmcs12, | ||
| 11449 | EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); | ||
| 11450 | return 1; | ||
| 11451 | } | ||
| 11452 | 11449 | ||
| 11453 | /* | 11450 | /* |
| 11454 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point | 11451 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point |
| @@ -11457,6 +11454,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) | |||
| 11457 | * the success flag) when L2 exits (see nested_vmx_vmexit()). | 11454 | * the success flag) when L2 exits (see nested_vmx_vmexit()). |
| 11458 | */ | 11455 | */ |
| 11459 | return 0; | 11456 | return 0; |
| 11457 | |||
| 11458 | fail: | ||
| 11459 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | ||
| 11460 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; | ||
| 11461 | leave_guest_mode(vcpu); | ||
| 11462 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | ||
| 11463 | nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual); | ||
| 11464 | return 1; | ||
| 11460 | } | 11465 | } |
| 11461 | 11466 | ||
| 11462 | /* | 11467 | /* |
| @@ -12028,6 +12033,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
| 12028 | 12033 | ||
| 12029 | leave_guest_mode(vcpu); | 12034 | leave_guest_mode(vcpu); |
| 12030 | 12035 | ||
| 12036 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | ||
| 12037 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; | ||
| 12038 | |||
| 12031 | if (likely(!vmx->fail)) { | 12039 | if (likely(!vmx->fail)) { |
| 12032 | if (exit_reason == -1) | 12040 | if (exit_reason == -1) |
| 12033 | sync_vmcs12(vcpu, vmcs12); | 12041 | sync_vmcs12(vcpu, vmcs12); |
| @@ -12224,10 +12232,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift, | |||
| 12224 | 12232 | ||
| 12225 | static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) | 12233 | static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) |
| 12226 | { | 12234 | { |
| 12227 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 12235 | struct vcpu_vmx *vmx; |
| 12228 | u64 tscl = rdtsc(); | 12236 | u64 tscl, guest_tscl, delta_tsc; |
| 12229 | u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); | 12237 | |
| 12230 | u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; | 12238 | if (kvm_mwait_in_guest(vcpu->kvm)) |
| 12239 | return -EOPNOTSUPP; | ||
| 12240 | |||
| 12241 | vmx = to_vmx(vcpu); | ||
| 12242 | tscl = rdtsc(); | ||
| 12243 | guest_tscl = kvm_read_l1_tsc(vcpu, tscl); | ||
| 12244 | delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; | ||
| 12231 | 12245 | ||
| 12232 | /* Convert to host delta tsc if tsc scaling is enabled */ | 12246 | /* Convert to host delta tsc if tsc scaling is enabled */ |
| 12233 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && | 12247 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && |
| @@ -12533,7 +12547,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
| 12533 | vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); | 12547 | vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); |
| 12534 | vcpu_info.vector = irq.vector; | 12548 | vcpu_info.vector = irq.vector; |
| 12535 | 12549 | ||
| 12536 | trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, | 12550 | trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, |
| 12537 | vcpu_info.vector, vcpu_info.pi_desc_addr, set); | 12551 | vcpu_info.vector, vcpu_info.pi_desc_addr, set); |
| 12538 | 12552 | ||
| 12539 | if (set) | 12553 | if (set) |
| @@ -12712,6 +12726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
| 12712 | 12726 | ||
| 12713 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, | 12727 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
| 12714 | 12728 | ||
| 12729 | .read_l1_tsc_offset = vmx_read_l1_tsc_offset, | ||
| 12715 | .write_tsc_offset = vmx_write_tsc_offset, | 12730 | .write_tsc_offset = vmx_write_tsc_offset, |
| 12716 | 12731 | ||
| 12717 | .set_tdp_cr3 = vmx_set_cr3, | 12732 | .set_tdp_cr3 = vmx_set_cr3, |
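The TSC rework above keeps vcpu->arch.tsc_offset equal to whatever offset is currently live (the L2 offset while a nested guest runs), and read_l1_tsc_offset() recovers the L1 value by subtracting vmcs12->tsc_offset again. A tiny arithmetic sketch of that bookkeeping, with invented offsets:

	/* Invented offsets, just to show the add/subtract bookkeeping. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t l1_offset     = 1000; /* offset L1 uses for itself */
		uint64_t vmcs12_offset = 250;  /* TSC_OFFSET field L1 set up for L2 */

		/* Nested VM-entry: the live offset becomes the L2 offset. */
		uint64_t arch_tsc_offset = l1_offset + vmcs12_offset;

		/* read_l1_tsc_offset() while L2 runs: */
		uint64_t recovered = arch_tsc_offset - vmcs12_offset;

		/* Nested VM-exit: subtract again to return to the L1 offset. */
		arch_tsc_offset -= vmcs12_offset;

		printf("recovered L1 offset %" PRIu64 ", live offset %" PRIu64 "\n",
		       recovered, arch_tsc_offset);
		return 0;
	}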
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2ff74b12ec4..51ecd381793b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) | |||
| 1490 | 1490 | ||
| 1491 | static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) | 1491 | static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) |
| 1492 | { | 1492 | { |
| 1493 | u64 curr_offset = vcpu->arch.tsc_offset; | 1493 | u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); |
| 1494 | vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; | 1494 | vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; |
| 1495 | } | 1495 | } |
| 1496 | 1496 | ||
| @@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) | |||
| 1532 | 1532 | ||
| 1533 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) | 1533 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) |
| 1534 | { | 1534 | { |
| 1535 | return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); | 1535 | u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); |
| 1536 | |||
| 1537 | return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); | ||
| 1536 | } | 1538 | } |
| 1537 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); | 1539 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); |
| 1538 | 1540 | ||
| @@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2362 | return 1; | 2364 | return 1; |
| 2363 | vcpu->arch.smbase = data; | 2365 | vcpu->arch.smbase = data; |
| 2364 | break; | 2366 | break; |
| 2367 | case MSR_IA32_TSC: | ||
| 2368 | kvm_write_tsc(vcpu, msr_info); | ||
| 2369 | break; | ||
| 2365 | case MSR_SMI_COUNT: | 2370 | case MSR_SMI_COUNT: |
| 2366 | if (!msr_info->host_initiated) | 2371 | if (!msr_info->host_initiated) |
| 2367 | return 1; | 2372 | return 1; |
| @@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2605 | case MSR_IA32_UCODE_REV: | 2610 | case MSR_IA32_UCODE_REV: |
| 2606 | msr_info->data = vcpu->arch.microcode_version; | 2611 | msr_info->data = vcpu->arch.microcode_version; |
| 2607 | break; | 2612 | break; |
| 2613 | case MSR_IA32_TSC: | ||
| 2614 | msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; | ||
| 2615 | break; | ||
| 2608 | case MSR_MTRRcap: | 2616 | case MSR_MTRRcap: |
| 2609 | case 0x200 ... 0x2ff: | 2617 | case 0x200 ... 0x2ff: |
| 2610 | return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); | 2618 | return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); |
| @@ -2819,7 +2827,8 @@ out: | |||
| 2819 | static inline bool kvm_can_mwait_in_guest(void) | 2827 | static inline bool kvm_can_mwait_in_guest(void) |
| 2820 | { | 2828 | { |
| 2821 | return boot_cpu_has(X86_FEATURE_MWAIT) && | 2829 | return boot_cpu_has(X86_FEATURE_MWAIT) && |
| 2822 | !boot_cpu_has_bug(X86_BUG_MONITOR); | 2830 | !boot_cpu_has_bug(X86_BUG_MONITOR) && |
| 2831 | boot_cpu_has(X86_FEATURE_ARAT); | ||
| 2823 | } | 2832 | } |
| 2824 | 2833 | ||
| 2825 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | 2834 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 44abb8a0a5e5..be076606d30e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
| @@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) { | |||
| 671 | if ((vcc->pop) && (skb1->len != 0)) | 671 | if ((vcc->pop) && (skb1->len != 0)) |
| 672 | { | 672 | { |
| 673 | vcc->pop(vcc, skb1); | 673 | vcc->pop(vcc, skb1); |
| 674 | IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", | 674 | IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", |
| 675 | (long)skb1);) | 675 | (long)skb1);) |
| 676 | } | 676 | } |
| 677 | else | 677 | else |
| @@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev) | |||
| 1665 | status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); | 1665 | status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); |
| 1666 | if (status & TRANSMIT_DONE){ | 1666 | if (status & TRANSMIT_DONE){ |
| 1667 | 1667 | ||
| 1668 | IF_EVENT(printk("Tansmit Done Intr logic run\n");) | 1668 | IF_EVENT(printk("Transmit Done Intr logic run\n");) |
| 1669 | spin_lock_irqsave(&iadev->tx_lock, flags); | 1669 | spin_lock_irqsave(&iadev->tx_lock, flags); |
| 1670 | ia_tx_poll(iadev); | 1670 | ia_tx_poll(iadev); |
| 1671 | spin_unlock_irqrestore(&iadev->tx_lock, flags); | 1671 | spin_unlock_irqrestore(&iadev->tx_lock, flags); |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 07dc5419bd63..8e8b04cc569a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) | |||
| 732 | */ | 732 | */ |
| 733 | enum { | 733 | enum { |
| 734 | Opt_queue_depth, | 734 | Opt_queue_depth, |
| 735 | Opt_lock_timeout, | ||
| 735 | Opt_last_int, | 736 | Opt_last_int, |
| 736 | /* int args above */ | 737 | /* int args above */ |
| 737 | Opt_last_string, | 738 | Opt_last_string, |
| @@ -740,11 +741,13 @@ enum { | |||
| 740 | Opt_read_write, | 741 | Opt_read_write, |
| 741 | Opt_lock_on_read, | 742 | Opt_lock_on_read, |
| 742 | Opt_exclusive, | 743 | Opt_exclusive, |
| 744 | Opt_notrim, | ||
| 743 | Opt_err | 745 | Opt_err |
| 744 | }; | 746 | }; |
| 745 | 747 | ||
| 746 | static match_table_t rbd_opts_tokens = { | 748 | static match_table_t rbd_opts_tokens = { |
| 747 | {Opt_queue_depth, "queue_depth=%d"}, | 749 | {Opt_queue_depth, "queue_depth=%d"}, |
| 750 | {Opt_lock_timeout, "lock_timeout=%d"}, | ||
| 748 | /* int args above */ | 751 | /* int args above */ |
| 749 | /* string args above */ | 752 | /* string args above */ |
| 750 | {Opt_read_only, "read_only"}, | 753 | {Opt_read_only, "read_only"}, |
| @@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = { | |||
| 753 | {Opt_read_write, "rw"}, /* Alternate spelling */ | 756 | {Opt_read_write, "rw"}, /* Alternate spelling */ |
| 754 | {Opt_lock_on_read, "lock_on_read"}, | 757 | {Opt_lock_on_read, "lock_on_read"}, |
| 755 | {Opt_exclusive, "exclusive"}, | 758 | {Opt_exclusive, "exclusive"}, |
| 759 | {Opt_notrim, "notrim"}, | ||
| 756 | {Opt_err, NULL} | 760 | {Opt_err, NULL} |
| 757 | }; | 761 | }; |
| 758 | 762 | ||
| 759 | struct rbd_options { | 763 | struct rbd_options { |
| 760 | int queue_depth; | 764 | int queue_depth; |
| 765 | unsigned long lock_timeout; | ||
| 761 | bool read_only; | 766 | bool read_only; |
| 762 | bool lock_on_read; | 767 | bool lock_on_read; |
| 763 | bool exclusive; | 768 | bool exclusive; |
| 769 | bool trim; | ||
| 764 | }; | 770 | }; |
| 765 | 771 | ||
| 766 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ | 772 | #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ |
| 773 | #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ | ||
| 767 | #define RBD_READ_ONLY_DEFAULT false | 774 | #define RBD_READ_ONLY_DEFAULT false |
| 768 | #define RBD_LOCK_ON_READ_DEFAULT false | 775 | #define RBD_LOCK_ON_READ_DEFAULT false |
| 769 | #define RBD_EXCLUSIVE_DEFAULT false | 776 | #define RBD_EXCLUSIVE_DEFAULT false |
| 777 | #define RBD_TRIM_DEFAULT true | ||
| 770 | 778 | ||
| 771 | static int parse_rbd_opts_token(char *c, void *private) | 779 | static int parse_rbd_opts_token(char *c, void *private) |
| 772 | { | 780 | { |
| @@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
| 796 | } | 804 | } |
| 797 | rbd_opts->queue_depth = intval; | 805 | rbd_opts->queue_depth = intval; |
| 798 | break; | 806 | break; |
| 807 | case Opt_lock_timeout: | ||
| 808 | /* 0 is "wait forever" (i.e. infinite timeout) */ | ||
| 809 | if (intval < 0 || intval > INT_MAX / 1000) { | ||
| 810 | pr_err("lock_timeout out of range\n"); | ||
| 811 | return -EINVAL; | ||
| 812 | } | ||
| 813 | rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); | ||
| 814 | break; | ||
| 799 | case Opt_read_only: | 815 | case Opt_read_only: |
| 800 | rbd_opts->read_only = true; | 816 | rbd_opts->read_only = true; |
| 801 | break; | 817 | break; |
| @@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private) | |||
| 808 | case Opt_exclusive: | 824 | case Opt_exclusive: |
| 809 | rbd_opts->exclusive = true; | 825 | rbd_opts->exclusive = true; |
| 810 | break; | 826 | break; |
| 827 | case Opt_notrim: | ||
| 828 | rbd_opts->trim = false; | ||
| 829 | break; | ||
| 811 | default: | 830 | default: |
| 812 | /* libceph prints "bad option" msg */ | 831 | /* libceph prints "bad option" msg */ |
| 813 | return -EINVAL; | 832 | return -EINVAL; |
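The new lock_timeout option takes seconds from the user while the driver stores jiffies, and the INT_MAX / 1000 check guards the intermediate millisecond value against overflow. A small userspace sketch of the same conversion, with msecs_to_jiffies() stood in by an explicit HZ calculation (the HZ value here is an assumption):

	/* Userspace stand-in for the parsing above; HZ is assumed. */
	#include <limits.h>
	#include <stdio.h>

	#define HZ 250 /* assumed tick rate, purely illustrative */

	int main(void)
	{
		int intval = 60; /* e.g. lock_timeout=60, in seconds */

		if (intval < 0 || intval > INT_MAX / 1000) {
			fprintf(stderr, "lock_timeout out of range\n");
			return 1;
		}

		unsigned long msecs = (unsigned long)intval * 1000;
		unsigned long ticks = msecs * HZ / 1000; /* roughly msecs_to_jiffies() */

		printf("lock_timeout = %lu jiffies (0 means wait forever)\n", ticks);
		return 0;
	}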
| @@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req) | |||
| 1392 | case OBJ_OP_DISCARD: | 1411 | case OBJ_OP_DISCARD: |
| 1393 | return true; | 1412 | return true; |
| 1394 | default: | 1413 | default: |
| 1395 | rbd_assert(0); | 1414 | BUG(); |
| 1396 | } | 1415 | } |
| 1397 | } | 1416 | } |
| 1398 | 1417 | ||
| @@ -2466,7 +2485,7 @@ again: | |||
| 2466 | } | 2485 | } |
| 2467 | return false; | 2486 | return false; |
| 2468 | default: | 2487 | default: |
| 2469 | rbd_assert(0); | 2488 | BUG(); |
| 2470 | } | 2489 | } |
| 2471 | } | 2490 | } |
| 2472 | 2491 | ||
| @@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req) | |||
| 2494 | } | 2513 | } |
| 2495 | return false; | 2514 | return false; |
| 2496 | default: | 2515 | default: |
| 2497 | rbd_assert(0); | 2516 | BUG(); |
| 2498 | } | 2517 | } |
| 2499 | } | 2518 | } |
| 2500 | 2519 | ||
| @@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, | |||
| 3533 | /* | 3552 | /* |
| 3534 | * lock_rwsem must be held for read | 3553 | * lock_rwsem must be held for read |
| 3535 | */ | 3554 | */ |
| 3536 | static void rbd_wait_state_locked(struct rbd_device *rbd_dev) | 3555 | static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) |
| 3537 | { | 3556 | { |
| 3538 | DEFINE_WAIT(wait); | 3557 | DEFINE_WAIT(wait); |
| 3558 | unsigned long timeout; | ||
| 3559 | int ret = 0; | ||
| 3560 | |||
| 3561 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) | ||
| 3562 | return -EBLACKLISTED; | ||
| 3563 | |||
| 3564 | if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) | ||
| 3565 | return 0; | ||
| 3566 | |||
| 3567 | if (!may_acquire) { | ||
| 3568 | rbd_warn(rbd_dev, "exclusive lock required"); | ||
| 3569 | return -EROFS; | ||
| 3570 | } | ||
| 3539 | 3571 | ||
| 3540 | do { | 3572 | do { |
| 3541 | /* | 3573 | /* |
| @@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev) | |||
| 3547 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, | 3579 | prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, |
| 3548 | TASK_UNINTERRUPTIBLE); | 3580 | TASK_UNINTERRUPTIBLE); |
| 3549 | up_read(&rbd_dev->lock_rwsem); | 3581 | up_read(&rbd_dev->lock_rwsem); |
| 3550 | schedule(); | 3582 | timeout = schedule_timeout(ceph_timeout_jiffies( |
| 3583 | rbd_dev->opts->lock_timeout)); | ||
| 3551 | down_read(&rbd_dev->lock_rwsem); | 3584 | down_read(&rbd_dev->lock_rwsem); |
| 3552 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && | 3585 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { |
| 3553 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); | 3586 | ret = -EBLACKLISTED; |
| 3587 | break; | ||
| 3588 | } | ||
| 3589 | if (!timeout) { | ||
| 3590 | rbd_warn(rbd_dev, "timed out waiting for lock"); | ||
| 3591 | ret = -ETIMEDOUT; | ||
| 3592 | break; | ||
| 3593 | } | ||
| 3594 | } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); | ||
| 3554 | 3595 | ||
| 3555 | finish_wait(&rbd_dev->lock_waitq, &wait); | 3596 | finish_wait(&rbd_dev->lock_waitq, &wait); |
| 3597 | return ret; | ||
| 3556 | } | 3598 | } |
| 3557 | 3599 | ||
| 3558 | static void rbd_queue_workfn(struct work_struct *work) | 3600 | static void rbd_queue_workfn(struct work_struct *work) |
| @@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
| 3638 | (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); | 3680 | (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); |
| 3639 | if (must_be_locked) { | 3681 | if (must_be_locked) { |
| 3640 | down_read(&rbd_dev->lock_rwsem); | 3682 | down_read(&rbd_dev->lock_rwsem); |
| 3641 | if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && | 3683 | result = rbd_wait_state_locked(rbd_dev, |
| 3642 | !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | 3684 | !rbd_dev->opts->exclusive); |
| 3643 | if (rbd_dev->opts->exclusive) { | 3685 | if (result) |
| 3644 | rbd_warn(rbd_dev, "exclusive lock required"); | ||
| 3645 | result = -EROFS; | ||
| 3646 | goto err_unlock; | ||
| 3647 | } | ||
| 3648 | rbd_wait_state_locked(rbd_dev); | ||
| 3649 | } | ||
| 3650 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | ||
| 3651 | result = -EBLACKLISTED; | ||
| 3652 | goto err_unlock; | 3686 | goto err_unlock; |
| 3653 | } | ||
| 3654 | } | 3687 | } |
| 3655 | 3688 | ||
| 3656 | img_request = rbd_img_request_create(rbd_dev, op_type, snapc); | 3689 | img_request = rbd_img_request_create(rbd_dev, op_type, snapc); |
| @@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
| 3902 | { | 3935 | { |
| 3903 | struct gendisk *disk; | 3936 | struct gendisk *disk; |
| 3904 | struct request_queue *q; | 3937 | struct request_queue *q; |
| 3905 | u64 segment_size; | 3938 | unsigned int objset_bytes = |
| 3939 | rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; | ||
| 3906 | int err; | 3940 | int err; |
| 3907 | 3941 | ||
| 3908 | /* create gendisk info */ | 3942 | /* create gendisk info */ |
| @@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
| 3942 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 3976 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
| 3943 | /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ | 3977 | /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ |
| 3944 | 3978 | ||
| 3945 | /* set io sizes to object size */ | 3979 | blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); |
| 3946 | segment_size = rbd_obj_bytes(&rbd_dev->header); | ||
| 3947 | blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); | ||
| 3948 | q->limits.max_sectors = queue_max_hw_sectors(q); | 3980 | q->limits.max_sectors = queue_max_hw_sectors(q); |
| 3949 | blk_queue_max_segments(q, USHRT_MAX); | 3981 | blk_queue_max_segments(q, USHRT_MAX); |
| 3950 | blk_queue_max_segment_size(q, UINT_MAX); | 3982 | blk_queue_max_segment_size(q, UINT_MAX); |
| 3951 | blk_queue_io_min(q, segment_size); | 3983 | blk_queue_io_min(q, objset_bytes); |
| 3952 | blk_queue_io_opt(q, segment_size); | 3984 | blk_queue_io_opt(q, objset_bytes); |
| 3953 | 3985 | ||
| 3954 | /* enable the discard support */ | 3986 | if (rbd_dev->opts->trim) { |
| 3955 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); | 3987 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
| 3956 | q->limits.discard_granularity = segment_size; | 3988 | q->limits.discard_granularity = objset_bytes; |
| 3957 | blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); | 3989 | blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); |
| 3958 | blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); | 3990 | blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); |
| 3991 | } | ||
| 3959 | 3992 | ||
| 3960 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) | 3993 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) |
| 3961 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | 3994 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; |
| @@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf, | |||
| 5179 | 5212 | ||
| 5180 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; | 5213 | rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; |
| 5181 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; | 5214 | rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; |
| 5215 | rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; | ||
| 5182 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; | 5216 | rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; |
| 5183 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; | 5217 | rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; |
| 5218 | rbd_opts->trim = RBD_TRIM_DEFAULT; | ||
| 5184 | 5219 | ||
| 5185 | copts = ceph_parse_options(options, mon_addrs, | 5220 | copts = ceph_parse_options(options, mon_addrs, |
| 5186 | mon_addrs + mon_addrs_size - 1, | 5221 | mon_addrs + mon_addrs_size - 1, |
| @@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) | |||
| 5216 | 5251 | ||
| 5217 | static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) | 5252 | static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) |
| 5218 | { | 5253 | { |
| 5254 | int ret; | ||
| 5255 | |||
| 5219 | if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { | 5256 | if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { |
| 5220 | rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); | 5257 | rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); |
| 5221 | return -EINVAL; | 5258 | return -EINVAL; |
| @@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) | |||
| 5223 | 5260 | ||
| 5224 | /* FIXME: "rbd map --exclusive" should be in interruptible */ | 5261 | /* FIXME: "rbd map --exclusive" should be in interruptible */ |
| 5225 | down_read(&rbd_dev->lock_rwsem); | 5262 | down_read(&rbd_dev->lock_rwsem); |
| 5226 | rbd_wait_state_locked(rbd_dev); | 5263 | ret = rbd_wait_state_locked(rbd_dev, true); |
| 5227 | up_read(&rbd_dev->lock_rwsem); | 5264 | up_read(&rbd_dev->lock_rwsem); |
| 5228 | if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { | 5265 | if (ret) { |
| 5229 | rbd_warn(rbd_dev, "failed to acquire exclusive lock"); | 5266 | rbd_warn(rbd_dev, "failed to acquire exclusive lock"); |
| 5230 | return -EROFS; | 5267 | return -EROFS; |
| 5231 | } | 5268 | } |
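
The rbd hunks above replace the unbounded schedule() in rbd_wait_state_locked() with schedule_timeout() bounded by the new lock_timeout option, so a mapping that can never take the exclusive lock fails with -ETIMEDOUT (or -EROFS/-EBLACKLISTED) instead of hanging. Below is a minimal userspace C sketch of that shape only; the condition, the 10 ms poll interval and all names are illustrative, not rbd code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for "exclusive lock acquired"; purely illustrative. */
static bool lock_acquired(int attempt)
{
        return attempt >= 5;
}

/* Wait for the condition, but give up after timeout_sec seconds. */
static int wait_state_locked(double timeout_sec)
{
        struct timespec start, now;
        int attempt = 0;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!lock_acquired(attempt++)) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                double elapsed = (now.tv_sec - start.tv_sec) +
                                 (now.tv_nsec - start.tv_nsec) / 1e9;
                if (elapsed > timeout_sec)
                        return -1;      /* analogous to -ETIMEDOUT */
                nanosleep(&(struct timespec){ .tv_nsec = 10 * 1000 * 1000 }, NULL);
        }
        return 0;
}

int main(void)
{
        printf("wait result: %d\n", wait_state_locked(1.0));
        return 0;
}

The kernel function additionally re-checks the blacklisted flag after every wakeup; the sketch leaves that out.
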
diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..3cd3aae24d6d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -427,8 +427,9 @@ struct crng_state primary_crng = { | |||
| 427 | * its value (from 0->1->2). | 427 | * its value (from 0->1->2). |
| 428 | */ | 428 | */ |
| 429 | static int crng_init = 0; | 429 | static int crng_init = 0; |
| 430 | #define crng_ready() (likely(crng_init > 0)) | 430 | #define crng_ready() (likely(crng_init > 1)) |
| 431 | static int crng_init_cnt = 0; | 431 | static int crng_init_cnt = 0; |
| 432 | static unsigned long crng_global_init_time = 0; | ||
| 432 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) | 433 | #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) |
| 433 | static void _extract_crng(struct crng_state *crng, | 434 | static void _extract_crng(struct crng_state *crng, |
| 434 | __u32 out[CHACHA20_BLOCK_WORDS]); | 435 | __u32 out[CHACHA20_BLOCK_WORDS]); |
| @@ -787,6 +788,36 @@ static void crng_initialize(struct crng_state *crng) | |||
| 787 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; | 788 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; |
| 788 | } | 789 | } |
| 789 | 790 | ||
| 791 | #ifdef CONFIG_NUMA | ||
| 792 | static void numa_crng_init(void) | ||
| 793 | { | ||
| 794 | int i; | ||
| 795 | struct crng_state *crng; | ||
| 796 | struct crng_state **pool; | ||
| 797 | |||
| 798 | pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); | ||
| 799 | for_each_online_node(i) { | ||
| 800 | crng = kmalloc_node(sizeof(struct crng_state), | ||
| 801 | GFP_KERNEL | __GFP_NOFAIL, i); | ||
| 802 | spin_lock_init(&crng->lock); | ||
| 803 | crng_initialize(crng); | ||
| 804 | pool[i] = crng; | ||
| 805 | } | ||
| 806 | mb(); | ||
| 807 | if (cmpxchg(&crng_node_pool, NULL, pool)) { | ||
| 808 | for_each_node(i) | ||
| 809 | kfree(pool[i]); | ||
| 810 | kfree(pool); | ||
| 811 | } | ||
| 812 | } | ||
| 813 | #else | ||
| 814 | static void numa_crng_init(void) {} | ||
| 815 | #endif | ||
| 816 | |||
| 817 | /* | ||
| 818 | * crng_fast_load() can be called by code in the interrupt service | ||
| 819 | * path. So we can't afford to dilly-dally. | ||
| 820 | */ | ||
| 790 | static int crng_fast_load(const char *cp, size_t len) | 821 | static int crng_fast_load(const char *cp, size_t len) |
| 791 | { | 822 | { |
| 792 | unsigned long flags; | 823 | unsigned long flags; |
| @@ -794,7 +825,7 @@ static int crng_fast_load(const char *cp, size_t len) | |||
| 794 | 825 | ||
| 795 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) | 826 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) |
| 796 | return 0; | 827 | return 0; |
| 797 | if (crng_ready()) { | 828 | if (crng_init != 0) { |
| 798 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 829 | spin_unlock_irqrestore(&primary_crng.lock, flags); |
| 799 | return 0; | 830 | return 0; |
| 800 | } | 831 | } |
| @@ -813,6 +844,51 @@ static int crng_fast_load(const char *cp, size_t len) | |||
| 813 | return 1; | 844 | return 1; |
| 814 | } | 845 | } |
| 815 | 846 | ||
| 847 | /* | ||
| 848 | * crng_slow_load() is called by add_device_randomness, which has two | ||
| 849 | * attributes. (1) We can't trust that the buffer passed to it is | ||
| 850 | * unpredictable (so it might not have any entropy at | ||
| 851 | * all), and (2) it doesn't have the performance constraints of | ||
| 852 | * crng_fast_load(). | ||
| 853 | * | ||
| 854 | * So we do something more comprehensive which is guaranteed to touch | ||
| 855 | * all of the primary_crng's state, and which uses an LFSR with a | ||
| 856 | * period of 255 as part of the mixing algorithm. Finally, we do | ||
| 857 | * *not* advance crng_init_cnt, since the buffer we get may be something | ||
| 858 | * like a fixed DMI table (for example), which might very well be | ||
| 859 | * unique to the machine, but is otherwise unvarying. | ||
| 860 | */ | ||
| 861 | static int crng_slow_load(const char *cp, size_t len) | ||
| 862 | { | ||
| 863 | unsigned long flags; | ||
| 864 | static unsigned char lfsr = 1; | ||
| 865 | unsigned char tmp; | ||
| 866 | unsigned i, max = CHACHA20_KEY_SIZE; | ||
| 867 | const char * src_buf = cp; | ||
| 868 | char * dest_buf = (char *) &primary_crng.state[4]; | ||
| 869 | |||
| 870 | if (!spin_trylock_irqsave(&primary_crng.lock, flags)) | ||
| 871 | return 0; | ||
| 872 | if (crng_init != 0) { | ||
| 873 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
| 874 | return 0; | ||
| 875 | } | ||
| 876 | if (len > max) | ||
| 877 | max = len; | ||
| 878 | |||
| 879 | for (i = 0; i < max ; i++) { | ||
| 880 | tmp = lfsr; | ||
| 881 | lfsr >>= 1; | ||
| 882 | if (tmp & 1) | ||
| 883 | lfsr ^= 0xE1; | ||
| 884 | tmp = dest_buf[i % CHACHA20_KEY_SIZE]; | ||
| 885 | dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; | ||
| 886 | lfsr += (tmp << 3) | (tmp >> 5); | ||
| 887 | } | ||
| 888 | spin_unlock_irqrestore(&primary_crng.lock, flags); | ||
| 889 | return 1; | ||
| 890 | } | ||
| 891 | |||
| 816 | static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | 892 | static void crng_reseed(struct crng_state *crng, struct entropy_store *r) |
| 817 | { | 893 | { |
| 818 | unsigned long flags; | 894 | unsigned long flags; |
| @@ -831,7 +907,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
| 831 | _crng_backtrack_protect(&primary_crng, buf.block, | 907 | _crng_backtrack_protect(&primary_crng, buf.block, |
| 832 | CHACHA20_KEY_SIZE); | 908 | CHACHA20_KEY_SIZE); |
| 833 | } | 909 | } |
| 834 | spin_lock_irqsave(&primary_crng.lock, flags); | 910 | spin_lock_irqsave(&crng->lock, flags); |
| 835 | for (i = 0; i < 8; i++) { | 911 | for (i = 0; i < 8; i++) { |
| 836 | unsigned long rv; | 912 | unsigned long rv; |
| 837 | if (!arch_get_random_seed_long(&rv) && | 913 | if (!arch_get_random_seed_long(&rv) && |
| @@ -841,9 +917,10 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
| 841 | } | 917 | } |
| 842 | memzero_explicit(&buf, sizeof(buf)); | 918 | memzero_explicit(&buf, sizeof(buf)); |
| 843 | crng->init_time = jiffies; | 919 | crng->init_time = jiffies; |
| 844 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 920 | spin_unlock_irqrestore(&crng->lock, flags); |
| 845 | if (crng == &primary_crng && crng_init < 2) { | 921 | if (crng == &primary_crng && crng_init < 2) { |
| 846 | invalidate_batched_entropy(); | 922 | invalidate_batched_entropy(); |
| 923 | numa_crng_init(); | ||
| 847 | crng_init = 2; | 924 | crng_init = 2; |
| 848 | process_random_ready_list(); | 925 | process_random_ready_list(); |
| 849 | wake_up_interruptible(&crng_init_wait); | 926 | wake_up_interruptible(&crng_init_wait); |
| @@ -856,8 +933,9 @@ static void _extract_crng(struct crng_state *crng, | |||
| 856 | { | 933 | { |
| 857 | unsigned long v, flags; | 934 | unsigned long v, flags; |
| 858 | 935 | ||
| 859 | if (crng_init > 1 && | 936 | if (crng_ready() && |
| 860 | time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) | 937 | (time_after(crng_global_init_time, crng->init_time) || |
| 938 | time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) | ||
| 861 | crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); | 939 | crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); |
| 862 | spin_lock_irqsave(&crng->lock, flags); | 940 | spin_lock_irqsave(&crng->lock, flags); |
| 863 | if (arch_get_random_long(&v)) | 941 | if (arch_get_random_long(&v)) |
| @@ -981,10 +1059,8 @@ void add_device_randomness(const void *buf, unsigned int size) | |||
| 981 | unsigned long time = random_get_entropy() ^ jiffies; | 1059 | unsigned long time = random_get_entropy() ^ jiffies; |
| 982 | unsigned long flags; | 1060 | unsigned long flags; |
| 983 | 1061 | ||
| 984 | if (!crng_ready()) { | 1062 | if (!crng_ready() && size) |
| 985 | crng_fast_load(buf, size); | 1063 | crng_slow_load(buf, size); |
| 986 | return; | ||
| 987 | } | ||
| 988 | 1064 | ||
| 989 | trace_add_device_randomness(size, _RET_IP_); | 1065 | trace_add_device_randomness(size, _RET_IP_); |
| 990 | spin_lock_irqsave(&input_pool.lock, flags); | 1066 | spin_lock_irqsave(&input_pool.lock, flags); |
| @@ -1139,7 +1215,7 @@ void add_interrupt_randomness(int irq, int irq_flags) | |||
| 1139 | fast_mix(fast_pool); | 1215 | fast_mix(fast_pool); |
| 1140 | add_interrupt_bench(cycles); | 1216 | add_interrupt_bench(cycles); |
| 1141 | 1217 | ||
| 1142 | if (!crng_ready()) { | 1218 | if (unlikely(crng_init == 0)) { |
| 1143 | if ((fast_pool->count >= 64) && | 1219 | if ((fast_pool->count >= 64) && |
| 1144 | crng_fast_load((char *) fast_pool->pool, | 1220 | crng_fast_load((char *) fast_pool->pool, |
| 1145 | sizeof(fast_pool->pool))) { | 1221 | sizeof(fast_pool->pool))) { |
| @@ -1680,28 +1756,10 @@ static void init_std_data(struct entropy_store *r) | |||
| 1680 | */ | 1756 | */ |
| 1681 | static int rand_initialize(void) | 1757 | static int rand_initialize(void) |
| 1682 | { | 1758 | { |
| 1683 | #ifdef CONFIG_NUMA | ||
| 1684 | int i; | ||
| 1685 | struct crng_state *crng; | ||
| 1686 | struct crng_state **pool; | ||
| 1687 | #endif | ||
| 1688 | |||
| 1689 | init_std_data(&input_pool); | 1759 | init_std_data(&input_pool); |
| 1690 | init_std_data(&blocking_pool); | 1760 | init_std_data(&blocking_pool); |
| 1691 | crng_initialize(&primary_crng); | 1761 | crng_initialize(&primary_crng); |
| 1692 | 1762 | crng_global_init_time = jiffies; | |
| 1693 | #ifdef CONFIG_NUMA | ||
| 1694 | pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); | ||
| 1695 | for_each_online_node(i) { | ||
| 1696 | crng = kmalloc_node(sizeof(struct crng_state), | ||
| 1697 | GFP_KERNEL | __GFP_NOFAIL, i); | ||
| 1698 | spin_lock_init(&crng->lock); | ||
| 1699 | crng_initialize(crng); | ||
| 1700 | pool[i] = crng; | ||
| 1701 | } | ||
| 1702 | mb(); | ||
| 1703 | crng_node_pool = pool; | ||
| 1704 | #endif | ||
| 1705 | return 0; | 1763 | return 0; |
| 1706 | } | 1764 | } |
| 1707 | early_initcall(rand_initialize); | 1765 | early_initcall(rand_initialize); |
| @@ -1875,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | |||
| 1875 | input_pool.entropy_count = 0; | 1933 | input_pool.entropy_count = 0; |
| 1876 | blocking_pool.entropy_count = 0; | 1934 | blocking_pool.entropy_count = 0; |
| 1877 | return 0; | 1935 | return 0; |
| 1936 | case RNDRESEEDCRNG: | ||
| 1937 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1938 | return -EPERM; | ||
| 1939 | if (crng_init < 2) | ||
| 1940 | return -ENODATA; | ||
| 1941 | crng_reseed(&primary_crng, NULL); | ||
| 1942 | crng_global_init_time = jiffies - 1; | ||
| 1943 | return 0; | ||
| 1878 | default: | 1944 | default: |
| 1879 | return -EINVAL; | 1945 | return -EINVAL; |
| 1880 | } | 1946 | } |
| @@ -2212,7 +2278,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, | |||
| 2212 | { | 2278 | { |
| 2213 | struct entropy_store *poolp = &input_pool; | 2279 | struct entropy_store *poolp = &input_pool; |
| 2214 | 2280 | ||
| 2215 | if (!crng_ready()) { | 2281 | if (unlikely(crng_init == 0)) { |
| 2216 | crng_fast_load(buffer, count); | 2282 | crng_fast_load(buffer, count); |
| 2217 | return; | 2283 | return; |
| 2218 | } | 2284 | } |
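
In the random.c hunks above, crng_slow_load() mixes possibly zero-entropy input (for example a fixed DMI table) into the whole ChaCha20 key using an 8-bit Galois LFSR with toggle mask 0xE1, which the comment says has a period of 255. The standalone C program below copies that step and measures the cycle length; everything outside lfsr_step() is scaffolding added for illustration:

#include <stdio.h>

/* One step of the 8-bit Galois LFSR used in crng_slow_load(). */
static unsigned char lfsr_step(unsigned char lfsr)
{
        unsigned char lsb = lfsr & 1;

        lfsr >>= 1;
        if (lsb)
                lfsr ^= 0xE1;
        return lfsr;
}

int main(void)
{
        unsigned char state = 1;
        int period = 0;

        /* Walk the states until we return to the starting value. */
        do {
                state = lfsr_step(state);
                period++;
        } while (state != 1);

        printf("LFSR period: %d\n", period);
        return 0;
}
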
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 9ee2888275c1..8e8a09755d10 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -133,6 +133,14 @@ config VT8500_TIMER | |||
| 133 | help | 133 | help |
| 134 | Enables support for the VT8500 driver. | 134 | Enables support for the VT8500 driver. |
| 135 | 135 | ||
| 136 | config NPCM7XX_TIMER | ||
| 137 | bool "NPCM7xx timer driver" if COMPILE_TEST | ||
| 138 | depends on HAS_IOMEM | ||
| 139 | select CLKSRC_MMIO | ||
| 140 | help | ||
| 141 | Enable the 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, | ||
| 142 | where TIMER0 serves as the clockevent and TIMER1 as the clocksource. | ||
| 143 | |||
| 136 | config CADENCE_TTC_TIMER | 144 | config CADENCE_TTC_TIMER |
| 137 | bool "Cadence TTC timer driver" if COMPILE_TEST | 145 | bool "Cadence TTC timer driver" if COMPILE_TEST |
| 138 | depends on COMMON_CLK | 146 | depends on COMMON_CLK |
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e8e76dfef00b..00caf37e52f9 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
| @@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o | |||
| 56 | obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o | 56 | obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o |
| 57 | obj-$(CONFIG_OWL_TIMER) += owl-timer.o | 57 | obj-$(CONFIG_OWL_TIMER) += owl-timer.o |
| 58 | obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o | 58 | obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o |
| 59 | obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o | ||
| 59 | 60 | ||
| 60 | obj-$(CONFIG_ARC_TIMERS) += arc_timer.o | 61 | obj-$(CONFIG_ARC_TIMERS) += arc_timer.o |
| 61 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | 62 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o |
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 21bffdcb2f20..05d97a6871d8 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c | |||
| @@ -17,9 +17,14 @@ | |||
| 17 | #include <linux/of_irq.h> | 17 | #include <linux/of_irq.h> |
| 18 | #include <linux/sched_clock.h> | 18 | #include <linux/sched_clock.h> |
| 19 | 19 | ||
| 20 | #define TPM_PARAM 0x4 | ||
| 21 | #define TPM_PARAM_WIDTH_SHIFT 16 | ||
| 22 | #define TPM_PARAM_WIDTH_MASK (0xff << 16) | ||
| 20 | #define TPM_SC 0x10 | 23 | #define TPM_SC 0x10 |
| 21 | #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) | 24 | #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) |
| 22 | #define TPM_SC_CMOD_DIV_DEFAULT 0x3 | 25 | #define TPM_SC_CMOD_DIV_DEFAULT 0x3 |
| 26 | #define TPM_SC_CMOD_DIV_MAX 0x7 | ||
| 27 | #define TPM_SC_TOF_MASK (0x1 << 7) | ||
| 23 | #define TPM_CNT 0x14 | 28 | #define TPM_CNT 0x14 |
| 24 | #define TPM_MOD 0x18 | 29 | #define TPM_MOD 0x18 |
| 25 | #define TPM_STATUS 0x1c | 30 | #define TPM_STATUS 0x1c |
| @@ -29,8 +34,11 @@ | |||
| 29 | #define TPM_C0SC_MODE_SHIFT 2 | 34 | #define TPM_C0SC_MODE_SHIFT 2 |
| 30 | #define TPM_C0SC_MODE_MASK 0x3c | 35 | #define TPM_C0SC_MODE_MASK 0x3c |
| 31 | #define TPM_C0SC_MODE_SW_COMPARE 0x4 | 36 | #define TPM_C0SC_MODE_SW_COMPARE 0x4 |
| 37 | #define TPM_C0SC_CHF_MASK (0x1 << 7) | ||
| 32 | #define TPM_C0V 0x24 | 38 | #define TPM_C0V 0x24 |
| 33 | 39 | ||
| 40 | static int counter_width; | ||
| 41 | static int rating; | ||
| 34 | static void __iomem *timer_base; | 42 | static void __iomem *timer_base; |
| 35 | static struct clock_event_device clockevent_tpm; | 43 | static struct clock_event_device clockevent_tpm; |
| 36 | 44 | ||
| @@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate) | |||
| 83 | tpm_delay_timer.freq = rate; | 91 | tpm_delay_timer.freq = rate; |
| 84 | register_current_timer_delay(&tpm_delay_timer); | 92 | register_current_timer_delay(&tpm_delay_timer); |
| 85 | 93 | ||
| 86 | sched_clock_register(tpm_read_sched_clock, 32, rate); | 94 | sched_clock_register(tpm_read_sched_clock, counter_width, rate); |
| 87 | 95 | ||
| 88 | return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", | 96 | return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", |
| 89 | rate, 200, 32, clocksource_mmio_readl_up); | 97 | rate, rating, counter_width, |
| 98 | clocksource_mmio_readl_up); | ||
| 90 | } | 99 | } |
| 91 | 100 | ||
| 92 | static int tpm_set_next_event(unsigned long delta, | 101 | static int tpm_set_next_event(unsigned long delta, |
| @@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = { | |||
| 139 | .set_state_oneshot = tpm_set_state_oneshot, | 148 | .set_state_oneshot = tpm_set_state_oneshot, |
| 140 | .set_next_event = tpm_set_next_event, | 149 | .set_next_event = tpm_set_next_event, |
| 141 | .set_state_shutdown = tpm_set_state_shutdown, | 150 | .set_state_shutdown = tpm_set_state_shutdown, |
| 142 | .rating = 200, | ||
| 143 | }; | 151 | }; |
| 144 | 152 | ||
| 145 | static int __init tpm_clockevent_init(unsigned long rate, int irq) | 153 | static int __init tpm_clockevent_init(unsigned long rate, int irq) |
| @@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq) | |||
| 149 | ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, | 157 | ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, |
| 150 | "i.MX7ULP TPM Timer", &clockevent_tpm); | 158 | "i.MX7ULP TPM Timer", &clockevent_tpm); |
| 151 | 159 | ||
| 160 | clockevent_tpm.rating = rating; | ||
| 152 | clockevent_tpm.cpumask = cpumask_of(0); | 161 | clockevent_tpm.cpumask = cpumask_of(0); |
| 153 | clockevent_tpm.irq = irq; | 162 | clockevent_tpm.irq = irq; |
| 154 | clockevents_config_and_register(&clockevent_tpm, | 163 | clockevents_config_and_register(&clockevent_tpm, rate, 300, |
| 155 | rate, 300, 0xfffffffe); | 164 | GENMASK(counter_width - 1, 1)); |
| 156 | 165 | ||
| 157 | return ret; | 166 | return ret; |
| 158 | } | 167 | } |
| @@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np) | |||
| 179 | ipg = of_clk_get_by_name(np, "ipg"); | 188 | ipg = of_clk_get_by_name(np, "ipg"); |
| 180 | per = of_clk_get_by_name(np, "per"); | 189 | per = of_clk_get_by_name(np, "per"); |
| 181 | if (IS_ERR(ipg) || IS_ERR(per)) { | 190 | if (IS_ERR(ipg) || IS_ERR(per)) { |
| 182 | pr_err("tpm: failed to get igp or per clk\n"); | 191 | pr_err("tpm: failed to get ipg or per clk\n"); |
| 183 | ret = -ENODEV; | 192 | ret = -ENODEV; |
| 184 | goto err_clk_get; | 193 | goto err_clk_get; |
| 185 | } | 194 | } |
| @@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np) | |||
| 197 | goto err_per_clk_enable; | 206 | goto err_per_clk_enable; |
| 198 | } | 207 | } |
| 199 | 208 | ||
| 209 | counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK) | ||
| 210 | >> TPM_PARAM_WIDTH_SHIFT; | ||
| 211 | /* use rating 200 for 32-bit counter and 150 for 16-bit counter */ | ||
| 212 | rating = counter_width == 0x20 ? 200 : 150; | ||
| 213 | |||
| 200 | /* | 214 | /* |
| 201 | * Initialize tpm module to a known state | 215 | * Initialize tpm module to a known state |
| 202 | * 1) Counter disabled | 216 | * 1) Counter disabled |
| @@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np) | |||
| 205 | * 4) Channel0 disabled | 219 | * 4) Channel0 disabled |
| 206 | * 5) DMA transfers disabled | 220 | * 5) DMA transfers disabled |
| 207 | */ | 221 | */ |
| 222 | /* make sure counter is disabled */ | ||
| 208 | writel(0, timer_base + TPM_SC); | 223 | writel(0, timer_base + TPM_SC); |
| 224 | /* TOF is W1C */ | ||
| 225 | writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); | ||
| 209 | writel(0, timer_base + TPM_CNT); | 226 | writel(0, timer_base + TPM_CNT); |
| 210 | writel(0, timer_base + TPM_C0SC); | 227 | /* CHF is W1C */ |
| 228 | writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); | ||
| 211 | 229 | ||
| 212 | /* increase per cnt, div 8 by default */ | 230 | /* |
| 213 | writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, | 231 | * increase per cnt, |
| 232 | * div 8 for 32-bit counter and div 128 for 16-bit counter | ||
| 233 | */ | ||
| 234 | writel(TPM_SC_CMOD_INC_PER_CNT | | ||
| 235 | (counter_width == 0x20 ? | ||
| 236 | TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX), | ||
| 214 | timer_base + TPM_SC); | 237 | timer_base + TPM_SC); |
| 215 | 238 | ||
| 216 | /* set MOD register to maximum for free running mode */ | 239 | /* set MOD register to maximum for free running mode */ |
| 217 | writel(0xffffffff, timer_base + TPM_MOD); | 240 | writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD); |
| 218 | 241 | ||
| 219 | rate = clk_get_rate(per) >> 3; | 242 | rate = clk_get_rate(per) >> 3; |
| 220 | ret = tpm_clocksource_init(rate); | 243 | ret = tpm_clocksource_init(rate); |
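
The timer-imx-tpm.c hunks above stop assuming a 32-bit counter: the width is read from TPM_PARAM and then used to build the free-running MOD value and the clockevent's maximum delta via GENMASK(counter_width - 1, 0) and GENMASK(counter_width - 1, 1). The sketch below shows the resulting masks for the two widths the driver distinguishes; GENMASK_SIMPLE is a simplified stand-in that is only valid for widths up to 32 bits, not the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* Simplified GENMASK(h, l): bits h..l set; fine here because h <= 31. */
#define GENMASK_SIMPLE(h, l)    (((1ULL << ((h) - (l) + 1)) - 1) << (l))

static void show(int counter_width)
{
        uint64_t mod       = GENMASK_SIMPLE(counter_width - 1, 0); /* free-running MOD */
        uint64_t max_delta = GENMASK_SIMPLE(counter_width - 1, 1); /* clockevent max delta */

        printf("width %2d: MOD=0x%08llx max_delta=0x%08llx\n", counter_width,
               (unsigned long long)mod, (unsigned long long)max_delta);
}

int main(void)
{
        show(16);       /* 16-bit TPM: 0x0000ffff / 0x0000fffe */
        show(32);       /* 32-bit TPM: 0xffffffff / 0xfffffffe */
        return 0;
}
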
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c new file mode 100644 index 000000000000..7a9bb5532d99 --- /dev/null +++ b/drivers/clocksource/timer-npcm7xx.c | |||
| @@ -0,0 +1,215 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com | ||
| 4 | * All rights reserved. | ||
| 5 | * | ||
| 6 | * Copyright 2017 Google, Inc. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/kernel.h> | ||
| 10 | #include <linux/sched.h> | ||
| 11 | #include <linux/init.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/clockchips.h> | ||
| 17 | #include <linux/of_irq.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include "timer-of.h" | ||
| 20 | |||
| 21 | /* Timers registers */ | ||
| 22 | #define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */ | ||
| 23 | #define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */ | ||
| 24 | #define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */ | ||
| 25 | #define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */ | ||
| 26 | #define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */ | ||
| 27 | #define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */ | ||
| 28 | |||
| 29 | /* Timers control */ | ||
| 30 | #define NPCM7XX_Tx_RESETINT 0x1f | ||
| 31 | #define NPCM7XX_Tx_PERIOD BIT(27) | ||
| 32 | #define NPCM7XX_Tx_INTEN BIT(29) | ||
| 33 | #define NPCM7XX_Tx_COUNTEN BIT(30) | ||
| 34 | #define NPCM7XX_Tx_ONESHOT 0x0 | ||
| 35 | #define NPCM7XX_Tx_OPER GENMASK(3, 27) | ||
| 36 | #define NPCM7XX_Tx_MIN_PRESCALE 0x1 | ||
| 37 | #define NPCM7XX_Tx_TDR_MASK_BITS 24 | ||
| 38 | #define NPCM7XX_Tx_MAX_CNT 0xFFFFFF | ||
| 39 | #define NPCM7XX_T0_CLR_INT 0x1 | ||
| 40 | #define NPCM7XX_Tx_CLR_CSR 0x0 | ||
| 41 | |||
| 42 | /* Timers operating mode */ | ||
| 43 | #define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \ | ||
| 44 | NPCM7XX_Tx_INTEN | \ | ||
| 45 | NPCM7XX_Tx_MIN_PRESCALE) | ||
| 46 | |||
| 47 | #define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \ | ||
| 48 | NPCM7XX_Tx_INTEN | \ | ||
| 49 | NPCM7XX_Tx_MIN_PRESCALE) | ||
| 50 | |||
| 51 | #define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \ | ||
| 52 | NPCM7XX_Tx_MIN_PRESCALE) | ||
| 53 | |||
| 54 | #define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE) | ||
| 55 | |||
| 56 | static int npcm7xx_timer_resume(struct clock_event_device *evt) | ||
| 57 | { | ||
| 58 | struct timer_of *to = to_timer_of(evt); | ||
| 59 | u32 val; | ||
| 60 | |||
| 61 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 62 | val |= NPCM7XX_Tx_COUNTEN; | ||
| 63 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | static int npcm7xx_timer_shutdown(struct clock_event_device *evt) | ||
| 69 | { | ||
| 70 | struct timer_of *to = to_timer_of(evt); | ||
| 71 | u32 val; | ||
| 72 | |||
| 73 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 74 | val &= ~NPCM7XX_Tx_COUNTEN; | ||
| 75 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 76 | |||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static int npcm7xx_timer_oneshot(struct clock_event_device *evt) | ||
| 81 | { | ||
| 82 | struct timer_of *to = to_timer_of(evt); | ||
| 83 | u32 val; | ||
| 84 | |||
| 85 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 86 | val &= ~NPCM7XX_Tx_OPER; | ||
| 87 | |||
| 88 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 89 | val |= NPCM7XX_START_ONESHOT_Tx; | ||
| 90 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | static int npcm7xx_timer_periodic(struct clock_event_device *evt) | ||
| 96 | { | ||
| 97 | struct timer_of *to = to_timer_of(evt); | ||
| 98 | u32 val; | ||
| 99 | |||
| 100 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 101 | val &= ~NPCM7XX_Tx_OPER; | ||
| 102 | |||
| 103 | writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0); | ||
| 104 | val |= NPCM7XX_START_PERIODIC_Tx; | ||
| 105 | |||
| 106 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 107 | |||
| 108 | return 0; | ||
| 109 | } | ||
| 110 | |||
| 111 | static int npcm7xx_clockevent_set_next_event(unsigned long evt, | ||
| 112 | struct clock_event_device *clk) | ||
| 113 | { | ||
| 114 | struct timer_of *to = to_timer_of(clk); | ||
| 115 | u32 val; | ||
| 116 | |||
| 117 | writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0); | ||
| 118 | val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 119 | val |= NPCM7XX_START_Tx; | ||
| 120 | writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); | ||
| 121 | |||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id) | ||
| 126 | { | ||
| 127 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; | ||
| 128 | struct timer_of *to = to_timer_of(evt); | ||
| 129 | |||
| 130 | writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR); | ||
| 131 | |||
| 132 | evt->event_handler(evt); | ||
| 133 | |||
| 134 | return IRQ_HANDLED; | ||
| 135 | } | ||
| 136 | |||
| 137 | static struct timer_of npcm7xx_to = { | ||
| 138 | .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, | ||
| 139 | |||
| 140 | .clkevt = { | ||
| 141 | .name = "npcm7xx-timer0", | ||
| 142 | .features = CLOCK_EVT_FEAT_PERIODIC | | ||
| 143 | CLOCK_EVT_FEAT_ONESHOT, | ||
| 144 | .set_next_event = npcm7xx_clockevent_set_next_event, | ||
| 145 | .set_state_shutdown = npcm7xx_timer_shutdown, | ||
| 146 | .set_state_periodic = npcm7xx_timer_periodic, | ||
| 147 | .set_state_oneshot = npcm7xx_timer_oneshot, | ||
| 148 | .tick_resume = npcm7xx_timer_resume, | ||
| 149 | .rating = 300, | ||
| 150 | }, | ||
| 151 | |||
| 152 | .of_irq = { | ||
| 153 | .handler = npcm7xx_timer0_interrupt, | ||
| 154 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | ||
| 155 | }, | ||
| 156 | }; | ||
| 157 | |||
| 158 | static void __init npcm7xx_clockevents_init(void) | ||
| 159 | { | ||
| 160 | writel(NPCM7XX_DEFAULT_CSR, | ||
| 161 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0); | ||
| 162 | |||
| 163 | writel(NPCM7XX_Tx_RESETINT, | ||
| 164 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR); | ||
| 165 | |||
| 166 | npcm7xx_to.clkevt.cpumask = cpumask_of(0); | ||
| 167 | clockevents_config_and_register(&npcm7xx_to.clkevt, | ||
| 168 | timer_of_rate(&npcm7xx_to), | ||
| 169 | 0x1, NPCM7XX_Tx_MAX_CNT); | ||
| 170 | } | ||
| 171 | |||
| 172 | static void __init npcm7xx_clocksource_init(void) | ||
| 173 | { | ||
| 174 | u32 val; | ||
| 175 | |||
| 176 | writel(NPCM7XX_DEFAULT_CSR, | ||
| 177 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
| 178 | writel(NPCM7XX_Tx_MAX_CNT, | ||
| 179 | timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1); | ||
| 180 | |||
| 181 | val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
| 182 | val |= NPCM7XX_START_Tx; | ||
| 183 | writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); | ||
| 184 | |||
| 185 | clocksource_mmio_init(timer_of_base(&npcm7xx_to) + | ||
| 186 | NPCM7XX_REG_TDR1, | ||
| 187 | "npcm7xx-timer1", timer_of_rate(&npcm7xx_to), | ||
| 188 | 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS, | ||
| 189 | clocksource_mmio_readl_down); | ||
| 190 | } | ||
| 191 | |||
| 192 | static int __init npcm7xx_timer_init(struct device_node *np) | ||
| 193 | { | ||
| 194 | int ret; | ||
| 195 | |||
| 196 | ret = timer_of_init(np, &npcm7xx_to); | ||
| 197 | if (ret) | ||
| 198 | return ret; | ||
| 199 | |||
| 200 | /* Clock input is divided by PRESCALE + 1 before it is fed | ||
| 201 | * to the counter */ | ||
| 202 | npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate / | ||
| 203 | (NPCM7XX_Tx_MIN_PRESCALE + 1); | ||
| 204 | |||
| 205 | npcm7xx_clocksource_init(); | ||
| 206 | npcm7xx_clockevents_init(); | ||
| 207 | |||
| 208 | pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ", | ||
| 209 | timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to)); | ||
| 210 | |||
| 211 | return 0; | ||
| 212 | } | ||
| 213 | |||
| 214 | TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); | ||
| 215 | |||
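
timer-npcm7xx.c above divides the input clock by PRESCALE + 1 and runs the 24-bit TIMER1 down-counter as the clocksource, so the counter wraps after NPCM7XX_Tx_MAX_CNT ticks of the divided clock. The arithmetic below only illustrates that wrap interval; the 25 MHz input clock is an assumed example, not a value taken from the patch:

#include <stdio.h>

#define NPCM7XX_Tx_MIN_PRESCALE 0x1
#define NPCM7XX_Tx_MAX_CNT      0xFFFFFF        /* 24-bit counter */

int main(void)
{
        double input_hz = 25000000.0;   /* assumed example clock, not from the patch */
        double rate = input_hz / (NPCM7XX_Tx_MIN_PRESCALE + 1);

        printf("effective rate: %.0f Hz\n", rate);
        printf("24-bit wrap interval: %.3f s\n",
               (double)NPCM7XX_Tx_MAX_CNT / rate);
        return 0;
}
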
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/dax.h> | 19 | #include <linux/dax.h> |
| 20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
| 21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
| 22 | #include <linux/mman.h> | ||
| 22 | #include "dax-private.h" | 23 | #include "dax-private.h" |
| 23 | #include "dax.h" | 24 | #include "dax.h" |
| 24 | 25 | ||
| @@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { | |||
| 540 | .release = dax_release, | 541 | .release = dax_release, |
| 541 | .get_unmapped_area = dax_get_unmapped_area, | 542 | .get_unmapped_area = dax_get_unmapped_area, |
| 542 | .mmap = dax_mmap, | 543 | .mmap = dax_mmap, |
| 544 | .mmap_supported_flags = MAP_SYNC, | ||
| 543 | }; | 545 | }; |
| 544 | 546 | ||
| 545 | static void dev_dax_release(struct device *dev) | 547 | static void dev_dax_release(struct device *dev) |
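
The device-dax hunk above advertises MAP_SYNC through the new mmap_supported_flags field, so applications that insist on synchronous page faults can map the device. The userspace sketch below shows how such a mapping is requested with MAP_SHARED_VALIDATE | MAP_SYNC, which fails rather than being silently ignored when the driver lacks support; the device path and length are illustrative, and the flag definitions need a reasonably recent libc (or <linux/mman.h>):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Illustrative path and length; adjust for a real device. */
        const char *path = "/dev/dax0.0";
        size_t len = 2UL << 20;

        int fd = open(path, O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap(MAP_SYNC)");       /* e.g. EOPNOTSUPP without this patch */
                close(fd);
                return 1;
        }

        memset(p, 0, len);      /* writes land directly in device-backed pages */
        munmap(p, len);
        close(fd);
        return 0;
}
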
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a3a7ead3012..0b5cc910f62e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -525,6 +525,9 @@ | |||
| 525 | #define I2C_VENDOR_ID_HANTICK 0x0911 | 525 | #define I2C_VENDOR_ID_HANTICK 0x0911 |
| 526 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 | 526 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 |
| 527 | 527 | ||
| 528 | #define I2C_VENDOR_ID_RAYD 0x2386 | ||
| 529 | #define I2C_PRODUCT_ID_RAYD_3118 0x3118 | ||
| 530 | |||
| 528 | #define USB_VENDOR_ID_HANWANG 0x0b57 | 531 | #define USB_VENDOR_ID_HANWANG 0x0b57 |
| 529 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 | 532 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 |
| 530 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff | 533 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 6836a856c243..930652c25120 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
| @@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy, | |||
| 387 | break; | 387 | break; |
| 388 | 388 | ||
| 389 | case POWER_SUPPLY_PROP_CAPACITY: | 389 | case POWER_SUPPLY_PROP_CAPACITY: |
| 390 | if (dev->battery_report_type == HID_FEATURE_REPORT) { | 390 | if (dev->battery_status != HID_BATTERY_REPORTED && |
| 391 | !dev->battery_avoid_query) { | ||
| 391 | value = hidinput_query_battery_capacity(dev); | 392 | value = hidinput_query_battery_capacity(dev); |
| 392 | if (value < 0) | 393 | if (value < 0) |
| 393 | return value; | 394 | return value; |
| @@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy, | |||
| 403 | break; | 404 | break; |
| 404 | 405 | ||
| 405 | case POWER_SUPPLY_PROP_STATUS: | 406 | case POWER_SUPPLY_PROP_STATUS: |
| 406 | if (!dev->battery_reported && | 407 | if (dev->battery_status != HID_BATTERY_REPORTED && |
| 407 | dev->battery_report_type == HID_FEATURE_REPORT) { | 408 | !dev->battery_avoid_query) { |
| 408 | value = hidinput_query_battery_capacity(dev); | 409 | value = hidinput_query_battery_capacity(dev); |
| 409 | if (value < 0) | 410 | if (value < 0) |
| 410 | return value; | 411 | return value; |
| 411 | 412 | ||
| 412 | dev->battery_capacity = value; | 413 | dev->battery_capacity = value; |
| 413 | dev->battery_reported = true; | 414 | dev->battery_status = HID_BATTERY_QUERIED; |
| 414 | } | 415 | } |
| 415 | 416 | ||
| 416 | if (!dev->battery_reported) | 417 | if (dev->battery_status == HID_BATTERY_UNKNOWN) |
| 417 | val->intval = POWER_SUPPLY_STATUS_UNKNOWN; | 418 | val->intval = POWER_SUPPLY_STATUS_UNKNOWN; |
| 418 | else if (dev->battery_capacity == 100) | 419 | else if (dev->battery_capacity == 100) |
| 419 | val->intval = POWER_SUPPLY_STATUS_FULL; | 420 | val->intval = POWER_SUPPLY_STATUS_FULL; |
| @@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, | |||
| 486 | dev->battery_report_type = report_type; | 487 | dev->battery_report_type = report_type; |
| 487 | dev->battery_report_id = field->report->id; | 488 | dev->battery_report_id = field->report->id; |
| 488 | 489 | ||
| 490 | /* | ||
| 491 | * Stylus is normally not connected to the device and thus we | ||
| 492 | * can't query the device and get meaningful battery strength. | ||
| 493 | * We have to wait for the device to report it on its own. | ||
| 494 | */ | ||
| 495 | dev->battery_avoid_query = report_type == HID_INPUT_REPORT && | ||
| 496 | field->physical == HID_DG_STYLUS; | ||
| 497 | |||
| 489 | dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); | 498 | dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); |
| 490 | if (IS_ERR(dev->battery)) { | 499 | if (IS_ERR(dev->battery)) { |
| 491 | error = PTR_ERR(dev->battery); | 500 | error = PTR_ERR(dev->battery); |
| @@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value) | |||
| 530 | 539 | ||
| 531 | capacity = hidinput_scale_battery_capacity(dev, value); | 540 | capacity = hidinput_scale_battery_capacity(dev, value); |
| 532 | 541 | ||
| 533 | if (!dev->battery_reported || capacity != dev->battery_capacity) { | 542 | if (dev->battery_status != HID_BATTERY_REPORTED || |
| 543 | capacity != dev->battery_capacity) { | ||
| 534 | dev->battery_capacity = capacity; | 544 | dev->battery_capacity = capacity; |
| 535 | dev->battery_reported = true; | 545 | dev->battery_status = HID_BATTERY_REPORTED; |
| 536 | power_supply_changed(dev->battery); | 546 | power_supply_changed(dev->battery); |
| 537 | } | 547 | } |
| 538 | } | 548 | } |
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index fbfcc8009432..b39844adea47 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t | |||
| 192 | int ret = 0, len; | 192 | int ret = 0, len; |
| 193 | unsigned char report_number; | 193 | unsigned char report_number; |
| 194 | 194 | ||
| 195 | if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { | ||
| 196 | ret = -ENODEV; | ||
| 197 | goto out; | ||
| 198 | } | ||
| 199 | |||
| 195 | dev = hidraw_table[minor]->hid; | 200 | dev = hidraw_table[minor]->hid; |
| 196 | 201 | ||
| 197 | if (!dev->ll_driver->raw_request) { | 202 | if (!dev->ll_driver->raw_request) { |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 97689e98e53f..963328674e93 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | /* quirks to control the device */ | 47 | /* quirks to control the device */ |
| 48 | #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) | 48 | #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) |
| 49 | #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) | 49 | #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) |
| 50 | #define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) | ||
| 50 | 51 | ||
| 51 | /* flags */ | 52 | /* flags */ |
| 52 | #define I2C_HID_STARTED 0 | 53 | #define I2C_HID_STARTED 0 |
| @@ -171,6 +172,8 @@ static const struct i2c_hid_quirks { | |||
| 171 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, | 172 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, |
| 172 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, | 173 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, |
| 173 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, | 174 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, |
| 175 | { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, | ||
| 176 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | ||
| 174 | { 0, 0 } | 177 | { 0, 0 } |
| 175 | }; | 178 | }; |
| 176 | 179 | ||
| @@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev) | |||
| 1220 | if (ret) | 1223 | if (ret) |
| 1221 | return ret; | 1224 | return ret; |
| 1222 | 1225 | ||
| 1226 | /* The RAYDIUM device (2386:3118) needs to re-send the report descriptor | ||
| 1227 | * command after resume; after that it behaves normally again. | ||
| 1228 | * Otherwise it issues too many incomplete reports. | ||
| 1229 | */ | ||
| 1230 | if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { | ||
| 1231 | ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); | ||
| 1232 | if (ret) | ||
| 1233 | return ret; | ||
| 1234 | } | ||
| 1235 | |||
| 1223 | if (hid->driver && hid->driver->reset_resume) { | 1236 | if (hid->driver && hid->driver->reset_resume) { |
| 1224 | ret = hid->driver->reset_resume(hid); | 1237 | ret = hid->driver->reset_resume(hid); |
| 1225 | return ret; | 1238 | return ret; |
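
The i2c-hid hunks above follow the driver's usual quirk-table pattern: a VID/PID-keyed table carries flag bits, and the resume path checks the flag before re-sending the report descriptor for the Raydium 2386:3118. The sketch below shows only that generic lookup pattern; the structure layout and names are illustrative rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_RESEND_REPORT_DESCR       (1u << 2)

struct quirk_entry {
        uint16_t vendor;
        uint16_t product;
        uint32_t quirks;
};

/* Illustrative table; the real driver keeps this in i2c_hid_quirks[]. */
static const struct quirk_entry quirk_table[] = {
        { 0x2386, 0x3118, QUIRK_RESEND_REPORT_DESCR },
        { 0, 0, 0 }     /* sentinel */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
{
        const struct quirk_entry *q;

        for (q = quirk_table; q->vendor || q->product; q++)
                if (q->vendor == vendor && q->product == product)
                        return q->quirks;
        return 0;
}

int main(void)
{
        uint32_t quirks = lookup_quirks(0x2386, 0x3118);

        if (quirks & QUIRK_RESEND_REPORT_DESCR)
                printf("resume: re-send report descriptor first\n");
        return 0;
}
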
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 6da16a879c9f..5f947ec20dcb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id) | |||
| 689 | return tool_type; | 689 | return tool_type; |
| 690 | } | 690 | } |
| 691 | 691 | ||
| 692 | static void wacom_exit_report(struct wacom_wac *wacom) | ||
| 693 | { | ||
| 694 | struct input_dev *input = wacom->pen_input; | ||
| 695 | struct wacom_features *features = &wacom->features; | ||
| 696 | unsigned char *data = wacom->data; | ||
| 697 | int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; | ||
| 698 | |||
| 699 | /* | ||
| 700 | * Reset all states otherwise we lose the initial states | ||
| 701 | * when in-prox next time | ||
| 702 | */ | ||
| 703 | input_report_abs(input, ABS_X, 0); | ||
| 704 | input_report_abs(input, ABS_Y, 0); | ||
| 705 | input_report_abs(input, ABS_DISTANCE, 0); | ||
| 706 | input_report_abs(input, ABS_TILT_X, 0); | ||
| 707 | input_report_abs(input, ABS_TILT_Y, 0); | ||
| 708 | if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { | ||
| 709 | input_report_key(input, BTN_LEFT, 0); | ||
| 710 | input_report_key(input, BTN_MIDDLE, 0); | ||
| 711 | input_report_key(input, BTN_RIGHT, 0); | ||
| 712 | input_report_key(input, BTN_SIDE, 0); | ||
| 713 | input_report_key(input, BTN_EXTRA, 0); | ||
| 714 | input_report_abs(input, ABS_THROTTLE, 0); | ||
| 715 | input_report_abs(input, ABS_RZ, 0); | ||
| 716 | } else { | ||
| 717 | input_report_abs(input, ABS_PRESSURE, 0); | ||
| 718 | input_report_key(input, BTN_STYLUS, 0); | ||
| 719 | input_report_key(input, BTN_STYLUS2, 0); | ||
| 720 | input_report_key(input, BTN_TOUCH, 0); | ||
| 721 | input_report_abs(input, ABS_WHEEL, 0); | ||
| 722 | if (features->type >= INTUOS3S) | ||
| 723 | input_report_abs(input, ABS_Z, 0); | ||
| 724 | } | ||
| 725 | input_report_key(input, wacom->tool[idx], 0); | ||
| 726 | input_report_abs(input, ABS_MISC, 0); /* reset tool id */ | ||
| 727 | input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); | ||
| 728 | wacom->id[idx] = 0; | ||
| 729 | } | ||
| 730 | |||
| 692 | static int wacom_intuos_inout(struct wacom_wac *wacom) | 731 | static int wacom_intuos_inout(struct wacom_wac *wacom) |
| 693 | { | 732 | { |
| 694 | struct wacom_features *features = &wacom->features; | 733 | struct wacom_features *features = &wacom->features; |
| @@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
| 741 | if (!wacom->id[idx]) | 780 | if (!wacom->id[idx]) |
| 742 | return 1; | 781 | return 1; |
| 743 | 782 | ||
| 744 | /* | 783 | wacom_exit_report(wacom); |
| 745 | * Reset all states otherwise we lose the initial states | ||
| 746 | * when in-prox next time | ||
| 747 | */ | ||
| 748 | input_report_abs(input, ABS_X, 0); | ||
| 749 | input_report_abs(input, ABS_Y, 0); | ||
| 750 | input_report_abs(input, ABS_DISTANCE, 0); | ||
| 751 | input_report_abs(input, ABS_TILT_X, 0); | ||
| 752 | input_report_abs(input, ABS_TILT_Y, 0); | ||
| 753 | if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { | ||
| 754 | input_report_key(input, BTN_LEFT, 0); | ||
| 755 | input_report_key(input, BTN_MIDDLE, 0); | ||
| 756 | input_report_key(input, BTN_RIGHT, 0); | ||
| 757 | input_report_key(input, BTN_SIDE, 0); | ||
| 758 | input_report_key(input, BTN_EXTRA, 0); | ||
| 759 | input_report_abs(input, ABS_THROTTLE, 0); | ||
| 760 | input_report_abs(input, ABS_RZ, 0); | ||
| 761 | } else { | ||
| 762 | input_report_abs(input, ABS_PRESSURE, 0); | ||
| 763 | input_report_key(input, BTN_STYLUS, 0); | ||
| 764 | input_report_key(input, BTN_STYLUS2, 0); | ||
| 765 | input_report_key(input, BTN_TOUCH, 0); | ||
| 766 | input_report_abs(input, ABS_WHEEL, 0); | ||
| 767 | if (features->type >= INTUOS3S) | ||
| 768 | input_report_abs(input, ABS_Z, 0); | ||
| 769 | } | ||
| 770 | input_report_key(input, wacom->tool[idx], 0); | ||
| 771 | input_report_abs(input, ABS_MISC, 0); /* reset tool id */ | ||
| 772 | input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); | ||
| 773 | wacom->id[idx] = 0; | ||
| 774 | return 2; | 784 | return 2; |
| 775 | } | 785 | } |
| 776 | 786 | ||
| @@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) | |||
| 1235 | if (!valid) | 1245 | if (!valid) |
| 1236 | continue; | 1246 | continue; |
| 1237 | 1247 | ||
| 1248 | if (!prox) { | ||
| 1249 | wacom->shared->stylus_in_proximity = false; | ||
| 1250 | wacom_exit_report(wacom); | ||
| 1251 | input_sync(pen_input); | ||
| 1252 | return; | ||
| 1253 | } | ||
| 1238 | if (range) { | 1254 | if (range) { |
| 1239 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); | 1255 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); |
| 1240 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); | 1256 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); |
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c index a6e87076acc2..5336bbdbfdc5 100644 --- a/drivers/isdn/mISDN/dsp_hwec.c +++ b/drivers/isdn/mISDN/dsp_hwec.c | |||
| @@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) | |||
| 68 | goto _do; | 68 | goto _do; |
| 69 | 69 | ||
| 70 | { | 70 | { |
| 71 | char _dup[len + 1]; | ||
| 72 | char *dup, *tok, *name, *val; | 71 | char *dup, *tok, *name, *val; |
| 73 | int tmp; | 72 | int tmp; |
| 74 | 73 | ||
| 75 | strcpy(_dup, arg); | 74 | dup = kstrdup(arg, GFP_ATOMIC); |
| 76 | dup = _dup; | 75 | if (!dup) |
| 76 | return; | ||
| 77 | 77 | ||
| 78 | while ((tok = strsep(&dup, ","))) { | 78 | while ((tok = strsep(&dup, ","))) { |
| 79 | if (!strlen(tok)) | 79 | if (!strlen(tok)) |
| @@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg) | |||
| 89 | deftaps = tmp; | 89 | deftaps = tmp; |
| 90 | } | 90 | } |
| 91 | } | 91 | } |
| 92 | |||
| 93 | kfree(dup); | ||
| 92 | } | 94 | } |
| 93 | 95 | ||
| 94 | _do: | 96 | _do: |
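
The dsp_hwec change above removes a stack VLA and parses the option string from a kstrdup()'d heap copy instead. A userspace equivalent of that parsing shape follows; because strsep() advances the pointer it is handed, the sketch keeps a separate cursor and frees the original allocation — a detail of the sketch, not a claim about the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *arg = "deftaps=128,unknown=1";        /* illustrative option string */
        char *dup = strdup(arg);                          /* heap copy, like kstrdup() */
        char *cursor = dup;                               /* strsep() consumes this one */
        char *tok;

        if (!dup)
                return 1;

        while ((tok = strsep(&cursor, ","))) {
                char *val, *name = tok;

                if (!*tok)
                        continue;
                val = strchr(tok, '=');
                if (val)
                        *val++ = '\0';
                printf("name=%s val=%s\n", name, val ? val : "(none)");
        }

        free(dup);                                        /* counterpart of kfree() */
        return 0;
}
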
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 21d50e4cc5e1..b05022f94f18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
| @@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, | |||
| 279 | u16 timebase, u8 *buf, int len) | 279 | u16 timebase, u8 *buf, int len) |
| 280 | { | 280 | { |
| 281 | u8 *p; | 281 | u8 *p; |
| 282 | u8 frame[len + 32]; | 282 | u8 frame[MAX_DFRAME_LEN_L1 + 32]; |
| 283 | struct socket *socket = NULL; | 283 | struct socket *socket = NULL; |
| 284 | 284 | ||
| 285 | if (debug & DEBUG_L1OIP_MSG) | 285 | if (debug & DEBUG_L1OIP_MSG) |
| @@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) | |||
| 902 | p = skb->data; | 902 | p = skb->data; |
| 903 | l = skb->len; | 903 | l = skb->len; |
| 904 | while (l) { | 904 | while (l) { |
| 905 | ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; | 905 | /* |
| 906 | * This is technically bounded by L1OIP_MAX_PERFRAME but | ||
| 907 | * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME | ||
| 908 | */ | ||
| 909 | ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; | ||
| 906 | l1oip_socket_send(hc, 0, dch->slot, 0, | 910 | l1oip_socket_send(hc, 0, dch->slot, 0, |
| 907 | hc->chan[dch->slot].tx_counter++, p, ll); | 911 | hc->chan[dch->slot].tx_counter++, p, ll); |
| 908 | p += ll; | 912 | p += ll; |
| @@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) | |||
| 1140 | p = skb->data; | 1144 | p = skb->data; |
| 1141 | l = skb->len; | 1145 | l = skb->len; |
| 1142 | while (l) { | 1146 | while (l) { |
| 1143 | ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; | 1147 | /* |
| 1148 | * This is technically bounded by L1OIP_MAX_PERFRAME but | ||
| 1149 | * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME | ||
| 1150 | */ | ||
| 1151 | ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; | ||
| 1144 | l1oip_socket_send(hc, hc->codec, bch->slot, 0, | 1152 | l1oip_socket_send(hc, hc->codec, bch->slot, 0, |
| 1145 | hc->chan[bch->slot].tx_counter, p, ll); | 1153 | hc->chan[bch->slot].tx_counter, p, ll); |
| 1146 | hc->chan[bch->slot].tx_counter += ll; | 1154 | hc->chan[bch->slot].tx_counter += ll; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 3bea45e8ccff..c208c01f63a5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr) | |||
| 9256 | check_sb_changes(mddev, rdev); | 9256 | check_sb_changes(mddev, rdev); |
| 9257 | 9257 | ||
| 9258 | /* Read all rdev's to update recovery_offset */ | 9258 | /* Read all rdev's to update recovery_offset */ |
| 9259 | rdev_for_each_rcu(rdev, mddev) | 9259 | rdev_for_each_rcu(rdev, mddev) { |
| 9260 | read_rdev(mddev, rdev); | 9260 | if (!test_bit(Faulty, &rdev->flags)) |
| 9261 | read_rdev(mddev, rdev); | ||
| 9262 | } | ||
| 9261 | } | 9263 | } |
| 9262 | EXPORT_SYMBOL(md_reload_sb); | 9264 | EXPORT_SYMBOL(md_reload_sb); |
| 9263 | 9265 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e2943fb74056..e9e3308cb0a7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf) | |||
| 854 | * there is no normal IO happening. It must arrange to call | 854 | * there is no normal IO happening. It must arrange to call |
| 855 | * lower_barrier when the particular background IO completes. | 855 | * lower_barrier when the particular background IO completes. |
| 856 | */ | 856 | */ |
| 857 | static void raise_barrier(struct r1conf *conf, sector_t sector_nr) | 857 | static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr) |
| 858 | { | 858 | { |
| 859 | int idx = sector_to_idx(sector_nr); | 859 | int idx = sector_to_idx(sector_nr); |
| 860 | 860 | ||
| @@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr) | |||
| 885 | * max resync count which allowed on current I/O barrier bucket. | 885 | * max resync count which allowed on current I/O barrier bucket. |
| 886 | */ | 886 | */ |
| 887 | wait_event_lock_irq(conf->wait_barrier, | 887 | wait_event_lock_irq(conf->wait_barrier, |
| 888 | !conf->array_frozen && | 888 | (!conf->array_frozen && |
| 889 | !atomic_read(&conf->nr_pending[idx]) && | 889 | !atomic_read(&conf->nr_pending[idx]) && |
| 890 | atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, | 890 | atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || |
| 891 | test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), | ||
| 891 | conf->resync_lock); | 892 | conf->resync_lock); |
| 892 | 893 | ||
| 894 | if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { | ||
| 895 | atomic_dec(&conf->barrier[idx]); | ||
| 896 | spin_unlock_irq(&conf->resync_lock); | ||
| 897 | wake_up(&conf->wait_barrier); | ||
| 898 | return -EINTR; | ||
| 899 | } | ||
| 900 | |||
| 893 | atomic_inc(&conf->nr_sync_pending); | 901 | atomic_inc(&conf->nr_sync_pending); |
| 894 | spin_unlock_irq(&conf->resync_lock); | 902 | spin_unlock_irq(&conf->resync_lock); |
| 903 | |||
| 904 | return 0; | ||
| 895 | } | 905 | } |
| 896 | 906 | ||
| 897 | static void lower_barrier(struct r1conf *conf, sector_t sector_nr) | 907 | static void lower_barrier(struct r1conf *conf, sector_t sector_nr) |
| @@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, | |||
| 1092 | goto skip_copy; | 1102 | goto skip_copy; |
| 1093 | } | 1103 | } |
| 1094 | 1104 | ||
| 1105 | behind_bio->bi_write_hint = bio->bi_write_hint; | ||
| 1106 | |||
| 1095 | while (i < vcnt && size) { | 1107 | while (i < vcnt && size) { |
| 1096 | struct page *page; | 1108 | struct page *page; |
| 1097 | int len = min_t(int, PAGE_SIZE, size); | 1109 | int len = min_t(int, PAGE_SIZE, size); |
| @@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 2662 | 2674 | ||
| 2663 | bitmap_cond_end_sync(mddev->bitmap, sector_nr, | 2675 | bitmap_cond_end_sync(mddev->bitmap, sector_nr, |
| 2664 | mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); | 2676 | mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); |
| 2665 | r1_bio = raid1_alloc_init_r1buf(conf); | ||
| 2666 | 2677 | ||
| 2667 | raise_barrier(conf, sector_nr); | 2678 | |
| 2679 | if (raise_barrier(conf, sector_nr)) | ||
| 2680 | return 0; | ||
| 2681 | |||
| 2682 | r1_bio = raid1_alloc_init_r1buf(conf); | ||
| 2668 | 2683 | ||
| 2669 | rcu_read_lock(); | 2684 | rcu_read_lock(); |
| 2670 | /* | 2685 | /* |
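
The raid1 hunks above teach raise_barrier() to give up when MD_RECOVERY_INTR is set, so a resync that is being torn down no longer sleeps forever inside wait_event_lock_irq(). The pthread sketch below shows the same "wait for the condition, but also wake and bail out on a stop flag" shape; all names are illustrative (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool resources_free;     /* stands in for "no pending I/O in this bucket" */
static bool interrupted;        /* stands in for MD_RECOVERY_INTR */

/* Returns 0 on success, -1 if the wait was interrupted. */
static int raise_barrier_like(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        while (!resources_free && !interrupted)
                pthread_cond_wait(&cond, &lock);
        if (interrupted)
                ret = -1;       /* analogous to returning -EINTR */
        pthread_mutex_unlock(&lock);
        return ret;
}

static void *stopper(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        interrupted = true;             /* tear-down path sets the flag... */
        pthread_cond_broadcast(&cond);  /* ...and wakes every waiter */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, stopper, NULL);
        printf("raise_barrier_like() = %d\n", raise_barrier_like());
        pthread_join(t, NULL);
        return 0;
}
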
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 8e0acd197c43..6af946d16d24 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/bitops.h> | ||
| 12 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| 13 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
| 14 | #include <linux/io-64-nonatomic-hi-lo.h> | 15 | #include <linux/io-64-nonatomic-hi-lo.h> |
| @@ -62,6 +63,17 @@ | |||
| 62 | * need a custom accessor. | 63 | * need a custom accessor. |
| 63 | */ | 64 | */ |
| 64 | 65 | ||
| 66 | static unsigned long global_flags; | ||
| 67 | /* | ||
| 68 | * Workaround to avoid using the RX DMAC from multiple channels at once. | ||
| 69 | * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use | ||
| 70 | * the RX DMAC simultaneously, sometimes hundreds of bytes of data are | ||
| 71 | * not stored into system memory even though the DMAC interrupt fired. | ||
| 72 | * So this driver uses only one RX DMAC channel. | ||
| 73 | */ | ||
| 74 | #define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 | ||
| 75 | #define SDHI_INTERNAL_DMAC_RX_IN_USE 1 | ||
| 76 | |||
| 65 | /* Definitions for sampling clocks */ | 77 | /* Definitions for sampling clocks */ |
| 66 | static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { | 78 | static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { |
| 67 | { | 79 | { |
| @@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { | |||
| 126 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, | 138 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, |
| 127 | RST_RESERVED_BITS | val); | 139 | RST_RESERVED_BITS | val); |
| 128 | 140 | ||
| 141 | if (host->data && host->data->flags & MMC_DATA_READ) | ||
| 142 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); | ||
| 143 | |||
| 129 | renesas_sdhi_internal_dmac_enable_dma(host, true); | 144 | renesas_sdhi_internal_dmac_enable_dma(host, true); |
| 130 | } | 145 | } |
| 131 | 146 | ||
| @@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, | |||
| 155 | if (data->flags & MMC_DATA_READ) { | 170 | if (data->flags & MMC_DATA_READ) { |
| 156 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; | 171 | dtran_mode |= DTRAN_MODE_CH_NUM_CH1; |
| 157 | dir = DMA_FROM_DEVICE; | 172 | dir = DMA_FROM_DEVICE; |
| 173 | if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && | ||
| 174 | test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) | ||
| 175 | goto force_pio; | ||
| 158 | } else { | 176 | } else { |
| 159 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; | 177 | dtran_mode |= DTRAN_MODE_CH_NUM_CH0; |
| 160 | dir = DMA_TO_DEVICE; | 178 | dir = DMA_TO_DEVICE; |
| @@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) | |||
| 208 | renesas_sdhi_internal_dmac_enable_dma(host, false); | 226 | renesas_sdhi_internal_dmac_enable_dma(host, false); |
| 209 | dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); | 227 | dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); |
| 210 | 228 | ||
| 229 | if (dir == DMA_FROM_DEVICE) | ||
| 230 | clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); | ||
| 231 | |||
| 211 | tmio_mmc_do_data_irq(host); | 232 | tmio_mmc_do_data_irq(host); |
| 212 | out: | 233 | out: |
| 213 | spin_unlock_irq(&host->lock); | 234 | spin_unlock_irq(&host->lock); |
| @@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { | |||
| 251 | * implementation as others may use a different implementation. | 272 | * implementation as others may use a different implementation. |
| 252 | */ | 273 | */ |
| 253 | static const struct soc_device_attribute gen3_soc_whitelist[] = { | 274 | static const struct soc_device_attribute gen3_soc_whitelist[] = { |
| 254 | { .soc_id = "r8a7795", .revision = "ES1.*" }, | 275 | { .soc_id = "r8a7795", .revision = "ES1.*", |
| 255 | { .soc_id = "r8a7795", .revision = "ES2.0" }, | 276 | .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, |
| 256 | { .soc_id = "r8a7796", .revision = "ES1.0" }, | 277 | { .soc_id = "r8a7795", .revision = "ES2.0" }, |
| 257 | { .soc_id = "r8a77995", .revision = "ES1.0" }, | 278 | { .soc_id = "r8a7796", .revision = "ES1.0", |
| 258 | { /* sentinel */ } | 279 | .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, |
| 280 | { .soc_id = "r8a77995", .revision = "ES1.0" }, | ||
| 281 | { /* sentinel */ } | ||
| 259 | }; | 282 | }; |
| 260 | 283 | ||
| 261 | static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) | 284 | static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) |
| 262 | { | 285 | { |
| 263 | if (!soc_device_match(gen3_soc_whitelist)) | 286 | const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); |
| 287 | |||
| 288 | if (!soc) | ||
| 264 | return -ENODEV; | 289 | return -ENODEV; |
| 265 | 290 | ||
| 291 | global_flags |= (unsigned long)soc->data; | ||
| 292 | |||
| 266 | return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); | 293 | return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); |
| 267 | } | 294 | } |
| 268 | 295 | ||
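
The renesas_sdhi change above attaches BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) as SoC data in the whitelist, folds it into global_flags at probe time, and then gates RX DMA with test_and_set_bit()/clear_bit() on SDHI_INTERNAL_DMAC_RX_IN_USE so only one channel owns the RX DMAC at a time, falling back to PIO otherwise. A rough userspace analogue of that single-owner flag, built on C11 atomics rather than the kernel bitops used in the patch (names are illustrative), is shown here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ONE_RX_ONLY (1u << 0)   /* quirk: only one RX user allowed */
#define RX_IN_USE   (1u << 1)   /* set while some channel owns RX  */

static atomic_uint global_flags = ONE_RX_ONLY;  /* as if set at probe */

/* Try to claim the shared RX engine; mirrors the driver's
 * test_bit() + test_and_set_bit() pair. */
static bool claim_rx(void)
{
    unsigned int old;

    if (!(atomic_load(&global_flags) & ONE_RX_ONLY))
        return true;                /* quirk not active on this SoC */

    old = atomic_fetch_or(&global_flags, RX_IN_USE);
    return !(old & RX_IN_USE);      /* false => fall back to PIO */
}

static void release_rx(void)
{
    atomic_fetch_and(&global_flags, ~RX_IN_USE);
}

int main(void)
{
    printf("ch0 claim: %d\n", claim_rx());  /* 1: got the DMAC */
    printf("ch1 claim: %d\n", claim_rx());  /* 0: must use PIO */
    release_rx();                           /* ch0 completes   */
    printf("ch1 retry: %d\n", claim_rx());  /* 1: now free     */
    return 0;
}
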
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 787434e5589d..78c25ad35fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev) | |||
| 1312 | pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); | 1312 | pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); |
| 1313 | } | 1313 | } |
| 1314 | 1314 | ||
| 1315 | static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) | 1315 | static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) |
| 1316 | { | 1316 | { |
| 1317 | struct sdhci_pci_slot *slot = sdhci_priv(host); | 1317 | struct sdhci_pci_slot *slot = sdhci_priv(host); |
| 1318 | struct pci_dev *pdev = slot->chip->pdev; | 1318 | struct pci_dev *pdev = slot->chip->pdev; |
| @@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) | |||
| 1351 | return 0; | 1351 | return 0; |
| 1352 | } | 1352 | } |
| 1353 | 1353 | ||
| 1354 | static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) | ||
| 1355 | { | ||
| 1356 | struct sdhci_host *host = mmc_priv(mmc); | ||
| 1357 | |||
| 1358 | /* AMD requires custom HS200 tuning */ | ||
| 1359 | if (host->timing == MMC_TIMING_MMC_HS200) | ||
| 1360 | return amd_execute_tuning_hs200(host, opcode); | ||
| 1361 | |||
| 1362 | /* Otherwise perform standard SDHCI tuning */ | ||
| 1363 | return sdhci_execute_tuning(mmc, opcode); | ||
| 1364 | } | ||
| 1365 | |||
| 1366 | static int amd_probe_slot(struct sdhci_pci_slot *slot) | ||
| 1367 | { | ||
| 1368 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; | ||
| 1369 | |||
| 1370 | ops->execute_tuning = amd_execute_tuning; | ||
| 1371 | |||
| 1372 | return 0; | ||
| 1373 | } | ||
| 1374 | |||
| 1354 | static int amd_probe(struct sdhci_pci_chip *chip) | 1375 | static int amd_probe(struct sdhci_pci_chip *chip) |
| 1355 | { | 1376 | { |
| 1356 | struct pci_dev *smbus_dev; | 1377 | struct pci_dev *smbus_dev; |
| @@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = { | |||
| 1385 | .set_bus_width = sdhci_set_bus_width, | 1406 | .set_bus_width = sdhci_set_bus_width, |
| 1386 | .reset = sdhci_reset, | 1407 | .reset = sdhci_reset, |
| 1387 | .set_uhs_signaling = sdhci_set_uhs_signaling, | 1408 | .set_uhs_signaling = sdhci_set_uhs_signaling, |
| 1388 | .platform_execute_tuning = amd_execute_tuning, | ||
| 1389 | }; | 1409 | }; |
| 1390 | 1410 | ||
| 1391 | static const struct sdhci_pci_fixes sdhci_amd = { | 1411 | static const struct sdhci_pci_fixes sdhci_amd = { |
| 1392 | .probe = amd_probe, | 1412 | .probe = amd_probe, |
| 1393 | .ops = &amd_sdhci_pci_ops, | 1413 | .ops = &amd_sdhci_pci_ops, |
| 1414 | .probe_slot = amd_probe_slot, | ||
| 1394 | }; | 1415 | }; |
| 1395 | 1416 | ||
| 1396 | static const struct pci_device_id pci_ids[] = { | 1417 | static const struct pci_device_id pci_ids[] = { |
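
In the sdhci-pci hunk above, amd_probe_slot() installs amd_execute_tuning() as the mmc_host_ops ->execute_tuning hook, which simply dispatches: the vendor HS200 sequence only for MMC_TIMING_MMC_HS200, the standard sdhci_execute_tuning() for every other timing. A minimal sketch of that dispatch shape follows; the enum and the two tuning functions are placeholders, not the real driver entry points.

#include <stdio.h>

enum timing { TIMING_LEGACY, TIMING_HS200, TIMING_HS400 };

/* Placeholders for the vendor-specific and generic tuning paths. */
static int tune_hs200_custom(void) { puts("custom HS200 tuning"); return 0; }
static int tune_generic(void)      { puts("generic SDHCI tuning"); return 0; }

/* Dispatcher in the style of amd_execute_tuning(): choose the
 * implementation from the currently selected bus timing. */
static int execute_tuning(enum timing t)
{
    if (t == TIMING_HS200)
        return tune_hs200_custom();
    return tune_generic();
}

int main(void)
{
    execute_tuning(TIMING_HS200);
    execute_tuning(TIMING_HS400);
    return 0;
}
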
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index ac7694c71266..a036c490b7ce 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c | |||
| @@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, | |||
| 285 | struct sk_buff_head *rxq) | 285 | struct sk_buff_head *rxq) |
| 286 | { | 286 | { |
| 287 | u16 buf[4] = { 0 }, status, seq_id; | 287 | u16 buf[4] = { 0 }, status, seq_id; |
| 288 | u64 ns, timelo, timehi; | ||
| 289 | struct skb_shared_hwtstamps *shwt; | 288 | struct skb_shared_hwtstamps *shwt; |
| 289 | struct sk_buff_head received; | ||
| 290 | u64 ns, timelo, timehi; | ||
| 291 | unsigned long flags; | ||
| 290 | int err; | 292 | int err; |
| 291 | 293 | ||
| 294 | /* The latched timestamp belongs to one of the received frames. */ | ||
| 295 | __skb_queue_head_init(&received); | ||
| 296 | spin_lock_irqsave(&rxq->lock, flags); | ||
| 297 | skb_queue_splice_tail_init(rxq, &received); | ||
| 298 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
| 299 | |||
| 292 | mutex_lock(&chip->reg_lock); | 300 | mutex_lock(&chip->reg_lock); |
| 293 | err = mv88e6xxx_port_ptp_read(chip, ps->port_id, | 301 | err = mv88e6xxx_port_ptp_read(chip, ps->port_id, |
| 294 | reg, buf, ARRAY_SIZE(buf)); | 302 | reg, buf, ARRAY_SIZE(buf)); |
| @@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, | |||
| 311 | /* Since the device can only handle one time stamp at a time, | 319 | /* Since the device can only handle one time stamp at a time, |
| 312 | * we purge any extra frames from the queue. | 320 | * we purge any extra frames from the queue. |
| 313 | */ | 321 | */ |
| 314 | for ( ; skb; skb = skb_dequeue(rxq)) { | 322 | for ( ; skb; skb = __skb_dequeue(&received)) { |
| 315 | if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { | 323 | if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { |
| 316 | ns = timehi << 16 | timelo; | 324 | ns = timehi << 16 | timelo; |
| 317 | 325 | ||
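
mv88e6xxx_get_rxts() above now splices the shared rxq into a private 'received' list while holding the queue lock, then walks that private list with the lock-free __skb_dequeue(); frames arriving afterwards stay on rxq for the next pass instead of being consumed against the wrong latched timestamp. Below is a userspace sketch of that splice-then-drain pattern using a toy linked list and a pthread mutex in place of sk_buff_head; all names are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int seq; struct node *next; };

/* Shared queue, protected by a lock (stands in for sk_buff_head). */
static struct node *rxq_head;
static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;

static void rxq_add(int seq)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        return;
    n->seq = seq;
    pthread_mutex_lock(&rxq_lock);
    n->next = rxq_head;
    rxq_head = n;
    pthread_mutex_unlock(&rxq_lock);
}

static void drain_against_timestamp(int ts_seq)
{
    struct node *received, *n;

    /* Splice the whole shared queue into a private list under the
     * lock, then process without the lock, like the driver's
     * skb_queue_splice_tail_init() + __skb_dequeue(). */
    pthread_mutex_lock(&rxq_lock);
    received = rxq_head;
    rxq_head = NULL;
    pthread_mutex_unlock(&rxq_lock);

    while ((n = received)) {
        received = n->next;
        if (n->seq == ts_seq)
            printf("seq %d matches latched timestamp\n", n->seq);
        else
            printf("seq %d dropped (no timestamp)\n", n->seq);
        free(n);
    }
}

int main(void)
{
    rxq_add(1); rxq_add(2); rxq_add(3);
    drain_against_timestamp(2);
    return 0;
}
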
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1f622ca2a64f..8ba14ae00e8f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | |||
| @@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) | |||
| 1927 | return retval; | 1927 | return retval; |
| 1928 | } | 1928 | } |
| 1929 | 1929 | ||
| 1930 | static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) | 1930 | static void bnxt_get_pkgver(struct net_device *dev) |
| 1931 | { | 1931 | { |
| 1932 | struct bnxt *bp = netdev_priv(dev); | ||
| 1932 | u16 index = 0; | 1933 | u16 index = 0; |
| 1933 | u32 datalen; | 1934 | char *pkgver; |
| 1935 | u32 pkglen; | ||
| 1936 | u8 *pkgbuf; | ||
| 1937 | int len; | ||
| 1934 | 1938 | ||
| 1935 | if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, | 1939 | if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, |
| 1936 | BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, | 1940 | BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, |
| 1937 | &index, NULL, &datalen) != 0) | 1941 | &index, NULL, &pkglen) != 0) |
| 1938 | return NULL; | 1942 | return; |
| 1939 | 1943 | ||
| 1940 | memset(buf, 0, buflen); | 1944 | pkgbuf = kzalloc(pkglen, GFP_KERNEL); |
| 1941 | if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) | 1945 | if (!pkgbuf) { |
| 1942 | return NULL; | 1946 | dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", |
| 1947 | pkglen); | ||
| 1948 | return; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) | ||
| 1952 | goto err; | ||
| 1943 | 1953 | ||
| 1944 | return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, | 1954 | pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, |
| 1945 | datalen); | 1955 | pkglen); |
| 1956 | if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { | ||
| 1957 | len = strlen(bp->fw_ver_str); | ||
| 1958 | snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, | ||
| 1959 | "/pkg %s", pkgver); | ||
| 1960 | } | ||
| 1961 | err: | ||
| 1962 | kfree(pkgbuf); | ||
| 1946 | } | 1963 | } |
| 1947 | 1964 | ||
| 1948 | static int bnxt_get_eeprom(struct net_device *dev, | 1965 | static int bnxt_get_eeprom(struct net_device *dev, |
| @@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp) | |||
| 2615 | struct hwrm_selftest_qlist_input req = {0}; | 2632 | struct hwrm_selftest_qlist_input req = {0}; |
| 2616 | struct bnxt_test_info *test_info; | 2633 | struct bnxt_test_info *test_info; |
| 2617 | struct net_device *dev = bp->dev; | 2634 | struct net_device *dev = bp->dev; |
| 2618 | char *pkglog; | ||
| 2619 | int i, rc; | 2635 | int i, rc; |
| 2620 | 2636 | ||
| 2621 | pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); | 2637 | bnxt_get_pkgver(dev); |
| 2622 | if (pkglog) { | ||
| 2623 | char *pkgver; | ||
| 2624 | int len; | ||
| 2625 | 2638 | ||
| 2626 | pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); | ||
| 2627 | if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { | ||
| 2628 | len = strlen(bp->fw_ver_str); | ||
| 2629 | snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, | ||
| 2630 | "/pkg %s", pkgver); | ||
| 2631 | } | ||
| 2632 | kfree(pkglog); | ||
| 2633 | } | ||
| 2634 | if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) | 2639 | if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) |
| 2635 | return; | 2640 | return; |
| 2636 | 2641 | ||
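
bnxt_get_pkgver() above replaces the fixed BNX_PKG_LOG_MAX_LENGTH scratch buffer with a kzalloc() of the exact NVRAM item length and appends "/pkg <ver>" directly to bp->fw_ver_str with a bounded snprintf(). The small userspace sketch below exercises just that bounded-append step; the version strings and buffer size are made up, standing in for the NVRAM parse result.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define FW_VER_STR_LEN 32

int main(void)
{
    char fw_ver_str[FW_VER_STR_LEN] = "218.1.156.0";
    const char *pkgver = "218.0.152.0";   /* pretend parse result */
    size_t len;

    /* Mirror the driver's check: only append when the parsed field
     * is non-empty and starts with a digit. */
    if (pkgver && *pkgver != '\0' && isdigit((unsigned char)*pkgver)) {
        len = strlen(fw_ver_str);
        /* Bounded append, leaving room for the terminator. */
        snprintf(fw_ver_str + len, FW_VER_STR_LEN - len - 1,
                 "/pkg %s", pkgver);
    }

    printf("%s\n", fw_ver_str);   /* "218.1.156.0/pkg 218.0.152.0" */
    return 0;
}
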
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 73f2249555b5..83444811d3c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | |||
| @@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { | |||
| 59 | #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) | 59 | #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) |
| 60 | #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) | 60 | #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) |
| 61 | 61 | ||
| 62 | #define BNX_PKG_LOG_MAX_LENGTH 4096 | ||
| 63 | |||
| 64 | enum bnxnvm_pkglog_field_index { | 62 | enum bnxnvm_pkglog_field_index { |
| 65 | BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, | 63 | BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, |
| 66 | BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, | 64 | BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af011..fa5b30f547f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -87,7 +87,7 @@ do { \ | |||
| 87 | 87 | ||
| 88 | #define HNAE_AE_REGISTER 0x1 | 88 | #define HNAE_AE_REGISTER 0x1 |
| 89 | 89 | ||
| 90 | #define RCB_RING_NAME_LEN 16 | 90 | #define RCB_RING_NAME_LEN (IFNAMSIZ + 4) |
| 91 | 91 | ||
| 92 | #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 | 92 | #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 |
| 93 | #define HNAE_LOW_LATENCY_COAL_PARAM 80 | 93 | #define HNAE_LOW_LATENCY_COAL_PARAM 80 |
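
The hnae change above derives RCB_RING_NAME_LEN from IFNAMSIZ (+4 for a short suffix) instead of a bare 16, since ring names are built from the net device name and a fixed 16 could truncate once the interface name approaches IFNAMSIZ. A tiny illustration of the sizing is below; the "-tx%d" suffix format is hypothetical, the point is only that the buffer tracks IFNAMSIZ.

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ          16
#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)   /* room for a short suffix */

int main(void)
{
    /* Worst case: a 15-character interface name (IFNAMSIZ - 1). */
    const char *ifname = "enP5p1s0f0np123";
    char ring_name[RCB_RING_NAME_LEN];

    /* Hypothetical suffix format; with the IFNAMSIZ-based size the
     * composed name always fits. */
    snprintf(ring_name, sizeof(ring_name), "%s-tx%d", ifname, 7);
    printf("%s (len %zu of %d)\n", ring_name, strlen(ring_name),
           RCB_RING_NAME_LEN);
    return 0;
}
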
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index aad5658d79d5..2df01ad98df7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 794 | { | 794 | { |
| 795 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 795 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 796 | unsigned long timeout = msecs_to_jiffies(30000); | 796 | unsigned long timeout = msecs_to_jiffies(30000); |
| 797 | struct device *dev = &adapter->vdev->dev; | 797 | int retry_count = 0; |
| 798 | int rc; | 798 | int rc; |
| 799 | 799 | ||
| 800 | do { | 800 | do { |
| 801 | if (adapter->renegotiate) { | 801 | if (retry_count > IBMVNIC_MAX_QUEUES) { |
| 802 | adapter->renegotiate = false; | 802 | netdev_warn(netdev, "Login attempts exceeded\n"); |
| 803 | return -1; | ||
| 804 | } | ||
| 805 | |||
| 806 | adapter->init_done_rc = 0; | ||
| 807 | reinit_completion(&adapter->init_done); | ||
| 808 | rc = send_login(adapter); | ||
| 809 | if (rc) { | ||
| 810 | netdev_warn(netdev, "Unable to login\n"); | ||
| 811 | return rc; | ||
| 812 | } | ||
| 813 | |||
| 814 | if (!wait_for_completion_timeout(&adapter->init_done, | ||
| 815 | timeout)) { | ||
| 816 | netdev_warn(netdev, "Login timed out\n"); | ||
| 817 | return -1; | ||
| 818 | } | ||
| 819 | |||
| 820 | if (adapter->init_done_rc == PARTIALSUCCESS) { | ||
| 821 | retry_count++; | ||
| 803 | release_sub_crqs(adapter, 1); | 822 | release_sub_crqs(adapter, 1); |
| 804 | 823 | ||
| 824 | adapter->init_done_rc = 0; | ||
| 805 | reinit_completion(&adapter->init_done); | 825 | reinit_completion(&adapter->init_done); |
| 806 | send_cap_queries(adapter); | 826 | send_cap_queries(adapter); |
| 807 | if (!wait_for_completion_timeout(&adapter->init_done, | 827 | if (!wait_for_completion_timeout(&adapter->init_done, |
| 808 | timeout)) { | 828 | timeout)) { |
| 809 | dev_err(dev, "Capabilities query timeout\n"); | 829 | netdev_warn(netdev, |
| 830 | "Capabilities query timed out\n"); | ||
| 810 | return -1; | 831 | return -1; |
| 811 | } | 832 | } |
| 833 | |||
| 812 | rc = init_sub_crqs(adapter); | 834 | rc = init_sub_crqs(adapter); |
| 813 | if (rc) { | 835 | if (rc) { |
| 814 | dev_err(dev, | 836 | netdev_warn(netdev, |
| 815 | "Initialization of SCRQ's failed\n"); | 837 | "SCRQ initialization failed\n"); |
| 816 | return -1; | 838 | return -1; |
| 817 | } | 839 | } |
| 840 | |||
| 818 | rc = init_sub_crq_irqs(adapter); | 841 | rc = init_sub_crq_irqs(adapter); |
| 819 | if (rc) { | 842 | if (rc) { |
| 820 | dev_err(dev, | 843 | netdev_warn(netdev, |
| 821 | "Initialization of SCRQ's irqs failed\n"); | 844 | "SCRQ irq initialization failed\n"); |
| 822 | return -1; | 845 | return -1; |
| 823 | } | 846 | } |
| 824 | } | 847 | } else if (adapter->init_done_rc) { |
| 825 | 848 | netdev_warn(netdev, "Adapter login failed\n"); | |
| 826 | reinit_completion(&adapter->init_done); | ||
| 827 | rc = send_login(adapter); | ||
| 828 | if (rc) { | ||
| 829 | dev_err(dev, "Unable to attempt device login\n"); | ||
| 830 | return rc; | ||
| 831 | } else if (!wait_for_completion_timeout(&adapter->init_done, | ||
| 832 | timeout)) { | ||
| 833 | dev_err(dev, "Login timeout\n"); | ||
| 834 | return -1; | 849 | return -1; |
| 835 | } | 850 | } |
| 836 | } while (adapter->renegotiate); | 851 | } while (adapter->init_done_rc == PARTIALSUCCESS); |
| 837 | 852 | ||
| 838 | /* handle pending MAC address changes after successful login */ | 853 | /* handle pending MAC address changes after successful login */ |
| 839 | if (adapter->mac_change_pending) { | 854 | if (adapter->mac_change_pending) { |
| @@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev) | |||
| 1034 | netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); | 1049 | netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); |
| 1035 | if (prev_state == VNIC_CLOSED) | 1050 | if (prev_state == VNIC_CLOSED) |
| 1036 | enable_irq(adapter->rx_scrq[i]->irq); | 1051 | enable_irq(adapter->rx_scrq[i]->irq); |
| 1037 | else | 1052 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); |
| 1038 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); | ||
| 1039 | } | 1053 | } |
| 1040 | 1054 | ||
| 1041 | for (i = 0; i < adapter->req_tx_queues; i++) { | 1055 | for (i = 0; i < adapter->req_tx_queues; i++) { |
| 1042 | netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); | 1056 | netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); |
| 1043 | if (prev_state == VNIC_CLOSED) | 1057 | if (prev_state == VNIC_CLOSED) |
| 1044 | enable_irq(adapter->tx_scrq[i]->irq); | 1058 | enable_irq(adapter->tx_scrq[i]->irq); |
| 1045 | else | 1059 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); |
| 1046 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); | ||
| 1047 | } | 1060 | } |
| 1048 | 1061 | ||
| 1049 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); | 1062 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); |
| @@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) | |||
| 1184 | if (adapter->tx_scrq[i]->irq) { | 1197 | if (adapter->tx_scrq[i]->irq) { |
| 1185 | netdev_dbg(netdev, | 1198 | netdev_dbg(netdev, |
| 1186 | "Disabling tx_scrq[%d] irq\n", i); | 1199 | "Disabling tx_scrq[%d] irq\n", i); |
| 1200 | disable_scrq_irq(adapter, adapter->tx_scrq[i]); | ||
| 1187 | disable_irq(adapter->tx_scrq[i]->irq); | 1201 | disable_irq(adapter->tx_scrq[i]->irq); |
| 1188 | } | 1202 | } |
| 1189 | } | 1203 | } |
| @@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) | |||
| 1193 | if (adapter->rx_scrq[i]->irq) { | 1207 | if (adapter->rx_scrq[i]->irq) { |
| 1194 | netdev_dbg(netdev, | 1208 | netdev_dbg(netdev, |
| 1195 | "Disabling rx_scrq[%d] irq\n", i); | 1209 | "Disabling rx_scrq[%d] irq\n", i); |
| 1210 | disable_scrq_irq(adapter, adapter->rx_scrq[i]); | ||
| 1196 | disable_irq(adapter->rx_scrq[i]->irq); | 1211 | disable_irq(adapter->rx_scrq[i]->irq); |
| 1197 | } | 1212 | } |
| 1198 | } | 1213 | } |
| @@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1828 | for (i = 0; i < adapter->req_rx_queues; i++) | 1843 | for (i = 0; i < adapter->req_rx_queues; i++) |
| 1829 | napi_schedule(&adapter->napi[i]); | 1844 | napi_schedule(&adapter->napi[i]); |
| 1830 | 1845 | ||
| 1831 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) | 1846 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && |
| 1847 | adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) | ||
| 1832 | netdev_notify_peers(netdev); | 1848 | netdev_notify_peers(netdev); |
| 1833 | 1849 | ||
| 1834 | netif_carrier_on(netdev); | 1850 | netif_carrier_on(netdev); |
| @@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, | |||
| 2601 | { | 2617 | { |
| 2602 | struct device *dev = &adapter->vdev->dev; | 2618 | struct device *dev = &adapter->vdev->dev; |
| 2603 | unsigned long rc; | 2619 | unsigned long rc; |
| 2620 | u64 val; | ||
| 2604 | 2621 | ||
| 2605 | if (scrq->hw_irq > 0x100000000ULL) { | 2622 | if (scrq->hw_irq > 0x100000000ULL) { |
| 2606 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); | 2623 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); |
| 2607 | return 1; | 2624 | return 1; |
| 2608 | } | 2625 | } |
| 2609 | 2626 | ||
| 2627 | val = (0xff000000) | scrq->hw_irq; | ||
| 2628 | rc = plpar_hcall_norets(H_EOI, val); | ||
| 2629 | if (rc) | ||
| 2630 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", | ||
| 2631 | val, rc); | ||
| 2632 | |||
| 2610 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, | 2633 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
| 2611 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); | 2634 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); |
| 2612 | if (rc) | 2635 | if (rc) |
| @@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) | |||
| 3170 | struct vnic_login_client_data { | 3193 | struct vnic_login_client_data { |
| 3171 | u8 type; | 3194 | u8 type; |
| 3172 | __be16 len; | 3195 | __be16 len; |
| 3173 | char name; | 3196 | char name[]; |
| 3174 | } __packed; | 3197 | } __packed; |
| 3175 | 3198 | ||
| 3176 | static int vnic_client_data_len(struct ibmvnic_adapter *adapter) | 3199 | static int vnic_client_data_len(struct ibmvnic_adapter *adapter) |
| @@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, | |||
| 3199 | vlcd->type = 1; | 3222 | vlcd->type = 1; |
| 3200 | len = strlen(os_name) + 1; | 3223 | len = strlen(os_name) + 1; |
| 3201 | vlcd->len = cpu_to_be16(len); | 3224 | vlcd->len = cpu_to_be16(len); |
| 3202 | strncpy(&vlcd->name, os_name, len); | 3225 | strncpy(vlcd->name, os_name, len); |
| 3203 | vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); | 3226 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
| 3204 | 3227 | ||
| 3205 | /* Type 2 - LPAR name */ | 3228 | /* Type 2 - LPAR name */ |
| 3206 | vlcd->type = 2; | 3229 | vlcd->type = 2; |
| 3207 | len = strlen(utsname()->nodename) + 1; | 3230 | len = strlen(utsname()->nodename) + 1; |
| 3208 | vlcd->len = cpu_to_be16(len); | 3231 | vlcd->len = cpu_to_be16(len); |
| 3209 | strncpy(&vlcd->name, utsname()->nodename, len); | 3232 | strncpy(vlcd->name, utsname()->nodename, len); |
| 3210 | vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); | 3233 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
| 3211 | 3234 | ||
| 3212 | /* Type 3 - device name */ | 3235 | /* Type 3 - device name */ |
| 3213 | vlcd->type = 3; | 3236 | vlcd->type = 3; |
| 3214 | len = strlen(adapter->netdev->name) + 1; | 3237 | len = strlen(adapter->netdev->name) + 1; |
| 3215 | vlcd->len = cpu_to_be16(len); | 3238 | vlcd->len = cpu_to_be16(len); |
| 3216 | strncpy(&vlcd->name, adapter->netdev->name, len); | 3239 | strncpy(vlcd->name, adapter->netdev->name, len); |
| 3217 | } | 3240 | } |
| 3218 | 3241 | ||
| 3219 | static int send_login(struct ibmvnic_adapter *adapter) | 3242 | static int send_login(struct ibmvnic_adapter *adapter) |
| @@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, | |||
| 3942 | * to resend the login buffer with fewer queues requested. | 3965 | * to resend the login buffer with fewer queues requested. |
| 3943 | */ | 3966 | */ |
| 3944 | if (login_rsp_crq->generic.rc.code) { | 3967 | if (login_rsp_crq->generic.rc.code) { |
| 3945 | adapter->renegotiate = true; | 3968 | adapter->init_done_rc = login_rsp_crq->generic.rc.code; |
| 3946 | complete(&adapter->init_done); | 3969 | complete(&adapter->init_done); |
| 3947 | return 0; | 3970 | return 0; |
| 3948 | } | 3971 | } |
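
The rewritten ibmvnic_login() above turns renegotiation into a bounded retry loop: each pass sends the login and waits on init_done, and only when the response code is PARTIALSUCCESS does it release the sub-CRQs, re-query capabilities and try again, giving up after IBMVNIC_MAX_QUEUES attempts. A compact userspace sketch of that loop shape follows, with stubbed-out steps and invented status values (RC_PARTIAL stands in for PARTIALSUCCESS, MAX_RETRIES for IBMVNIC_MAX_QUEUES).

#include <stdio.h>

#define MAX_RETRIES 8      /* stands in for IBMVNIC_MAX_QUEUES */
#define RC_OK       0
#define RC_PARTIAL  1      /* stands in for PARTIALSUCCESS     */
#define RC_FAIL     2

static int attempts;

/* Stubbed login: fail partially twice, then succeed. */
static int do_login(void)
{
    return (attempts < 2) ? RC_PARTIAL : RC_OK;
}

static void renegotiate_resources(void)
{
    printf("releasing queues, re-querying capabilities\n");
}

static int login(void)
{
    int rc, retry_count = 0;

    do {
        if (retry_count > MAX_RETRIES) {
            fprintf(stderr, "login attempts exceeded\n");
            return -1;
        }

        rc = do_login();
        attempts++;

        if (rc == RC_PARTIAL) {
            /* Server wants fewer queues: tear down and retry. */
            retry_count++;
            renegotiate_resources();
        } else if (rc != RC_OK) {
            fprintf(stderr, "login failed\n");
            return -1;
        }
    } while (rc == RC_PARTIAL);

    return 0;
}

int main(void)
{
    printf("login %s after %d attempt(s)\n",
           login() ? "failed" : "succeeded", attempts);
    return 0;
}
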
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 99c0b58c2c39..22391e8805f6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
| @@ -1035,7 +1035,6 @@ struct ibmvnic_adapter { | |||
| 1035 | 1035 | ||
| 1036 | struct ibmvnic_sub_crq_queue **tx_scrq; | 1036 | struct ibmvnic_sub_crq_queue **tx_scrq; |
| 1037 | struct ibmvnic_sub_crq_queue **rx_scrq; | 1037 | struct ibmvnic_sub_crq_queue **rx_scrq; |
| 1038 | bool renegotiate; | ||
| 1039 | 1038 | ||
| 1040 | /* rx structs */ | 1039 | /* rx structs */ |
| 1041 | struct napi_struct *napi; | 1040 | struct napi_struct *napi; |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 54a038943c06..4202f9b5b966 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -663,7 +663,7 @@ enum mvpp2_tag_type { | |||
| 663 | #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) | 663 | #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) |
| 664 | #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ | 664 | #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ |
| 665 | MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) | 665 | MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) |
| 666 | #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) | 666 | #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) |
| 667 | #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) | 667 | #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) |
| 668 | #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) | 668 | #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) |
| 669 | #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) | 669 | #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) |
| @@ -916,6 +916,8 @@ static struct { | |||
| 916 | 916 | ||
| 917 | #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) | 917 | #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) |
| 918 | 918 | ||
| 919 | #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) | ||
| 920 | |||
| 919 | /* Definitions */ | 921 | /* Definitions */ |
| 920 | 922 | ||
| 921 | /* Shared Packet Processor resources */ | 923 | /* Shared Packet Processor resources */ |
| @@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, | |||
| 1429 | if (port->priv->hw_version == MVPP21) | 1431 | if (port->priv->hw_version == MVPP21) |
| 1430 | return tx_desc->pp21.buf_dma_addr; | 1432 | return tx_desc->pp21.buf_dma_addr; |
| 1431 | else | 1433 | else |
| 1432 | return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); | 1434 | return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; |
| 1433 | } | 1435 | } |
| 1434 | 1436 | ||
| 1435 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, | 1437 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, |
| @@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, | |||
| 1447 | } else { | 1449 | } else { |
| 1448 | u64 val = (u64)addr; | 1450 | u64 val = (u64)addr; |
| 1449 | 1451 | ||
| 1450 | tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); | 1452 | tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; |
| 1451 | tx_desc->pp22.buf_dma_addr_ptp |= val; | 1453 | tx_desc->pp22.buf_dma_addr_ptp |= val; |
| 1452 | tx_desc->pp22.packet_offset = offset; | 1454 | tx_desc->pp22.packet_offset = offset; |
| 1453 | } | 1455 | } |
| @@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, | |||
| 1507 | if (port->priv->hw_version == MVPP21) | 1509 | if (port->priv->hw_version == MVPP21) |
| 1508 | return rx_desc->pp21.buf_dma_addr; | 1510 | return rx_desc->pp21.buf_dma_addr; |
| 1509 | else | 1511 | else |
| 1510 | return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); | 1512 | return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; |
| 1511 | } | 1513 | } |
| 1512 | 1514 | ||
| 1513 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, | 1515 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, |
| @@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, | |||
| 1516 | if (port->priv->hw_version == MVPP21) | 1518 | if (port->priv->hw_version == MVPP21) |
| 1517 | return rx_desc->pp21.buf_cookie; | 1519 | return rx_desc->pp21.buf_cookie; |
| 1518 | else | 1520 | else |
| 1519 | return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); | 1521 | return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; |
| 1520 | } | 1522 | } |
| 1521 | 1523 | ||
| 1522 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, | 1524 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, |
| @@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8789 | } | 8791 | } |
| 8790 | 8792 | ||
| 8791 | if (priv->hw_version == MVPP22) { | 8793 | if (priv->hw_version == MVPP22) { |
| 8792 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); | 8794 | err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); |
| 8793 | if (err) | 8795 | if (err) |
| 8794 | goto err_mg_clk; | 8796 | goto err_mg_clk; |
| 8795 | /* Sadly, the BM pools all share the same register to | 8797 | /* Sadly, the BM pools all share the same register to |
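
The mvpp2 hunks above consolidate descriptor address handling behind one macro, MVPP2_DESC_DMA_MASK = DMA_BIT_MASK(40). Note that this is not a pure rename: the old GENMASK_ULL(40, 0) covers bits 40..0 (41 bits) while DMA_BIT_MASK(40) covers bits 39..0, so the mask value shrinks by one bit. The short program below (using simplified versions of the two kernel helpers) just prints both values to make the difference visible.

#include <stdio.h>

/* Simplified versions of the kernel helpers, for illustration only. */
#define DMA_BIT_MASK(n)   (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    printf("GENMASK_ULL(40, 0) = 0x%llx\n", GENMASK_ULL(40, 0));
    printf("DMA_BIT_MASK(40)   = 0x%llx\n", DMA_BIT_MASK(40));
    return 0;
}
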
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 3735c09d2112..577659f332e4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c | |||
| @@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) | |||
| 258 | case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: | 258 | case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: |
| 259 | nfp_tunnel_keep_alive(app, skb); | 259 | nfp_tunnel_keep_alive(app, skb); |
| 260 | break; | 260 | break; |
| 261 | case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: | ||
| 262 | /* Acks from the NFP that the route is added - ignore. */ | ||
| 263 | break; | ||
| 264 | default: | 261 | default: |
| 265 | nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", | 262 | nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", |
| 266 | type); | 263 | type); |
| @@ -275,18 +272,49 @@ out: | |||
| 275 | 272 | ||
| 276 | void nfp_flower_cmsg_process_rx(struct work_struct *work) | 273 | void nfp_flower_cmsg_process_rx(struct work_struct *work) |
| 277 | { | 274 | { |
| 275 | struct sk_buff_head cmsg_joined; | ||
| 278 | struct nfp_flower_priv *priv; | 276 | struct nfp_flower_priv *priv; |
| 279 | struct sk_buff *skb; | 277 | struct sk_buff *skb; |
| 280 | 278 | ||
| 281 | priv = container_of(work, struct nfp_flower_priv, cmsg_work); | 279 | priv = container_of(work, struct nfp_flower_priv, cmsg_work); |
| 280 | skb_queue_head_init(&cmsg_joined); | ||
| 281 | |||
| 282 | spin_lock_bh(&priv->cmsg_skbs_high.lock); | ||
| 283 | skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined); | ||
| 284 | spin_unlock_bh(&priv->cmsg_skbs_high.lock); | ||
| 282 | 285 | ||
| 283 | while ((skb = skb_dequeue(&priv->cmsg_skbs))) | 286 | spin_lock_bh(&priv->cmsg_skbs_low.lock); |
| 287 | skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined); | ||
| 288 | spin_unlock_bh(&priv->cmsg_skbs_low.lock); | ||
| 289 | |||
| 290 | while ((skb = __skb_dequeue(&cmsg_joined))) | ||
| 284 | nfp_flower_cmsg_process_one_rx(priv->app, skb); | 291 | nfp_flower_cmsg_process_one_rx(priv->app, skb); |
| 285 | } | 292 | } |
| 286 | 293 | ||
| 287 | void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | 294 | static void |
| 295 | nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) | ||
| 288 | { | 296 | { |
| 289 | struct nfp_flower_priv *priv = app->priv; | 297 | struct nfp_flower_priv *priv = app->priv; |
| 298 | struct sk_buff_head *skb_head; | ||
| 299 | |||
| 300 | if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || | ||
| 301 | type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) | ||
| 302 | skb_head = &priv->cmsg_skbs_high; | ||
| 303 | else | ||
| 304 | skb_head = &priv->cmsg_skbs_low; | ||
| 305 | |||
| 306 | if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) { | ||
| 307 | nfp_flower_cmsg_warn(app, "Dropping queued control messages\n"); | ||
| 308 | dev_kfree_skb_any(skb); | ||
| 309 | return; | ||
| 310 | } | ||
| 311 | |||
| 312 | skb_queue_tail(skb_head, skb); | ||
| 313 | schedule_work(&priv->cmsg_work); | ||
| 314 | } | ||
| 315 | |||
| 316 | void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | ||
| 317 | { | ||
| 290 | struct nfp_flower_cmsg_hdr *cmsg_hdr; | 318 | struct nfp_flower_cmsg_hdr *cmsg_hdr; |
| 291 | 319 | ||
| 292 | cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); | 320 | cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); |
| @@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) | |||
| 306 | nfp_flower_process_mtu_ack(app, skb)) { | 334 | nfp_flower_process_mtu_ack(app, skb)) { |
| 307 | /* Handle MTU acks outside wq to prevent RTNL conflict. */ | 335 | /* Handle MTU acks outside wq to prevent RTNL conflict. */ |
| 308 | dev_consume_skb_any(skb); | 336 | dev_consume_skb_any(skb); |
| 337 | } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { | ||
| 338 | /* Acks from the NFP that the route is added - ignore. */ | ||
| 339 | dev_consume_skb_any(skb); | ||
| 309 | } else { | 340 | } else { |
| 310 | skb_queue_tail(&priv->cmsg_skbs, skb); | 341 | nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); |
| 311 | schedule_work(&priv->cmsg_work); | ||
| 312 | } | 342 | } |
| 313 | } | 343 | } |
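
The nfp flower control-message path above now keeps two skb lists, cmsg_skbs_high for PORT_REIFY/PORT_MOD messages and cmsg_skbs_low for everything else, each capped at NFP_FLOWER_WORKQ_MAX_SKBS; the worker splices both into one local list, high first, so urgent messages are handled ahead of the backlog. A small userspace analogue of that bounded, two-level drain is sketched below, using plain arrays in place of skb queues (all names invented).

#include <stdio.h>

#define MAX_QUEUED 4   /* toy cap, plays the role of WORKQ_MAX_SKBS */

struct queue { int msg[MAX_QUEUED]; int len; };

static struct queue high_q, low_q;

/* Enqueue with a bound, like nfp_flower_queue_ctl_msg(). */
static int enqueue(struct queue *q, int msg)
{
    if (q->len >= MAX_QUEUED) {
        fprintf(stderr, "dropping message %d (queue full)\n", msg);
        return -1;
    }
    q->msg[q->len++] = msg;
    return 0;
}

/* Worker: drain high-priority messages before the low-priority ones,
 * mirroring the splice order in nfp_flower_cmsg_process_rx(). */
static void process(void)
{
    int i;

    for (i = 0; i < high_q.len; i++)
        printf("handled high-prio msg %d\n", high_q.msg[i]);
    for (i = 0; i < low_q.len; i++)
        printf("handled low-prio msg %d\n", low_q.msg[i]);
    high_q.len = low_q.len = 0;
}

int main(void)
{
    enqueue(&low_q, 100);
    enqueue(&high_q, 1);     /* e.g. a PORT_MOD-style message */
    enqueue(&low_q, 101);
    process();
    return 0;
}
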
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 96bc0e33980c..b6c0fd053a50 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h | |||
| @@ -108,6 +108,8 @@ | |||
| 108 | #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) | 108 | #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) |
| 109 | #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) | 109 | #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) |
| 110 | 110 | ||
| 111 | #define NFP_FLOWER_WORKQ_MAX_SKBS 30000 | ||
| 112 | |||
| 111 | #define nfp_flower_cmsg_warn(app, fmt, args...) \ | 113 | #define nfp_flower_cmsg_warn(app, fmt, args...) \ |
| 112 | do { \ | 114 | do { \ |
| 113 | if (net_ratelimit()) \ | 115 | if (net_ratelimit()) \ |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6357e0720f43..ad02592a82b7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c | |||
| @@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app) | |||
| 519 | 519 | ||
| 520 | app->priv = app_priv; | 520 | app->priv = app_priv; |
| 521 | app_priv->app = app; | 521 | app_priv->app = app; |
| 522 | skb_queue_head_init(&app_priv->cmsg_skbs); | 522 | skb_queue_head_init(&app_priv->cmsg_skbs_high); |
| 523 | skb_queue_head_init(&app_priv->cmsg_skbs_low); | ||
| 523 | INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); | 524 | INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); |
| 524 | init_waitqueue_head(&app_priv->reify_wait_queue); | 525 | init_waitqueue_head(&app_priv->reify_wait_queue); |
| 525 | 526 | ||
| @@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app) | |||
| 549 | { | 550 | { |
| 550 | struct nfp_flower_priv *app_priv = app->priv; | 551 | struct nfp_flower_priv *app_priv = app->priv; |
| 551 | 552 | ||
| 552 | skb_queue_purge(&app_priv->cmsg_skbs); | 553 | skb_queue_purge(&app_priv->cmsg_skbs_high); |
| 554 | skb_queue_purge(&app_priv->cmsg_skbs_low); | ||
| 553 | flush_work(&app_priv->cmsg_work); | 555 | flush_work(&app_priv->cmsg_work); |
| 554 | 556 | ||
| 555 | nfp_flower_metadata_cleanup(app); | 557 | nfp_flower_metadata_cleanup(app); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index e030b3ce4510..c67e1b54c614 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h | |||
| @@ -107,7 +107,10 @@ struct nfp_mtu_conf { | |||
| 107 | * @mask_table: Hash table used to store masks | 107 | * @mask_table: Hash table used to store masks |
| 108 | * @flow_table: Hash table used to store flower rules | 108 | * @flow_table: Hash table used to store flower rules |
| 109 | * @cmsg_work: Workqueue for control messages processing | 109 | * @cmsg_work: Workqueue for control messages processing |
| 110 | * @cmsg_skbs: List of skbs for control message processing | 110 | * @cmsg_skbs_high: List of higher priority skbs for control message |
| 111 | * processing | ||
| 112 | * @cmsg_skbs_low: List of lower priority skbs for control message | ||
| 113 | * processing | ||
| 111 | * @nfp_mac_off_list: List of MAC addresses to offload | 114 | * @nfp_mac_off_list: List of MAC addresses to offload |
| 112 | * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs | 115 | * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs |
| 113 | * @nfp_ipv4_off_list: List of IPv4 addresses to offload | 116 | * @nfp_ipv4_off_list: List of IPv4 addresses to offload |
| @@ -136,7 +139,8 @@ struct nfp_flower_priv { | |||
| 136 | DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); | 139 | DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); |
| 137 | DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); | 140 | DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); |
| 138 | struct work_struct cmsg_work; | 141 | struct work_struct cmsg_work; |
| 139 | struct sk_buff_head cmsg_skbs; | 142 | struct sk_buff_head cmsg_skbs_high; |
| 143 | struct sk_buff_head cmsg_skbs_low; | ||
| 140 | struct list_head nfp_mac_off_list; | 144 | struct list_head nfp_mac_off_list; |
| 141 | struct list_head nfp_mac_index_list; | 145 | struct list_head nfp_mac_index_list; |
| 142 | struct list_head nfp_ipv4_off_list; | 146 | struct list_head nfp_ipv4_off_list; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index f7b958181126..cb28ac03e4ca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | |||
| @@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) | |||
| 211 | break; | 211 | break; |
| 212 | 212 | ||
| 213 | err = msleep_interruptible(timeout_ms); | 213 | err = msleep_interruptible(timeout_ms); |
| 214 | if (err != 0) | 214 | if (err != 0) { |
| 215 | nfp_info(mutex->cpp, | ||
| 216 | "interrupted waiting for NFP mutex\n"); | ||
| 215 | return -ERESTARTSYS; | 217 | return -ERESTARTSYS; |
| 218 | } | ||
| 216 | 219 | ||
| 217 | if (time_is_before_eq_jiffies(warn_at)) { | 220 | if (time_is_before_eq_jiffies(warn_at)) { |
| 218 | warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; | 221 | warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 99bb679a9801..2abee0fe3a7c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | |||
| @@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, | |||
| 281 | if ((*reg & mask) == val) | 281 | if ((*reg & mask) == val) |
| 282 | return 0; | 282 | return 0; |
| 283 | 283 | ||
| 284 | if (msleep_interruptible(25)) | 284 | msleep(25); |
| 285 | return -ERESTARTSYS; | ||
| 286 | 285 | ||
| 287 | if (time_after(start_time, wait_until)) | 286 | if (time_after(start_time, wait_until)) |
| 288 | return -ETIMEDOUT; | 287 | return -ETIMEDOUT; |
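
Two related tweaks above: nfp_cpp_mutex_lock() now logs before bailing out with -ERESTARTSYS when msleep_interruptible() is cut short, while nfp_nsp_wait_reg() switches to plain msleep() so a signal can no longer abort the register poll early. The skeleton below contrasts the two poll-loop styles in userspace C; the sleep and condition helpers are stand-ins, not the kernel primitives, and the timeout handling is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins: pretend a signal arrives on the second sleep. */
static int  sleeps;
static int  sleep_interruptible(void)   { return ++sleeps == 2; }
static void sleep_uninterruptible(void) { ++sleeps; }
static bool condition_met(void)         { return sleeps >= 5; }

/* Style 1: abort the wait when interrupted (nfp_cpp_mutex_lock). */
static int wait_interruptible(void)
{
    while (!condition_met()) {
        if (sleep_interruptible()) {
            fprintf(stderr, "interrupted while waiting\n");
            return -1;   /* -ERESTARTSYS in the driver */
        }
    }
    return 0;
}

/* Style 2: keep polling regardless of signals (nfp_nsp_wait_reg). */
static int wait_uninterruptible(void)
{
    while (!condition_met())
        sleep_uninterruptible();
    return 0;
}

int main(void)
{
    sleeps = 0;
    printf("interruptible wait:   %d\n", wait_interruptible());
    sleeps = 0;
    printf("uninterruptible wait: %d\n", wait_uninterruptible());
    return 0;
}
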
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index d33988570217..5f4e447c5dce 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
| @@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
| 350 | 350 | ||
| 351 | real_dev = priv->real_dev; | 351 | real_dev = priv->real_dev; |
| 352 | 352 | ||
| 353 | if (!rmnet_is_real_dev_registered(real_dev)) | ||
| 354 | return -ENODEV; | ||
| 355 | |||
| 356 | if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) | 353 | if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) |
| 357 | goto nla_put_failure; | 354 | goto nla_put_failure; |
| 358 | 355 | ||
| 359 | port = rmnet_get_port_rtnl(real_dev); | 356 | if (rmnet_is_real_dev_registered(real_dev)) { |
| 357 | port = rmnet_get_port_rtnl(real_dev); | ||
| 358 | f.flags = port->data_format; | ||
| 359 | } else { | ||
| 360 | f.flags = 0; | ||
| 361 | } | ||
| 360 | 362 | ||
| 361 | f.flags = port->data_format; | ||
| 362 | f.mask = ~0; | 363 | f.mask = ~0; |
| 363 | 364 | ||
| 364 | if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) | 365 | if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50daad0a1482..83ce229f4eb7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -4776,8 +4776,7 @@ static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |||
| 4776 | goto out_unlock; | 4776 | goto out_unlock; |
| 4777 | } | 4777 | } |
| 4778 | 4778 | ||
| 4779 | if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, | 4779 | if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { |
| 4780 | flow_id, filter_idx)) { | ||
| 4781 | ret = false; | 4780 | ret = false; |
| 4782 | goto out_unlock; | 4781 | goto out_unlock; |
| 4783 | } | 4782 | } |
| @@ -5265,7 +5264,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 5265 | ids = vlan->uc; | 5264 | ids = vlan->uc; |
| 5266 | } | 5265 | } |
| 5267 | 5266 | ||
| 5268 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | 5267 | filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 5269 | 5268 | ||
| 5270 | /* Insert/renew filters */ | 5269 | /* Insert/renew filters */ |
| 5271 | for (i = 0; i < addr_count; i++) { | 5270 | for (i = 0; i < addr_count; i++) { |
| @@ -5334,7 +5333,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, | |||
| 5334 | int rc; | 5333 | int rc; |
| 5335 | u16 *id; | 5334 | u16 *id; |
| 5336 | 5335 | ||
| 5337 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | 5336 | filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 5338 | 5337 | ||
| 5339 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); | 5338 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 5340 | 5339 | ||
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4a19c7efdf8d..7174ef5e5c5e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
| @@ -2912,7 +2912,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |||
| 2912 | if (test_bit(index, table->used_bitmap) && | 2912 | if (test_bit(index, table->used_bitmap) && |
| 2913 | table->spec[index].priority == EFX_FILTER_PRI_HINT && | 2913 | table->spec[index].priority == EFX_FILTER_PRI_HINT && |
| 2914 | rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, | 2914 | rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, |
| 2915 | flow_id, index)) { | 2915 | flow_id, 0)) { |
| 2916 | efx_farch_filter_table_clear_entry(efx, table, index); | 2916 | efx_farch_filter_table_clear_entry(efx, table, index); |
| 2917 | ret = true; | 2917 | ret = true; |
| 2918 | } | 2918 | } |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 5e379a83c729..eea3808b3f25 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
| @@ -733,6 +733,27 @@ struct efx_rss_context { | |||
| 733 | u32 rx_indir_table[128]; | 733 | u32 rx_indir_table[128]; |
| 734 | }; | 734 | }; |
| 735 | 735 | ||
| 736 | #ifdef CONFIG_RFS_ACCEL | ||
| 737 | /** | ||
| 738 | * struct efx_async_filter_insertion - Request to asynchronously insert a filter | ||
| 739 | * @net_dev: Reference to the netdevice | ||
| 740 | * @spec: The filter to insert | ||
| 741 | * @work: Workitem for this request | ||
| 742 | * @rxq_index: Identifies the channel for which this request was made | ||
| 743 | * @flow_id: Identifies the kernel-side flow for which this request was made | ||
| 744 | */ | ||
| 745 | struct efx_async_filter_insertion { | ||
| 746 | struct net_device *net_dev; | ||
| 747 | struct efx_filter_spec spec; | ||
| 748 | struct work_struct work; | ||
| 749 | u16 rxq_index; | ||
| 750 | u32 flow_id; | ||
| 751 | }; | ||
| 752 | |||
| 753 | /* Maximum number of ARFS workitems that may be in flight on an efx_nic */ | ||
| 754 | #define EFX_RPS_MAX_IN_FLIGHT 8 | ||
| 755 | #endif /* CONFIG_RFS_ACCEL */ | ||
| 756 | |||
| 736 | /** | 757 | /** |
| 737 | * struct efx_nic - an Efx NIC | 758 | * struct efx_nic - an Efx NIC |
| 738 | * @name: Device name (net device name or bus id before net device registered) | 759 | * @name: Device name (net device name or bus id before net device registered) |
| @@ -850,6 +871,8 @@ struct efx_rss_context { | |||
| 850 | * @rps_expire_channel: Next channel to check for expiry | 871 | * @rps_expire_channel: Next channel to check for expiry |
| 851 | * @rps_expire_index: Next index to check for expiry in | 872 | * @rps_expire_index: Next index to check for expiry in |
| 852 | * @rps_expire_channel's @rps_flow_id | 873 | * @rps_expire_channel's @rps_flow_id |
| 874 | * @rps_slot_map: bitmap of in-flight entries in @rps_slot | ||
| 875 | * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() | ||
| 853 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. | 876 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. |
| 854 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. | 877 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. |
| 855 | * Decremented when the efx_flush_rx_queue() is called. | 878 | * Decremented when the efx_flush_rx_queue() is called. |
| @@ -1004,6 +1027,8 @@ struct efx_nic { | |||
| 1004 | struct mutex rps_mutex; | 1027 | struct mutex rps_mutex; |
| 1005 | unsigned int rps_expire_channel; | 1028 | unsigned int rps_expire_channel; |
| 1006 | unsigned int rps_expire_index; | 1029 | unsigned int rps_expire_index; |
| 1030 | unsigned long rps_slot_map; | ||
| 1031 | struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; | ||
| 1007 | #endif | 1032 | #endif |
| 1008 | 1033 | ||
| 1009 | atomic_t active_queues; | 1034 | atomic_t active_queues; |
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 95682831484e..9c593c661cbf 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -827,31 +827,16 @@ MODULE_PARM_DESC(rx_refill_threshold, | |||
| 827 | 827 | ||
| 828 | #ifdef CONFIG_RFS_ACCEL | 828 | #ifdef CONFIG_RFS_ACCEL |
| 829 | 829 | ||
| 830 | /** | ||
| 831 | * struct efx_async_filter_insertion - Request to asynchronously insert a filter | ||
| 832 | * @net_dev: Reference to the netdevice | ||
| 833 | * @spec: The filter to insert | ||
| 834 | * @work: Workitem for this request | ||
| 835 | * @rxq_index: Identifies the channel for which this request was made | ||
| 836 | * @flow_id: Identifies the kernel-side flow for which this request was made | ||
| 837 | */ | ||
| 838 | struct efx_async_filter_insertion { | ||
| 839 | struct net_device *net_dev; | ||
| 840 | struct efx_filter_spec spec; | ||
| 841 | struct work_struct work; | ||
| 842 | u16 rxq_index; | ||
| 843 | u32 flow_id; | ||
| 844 | }; | ||
| 845 | |||
| 846 | static void efx_filter_rfs_work(struct work_struct *data) | 830 | static void efx_filter_rfs_work(struct work_struct *data) |
| 847 | { | 831 | { |
| 848 | struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, | 832 | struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, |
| 849 | work); | 833 | work); |
| 850 | struct efx_nic *efx = netdev_priv(req->net_dev); | 834 | struct efx_nic *efx = netdev_priv(req->net_dev); |
| 851 | struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); | 835 | struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); |
| 836 | int slot_idx = req - efx->rps_slot; | ||
| 852 | int rc; | 837 | int rc; |
| 853 | 838 | ||
| 854 | rc = efx->type->filter_insert(efx, &req->spec, false); | 839 | rc = efx->type->filter_insert(efx, &req->spec, true); |
| 855 | if (rc >= 0) { | 840 | if (rc >= 0) { |
| 856 | /* Remember this so we can check whether to expire the filter | 841 | /* Remember this so we can check whether to expire the filter |
| 857 | * later. | 842 | * later. |
| @@ -878,8 +863,8 @@ static void efx_filter_rfs_work(struct work_struct *data) | |||
| 878 | } | 863 | } |
| 879 | 864 | ||
| 880 | /* Release references */ | 865 | /* Release references */ |
| 866 | clear_bit(slot_idx, &efx->rps_slot_map); | ||
| 881 | dev_put(req->net_dev); | 867 | dev_put(req->net_dev); |
| 882 | kfree(req); | ||
| 883 | } | 868 | } |
| 884 | 869 | ||
| 885 | int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | 870 | int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, |
| @@ -888,22 +873,36 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 888 | struct efx_nic *efx = netdev_priv(net_dev); | 873 | struct efx_nic *efx = netdev_priv(net_dev); |
| 889 | struct efx_async_filter_insertion *req; | 874 | struct efx_async_filter_insertion *req; |
| 890 | struct flow_keys fk; | 875 | struct flow_keys fk; |
| 876 | int slot_idx; | ||
| 877 | int rc; | ||
| 891 | 878 | ||
| 892 | if (flow_id == RPS_FLOW_ID_INVALID) | 879 | /* find a free slot */ |
| 893 | return -EINVAL; | 880 | for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) |
| 881 | if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) | ||
| 882 | break; | ||
| 883 | if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) | ||
| 884 | return -EBUSY; | ||
| 894 | 885 | ||
| 895 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) | 886 | if (flow_id == RPS_FLOW_ID_INVALID) { |
| 896 | return -EPROTONOSUPPORT; | 887 | rc = -EINVAL; |
| 888 | goto out_clear; | ||
| 889 | } | ||
| 897 | 890 | ||
| 898 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) | 891 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { |
| 899 | return -EPROTONOSUPPORT; | 892 | rc = -EPROTONOSUPPORT; |
| 900 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) | 893 | goto out_clear; |
| 901 | return -EPROTONOSUPPORT; | 894 | } |
| 902 | 895 | ||
| 903 | req = kmalloc(sizeof(*req), GFP_ATOMIC); | 896 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { |
| 904 | if (!req) | 897 | rc = -EPROTONOSUPPORT; |
| 905 | return -ENOMEM; | 898 | goto out_clear; |
| 899 | } | ||
| 900 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { | ||
| 901 | rc = -EPROTONOSUPPORT; | ||
| 902 | goto out_clear; | ||
| 903 | } | ||
| 906 | 904 | ||
| 905 | req = efx->rps_slot + slot_idx; | ||
| 907 | efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, | 906 | efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, |
| 908 | efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, | 907 | efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, |
| 909 | rxq_index); | 908 | rxq_index); |
| @@ -933,6 +932,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 933 | req->flow_id = flow_id; | 932 | req->flow_id = flow_id; |
| 934 | schedule_work(&req->work); | 933 | schedule_work(&req->work); |
| 935 | return 0; | 934 | return 0; |
| 935 | out_clear: | ||
| 936 | clear_bit(slot_idx, &efx->rps_slot_map); | ||
| 937 | return rc; | ||
| 936 | } | 938 | } |
| 937 | 939 | ||
| 938 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) | 940 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) |
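
The sfc ARFS change above replaces the per-request kmalloc(GFP_ATOMIC) with a small pre-allocated pool: efx->rps_slot[EFX_RPS_MAX_IN_FLIGHT] plus an rps_slot_map bitmap claimed with test_and_set_bit() in efx_filter_rfs() and released with clear_bit() when the work item finishes, so at most eight insertions are in flight and the atomic path never allocates. A userspace analogue of that fixed slot pool, again using C11 atomics rather than kernel bitops (names are illustrative), is sketched below.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 8    /* mirrors EFX_RPS_MAX_IN_FLIGHT */

struct request { int flow_id; };

static struct request slot[MAX_IN_FLIGHT];
static atomic_ulong slot_map;   /* one bit per slot */

/* Claim a free slot, or return -1 when all are busy (-EBUSY). */
static int claim_slot(void)
{
    for (int i = 0; i < MAX_IN_FLIGHT; i++) {
        unsigned long bit = 1UL << i;

        if (!(atomic_fetch_or(&slot_map, bit) & bit))
            return i;
    }
    return -1;
}

static void release_slot(int i)
{
    atomic_fetch_and(&slot_map, ~(1UL << i));
}

int main(void)
{
    int idx[MAX_IN_FLIGHT + 1];

    for (int i = 0; i <= MAX_IN_FLIGHT; i++)
        idx[i] = claim_slot();

    /* The ninth request finds no free slot. */
    printf("ninth claim -> %d\n", idx[MAX_IN_FLIGHT]);

    release_slot(idx[0]);
    printf("after release -> %d\n", claim_slot());
    return 0;
}
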
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index c7bff596c665..dedd40613090 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h | |||
| @@ -347,7 +347,7 @@ enum power_event { | |||
| 347 | #define MTL_RX_OVERFLOW_INT BIT(16) | 347 | #define MTL_RX_OVERFLOW_INT BIT(16) |
| 348 | 348 | ||
| 349 | /* Default operating mode of the MAC */ | 349 | /* Default operating mode of the MAC */ |
| 350 | #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ | 350 | #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ |
| 351 | GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) | 351 | GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) |
| 352 | 352 | ||
| 353 | /* To dump the core regs excluding the Address Registers */ | 353 | /* To dump the core regs excluding the Address Registers */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a3af92ebbca8..517b1f6736a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, | |||
| 31 | 31 | ||
| 32 | value |= GMAC_CORE_INIT; | 32 | value |= GMAC_CORE_INIT; |
| 33 | 33 | ||
| 34 | /* Clear ACS bit because Ethernet switch tagging formats such as | ||
| 35 | * Broadcom tags can look like invalid LLC/SNAP packets and cause the | ||
| 36 | * hardware to truncate packets on reception. | ||
| 37 | */ | ||
| 38 | if (netdev_uses_dsa(dev)) | ||
| 39 | value &= ~GMAC_CONFIG_ACS; | ||
| 40 | |||
| 41 | if (mtu > 1500) | 34 | if (mtu > 1500) |
| 42 | value |= GMAC_CONFIG_2K; | 35 | value |= GMAC_CONFIG_2K; |
| 43 | if (mtu > 2000) | 36 | if (mtu > 2000) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a16931ce39d..b65e2d144698 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3495 | 3495 | ||
| 3496 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 3496 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
| 3497 | * Type frames (LLC/LLC-SNAP) | 3497 | * Type frames (LLC/LLC-SNAP) |
| 3498 | * | ||
| 3499 | * llc_snap is never checked in GMAC >= 4, so this ACS | ||
| 3500 | * feature is always disabled and packets need to be | ||
| 3501 | * stripped manually. | ||
| 3498 | */ | 3502 | */ |
| 3499 | if (unlikely(status != llc_snap)) | 3503 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
| 3504 | unlikely(status != llc_snap)) | ||
| 3500 | frame_len -= ETH_FCS_LEN; | 3505 | frame_len -= ETH_FCS_LEN; |
| 3501 | 3506 | ||
| 3502 | if (netif_msg_rx_status(priv)) { | 3507 | if (netif_msg_rx_status(priv)) { |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 9cbb0c8a896a..7de88b33d5b9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
| @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
| 3277 | 3277 | ||
| 3278 | err = netdev_upper_dev_link(real_dev, dev, extack); | 3278 | err = netdev_upper_dev_link(real_dev, dev, extack); |
| 3279 | if (err < 0) | 3279 | if (err < 0) |
| 3280 | goto put_dev; | 3280 | goto unregister; |
| 3281 | 3281 | ||
| 3282 | /* need to be already registered so that ->init has run and | 3282 | /* need to be already registered so that ->init has run and |
| 3283 | * the MAC addr is set | 3283 | * the MAC addr is set |
| @@ -3316,8 +3316,7 @@ del_dev: | |||
| 3316 | macsec_del_dev(macsec); | 3316 | macsec_del_dev(macsec); |
| 3317 | unlink: | 3317 | unlink: |
| 3318 | netdev_upper_dev_unlink(real_dev, dev); | 3318 | netdev_upper_dev_unlink(real_dev, dev); |
| 3319 | put_dev: | 3319 | unregister: |
| 3320 | dev_put(real_dev); | ||
| 3321 | unregister_netdevice(dev); | 3320 | unregister_netdevice(dev); |
| 3322 | return err; | 3321 | return err; |
| 3323 | } | 3322 | } |
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0f293ef28935..a97ac8c12c4c 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/ethtool.h> | 20 | #include <linux/ethtool.h> |
| 21 | #include <linux/phy.h> | 21 | #include <linux/phy.h> |
| 22 | #include <linux/microchipphy.h> | 22 | #include <linux/microchipphy.h> |
| 23 | #include <linux/delay.h> | ||
| 23 | 24 | ||
| 24 | #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" | 25 | #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" |
| 25 | #define DRIVER_DESC "Microchip LAN88XX PHY driver" | 26 | #define DRIVER_DESC "Microchip LAN88XX PHY driver" |
| @@ -30,6 +31,16 @@ struct lan88xx_priv { | |||
| 30 | __u32 wolopts; | 31 | __u32 wolopts; |
| 31 | }; | 32 | }; |
| 32 | 33 | ||
| 34 | static int lan88xx_read_page(struct phy_device *phydev) | ||
| 35 | { | ||
| 36 | return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS); | ||
| 37 | } | ||
| 38 | |||
| 39 | static int lan88xx_write_page(struct phy_device *phydev, int page) | ||
| 40 | { | ||
| 41 | return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); | ||
| 42 | } | ||
| 43 | |||
| 33 | static int lan88xx_phy_config_intr(struct phy_device *phydev) | 44 | static int lan88xx_phy_config_intr(struct phy_device *phydev) |
| 34 | { | 45 | { |
| 35 | int rc; | 46 | int rc; |
| @@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev) | |||
| 66 | return 0; | 77 | return 0; |
| 67 | } | 78 | } |
| 68 | 79 | ||
| 80 | static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, | ||
| 81 | u32 data) | ||
| 82 | { | ||
| 83 | int val, save_page, ret = 0; | ||
| 84 | u16 buf; | ||
| 85 | |||
| 86 | /* Save current page */ | ||
| 87 | save_page = phy_save_page(phydev); | ||
| 88 | if (save_page < 0) { | ||
| 89 | pr_warn("Failed to get current page\n"); | ||
| 90 | goto err; | ||
| 91 | } | ||
| 92 | |||
| 93 | /* Switch to TR page */ | ||
| 94 | lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR); | ||
| 95 | |||
| 96 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, | ||
| 97 | (data & 0xFFFF)); | ||
| 98 | if (ret < 0) { | ||
| 99 | pr_warn("Failed to write TR low data\n"); | ||
| 100 | goto err; | ||
| 101 | } | ||
| 102 | |||
| 103 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, | ||
| 104 | (data & 0x00FF0000) >> 16); | ||
| 105 | if (ret < 0) { | ||
| 106 | pr_warn("Failed to write TR high data\n"); | ||
| 107 | goto err; | ||
| 108 | } | ||
| 109 | |||
| 110 | /* Config control bits [15:13] of register */ | ||
| 111 | buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */ | ||
| 112 | buf |= 0x8000; /* Set [15] to Packet transmit */ | ||
| 113 | |||
| 114 | ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); | ||
| 115 | if (ret < 0) { | ||
| 116 | pr_warn("Failed to write data in reg\n"); | ||
| 117 | goto err; | ||
| 118 | } | ||
| 119 | |||
| 120 | usleep_range(1000, 2000);/* Wait for Data to be written */ | ||
| 121 | val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR); | ||
| 122 | if (!(val & 0x8000)) | ||
| 123 | pr_warn("TR Register[0x%X] configuration failed\n", regaddr); | ||
| 124 | err: | ||
| 125 | return phy_restore_page(phydev, save_page, ret); | ||
| 126 | } | ||
| 127 | |||
| 128 | static void lan88xx_config_TR_regs(struct phy_device *phydev) | ||
| 129 | { | ||
| 130 | int err; | ||
| 131 | |||
| 132 | /* Get access to Channel 0x1, Node 0xF , Register 0x01. | ||
| 133 | * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf, | ||
| 134 | * MrvlTrFix1000Kp, MasterEnableTR bits. | ||
| 135 | */ | ||
| 136 | err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); | ||
| 137 | if (err < 0) | ||
| 138 | pr_warn("Failed to Set Register[0x0F82]\n"); | ||
| 139 | |||
| 140 | /* Get access to Channel b'10, Node b'1101, Register 0x06. | ||
| 141 | * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, | ||
| 142 | * SSTrKp1000Mas bits. | ||
| 143 | */ | ||
| 144 | err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); | ||
| 145 | if (err < 0) | ||
| 146 | pr_warn("Failed to Set Register[0x168C]\n"); | ||
| 147 | |||
| 148 | /* Get access to Channel b'10, Node b'1111, Register 0x11. | ||
| 149 | * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh | ||
| 150 | * bits | ||
| 151 | */ | ||
| 152 | err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); | ||
| 153 | if (err < 0) | ||
| 154 | pr_warn("Failed to Set Register[0x17A2]\n"); | ||
| 155 | |||
| 156 | /* Get access to Channel b'10, Node b'1101, Register 0x10. | ||
| 157 | * Write 24-bit value 0xEEFFDD to register. Setting | ||
| 158 | * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000, | ||
| 159 | * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits. | ||
| 160 | */ | ||
| 161 | err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); | ||
| 162 | if (err < 0) | ||
| 163 | pr_warn("Failed to Set Register[0x16A0]\n"); | ||
| 164 | |||
| 165 | /* Get access to Channel b'10, Node b'1101, Register 0x13. | ||
| 166 | * Write 24-bit value 0x071448 to register. Setting | ||
| 167 | * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits. | ||
| 168 | */ | ||
| 169 | err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); | ||
| 170 | if (err < 0) | ||
| 171 | pr_warn("Failed to Set Register[0x16A6]\n"); | ||
| 172 | |||
| 173 | /* Get access to Channel b'10, Node b'1101, Register 0x12. | ||
| 174 | * Write 24-bit value 0x13132F to register. Setting | ||
| 175 | * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits. | ||
| 176 | */ | ||
| 177 | err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); | ||
| 178 | if (err < 0) | ||
| 179 | pr_warn("Failed to Set Register[0x16A4]\n"); | ||
| 180 | |||
| 181 | /* Get access to Channel b'10, Node b'1101, Register 0x14. | ||
| 182 | * Write 24-bit value 0x0 to register. Setting eee_3level_delay, | ||
| 183 | * eee_TrKf_freeze_delay bits. | ||
| 184 | */ | ||
| 185 | err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); | ||
| 186 | if (err < 0) | ||
| 187 | pr_warn("Failed to Set Register[0x16A8]\n"); | ||
| 188 | |||
| 189 | /* Get access to Channel b'01, Node b'1111, Register 0x34. | ||
| 190 | * Write 24-bit value 0x91B06C to register. Setting | ||
| 191 | * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000, | ||
| 192 | * FastMseSearchUpdGain1000 bits. | ||
| 193 | */ | ||
| 194 | err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); | ||
| 195 | if (err < 0) | ||
| 196 | pr_warn("Failed to Set Register[0x0FE8]\n"); | ||
| 197 | |||
| 198 | /* Get access to Channel b'01, Node b'1111, Register 0x3E. | ||
| 199 | * Write 24-bit value 0xC0A028 to register. Setting | ||
| 200 | * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000, | ||
| 201 | * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits. | ||
| 202 | */ | ||
| 203 | err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); | ||
| 204 | if (err < 0) | ||
| 205 | pr_warn("Failed to Set Register[0x0FFC]\n"); | ||
| 206 | |||
| 207 | /* Get access to Channel b'01, Node b'1111, Register 0x35. | ||
| 208 | * Write 24-bit value 0x041600 to register. Setting | ||
| 209 | * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000, | ||
| 210 | * FastMsePhChangeDelay1000 bits. | ||
| 211 | */ | ||
| 212 | err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); | ||
| 213 | if (err < 0) | ||
| 214 | pr_warn("Failed to Set Register[0x0FEA]\n"); | ||
| 215 | |||
| 216 | /* Get access to Channel b'10, Node b'1101, Register 0x03. | ||
| 217 | * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. | ||
| 218 | */ | ||
| 219 | err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); | ||
| 220 | if (err < 0) | ||
| 221 | pr_warn("Failed to Set Register[0x1686]\n"); | ||
| 222 | } | ||
| 223 | |||
| 69 | static int lan88xx_probe(struct phy_device *phydev) | 224 | static int lan88xx_probe(struct phy_device *phydev) |
| 70 | { | 225 | { |
| 71 | struct device *dev = &phydev->mdio.dev; | 226 | struct device *dev = &phydev->mdio.dev; |
| @@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev) | |||
| 132 | phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); | 287 | phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); |
| 133 | } | 288 | } |
| 134 | 289 | ||
| 290 | static int lan88xx_config_init(struct phy_device *phydev) | ||
| 291 | { | ||
| 292 | int val; | ||
| 293 | |||
| 294 | genphy_config_init(phydev); | ||
| 295 | /*Zerodetect delay enable */ | ||
| 296 | val = phy_read_mmd(phydev, MDIO_MMD_PCS, | ||
| 297 | PHY_ARDENNES_MMD_DEV_3_PHY_CFG); | ||
| 298 | val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_; | ||
| 299 | |||
| 300 | phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG, | ||
| 301 | val); | ||
| 302 | |||
| 303 | /* Config DSP registers */ | ||
| 304 | lan88xx_config_TR_regs(phydev); | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 135 | static int lan88xx_config_aneg(struct phy_device *phydev) | 309 | static int lan88xx_config_aneg(struct phy_device *phydev) |
| 136 | { | 310 | { |
| 137 | lan88xx_set_mdix(phydev); | 311 | lan88xx_set_mdix(phydev); |
| @@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = { | |||
| 151 | .probe = lan88xx_probe, | 325 | .probe = lan88xx_probe, |
| 152 | .remove = lan88xx_remove, | 326 | .remove = lan88xx_remove, |
| 153 | 327 | ||
| 154 | .config_init = genphy_config_init, | 328 | .config_init = lan88xx_config_init, |
| 155 | .config_aneg = lan88xx_config_aneg, | 329 | .config_aneg = lan88xx_config_aneg, |
| 156 | 330 | ||
| 157 | .ack_interrupt = lan88xx_phy_ack_interrupt, | 331 | .ack_interrupt = lan88xx_phy_ack_interrupt, |
| @@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = { | |||
| 160 | .suspend = lan88xx_suspend, | 334 | .suspend = lan88xx_suspend, |
| 161 | .resume = genphy_resume, | 335 | .resume = genphy_resume, |
| 162 | .set_wol = lan88xx_set_wol, | 336 | .set_wol = lan88xx_set_wol, |
| 337 | .read_page = lan88xx_read_page, | ||
| 338 | .write_page = lan88xx_write_page, | ||
| 163 | } }; | 339 | } }; |
| 164 | 340 | ||
| 165 | module_phy_driver(microchip_phy_driver); | 341 | module_phy_driver(microchip_phy_driver); |
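The LAN88xx change above wraps its TR register writes in phylib's save-page/restore-page pattern via the new read_page/write_page callbacks. A standalone mock of that pattern is sketched below; real code would use phy_save_page() and phy_restore_page(), while here the "registers" are just an array so the example can run by itself.

#include <stdio.h>

#define PAGE_SEL_REG 31			/* illustrative page-select register */

static int regs[2][32];			/* two pages of 32 registers */
static int cur_page;

static int reg_read(int reg)
{
	return reg == PAGE_SEL_REG ? cur_page : regs[cur_page][reg];
}

static void reg_write(int reg, int val)
{
	if (reg == PAGE_SEL_REG)
		cur_page = val;
	else
		regs[cur_page][reg] = val;
}

static int paged_write(int page, int reg, int val)
{
	int saved = reg_read(PAGE_SEL_REG);	/* save the current page */

	reg_write(PAGE_SEL_REG, page);		/* switch to the target page */
	reg_write(reg, val);			/* perform the access */
	reg_write(PAGE_SEL_REG, saved);		/* restore the saved page */
	return 0;
}

int main(void)
{
	paged_write(1, 7, 0xAB);
	printf("page1 reg7 = 0x%X\n", regs[1][7]);
	return 0;
}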
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a6c6ce19eeee..acbe84967834 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, | |||
| 261 | } | 261 | } |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | static bool __team_option_inst_tmp_find(const struct list_head *opts, | ||
| 265 | const struct team_option_inst *needle) | ||
| 266 | { | ||
| 267 | struct team_option_inst *opt_inst; | ||
| 268 | |||
| 269 | list_for_each_entry(opt_inst, opts, tmp_list) | ||
| 270 | if (opt_inst == needle) | ||
| 271 | return true; | ||
| 272 | return false; | ||
| 273 | } | ||
| 274 | |||
| 264 | static int __team_options_register(struct team *team, | 275 | static int __team_options_register(struct team *team, |
| 265 | const struct team_option *option, | 276 | const struct team_option *option, |
| 266 | size_t option_count) | 277 | size_t option_count) |
| @@ -2568,6 +2579,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
| 2568 | if (err) | 2579 | if (err) |
| 2569 | goto team_put; | 2580 | goto team_put; |
| 2570 | opt_inst->changed = true; | 2581 | opt_inst->changed = true; |
| 2582 | |||
| 2583 | /* dumb/evil user-space can send us duplicate opt, | ||
| 2584 | * keep only the last one | ||
| 2585 | */ | ||
| 2586 | if (__team_option_inst_tmp_find(&opt_inst_list, | ||
| 2587 | opt_inst)) | ||
| 2588 | continue; | ||
| 2589 | |||
| 2571 | list_add(&opt_inst->tmp_list, &opt_inst_list); | 2590 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
| 2572 | } | 2591 | } |
| 2573 | if (!opt_found) { | 2592 | if (!opt_found) { |
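The team change above guards against user space sending the same option twice in one request: the value written last wins, but the option instance must not be inserted into the temporary list a second time, since double-adding the same node corrupts a linked list. A standalone sketch of that dedup, with a plain pointer array standing in for the kernel's intrusive list:

#include <stdbool.h>
#include <stdio.h>

struct opt_inst { int value; };

static const struct opt_inst *pending[16];
static int npending;

static bool already_pending(const struct opt_inst *needle)
{
	for (int i = 0; i < npending; i++)
		if (pending[i] == needle)
			return true;
	return false;
}

static void mark_pending(struct opt_inst *opt, int value)
{
	opt->value = value;		/* the last written value wins */
	if (already_pending(opt))	/* but the node is queued only once */
		return;
	pending[npending++] = opt;
}

int main(void)
{
	struct opt_inst opt = { 0 };

	mark_pending(&opt, 1);
	mark_pending(&opt, 2);		/* duplicate option from user space */
	printf("queued %d time(s), value %d\n", npending, opt.value);
	return 0;
}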
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 28583aa0c17d..ef33950a45d9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1102 | goto drop; | 1102 | goto drop; |
| 1103 | 1103 | ||
| 1104 | len = run_ebpf_filter(tun, skb, len); | 1104 | len = run_ebpf_filter(tun, skb, len); |
| 1105 | 1105 | if (len == 0 || pskb_trim(skb, len)) | |
| 1106 | /* Trim extra bytes since we may insert vlan proto & TCI | ||
| 1107 | * in tun_put_user(). | ||
| 1108 | */ | ||
| 1109 | len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; | ||
| 1110 | if (len <= 0 || pskb_trim(skb, len)) | ||
| 1111 | goto drop; | 1106 | goto drop; |
| 1112 | 1107 | ||
| 1113 | if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) | 1108 | if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ca066b785e9f..c853e7410f5a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = { | |||
| 1107 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ | 1107 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ |
| 1108 | {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ | 1108 | {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ |
| 1109 | {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ | 1109 | {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ |
| 1110 | {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ | ||
| 1110 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ | 1111 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ |
| 1111 | {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ | 1112 | {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ |
| 1112 | {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ | 1113 | {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b187ec7411e..770422e953f7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -147,6 +147,17 @@ struct receive_queue { | |||
| 147 | struct xdp_rxq_info xdp_rxq; | 147 | struct xdp_rxq_info xdp_rxq; |
| 148 | }; | 148 | }; |
| 149 | 149 | ||
| 150 | /* Control VQ buffers: protected by the rtnl lock */ | ||
| 151 | struct control_buf { | ||
| 152 | struct virtio_net_ctrl_hdr hdr; | ||
| 153 | virtio_net_ctrl_ack status; | ||
| 154 | struct virtio_net_ctrl_mq mq; | ||
| 155 | u8 promisc; | ||
| 156 | u8 allmulti; | ||
| 157 | __virtio16 vid; | ||
| 158 | __virtio64 offloads; | ||
| 159 | }; | ||
| 160 | |||
| 150 | struct virtnet_info { | 161 | struct virtnet_info { |
| 151 | struct virtio_device *vdev; | 162 | struct virtio_device *vdev; |
| 152 | struct virtqueue *cvq; | 163 | struct virtqueue *cvq; |
| @@ -192,14 +203,7 @@ struct virtnet_info { | |||
| 192 | struct hlist_node node; | 203 | struct hlist_node node; |
| 193 | struct hlist_node node_dead; | 204 | struct hlist_node node_dead; |
| 194 | 205 | ||
| 195 | /* Control VQ buffers: protected by the rtnl lock */ | 206 | struct control_buf *ctrl; |
| 196 | struct virtio_net_ctrl_hdr ctrl_hdr; | ||
| 197 | virtio_net_ctrl_ack ctrl_status; | ||
| 198 | struct virtio_net_ctrl_mq ctrl_mq; | ||
| 199 | u8 ctrl_promisc; | ||
| 200 | u8 ctrl_allmulti; | ||
| 201 | u16 ctrl_vid; | ||
| 202 | u64 ctrl_offloads; | ||
| 203 | 207 | ||
| 204 | /* Ethtool settings */ | 208 | /* Ethtool settings */ |
| 205 | u8 duplex; | 209 | u8 duplex; |
| @@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
| 1269 | { | 1273 | { |
| 1270 | struct receive_queue *rq = | 1274 | struct receive_queue *rq = |
| 1271 | container_of(napi, struct receive_queue, napi); | 1275 | container_of(napi, struct receive_queue, napi); |
| 1272 | unsigned int received; | 1276 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1277 | struct send_queue *sq; | ||
| 1278 | unsigned int received, qp; | ||
| 1273 | bool xdp_xmit = false; | 1279 | bool xdp_xmit = false; |
| 1274 | 1280 | ||
| 1275 | virtnet_poll_cleantx(rq); | 1281 | virtnet_poll_cleantx(rq); |
| @@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) | |||
| 1280 | if (received < budget) | 1286 | if (received < budget) |
| 1281 | virtqueue_napi_complete(napi, rq->vq, received); | 1287 | virtqueue_napi_complete(napi, rq->vq, received); |
| 1282 | 1288 | ||
| 1283 | if (xdp_xmit) | 1289 | if (xdp_xmit) { |
| 1290 | qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + | ||
| 1291 | smp_processor_id(); | ||
| 1292 | sq = &vi->sq[qp]; | ||
| 1293 | virtqueue_kick(sq->vq); | ||
| 1284 | xdp_do_flush_map(); | 1294 | xdp_do_flush_map(); |
| 1295 | } | ||
| 1285 | 1296 | ||
| 1286 | return received; | 1297 | return received; |
| 1287 | } | 1298 | } |
| @@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |||
| 1454 | /* Caller should know better */ | 1465 | /* Caller should know better */ |
| 1455 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); | 1466 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
| 1456 | 1467 | ||
| 1457 | vi->ctrl_status = ~0; | 1468 | vi->ctrl->status = ~0; |
| 1458 | vi->ctrl_hdr.class = class; | 1469 | vi->ctrl->hdr.class = class; |
| 1459 | vi->ctrl_hdr.cmd = cmd; | 1470 | vi->ctrl->hdr.cmd = cmd; |
| 1460 | /* Add header */ | 1471 | /* Add header */ |
| 1461 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); | 1472 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
| 1462 | sgs[out_num++] = &hdr; | 1473 | sgs[out_num++] = &hdr; |
| 1463 | 1474 | ||
| 1464 | if (out) | 1475 | if (out) |
| 1465 | sgs[out_num++] = out; | 1476 | sgs[out_num++] = out; |
| 1466 | 1477 | ||
| 1467 | /* Add return status. */ | 1478 | /* Add return status. */ |
| 1468 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); | 1479 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
| 1469 | sgs[out_num] = &stat; | 1480 | sgs[out_num] = &stat; |
| 1470 | 1481 | ||
| 1471 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); | 1482 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
| 1472 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); | 1483 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
| 1473 | 1484 | ||
| 1474 | if (unlikely(!virtqueue_kick(vi->cvq))) | 1485 | if (unlikely(!virtqueue_kick(vi->cvq))) |
| 1475 | return vi->ctrl_status == VIRTIO_NET_OK; | 1486 | return vi->ctrl->status == VIRTIO_NET_OK; |
| 1476 | 1487 | ||
| 1477 | /* Spin for a response, the kick causes an ioport write, trapping | 1488 | /* Spin for a response, the kick causes an ioport write, trapping |
| 1478 | * into the hypervisor, so the request should be handled immediately. | 1489 | * into the hypervisor, so the request should be handled immediately. |
| @@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |||
| 1481 | !virtqueue_is_broken(vi->cvq)) | 1492 | !virtqueue_is_broken(vi->cvq)) |
| 1482 | cpu_relax(); | 1493 | cpu_relax(); |
| 1483 | 1494 | ||
| 1484 | return vi->ctrl_status == VIRTIO_NET_OK; | 1495 | return vi->ctrl->status == VIRTIO_NET_OK; |
| 1485 | } | 1496 | } |
| 1486 | 1497 | ||
| 1487 | static int virtnet_set_mac_address(struct net_device *dev, void *p) | 1498 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| @@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) | |||
| 1593 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | 1604 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1594 | return 0; | 1605 | return 0; |
| 1595 | 1606 | ||
| 1596 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); | 1607 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
| 1597 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); | 1608 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
| 1598 | 1609 | ||
| 1599 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | 1610 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
| 1600 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { | 1611 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
| @@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) | |||
| 1653 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) | 1664 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1654 | return; | 1665 | return; |
| 1655 | 1666 | ||
| 1656 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); | 1667 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1657 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | 1668 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
| 1658 | 1669 | ||
| 1659 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); | 1670 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
| 1660 | 1671 | ||
| 1661 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1672 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 1662 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) | 1673 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
| 1663 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", | 1674 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
| 1664 | vi->ctrl_promisc ? "en" : "dis"); | 1675 | vi->ctrl->promisc ? "en" : "dis"); |
| 1665 | 1676 | ||
| 1666 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); | 1677 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
| 1667 | 1678 | ||
| 1668 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1679 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 1669 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) | 1680 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
| 1670 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", | 1681 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
| 1671 | vi->ctrl_allmulti ? "en" : "dis"); | 1682 | vi->ctrl->allmulti ? "en" : "dis"); |
| 1672 | 1683 | ||
| 1673 | uc_count = netdev_uc_count(dev); | 1684 | uc_count = netdev_uc_count(dev); |
| 1674 | mc_count = netdev_mc_count(dev); | 1685 | mc_count = netdev_mc_count(dev); |
| @@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, | |||
| 1714 | struct virtnet_info *vi = netdev_priv(dev); | 1725 | struct virtnet_info *vi = netdev_priv(dev); |
| 1715 | struct scatterlist sg; | 1726 | struct scatterlist sg; |
| 1716 | 1727 | ||
| 1717 | vi->ctrl_vid = vid; | 1728 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
| 1718 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | 1729 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
| 1719 | 1730 | ||
| 1720 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1731 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 1721 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) | 1732 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
| @@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, | |||
| 1729 | struct virtnet_info *vi = netdev_priv(dev); | 1740 | struct virtnet_info *vi = netdev_priv(dev); |
| 1730 | struct scatterlist sg; | 1741 | struct scatterlist sg; |
| 1731 | 1742 | ||
| 1732 | vi->ctrl_vid = vid; | 1743 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
| 1733 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | 1744 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
| 1734 | 1745 | ||
| 1735 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1746 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 1736 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) | 1747 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
| @@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) | |||
| 2126 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) | 2137 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
| 2127 | { | 2138 | { |
| 2128 | struct scatterlist sg; | 2139 | struct scatterlist sg; |
| 2129 | vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); | 2140 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
| 2130 | 2141 | ||
| 2131 | sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); | 2142 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
| 2132 | 2143 | ||
| 2133 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, | 2144 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
| 2134 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { | 2145 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
| @@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) | |||
| 2351 | 2362 | ||
| 2352 | kfree(vi->rq); | 2363 | kfree(vi->rq); |
| 2353 | kfree(vi->sq); | 2364 | kfree(vi->sq); |
| 2365 | kfree(vi->ctrl); | ||
| 2354 | } | 2366 | } |
| 2355 | 2367 | ||
| 2356 | static void _free_receive_bufs(struct virtnet_info *vi) | 2368 | static void _free_receive_bufs(struct virtnet_info *vi) |
| @@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) | |||
| 2543 | { | 2555 | { |
| 2544 | int i; | 2556 | int i; |
| 2545 | 2557 | ||
| 2558 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); | ||
| 2559 | if (!vi->ctrl) | ||
| 2560 | goto err_ctrl; | ||
| 2546 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); | 2561 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
| 2547 | if (!vi->sq) | 2562 | if (!vi->sq) |
| 2548 | goto err_sq; | 2563 | goto err_sq; |
| @@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) | |||
| 2571 | err_rq: | 2586 | err_rq: |
| 2572 | kfree(vi->sq); | 2587 | kfree(vi->sq); |
| 2573 | err_sq: | 2588 | err_sq: |
| 2589 | kfree(vi->ctrl); | ||
| 2590 | err_ctrl: | ||
| 2574 | return -ENOMEM; | 2591 | return -ENOMEM; |
| 2575 | } | 2592 | } |
| 2576 | 2593 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e04937f44f33..9ebe2a689966 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, | |||
| 1218 | union { | 1218 | union { |
| 1219 | void *ptr; | 1219 | void *ptr; |
| 1220 | struct ethhdr *eth; | 1220 | struct ethhdr *eth; |
| 1221 | struct vlan_ethhdr *veth; | ||
| 1221 | struct iphdr *ipv4; | 1222 | struct iphdr *ipv4; |
| 1222 | struct ipv6hdr *ipv6; | 1223 | struct ipv6hdr *ipv6; |
| 1223 | struct tcphdr *tcp; | 1224 | struct tcphdr *tcp; |
| @@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, | |||
| 1228 | if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) | 1229 | if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) |
| 1229 | return 0; | 1230 | return 0; |
| 1230 | 1231 | ||
| 1232 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || | ||
| 1233 | skb->protocol == cpu_to_be16(ETH_P_8021AD)) | ||
| 1234 | hlen = sizeof(struct vlan_ethhdr); | ||
| 1235 | else | ||
| 1236 | hlen = sizeof(struct ethhdr); | ||
| 1237 | |||
| 1231 | hdr.eth = eth_hdr(skb); | 1238 | hdr.eth = eth_hdr(skb); |
| 1232 | if (gdesc->rcd.v4) { | 1239 | if (gdesc->rcd.v4) { |
| 1233 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); | 1240 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && |
| 1234 | hdr.ptr += sizeof(struct ethhdr); | 1241 | hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); |
| 1242 | hdr.ptr += hlen; | ||
| 1235 | BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); | 1243 | BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); |
| 1236 | hlen = hdr.ipv4->ihl << 2; | 1244 | hlen = hdr.ipv4->ihl << 2; |
| 1237 | hdr.ptr += hdr.ipv4->ihl << 2; | 1245 | hdr.ptr += hdr.ipv4->ihl << 2; |
| 1238 | } else if (gdesc->rcd.v6) { | 1246 | } else if (gdesc->rcd.v6) { |
| 1239 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); | 1247 | BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && |
| 1240 | hdr.ptr += sizeof(struct ethhdr); | 1248 | hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); |
| 1249 | hdr.ptr += hlen; | ||
| 1241 | /* Use an estimated value, since we also need to handle | 1250 | /* Use an estimated value, since we also need to handle |
| 1242 | * TSO case. | 1251 | * TSO case. |
| 1243 | */ | 1252 | */ |
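The vmxnet3 change above accounts for VLAN-tagged frames when locating the L3 header: 802.1Q/802.1ad frames carry a 4-byte larger Ethernet header. A standalone version of that selection, using the IEEE ethertype values:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8
#define ETH_HLEN	14
#define VLAN_ETH_HLEN	18

static unsigned int l2_header_len(uint16_t ethertype)
{
	if (ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD)
		return VLAN_ETH_HLEN;	/* tagged: IP header starts 4 bytes later */
	return ETH_HLEN;
}

int main(void)
{
	printf("%u %u\n", l2_header_len(0x0800), l2_header_len(ETH_P_8021Q)); /* 14 18 */
	return 0;
}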
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 59ec34052a65..a3326463b71f 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig | |||
| @@ -103,8 +103,7 @@ config NVDIMM_DAX | |||
| 103 | Select Y if unsure | 103 | Select Y if unsure |
| 104 | 104 | ||
| 105 | config OF_PMEM | 105 | config OF_PMEM |
| 106 | # FIXME: make tristate once OF_NUMA dependency removed | 106 | tristate "Device-tree support for persistent memory regions" |
| 107 | bool "Device-tree support for persistent memory regions" | ||
| 108 | depends on OF | 107 | depends on OF |
| 109 | default LIBNVDIMM | 108 | default LIBNVDIMM |
| 110 | help | 109 | help |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
| @@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) | |||
| 88 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | 88 | int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) |
| 89 | { | 89 | { |
| 90 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | 90 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); |
| 91 | int rc = validate_dimm(ndd), cmd_rc = 0; | ||
| 91 | struct nd_cmd_get_config_data_hdr *cmd; | 92 | struct nd_cmd_get_config_data_hdr *cmd; |
| 92 | struct nvdimm_bus_descriptor *nd_desc; | 93 | struct nvdimm_bus_descriptor *nd_desc; |
| 93 | int rc = validate_dimm(ndd); | ||
| 94 | u32 max_cmd_size, config_size; | 94 | u32 max_cmd_size, config_size; |
| 95 | size_t offset; | 95 | size_t offset; |
| 96 | 96 | ||
| @@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | |||
| 124 | cmd->in_offset = offset; | 124 | cmd->in_offset = offset; |
| 125 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | 125 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), |
| 126 | ND_CMD_GET_CONFIG_DATA, cmd, | 126 | ND_CMD_GET_CONFIG_DATA, cmd, |
| 127 | cmd->in_length + sizeof(*cmd), NULL); | 127 | cmd->in_length + sizeof(*cmd), &cmd_rc); |
| 128 | if (rc || cmd->status) { | 128 | if (rc < 0) |
| 129 | rc = -ENXIO; | 129 | break; |
| 130 | if (cmd_rc < 0) { | ||
| 131 | rc = cmd_rc; | ||
| 130 | break; | 132 | break; |
| 131 | } | 133 | } |
| 132 | memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); | 134 | memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); |
| @@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) | |||
| 140 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | 142 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, |
| 141 | void *buf, size_t len) | 143 | void *buf, size_t len) |
| 142 | { | 144 | { |
| 143 | int rc = validate_dimm(ndd); | ||
| 144 | size_t max_cmd_size, buf_offset; | 145 | size_t max_cmd_size, buf_offset; |
| 145 | struct nd_cmd_set_config_hdr *cmd; | 146 | struct nd_cmd_set_config_hdr *cmd; |
| 147 | int rc = validate_dimm(ndd), cmd_rc = 0; | ||
| 146 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); | 148 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); |
| 147 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; | 149 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; |
| 148 | 150 | ||
| @@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | |||
| 164 | for (buf_offset = 0; len; len -= cmd->in_length, | 166 | for (buf_offset = 0; len; len -= cmd->in_length, |
| 165 | buf_offset += cmd->in_length) { | 167 | buf_offset += cmd->in_length) { |
| 166 | size_t cmd_size; | 168 | size_t cmd_size; |
| 167 | u32 *status; | ||
| 168 | 169 | ||
| 169 | cmd->in_offset = offset + buf_offset; | 170 | cmd->in_offset = offset + buf_offset; |
| 170 | cmd->in_length = min(max_cmd_size, len); | 171 | cmd->in_length = min(max_cmd_size, len); |
| @@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | |||
| 172 | 173 | ||
| 173 | /* status is output in the last 4-bytes of the command buffer */ | 174 | /* status is output in the last 4-bytes of the command buffer */ |
| 174 | cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); | 175 | cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); |
| 175 | status = ((void *) cmd) + cmd_size - sizeof(u32); | ||
| 176 | 176 | ||
| 177 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), | 177 | rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), |
| 178 | ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); | 178 | ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); |
| 179 | if (rc || *status) { | 179 | if (rc < 0) |
| 180 | rc = rc ? rc : -ENXIO; | 180 | break; |
| 181 | if (cmd_rc < 0) { | ||
| 182 | rc = cmd_rc; | ||
| 181 | break; | 183 | break; |
| 182 | } | 184 | } |
| 183 | } | 185 | } |
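The dimm_devs change above stops collapsing firmware failures into -ENXIO: the transport return code and the translated firmware status (cmd_rc) are checked separately, and the firmware's own error code is propagated. A sketch of that two-level check; fw_call() is a stand-in, not an nvdimm interface.

#include <stdio.h>

static int fw_call(int chunk, int *cmd_rc)
{
	*cmd_rc = (chunk == 2) ? -22 /* -EINVAL from firmware */ : 0;
	return 0;			/* the transport itself succeeded */
}

static int read_all_chunks(int nchunks)
{
	int rc = 0, cmd_rc = 0;

	for (int chunk = 0; chunk < nchunks; chunk++) {
		rc = fw_call(chunk, &cmd_rc);
		if (rc < 0)		/* transport failure */
			break;
		if (cmd_rc < 0) {	/* firmware failure: keep its code */
			rc = cmd_rc;
			break;
		}
	}
	return rc;
}

int main(void)
{
	printf("rc = %d\n", read_all_chunks(4));	/* rc = -22 */
	return 0;
}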
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c | |||
| @@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) | |||
| 67 | */ | 67 | */ |
| 68 | memset(&ndr_desc, 0, sizeof(ndr_desc)); | 68 | memset(&ndr_desc, 0, sizeof(ndr_desc)); |
| 69 | ndr_desc.attr_groups = region_attr_groups; | 69 | ndr_desc.attr_groups = region_attr_groups; |
| 70 | ndr_desc.numa_node = of_node_to_nid(np); | 70 | ndr_desc.numa_node = dev_to_node(&pdev->dev); |
| 71 | ndr_desc.res = &pdev->resource[i]; | 71 | ndr_desc.res = &pdev->resource[i]; |
| 72 | ndr_desc.of_node = np; | 72 | ndr_desc.of_node = np; |
| 73 | set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); | 73 | set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
| @@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, | |||
| 740 | tx->callback = dma_xfer_callback; | 740 | tx->callback = dma_xfer_callback; |
| 741 | tx->callback_param = req; | 741 | tx->callback_param = req; |
| 742 | 742 | ||
| 743 | req->dmach = chan; | ||
| 744 | req->sync = sync; | ||
| 745 | req->status = DMA_IN_PROGRESS; | 743 | req->status = DMA_IN_PROGRESS; |
| 746 | init_completion(&req->req_comp); | ||
| 747 | kref_get(&req->refcount); | 744 | kref_get(&req->refcount); |
| 748 | 745 | ||
| 749 | cookie = dmaengine_submit(tx); | 746 | cookie = dmaengine_submit(tx); |
| @@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, | |||
| 831 | if (!req) | 828 | if (!req) |
| 832 | return -ENOMEM; | 829 | return -ENOMEM; |
| 833 | 830 | ||
| 834 | kref_init(&req->refcount); | ||
| 835 | |||
| 836 | ret = get_dma_channel(priv); | 831 | ret = get_dma_channel(priv); |
| 837 | if (ret) { | 832 | if (ret) { |
| 838 | kfree(req); | 833 | kfree(req); |
| 839 | return ret; | 834 | return ret; |
| 840 | } | 835 | } |
| 836 | chan = priv->dmach; | ||
| 837 | |||
| 838 | kref_init(&req->refcount); | ||
| 839 | init_completion(&req->req_comp); | ||
| 840 | req->dir = dir; | ||
| 841 | req->filp = filp; | ||
| 842 | req->priv = priv; | ||
| 843 | req->dmach = chan; | ||
| 844 | req->sync = sync; | ||
| 841 | 845 | ||
| 842 | /* | 846 | /* |
| 843 | * If parameter loc_addr != NULL, we are transferring data from/to | 847 | * If parameter loc_addr != NULL, we are transferring data from/to |
| @@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, | |||
| 925 | xfer->offset, xfer->length); | 929 | xfer->offset, xfer->length); |
| 926 | } | 930 | } |
| 927 | 931 | ||
| 928 | req->dir = dir; | ||
| 929 | req->filp = filp; | ||
| 930 | req->priv = priv; | ||
| 931 | chan = priv->dmach; | ||
| 932 | |||
| 933 | nents = dma_map_sg(chan->device->dev, | 932 | nents = dma_map_sg(chan->device->dev, |
| 934 | req->sgt.sgl, req->sgt.nents, dir); | 933 | req->sgt.sgl, req->sgt.nents, dir); |
| 935 | if (nents == 0) { | 934 | if (nents == 0) { |
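The rio_mport change above fills in every field of the DMA request (refcount, completion, channel, direction, ...) before the descriptor is submitted, because the completion callback may run as soon as submission happens. A simplified sketch of that init-before-publish ordering; the structures are illustrative, not the rio_mport ones.

#include <stdio.h>
#include <string.h>

struct xfer_req {
	int refcount;
	int completed;
	int dir;
	void *chan;
};

static void submit(struct xfer_req *req)
{
	/* in the real driver the callback may fire here, on another CPU */
	req->completed = 1;
}

static int start_transfer(void *chan, int dir)
{
	struct xfer_req req;

	memset(&req, 0, sizeof(req));
	req.refcount = 1;	/* initialise everything first ... */
	req.dir = dir;
	req.chan = chan;

	submit(&req);		/* ... then hand the request to the engine */
	return req.completed ? 0 : -1;
}

int main(void)
{
	printf("%d\n", start_transfer((void *)0x1, 0));
	return 0;
}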
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index f035c2f25d35..131f1989f6f3 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | #include <asm/io.h> | 27 | #include <asm/io.h> |
| 28 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
| 29 | #include <asm/vtoc.h> | 29 | #include <asm/vtoc.h> |
| 30 | #include <asm/diag.h> | ||
| 31 | 30 | ||
| 32 | #include "dasd_int.h" | 31 | #include "dasd_int.h" |
| 33 | #include "dasd_diag.h" | 32 | #include "dasd_diag.h" |
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 5f8d9ea69ebd..eceba3858cef 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c | |||
| @@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized; | |||
| 18 | * Used to keep track of the size of the event masks. Qemu until version 2.11 | 18 | * Used to keep track of the size of the event masks. Qemu until version 2.11 |
| 19 | * only supports 4 and needs a workaround. | 19 | * only supports 4 and needs a workaround. |
| 20 | */ | 20 | */ |
| 21 | bool sclp_mask_compat_mode; | 21 | bool sclp_mask_compat_mode __section(.data); |
| 22 | 22 | ||
| 23 | void sclp_early_wait_irq(void) | 23 | void sclp_early_wait_irq(void) |
| 24 | { | 24 | { |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 50a313806dde..2ad6f12f3d49 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -21,7 +21,6 @@ | |||
| 21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
| 22 | #include <linux/hash.h> | 22 | #include <linux/hash.h> |
| 23 | #include <linux/hashtable.h> | 23 | #include <linux/hashtable.h> |
| 24 | #include <linux/string.h> | ||
| 25 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
| 26 | #include "qeth_core.h" | 25 | #include "qeth_core.h" |
| 27 | #include "qeth_l2.h" | 26 | #include "qeth_l2.h" |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 3b0c8b8a7634..066b5c3aaae6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
| @@ -176,7 +176,7 @@ static struct device_driver smsg_driver = { | |||
| 176 | 176 | ||
| 177 | static void __exit smsg_exit(void) | 177 | static void __exit smsg_exit(void) |
| 178 | { | 178 | { |
| 179 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 179 | cpcmd("SET SMSG OFF", NULL, 0, NULL); |
| 180 | device_unregister(smsg_dev); | 180 | device_unregister(smsg_dev); |
| 181 | iucv_unregister(&smsg_handler, 1); | 181 | iucv_unregister(&smsg_handler, 1); |
| 182 | driver_unregister(&smsg_driver); | 182 | driver_unregister(&smsg_driver); |
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c | |||
| @@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); | |||
| 55 | #define WDT_CTRL_WDT_INTR BIT(2) | 55 | #define WDT_CTRL_WDT_INTR BIT(2) |
| 56 | #define WDT_CTRL_RESET_SYSTEM BIT(1) | 56 | #define WDT_CTRL_RESET_SYSTEM BIT(1) |
| 57 | #define WDT_CTRL_ENABLE BIT(0) | 57 | #define WDT_CTRL_ENABLE BIT(0) |
| 58 | #define WDT_TIMEOUT_STATUS 0x10 | ||
| 59 | #define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) | ||
| 58 | 60 | ||
| 59 | /* | 61 | /* |
| 60 | * WDT_RESET_WIDTH controls the characteristics of the external pulse (if | 62 | * WDT_RESET_WIDTH controls the characteristics of the external pulse (if |
| @@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) | |||
| 192 | struct device_node *np; | 194 | struct device_node *np; |
| 193 | const char *reset_type; | 195 | const char *reset_type; |
| 194 | u32 duration; | 196 | u32 duration; |
| 197 | u32 status; | ||
| 195 | int ret; | 198 | int ret; |
| 196 | 199 | ||
| 197 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); | 200 | wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); |
| @@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) | |||
| 307 | writel(duration - 1, wdt->base + WDT_RESET_WIDTH); | 310 | writel(duration - 1, wdt->base + WDT_RESET_WIDTH); |
| 308 | } | 311 | } |
| 309 | 312 | ||
| 313 | status = readl(wdt->base + WDT_TIMEOUT_STATUS); | ||
| 314 | if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) | ||
| 315 | wdt->wdd.bootstatus = WDIOF_CARDRESET; | ||
| 316 | |||
| 310 | ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); | 317 | ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); |
| 311 | if (ret) { | 318 | if (ret) { |
| 312 | dev_err(&pdev->dev, "failed to register\n"); | 319 | dev_err(&pdev->dev, "failed to register\n"); |
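The aspeed_wdt change above reads the timeout-status register at probe time and reports a watchdog-caused reset to the watchdog core via WDIOF_CARDRESET. A standalone sketch of that mapping; the bit and flag values follow the patch and the watchdog UAPI, while the register read is mocked.

#include <stdint.h>
#include <stdio.h>

#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY	(1u << 1)
#define WDIOF_CARDRESET				0x0020

static uint32_t read_timeout_status(void)
{
	return WDT_TIMEOUT_STATUS_BOOT_SECONDARY;	/* pretend the WDT fired */
}

int main(void)
{
	uint32_t bootstatus = 0;

	if (read_timeout_status() & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
		bootstatus |= WDIOF_CARDRESET;

	printf("bootstatus = 0x%x\n", bootstatus);
	return 0;
}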
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c | |||
| @@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, | |||
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static const struct watchdog_info rwdt_ident = { | 123 | static const struct watchdog_info rwdt_ident = { |
| 124 | .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, | 124 | .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | |
| 125 | WDIOF_CARDRESET, | ||
| 125 | .identity = "Renesas WDT Watchdog", | 126 | .identity = "Renesas WDT Watchdog", |
| 126 | }; | 127 | }; |
| 127 | 128 | ||
| @@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) | |||
| 197 | return PTR_ERR(clk); | 198 | return PTR_ERR(clk); |
| 198 | 199 | ||
| 199 | pm_runtime_enable(&pdev->dev); | 200 | pm_runtime_enable(&pdev->dev); |
| 200 | |||
| 201 | pm_runtime_get_sync(&pdev->dev); | 201 | pm_runtime_get_sync(&pdev->dev); |
| 202 | priv->clk_rate = clk_get_rate(clk); | 202 | priv->clk_rate = clk_get_rate(clk); |
| 203 | priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & | ||
| 204 | RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; | ||
| 203 | pm_runtime_put(&pdev->dev); | 205 | pm_runtime_put(&pdev->dev); |
| 204 | 206 | ||
| 205 | if (!priv->clk_rate) { | 207 | if (!priv->clk_rate) { |
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c | |||
| @@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, | |||
| 299 | if (sch311x_wdt_set_heartbeat(new_timeout)) | 299 | if (sch311x_wdt_set_heartbeat(new_timeout)) |
| 300 | return -EINVAL; | 300 | return -EINVAL; |
| 301 | sch311x_wdt_keepalive(); | 301 | sch311x_wdt_keepalive(); |
| 302 | /* Fall */ | 302 | /* Fall through */ |
| 303 | case WDIOC_GETTIMEOUT: | 303 | case WDIOC_GETTIMEOUT: |
| 304 | return put_user(timeout, p); | 304 | return put_user(timeout, p); |
| 305 | default: | 305 | default: |
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c | |||
| @@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 427 | return -EINVAL; | 427 | return -EINVAL; |
| 428 | 428 | ||
| 429 | wdt_keepalive(); | 429 | wdt_keepalive(); |
| 430 | /* Fall */ | 430 | /* Fall through */ |
| 431 | 431 | ||
| 432 | case WDIOC_GETTIMEOUT: | 432 | case WDIOC_GETTIMEOUT: |
| 433 | return put_user(timeout, uarg.i); | 433 | return put_user(timeout, uarg.i); |
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c | |||
| @@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, | |||
| 178 | timeout = new_timeout; | 178 | timeout = new_timeout; |
| 179 | wafwdt_stop(); | 179 | wafwdt_stop(); |
| 180 | wafwdt_start(); | 180 | wafwdt_start(); |
| 181 | /* Fall */ | 181 | /* Fall through */ |
| 182 | case WDIOC_GETTIMEOUT: | 182 | case WDIOC_GETTIMEOUT: |
| 183 | return put_user(timeout, p); | 183 | return put_user(timeout, p); |
| 184 | 184 | ||
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c index 89d9744ece61..ed593d1042a6 100644 --- a/drivers/xen/xen-pciback/conf_space_quirks.c +++ b/drivers/xen/xen-pciback/conf_space_quirks.c | |||
| @@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev) | |||
| 95 | struct xen_pcibk_config_quirk *quirk; | 95 | struct xen_pcibk_config_quirk *quirk; |
| 96 | int ret = 0; | 96 | int ret = 0; |
| 97 | 97 | ||
| 98 | quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); | 98 | quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); |
| 99 | if (!quirk) { | 99 | if (!quirk) { |
| 100 | ret = -ENOMEM; | 100 | ret = -ENOMEM; |
| 101 | goto out; | 101 | goto out; |
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9e480fdebe1f..59661db144e5 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
| @@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) | |||
| 71 | 71 | ||
| 72 | dev_dbg(&dev->dev, "pcistub_device_alloc\n"); | 72 | dev_dbg(&dev->dev, "pcistub_device_alloc\n"); |
| 73 | 73 | ||
| 74 | psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); | 74 | psdev = kzalloc(sizeof(*psdev), GFP_KERNEL); |
| 75 | if (!psdev) | 75 | if (!psdev) |
| 76 | return NULL; | 76 | return NULL; |
| 77 | 77 | ||
| @@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev) | |||
| 364 | * here and then to call kfree(pci_get_drvdata(psdev->dev)). | 364 | * here and then to call kfree(pci_get_drvdata(psdev->dev)). |
| 365 | */ | 365 | */ |
| 366 | dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") | 366 | dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") |
| 367 | + strlen(pci_name(dev)) + 1, GFP_ATOMIC); | 367 | + strlen(pci_name(dev)) + 1, GFP_KERNEL); |
| 368 | if (!dev_data) { | 368 | if (!dev_data) { |
| 369 | err = -ENOMEM; | 369 | err = -ENOMEM; |
| 370 | goto out; | 370 | goto out; |
| @@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 577 | } | 577 | } |
| 578 | 578 | ||
| 579 | if (!match) { | 579 | if (!match) { |
| 580 | pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); | 580 | pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); |
| 581 | if (!pci_dev_id) { | 581 | if (!pci_dev_id) { |
| 582 | err = -ENOMEM; | 582 | err = -ENOMEM; |
| 583 | goto out; | 583 | goto out; |
| @@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, | |||
| 1149 | } | 1149 | } |
| 1150 | dev = psdev->dev; | 1150 | dev = psdev->dev; |
| 1151 | 1151 | ||
| 1152 | field = kzalloc(sizeof(*field), GFP_ATOMIC); | 1152 | field = kzalloc(sizeof(*field), GFP_KERNEL); |
| 1153 | if (!field) { | 1153 | if (!field) { |
| 1154 | err = -ENOMEM; | 1154 | err = -ENOMEM; |
| 1155 | goto out; | 1155 | goto out; |
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0d6d9264d6a9..c3e201025ef0 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
| @@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, | |||
| 403 | { | 403 | { |
| 404 | struct { | 404 | struct { |
| 405 | struct xsd_sockmsg hdr; | 405 | struct xsd_sockmsg hdr; |
| 406 | const char body[16]; | 406 | char body[16]; |
| 407 | } msg; | 407 | } msg; |
| 408 | int rc; | 408 | int rc; |
| 409 | 409 | ||
| @@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, | |||
| 412 | msg.hdr.len = strlen(reply) + 1; | 412 | msg.hdr.len = strlen(reply) + 1; |
| 413 | if (msg.hdr.len > sizeof(msg.body)) | 413 | if (msg.hdr.len > sizeof(msg.body)) |
| 414 | return -E2BIG; | 414 | return -E2BIG; |
| 415 | memcpy(&msg.body, reply, msg.hdr.len); | ||
| 415 | 416 | ||
| 416 | mutex_lock(&u->reply_mutex); | 417 | mutex_lock(&u->reply_mutex); |
| 417 | rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); | 418 | rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); |
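The xenbus change above fixes a reply path that bounds-checked the body but never copied it into the fixed buffer (which was also declared const). A standalone version of the corrected flow, with an illustrative header layout:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct reply_msg {
	struct { unsigned int type, len; } hdr;
	char body[16];
};

static int build_reply(struct reply_msg *msg, unsigned int type, const char *reply)
{
	msg->hdr.type = type;
	msg->hdr.len = strlen(reply) + 1;
	if (msg->hdr.len > sizeof(msg->body))
		return -E2BIG;		/* too big for the fixed buffer */

	memcpy(msg->body, reply, msg->hdr.len);	/* the copy the original lacked */
	return 0;
}

int main(void)
{
	struct reply_msg msg;

	printf("%d\n", build_reply(&msg, 1, "OK"));
	printf("%d\n", build_reply(&msg, 1, "this reply is far too long to fit"));
	return 0;
}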
diff --git a/fs/afs/server.c b/fs/afs/server.c index e23be63998a8..629c74986cff 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
| @@ -428,8 +428,15 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list) | |||
| 428 | } | 428 | } |
| 429 | write_sequnlock(&net->fs_lock); | 429 | write_sequnlock(&net->fs_lock); |
| 430 | 430 | ||
| 431 | if (deleted) | 431 | if (deleted) { |
| 432 | write_seqlock(&net->fs_addr_lock); | ||
| 433 | if (!hlist_unhashed(&server->addr4_link)) | ||
| 434 | hlist_del_rcu(&server->addr4_link); | ||
| 435 | if (!hlist_unhashed(&server->addr6_link)) | ||
| 436 | hlist_del_rcu(&server->addr6_link); | ||
| 437 | write_sequnlock(&net->fs_addr_lock); | ||
| 432 | afs_destroy_server(net, server); | 438 | afs_destroy_server(net, server); |
| 439 | } | ||
| 433 | } | 440 | } |
| 434 | } | 441 | } |
| 435 | 442 | ||
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 82e8f6edfb48..b12e37f27530 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
| @@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir, | |||
| 749 | 749 | ||
| 750 | autofs4_del_active(dentry); | 750 | autofs4_del_active(dentry); |
| 751 | 751 | ||
| 752 | inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); | 752 | inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); |
| 753 | if (!inode) | 753 | if (!inode) |
| 754 | return -ENOMEM; | 754 | return -ENOMEM; |
| 755 | d_add(dentry, inode); | 755 | d_add(dentry, inode); |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 41e04183e4ce..4ad6f669fe34 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, | |||
| 377 | } else | 377 | } else |
| 378 | map_addr = vm_mmap(filep, addr, size, prot, type, off); | 378 | map_addr = vm_mmap(filep, addr, size, prot, type, off); |
| 379 | 379 | ||
| 380 | if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) | 380 | if ((type & MAP_FIXED_NOREPLACE) && |
| 381 | pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", | 381 | PTR_ERR((void *)map_addr) == -EEXIST) |
| 382 | task_pid_nr(current), current->comm, | 382 | pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", |
| 383 | (void *)addr); | 383 | task_pid_nr(current), current->comm, (void *)addr); |
| 384 | 384 | ||
| 385 | return(map_addr); | 385 | return(map_addr); |
| 386 | } | 386 | } |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 8bf60250309e..ae056927080d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -669,13 +669,15 @@ void ceph_fill_file_time(struct inode *inode, int issued, | |||
| 669 | CEPH_CAP_FILE_BUFFER| | 669 | CEPH_CAP_FILE_BUFFER| |
| 670 | CEPH_CAP_AUTH_EXCL| | 670 | CEPH_CAP_AUTH_EXCL| |
| 671 | CEPH_CAP_XATTR_EXCL)) { | 671 | CEPH_CAP_XATTR_EXCL)) { |
| 672 | if (timespec_compare(ctime, &inode->i_ctime) > 0) { | 672 | if (ci->i_version == 0 || |
| 673 | timespec_compare(ctime, &inode->i_ctime) > 0) { | ||
| 673 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", | 674 | dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", |
| 674 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, | 675 | inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, |
| 675 | ctime->tv_sec, ctime->tv_nsec); | 676 | ctime->tv_sec, ctime->tv_nsec); |
| 676 | inode->i_ctime = *ctime; | 677 | inode->i_ctime = *ctime; |
| 677 | } | 678 | } |
| 678 | if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { | 679 | if (ci->i_version == 0 || |
| 680 | ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { | ||
| 679 | /* the MDS did a utimes() */ | 681 | /* the MDS did a utimes() */ |
| 680 | dout("mtime %ld.%09ld -> %ld.%09ld " | 682 | dout("mtime %ld.%09ld -> %ld.%09ld " |
| 681 | "tw %d -> %d\n", | 683 | "tw %d -> %d\n", |
| @@ -795,7 +797,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page, | |||
| 795 | new_issued = ~issued & le32_to_cpu(info->cap.caps); | 797 | new_issued = ~issued & le32_to_cpu(info->cap.caps); |
| 796 | 798 | ||
| 797 | /* update inode */ | 799 | /* update inode */ |
| 798 | ci->i_version = le64_to_cpu(info->version); | ||
| 799 | inode->i_rdev = le32_to_cpu(info->rdev); | 800 | inode->i_rdev = le32_to_cpu(info->rdev); |
| 800 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; | 801 | inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; |
| 801 | 802 | ||
| @@ -868,6 +869,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page, | |||
| 868 | xattr_blob = NULL; | 869 | xattr_blob = NULL; |
| 869 | } | 870 | } |
| 870 | 871 | ||
| 872 | /* finally update i_version */ | ||
| 873 | ci->i_version = le64_to_cpu(info->version); | ||
| 874 | |||
| 871 | inode->i_mapping->a_ops = &ceph_aops; | 875 | inode->i_mapping->a_ops = &ceph_aops; |
| 872 | 876 | ||
| 873 | switch (inode->i_mode & S_IFMT) { | 877 | switch (inode->i_mode & S_IFMT) { |
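The ceph change above only lets an MDS reply rewind timestamps on a brand-new inode (i_version still 0) and bumps i_version last, after the fields it guards have been filled in. A simplified sketch of that ordering:

#include <stdio.h>

struct inode_state {
	unsigned long version;
	long ctime;
};

static void fill_inode(struct inode_state *ci, unsigned long new_version, long new_ctime)
{
	/* accept the reply's ctime only for a fresh inode or a newer value */
	if (ci->version == 0 || new_ctime > ci->ctime)
		ci->ctime = new_ctime;

	/* ... other fields updated here ... */

	ci->version = new_version;	/* finally update the version */
}

int main(void)
{
	struct inode_state ci = { 0, 0 };

	fill_inode(&ci, 5, 100);
	fill_inode(&ci, 6, 90);		/* older ctime on a known inode: ignored */
	printf("v=%lu ctime=%ld\n", ci.version, ci.ctime);	/* v=6 ctime=100 */
	return 0;
}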
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 846ca150d52e..4dd842f72846 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
| @@ -1997,6 +1997,16 @@ out: | |||
| 1997 | return rc; | 1997 | return rc; |
| 1998 | } | 1998 | } |
| 1999 | 1999 | ||
| 2000 | static bool is_dot_dotdot(const char *name, size_t name_size) | ||
| 2001 | { | ||
| 2002 | if (name_size == 1 && name[0] == '.') | ||
| 2003 | return true; | ||
| 2004 | else if (name_size == 2 && name[0] == '.' && name[1] == '.') | ||
| 2005 | return true; | ||
| 2006 | |||
| 2007 | return false; | ||
| 2008 | } | ||
| 2009 | |||
| 2000 | /** | 2010 | /** |
| 2001 | * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext | 2011 | * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext |
| 2002 | * @plaintext_name: The plaintext name | 2012 | * @plaintext_name: The plaintext name |
| @@ -2021,13 +2031,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, | |||
| 2021 | size_t packet_size; | 2031 | size_t packet_size; |
| 2022 | int rc = 0; | 2032 | int rc = 0; |
| 2023 | 2033 | ||
| 2024 | if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) | 2034 | if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && |
| 2025 | && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) | 2035 | !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) { |
| 2026 | && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) | 2036 | if (is_dot_dotdot(name, name_size)) { |
| 2027 | && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, | 2037 | rc = ecryptfs_copy_filename(plaintext_name, |
| 2028 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { | 2038 | plaintext_name_size, |
| 2029 | const char *orig_name = name; | 2039 | name, name_size); |
| 2030 | size_t orig_name_size = name_size; | 2040 | goto out; |
| 2041 | } | ||
| 2042 | |||
| 2043 | if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE || | ||
| 2044 | strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, | ||
| 2045 | ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) { | ||
| 2046 | rc = -EINVAL; | ||
| 2047 | goto out; | ||
| 2048 | } | ||
| 2031 | 2049 | ||
| 2032 | name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; | 2050 | name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; |
| 2033 | name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; | 2051 | name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; |
| @@ -2047,12 +2065,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, | |||
| 2047 | decoded_name, | 2065 | decoded_name, |
| 2048 | decoded_name_size); | 2066 | decoded_name_size); |
| 2049 | if (rc) { | 2067 | if (rc) { |
| 2050 | printk(KERN_INFO "%s: Could not parse tag 70 packet " | 2068 | ecryptfs_printk(KERN_DEBUG, |
| 2051 | "from filename; copying through filename " | 2069 | "%s: Could not parse tag 70 packet from filename\n", |
| 2052 | "as-is\n", __func__); | 2070 | __func__); |
| 2053 | rc = ecryptfs_copy_filename(plaintext_name, | ||
| 2054 | plaintext_name_size, | ||
| 2055 | orig_name, orig_name_size); | ||
| 2056 | goto out_free; | 2071 | goto out_free; |
| 2057 | } | 2072 | } |
| 2058 | } else { | 2073 | } else { |
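As a quick illustration of the new control flow in ecryptfs_decode_and_decrypt_filename(), here is a standalone sketch of the classification it now performs when filename encryption is enabled. The prefix string, helper names and enum are illustrative assumptions; only the three outcomes and the size/prefix test come from the hunk above.

#include <stdbool.h>
#include <string.h>

/* Illustrative stand-ins; the real prefix and its length come from the
 * ecryptfs headers. */
#define FNEK_PREFIX      "ECRYPTFS_FNEK_ENCRYPTED."
#define FNEK_PREFIX_SIZE (sizeof(FNEK_PREFIX) - 1)

enum name_class { NAME_COPY_THROUGH, NAME_INVALID, NAME_DECRYPT };

static bool is_dot_dotdot(const char *name, size_t name_size)
{
	return (name_size == 1 && name[0] == '.') ||
	       (name_size == 2 && name[0] == '.' && name[1] == '.');
}

static enum name_class classify_lower_name(const char *name, size_t name_size)
{
	if (is_dot_dotdot(name, name_size))
		return NAME_COPY_THROUGH;   /* "." and ".." are copied through */
	if (name_size <= FNEK_PREFIX_SIZE ||
	    strncmp(name, FNEK_PREFIX, FNEK_PREFIX_SIZE))
		return NAME_INVALID;        /* plaintext lower name: -EINVAL */
	return NAME_DECRYPT;                /* parse the tag 70 packet */
}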
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c74ed3ca3372..b76a9853325e 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
| @@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name, | |||
| 82 | buf->sb, lower_name, | 82 | buf->sb, lower_name, |
| 83 | lower_namelen); | 83 | lower_namelen); |
| 84 | if (rc) { | 84 | if (rc) { |
| 85 | printk(KERN_ERR "%s: Error attempting to decode and decrypt " | 85 | if (rc != -EINVAL) { |
| 86 | "filename [%s]; rc = [%d]\n", __func__, lower_name, | 86 | ecryptfs_printk(KERN_DEBUG, |
| 87 | rc); | 87 | "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n", |
| 88 | goto out; | 88 | __func__, lower_name, rc); |
| 89 | return rc; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* Mask -EINVAL errors as these are most likely due to a plaintext | ||
| 93 | * filename present in the lower filesystem despite filename | ||
| 94 | * encryption being enabled. One unavoidable example would be | ||
| 95 | * the "lost+found" dentry in the root directory of an Ext4 | ||
| 96 | * filesystem. | ||
| 97 | */ | ||
| 98 | return 0; | ||
| 89 | } | 99 | } |
| 100 | |||
| 90 | buf->caller->pos = buf->ctx.pos; | 101 | buf->caller->pos = buf->ctx.pos; |
| 91 | rc = !dir_emit(buf->caller, name, name_size, ino, d_type); | 102 | rc = !dir_emit(buf->caller, name, name_size, ino, d_type); |
| 92 | kfree(name); | 103 | kfree(name); |
| 93 | if (!rc) | 104 | if (!rc) |
| 94 | buf->entries_written++; | 105 | buf->entries_written++; |
| 95 | out: | 106 | |
| 96 | return rc; | 107 | return rc; |
| 97 | } | 108 | } |
| 98 | 109 | ||
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 847904aa63a9..97d17eaeba07 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
| @@ -395,8 +395,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
| 395 | 395 | ||
| 396 | mount_crypt_stat = &ecryptfs_superblock_to_private( | 396 | mount_crypt_stat = &ecryptfs_superblock_to_private( |
| 397 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 397 | ecryptfs_dentry->d_sb)->mount_crypt_stat; |
| 398 | if (mount_crypt_stat | 398 | if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { |
| 399 | && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { | ||
| 400 | rc = ecryptfs_encrypt_and_encode_filename( | 399 | rc = ecryptfs_encrypt_and_encode_filename( |
| 401 | &encrypted_and_encoded_name, &len, | 400 | &encrypted_and_encoded_name, &len, |
| 402 | mount_crypt_stat, name, len); | 401 | mount_crypt_stat, name, len); |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c89a58cfc991..e74fe84d0886 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
| @@ -1880,7 +1880,7 @@ find_next_matching_auth_tok: | |||
| 1880 | candidate_auth_tok = &auth_tok_list_item->auth_tok; | 1880 | candidate_auth_tok = &auth_tok_list_item->auth_tok; |
| 1881 | if (unlikely(ecryptfs_verbosity > 0)) { | 1881 | if (unlikely(ecryptfs_verbosity > 0)) { |
| 1882 | ecryptfs_printk(KERN_DEBUG, | 1882 | ecryptfs_printk(KERN_DEBUG, |
| 1883 | "Considering cadidate auth tok:\n"); | 1883 | "Considering candidate auth tok:\n"); |
| 1884 | ecryptfs_dump_auth_tok(candidate_auth_tok); | 1884 | ecryptfs_dump_auth_tok(candidate_auth_tok); |
| 1885 | } | 1885 | } |
| 1886 | rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, | 1886 | rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig, |
diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 09640220fda8..047c327a6b23 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c | |||
| @@ -88,11 +88,11 @@ out_unlock: | |||
| 88 | * The default page_lock and i_size verification done by non-DAX fault paths | 88 | * The default page_lock and i_size verification done by non-DAX fault paths |
| 89 | * is sufficient because ext2 doesn't support hole punching. | 89 | * is sufficient because ext2 doesn't support hole punching. |
| 90 | */ | 90 | */ |
| 91 | static int ext2_dax_fault(struct vm_fault *vmf) | 91 | static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) |
| 92 | { | 92 | { |
| 93 | struct inode *inode = file_inode(vmf->vma->vm_file); | 93 | struct inode *inode = file_inode(vmf->vma->vm_file); |
| 94 | struct ext2_inode_info *ei = EXT2_I(inode); | 94 | struct ext2_inode_info *ei = EXT2_I(inode); |
| 95 | int ret; | 95 | vm_fault_t ret; |
| 96 | 96 | ||
| 97 | if (vmf->flags & FAULT_FLAG_WRITE) { | 97 | if (vmf->flags & FAULT_FLAG_WRITE) { |
| 98 | sb_start_pagefault(inode->i_sb); | 98 | sb_start_pagefault(inode->i_sb); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4b12ba70a895..47d7c151fcba 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits) | |||
| 745 | */ | 745 | */ |
| 746 | if (inode && inode_to_wb_is_valid(inode)) { | 746 | if (inode && inode_to_wb_is_valid(inode)) { |
| 747 | struct bdi_writeback *wb; | 747 | struct bdi_writeback *wb; |
| 748 | bool locked, congested; | 748 | struct wb_lock_cookie lock_cookie = {}; |
| 749 | bool congested; | ||
| 749 | 750 | ||
| 750 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 751 | wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); |
| 751 | congested = wb_congested(wb, cong_bits); | 752 | congested = wb_congested(wb, cong_bits); |
| 752 | unlocked_inode_to_wb_end(inode, locked); | 753 | unlocked_inode_to_wb_end(inode, &lock_cookie); |
| 753 | return congested; | 754 | return congested; |
| 754 | } | 755 | } |
| 755 | 756 | ||
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 9bb2fe35799d..10205ececc27 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 21 | #include <linux/bio.h> | 21 | #include <linux/bio.h> |
| 22 | 22 | ||
| 23 | #include <linux/slab.h> | ||
| 23 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
| 24 | #include <linux/zlib.h> | 25 | #include <linux/zlib.h> |
| 25 | 26 | ||
| @@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, | |||
| 59 | >> bufshift; | 60 | >> bufshift; |
| 60 | int haveblocks; | 61 | int haveblocks; |
| 61 | blkcnt_t blocknum; | 62 | blkcnt_t blocknum; |
| 62 | struct buffer_head *bhs[needblocks + 1]; | 63 | struct buffer_head **bhs; |
| 63 | int curbh, curpage; | 64 | int curbh, curpage; |
| 64 | 65 | ||
| 65 | if (block_size > deflateBound(1UL << zisofs_block_shift)) { | 66 | if (block_size > deflateBound(1UL << zisofs_block_shift)) { |
| @@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, | |||
| 80 | 81 | ||
| 81 | /* Because zlib is not thread-safe, do all the I/O at the top. */ | 82 | /* Because zlib is not thread-safe, do all the I/O at the top. */ |
| 82 | blocknum = block_start >> bufshift; | 83 | blocknum = block_start >> bufshift; |
| 83 | memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); | 84 | bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL); |
| 85 | if (!bhs) { | ||
| 86 | *errp = -ENOMEM; | ||
| 87 | return 0; | ||
| 88 | } | ||
| 84 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); | 89 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); |
| 85 | ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); | 90 | ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); |
| 86 | 91 | ||
| @@ -190,6 +195,7 @@ z_eio: | |||
| 190 | b_eio: | 195 | b_eio: |
| 191 | for (i = 0; i < haveblocks; i++) | 196 | for (i = 0; i < haveblocks; i++) |
| 192 | brelse(bhs[i]); | 197 | brelse(bhs[i]); |
| 198 | kfree(bhs); | ||
| 193 | return stream.total_out; | 199 | return stream.total_out; |
| 194 | } | 200 | } |
| 195 | 201 | ||
| @@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
| 305 | unsigned int zisofs_pages_per_cblock = | 311 | unsigned int zisofs_pages_per_cblock = |
| 306 | PAGE_SHIFT <= zisofs_block_shift ? | 312 | PAGE_SHIFT <= zisofs_block_shift ? |
| 307 | (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; | 313 | (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; |
| 308 | struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; | 314 | struct page **pages; |
| 309 | pgoff_t index = page->index, end_index; | 315 | pgoff_t index = page->index, end_index; |
| 310 | 316 | ||
| 311 | end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 317 | end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| @@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
| 330 | full_page = 0; | 336 | full_page = 0; |
| 331 | pcount = 1; | 337 | pcount = 1; |
| 332 | } | 338 | } |
| 339 | pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), | ||
| 340 | sizeof(*pages), GFP_KERNEL); | ||
| 341 | if (!pages) { | ||
| 342 | unlock_page(page); | ||
| 343 | return -ENOMEM; | ||
| 344 | } | ||
| 333 | pages[full_page] = page; | 345 | pages[full_page] = page; |
| 334 | 346 | ||
| 335 | for (i = 0; i < pcount; i++, index++) { | 347 | for (i = 0; i < pcount; i++, index++) { |
| @@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page *page) | |||
| 357 | } | 369 | } |
| 358 | 370 | ||
| 359 | /* At this point, err contains 0 or -EIO depending on the "critical" page */ | 371 | /* At this point, err contains 0 or -EIO depending on the "critical" page */ |
| 372 | kfree(pages); | ||
| 360 | return err; | 373 | return err; |
| 361 | } | 374 | } |
| 362 | 375 | ||
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bc258a4402f6..ec3fba7d492f 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
| @@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt) | |||
| 394 | break; | 394 | break; |
| 395 | #ifdef CONFIG_JOLIET | 395 | #ifdef CONFIG_JOLIET |
| 396 | case Opt_iocharset: | 396 | case Opt_iocharset: |
| 397 | kfree(popt->iocharset); | ||
| 397 | popt->iocharset = match_strdup(&args[0]); | 398 | popt->iocharset = match_strdup(&args[0]); |
| 399 | if (!popt->iocharset) | ||
| 400 | return 0; | ||
| 398 | break; | 401 | break; |
| 399 | #endif | 402 | #endif |
| 400 | case Opt_map_a: | 403 | case Opt_map_a: |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f60dee7faf03..87bdf0f4cba1 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
| @@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
| 342 | static void jffs2_kill_sb(struct super_block *sb) | 342 | static void jffs2_kill_sb(struct super_block *sb) |
| 343 | { | 343 | { |
| 344 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | 344 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); |
| 345 | if (!sb_rdonly(sb)) | 345 | if (c && !sb_rdonly(sb)) |
| 346 | jffs2_stop_garbage_collect_thread(c); | 346 | jffs2_stop_garbage_collect_thread(c); |
| 347 | kill_mtd_super(sb); | 347 | kill_mtd_super(sb); |
| 348 | kfree(c); | 348 | kfree(c); |
diff --git a/fs/namespace.c b/fs/namespace.c index e398f32d7541..5f75969adff1 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
| 1089 | goto out_free; | 1089 | goto out_free; |
| 1090 | } | 1090 | } |
| 1091 | 1091 | ||
| 1092 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); | 1092 | mnt->mnt.mnt_flags = old->mnt.mnt_flags; |
| 1093 | mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); | ||
| 1093 | /* Don't allow unprivileged users to change mount flags */ | 1094 | /* Don't allow unprivileged users to change mount flags */ |
| 1094 | if (flag & CL_UNPRIVILEGED) { | 1095 | if (flag & CL_UNPRIVILEGED) { |
| 1095 | mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; | 1096 | mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; |
| @@ -2814,7 +2815,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, | |||
| 2814 | mnt_flags |= MNT_NODIRATIME; | 2815 | mnt_flags |= MNT_NODIRATIME; |
| 2815 | if (flags & MS_STRICTATIME) | 2816 | if (flags & MS_STRICTATIME) |
| 2816 | mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); | 2817 | mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); |
| 2817 | if (flags & SB_RDONLY) | 2818 | if (flags & MS_RDONLY) |
| 2818 | mnt_flags |= MNT_READONLY; | 2819 | mnt_flags |= MNT_READONLY; |
| 2819 | 2820 | ||
| 2820 | /* The default atime for remount is preservation */ | 2821 | /* The default atime for remount is preservation */ |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index d51e1bb781cf..d94e8031fe5f 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
| @@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
| 92 | u32 event_mask, | 92 | u32 event_mask, |
| 93 | const void *data, int data_type) | 93 | const void *data, int data_type) |
| 94 | { | 94 | { |
| 95 | __u32 marks_mask, marks_ignored_mask; | 95 | __u32 marks_mask = 0, marks_ignored_mask = 0; |
| 96 | const struct path *path = data; | 96 | const struct path *path = data; |
| 97 | 97 | ||
| 98 | pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" | 98 | pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" |
| @@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
| 108 | !d_can_lookup(path->dentry)) | 108 | !d_can_lookup(path->dentry)) |
| 109 | return false; | 109 | return false; |
| 110 | 110 | ||
| 111 | if (inode_mark && vfsmnt_mark) { | 111 | /* |
| 112 | marks_mask = (vfsmnt_mark->mask | inode_mark->mask); | 112 | * if the event is for a child and this inode doesn't care about |
| 113 | marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); | 113 | * events on the child, don't send it! |
| 114 | } else if (inode_mark) { | 114 | */ |
| 115 | /* | 115 | if (inode_mark && |
| 116 | * if the event is for a child and this inode doesn't care about | 116 | (!(event_mask & FS_EVENT_ON_CHILD) || |
| 117 | * events on the child, don't send it! | 117 | (inode_mark->mask & FS_EVENT_ON_CHILD))) { |
| 118 | */ | 118 | marks_mask |= inode_mark->mask; |
| 119 | if ((event_mask & FS_EVENT_ON_CHILD) && | 119 | marks_ignored_mask |= inode_mark->ignored_mask; |
| 120 | !(inode_mark->mask & FS_EVENT_ON_CHILD)) | 120 | } |
| 121 | return false; | 121 | |
| 122 | marks_mask = inode_mark->mask; | 122 | if (vfsmnt_mark) { |
| 123 | marks_ignored_mask = inode_mark->ignored_mask; | 123 | marks_mask |= vfsmnt_mark->mask; |
| 124 | } else if (vfsmnt_mark) { | 124 | marks_ignored_mask |= vfsmnt_mark->ignored_mask; |
| 125 | marks_mask = vfsmnt_mark->mask; | ||
| 126 | marks_ignored_mask = vfsmnt_mark->ignored_mask; | ||
| 127 | } else { | ||
| 128 | BUG(); | ||
| 129 | } | 125 | } |
| 130 | 126 | ||
| 131 | if (d_is_dir(path->dentry) && | 127 | if (d_is_dir(path->dentry) && |
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 219b269c737e..613ec7e5a465 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c | |||
| @@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell, | |||
| 192 | struct fsnotify_iter_info *iter_info) | 192 | struct fsnotify_iter_info *iter_info) |
| 193 | { | 193 | { |
| 194 | struct fsnotify_group *group = NULL; | 194 | struct fsnotify_group *group = NULL; |
| 195 | __u32 inode_test_mask = 0; | 195 | __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); |
| 196 | __u32 vfsmount_test_mask = 0; | 196 | __u32 marks_mask = 0; |
| 197 | __u32 marks_ignored_mask = 0; | ||
| 197 | 198 | ||
| 198 | if (unlikely(!inode_mark && !vfsmount_mark)) { | 199 | if (unlikely(!inode_mark && !vfsmount_mark)) { |
| 199 | BUG(); | 200 | BUG(); |
| @@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell, | |||
| 213 | /* does the inode mark tell us to do something? */ | 214 | /* does the inode mark tell us to do something? */ |
| 214 | if (inode_mark) { | 215 | if (inode_mark) { |
| 215 | group = inode_mark->group; | 216 | group = inode_mark->group; |
| 216 | inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); | 217 | marks_mask |= inode_mark->mask; |
| 217 | inode_test_mask &= inode_mark->mask; | 218 | marks_ignored_mask |= inode_mark->ignored_mask; |
| 218 | inode_test_mask &= ~inode_mark->ignored_mask; | ||
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | /* does the vfsmount_mark tell us to do something? */ | 221 | /* does the vfsmount_mark tell us to do something? */ |
| 222 | if (vfsmount_mark) { | 222 | if (vfsmount_mark) { |
| 223 | vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); | ||
| 224 | group = vfsmount_mark->group; | 223 | group = vfsmount_mark->group; |
| 225 | vfsmount_test_mask &= vfsmount_mark->mask; | 224 | marks_mask |= vfsmount_mark->mask; |
| 226 | vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; | 225 | marks_ignored_mask |= vfsmount_mark->ignored_mask; |
| 227 | if (inode_mark) | ||
| 228 | vfsmount_test_mask &= ~inode_mark->ignored_mask; | ||
| 229 | } | 226 | } |
| 230 | 227 | ||
| 231 | pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" | 228 | pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" |
| 232 | " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" | 229 | " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" |
| 233 | " data=%p data_is=%d cookie=%d\n", | 230 | " data=%p data_is=%d cookie=%d\n", |
| 234 | __func__, group, to_tell, mask, inode_mark, | 231 | __func__, group, to_tell, mask, inode_mark, vfsmount_mark, |
| 235 | inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, | 232 | marks_mask, marks_ignored_mask, data, |
| 236 | data_is, cookie); | 233 | data_is, cookie); |
| 237 | 234 | ||
| 238 | if (!inode_test_mask && !vfsmount_test_mask) | 235 | if (!(test_mask & marks_mask & ~marks_ignored_mask)) |
| 239 | return 0; | 236 | return 0; |
| 240 | 237 | ||
| 241 | return group->ops->handle_event(group, to_tell, inode_mark, | 238 | return group->ops->handle_event(group, to_tell, inode_mark, |
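A toy userspace rendering of the merged check now used in send_to_group(): the inode and vfsmount marks are OR-ed together and a single expression decides whether the group's handler runs. The event bits below are made up for illustration and are not the real FS_* values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EV_OPEN     0x01u  /* made-up event bits, not the real FS_* values */
#define EV_MODIFY   0x02u
#define EV_ON_CHILD 0x80u

static bool group_wants_event(uint32_t event_mask,
			      uint32_t inode_mask, uint32_t inode_ignored,
			      uint32_t mnt_mask, uint32_t mnt_ignored)
{
	uint32_t test_mask = event_mask & ~EV_ON_CHILD;
	uint32_t marks_mask = inode_mask | mnt_mask;
	uint32_t marks_ignored_mask = inode_ignored | mnt_ignored;

	return test_mask & marks_mask & ~marks_ignored_mask;
}

int main(void)
{
	/* inode mark wants OPEN, but the mount mark ignores OPEN: dropped */
	printf("%d\n", group_wants_event(EV_OPEN, EV_OPEN, 0, 0, EV_OPEN));
	/* inode mark wants MODIFY and nothing ignores it: delivered */
	printf("%d\n", group_wants_event(EV_MODIFY, EV_MODIFY, 0, 0, 0));
	return 0;
}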
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 3ae5fdba0225..10796d3fe27d 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c | |||
| @@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb) | |||
| 579 | /* provided sb cleanup */ | 579 | /* provided sb cleanup */ |
| 580 | kill_anon_super(sb); | 580 | kill_anon_super(sb); |
| 581 | 581 | ||
| 582 | if (!ORANGEFS_SB(sb)) { | ||
| 583 | mutex_lock(&orangefs_request_mutex); | ||
| 584 | mutex_unlock(&orangefs_request_mutex); | ||
| 585 | return; | ||
| 586 | } | ||
| 582 | /* | 587 | /* |
| 583 | * issue the unmount to userspace to tell it to remove the | 588 | * issue the unmount to userspace to tell it to remove the |
| 584 | * dynamic mount info it has for this superblock | 589 | * dynamic mount info it has for this superblock |
diff --git a/fs/proc/base.c b/fs/proc/base.c index eafa39a3a88c..1b2ede6abcdf 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode, | |||
| 1693 | kuid_t uid; | 1693 | kuid_t uid; |
| 1694 | kgid_t gid; | 1694 | kgid_t gid; |
| 1695 | 1695 | ||
| 1696 | if (unlikely(task->flags & PF_KTHREAD)) { | ||
| 1697 | *ruid = GLOBAL_ROOT_UID; | ||
| 1698 | *rgid = GLOBAL_ROOT_GID; | ||
| 1699 | return; | ||
| 1700 | } | ||
| 1701 | |||
| 1696 | /* Default to the tasks effective ownership */ | 1702 | /* Default to the tasks effective ownership */ |
| 1697 | rcu_read_lock(); | 1703 | rcu_read_lock(); |
| 1698 | cred = __task_cred(task); | 1704 | cred = __task_cred(task); |
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index a000d7547479..b572cc865b92 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c | |||
| @@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v) | |||
| 24 | LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), | 24 | LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), |
| 25 | LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), | 25 | LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), |
| 26 | nr_running(), nr_threads, | 26 | nr_running(), nr_threads, |
| 27 | idr_get_cursor(&task_active_pid_ns(current)->idr)); | 27 | idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); |
| 28 | return 0; | 28 | return 0; |
| 29 | } | 29 | } |
| 30 | 30 | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 65ae54659833..c486ad4b43f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, | |||
| 1310 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | 1310 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 1311 | else if (is_swap_pmd(pmd)) { | 1311 | else if (is_swap_pmd(pmd)) { |
| 1312 | swp_entry_t entry = pmd_to_swp_entry(pmd); | 1312 | swp_entry_t entry = pmd_to_swp_entry(pmd); |
| 1313 | unsigned long offset = swp_offset(entry); | ||
| 1313 | 1314 | ||
| 1315 | offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; | ||
| 1314 | frame = swp_type(entry) | | 1316 | frame = swp_type(entry) | |
| 1315 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); | 1317 | (offset << MAX_SWAPFILES_SHIFT); |
| 1316 | flags |= PM_SWAP; | 1318 | flags |= PM_SWAP; |
| 1317 | if (pmd_swp_soft_dirty(pmd)) | 1319 | if (pmd_swp_soft_dirty(pmd)) |
| 1318 | flags |= PM_SOFT_DIRTY; | 1320 | flags |= PM_SOFT_DIRTY; |
| @@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, | |||
| 1332 | break; | 1334 | break; |
| 1333 | if (pm->show_pfn && (flags & PM_PRESENT)) | 1335 | if (pm->show_pfn && (flags & PM_PRESENT)) |
| 1334 | frame++; | 1336 | frame++; |
| 1337 | else if (flags & PM_SWAP) | ||
| 1338 | frame += (1 << MAX_SWAPFILES_SHIFT); | ||
| 1335 | } | 1339 | } |
| 1336 | spin_unlock(ptl); | 1340 | spin_unlock(ptl); |
| 1337 | return err; | 1341 | return err; |
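For completeness, a hedged userspace sketch of how a pagemap consumer decodes the swap entries whose offsets the hunk above now advances per page. The bit layout follows the documented pagemap ABI (bits 0-4 swap type, bits 5-54 swap offset, bit 62 swapped, bit 63 present); the constants are written out here rather than taken from kernel headers.

#include <stdint.h>
#include <stdio.h>

#define PM_SWAP_BIT    62
#define PM_PRESENT_BIT 63
#define SWP_TYPE_BITS  5   /* MAX_SWAPFILES_SHIFT */

void decode_pagemap_entry(uint64_t ent)
{
	if (ent & (1ULL << PM_PRESENT_BIT)) {
		/* bits 0-54: page frame number */
		printf("present, pfn=%llu\n",
		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	} else if (ent & (1ULL << PM_SWAP_BIT)) {
		/* bits 0-4: swap type, bits 5-54: swap offset */
		uint64_t type = ent & ((1ULL << SWP_TYPE_BITS) - 1);
		uint64_t offset = (ent >> SWP_TYPE_BITS) & ((1ULL << 50) - 1);

		printf("swapped, type=%llu offset=%llu\n",
		       (unsigned long long)type, (unsigned long long)offset);
	} else {
		printf("not present\n");
	}
}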
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 020c597ef9b6..d88231e3b2be 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -2966,7 +2966,7 @@ static int __init dquot_init(void) | |||
| 2966 | NULL); | 2966 | NULL); |
| 2967 | 2967 | ||
| 2968 | order = 0; | 2968 | order = 0; |
| 2969 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); | 2969 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); |
| 2970 | if (!dquot_hash) | 2970 | if (!dquot_hash) |
| 2971 | panic("Cannot create dquot hash table"); | 2971 | panic("Cannot create dquot hash table"); |
| 2972 | 2972 | ||
diff --git a/fs/super.c b/fs/super.c index 5fa9a8d8d865..122c402049a2 100644 --- a/fs/super.c +++ b/fs/super.c | |||
| @@ -167,6 +167,7 @@ static void destroy_unused_super(struct super_block *s) | |||
| 167 | security_sb_free(s); | 167 | security_sb_free(s); |
| 168 | put_user_ns(s->s_user_ns); | 168 | put_user_ns(s->s_user_ns); |
| 169 | kfree(s->s_subtype); | 169 | kfree(s->s_subtype); |
| 170 | free_prealloced_shrinker(&s->s_shrink); | ||
| 170 | /* no delays needed */ | 171 | /* no delays needed */ |
| 171 | destroy_super_work(&s->destroy_work); | 172 | destroy_super_work(&s->destroy_work); |
| 172 | } | 173 | } |
| @@ -252,6 +253,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, | |||
| 252 | s->s_shrink.count_objects = super_cache_count; | 253 | s->s_shrink.count_objects = super_cache_count; |
| 253 | s->s_shrink.batch = 1024; | 254 | s->s_shrink.batch = 1024; |
| 254 | s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; | 255 | s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; |
| 256 | if (prealloc_shrinker(&s->s_shrink)) | ||
| 257 | goto fail; | ||
| 255 | return s; | 258 | return s; |
| 256 | 259 | ||
| 257 | fail: | 260 | fail: |
| @@ -518,11 +521,7 @@ retry: | |||
| 518 | hlist_add_head(&s->s_instances, &type->fs_supers); | 521 | hlist_add_head(&s->s_instances, &type->fs_supers); |
| 519 | spin_unlock(&sb_lock); | 522 | spin_unlock(&sb_lock); |
| 520 | get_filesystem(type); | 523 | get_filesystem(type); |
| 521 | err = register_shrinker(&s->s_shrink); | 524 | register_shrinker_prepared(&s->s_shrink); |
| 522 | if (err) { | ||
| 523 | deactivate_locked_super(s); | ||
| 524 | s = ERR_PTR(err); | ||
| 525 | } | ||
| 526 | return s; | 525 | return s; |
| 527 | } | 526 | } |
| 528 | 527 | ||
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index f897e55f2cd0..16a8ad21b77e 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c | |||
| @@ -28,6 +28,9 @@ | |||
| 28 | 28 | ||
| 29 | #include "udf_sb.h" | 29 | #include "udf_sb.h" |
| 30 | 30 | ||
| 31 | #define SURROGATE_MASK 0xfffff800 | ||
| 32 | #define SURROGATE_PAIR 0x0000d800 | ||
| 33 | |||
| 31 | static int udf_uni2char_utf8(wchar_t uni, | 34 | static int udf_uni2char_utf8(wchar_t uni, |
| 32 | unsigned char *out, | 35 | unsigned char *out, |
| 33 | int boundlen) | 36 | int boundlen) |
| @@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni, | |||
| 37 | if (boundlen <= 0) | 40 | if (boundlen <= 0) |
| 38 | return -ENAMETOOLONG; | 41 | return -ENAMETOOLONG; |
| 39 | 42 | ||
| 43 | if ((uni & SURROGATE_MASK) == SURROGATE_PAIR) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 40 | if (uni < 0x80) { | 46 | if (uni < 0x80) { |
| 41 | out[u_len++] = (unsigned char)uni; | 47 | out[u_len++] = (unsigned char)uni; |
| 42 | } else if (uni < 0x800) { | 48 | } else if (uni < 0x800) { |
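A standalone check of the surrogate filter added above: UTF-16 surrogate code points (0xD800..0xDFFF) have no legal UTF-8 encoding, so udf_uni2char_utf8() now rejects them with -EINVAL. The mask test covers exactly that range, as this small self-test shows.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SURROGATE_MASK 0xfffff800
#define SURROGATE_PAIR 0x0000d800

static bool is_surrogate(uint32_t uni)
{
	return (uni & SURROGATE_MASK) == SURROGATE_PAIR;
}

int main(void)
{
	assert(is_surrogate(0xd800) && is_surrogate(0xdfff));   /* whole range */
	assert(!is_surrogate(0xd7ff) && !is_surrogate(0xe000)); /* neighbours  */
	return 0;
}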
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index bfe86b54f6c1..0bd432a4d7bd 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) | |||
| 223 | set_wb_congested(bdi->wb.congested, sync); | 223 | set_wb_congested(bdi->wb.congested, sync); |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | struct wb_lock_cookie { | ||
| 227 | bool locked; | ||
| 228 | unsigned long flags; | ||
| 229 | }; | ||
| 230 | |||
| 226 | #ifdef CONFIG_CGROUP_WRITEBACK | 231 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 227 | 232 | ||
| 228 | /** | 233 | /** |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f6be4b0b6c18..72ca0f3d39f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) | |||
| 347 | /** | 347 | /** |
| 348 | * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction | 348 | * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction |
| 349 | * @inode: target inode | 349 | * @inode: target inode |
| 350 | * @lockedp: temp bool output param, to be passed to the end function | 350 | * @cookie: output param, to be passed to the end function |
| 351 | * | 351 | * |
| 352 | * The caller wants to access the wb associated with @inode but isn't | 352 | * The caller wants to access the wb associated with @inode but isn't |
| 353 | * holding inode->i_lock, the i_pages lock or wb->list_lock. This | 353 | * holding inode->i_lock, the i_pages lock or wb->list_lock. This |
| @@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) | |||
| 355 | * association doesn't change until the transaction is finished with | 355 | * association doesn't change until the transaction is finished with |
| 356 | * unlocked_inode_to_wb_end(). | 356 | * unlocked_inode_to_wb_end(). |
| 357 | * | 357 | * |
| 358 | * The caller must call unlocked_inode_to_wb_end() with *@lockdep | 358 | * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and |
| 359 | * afterwards and can't sleep during transaction. IRQ may or may not be | 359 | * can't sleep during the transaction. IRQs may or may not be disabled on |
| 360 | * disabled on return. | 360 | * return. |
| 361 | */ | 361 | */ |
| 362 | static inline struct bdi_writeback * | 362 | static inline struct bdi_writeback * |
| 363 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | 363 | unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) |
| 364 | { | 364 | { |
| 365 | rcu_read_lock(); | 365 | rcu_read_lock(); |
| 366 | 366 | ||
| @@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | |||
| 368 | * Paired with store_release in inode_switch_wb_work_fn() and | 368 | * Paired with store_release in inode_switch_wb_work_fn() and |
| 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. | 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. |
| 370 | */ | 370 | */ |
| 371 | *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; | 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; |
| 372 | 372 | ||
| 373 | if (unlikely(*lockedp)) | 373 | if (unlikely(cookie->locked)) |
| 374 | xa_lock_irq(&inode->i_mapping->i_pages); | 374 | xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); |
| 375 | 375 | ||
| 376 | /* | 376 | /* |
| 377 | * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages | 377 | * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages |
| @@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | |||
| 383 | /** | 383 | /** |
| 384 | * unlocked_inode_to_wb_end - end inode wb access transaction | 384 | * unlocked_inode_to_wb_end - end inode wb access transaction |
| 385 | * @inode: target inode | 385 | * @inode: target inode |
| 386 | * @locked: *@lockedp from unlocked_inode_to_wb_begin() | 386 | * @cookie: @cookie from unlocked_inode_to_wb_begin() |
| 387 | */ | 387 | */ |
| 388 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | 388 | static inline void unlocked_inode_to_wb_end(struct inode *inode, |
| 389 | struct wb_lock_cookie *cookie) | ||
| 389 | { | 390 | { |
| 390 | if (unlikely(locked)) | 391 | if (unlikely(cookie->locked)) |
| 391 | xa_unlock_irq(&inode->i_mapping->i_pages); | 392 | xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); |
| 392 | 393 | ||
| 393 | rcu_read_unlock(); | 394 | rcu_read_unlock(); |
| 394 | } | 395 | } |
| @@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) | |||
| 435 | } | 436 | } |
| 436 | 437 | ||
| 437 | static inline struct bdi_writeback * | 438 | static inline struct bdi_writeback * |
| 438 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | 439 | unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) |
| 439 | { | 440 | { |
| 440 | return inode_to_wb(inode); | 441 | return inode_to_wb(inode); |
| 441 | } | 442 | } |
| 442 | 443 | ||
| 443 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | 444 | static inline void unlocked_inode_to_wb_end(struct inode *inode, |
| 445 | struct wb_lock_cookie *cookie) | ||
| 444 | { | 446 | { |
| 445 | } | 447 | } |
| 446 | 448 | ||
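The caller pattern under the reworked API looks like the following minimal sketch, which mirrors the fs/fs-writeback.c conversion earlier in this diff (the helper name is hypothetical): the cookie records both whether the i_pages lock was taken and the saved IRQ flags, so the end helper can restore the previous IRQ state instead of unconditionally re-enabling interrupts.

#include <linux/backing-dev.h>
#include <linux/fs.h>

/* hypothetical helper, not part of this patch */
static bool peek_wb_congested(struct inode *inode, int cong_bits)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;
	bool congested;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	congested = wb_congested(wb, cong_bits);
	unlocked_inode_to_wb_end(inode, &cookie);

	return congested;
}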
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ceb96ecab96e..7d98e263e048 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
| @@ -25,6 +25,9 @@ | |||
| 25 | #define __SANITIZE_ADDRESS__ | 25 | #define __SANITIZE_ADDRESS__ |
| 26 | #endif | 26 | #endif |
| 27 | 27 | ||
| 28 | #undef __no_sanitize_address | ||
| 29 | #define __no_sanitize_address __attribute__((no_sanitize("address"))) | ||
| 30 | |||
| 28 | /* Clang doesn't have a way to turn it off per-function, yet. */ | 31 | /* Clang doesn't have a way to turn it off per-function, yet. */ |
| 29 | #ifdef __noretpoline | 32 | #ifdef __noretpoline |
| 30 | #undef __noretpoline | 33 | #undef __noretpoline |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 9f1edb92c97e..e0c95c9f1e29 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -248,7 +248,7 @@ struct fsnotify_mark { | |||
| 248 | /* Group this mark is for. Set on mark creation, stable until last ref | 248 | /* Group this mark is for. Set on mark creation, stable until last ref |
| 249 | * is dropped */ | 249 | * is dropped */ |
| 250 | struct fsnotify_group *group; | 250 | struct fsnotify_group *group; |
| 251 | /* List of marks by group->i_fsnotify_marks. Also reused for queueing | 251 | /* List of marks by group->marks_list. Also reused for queueing |
| 252 | * mark into destroy_list when it's waiting for the end of SRCU period | 252 | * mark into destroy_list when it's waiting for the end of SRCU period |
| 253 | * before it can be freed. [group->mark_mutex] */ | 253 | * before it can be freed. [group->mark_mutex] */ |
| 254 | struct list_head g_list; | 254 | struct list_head g_list; |
diff --git a/include/linux/hid.h b/include/linux/hid.h index 8da3e1f48195..26240a22978a 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -516,6 +516,12 @@ enum hid_type { | |||
| 516 | HID_TYPE_USBNONE | 516 | HID_TYPE_USBNONE |
| 517 | }; | 517 | }; |
| 518 | 518 | ||
| 519 | enum hid_battery_status { | ||
| 520 | HID_BATTERY_UNKNOWN = 0, | ||
| 521 | HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ | ||
| 522 | HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ | ||
| 523 | }; | ||
| 524 | |||
| 519 | struct hid_driver; | 525 | struct hid_driver; |
| 520 | struct hid_ll_driver; | 526 | struct hid_ll_driver; |
| 521 | 527 | ||
| @@ -558,7 +564,8 @@ struct hid_device { /* device report descriptor */ | |||
| 558 | __s32 battery_max; | 564 | __s32 battery_max; |
| 559 | __s32 battery_report_type; | 565 | __s32 battery_report_type; |
| 560 | __s32 battery_report_id; | 566 | __s32 battery_report_id; |
| 561 | bool battery_reported; | 567 | enum hid_battery_status battery_status; |
| 568 | bool battery_avoid_query; | ||
| 562 | #endif | 569 | #endif |
| 563 | 570 | ||
| 564 | unsigned int status; /* see STAT flags above */ | 571 | unsigned int status; /* see STAT flags above */ |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d11f41d5269f..78a5a90b4267 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
| @@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) | |||
| 663 | * Returns true if the skb is tagged with multiple vlan headers, regardless | 663 | * Returns true if the skb is tagged with multiple vlan headers, regardless |
| 664 | * of whether it is hardware accelerated or not. | 664 | * of whether it is hardware accelerated or not. |
| 665 | */ | 665 | */ |
| 666 | static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | 666 | static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) |
| 667 | { | 667 | { |
| 668 | __be16 protocol = skb->protocol; | 668 | __be16 protocol = skb->protocol; |
| 669 | 669 | ||
| @@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | |||
| 673 | if (likely(!eth_type_vlan(protocol))) | 673 | if (likely(!eth_type_vlan(protocol))) |
| 674 | return false; | 674 | return false; |
| 675 | 675 | ||
| 676 | if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) | ||
| 677 | return false; | ||
| 678 | |||
| 676 | veh = (struct vlan_ethhdr *)skb->data; | 679 | veh = (struct vlan_ethhdr *)skb->data; |
| 677 | protocol = veh->h_vlan_encapsulated_proto; | 680 | protocol = veh->h_vlan_encapsulated_proto; |
| 678 | } | 681 | } |
| @@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) | |||
| 690 | * | 693 | * |
| 691 | * Returns features without unsafe ones if the skb has multiple tags. | 694 | * Returns features without unsafe ones if the skb has multiple tags. |
| 692 | */ | 695 | */ |
| 693 | static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, | 696 | static inline netdev_features_t vlan_features_check(struct sk_buff *skb, |
| 694 | netdev_features_t features) | 697 | netdev_features_t features) |
| 695 | { | 698 | { |
| 696 | if (skb_vlan_tagged_multi(skb)) { | 699 | if (skb_vlan_tagged_multi(skb)) { |
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 4754f01c1abb..aec44b1d9582 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h | |||
| @@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void) | |||
| 186 | IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); | 186 | IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | typedef int (*klp_shadow_ctor_t)(void *obj, | ||
| 190 | void *shadow_data, | ||
| 191 | void *ctor_data); | ||
| 192 | typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); | ||
| 193 | |||
| 189 | void *klp_shadow_get(void *obj, unsigned long id); | 194 | void *klp_shadow_get(void *obj, unsigned long id); |
| 190 | void *klp_shadow_alloc(void *obj, unsigned long id, void *data, | 195 | void *klp_shadow_alloc(void *obj, unsigned long id, |
| 191 | size_t size, gfp_t gfp_flags); | 196 | size_t size, gfp_t gfp_flags, |
| 192 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 197 | klp_shadow_ctor_t ctor, void *ctor_data); |
| 193 | size_t size, gfp_t gfp_flags); | 198 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, |
| 194 | void klp_shadow_free(void *obj, unsigned long id); | 199 | size_t size, gfp_t gfp_flags, |
| 195 | void klp_shadow_free_all(unsigned long id); | 200 | klp_shadow_ctor_t ctor, void *ctor_data); |
| 201 | void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); | ||
| 202 | void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); | ||
| 196 | 203 | ||
| 197 | #else /* !CONFIG_LIVEPATCH */ | 204 | #else /* !CONFIG_LIVEPATCH */ |
| 198 | 205 | ||
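To illustrate the reworked shadow-variable API declared above, here is a minimal sketch of a hypothetical livepatch caller. The object, id value and callback names are invented; only the function signatures come from the hunk.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/livepatch.h>

#define SV_COUNTER 1UL   /* hypothetical shadow variable id */

/* Runs with the shadow-variable lock held, so initialization can no longer
 * race with a concurrent klp_shadow_get_or_alloc() on the same <obj, id>. */
static int sv_counter_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	int *counter = shadow_data;

	*counter = ctor_data ? *(int *)ctor_data : 0;
	return 0;
}

static void sv_counter_dtor(void *obj, void *shadow_data)
{
	/* nothing extra to release; the shadow area itself is freed by the core */
}

static int attach_counter(void *obj)
{
	int init = 42;
	int *counter;

	counter = klp_shadow_get_or_alloc(obj, SV_COUNTER, sizeof(*counter),
					  GFP_KERNEL, sv_counter_ctor, &init);
	return counter ? 0 : -ENOMEM;
}

static void detach_counter(void *obj)
{
	klp_shadow_free(obj, SV_COUNTER, sv_counter_dtor);
}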
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h index eb492d47f717..8f9c90379732 100644 --- a/include/linux/microchipphy.h +++ b/include/linux/microchipphy.h | |||
| @@ -70,4 +70,12 @@ | |||
| 70 | #define LAN88XX_MMD3_CHIP_ID (32877) | 70 | #define LAN88XX_MMD3_CHIP_ID (32877) |
| 71 | #define LAN88XX_MMD3_CHIP_REV (32878) | 71 | #define LAN88XX_MMD3_CHIP_REV (32878) |
| 72 | 72 | ||
| 73 | /* DSP registers */ | ||
| 74 | #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) | ||
| 75 | #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) | ||
| 76 | #define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) | ||
| 77 | #define LAN88XX_EXT_PAGE_TR_CR 16 | ||
| 78 | #define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 | ||
| 79 | #define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 | ||
| 80 | |||
| 73 | #endif /* _MICROCHIPPHY_H */ | 81 | #endif /* _MICROCHIPPHY_H */ |
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 388ff2936a87..6794490f25b2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h | |||
| @@ -75,6 +75,9 @@ struct shrinker { | |||
| 75 | #define SHRINKER_NUMA_AWARE (1 << 0) | 75 | #define SHRINKER_NUMA_AWARE (1 << 0) |
| 76 | #define SHRINKER_MEMCG_AWARE (1 << 1) | 76 | #define SHRINKER_MEMCG_AWARE (1 << 1) |
| 77 | 77 | ||
| 78 | extern int register_shrinker(struct shrinker *); | 78 | extern int prealloc_shrinker(struct shrinker *shrinker); |
| 79 | extern void unregister_shrinker(struct shrinker *); | 79 | extern void register_shrinker_prepared(struct shrinker *shrinker); |
| 80 | extern int register_shrinker(struct shrinker *shrinker); | ||
| 81 | extern void unregister_shrinker(struct shrinker *shrinker); | ||
| 82 | extern void free_prealloced_shrinker(struct shrinker *shrinker); | ||
| 80 | #endif | 83 | #endif |
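The new prealloc/register split enables the two-phase registration used by fs/super.c in this diff: preallocate while the owning object is still private (so failure is easy to unwind), register only once the object is fully visible, and free the preallocation on the early-error path. A minimal sketch for a hypothetical object follows; the myfs_* names and trivial callbacks are invented for illustration.

#include <linux/shrinker.h>
#include <linux/slab.h>

struct myfs_ctx {                       /* hypothetical object owning a shrinker */
	struct shrinker shrink;
	/* ... other state ... */
};

static unsigned long myfs_shrink_count(struct shrinker *s,
				       struct shrink_control *sc)
{
	return 0;                       /* toy: nothing to reclaim */
}

static unsigned long myfs_shrink_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct myfs_ctx *myfs_alloc_ctx(void)
{
	struct myfs_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	ctx->shrink.count_objects = myfs_shrink_count;
	ctx->shrink.scan_objects = myfs_shrink_scan;
	ctx->shrink.seeks = DEFAULT_SEEKS;

	/* may fail while the object is still private; only kfree() to unwind */
	if (prealloc_shrinker(&ctx->shrink)) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static void myfs_publish_ctx(struct myfs_ctx *ctx)
{
	/* ... make ctx reachable to other threads first ... */
	register_shrinker_prepared(&ctx->shrink);   /* cannot fail */
}

static void myfs_destroy_unused_ctx(struct myfs_ctx *ctx)
{
	free_prealloced_shrinker(&ctx->shrink);     /* allocated, never registered */
	kfree(ctx);
}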
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 0494db3fd9e8..13770cfe33ad 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h | |||
| @@ -62,7 +62,7 @@ struct ts_config | |||
| 62 | int flags; | 62 | int flags; |
| 63 | 63 | ||
| 64 | /** | 64 | /** |
| 65 | * get_next_block - fetch next block of data | 65 | * @get_next_block: fetch next block of data |
| 66 | * @consumed: number of bytes consumed by the caller | 66 | * @consumed: number of bytes consumed by the caller |
| 67 | * @dst: destination buffer | 67 | * @dst: destination buffer |
| 68 | * @conf: search configuration | 68 | * @conf: search configuration |
| @@ -79,7 +79,7 @@ struct ts_config | |||
| 79 | struct ts_state *state); | 79 | struct ts_state *state); |
| 80 | 80 | ||
| 81 | /** | 81 | /** |
| 82 | * finish - finalize/clean a series of get_next_block() calls | 82 | * @finish: finalize/clean a series of get_next_block() calls |
| 83 | * @conf: search configuration | 83 | * @conf: search configuration |
| 84 | * @state: search state | 84 | * @state: search state |
| 85 | * | 85 | * |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a9..cf2862bd134a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -43,11 +43,7 @@ enum { | |||
| 43 | #define THREAD_ALIGN THREAD_SIZE | 43 | #define THREAD_ALIGN THREAD_SIZE |
| 44 | #endif | 44 | #endif |
| 45 | 45 | ||
| 46 | #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) | 46 | #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) |
| 47 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) | ||
| 48 | #else | ||
| 49 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) | ||
| 50 | #endif | ||
| 51 | 47 | ||
| 52 | /* | 48 | /* |
| 53 | * flag set/clear/test wrappers | 49 | * flag set/clear/test wrappers |
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h index c34f4490d025..26ee91300e3e 100644 --- a/include/uapi/linux/random.h +++ b/include/uapi/linux/random.h | |||
| @@ -35,6 +35,9 @@ | |||
| 35 | /* Clear the entropy pool and associated counters. (Superuser only.) */ | 35 | /* Clear the entropy pool and associated counters. (Superuser only.) */ |
| 36 | #define RNDCLEARPOOL _IO( 'R', 0x06 ) | 36 | #define RNDCLEARPOOL _IO( 'R', 0x06 ) |
| 37 | 37 | ||
| 38 | /* Reseed CRNG. (Superuser only.) */ | ||
| 39 | #define RNDRESEEDCRNG _IO( 'R', 0x07 ) | ||
| 40 | |||
| 38 | struct rand_pool_info { | 41 | struct rand_pool_info { |
| 39 | int entropy_count; | 42 | int entropy_count; |
| 40 | int buf_size; | 43 | int buf_size; |
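The new ioctl takes no argument; a privileged userspace caller would issue it roughly as in this sketch (error handling kept minimal, device node choice is the usual /dev/random).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/random.h>

int main(void)
{
	int fd = open("/dev/random", O_RDONLY);

	/* requires superuser privileges, as noted above */
	if (fd < 0 || ioctl(fd, RNDRESEEDCRNG) < 0) {
		perror("RNDRESEEDCRNG");
		return 1;
	}
	close(fd);
	return 0;
}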
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h index 5c918276835e..78bb5d9f8d83 100644 --- a/include/xen/interface/io/sndif.h +++ b/include/xen/interface/io/sndif.h | |||
| @@ -38,6 +38,13 @@ | |||
| 38 | 38 | ||
| 39 | /* | 39 | /* |
| 40 | ****************************************************************************** | 40 | ****************************************************************************** |
| 41 | * Protocol version | ||
| 42 | ****************************************************************************** | ||
| 43 | */ | ||
| 44 | #define XENSND_PROTOCOL_VERSION 2 | ||
| 45 | |||
| 46 | /* | ||
| 47 | ****************************************************************************** | ||
| 41 | * Feature and Parameter Negotiation | 48 | * Feature and Parameter Negotiation |
| 42 | ****************************************************************************** | 49 | ****************************************************************************** |
| 43 | * | 50 | * |
| @@ -106,6 +113,8 @@ | |||
| 106 | * | 113 | * |
| 107 | * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" | 114 | * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" |
| 108 | * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" | 115 | * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" |
| 116 | * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386" | ||
| 117 | * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215" | ||
| 109 | * | 118 | * |
| 110 | *------------------------------ Stream 1, capture ---------------------------- | 119 | *------------------------------ Stream 1, capture ---------------------------- |
| 111 | * | 120 | * |
| @@ -115,6 +124,8 @@ | |||
| 115 | * | 124 | * |
| 116 | * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" | 125 | * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" |
| 117 | * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" | 126 | * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" |
| 127 | * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384" | ||
| 128 | * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213" | ||
| 118 | * | 129 | * |
| 119 | *------------------------------- PCM device 1 -------------------------------- | 130 | *------------------------------- PCM device 1 -------------------------------- |
| 120 | * | 131 | * |
| @@ -128,6 +139,8 @@ | |||
| 128 | * | 139 | * |
| 129 | * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" | 140 | * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" |
| 130 | * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" | 141 | * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" |
| 142 | * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387" | ||
| 143 | * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351" | ||
| 131 | * | 144 | * |
| 132 | *------------------------------- PCM device 2 -------------------------------- | 145 | *------------------------------- PCM device 2 -------------------------------- |
| 133 | * | 146 | * |
| @@ -140,6 +153,8 @@ | |||
| 140 | * | 153 | * |
| 141 | * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" | 154 | * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" |
| 142 | * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" | 155 | * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" |
| 156 | * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389" | ||
| 157 | * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452" | ||
| 143 | * | 158 | * |
| 144 | ****************************************************************************** | 159 | ****************************************************************************** |
| 145 | * Backend XenBus Nodes | 160 | * Backend XenBus Nodes |
| @@ -285,6 +300,23 @@ | |||
| 285 | * The Xen grant reference granting permission for the backend to map | 300 | * The Xen grant reference granting permission for the backend to map |
| 286 | * a sole page in a single page sized ring buffer. | 301 | * a sole page in a single page sized ring buffer. |
| 287 | * | 302 | * |
| 303 | *--------------------- Stream Event Transport Parameters --------------------- | ||
| 304 | * | ||
| 305 | * This communication path is used to deliver asynchronous events from backend | ||
| 306 | * to frontend, set up per stream. | ||
| 307 | * | ||
| 308 | * evt-event-channel | ||
| 309 | * Values: <uint32_t> | ||
| 310 | * | ||
| 311 | * The identifier of the Xen event channel used to signal activity | ||
| 312 | * in the ring buffer. | ||
| 313 | * | ||
| 314 | * evt-ring-ref | ||
| 315 | * Values: <uint32_t> | ||
| 316 | * | ||
| 317 | * The Xen grant reference granting permission for the backend to map | ||
| 318 | * a sole page in a single page sized ring buffer. | ||
| 319 | * | ||
| 288 | ****************************************************************************** | 320 | ****************************************************************************** |
| 289 | * STATE DIAGRAMS | 321 | * STATE DIAGRAMS |
| 290 | ****************************************************************************** | 322 | ****************************************************************************** |
| @@ -432,6 +464,20 @@ | |||
| 432 | #define XENSND_OP_GET_VOLUME 5 | 464 | #define XENSND_OP_GET_VOLUME 5 |
| 433 | #define XENSND_OP_MUTE 6 | 465 | #define XENSND_OP_MUTE 6 |
| 434 | #define XENSND_OP_UNMUTE 7 | 466 | #define XENSND_OP_UNMUTE 7 |
| 467 | #define XENSND_OP_TRIGGER 8 | ||
| 468 | #define XENSND_OP_HW_PARAM_QUERY 9 | ||
| 469 | |||
| 470 | #define XENSND_OP_TRIGGER_START 0 | ||
| 471 | #define XENSND_OP_TRIGGER_PAUSE 1 | ||
| 472 | #define XENSND_OP_TRIGGER_STOP 2 | ||
| 473 | #define XENSND_OP_TRIGGER_RESUME 3 | ||
| 474 | |||
| 475 | /* | ||
| 476 | ****************************************************************************** | ||
| 477 | * EVENT CODES | ||
| 478 | ****************************************************************************** | ||
| 479 | */ | ||
| 480 | #define XENSND_EVT_CUR_POS 0 | ||
| 435 | 481 | ||
| 436 | /* | 482 | /* |
| 437 | ****************************************************************************** | 483 | ****************************************************************************** |
| @@ -448,6 +494,8 @@ | |||
| 448 | #define XENSND_FIELD_VCARD_LONG_NAME "long-name" | 494 | #define XENSND_FIELD_VCARD_LONG_NAME "long-name" |
| 449 | #define XENSND_FIELD_RING_REF "ring-ref" | 495 | #define XENSND_FIELD_RING_REF "ring-ref" |
| 450 | #define XENSND_FIELD_EVT_CHNL "event-channel" | 496 | #define XENSND_FIELD_EVT_CHNL "event-channel" |
| 497 | #define XENSND_FIELD_EVT_RING_REF "evt-ring-ref" | ||
| 498 | #define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel" | ||
| 451 | #define XENSND_FIELD_DEVICE_NAME "name" | 499 | #define XENSND_FIELD_DEVICE_NAME "name" |
| 452 | #define XENSND_FIELD_TYPE "type" | 500 | #define XENSND_FIELD_TYPE "type" |
| 453 | #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" | 501 | #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" |
| @@ -526,7 +574,7 @@ | |||
| 526 | * | 574 | * |
| 527 | *---------------------------------- Requests --------------------------------- | 575 | *---------------------------------- Requests --------------------------------- |
| 528 | * | 576 | * |
| 529 | * All request packets have the same length (32 octets) | 577 | * All request packets have the same length (64 octets) |
| 530 | * All request packets have common header: | 578 | * All request packets have common header: |
| 531 | * 0 1 2 3 octet | 579 | * 0 1 2 3 octet |
| 532 | * +----------------+----------------+----------------+----------------+ | 580 | * +----------------+----------------+----------------+----------------+ |
| @@ -559,11 +607,13 @@ | |||
| 559 | * +----------------+----------------+----------------+----------------+ | 607 | * +----------------+----------------+----------------+----------------+ |
| 560 | * | gref_directory | 24 | 608 | * | gref_directory | 24 |
| 561 | * +----------------+----------------+----------------+----------------+ | 609 | * +----------------+----------------+----------------+----------------+ |
| 562 | * | reserved | 28 | 610 | * | period_sz | 28 |
| 611 | * +----------------+----------------+----------------+----------------+ | ||
| 612 | * | reserved | 32 | ||
| 563 | * +----------------+----------------+----------------+----------------+ | 613 | * +----------------+----------------+----------------+----------------+ |
| 564 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 614 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 565 | * +----------------+----------------+----------------+----------------+ | 615 | * +----------------+----------------+----------------+----------------+ |
| 566 | * | reserved | 32 | 616 | * | reserved | 64 |
| 567 | * +----------------+----------------+----------------+----------------+ | 617 | * +----------------+----------------+----------------+----------------+ |
| 568 | * | 618 | * |
| 569 | * pcm_rate - uint32_t, stream data rate, Hz | 619 | * pcm_rate - uint32_t, stream data rate, Hz |
| @@ -571,6 +621,14 @@ | |||
| 571 | * pcm_channels - uint8_t, number of channels of this stream, | 621 | * pcm_channels - uint8_t, number of channels of this stream, |
| 572 | * [channels-min; channels-max] | 622 | * [channels-min; channels-max] |
| 573 | * buffer_sz - uint32_t, buffer size to be allocated, octets | 623 | * buffer_sz - uint32_t, buffer size to be allocated, octets |
| 624 | * period_sz - uint32_t, event period size, octets | ||
| 625 | * This is the requested value of the period at which frontend would | ||
| 626 | * like to receive XENSND_EVT_CUR_POS notifications from the backend when | ||
| 627 | * stream position advances during playback/capture. | ||
| 628 | * It shows how many octets are expected to be played/captured before | ||
| 629 | * sending such an event. | ||
| 630 | * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend. | ||
| 631 | * | ||
| 574 | * gref_directory - grant_ref_t, a reference to the first shared page | 632 | * gref_directory - grant_ref_t, a reference to the first shared page |
| 575 | * describing shared buffer references. At least one page exists. If shared | 633 | * describing shared buffer references. At least one page exists. If shared |
| 576 | * buffer size (buffer_sz) exceeds what can be addressed by this single page, | 634 | * buffer size (buffer_sz) exceeds what can be addressed by this single page, |
| @@ -585,6 +643,7 @@ struct xensnd_open_req { | |||
| 585 | uint16_t reserved; | 643 | uint16_t reserved; |
| 586 | uint32_t buffer_sz; | 644 | uint32_t buffer_sz; |
| 587 | grant_ref_t gref_directory; | 645 | grant_ref_t gref_directory; |
| 646 | uint32_t period_sz; | ||
| 588 | }; | 647 | }; |
| 589 | 648 | ||
| 590 | /* | 649 | /* |
| @@ -632,7 +691,7 @@ struct xensnd_page_directory { | |||
| 632 | * +----------------+----------------+----------------+----------------+ | 691 | * +----------------+----------------+----------------+----------------+ |
| 633 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 692 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 634 | * +----------------+----------------+----------------+----------------+ | 693 | * +----------------+----------------+----------------+----------------+ |
| 635 | * | reserved | 32 | 694 | * | reserved | 64 |
| 636 | * +----------------+----------------+----------------+----------------+ | 695 | * +----------------+----------------+----------------+----------------+ |
| 637 | * | 696 | * |
| 638 | * Request read/write - used for read (for capture) or write (for playback): | 697 | * Request read/write - used for read (for capture) or write (for playback): |
| @@ -650,7 +709,7 @@ struct xensnd_page_directory { | |||
| 650 | * +----------------+----------------+----------------+----------------+ | 709 | * +----------------+----------------+----------------+----------------+ |
| 651 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 710 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 652 | * +----------------+----------------+----------------+----------------+ | 711 | * +----------------+----------------+----------------+----------------+ |
| 653 | * | reserved | 32 | 712 | * | reserved | 64 |
| 654 | * +----------------+----------------+----------------+----------------+ | 713 | * +----------------+----------------+----------------+----------------+ |
| 655 | * | 714 | * |
| 656 | * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write | 715 | * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write |
| @@ -673,9 +732,11 @@ struct xensnd_rw_req { | |||
| 673 | * +----------------+----------------+----------------+----------------+ | 732 | * +----------------+----------------+----------------+----------------+ |
| 674 | * | length | 16 | 733 | * | length | 16 |
| 675 | * +----------------+----------------+----------------+----------------+ | 734 | * +----------------+----------------+----------------+----------------+ |
| 735 | * | reserved | 20 | ||
| 736 | * +----------------+----------------+----------------+----------------+ | ||
| 676 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 737 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 677 | * +----------------+----------------+----------------+----------------+ | 738 | * +----------------+----------------+----------------+----------------+ |
| 678 | * | reserved | 32 | 739 | * | reserved | 64 |
| 679 | * +----------------+----------------+----------------+----------------+ | 740 | * +----------------+----------------+----------------+----------------+ |
| 680 | * | 741 | * |
| 681 | * operation - XENSND_OP_SET_VOLUME for volume set | 742 | * operation - XENSND_OP_SET_VOLUME for volume set |
| @@ -713,9 +774,11 @@ struct xensnd_rw_req { | |||
| 713 | * +----------------+----------------+----------------+----------------+ | 774 | * +----------------+----------------+----------------+----------------+ |
| 714 | * | length | 16 | 775 | * | length | 16 |
| 715 | * +----------------+----------------+----------------+----------------+ | 776 | * +----------------+----------------+----------------+----------------+ |
| 777 | * | reserved | 20 | ||
| 778 | * +----------------+----------------+----------------+----------------+ | ||
| 716 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 779 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 717 | * +----------------+----------------+----------------+----------------+ | 780 | * +----------------+----------------+----------------+----------------+ |
| 718 | * | reserved | 32 | 781 | * | reserved | 64 |
| 719 | * +----------------+----------------+----------------+----------------+ | 782 | * +----------------+----------------+----------------+----------------+ |
| 720 | * | 783 | * |
| 721 | * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute | 784 | * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute |
| @@ -743,32 +806,213 @@ struct xensnd_rw_req { | |||
| 743 | * | 806 | * |
| 744 | * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, | 807 | * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, |
| 745 | * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. | 808 | * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. |
| 809 | * | ||
| 810 | * Request stream running state change - trigger PCM stream running state | ||
| 811 | * to start, stop, pause or resume: | ||
| 812 | * | ||
| 813 | * 0 1 2 3 octet | ||
| 814 | * +----------------+----------------+----------------+----------------+ | ||
| 815 | * | id | _OP_TRIGGER | reserved | 4 | ||
| 816 | * +----------------+----------------+----------------+----------------+ | ||
| 817 | * | reserved | 8 | ||
| 818 | * +----------------+----------------+----------------+----------------+ | ||
| 819 | * | type | reserved | 12 | ||
| 820 | * +----------------+----------------+----------------+----------------+ | ||
| 821 | * | reserved | 16 | ||
| 822 | * +----------------+----------------+----------------+----------------+ | ||
| 823 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
| 824 | * +----------------+----------------+----------------+----------------+ | ||
| 825 | * | reserved | 64 | ||
| 826 | * +----------------+----------------+----------------+----------------+ | ||
| 827 | * | ||
| 828 | * type - uint8_t, XENSND_OP_TRIGGER_XXX value | ||
| 746 | */ | 829 | */ |
| 747 | 830 | ||
| 831 | struct xensnd_trigger_req { | ||
| 832 | uint8_t type; | ||
| 833 | }; | ||
| 834 | |||
| 748 | /* | 835 | /* |
| 749 | *---------------------------------- Responses -------------------------------- | 836 | * Request stream parameter ranges: request intervals and |
| 837 | * masks of supported ranges for stream configuration values. | ||
| 750 | * | 838 | * |
| 751 | * All response packets have the same length (32 octets) | 839 | * Sound device configuration for a particular stream is a limited subset |
| 840 | * of the multidimensional configuration available on XenStore, e.g. | ||
| 841 | * once the frame rate has been selected, a limited supported range of | ||
| 842 | * sample rates becomes available (which might be the same set configured | ||
| 843 | * on XenStore, or a subset of it). For example, selecting a 96kHz sample | ||
| 844 | * rate may limit the number of channels available for such a configuration | ||
| 845 | * from 4 to 2, etc. Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce | ||
| 846 | * the configuration space, making it possible to iteratively converge on the | ||
| 847 | * final stream configuration used in the XENSND_OP_OPEN request. | ||
| 848 | * | ||
| 849 | * See response format for this request. | ||
| 752 | * | 850 | * |
| 753 | * Response for all requests: | ||
| 754 | * 0 1 2 3 octet | 851 | * 0 1 2 3 octet |
| 755 | * +----------------+----------------+----------------+----------------+ | 852 | * +----------------+----------------+----------------+----------------+ |
| 756 | * | id | operation | reserved | 4 | 853 | * | id | _HW_PARAM_QUERY| reserved | 4 |
| 757 | * +----------------+----------------+----------------+----------------+ | 854 | * +----------------+----------------+----------------+----------------+ |
| 758 | * | status | 8 | 855 | * | reserved | 8 |
| 856 | * +----------------+----------------+----------------+----------------+ | ||
| 857 | * | formats mask low 32-bit | 12 | ||
| 858 | * +----------------+----------------+----------------+----------------+ | ||
| 859 | * | formats mask high 32-bit | 16 | ||
| 759 | * +----------------+----------------+----------------+----------------+ | 860 | * +----------------+----------------+----------------+----------------+ |
| 760 | * | reserved | 12 | 861 | * | min rate | 20 |
| 862 | * +----------------+----------------+----------------+----------------+ | ||
| 863 | * | max rate | 24 | ||
| 864 | * +----------------+----------------+----------------+----------------+ | ||
| 865 | * | min channels | 28 | ||
| 866 | * +----------------+----------------+----------------+----------------+ | ||
| 867 | * | max channels | 32 | ||
| 868 | * +----------------+----------------+----------------+----------------+ | ||
| 869 | * | min buffer frames | 36 | ||
| 870 | * +----------------+----------------+----------------+----------------+ | ||
| 871 | * | max buffer frames | 40 | ||
| 872 | * +----------------+----------------+----------------+----------------+ | ||
| 873 | * | min period frames | 44 | ||
| 874 | * +----------------+----------------+----------------+----------------+ | ||
| 875 | * | max period frames | 48 | ||
| 876 | * +----------------+----------------+----------------+----------------+ | ||
| 877 | * | reserved | 52 | ||
| 761 | * +----------------+----------------+----------------+----------------+ | 878 | * +----------------+----------------+----------------+----------------+ |
| 762 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | 879 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| |
| 763 | * +----------------+----------------+----------------+----------------+ | 880 | * +----------------+----------------+----------------+----------------+ |
| 764 | * | reserved | 32 | 881 | * | reserved | 64 |
| 882 | * +----------------+----------------+----------------+----------------+ | ||
| 883 | * | ||
| 884 | * formats - uint64_t, bit mask representing values of the parameter | ||
| 885 | * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values | ||
| 886 | * | ||
| 887 | * For interval parameters: | ||
| 888 | * min - uint32_t, minimum value of the parameter | ||
| 889 | * max - uint32_t, maximum value of the parameter | ||
| 890 | * | ||
| 891 | * A frame is defined as the product of the number of channels and the | ||
| 892 | * number of octets per sample. | ||
| 893 | */ | ||
| 894 | |||
| 895 | struct xensnd_query_hw_param { | ||
| 896 | uint64_t formats; | ||
| 897 | struct { | ||
| 898 | uint32_t min; | ||
| 899 | uint32_t max; | ||
| 900 | } rates; | ||
| 901 | struct { | ||
| 902 | uint32_t min; | ||
| 903 | uint32_t max; | ||
| 904 | } channels; | ||
| 905 | struct { | ||
| 906 | uint32_t min; | ||
| 907 | uint32_t max; | ||
| 908 | } buffer; | ||
| 909 | struct { | ||
| 910 | uint32_t min; | ||
| 911 | uint32_t max; | ||
| 912 | } period; | ||
| 913 | }; | ||
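As an aside on how the query above is meant to be used: the frontend fills the same structure in the request with the ranges it can still accept, and the backend replies with the (possibly narrower) intersection. A minimal sketch, not part of the patch, assuming the XENSND_OP_HW_PARAM_QUERY and XENSND_PCM_FORMAT_S16_LE constants defined elsewhere in this header; the ring submission itself is omitted:

	static void fill_hw_param_query(struct xensnd_req *req, uint16_t id)
	{
		/* Ask which rates remain available once the format and the
		 * channel count have been fixed by the application. */
		req->id = id;
		req->operation = XENSND_OP_HW_PARAM_QUERY;
		req->op.hw_param.formats = 1ULL << XENSND_PCM_FORMAT_S16_LE;
		req->op.hw_param.rates.min = 8000;
		req->op.hw_param.rates.max = 192000;
		req->op.hw_param.channels.min = 2;
		req->op.hw_param.channels.max = 2;
		req->op.hw_param.buffer.min = 64;	/* frames */
		req->op.hw_param.buffer.max = 65536;
		req->op.hw_param.period.min = 64;
		req->op.hw_param.period.max = 16384;
	}

The matching response carries the same struct xensnd_query_hw_param in resp.hw_param, with every interval clipped to what the backend can actually provide for that combination.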
| 914 | |||
| 915 | /* | ||
| 916 | *---------------------------------- Responses -------------------------------- | ||
| 917 | * | ||
| 918 | * All response packets have the same length (64 octets) | ||
| 919 | * | ||
| 920 | * All response packets have a common header: | ||
| 921 | * 0 1 2 3 octet | ||
| 922 | * +----------------+----------------+----------------+----------------+ | ||
| 923 | * | id | operation | reserved | 4 | ||
| 924 | * +----------------+----------------+----------------+----------------+ | ||
| 925 | * | status | 8 | ||
| 765 | * +----------------+----------------+----------------+----------------+ | 926 | * +----------------+----------------+----------------+----------------+ |
| 766 | * | 927 | * |
| 767 | * id - uint16_t, copied from the request | 928 | * id - uint16_t, copied from the request |
| 768 | * operation - uint8_t, XENSND_OP_* - copied from request | 929 | * operation - uint8_t, XENSND_OP_* - copied from request |
| 769 | * status - int32_t, response status, zero on success and -XEN_EXX on failure | 930 | * status - int32_t, response status, zero on success and -XEN_EXX on failure |
| 931 | * | ||
| 932 | * | ||
| 933 | * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY: | ||
| 934 | * 0 1 2 3 octet | ||
| 935 | * +----------------+----------------+----------------+----------------+ | ||
| 936 | * | id | operation | reserved | 4 | ||
| 937 | * +----------------+----------------+----------------+----------------+ | ||
| 938 | * | status | 8 | ||
| 939 | * +----------------+----------------+----------------+----------------+ | ||
| 940 | * | formats mask low 32-bit | 12 | ||
| 941 | * +----------------+----------------+----------------+----------------+ | ||
| 942 | * | formats mask high 32-bit | 16 | ||
| 943 | * +----------------+----------------+----------------+----------------+ | ||
| 944 | * | min rate | 20 | ||
| 945 | * +----------------+----------------+----------------+----------------+ | ||
| 946 | * | max rate | 24 | ||
| 947 | * +----------------+----------------+----------------+----------------+ | ||
| 948 | * | min channels | 28 | ||
| 949 | * +----------------+----------------+----------------+----------------+ | ||
| 950 | * | max channels | 32 | ||
| 951 | * +----------------+----------------+----------------+----------------+ | ||
| 952 | * | min buffer frames | 36 | ||
| 953 | * +----------------+----------------+----------------+----------------+ | ||
| 954 | * | max buffer frames | 40 | ||
| 955 | * +----------------+----------------+----------------+----------------+ | ||
| 956 | * | min period frames | 44 | ||
| 957 | * +----------------+----------------+----------------+----------------+ | ||
| 958 | * | max period frames | 48 | ||
| 959 | * +----------------+----------------+----------------+----------------+ | ||
| 960 | * | reserved | 52 | ||
| 961 | * +----------------+----------------+----------------+----------------+ | ||
| 962 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
| 963 | * +----------------+----------------+----------------+----------------+ | ||
| 964 | * | reserved | 64 | ||
| 965 | * +----------------+----------------+----------------+----------------+ | ||
| 966 | * | ||
| 967 | * The meaning of the values in this response is the same as for | ||
| 968 | * the XENSND_OP_HW_PARAM_QUERY request. | ||
| 969 | */ | ||
| 970 | |||
| 971 | /* | ||
| 972 | *----------------------------------- Events ---------------------------------- | ||
| 973 | * | ||
| 974 | * Events are sent via a shared page allocated by the front and propagated by | ||
| 975 | * the evt-event-channel/evt-ring-ref XenStore entries. | ||
| 976 | * All event packets have the same length (64 octets). | ||
| 977 | * All event packets have a common header: | ||
| 978 | * 0 1 2 3 octet | ||
| 979 | * +----------------+----------------+----------------+----------------+ | ||
| 980 | * | id | type | reserved | 4 | ||
| 981 | * +----------------+----------------+----------------+----------------+ | ||
| 982 | * | reserved | 8 | ||
| 983 | * +----------------+----------------+----------------+----------------+ | ||
| 984 | * | ||
| 985 | * id - uint16_t, event id, may be used by front | ||
| 986 | * type - uint8_t, type of the event | ||
| 987 | * | ||
| 988 | * | ||
| 989 | * Current stream position - event from back to front when the stream's | ||
| 990 | * playback/capture position has advanced: | ||
| 991 | * 0 1 2 3 octet | ||
| 992 | * +----------------+----------------+----------------+----------------+ | ||
| 993 | * | id | _EVT_CUR_POS | reserved | 4 | ||
| 994 | * +----------------+----------------+----------------+----------------+ | ||
| 995 | * | reserved | 8 | ||
| 996 | * +----------------+----------------+----------------+----------------+ | ||
| 997 | * | position low 32-bit | 12 | ||
| 998 | * +----------------+----------------+----------------+----------------+ | ||
| 999 | * | position high 32-bit | 16 | ||
| 1000 | * +----------------+----------------+----------------+----------------+ | ||
| 1001 | * | reserved | 20 | ||
| 1002 | * +----------------+----------------+----------------+----------------+ | ||
| 1003 | * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| | ||
| 1004 | * +----------------+----------------+----------------+----------------+ | ||
| 1005 | * | reserved | 64 | ||
| 1006 | * +----------------+----------------+----------------+----------------+ | ||
| 1007 | * | ||
| 1008 | * position - current value of the stream's playback/capture position, in octets | ||
| 1009 | * | ||
| 770 | */ | 1010 | */ |
| 771 | 1011 | ||
| 1012 | struct xensnd_cur_pos_evt { | ||
| 1013 | uint64_t position; | ||
| 1014 | }; | ||
| 1015 | |||
| 772 | struct xensnd_req { | 1016 | struct xensnd_req { |
| 773 | uint16_t id; | 1017 | uint16_t id; |
| 774 | uint8_t operation; | 1018 | uint8_t operation; |
| @@ -776,7 +1020,9 @@ struct xensnd_req { | |||
| 776 | union { | 1020 | union { |
| 777 | struct xensnd_open_req open; | 1021 | struct xensnd_open_req open; |
| 778 | struct xensnd_rw_req rw; | 1022 | struct xensnd_rw_req rw; |
| 779 | uint8_t reserved[24]; | 1023 | struct xensnd_trigger_req trigger; |
| 1024 | struct xensnd_query_hw_param hw_param; | ||
| 1025 | uint8_t reserved[56]; | ||
| 780 | } op; | 1026 | } op; |
| 781 | }; | 1027 | }; |
| 782 | 1028 | ||
| @@ -785,9 +1031,53 @@ struct xensnd_resp { | |||
| 785 | uint8_t operation; | 1031 | uint8_t operation; |
| 786 | uint8_t reserved; | 1032 | uint8_t reserved; |
| 787 | int32_t status; | 1033 | int32_t status; |
| 788 | uint8_t reserved1[24]; | 1034 | union { |
| 1035 | struct xensnd_query_hw_param hw_param; | ||
| 1036 | uint8_t reserved1[56]; | ||
| 1037 | } resp; | ||
| 1038 | }; | ||
| 1039 | |||
| 1040 | struct xensnd_evt { | ||
| 1041 | uint16_t id; | ||
| 1042 | uint8_t type; | ||
| 1043 | uint8_t reserved[5]; | ||
| 1044 | union { | ||
| 1045 | struct xensnd_cur_pos_evt cur_pos; | ||
| 1046 | uint8_t reserved[56]; | ||
| 1047 | } op; | ||
| 789 | }; | 1048 | }; |
| 790 | 1049 | ||
| 791 | DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); | 1050 | DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); |
| 792 | 1051 | ||
| 1052 | /* | ||
| 1053 | ****************************************************************************** | ||
| 1054 | * Back to front events delivery | ||
| 1055 | ****************************************************************************** | ||
| 1056 | * In order to deliver asynchronous events from back to front, a shared page is | ||
| 1057 | * allocated by the front and its granted reference propagated to the back via | ||
| 1058 | * XenStore entries (evt-ring-ref/evt-event-channel). | ||
| 1059 | * This page has a common header used by both front and back to synchronize | ||
| 1060 | * access to and control of the event ring buffer; the back is the producer | ||
| 1061 | * of the events and the front is the consumer. The rest of the page after | ||
| 1062 | * the header is used for event packets. | ||
| 1063 | * | ||
| 1064 | * Upon reception of one or more events, the front may confirm their reception | ||
| 1065 | * either per event, per group of events, or not at all. | ||
| 1066 | */ | ||
| 1067 | |||
| 1068 | struct xensnd_event_page { | ||
| 1069 | uint32_t in_cons; | ||
| 1070 | uint32_t in_prod; | ||
| 1071 | uint8_t reserved[56]; | ||
| 1072 | }; | ||
| 1073 | |||
| 1074 | #define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE | ||
| 1075 | #define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page)) | ||
| 1076 | #define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS) | ||
| 1077 | #define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt)) | ||
| 1078 | #define XENSND_IN_RING(page) \ | ||
| 1079 | ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS)) | ||
| 1080 | #define XENSND_IN_RING_REF(page, idx) \ | ||
| 1081 | (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN]) | ||
| 1082 | |||
| 793 | #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ | 1083 | #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */ |
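To make the event-ring layout above concrete, here is a minimal consumer-side sketch (an editor's illustration, not part of the patch): sdev_evtchnl, handle_cur_pos() and the interrupt wiring are hypothetical, XENSND_EVT_CUR_POS is assumed to be the event type constant defined earlier in this header, and the barrier usage follows the usual Xen ring convention.

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <asm/barrier.h>
	#include <xen/interface/io/sndif.h>

	/* Hypothetical per-stream bookkeeping; only what the sketch needs. */
	struct sdev_evtchnl {
		struct xensnd_event_page *page;	/* mapped evt-ring-ref page */
	};

	static void handle_cur_pos(uint16_t id, uint64_t position)
	{
		pr_debug("stream %u advanced to %llu octets\n",
			 id, (unsigned long long)position);
	}

	static irqreturn_t evt_interrupt(int irq, void *dev_id)
	{
		struct sdev_evtchnl *ch = dev_id;
		struct xensnd_event_page *page = ch->page;
		struct xensnd_evt *event;
		uint32_t cons, prod;

		prod = page->in_prod;
		virt_rmb();		/* see event bodies before the index */
		cons = page->in_cons;

		while (cons != prod) {
			event = &XENSND_IN_RING_REF(page, cons);
			if (event->type == XENSND_EVT_CUR_POS)
				handle_cur_pos(event->id,
					       event->op.cur_pos.position);
			cons++;
		}

		page->in_cons = cons;	/* confirm the whole group at once */
		virt_wmb();
		return IRQ_HANDLED;
	}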
diff --git a/kernel/fork.c b/kernel/fork.c index 242c8c93d285..a5d21c42acfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
| 216 | if (!s) | 216 | if (!s) |
| 217 | continue; | 217 | continue; |
| 218 | 218 | ||
| 219 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 220 | /* Clear stale pointers from reused stack. */ | 219 | /* Clear stale pointers from reused stack. */ |
| 221 | memset(s->addr, 0, THREAD_SIZE); | 220 | memset(s->addr, 0, THREAD_SIZE); |
| 222 | #endif | 221 | |
| 223 | tsk->stack_vm_area = s; | 222 | tsk->stack_vm_area = s; |
| 224 | return s->addr; | 223 | return s->addr; |
| 225 | } | 224 | } |
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c index fdac27588d60..83958c814439 100644 --- a/kernel/livepatch/shadow.c +++ b/kernel/livepatch/shadow.c | |||
| @@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id) | |||
| 113 | } | 113 | } |
| 114 | EXPORT_SYMBOL_GPL(klp_shadow_get); | 114 | EXPORT_SYMBOL_GPL(klp_shadow_get); |
| 115 | 115 | ||
| 116 | static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 116 | static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, |
| 117 | size_t size, gfp_t gfp_flags, bool warn_on_exist) | 117 | size_t size, gfp_t gfp_flags, |
| 118 | klp_shadow_ctor_t ctor, void *ctor_data, | ||
| 119 | bool warn_on_exist) | ||
| 118 | { | 120 | { |
| 119 | struct klp_shadow *new_shadow; | 121 | struct klp_shadow *new_shadow; |
| 120 | void *shadow_data; | 122 | void *shadow_data; |
| @@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | |||
| 125 | if (shadow_data) | 127 | if (shadow_data) |
| 126 | goto exists; | 128 | goto exists; |
| 127 | 129 | ||
| 128 | /* Allocate a new shadow variable for use inside the lock below */ | 130 | /* |
| 131 | * Allocate a new shadow variable. Fill it with zeroes by default. | ||
| 132 | * More complex initialization can be done by the @ctor function, but it is | ||
| 133 | * called only when the buffer is really used (under klp_shadow_lock). | ||
| 134 | */ | ||
| 129 | new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); | 135 | new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); |
| 130 | if (!new_shadow) | 136 | if (!new_shadow) |
| 131 | return NULL; | 137 | return NULL; |
| 132 | 138 | ||
| 133 | new_shadow->obj = obj; | ||
| 134 | new_shadow->id = id; | ||
| 135 | |||
| 136 | /* Initialize the shadow variable if data provided */ | ||
| 137 | if (data) | ||
| 138 | memcpy(new_shadow->data, data, size); | ||
| 139 | |||
| 140 | /* Look for <obj, id> again under the lock */ | 139 | /* Look for <obj, id> again under the lock */ |
| 141 | spin_lock_irqsave(&klp_shadow_lock, flags); | 140 | spin_lock_irqsave(&klp_shadow_lock, flags); |
| 142 | shadow_data = klp_shadow_get(obj, id); | 141 | shadow_data = klp_shadow_get(obj, id); |
| @@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | |||
| 150 | goto exists; | 149 | goto exists; |
| 151 | } | 150 | } |
| 152 | 151 | ||
| 152 | new_shadow->obj = obj; | ||
| 153 | new_shadow->id = id; | ||
| 154 | |||
| 155 | if (ctor) { | ||
| 156 | int err; | ||
| 157 | |||
| 158 | err = ctor(obj, new_shadow->data, ctor_data); | ||
| 159 | if (err) { | ||
| 160 | spin_unlock_irqrestore(&klp_shadow_lock, flags); | ||
| 161 | kfree(new_shadow); | ||
| 162 | pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n", | ||
| 163 | obj, id, err); | ||
| 164 | return NULL; | ||
| 165 | } | ||
| 166 | } | ||
| 167 | |||
| 153 | /* No <obj, id> found, so attach the newly allocated one */ | 168 | /* No <obj, id> found, so attach the newly allocated one */ |
| 154 | hash_add_rcu(klp_shadow_hash, &new_shadow->node, | 169 | hash_add_rcu(klp_shadow_hash, &new_shadow->node, |
| 155 | (unsigned long)new_shadow->obj); | 170 | (unsigned long)new_shadow->obj); |
| @@ -170,26 +185,32 @@ exists: | |||
| 170 | * klp_shadow_alloc() - allocate and add a new shadow variable | 185 | * klp_shadow_alloc() - allocate and add a new shadow variable |
| 171 | * @obj: pointer to parent object | 186 | * @obj: pointer to parent object |
| 172 | * @id: data identifier | 187 | * @id: data identifier |
| 173 | * @data: pointer to data to attach to parent | ||
| 174 | * @size: size of attached data | 188 | * @size: size of attached data |
| 175 | * @gfp_flags: GFP mask for allocation | 189 | * @gfp_flags: GFP mask for allocation |
| 190 | * @ctor: custom constructor to initialize the shadow data (optional) | ||
| 191 | * @ctor_data: pointer to any data needed by @ctor (optional) | ||
| 192 | * | ||
| 193 | * Allocates @size bytes for new shadow variable data using @gfp_flags. | ||
| 194 | * The data are zeroed by default. They are further initialized by the @ctor | ||
| 195 | * function if it is not NULL. The new shadow variable is then added | ||
| 196 | * to the global hashtable. | ||
| 176 | * | 197 | * |
| 177 | * Allocates @size bytes for new shadow variable data using @gfp_flags | 198 | * If an existing <obj, id> shadow variable can be found, this routine will |
| 178 | * and copies @size bytes from @data into the new shadow variable's own | 199 | * issue a WARN, exit early and return NULL. |
| 179 | * data space. If @data is NULL, @size bytes are still allocated, but | ||
| 180 | * no copy is performed. The new shadow variable is then added to the | ||
| 181 | * global hashtable. | ||
| 182 | * | 200 | * |
| 183 | * If an existing <obj, id> shadow variable can be found, this routine | 201 | * This function guarantees that the constructor function is called only when |
| 184 | * will issue a WARN, exit early and return NULL. | 202 | * the variable did not exist before. The cost is that @ctor is called |
| 203 | * in atomic context under a spin lock. | ||
| 185 | * | 204 | * |
| 186 | * Return: the shadow variable data element, NULL on duplicate or | 205 | * Return: the shadow variable data element, NULL on duplicate or |
| 187 | * failure. | 206 | * failure. |
| 188 | */ | 207 | */ |
| 189 | void *klp_shadow_alloc(void *obj, unsigned long id, void *data, | 208 | void *klp_shadow_alloc(void *obj, unsigned long id, |
| 190 | size_t size, gfp_t gfp_flags) | 209 | size_t size, gfp_t gfp_flags, |
| 210 | klp_shadow_ctor_t ctor, void *ctor_data) | ||
| 191 | { | 211 | { |
| 192 | return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); | 212 | return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, |
| 213 | ctor, ctor_data, true); | ||
| 193 | } | 214 | } |
| 194 | EXPORT_SYMBOL_GPL(klp_shadow_alloc); | 215 | EXPORT_SYMBOL_GPL(klp_shadow_alloc); |
| 195 | 216 | ||
| @@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc); | |||
| 197 | * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable | 218 | * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable |
| 198 | * @obj: pointer to parent object | 219 | * @obj: pointer to parent object |
| 199 | * @id: data identifier | 220 | * @id: data identifier |
| 200 | * @data: pointer to data to attach to parent | ||
| 201 | * @size: size of attached data | 221 | * @size: size of attached data |
| 202 | * @gfp_flags: GFP mask for allocation | 222 | * @gfp_flags: GFP mask for allocation |
| 223 | * @ctor: custom constructor to initialize the shadow data (optional) | ||
| 224 | * @ctor_data: pointer to any data needed by @ctor (optional) | ||
| 203 | * | 225 | * |
| 204 | * Returns a pointer to existing shadow data if an <obj, id> shadow | 226 | * Returns a pointer to existing shadow data if an <obj, id> shadow |
| 205 | * variable is already present. Otherwise, it creates a new shadow | 227 | * variable is already present. Otherwise, it creates a new shadow |
| 206 | * variable like klp_shadow_alloc(). | 228 | * variable like klp_shadow_alloc(). |
| 207 | * | 229 | * |
| 208 | * This function guarantees that only one shadow variable exists with | 230 | * This function guarantees that only one shadow variable exists with the given |
| 209 | * the given @id for the given @obj. It also guarantees that the shadow | 231 | * @id for the given @obj. It also guarantees that the constructor function |
| 210 | * variable will be initialized by the given @data only when it did not | 232 | * will be called only when the variable did not exist before. The cost is |
| 211 | * exist before. | 233 | * that @ctor is called in atomic context under a spin lock. |
| 212 | * | 234 | * |
| 213 | * Return: the shadow variable data element, NULL on failure. | 235 | * Return: the shadow variable data element, NULL on failure. |
| 214 | */ | 236 | */ |
| 215 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, | 237 | void *klp_shadow_get_or_alloc(void *obj, unsigned long id, |
| 216 | size_t size, gfp_t gfp_flags) | 238 | size_t size, gfp_t gfp_flags, |
| 239 | klp_shadow_ctor_t ctor, void *ctor_data) | ||
| 217 | { | 240 | { |
| 218 | return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); | 241 | return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, |
| 242 | ctor, ctor_data, false); | ||
| 219 | } | 243 | } |
| 220 | EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); | 244 | EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); |
| 221 | 245 | ||
| 246 | static void klp_shadow_free_struct(struct klp_shadow *shadow, | ||
| 247 | klp_shadow_dtor_t dtor) | ||
| 248 | { | ||
| 249 | hash_del_rcu(&shadow->node); | ||
| 250 | if (dtor) | ||
| 251 | dtor(shadow->obj, shadow->data); | ||
| 252 | kfree_rcu(shadow, rcu_head); | ||
| 253 | } | ||
| 254 | |||
| 222 | /** | 255 | /** |
| 223 | * klp_shadow_free() - detach and free a <obj, id> shadow variable | 256 | * klp_shadow_free() - detach and free a <obj, id> shadow variable |
| 224 | * @obj: pointer to parent object | 257 | * @obj: pointer to parent object |
| 225 | * @id: data identifier | 258 | * @id: data identifier |
| 259 | * @dtor: custom callback that can be used to unregister the variable | ||
| 260 | * and/or free data that the shadow variable points to (optional) | ||
| 226 | * | 261 | * |
| 227 | * This function releases the memory for this <obj, id> shadow variable | 262 | * This function releases the memory for this <obj, id> shadow variable |
| 228 | * instance, callers should stop referencing it accordingly. | 263 | * instance, callers should stop referencing it accordingly. |
| 229 | */ | 264 | */ |
| 230 | void klp_shadow_free(void *obj, unsigned long id) | 265 | void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) |
| 231 | { | 266 | { |
| 232 | struct klp_shadow *shadow; | 267 | struct klp_shadow *shadow; |
| 233 | unsigned long flags; | 268 | unsigned long flags; |
| @@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id) | |||
| 239 | (unsigned long)obj) { | 274 | (unsigned long)obj) { |
| 240 | 275 | ||
| 241 | if (klp_shadow_match(shadow, obj, id)) { | 276 | if (klp_shadow_match(shadow, obj, id)) { |
| 242 | hash_del_rcu(&shadow->node); | 277 | klp_shadow_free_struct(shadow, dtor); |
| 243 | kfree_rcu(shadow, rcu_head); | ||
| 244 | break; | 278 | break; |
| 245 | } | 279 | } |
| 246 | } | 280 | } |
| @@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free); | |||
| 252 | /** | 286 | /** |
| 253 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables | 287 | * klp_shadow_free_all() - detach and free all <*, id> shadow variables |
| 254 | * @id: data identifier | 288 | * @id: data identifier |
| 289 | * @dtor: custom callback that can be used to unregister the variable | ||
| 290 | * and/or free data that the shadow variable points to (optional) | ||
| 255 | * | 291 | * |
| 256 | * This function releases the memory for all <*, id> shadow variable | 292 | * This function releases the memory for all <*, id> shadow variable |
| 257 | * instances, callers should stop referencing them accordingly. | 293 | * instances, callers should stop referencing them accordingly. |
| 258 | */ | 294 | */ |
| 259 | void klp_shadow_free_all(unsigned long id) | 295 | void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) |
| 260 | { | 296 | { |
| 261 | struct klp_shadow *shadow; | 297 | struct klp_shadow *shadow; |
| 262 | unsigned long flags; | 298 | unsigned long flags; |
| @@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id) | |||
| 266 | 302 | ||
| 267 | /* Delete all <*, id> from hash */ | 303 | /* Delete all <*, id> from hash */ |
| 268 | hash_for_each(klp_shadow_hash, i, shadow, node) { | 304 | hash_for_each(klp_shadow_hash, i, shadow, node) { |
| 269 | if (klp_shadow_match(shadow, shadow->obj, id)) { | 305 | if (klp_shadow_match(shadow, shadow->obj, id)) |
| 270 | hash_del_rcu(&shadow->node); | 306 | klp_shadow_free_struct(shadow, dtor); |
| 271 | kfree_rcu(shadow, rcu_head); | ||
| 272 | } | ||
| 273 | } | 307 | } |
| 274 | 308 | ||
| 275 | spin_unlock_irqrestore(&klp_shadow_lock, flags); | 309 | spin_unlock_irqrestore(&klp_shadow_lock, flags); |
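For orientation, a sketch of how a livepatch might use the reworked API: the constructor runs only for a freshly created <obj, id> pair (in atomic context, as the kernel-doc above warns), and the destructor is passed to klp_shadow_free(). The SV_LEAK id, the callbacks and example_attach()/example_detach() are illustrative only; the signatures follow the declarations changed by this patch.

	#include <linux/livepatch.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	#define SV_LEAK	1	/* arbitrary shadow variable id for illustration */

	/* Runs only if <obj, id> did not exist yet; called under klp_shadow_lock. */
	static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
	{
		void **shadow_leak = shadow_data;

		*shadow_leak = ctor_data;	/* remember the tracked pointer */
		return 0;
	}

	/* Invoked by klp_shadow_free()/klp_shadow_free_all(). */
	static void shadow_leak_dtor(void *obj, void *shadow_data)
	{
		void *d = *(void **)shadow_data;

		kfree(d);
	}

	static int example_attach(void *obj, void *leak)
	{
		void **shadow;

		/* Attach a pointer-sized shadow variable to @obj; GFP_ATOMIC
		 * because this sketch assumes it may run with locks held. */
		shadow = klp_shadow_alloc(obj, SV_LEAK, sizeof(leak), GFP_ATOMIC,
					  shadow_leak_ctor, leak);
		return shadow ? 0 : -ENOMEM;
	}

	static void example_detach(void *obj)
	{
		klp_shadow_free(obj, SV_LEAK, shadow_leak_dtor);
	}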
diff --git a/lib/textsearch.c b/lib/textsearch.c index 0b79908dfe89..5939549c0e7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c | |||
| @@ -10,7 +10,10 @@ | |||
| 10 | * Pablo Neira Ayuso <pablo@netfilter.org> | 10 | * Pablo Neira Ayuso <pablo@netfilter.org> |
| 11 | * | 11 | * |
| 12 | * ========================================================================== | 12 | * ========================================================================== |
| 13 | * | 13 | */ |
| 14 | |||
| 15 | /** | ||
| 16 | * DOC: ts_intro | ||
| 14 | * INTRODUCTION | 17 | * INTRODUCTION |
| 15 | * | 18 | * |
| 16 | * The textsearch infrastructure provides text searching facilities for | 19 | * The textsearch infrastructure provides text searching facilities for |
| @@ -19,7 +22,9 @@ | |||
| 19 | * | 22 | * |
| 20 | * ARCHITECTURE | 23 | * ARCHITECTURE |
| 21 | * | 24 | * |
| 22 | * User | 25 | * .. code-block:: none |
| 26 | * | ||
| 27 | * User | ||
| 23 | * +----------------+ | 28 | * +----------------+ |
| 24 | * | finish()|<--------------(6)-----------------+ | 29 | * | finish()|<--------------(6)-----------------+ |
| 25 | * |get_next_block()|<--------------(5)---------------+ | | 30 | * |get_next_block()|<--------------(5)---------------+ | |
| @@ -33,21 +38,21 @@ | |||
| 33 | * | (3)|----->| find()/next() |-----------+ | | 38 | * | (3)|----->| find()/next() |-----------+ | |
| 34 | * | (7)|----->| destroy() |----------------------+ | 39 | * | (7)|----->| destroy() |----------------------+ |
| 35 | * +----------------+ +---------------+ | 40 | * +----------------+ +---------------+ |
| 36 | * | 41 | * |
| 37 | * (1) User configures a search by calling _prepare() specifying the | 42 | * (1) User configures a search by calling textsearch_prepare() specifying |
| 38 | * search parameters such as the pattern and algorithm name. | 43 | * the search parameters such as the pattern and algorithm name. |
| 39 | * (2) Core requests the algorithm to allocate and initialize a search | 44 | * (2) Core requests the algorithm to allocate and initialize a search |
| 40 | * configuration according to the specified parameters. | 45 | * configuration according to the specified parameters. |
| 41 | * (3) User starts the search(es) by calling _find() or _next() to | 46 | * (3) User starts the search(es) by calling textsearch_find() or |
| 42 | * fetch subsequent occurrences. A state variable is provided | 47 | * textsearch_next() to fetch subsequent occurrences. A state variable |
| 43 | * to the algorithm to store persistent variables. | 48 | * is provided to the algorithm to store persistent variables. |
| 44 | * (4) Core eventually resets the search offset and forwards the find() | 49 | * (4) Core eventually resets the search offset and forwards the find() |
| 45 | * request to the algorithm. | 50 | * request to the algorithm. |
| 46 | * (5) Algorithm calls get_next_block() provided by the user continuously | 51 | * (5) Algorithm calls get_next_block() provided by the user continuously |
| 47 | * to fetch the data to be searched in block by block. | 52 | * to fetch the data to be searched in block by block. |
| 48 | * (6) Algorithm invokes finish() after the last call to get_next_block | 53 | * (6) Algorithm invokes finish() after the last call to get_next_block |
| 49 | * to clean up any leftovers from get_next_block. (Optional) | 54 | * to clean up any leftovers from get_next_block. (Optional) |
| 50 | * (7) User destroys the configuration by calling _destroy(). | 55 | * (7) User destroys the configuration by calling textsearch_destroy(). |
| 51 | * (8) Core notifies the algorithm to destroy algorithm specific | 56 | * (8) Core notifies the algorithm to destroy algorithm specific |
| 52 | * allocations. (Optional) | 57 | * allocations. (Optional) |
| 53 | * | 58 | * |
| @@ -62,9 +67,10 @@ | |||
| 62 | * amount of times and even in parallel as long as a separate struct | 67 | * amount of times and even in parallel as long as a separate struct |
| 63 | * ts_state variable is provided to every instance. | 68 | * ts_state variable is provided to every instance. |
| 64 | * | 69 | * |
| 65 | * The actual search is performed by either calling textsearch_find_- | 70 | * The actual search is performed by either calling |
| 66 | * continuous() for linear data or by providing an own get_next_block() | 71 | * textsearch_find_continuous() for linear data or by providing |
| 67 | * implementation and calling textsearch_find(). Both functions return | 72 | * your own get_next_block() implementation and |
| 73 | * calling textsearch_find(). Both functions return | ||
| 68 | * the position of the first occurrence of the pattern or UINT_MAX if | 74 | * the position of the first occurrence of the pattern or UINT_MAX if |
| 69 | * no match was found. Subsequent occurrences can be found by calling | 75 | * no match was found. Subsequent occurrences can be found by calling |
| 70 | * textsearch_next() regardless of the linearity of the data. | 76 | * textsearch_next() regardless of the linearity of the data. |
| @@ -72,7 +78,7 @@ | |||
| 72 | * Once you're done using a configuration it must be given back via | 78 | * Once you're done using a configuration it must be given back via |
| 73 | * textsearch_destroy. | 79 | * textsearch_destroy. |
| 74 | * | 80 | * |
| 75 | * EXAMPLE | 81 | * EXAMPLE:: |
| 76 | * | 82 | * |
| 77 | * int pos; | 83 | * int pos; |
| 78 | * struct ts_config *conf; | 84 | * struct ts_config *conf; |
| @@ -87,13 +93,13 @@ | |||
| 87 | * goto errout; | 93 | * goto errout; |
| 88 | * } | 94 | * } |
| 89 | * | 95 | * |
| 90 | * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); | 96 | * pos = textsearch_find_continuous(conf, \&state, example, strlen(example)); |
| 91 | * if (pos != UINT_MAX) | 97 | * if (pos != UINT_MAX) |
| 92 | * panic("Oh my god, dancing chickens at %d\n", pos); | 98 | * panic("Oh my god, dancing chickens at \%d\n", pos); |
| 93 | * | 99 | * |
| 94 | * textsearch_destroy(conf); | 100 | * textsearch_destroy(conf); |
| 95 | * ========================================================================== | ||
| 96 | */ | 101 | */ |
| 102 | /* ========================================================================== */ | ||
| 97 | 103 | ||
| 98 | #include <linux/module.h> | 104 | #include <linux/module.h> |
| 99 | #include <linux/types.h> | 105 | #include <linux/types.h> |
| @@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst, | |||
| 225 | * | 231 | * |
| 226 | * Returns the position of first occurrence of the pattern or | 232 | * Returns the position of first occurrence of the pattern or |
| 227 | * %UINT_MAX if no occurrence was found. | 233 | * %UINT_MAX if no occurrence was found. |
| 228 | */ | 234 | */ |
| 229 | unsigned int textsearch_find_continuous(struct ts_config *conf, | 235 | unsigned int textsearch_find_continuous(struct ts_config *conf, |
| 230 | struct ts_state *state, | 236 | struct ts_state *state, |
| 231 | const void *data, unsigned int len) | 237 | const void *data, unsigned int len) |
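Since the kernel-doc example above calls textsearch_find_continuous() only once, here is a complementary sketch (an editor's illustration, not from the patch) that walks all occurrences with textsearch_next(); count_matches() is a hypothetical helper.

	#include <linux/textsearch.h>
	#include <linux/kernel.h>
	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/gfp.h>

	/* Count every occurrence of @pattern in a linear buffer. */
	static int count_matches(const char *pattern, const void *data,
				 unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;
		int count = 0;

		conf = textsearch_prepare("kmp", pattern, strlen(pattern),
					  GFP_KERNEL, TS_AUTOLOAD);
		if (IS_ERR(conf))
			return PTR_ERR(conf);

		/* First hit via the continuous helper, the rest via
		 * textsearch_next() on the same state. */
		for (pos = textsearch_find_continuous(conf, &state, data, len);
		     pos != UINT_MAX;
		     pos = textsearch_next(conf, &state))
			count++;

		textsearch_destroy(conf);
		return count;
	}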
diff --git a/mm/filemap.c b/mm/filemap.c index 9276bdb2343c..0604cb02e6f3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) | |||
| 786 | VM_BUG_ON_PAGE(!PageLocked(new), new); | 786 | VM_BUG_ON_PAGE(!PageLocked(new), new); |
| 787 | VM_BUG_ON_PAGE(new->mapping, new); | 787 | VM_BUG_ON_PAGE(new->mapping, new); |
| 788 | 788 | ||
| 789 | error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); | 789 | error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); |
| 790 | if (!error) { | 790 | if (!error) { |
| 791 | struct address_space *mapping = old->mapping; | 791 | struct address_space *mapping = old->mapping; |
| 792 | void (*freepage)(struct page *); | 792 | void (*freepage)(struct page *); |
| @@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page, | |||
| 842 | return error; | 842 | return error; |
| 843 | } | 843 | } |
| 844 | 844 | ||
| 845 | error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); | 845 | error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); |
| 846 | if (error) { | 846 | if (error) { |
| 847 | if (!huge) | 847 | if (!huge) |
| 848 | mem_cgroup_cancel_charge(page, memcg, false); | 848 | mem_cgroup_cancel_charge(page, memcg, false); |
| @@ -1585,8 +1585,7 @@ no_page: | |||
| 1585 | if (fgp_flags & FGP_ACCESSED) | 1585 | if (fgp_flags & FGP_ACCESSED) |
| 1586 | __SetPageReferenced(page); | 1586 | __SetPageReferenced(page); |
| 1587 | 1587 | ||
| 1588 | err = add_to_page_cache_lru(page, mapping, offset, | 1588 | err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); |
| 1589 | gfp_mask & GFP_RECLAIM_MASK); | ||
| 1590 | if (unlikely(err)) { | 1589 | if (unlikely(err)) { |
| 1591 | put_page(page); | 1590 | put_page(page); |
| 1592 | page = NULL; | 1591 | page = NULL; |
| @@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) | |||
| 2387 | if (!page) | 2386 | if (!page) |
| 2388 | return -ENOMEM; | 2387 | return -ENOMEM; |
| 2389 | 2388 | ||
| 2390 | ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); | 2389 | ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); |
| 2391 | if (ret == 0) | 2390 | if (ret == 0) |
| 2392 | ret = mapping->a_ops->readpage(file, page); | 2391 | ret = mapping->a_ops->readpage(file, page); |
| 2393 | else if (ret == -EEXIST) | 2392 | else if (ret == -EEXIST) |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 14ed6ee5e02f..a3a1815f8e11 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) | |||
| 2925 | pmde = maybe_pmd_mkwrite(pmde, vma); | 2925 | pmde = maybe_pmd_mkwrite(pmde, vma); |
| 2926 | 2926 | ||
| 2927 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); | 2927 | flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); |
| 2928 | page_add_anon_rmap(new, vma, mmun_start, true); | 2928 | if (PageAnon(new)) |
| 2929 | page_add_anon_rmap(new, vma, mmun_start, true); | ||
| 2930 | else | ||
| 2931 | page_add_file_rmap(new, true); | ||
| 2929 | set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); | 2932 | set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); |
| 2930 | if (vma->vm_flags & VM_LOCKED) | 2933 | if (vma->vm_flags & VM_LOCKED) |
| 2931 | mlock_vma_page(new); | 2934 | mlock_vma_page(new); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e074f7c637aa..2bd3df3d101a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, | |||
| 2192 | { | 2192 | { |
| 2193 | struct memcg_kmem_cache_create_work *cw; | 2193 | struct memcg_kmem_cache_create_work *cw; |
| 2194 | 2194 | ||
| 2195 | cw = kmalloc(sizeof(*cw), GFP_NOWAIT); | 2195 | cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); |
| 2196 | if (!cw) | 2196 | if (!cw) |
| 2197 | return; | 2197 | return; |
| 2198 | 2198 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index f65dd69e1fd1..568433023831 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
| 472 | pslot = radix_tree_lookup_slot(&mapping->i_pages, | 472 | pslot = radix_tree_lookup_slot(&mapping->i_pages, |
| 473 | page_index(page)); | 473 | page_index(page)); |
| 474 | 474 | ||
| 475 | expected_count += 1 + page_has_private(page); | 475 | expected_count += hpage_nr_pages(page) + page_has_private(page); |
| 476 | if (page_count(page) != expected_count || | 476 | if (page_count(page) != expected_count || |
| 477 | radix_tree_deref_slot_protected(pslot, | 477 | radix_tree_deref_slot_protected(pslot, |
| 478 | &mapping->i_pages.xa_lock) != page) { | 478 | &mapping->i_pages.xa_lock) != page) { |
| @@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
| 505 | */ | 505 | */ |
| 506 | newpage->index = page->index; | 506 | newpage->index = page->index; |
| 507 | newpage->mapping = page->mapping; | 507 | newpage->mapping = page->mapping; |
| 508 | get_page(newpage); /* add cache reference */ | 508 | page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ |
| 509 | if (PageSwapBacked(page)) { | 509 | if (PageSwapBacked(page)) { |
| 510 | __SetPageSwapBacked(newpage); | 510 | __SetPageSwapBacked(newpage); |
| 511 | if (PageSwapCache(page)) { | 511 | if (PageSwapCache(page)) { |
| @@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
| 524 | } | 524 | } |
| 525 | 525 | ||
| 526 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); | 526 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); |
| 527 | if (PageTransHuge(page)) { | ||
| 528 | int i; | ||
| 529 | int index = page_index(page); | ||
| 530 | |||
| 531 | for (i = 0; i < HPAGE_PMD_NR; i++) { | ||
| 532 | pslot = radix_tree_lookup_slot(&mapping->i_pages, | ||
| 533 | index + i); | ||
| 534 | radix_tree_replace_slot(&mapping->i_pages, pslot, | ||
| 535 | newpage + i); | ||
| 536 | } | ||
| 537 | } else { | ||
| 538 | radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); | ||
| 539 | } | ||
| 527 | 540 | ||
| 528 | /* | 541 | /* |
| 529 | * Drop cache reference from old page by unfreezing | 542 | * Drop cache reference from old page by unfreezing |
| 530 | * to one less reference. | 543 | * to one less reference. |
| 531 | * We know this isn't the last reference. | 544 | * We know this isn't the last reference. |
| 532 | */ | 545 | */ |
| 533 | page_ref_unfreeze(page, expected_count - 1); | 546 | page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); |
| 534 | 547 | ||
| 535 | xa_unlock(&mapping->i_pages); | 548 | xa_unlock(&mapping->i_pages); |
| 536 | /* Leave irq disabled to prevent preemption while updating stats */ | 549 | /* Leave irq disabled to prevent preemption while updating stats */ |
| @@ -1622,6 +1635,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, | |||
| 1622 | current_node = NUMA_NO_NODE; | 1635 | current_node = NUMA_NO_NODE; |
| 1623 | } | 1636 | } |
| 1624 | out_flush: | 1637 | out_flush: |
| 1638 | if (list_empty(&pagelist)) | ||
| 1639 | return err; | ||
| 1640 | |||
| 1625 | /* Make sure we do not overwrite the existing error */ | 1641 | /* Make sure we do not overwrite the existing error */ |
| 1626 | err1 = do_move_pages_to_node(mm, &pagelist, current_node); | 1642 | err1 = do_move_pages_to_node(mm, &pagelist, current_node); |
| 1627 | if (!err1) | 1643 | if (!err1) |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5c1a3279e63f..337c6afb3345 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page) | |||
| 2502 | if (mapping && mapping_cap_account_dirty(mapping)) { | 2502 | if (mapping && mapping_cap_account_dirty(mapping)) { |
| 2503 | struct inode *inode = mapping->host; | 2503 | struct inode *inode = mapping->host; |
| 2504 | struct bdi_writeback *wb; | 2504 | struct bdi_writeback *wb; |
| 2505 | bool locked; | 2505 | struct wb_lock_cookie cookie = {}; |
| 2506 | 2506 | ||
| 2507 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2507 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
| 2508 | current->nr_dirtied--; | 2508 | current->nr_dirtied--; |
| 2509 | dec_node_page_state(page, NR_DIRTIED); | 2509 | dec_node_page_state(page, NR_DIRTIED); |
| 2510 | dec_wb_stat(wb, WB_DIRTIED); | 2510 | dec_wb_stat(wb, WB_DIRTIED); |
| 2511 | unlocked_inode_to_wb_end(inode, locked); | 2511 | unlocked_inode_to_wb_end(inode, &cookie); |
| 2512 | } | 2512 | } |
| 2513 | } | 2513 | } |
| 2514 | EXPORT_SYMBOL(account_page_redirty); | 2514 | EXPORT_SYMBOL(account_page_redirty); |
| @@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page) | |||
| 2614 | if (mapping_cap_account_dirty(mapping)) { | 2614 | if (mapping_cap_account_dirty(mapping)) { |
| 2615 | struct inode *inode = mapping->host; | 2615 | struct inode *inode = mapping->host; |
| 2616 | struct bdi_writeback *wb; | 2616 | struct bdi_writeback *wb; |
| 2617 | bool locked; | 2617 | struct wb_lock_cookie cookie = {}; |
| 2618 | 2618 | ||
| 2619 | lock_page_memcg(page); | 2619 | lock_page_memcg(page); |
| 2620 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2620 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
| 2621 | 2621 | ||
| 2622 | if (TestClearPageDirty(page)) | 2622 | if (TestClearPageDirty(page)) |
| 2623 | account_page_cleaned(page, mapping, wb); | 2623 | account_page_cleaned(page, mapping, wb); |
| 2624 | 2624 | ||
| 2625 | unlocked_inode_to_wb_end(inode, locked); | 2625 | unlocked_inode_to_wb_end(inode, &cookie); |
| 2626 | unlock_page_memcg(page); | 2626 | unlock_page_memcg(page); |
| 2627 | } else { | 2627 | } else { |
| 2628 | ClearPageDirty(page); | 2628 | ClearPageDirty(page); |
| @@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page) | |||
| 2654 | if (mapping && mapping_cap_account_dirty(mapping)) { | 2654 | if (mapping && mapping_cap_account_dirty(mapping)) { |
| 2655 | struct inode *inode = mapping->host; | 2655 | struct inode *inode = mapping->host; |
| 2656 | struct bdi_writeback *wb; | 2656 | struct bdi_writeback *wb; |
| 2657 | bool locked; | 2657 | struct wb_lock_cookie cookie = {}; |
| 2658 | 2658 | ||
| 2659 | /* | 2659 | /* |
| 2660 | * Yes, Virginia, this is indeed insane. | 2660 | * Yes, Virginia, this is indeed insane. |
| @@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page) | |||
| 2691 | * always locked coming in here, so we get the desired | 2691 | * always locked coming in here, so we get the desired |
| 2692 | * exclusion. | 2692 | * exclusion. |
| 2693 | */ | 2693 | */ |
| 2694 | wb = unlocked_inode_to_wb_begin(inode, &locked); | 2694 | wb = unlocked_inode_to_wb_begin(inode, &cookie); |
| 2695 | if (TestClearPageDirty(page)) { | 2695 | if (TestClearPageDirty(page)) { |
| 2696 | dec_lruvec_page_state(page, NR_FILE_DIRTY); | 2696 | dec_lruvec_page_state(page, NR_FILE_DIRTY); |
| 2697 | dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); | 2697 | dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); |
| 2698 | dec_wb_stat(wb, WB_RECLAIMABLE); | 2698 | dec_wb_stat(wb, WB_RECLAIMABLE); |
| 2699 | ret = 1; | 2699 | ret = 1; |
| 2700 | } | 2700 | } |
| 2701 | unlocked_inode_to_wb_end(inode, locked); | 2701 | unlocked_inode_to_wb_end(inode, &cookie); |
| 2702 | return ret; | 2702 | return ret; |
| 2703 | } | 2703 | } |
| 2704 | return TestClearPageDirty(page); | 2704 | return TestClearPageDirty(page); |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
| 1374 | if (!pvmw.pte && (flags & TTU_MIGRATION)) { | 1374 | if (!pvmw.pte && (flags & TTU_MIGRATION)) { |
| 1375 | VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); | 1375 | VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); |
| 1376 | 1376 | ||
| 1377 | if (!PageAnon(page)) | ||
| 1378 | continue; | ||
| 1379 | |||
| 1380 | set_pmd_migration_entry(&pvmw, page); | 1377 | set_pmd_migration_entry(&pvmw, page); |
| 1381 | continue; | 1378 | continue; |
| 1382 | } | 1379 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b920ce3ae02..9b697323a88c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -303,7 +303,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone | |||
| 303 | /* | 303 | /* |
| 304 | * Add a shrinker callback to be called from the vm. | 304 | * Add a shrinker callback to be called from the vm. |
| 305 | */ | 305 | */ |
| 306 | int register_shrinker(struct shrinker *shrinker) | 306 | int prealloc_shrinker(struct shrinker *shrinker) |
| 307 | { | 307 | { |
| 308 | size_t size = sizeof(*shrinker->nr_deferred); | 308 | size_t size = sizeof(*shrinker->nr_deferred); |
| 309 | 309 | ||
| @@ -313,10 +313,29 @@ int register_shrinker(struct shrinker *shrinker) | |||
| 313 | shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); | 313 | shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); |
| 314 | if (!shrinker->nr_deferred) | 314 | if (!shrinker->nr_deferred) |
| 315 | return -ENOMEM; | 315 | return -ENOMEM; |
| 316 | return 0; | ||
| 317 | } | ||
| 318 | |||
| 319 | void free_prealloced_shrinker(struct shrinker *shrinker) | ||
| 320 | { | ||
| 321 | kfree(shrinker->nr_deferred); | ||
| 322 | shrinker->nr_deferred = NULL; | ||
| 323 | } | ||
| 316 | 324 | ||
| 325 | void register_shrinker_prepared(struct shrinker *shrinker) | ||
| 326 | { | ||
| 317 | down_write(&shrinker_rwsem); | 327 | down_write(&shrinker_rwsem); |
| 318 | list_add_tail(&shrinker->list, &shrinker_list); | 328 | list_add_tail(&shrinker->list, &shrinker_list); |
| 319 | up_write(&shrinker_rwsem); | 329 | up_write(&shrinker_rwsem); |
| 330 | } | ||
| 331 | |||
| 332 | int register_shrinker(struct shrinker *shrinker) | ||
| 333 | { | ||
| 334 | int err = prealloc_shrinker(shrinker); | ||
| 335 | |||
| 336 | if (err) | ||
| 337 | return err; | ||
| 338 | register_shrinker_prepared(shrinker); | ||
| 320 | return 0; | 339 | return 0; |
| 321 | } | 340 | } |
| 322 | EXPORT_SYMBOL(register_shrinker); | 341 | EXPORT_SYMBOL(register_shrinker); |
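A short usage sketch of the split API introduced above (editor's illustration; struct my_cache and its callbacks are hypothetical): allocation of ->nr_deferred is separated from publication, so a caller can preallocate while errors are still easy to unwind and later register without any failure path. register_shrinker() keeps its old behaviour by running the two phases back to back, as the hunk shows.

	#include <linux/shrinker.h>

	/* Hypothetical cache object embedding a shrinker. */
	struct my_cache {
		struct shrinker shrinker;
		/* ... cache bookkeeping ... */
	};

	static unsigned long my_cache_count(struct shrinker *s,
					    struct shrink_control *sc)
	{
		return 0;		/* nothing reclaimable in this sketch */
	}

	static unsigned long my_cache_scan(struct shrinker *s,
					   struct shrink_control *sc)
	{
		return SHRINK_STOP;
	}

	static int my_cache_setup(struct my_cache *c)
	{
		c->shrinker.count_objects = my_cache_count;
		c->shrinker.scan_objects = my_cache_scan;
		c->shrinker.seeks = DEFAULT_SEEKS;

		/* Phase 1: allocate ->nr_deferred while failure is still cheap;
		 * nothing is visible to reclaim yet. */
		return prealloc_shrinker(&c->shrinker);
	}

	static void my_cache_publish(struct my_cache *c)
	{
		/* Phase 2: cannot fail; the shrinker becomes visible to reclaim. */
		register_shrinker_prepared(&c->shrinker);
	}

	static void my_cache_setup_failed(struct my_cache *c)
	{
		/* Never published: just drop the preallocation. */
		free_prealloced_shrinker(&c->shrinker);
	}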
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 53ecda10b790..13e2ae6be620 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
| @@ -174,7 +174,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, | |||
| 174 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : | 174 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : |
| 175 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : | 175 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : |
| 176 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? | 176 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? |
| 177 | "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); | 177 | "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND"); |
| 178 | 178 | ||
| 179 | 179 | ||
| 180 | 180 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 969462ebb296..af0558b00c6c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2969,7 +2969,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb, | |||
| 2969 | } | 2969 | } |
| 2970 | EXPORT_SYMBOL(passthru_features_check); | 2970 | EXPORT_SYMBOL(passthru_features_check); |
| 2971 | 2971 | ||
| 2972 | static netdev_features_t dflt_features_check(const struct sk_buff *skb, | 2972 | static netdev_features_t dflt_features_check(struct sk_buff *skb, |
| 2973 | struct net_device *dev, | 2973 | struct net_device *dev, |
| 2974 | netdev_features_t features) | 2974 | netdev_features_t features) |
| 2975 | { | 2975 | { |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index e3e6a3e2ca22..d884d8f5f0e5 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
| @@ -839,7 +839,7 @@ void dev_mc_flush(struct net_device *dev) | |||
| 839 | EXPORT_SYMBOL(dev_mc_flush); | 839 | EXPORT_SYMBOL(dev_mc_flush); |
| 840 | 840 | ||
| 841 | /** | 841 | /** |
| 842 | * dev_mc_flush - Init multicast address list | 842 | * dev_mc_init - Init multicast address list |
| 843 | * @dev: device | 843 | * @dev: device |
| 844 | * | 844 | * |
| 845 | * Init multicast address list. | 845 | * Init multicast address list. |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7b7a14abba28..ce519861be59 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t); | |||
| 55 | static void __neigh_notify(struct neighbour *n, int type, int flags, | 55 | static void __neigh_notify(struct neighbour *n, int type, int flags, |
| 56 | u32 pid); | 56 | u32 pid); |
| 57 | static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); | 57 | static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); |
| 58 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); | 58 | static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
| 59 | struct net_device *dev); | ||
| 59 | 60 | ||
| 60 | #ifdef CONFIG_PROC_FS | 61 | #ifdef CONFIG_PROC_FS |
| 61 | static const struct file_operations neigh_stat_seq_fops; | 62 | static const struct file_operations neigh_stat_seq_fops; |
| @@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |||
| 291 | { | 292 | { |
| 292 | write_lock_bh(&tbl->lock); | 293 | write_lock_bh(&tbl->lock); |
| 293 | neigh_flush_dev(tbl, dev); | 294 | neigh_flush_dev(tbl, dev); |
| 294 | pneigh_ifdown(tbl, dev); | 295 | pneigh_ifdown_and_unlock(tbl, dev); |
| 295 | write_unlock_bh(&tbl->lock); | ||
| 296 | 296 | ||
| 297 | del_timer_sync(&tbl->proxy_timer); | 297 | del_timer_sync(&tbl->proxy_timer); |
| 298 | pneigh_queue_purge(&tbl->proxy_queue); | 298 | pneigh_queue_purge(&tbl->proxy_queue); |
| @@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, | |||
| 681 | return -ENOENT; | 681 | return -ENOENT; |
| 682 | } | 682 | } |
| 683 | 683 | ||
| 684 | static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | 684 | static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
| 685 | struct net_device *dev) | ||
| 685 | { | 686 | { |
| 686 | struct pneigh_entry *n, **np; | 687 | struct pneigh_entry *n, **np, *freelist = NULL; |
| 687 | u32 h; | 688 | u32 h; |
| 688 | 689 | ||
| 689 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | 690 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { |
| @@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |||
| 691 | while ((n = *np) != NULL) { | 692 | while ((n = *np) != NULL) { |
| 692 | if (!dev || n->dev == dev) { | 693 | if (!dev || n->dev == dev) { |
| 693 | *np = n->next; | 694 | *np = n->next; |
| 694 | if (tbl->pdestructor) | 695 | n->next = freelist; |
| 695 | tbl->pdestructor(n); | 696 | freelist = n; |
| 696 | if (n->dev) | ||
| 697 | dev_put(n->dev); | ||
| 698 | kfree(n); | ||
| 699 | continue; | 697 | continue; |
| 700 | } | 698 | } |
| 701 | np = &n->next; | 699 | np = &n->next; |
| 702 | } | 700 | } |
| 703 | } | 701 | } |
| 702 | write_unlock_bh(&tbl->lock); | ||
| 703 | while ((n = freelist)) { | ||
| 704 | freelist = n->next; | ||
| 705 | n->next = NULL; | ||
| 706 | if (tbl->pdestructor) | ||
| 707 | tbl->pdestructor(n); | ||
| 708 | if (n->dev) | ||
| 709 | dev_put(n->dev); | ||
| 710 | kfree(n); | ||
| 711 | } | ||
| 704 | return -ENOENT; | 712 | return -ENOENT; |
| 705 | } | 713 | } |
| 706 | 714 | ||
| @@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
| 2323 | 2331 | ||
| 2324 | err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); | 2332 | err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); |
| 2325 | if (!err) { | 2333 | if (!err) { |
| 2326 | if (tb[NDA_IFINDEX]) | 2334 | if (tb[NDA_IFINDEX]) { |
| 2335 | if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) | ||
| 2336 | return -EINVAL; | ||
| 2327 | filter_idx = nla_get_u32(tb[NDA_IFINDEX]); | 2337 | filter_idx = nla_get_u32(tb[NDA_IFINDEX]); |
| 2328 | 2338 | } | |
| 2329 | if (tb[NDA_MASTER]) | 2339 | if (tb[NDA_MASTER]) { |
| 2340 | if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) | ||
| 2341 | return -EINVAL; | ||
| 2330 | filter_master_idx = nla_get_u32(tb[NDA_MASTER]); | 2342 | filter_master_idx = nla_get_u32(tb[NDA_MASTER]); |
| 2331 | 2343 | } | |
| 2332 | if (filter_idx || filter_master_idx) | 2344 | if (filter_idx || filter_master_idx) |
| 2333 | flags |= NLM_F_DUMP_FILTERED; | 2345 | flags |= NLM_F_DUMP_FILTERED; |
| 2334 | } | 2346 | } |
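The neighbour.c hunk above is the classic deferred-free pattern: entries are unlinked onto a private free list while tbl->lock is held, and the pdestructor/dev_put/kfree work only runs after write_unlock_bh(), so callbacks that might sleep or take other locks never execute under the table lock. A minimal, hedged sketch of the same shape (the my_* names are illustrative, not kernel APIs):

/* Sketch only: unlink under the lock, destroy after dropping it. */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	struct my_entry *next;
	/* payload ... */
};

/* Called with *lock held; drops it before running any destructor. */
static void my_flush_and_unlock(spinlock_t *lock, struct my_entry **head)
	__releases(lock)
{
	struct my_entry *freelist = NULL, *e;

	while ((e = *head) != NULL) {
		*head = e->next;        /* unlink while protected */
		e->next = freelist;     /* park on the private list */
		freelist = e;
	}
	spin_unlock_bh(lock);

	while ((e = freelist) != NULL) {
		freelist = e->next;
		kfree(e);               /* no lock held here */
	}
}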
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 8396705deffc..40c851693f77 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
| @@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) | |||
| 91 | 91 | ||
| 92 | next_opt = memchr(opt, '#', end - opt) ?: end; | 92 | next_opt = memchr(opt, '#', end - opt) ?: end; |
| 93 | opt_len = next_opt - opt; | 93 | opt_len = next_opt - opt; |
| 94 | if (!opt_len) { | 94 | if (opt_len <= 0 || opt_len > 128) { |
| 95 | printk(KERN_WARNING | 95 | pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", |
| 96 | "Empty option to dns_resolver key\n"); | 96 | opt_len); |
| 97 | return -EINVAL; | 97 | return -EINVAL; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| @@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | bad_option_value: | 129 | bad_option_value: |
| 130 | printk(KERN_WARNING | 130 | pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", |
| 131 | "Option '%*.*s' to dns_resolver key:" | 131 | opt_nlen, opt_nlen, opt); |
| 132 | " bad/missing value\n", | ||
| 133 | opt_nlen, opt_nlen, opt); | ||
| 134 | return -EINVAL; | 132 | return -EINVAL; |
| 135 | } while (opt = next_opt + 1, opt < end); | 133 | } while (opt = next_opt + 1, opt < end); |
| 136 | } | 134 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4c11b810a447..83c73bab2c3d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -1109,6 +1109,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, | |||
| 1109 | struct ip_options_rcu *opt; | 1109 | struct ip_options_rcu *opt; |
| 1110 | struct rtable *rt; | 1110 | struct rtable *rt; |
| 1111 | 1111 | ||
| 1112 | rt = *rtp; | ||
| 1113 | if (unlikely(!rt)) | ||
| 1114 | return -EFAULT; | ||
| 1115 | |||
| 1112 | /* | 1116 | /* |
| 1113 | * setup for corking. | 1117 | * setup for corking. |
| 1114 | */ | 1118 | */ |
| @@ -1124,9 +1128,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, | |||
| 1124 | cork->flags |= IPCORK_OPT; | 1128 | cork->flags |= IPCORK_OPT; |
| 1125 | cork->addr = ipc->addr; | 1129 | cork->addr = ipc->addr; |
| 1126 | } | 1130 | } |
| 1127 | rt = *rtp; | 1131 | |
| 1128 | if (unlikely(!rt)) | ||
| 1129 | return -EFAULT; | ||
| 1130 | /* | 1132 | /* |
| 1131 | * We steal reference to this route, caller should not release it | 1133 | * We steal reference to this route, caller should not release it |
| 1132 | */ | 1134 | */ |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bccc4c270087..9ce1c726185e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -2368,6 +2368,7 @@ void tcp_write_queue_purge(struct sock *sk) | |||
| 2368 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); | 2368 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); |
| 2369 | sk_mem_reclaim(sk); | 2369 | sk_mem_reclaim(sk); |
| 2370 | tcp_clear_all_retrans_hints(tcp_sk(sk)); | 2370 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
| 2371 | tcp_sk(sk)->packets_out = 0; | ||
| 2371 | } | 2372 | } |
| 2372 | 2373 | ||
| 2373 | int tcp_disconnect(struct sock *sk, int flags) | 2374 | int tcp_disconnect(struct sock *sk, int flags) |
| @@ -2417,7 +2418,6 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
| 2417 | icsk->icsk_backoff = 0; | 2418 | icsk->icsk_backoff = 0; |
| 2418 | tp->snd_cwnd = 2; | 2419 | tp->snd_cwnd = 2; |
| 2419 | icsk->icsk_probes_out = 0; | 2420 | icsk->icsk_probes_out = 0; |
| 2420 | tp->packets_out = 0; | ||
| 2421 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 2421 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
| 2422 | tp->snd_cwnd_cnt = 0; | 2422 | tp->snd_cwnd_cnt = 0; |
| 2423 | tp->window_clamp = 0; | 2423 | tp->window_clamp = 0; |
| @@ -2813,8 +2813,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
| 2813 | #ifdef CONFIG_TCP_MD5SIG | 2813 | #ifdef CONFIG_TCP_MD5SIG |
| 2814 | case TCP_MD5SIG: | 2814 | case TCP_MD5SIG: |
| 2815 | case TCP_MD5SIG_EXT: | 2815 | case TCP_MD5SIG_EXT: |
| 2816 | /* Read the IP->Key mappings from userspace */ | 2816 | if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) |
| 2817 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); | 2817 | err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
| 2818 | else | ||
| 2819 | err = -EINVAL; | ||
| 2818 | break; | 2820 | break; |
| 2819 | #endif | 2821 | #endif |
| 2820 | case TCP_USER_TIMEOUT: | 2822 | case TCP_USER_TIMEOUT: |
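Two separate fixes meet in the tcp.c hunk: resetting packets_out inside tcp_write_queue_purge() keeps that counter consistent with the emptied write queue at every purge site rather than only in tcp_disconnect(), and the TCP_MD5SIG/TCP_MD5SIG_EXT path now refuses key changes unless the socket is closed or listening. The guard uses the usual state-set idiom, where each TCPF_* constant is 1 << TCP_*, so a single AND tests membership in a set of states. A small hedged illustration (the helper name is made up; the constants follow tcp_states.h):

/* Sketch: one-bit-per-state membership test (TCPF_x == 1 << TCP_x). */
#include <linux/types.h>
#include <net/tcp_states.h>

static inline bool md5_keys_may_change(int sk_state)
{
	/* allowed only while no MD5-signed segments can be in flight */
	return ((1 << sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) != 0;
}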
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0fbd3ee26165..40261cb68e83 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) | |||
| 183 | } | 183 | } |
| 184 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get); | 184 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get); |
| 185 | 185 | ||
| 186 | struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) | ||
| 187 | { | ||
| 188 | const struct l2tp_net *pn = l2tp_pernet(net); | ||
| 189 | struct l2tp_tunnel *tunnel; | ||
| 190 | int count = 0; | ||
| 191 | |||
| 192 | rcu_read_lock_bh(); | ||
| 193 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
| 194 | if (++count > nth) { | ||
| 195 | l2tp_tunnel_inc_refcount(tunnel); | ||
| 196 | rcu_read_unlock_bh(); | ||
| 197 | return tunnel; | ||
| 198 | } | ||
| 199 | } | ||
| 200 | rcu_read_unlock_bh(); | ||
| 201 | |||
| 202 | return NULL; | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); | ||
| 205 | |||
| 186 | /* Lookup a session. A new reference is held on the returned session. */ | 206 | /* Lookup a session. A new reference is held on the returned session. */ |
| 187 | struct l2tp_session *l2tp_session_get(const struct net *net, | 207 | struct l2tp_session *l2tp_session_get(const struct net *net, |
| 188 | struct l2tp_tunnel *tunnel, | 208 | struct l2tp_tunnel *tunnel, |
| @@ -335,26 +355,6 @@ err_tlock: | |||
| 335 | } | 355 | } |
| 336 | EXPORT_SYMBOL_GPL(l2tp_session_register); | 356 | EXPORT_SYMBOL_GPL(l2tp_session_register); |
| 337 | 357 | ||
| 338 | struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) | ||
| 339 | { | ||
| 340 | struct l2tp_net *pn = l2tp_pernet(net); | ||
| 341 | struct l2tp_tunnel *tunnel; | ||
| 342 | int count = 0; | ||
| 343 | |||
| 344 | rcu_read_lock_bh(); | ||
| 345 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
| 346 | if (++count > nth) { | ||
| 347 | rcu_read_unlock_bh(); | ||
| 348 | return tunnel; | ||
| 349 | } | ||
| 350 | } | ||
| 351 | |||
| 352 | rcu_read_unlock_bh(); | ||
| 353 | |||
| 354 | return NULL; | ||
| 355 | } | ||
| 356 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); | ||
| 357 | |||
| 358 | /***************************************************************************** | 358 | /***************************************************************************** |
| 359 | * Receive data handling | 359 | * Receive data handling |
| 360 | *****************************************************************************/ | 360 | *****************************************************************************/ |
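l2tp_tunnel_get_nth() differs from the removed l2tp_tunnel_find_nth() only in ownership: the refcount is taken inside the RCU read-side section, so the returned pointer stays valid after rcu_read_unlock_bh() and every caller must drop it with l2tp_tunnel_dec_refcount(). A hedged caller-side sketch (the loop body is illustrative and assumes l2tp_core.h is in scope):

/* Sketch: walking tunnels with the refcounted getter. */
static void walk_tunnels(struct net *net)
{
	struct l2tp_tunnel *tunnel;
	int i = 0;

	while ((tunnel = l2tp_tunnel_get_nth(net, i++)) != NULL) {
		/* safe to dereference outside the RCU read-side section */
		pr_debug("tunnel %u\n", tunnel->tunnel_id);
		l2tp_tunnel_dec_refcount(tunnel);  /* balance get_nth() */
	}
}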
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index ba33cbec71eb..c199020f8a8a 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) | |||
| 212 | } | 212 | } |
| 213 | 213 | ||
| 214 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); | 214 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); |
| 215 | struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); | ||
| 216 | |||
| 215 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 217 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
| 216 | 218 | ||
| 217 | struct l2tp_session *l2tp_session_get(const struct net *net, | 219 | struct l2tp_session *l2tp_session_get(const struct net *net, |
| @@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net, | |||
| 220 | struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); | 222 | struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); |
| 221 | struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, | 223 | struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, |
| 222 | const char *ifname); | 224 | const char *ifname); |
| 223 | struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); | ||
| 224 | 225 | ||
| 225 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, | 226 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, |
| 226 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, | 227 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, |
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 72e713da4733..b8f9d45bfeb1 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c | |||
| @@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data { | |||
| 47 | 47 | ||
| 48 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) | 48 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) |
| 49 | { | 49 | { |
| 50 | pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); | 50 | /* Drop reference taken during previous invocation */ |
| 51 | if (pd->tunnel) | ||
| 52 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
| 53 | |||
| 54 | pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx); | ||
| 51 | pd->tunnel_idx++; | 55 | pd->tunnel_idx++; |
| 52 | } | 56 | } |
| 53 | 57 | ||
| @@ -96,7 +100,14 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 96 | 100 | ||
| 97 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) | 101 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) |
| 98 | { | 102 | { |
| 99 | /* nothing to do */ | 103 | struct l2tp_dfs_seq_data *pd = v; |
| 104 | |||
| 105 | if (!pd || pd == SEQ_START_TOKEN) | ||
| 106 | return; | ||
| 107 | |||
| 108 | /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ | ||
| 109 | if (pd->tunnel) | ||
| 110 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
| 100 | } | 111 | } |
| 101 | 112 | ||
| 102 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | 113 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index b05dbd9ffcb2..6616c9fd292f 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
| @@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback | |||
| 487 | struct net *net = sock_net(skb->sk); | 487 | struct net *net = sock_net(skb->sk); |
| 488 | 488 | ||
| 489 | for (;;) { | 489 | for (;;) { |
| 490 | tunnel = l2tp_tunnel_find_nth(net, ti); | 490 | tunnel = l2tp_tunnel_get_nth(net, ti); |
| 491 | if (tunnel == NULL) | 491 | if (tunnel == NULL) |
| 492 | goto out; | 492 | goto out; |
| 493 | 493 | ||
| 494 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, | 494 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, |
| 495 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 495 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 496 | tunnel, L2TP_CMD_TUNNEL_GET) < 0) | 496 | tunnel, L2TP_CMD_TUNNEL_GET) < 0) { |
| 497 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 497 | goto out; | 498 | goto out; |
| 499 | } | ||
| 500 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 498 | 501 | ||
| 499 | ti++; | 502 | ti++; |
| 500 | } | 503 | } |
| @@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
| 848 | 851 | ||
| 849 | for (;;) { | 852 | for (;;) { |
| 850 | if (tunnel == NULL) { | 853 | if (tunnel == NULL) { |
| 851 | tunnel = l2tp_tunnel_find_nth(net, ti); | 854 | tunnel = l2tp_tunnel_get_nth(net, ti); |
| 852 | if (tunnel == NULL) | 855 | if (tunnel == NULL) |
| 853 | goto out; | 856 | goto out; |
| 854 | } | 857 | } |
| @@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
| 856 | session = l2tp_session_get_nth(tunnel, si); | 859 | session = l2tp_session_get_nth(tunnel, si); |
| 857 | if (session == NULL) { | 860 | if (session == NULL) { |
| 858 | ti++; | 861 | ti++; |
| 862 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 859 | tunnel = NULL; | 863 | tunnel = NULL; |
| 860 | si = 0; | 864 | si = 0; |
| 861 | continue; | 865 | continue; |
| @@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback | |||
| 865 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 869 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 866 | session, L2TP_CMD_SESSION_GET) < 0) { | 870 | session, L2TP_CMD_SESSION_GET) < 0) { |
| 867 | l2tp_session_dec_refcount(session); | 871 | l2tp_session_dec_refcount(session); |
| 872 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 868 | break; | 873 | break; |
| 869 | } | 874 | } |
| 870 | l2tp_session_dec_refcount(session); | 875 | l2tp_session_dec_refcount(session); |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 896bbca9bdaa..7d0c963680e6 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -1551,16 +1551,19 @@ struct pppol2tp_seq_data { | |||
| 1551 | 1551 | ||
| 1552 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) | 1552 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) |
| 1553 | { | 1553 | { |
| 1554 | /* Drop reference taken during previous invocation */ | ||
| 1555 | if (pd->tunnel) | ||
| 1556 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
| 1557 | |||
| 1554 | for (;;) { | 1558 | for (;;) { |
| 1555 | pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); | 1559 | pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx); |
| 1556 | pd->tunnel_idx++; | 1560 | pd->tunnel_idx++; |
| 1557 | 1561 | ||
| 1558 | if (pd->tunnel == NULL) | 1562 | /* Only accept L2TPv2 tunnels */ |
| 1559 | break; | 1563 | if (!pd->tunnel || pd->tunnel->version == 2) |
| 1564 | return; | ||
| 1560 | 1565 | ||
| 1561 | /* Ignore L2TPv3 tunnels */ | 1566 | l2tp_tunnel_dec_refcount(pd->tunnel); |
| 1562 | if (pd->tunnel->version < 3) | ||
| 1563 | break; | ||
| 1564 | } | 1567 | } |
| 1565 | } | 1568 | } |
| 1566 | 1569 | ||
| @@ -1609,7 +1612,14 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1609 | 1612 | ||
| 1610 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) | 1613 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) |
| 1611 | { | 1614 | { |
| 1612 | /* nothing to do */ | 1615 | struct pppol2tp_seq_data *pd = v; |
| 1616 | |||
| 1617 | if (!pd || pd == SEQ_START_TOKEN) | ||
| 1618 | return; | ||
| 1619 | |||
| 1620 | /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ | ||
| 1621 | if (pd->tunnel) | ||
| 1622 | l2tp_tunnel_dec_refcount(pd->tunnel); | ||
| 1613 | } | 1623 | } |
| 1614 | 1624 | ||
| 1615 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | 1625 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) |
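The debugfs and pppol2tp /proc iterators now follow the same seq_file rule: any reference taken while producing an element must be releasable from ->stop(), because ->stop() is the one callback guaranteed to run after the final ->next(), including on error paths. A generic hedged sketch of that shape (the my_* names are placeholders, not kernel APIs):

/* Sketch: seq_file private data owning one refcounted object at a time. */
#include <linux/seq_file.h>

struct my_obj;
struct my_obj *my_obj_get_nth(int n);   /* returns with a reference held */
void my_obj_put(struct my_obj *obj);    /* drops that reference */

struct my_seq_data {
	struct my_obj *cur;
	int idx;
};

static void my_seq_advance(struct my_seq_data *pd)
{
	if (pd->cur)
		my_obj_put(pd->cur);            /* drop the previous element */
	pd->cur = my_obj_get_nth(pd->idx++);    /* may be NULL at the end */
}

static void my_seq_stop(struct seq_file *m, void *v)
{
	struct my_seq_data *pd = v;

	if (!pd || pd == SEQ_START_TOKEN)
		return;
	if (pd->cur)
		my_obj_put(pd->cur);    /* balance the last advance() */
}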
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 01dcc0823d1f..6d29b2b94e84 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
| @@ -189,6 +189,7 @@ static int llc_ui_release(struct socket *sock) | |||
| 189 | { | 189 | { |
| 190 | struct sock *sk = sock->sk; | 190 | struct sock *sk = sock->sk; |
| 191 | struct llc_sock *llc; | 191 | struct llc_sock *llc; |
| 192 | struct llc_sap *sap; | ||
| 192 | 193 | ||
| 193 | if (unlikely(sk == NULL)) | 194 | if (unlikely(sk == NULL)) |
| 194 | goto out; | 195 | goto out; |
| @@ -199,9 +200,15 @@ static int llc_ui_release(struct socket *sock) | |||
| 199 | llc->laddr.lsap, llc->daddr.lsap); | 200 | llc->laddr.lsap, llc->daddr.lsap); |
| 200 | if (!llc_send_disc(sk)) | 201 | if (!llc_send_disc(sk)) |
| 201 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); | 202 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); |
| 203 | sap = llc->sap; | ||
| 204 | /* Hold this for release_sock(), so that llc_backlog_rcv() could still | ||
| 205 | * use it. | ||
| 206 | */ | ||
| 207 | llc_sap_hold(sap); | ||
| 202 | if (!sock_flag(sk, SOCK_ZAPPED)) | 208 | if (!sock_flag(sk, SOCK_ZAPPED)) |
| 203 | llc_sap_remove_socket(llc->sap, sk); | 209 | llc_sap_remove_socket(llc->sap, sk); |
| 204 | release_sock(sk); | 210 | release_sock(sk); |
| 211 | llc_sap_put(sap); | ||
| 205 | if (llc->dev) | 212 | if (llc->dev) |
| 206 | dev_put(llc->dev); | 213 | dev_put(llc->dev); |
| 207 | sock_put(sk); | 214 | sock_put(sk); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 616cb9c18f88..c31b0687396a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -3008,6 +3008,7 @@ static int packet_release(struct socket *sock) | |||
| 3008 | 3008 | ||
| 3009 | packet_flush_mclist(sk); | 3009 | packet_flush_mclist(sk); |
| 3010 | 3010 | ||
| 3011 | lock_sock(sk); | ||
| 3011 | if (po->rx_ring.pg_vec) { | 3012 | if (po->rx_ring.pg_vec) { |
| 3012 | memset(&req_u, 0, sizeof(req_u)); | 3013 | memset(&req_u, 0, sizeof(req_u)); |
| 3013 | packet_set_ring(sk, &req_u, 1, 0); | 3014 | packet_set_ring(sk, &req_u, 1, 0); |
| @@ -3017,6 +3018,7 @@ static int packet_release(struct socket *sock) | |||
| 3017 | memset(&req_u, 0, sizeof(req_u)); | 3018 | memset(&req_u, 0, sizeof(req_u)); |
| 3018 | packet_set_ring(sk, &req_u, 1, 1); | 3019 | packet_set_ring(sk, &req_u, 1, 1); |
| 3019 | } | 3020 | } |
| 3021 | release_sock(sk); | ||
| 3020 | 3022 | ||
| 3021 | f = fanout_release(sk); | 3023 | f = fanout_release(sk); |
| 3022 | 3024 | ||
| @@ -3643,6 +3645,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
| 3643 | union tpacket_req_u req_u; | 3645 | union tpacket_req_u req_u; |
| 3644 | int len; | 3646 | int len; |
| 3645 | 3647 | ||
| 3648 | lock_sock(sk); | ||
| 3646 | switch (po->tp_version) { | 3649 | switch (po->tp_version) { |
| 3647 | case TPACKET_V1: | 3650 | case TPACKET_V1: |
| 3648 | case TPACKET_V2: | 3651 | case TPACKET_V2: |
| @@ -3653,12 +3656,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
| 3653 | len = sizeof(req_u.req3); | 3656 | len = sizeof(req_u.req3); |
| 3654 | break; | 3657 | break; |
| 3655 | } | 3658 | } |
| 3656 | if (optlen < len) | 3659 | if (optlen < len) { |
| 3657 | return -EINVAL; | 3660 | ret = -EINVAL; |
| 3658 | if (copy_from_user(&req_u.req, optval, len)) | 3661 | } else { |
| 3659 | return -EFAULT; | 3662 | if (copy_from_user(&req_u.req, optval, len)) |
| 3660 | return packet_set_ring(sk, &req_u, 0, | 3663 | ret = -EFAULT; |
| 3661 | optname == PACKET_TX_RING); | 3664 | else |
| 3665 | ret = packet_set_ring(sk, &req_u, 0, | ||
| 3666 | optname == PACKET_TX_RING); | ||
| 3667 | } | ||
| 3668 | release_sock(sk); | ||
| 3669 | return ret; | ||
| 3662 | } | 3670 | } |
| 3663 | case PACKET_COPY_THRESH: | 3671 | case PACKET_COPY_THRESH: |
| 3664 | { | 3672 | { |
| @@ -4208,8 +4216,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4208 | /* Added to avoid minimal code churn */ | 4216 | /* Added to avoid minimal code churn */ |
| 4209 | struct tpacket_req *req = &req_u->req; | 4217 | struct tpacket_req *req = &req_u->req; |
| 4210 | 4218 | ||
| 4211 | lock_sock(sk); | ||
| 4212 | |||
| 4213 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; | 4219 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
| 4214 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | 4220 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
| 4215 | 4221 | ||
| @@ -4347,7 +4353,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4347 | if (pg_vec) | 4353 | if (pg_vec) |
| 4348 | free_pg_vec(pg_vec, order, req->tp_block_nr); | 4354 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
| 4349 | out: | 4355 | out: |
| 4350 | release_sock(sk); | ||
| 4351 | return err; | 4356 | return err; |
| 4352 | } | 4357 | } |
| 4353 | 4358 | ||
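With lock_sock()/release_sock() hoisted out of packet_set_ring(), the ring update now runs under its caller's socket lock: packet_release() and the PACKET_RX_RING/PACKET_TX_RING setsockopt path each hold the lock across the copy-in and the ring swap, so concurrent setsockopt calls can no longer race on the same ring. The resulting convention is simply "the caller owns the lock"; a hedged sketch of that shape (names are illustrative):

/* Sketch: "caller holds the socket lock" calling convention. */
#include <net/sock.h>

static int ring_update_locked(struct sock *sk /*, new ring parameters */)
{
	/* may sleep (vzalloc/vfree); serialized by the sk lock, not a spinlock */
	return 0;
}

static int ring_update(struct sock *sk)
{
	int ret;

	lock_sock(sk);
	ret = ring_update_locked(sk);
	release_sock(sk);
	return ret;
}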
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index b33e5aeb4c06..2aa07b547b16 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c | |||
| @@ -1135,3 +1135,4 @@ module_exit(qrtr_proto_fini); | |||
| 1135 | 1135 | ||
| 1136 | MODULE_DESCRIPTION("Qualcomm IPC-router driver"); | 1136 | MODULE_DESCRIPTION("Qualcomm IPC-router driver"); |
| 1137 | MODULE_LICENSE("GPL v2"); | 1137 | MODULE_LICENSE("GPL v2"); |
| 1138 | MODULE_ALIAS_NETPROTO(PF_QIPCRTR); | ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 31083b5035ec..2e3f7b75a8ec 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -556,46 +556,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, | |||
| 556 | addr->v6.sin6_scope_id = 0; | 556 | addr->v6.sin6_scope_id = 0; |
| 557 | } | 557 | } |
| 558 | 558 | ||
| 559 | /* Compare addresses exactly. | 559 | static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, |
| 560 | * v4-mapped-v6 is also in consideration. | 560 | const union sctp_addr *addr2) |
| 561 | */ | ||
| 562 | static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | ||
| 563 | const union sctp_addr *addr2) | ||
| 564 | { | 561 | { |
| 565 | if (addr1->sa.sa_family != addr2->sa.sa_family) { | 562 | if (addr1->sa.sa_family != addr2->sa.sa_family) { |
| 566 | if (addr1->sa.sa_family == AF_INET && | 563 | if (addr1->sa.sa_family == AF_INET && |
| 567 | addr2->sa.sa_family == AF_INET6 && | 564 | addr2->sa.sa_family == AF_INET6 && |
| 568 | ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { | 565 | ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && |
| 569 | if (addr2->v6.sin6_port == addr1->v4.sin_port && | 566 | addr2->v6.sin6_addr.s6_addr32[3] == |
| 570 | addr2->v6.sin6_addr.s6_addr32[3] == | 567 | addr1->v4.sin_addr.s_addr) |
| 571 | addr1->v4.sin_addr.s_addr) | 568 | return 1; |
| 572 | return 1; | 569 | |
| 573 | } | ||
| 574 | if (addr2->sa.sa_family == AF_INET && | 570 | if (addr2->sa.sa_family == AF_INET && |
| 575 | addr1->sa.sa_family == AF_INET6 && | 571 | addr1->sa.sa_family == AF_INET6 && |
| 576 | ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { | 572 | ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && |
| 577 | if (addr1->v6.sin6_port == addr2->v4.sin_port && | 573 | addr1->v6.sin6_addr.s6_addr32[3] == |
| 578 | addr1->v6.sin6_addr.s6_addr32[3] == | 574 | addr2->v4.sin_addr.s_addr) |
| 579 | addr2->v4.sin_addr.s_addr) | 575 | return 1; |
| 580 | return 1; | 576 | |
| 581 | } | ||
| 582 | return 0; | 577 | return 0; |
| 583 | } | 578 | } |
| 584 | if (addr1->v6.sin6_port != addr2->v6.sin6_port) | 579 | |
| 585 | return 0; | ||
| 586 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) | 580 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) |
| 587 | return 0; | 581 | return 0; |
| 582 | |||
| 588 | /* If this is a linklocal address, compare the scope_id. */ | 583 | /* If this is a linklocal address, compare the scope_id. */ |
| 589 | if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { | 584 | if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && |
| 590 | if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && | 585 | addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && |
| 591 | (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { | 586 | addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) |
| 592 | return 0; | 587 | return 0; |
| 593 | } | ||
| 594 | } | ||
| 595 | 588 | ||
| 596 | return 1; | 589 | return 1; |
| 597 | } | 590 | } |
| 598 | 591 | ||
| 592 | /* Compare addresses exactly. | ||
| 593 | * v4-mapped-v6 is also in consideration. | ||
| 594 | */ | ||
| 595 | static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | ||
| 596 | const union sctp_addr *addr2) | ||
| 597 | { | ||
| 598 | return __sctp_v6_cmp_addr(addr1, addr2) && | ||
| 599 | addr1->v6.sin6_port == addr2->v6.sin6_port; | ||
| 600 | } | ||
| 601 | |||
| 599 | /* Initialize addr struct to INADDR_ANY. */ | 602 | /* Initialize addr struct to INADDR_ANY. */ |
| 600 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) | 603 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) |
| 601 | { | 604 | { |
| @@ -875,8 +878,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
| 875 | const union sctp_addr *addr2, | 878 | const union sctp_addr *addr2, |
| 876 | struct sctp_sock *opt) | 879 | struct sctp_sock *opt) |
| 877 | { | 880 | { |
| 878 | struct sctp_af *af1, *af2; | ||
| 879 | struct sock *sk = sctp_opt2sk(opt); | 881 | struct sock *sk = sctp_opt2sk(opt); |
| 882 | struct sctp_af *af1, *af2; | ||
| 880 | 883 | ||
| 881 | af1 = sctp_get_af_specific(addr1->sa.sa_family); | 884 | af1 = sctp_get_af_specific(addr1->sa.sa_family); |
| 882 | af2 = sctp_get_af_specific(addr2->sa.sa_family); | 885 | af2 = sctp_get_af_specific(addr2->sa.sa_family); |
| @@ -892,10 +895,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | |||
| 892 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) | 895 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) |
| 893 | return 1; | 896 | return 1; |
| 894 | 897 | ||
| 895 | if (addr1->sa.sa_family != addr2->sa.sa_family) | 898 | return __sctp_v6_cmp_addr(addr1, addr2); |
| 896 | return 0; | ||
| 897 | |||
| 898 | return af1->cmp_addr(addr1, addr2); | ||
| 899 | } | 899 | } |
| 900 | 900 | ||
| 901 | /* Verify that the provided sockaddr looks bindable. Common verification, | 901 | /* Verify that the provided sockaddr looks bindable. Common verification, |
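The sctp/ipv6.c refactor splits the comparison in two: __sctp_v6_cmp_addr() matches addresses only, including v4-mapped forms and link-local scope IDs, which is what sctp_inet6_cmp_addr() needs when deciding whether two bind addresses conflict, while sctp_v6_cmp_addr() keeps the exact semantics by also requiring equal ports. A reduced sketch of that split with simplified types (sketch_addr is not a kernel structure):

/* Sketch: address-only helper plus an exact (address + port) wrapper. */
#include <linux/string.h>
#include <linux/types.h>

struct sketch_addr {
	u8	ip[16];		/* IPv6, or the v4-mapped form of an IPv4 address */
	__be16	port;
};

static bool sketch_addr_eq(const struct sketch_addr *a,
			   const struct sketch_addr *b)
{
	return !memcmp(a->ip, b->ip, sizeof(a->ip));	/* ports ignored */
}

static bool sketch_addr_eq_exact(const struct sketch_addr *a,
				 const struct sketch_addr *b)
{
	return sketch_addr_eq(a, b) && a->port == b->port;
}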
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 5f8046c62d90..f5d4b69dbabc 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -1259,14 +1259,12 @@ static int smc_shutdown(struct socket *sock, int how) | |||
| 1259 | rc = smc_close_shutdown_write(smc); | 1259 | rc = smc_close_shutdown_write(smc); |
| 1260 | break; | 1260 | break; |
| 1261 | case SHUT_RD: | 1261 | case SHUT_RD: |
| 1262 | if (sk->sk_state == SMC_LISTEN) | 1262 | rc = 0; |
| 1263 | rc = smc_close_active(smc); | 1263 | /* nothing more to do because peer is not involved */ |
| 1264 | else | ||
| 1265 | rc = 0; | ||
| 1266 | /* nothing more to do because peer is not involved */ | ||
| 1267 | break; | 1264 | break; |
| 1268 | } | 1265 | } |
| 1269 | rc1 = kernel_sock_shutdown(smc->clcsock, how); | 1266 | if (smc->clcsock) |
| 1267 | rc1 = kernel_sock_shutdown(smc->clcsock, how); | ||
| 1270 | /* map sock_shutdown_cmd constants to sk_shutdown value range */ | 1268 | /* map sock_shutdown_cmd constants to sk_shutdown value range */ |
| 1271 | sk->sk_shutdown |= how + 1; | 1269 | sk->sk_shutdown |= how + 1; |
| 1272 | 1270 | ||
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index b9283ce5cd85..805b139756db 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
| @@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
| 296 | strp_start_timer(strp, timeo); | 296 | strp_start_timer(strp, timeo); |
| 297 | } | 297 | } |
| 298 | 298 | ||
| 299 | stm->accum_len += cand_len; | ||
| 299 | strp->need_bytes = stm->strp.full_len - | 300 | strp->need_bytes = stm->strp.full_len - |
| 300 | stm->accum_len; | 301 | stm->accum_len; |
| 301 | stm->accum_len += cand_len; | ||
| 302 | stm->early_eaten = cand_len; | 302 | stm->early_eaten = cand_len; |
| 303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); | 303 | STRP_STATS_ADD(strp->stats.bytes, cand_len); |
| 304 | desc->count = 0; /* Stop reading socket */ | 304 | desc->count = 0; /* Stop reading socket */ |
| @@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
| 321 | /* Hurray, we have a new message! */ | 321 | /* Hurray, we have a new message! */ |
| 322 | cancel_delayed_work(&strp->msg_timer_work); | 322 | cancel_delayed_work(&strp->msg_timer_work); |
| 323 | strp->skb_head = NULL; | 323 | strp->skb_head = NULL; |
| 324 | strp->need_bytes = 0; | ||
| 324 | STRP_STATS_INCR(strp->stats.msgs); | 325 | STRP_STATS_INCR(strp->stats.msgs); |
| 325 | 326 | ||
| 326 | /* Give skb to upper layer */ | 327 | /* Give skb to upper layer */ |
| @@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp) | |||
| 410 | return; | 411 | return; |
| 411 | 412 | ||
| 412 | if (strp->need_bytes) { | 413 | if (strp->need_bytes) { |
| 413 | if (strp_peek_len(strp) >= strp->need_bytes) | 414 | if (strp_peek_len(strp) < strp->need_bytes) |
| 414 | strp->need_bytes = 0; | ||
| 415 | else | ||
| 416 | return; | 415 | return; |
| 417 | } | 416 | } |
| 418 | 417 | ||
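The strparser fix is arithmetic plus bookkeeping: need_bytes must be derived from an accum_len that already counts the bytes just consumed, and it has to fall back to zero once a complete message is delivered, otherwise strp_data_ready() keeps waiting for data that is already queued. As a worked example, with full_len = 100, 60 bytes accumulated from earlier skbs and 30 freshly consumed, the corrected order records accum_len = 90 and need_bytes = 10; the old order computed need_bytes = 100 - 60 = 40 and only then bumped accum_len, overstating the shortfall by the 30 bytes already in hand.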
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 0f08934b2cea..c81ef5e6c981 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry) | |||
| 1375 | struct dentry *clnt_dir = pipe_dentry->d_parent; | 1375 | struct dentry *clnt_dir = pipe_dentry->d_parent; |
| 1376 | struct dentry *gssd_dir = clnt_dir->d_parent; | 1376 | struct dentry *gssd_dir = clnt_dir->d_parent; |
| 1377 | 1377 | ||
| 1378 | dget(pipe_dentry); | ||
| 1378 | __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); | 1379 | __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); |
| 1379 | __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); | 1380 | __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); |
| 1380 | __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); | 1381 | __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1); |
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 32dc33a94bc7..5453e564da82 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c | |||
| @@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, | |||
| 777 | 777 | ||
| 778 | ret = tipc_bearer_get_name(net, bearer_name, bearer_id); | 778 | ret = tipc_bearer_get_name(net, bearer_name, bearer_id); |
| 779 | if (ret || !mon) | 779 | if (ret || !mon) |
| 780 | return -EINVAL; | 780 | return 0; |
| 781 | 781 | ||
| 782 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 782 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
| 783 | NLM_F_MULTI, TIPC_NL_MON_GET); | 783 | NLM_F_MULTI, TIPC_NL_MON_GET); |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index b1fe20972aa9..dd1c4fa2eb78 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -241,7 +241,8 @@ err: | |||
| 241 | static struct publication *tipc_service_remove_publ(struct net *net, | 241 | static struct publication *tipc_service_remove_publ(struct net *net, |
| 242 | struct tipc_service *sc, | 242 | struct tipc_service *sc, |
| 243 | u32 lower, u32 upper, | 243 | u32 lower, u32 upper, |
| 244 | u32 node, u32 key) | 244 | u32 node, u32 key, |
| 245 | struct service_range **rng) | ||
| 245 | { | 246 | { |
| 246 | struct tipc_subscription *sub, *tmp; | 247 | struct tipc_subscription *sub, *tmp; |
| 247 | struct service_range *sr; | 248 | struct service_range *sr; |
| @@ -275,19 +276,15 @@ static struct publication *tipc_service_remove_publ(struct net *net, | |||
| 275 | 276 | ||
| 276 | list_del(&p->all_publ); | 277 | list_del(&p->all_publ); |
| 277 | list_del(&p->local_publ); | 278 | list_del(&p->local_publ); |
| 278 | 279 | if (list_empty(&sr->all_publ)) | |
| 279 | /* Remove service range item if this was its last publication */ | ||
| 280 | if (list_empty(&sr->all_publ)) { | ||
| 281 | last = true; | 280 | last = true; |
| 282 | rb_erase(&sr->tree_node, &sc->ranges); | ||
| 283 | kfree(sr); | ||
| 284 | } | ||
| 285 | 281 | ||
| 286 | /* Notify any waiting subscriptions */ | 282 | /* Notify any waiting subscriptions */ |
| 287 | list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { | 283 | list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { |
| 288 | tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, | 284 | tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, |
| 289 | p->port, p->node, p->scope, last); | 285 | p->port, p->node, p->scope, last); |
| 290 | } | 286 | } |
| 287 | *rng = sr; | ||
| 291 | return p; | 288 | return p; |
| 292 | } | 289 | } |
| 293 | 290 | ||
| @@ -379,13 +376,20 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, | |||
| 379 | u32 node, u32 key) | 376 | u32 node, u32 key) |
| 380 | { | 377 | { |
| 381 | struct tipc_service *sc = tipc_service_find(net, type); | 378 | struct tipc_service *sc = tipc_service_find(net, type); |
| 379 | struct service_range *sr = NULL; | ||
| 382 | struct publication *p = NULL; | 380 | struct publication *p = NULL; |
| 383 | 381 | ||
| 384 | if (!sc) | 382 | if (!sc) |
| 385 | return NULL; | 383 | return NULL; |
| 386 | 384 | ||
| 387 | spin_lock_bh(&sc->lock); | 385 | spin_lock_bh(&sc->lock); |
| 388 | p = tipc_service_remove_publ(net, sc, lower, upper, node, key); | 386 | p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr); |
| 387 | |||
| 388 | /* Remove service range item if this was its last publication */ | ||
| 389 | if (sr && list_empty(&sr->all_publ)) { | ||
| 390 | rb_erase(&sr->tree_node, &sc->ranges); | ||
| 391 | kfree(sr); | ||
| 392 | } | ||
| 389 | 393 | ||
| 390 | /* Delete service item if this no more publications and subscriptions */ | 394 | /* Delete service item if this no more publications and subscriptions */ |
| 391 | if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { | 395 | if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { |
| @@ -665,13 +669,14 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, | |||
| 665 | /** | 669 | /** |
| 666 | * tipc_nametbl_subscribe - add a subscription object to the name table | 670 | * tipc_nametbl_subscribe - add a subscription object to the name table |
| 667 | */ | 671 | */ |
| 668 | void tipc_nametbl_subscribe(struct tipc_subscription *sub) | 672 | bool tipc_nametbl_subscribe(struct tipc_subscription *sub) |
| 669 | { | 673 | { |
| 670 | struct name_table *nt = tipc_name_table(sub->net); | 674 | struct name_table *nt = tipc_name_table(sub->net); |
| 671 | struct tipc_net *tn = tipc_net(sub->net); | 675 | struct tipc_net *tn = tipc_net(sub->net); |
| 672 | struct tipc_subscr *s = &sub->evt.s; | 676 | struct tipc_subscr *s = &sub->evt.s; |
| 673 | u32 type = tipc_sub_read(s, seq.type); | 677 | u32 type = tipc_sub_read(s, seq.type); |
| 674 | struct tipc_service *sc; | 678 | struct tipc_service *sc; |
| 679 | bool res = true; | ||
| 675 | 680 | ||
| 676 | spin_lock_bh(&tn->nametbl_lock); | 681 | spin_lock_bh(&tn->nametbl_lock); |
| 677 | sc = tipc_service_find(sub->net, type); | 682 | sc = tipc_service_find(sub->net, type); |
| @@ -685,8 +690,10 @@ void tipc_nametbl_subscribe(struct tipc_subscription *sub) | |||
| 685 | pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, | 690 | pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, |
| 686 | tipc_sub_read(s, seq.lower), | 691 | tipc_sub_read(s, seq.lower), |
| 687 | tipc_sub_read(s, seq.upper)); | 692 | tipc_sub_read(s, seq.upper)); |
| 693 | res = false; | ||
| 688 | } | 694 | } |
| 689 | spin_unlock_bh(&tn->nametbl_lock); | 695 | spin_unlock_bh(&tn->nametbl_lock); |
| 696 | return res; | ||
| 690 | } | 697 | } |
| 691 | 698 | ||
| 692 | /** | 699 | /** |
| @@ -744,16 +751,17 @@ int tipc_nametbl_init(struct net *net) | |||
| 744 | static void tipc_service_delete(struct net *net, struct tipc_service *sc) | 751 | static void tipc_service_delete(struct net *net, struct tipc_service *sc) |
| 745 | { | 752 | { |
| 746 | struct service_range *sr, *tmpr; | 753 | struct service_range *sr, *tmpr; |
| 747 | struct publication *p, *tmpb; | 754 | struct publication *p, *tmp; |
| 748 | 755 | ||
| 749 | spin_lock_bh(&sc->lock); | 756 | spin_lock_bh(&sc->lock); |
| 750 | rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { | 757 | rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { |
| 751 | list_for_each_entry_safe(p, tmpb, | 758 | list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) { |
| 752 | &sr->all_publ, all_publ) { | ||
| 753 | tipc_service_remove_publ(net, sc, p->lower, p->upper, | 759 | tipc_service_remove_publ(net, sc, p->lower, p->upper, |
| 754 | p->node, p->key); | 760 | p->node, p->key, &sr); |
| 755 | kfree_rcu(p, rcu); | 761 | kfree_rcu(p, rcu); |
| 756 | } | 762 | } |
| 763 | rb_erase(&sr->tree_node, &sc->ranges); | ||
| 764 | kfree(sr); | ||
| 757 | } | 765 | } |
| 758 | hlist_del_init_rcu(&sc->service_list); | 766 | hlist_del_init_rcu(&sc->service_list); |
| 759 | spin_unlock_bh(&sc->lock); | 767 | spin_unlock_bh(&sc->lock); |
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 4b14fc28d9e2..0febba41da86 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h | |||
| @@ -126,7 +126,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, | |||
| 126 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, | 126 | struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, |
| 127 | u32 lower, u32 upper, | 127 | u32 lower, u32 upper, |
| 128 | u32 node, u32 key); | 128 | u32 node, u32 key); |
| 129 | void tipc_nametbl_subscribe(struct tipc_subscription *s); | 129 | bool tipc_nametbl_subscribe(struct tipc_subscription *s); |
| 130 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); | 130 | void tipc_nametbl_unsubscribe(struct tipc_subscription *s); |
| 131 | int tipc_nametbl_init(struct net *net); | 131 | int tipc_nametbl_init(struct net *net); |
| 132 | void tipc_nametbl_stop(struct net *net); | 132 | void tipc_nametbl_stop(struct net *net); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 856f9e97ea29..4fbaa0464405 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
| @@ -252,6 +252,8 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) | |||
| 252 | u64 *w0 = (u64 *)&node_id[0]; | 252 | u64 *w0 = (u64 *)&node_id[0]; |
| 253 | u64 *w1 = (u64 *)&node_id[8]; | 253 | u64 *w1 = (u64 *)&node_id[8]; |
| 254 | 254 | ||
| 255 | if (!attrs[TIPC_NLA_NET_NODEID_W1]) | ||
| 256 | return -EINVAL; | ||
| 255 | *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); | 257 | *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); |
| 256 | *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); | 258 | *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); |
| 257 | tipc_net_init(net, node_id, 0); | 259 | tipc_net_init(net, node_id, 0); |
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index b76f13f6fea1..6ff2254088f6 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
| @@ -79,7 +79,10 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { | |||
| 79 | 79 | ||
| 80 | const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { | 80 | const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { |
| 81 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, | 81 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, |
| 82 | [TIPC_NLA_NET_ID] = { .type = NLA_U32 } | 82 | [TIPC_NLA_NET_ID] = { .type = NLA_U32 }, |
| 83 | [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }, | ||
| 84 | [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 }, | ||
| 85 | [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 }, | ||
| 83 | }; | 86 | }; |
| 84 | 87 | ||
| 85 | const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { | 88 | const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { |
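The two TIPC changes are complementary: the new policy entries let nla_parse() type-check TIPC_NLA_NET_ADDR/NODEID/NODEID_W1, but a policy entry does not make an attribute mandatory, so __tipc_nl_net_set() still has to test for presence before calling nla_get_u64(). A hedged sketch of that handler shape (attribute IDs are those from the TIPC netlink uapi header; error handling trimmed):

/* Sketch: check optional netlink attributes before reading them. */
#include <linux/errno.h>
#include <net/netlink.h>

static int sketch_set_nodeid(struct nlattr **attrs, u8 node_id[16])
{
	u64 *w0 = (u64 *)&node_id[0];
	u64 *w1 = (u64 *)&node_id[8];

	if (!attrs[TIPC_NLA_NET_NODEID] || !attrs[TIPC_NLA_NET_NODEID_W1])
		return -EINVAL;		/* both halves must be supplied */

	*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
	*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
	return 0;
}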
diff --git a/net/tipc/node.c b/net/tipc/node.c index c77dd2f3c589..6f98b56dd48e 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -2232,8 +2232,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2232 | struct net *net = sock_net(skb->sk); | 2232 | struct net *net = sock_net(skb->sk); |
| 2233 | u32 prev_bearer = cb->args[0]; | 2233 | u32 prev_bearer = cb->args[0]; |
| 2234 | struct tipc_nl_msg msg; | 2234 | struct tipc_nl_msg msg; |
| 2235 | int bearer_id; | ||
| 2235 | int err; | 2236 | int err; |
| 2236 | int i; | ||
| 2237 | 2237 | ||
| 2238 | if (prev_bearer == MAX_BEARERS) | 2238 | if (prev_bearer == MAX_BEARERS) |
| 2239 | return 0; | 2239 | return 0; |
| @@ -2243,16 +2243,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2243 | msg.seq = cb->nlh->nlmsg_seq; | 2243 | msg.seq = cb->nlh->nlmsg_seq; |
| 2244 | 2244 | ||
| 2245 | rtnl_lock(); | 2245 | rtnl_lock(); |
| 2246 | for (i = prev_bearer; i < MAX_BEARERS; i++) { | 2246 | for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { |
| 2247 | prev_bearer = i; | ||
| 2248 | err = __tipc_nl_add_monitor(net, &msg, prev_bearer); | 2247 | err = __tipc_nl_add_monitor(net, &msg, prev_bearer); |
| 2249 | if (err) | 2248 | if (err) |
| 2250 | goto out; | 2249 | break; |
| 2251 | } | 2250 | } |
| 2252 | |||
| 2253 | out: | ||
| 2254 | rtnl_unlock(); | 2251 | rtnl_unlock(); |
| 2255 | cb->args[0] = prev_bearer; | 2252 | cb->args[0] = bearer_id; |
| 2256 | 2253 | ||
| 2257 | return skb->len; | 2254 | return skb->len; |
| 2258 | } | 2255 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1fd1c8b5ce03..252a52ae0893 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -1278,7 +1278,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) | |||
| 1278 | struct tipc_msg *hdr = &tsk->phdr; | 1278 | struct tipc_msg *hdr = &tsk->phdr; |
| 1279 | struct tipc_name_seq *seq; | 1279 | struct tipc_name_seq *seq; |
| 1280 | struct sk_buff_head pkts; | 1280 | struct sk_buff_head pkts; |
| 1281 | u32 dnode, dport; | 1281 | u32 dport, dnode = 0; |
| 1282 | u32 type, inst; | 1282 | u32 type, inst; |
| 1283 | int mtu, rc; | 1283 | int mtu, rc; |
| 1284 | 1284 | ||
| @@ -1348,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) | |||
| 1348 | msg_set_destnode(hdr, dnode); | 1348 | msg_set_destnode(hdr, dnode); |
| 1349 | msg_set_destport(hdr, dest->addr.id.ref); | 1349 | msg_set_destport(hdr, dest->addr.id.ref); |
| 1350 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); | 1350 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); |
| 1351 | } else { | ||
| 1352 | return -EINVAL; | ||
| 1351 | } | 1353 | } |
| 1352 | 1354 | ||
| 1353 | /* Block or return if destination link is congested */ | 1355 | /* Block or return if destination link is congested */ |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index b7d80bc5f4ab..f340e53da625 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
| @@ -153,7 +153,10 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net, | |||
| 153 | memcpy(&sub->evt.s, s, sizeof(*s)); | 153 | memcpy(&sub->evt.s, s, sizeof(*s)); |
| 154 | spin_lock_init(&sub->lock); | 154 | spin_lock_init(&sub->lock); |
| 155 | kref_init(&sub->kref); | 155 | kref_init(&sub->kref); |
| 156 | tipc_nametbl_subscribe(sub); | 156 | if (!tipc_nametbl_subscribe(sub)) { |
| 157 | kfree(sub); | ||
| 158 | return NULL; | ||
| 159 | } | ||
| 157 | timer_setup(&sub->timer, tipc_sub_timeout, 0); | 160 | timer_setup(&sub->timer, tipc_sub_timeout, 0); |
| 158 | timeout = tipc_sub_read(&sub->evt.s, timeout); | 161 | timeout = tipc_sub_read(&sub->evt.s, timeout); |
| 159 | if (timeout != TIPC_WAIT_FOREVER) | 162 | if (timeout != TIPC_WAIT_FOREVER) |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4dc766b03f00..71e79597f940 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -41,6 +41,8 @@ | |||
| 41 | #include <net/strparser.h> | 41 | #include <net/strparser.h> |
| 42 | #include <net/tls.h> | 42 | #include <net/tls.h> |
| 43 | 43 | ||
| 44 | #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE | ||
| 45 | |||
| 44 | static int tls_do_decryption(struct sock *sk, | 46 | static int tls_do_decryption(struct sock *sk, |
| 45 | struct scatterlist *sgin, | 47 | struct scatterlist *sgin, |
| 46 | struct scatterlist *sgout, | 48 | struct scatterlist *sgout, |
| @@ -673,7 +675,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
| 673 | { | 675 | { |
| 674 | struct tls_context *tls_ctx = tls_get_ctx(sk); | 676 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 675 | struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); | 677 | struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); |
| 676 | char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size]; | 678 | char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; |
| 677 | struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; | 679 | struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; |
| 678 | struct scatterlist *sgin = &sgin_arr[0]; | 680 | struct scatterlist *sgin = &sgin_arr[0]; |
| 679 | struct strp_msg *rxm = strp_msg(skb); | 681 | struct strp_msg *rxm = strp_msg(skb); |
| @@ -1094,6 +1096,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) | |||
| 1094 | goto free_priv; | 1096 | goto free_priv; |
| 1095 | } | 1097 | } |
| 1096 | 1098 | ||
| 1099 | /* Sanity-check the IV size for stack allocations. */ | ||
| 1100 | if (iv_size > MAX_IV_SIZE) { | ||
| 1101 | rc = -EINVAL; | ||
| 1102 | goto free_priv; | ||
| 1103 | } | ||
| 1104 | |||
| 1097 | cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; | 1105 | cctx->prepend_size = TLS_HEADER_SIZE + nonce_size; |
| 1098 | cctx->tag_size = tag_size; | 1106 | cctx->tag_size = tag_size; |
| 1099 | cctx->overhead_size = cctx->prepend_size + cctx->tag_size; | 1107 | cctx->overhead_size = cctx->prepend_size + cctx->tag_size; |
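The tls_sw change replaces a stack array sized by tls_ctx->rx.iv_size, a value that arrives via setsockopt, with one bounded by the compile-time MAX_IV_SIZE, and tls_set_sw_offload() now rejects any cipher setup whose IV would not fit. The shape of the fix, as a hedged sketch (sketch_* names are illustrative; the constant mirrors TLS_CIPHER_AES_GCM_128_IV_SIZE):

/* Sketch: size stack buffers by a compile-time maximum, not by input. */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define SKETCH_MAX_IV_SIZE	8

static int sketch_build_iv(const char *salt, size_t salt_len,
			   const char *iv, size_t iv_size)
{
	char buf[4 + SKETCH_MAX_IV_SIZE];	/* fixed size, no VLA */

	if (salt_len + iv_size > sizeof(buf))
		return -EINVAL;			/* reject oversized configs */

	memcpy(buf, salt, salt_len);
	memcpy(buf + salt_len, iv, iv_size);
	/* ... hand buf to the decrypt path ... */
	return 0;
}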
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index aac9b8f6552e..c1076c19b858 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
| @@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void) | |||
| 2018 | } | 2018 | } |
| 2019 | EXPORT_SYMBOL_GPL(vsock_core_get_transport); | 2019 | EXPORT_SYMBOL_GPL(vsock_core_get_transport); |
| 2020 | 2020 | ||
| 2021 | static void __exit vsock_exit(void) | ||
| 2022 | { | ||
| 2023 | /* Do nothing. This function makes this module removable. */ | ||
| 2024 | } | ||
| 2025 | |||
| 2021 | module_init(vsock_init_tables); | 2026 | module_init(vsock_init_tables); |
| 2027 | module_exit(vsock_exit); | ||
| 2022 | 2028 | ||
| 2023 | MODULE_AUTHOR("VMware, Inc."); | 2029 | MODULE_AUTHOR("VMware, Inc."); |
| 2024 | MODULE_DESCRIPTION("VMware Virtual Socket Family"); | 2030 | MODULE_DESCRIPTION("VMware Virtual Socket Family"); |
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 830c55514f9f..49b13553eaae 100644 --- a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c | |||
| @@ -56,6 +56,21 @@ struct dummy { | |||
| 56 | unsigned long jiffies_expire; | 56 | unsigned long jiffies_expire; |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | /* | ||
| 60 | * The constructor makes more sense together with klp_shadow_get_or_alloc(). | ||
| 61 | * In this example, it would be safe to assign the pointer also to the shadow | ||
| 62 | * variable returned by klp_shadow_alloc(). But we wanted to show the more | ||
| 63 | * complicated use of the API. | ||
| 64 | */ | ||
| 65 | static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data) | ||
| 66 | { | ||
| 67 | void **shadow_leak = shadow_data; | ||
| 68 | void *leak = ctor_data; | ||
| 69 | |||
| 70 | *shadow_leak = leak; | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 59 | struct dummy *livepatch_fix1_dummy_alloc(void) | 74 | struct dummy *livepatch_fix1_dummy_alloc(void) |
| 60 | { | 75 | { |
| 61 | struct dummy *d; | 76 | struct dummy *d; |
| @@ -74,7 +89,8 @@ struct dummy *livepatch_fix1_dummy_alloc(void) | |||
| 74 | * pointer to handle resource release. | 89 | * pointer to handle resource release. |
| 75 | */ | 90 | */ |
| 76 | leak = kzalloc(sizeof(int), GFP_KERNEL); | 91 | leak = kzalloc(sizeof(int), GFP_KERNEL); |
| 77 | klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL); | 92 | klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL, |
| 93 | shadow_leak_ctor, leak); | ||
| 78 | 94 | ||
| 79 | pr_info("%s: dummy @ %p, expires @ %lx\n", | 95 | pr_info("%s: dummy @ %p, expires @ %lx\n", |
| 80 | __func__, d, d->jiffies_expire); | 96 | __func__, d, d->jiffies_expire); |
| @@ -82,9 +98,19 @@ struct dummy *livepatch_fix1_dummy_alloc(void) | |||
| 82 | return d; | 98 | return d; |
| 83 | } | 99 | } |
| 84 | 100 | ||
| 101 | static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data) | ||
| 102 | { | ||
| 103 | void *d = obj; | ||
| 104 | void **shadow_leak = shadow_data; | ||
| 105 | |||
| 106 | kfree(*shadow_leak); | ||
| 107 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
| 108 | __func__, d, *shadow_leak); | ||
| 109 | } | ||
| 110 | |||
| 85 | void livepatch_fix1_dummy_free(struct dummy *d) | 111 | void livepatch_fix1_dummy_free(struct dummy *d) |
| 86 | { | 112 | { |
| 87 | void **shadow_leak, *leak; | 113 | void **shadow_leak; |
| 88 | 114 | ||
| 89 | /* | 115 | /* |
| 90 | * Patch: fetch the saved SV_LEAK shadow variable, detach and | 116 | * Patch: fetch the saved SV_LEAK shadow variable, detach and |
| @@ -93,15 +119,10 @@ void livepatch_fix1_dummy_free(struct dummy *d) | |||
| 93 | * was loaded.) | 119 | * was loaded.) |
| 94 | */ | 120 | */ |
| 95 | shadow_leak = klp_shadow_get(d, SV_LEAK); | 121 | shadow_leak = klp_shadow_get(d, SV_LEAK); |
| 96 | if (shadow_leak) { | 122 | if (shadow_leak) |
| 97 | leak = *shadow_leak; | 123 | klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor); |
| 98 | klp_shadow_free(d, SV_LEAK); | 124 | else |
| 99 | kfree(leak); | ||
| 100 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
| 101 | __func__, d, leak); | ||
| 102 | } else { | ||
| 103 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); | 125 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); |
| 104 | } | ||
| 105 | 126 | ||
| 106 | kfree(d); | 127 | kfree(d); |
| 107 | } | 128 | } |
| @@ -147,7 +168,7 @@ static int livepatch_shadow_fix1_init(void) | |||
| 147 | static void livepatch_shadow_fix1_exit(void) | 168 | static void livepatch_shadow_fix1_exit(void) |
| 148 | { | 169 | { |
| 149 | /* Cleanup any existing SV_LEAK shadow variables */ | 170 | /* Cleanup any existing SV_LEAK shadow variables */ |
| 150 | klp_shadow_free_all(SV_LEAK); | 171 | klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor); |
| 151 | 172 | ||
| 152 | WARN_ON(klp_unregister_patch(&patch)); | 173 | WARN_ON(klp_unregister_patch(&patch)); |
| 153 | } | 174 | } |
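The sample tracks the reworked shadow-variable API: instead of copying caller data into the shadow variable at allocation time and freeing the attached resource by hand, klp_shadow_alloc() now takes a constructor plus ctor_data, and klp_shadow_free()/klp_shadow_free_all() take a destructor that runs for every freed instance. A condensed sketch of the call shape (MY_ID is hypothetical; the fix1/fix2 modules above are the full users):

/* Sketch of the ctor/dtor call shape used by the samples. */
#include <linux/livepatch.h>
#include <linux/slab.h>

static int my_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	*(void **)shadow_data = ctor_data;	/* stash the pointer in the shadow var */
	return 0;
}

static void my_dtor(void *obj, void *shadow_data)
{
	kfree(*(void **)shadow_data);		/* release the attached resource */
}

/* attach:   klp_shadow_alloc(obj, MY_ID, sizeof(void *), GFP_KERNEL,
 *                            my_ctor, resource);
 * detach:   klp_shadow_free(obj, MY_ID, my_dtor);
 * cleanup:  klp_shadow_free_all(MY_ID, my_dtor);
 */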
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c index ff9948f0ec00..b34c7bf83356 100644 --- a/samples/livepatch/livepatch-shadow-fix2.c +++ b/samples/livepatch/livepatch-shadow-fix2.c | |||
| @@ -53,39 +53,42 @@ struct dummy { | |||
| 53 | bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) | 53 | bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) |
| 54 | { | 54 | { |
| 55 | int *shadow_count; | 55 | int *shadow_count; |
| 56 | int count; | ||
| 57 | 56 | ||
| 58 | /* | 57 | /* |
| 59 | * Patch: handle in-flight dummy structures, if they do not | 58 | * Patch: handle in-flight dummy structures, if they do not |
| 60 | * already have a SV_COUNTER shadow variable, then attach a | 59 | * already have a SV_COUNTER shadow variable, then attach a |
| 61 | * new one. | 60 | * new one. |
| 62 | */ | 61 | */ |
| 63 | count = 0; | ||
| 64 | shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, | 62 | shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, |
| 65 | &count, sizeof(count), | 63 | sizeof(*shadow_count), GFP_NOWAIT, |
| 66 | GFP_NOWAIT); | 64 | NULL, NULL); |
| 67 | if (shadow_count) | 65 | if (shadow_count) |
| 68 | *shadow_count += 1; | 66 | *shadow_count += 1; |
| 69 | 67 | ||
| 70 | return time_after(jiffies, d->jiffies_expire); | 68 | return time_after(jiffies, d->jiffies_expire); |
| 71 | } | 69 | } |
| 72 | 70 | ||
| 71 | static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data) | ||
| 72 | { | ||
| 73 | void *d = obj; | ||
| 74 | void **shadow_leak = shadow_data; | ||
| 75 | |||
| 76 | kfree(*shadow_leak); | ||
| 77 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
| 78 | __func__, d, *shadow_leak); | ||
| 79 | } | ||
| 80 | |||
| 73 | void livepatch_fix2_dummy_free(struct dummy *d) | 81 | void livepatch_fix2_dummy_free(struct dummy *d) |
| 74 | { | 82 | { |
| 75 | void **shadow_leak, *leak; | 83 | void **shadow_leak; |
| 76 | int *shadow_count; | 84 | int *shadow_count; |
| 77 | 85 | ||
| 78 | /* Patch: copy the memory leak patch from the fix1 module. */ | 86 | /* Patch: copy the memory leak patch from the fix1 module. */ |
| 79 | shadow_leak = klp_shadow_get(d, SV_LEAK); | 87 | shadow_leak = klp_shadow_get(d, SV_LEAK); |
| 80 | if (shadow_leak) { | 88 | if (shadow_leak) |
| 81 | leak = *shadow_leak; | 89 | klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor); |
| 82 | klp_shadow_free(d, SV_LEAK); | 90 | else |
| 83 | kfree(leak); | ||
| 84 | pr_info("%s: dummy @ %p, prevented leak @ %p\n", | ||
| 85 | __func__, d, leak); | ||
| 86 | } else { | ||
| 87 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); | 91 | pr_info("%s: dummy @ %p leaked!\n", __func__, d); |
| 88 | } | ||
| 89 | 92 | ||
| 90 | /* | 93 | /* |
| 91 | * Patch: fetch the SV_COUNTER shadow variable and display | 94 | * Patch: fetch the SV_COUNTER shadow variable and display |
| @@ -95,7 +98,7 @@ void livepatch_fix2_dummy_free(struct dummy *d) | |||
| 95 | if (shadow_count) { | 98 | if (shadow_count) { |
| 96 | pr_info("%s: dummy @ %p, check counter = %d\n", | 99 | pr_info("%s: dummy @ %p, check counter = %d\n", |
| 97 | __func__, d, *shadow_count); | 100 | __func__, d, *shadow_count); |
| 98 | klp_shadow_free(d, SV_COUNTER); | 101 | klp_shadow_free(d, SV_COUNTER, NULL); |
| 99 | } | 102 | } |
| 100 | 103 | ||
| 101 | kfree(d); | 104 | kfree(d); |
| @@ -142,7 +145,7 @@ static int livepatch_shadow_fix2_init(void) | |||
| 142 | static void livepatch_shadow_fix2_exit(void) | 145 | static void livepatch_shadow_fix2_exit(void) |
| 143 | { | 146 | { |
| 144 | /* Cleanup any existing SV_COUNTER shadow variables */ | 147 | /* Cleanup any existing SV_COUNTER shadow variables */ |
| 145 | klp_shadow_free_all(SV_COUNTER); | 148 | klp_shadow_free_all(SV_COUNTER, NULL); |
| 146 | 149 | ||
| 147 | WARN_ON(klp_unregister_patch(&patch)); | 150 | WARN_ON(klp_unregister_patch(&patch)); |
| 148 | } | 151 | } |
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c index f69764d7cdd7..e30e30ba6e39 100644 --- a/sound/core/rawmidi_compat.c +++ b/sound/core/rawmidi_compat.c | |||
| @@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, | |||
| 36 | struct snd_rawmidi_params params; | 36 | struct snd_rawmidi_params params; |
| 37 | unsigned int val; | 37 | unsigned int val; |
| 38 | 38 | ||
| 39 | if (rfile->output == NULL) | ||
| 40 | return -EINVAL; | ||
| 41 | if (get_user(params.stream, &src->stream) || | 39 | if (get_user(params.stream, &src->stream) || |
| 42 | get_user(params.buffer_size, &src->buffer_size) || | 40 | get_user(params.buffer_size, &src->buffer_size) || |
| 43 | get_user(params.avail_min, &src->avail_min) || | 41 | get_user(params.avail_min, &src->avail_min) || |
| @@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, | |||
| 46 | params.no_active_sensing = val; | 44 | params.no_active_sensing = val; |
| 47 | switch (params.stream) { | 45 | switch (params.stream) { |
| 48 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 46 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
| 47 | if (!rfile->output) | ||
| 48 | return -EINVAL; | ||
| 49 | return snd_rawmidi_output_params(rfile->output, ¶ms); | 49 | return snd_rawmidi_output_params(rfile->output, ¶ms); |
| 50 | case SNDRV_RAWMIDI_STREAM_INPUT: | 50 | case SNDRV_RAWMIDI_STREAM_INPUT: |
| 51 | if (!rfile->input) | ||
| 52 | return -EINVAL; | ||
| 51 | return snd_rawmidi_input_params(rfile->input, ¶ms); | 53 | return snd_rawmidi_input_params(rfile->input, ¶ms); |
| 52 | } | 54 | } |
| 53 | return -EINVAL; | 55 | return -EINVAL; |
| @@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, | |||
| 67 | int err; | 69 | int err; |
| 68 | struct snd_rawmidi_status status; | 70 | struct snd_rawmidi_status status; |
| 69 | 71 | ||
| 70 | if (rfile->output == NULL) | ||
| 71 | return -EINVAL; | ||
| 72 | if (get_user(status.stream, &src->stream)) | 72 | if (get_user(status.stream, &src->stream)) |
| 73 | return -EFAULT; | 73 | return -EFAULT; |
| 74 | 74 | ||
| 75 | switch (status.stream) { | 75 | switch (status.stream) { |
| 76 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 76 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
| 77 | if (!rfile->output) | ||
| 78 | return -EINVAL; | ||
| 77 | err = snd_rawmidi_output_status(rfile->output, &status); | 79 | err = snd_rawmidi_output_status(rfile->output, &status); |
| 78 | break; | 80 | break; |
| 79 | case SNDRV_RAWMIDI_STREAM_INPUT: | 81 | case SNDRV_RAWMIDI_STREAM_INPUT: |
| 82 | if (!rfile->input) | ||
| 83 | return -EINVAL; | ||
| 80 | err = snd_rawmidi_input_status(rfile->input, &status); | 84 | err = snd_rawmidi_input_status(rfile->input, &status); |
| 81 | break; | 85 | break; |
| 82 | default: | 86 | default: |
| @@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, | |||
| 112 | int err; | 116 | int err; |
| 113 | struct snd_rawmidi_status status; | 117 | struct snd_rawmidi_status status; |
| 114 | 118 | ||
| 115 | if (rfile->output == NULL) | ||
| 116 | return -EINVAL; | ||
| 117 | if (get_user(status.stream, &src->stream)) | 119 | if (get_user(status.stream, &src->stream)) |
| 118 | return -EFAULT; | 120 | return -EFAULT; |
| 119 | 121 | ||
| 120 | switch (status.stream) { | 122 | switch (status.stream) { |
| 121 | case SNDRV_RAWMIDI_STREAM_OUTPUT: | 123 | case SNDRV_RAWMIDI_STREAM_OUTPUT: |
| 124 | if (!rfile->output) | ||
| 125 | return -EINVAL; | ||
| 122 | err = snd_rawmidi_output_status(rfile->output, &status); | 126 | err = snd_rawmidi_output_status(rfile->output, &status); |
| 123 | break; | 127 | break; |
| 124 | case SNDRV_RAWMIDI_STREAM_INPUT: | 128 | case SNDRV_RAWMIDI_STREAM_INPUT: |
| 129 | if (!rfile->input) | ||
| 130 | return -EINVAL; | ||
| 125 | err = snd_rawmidi_input_status(rfile->input, &status); | 131 | err = snd_rawmidi_input_status(rfile->input, &status); |
| 126 | break; | 132 | break; |
| 127 | default: | 133 | default: |
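Both compat handlers previously bailed out whenever rfile->output was NULL, which made every such ioctl fail on input-only rawmidi devices; the hunks above defer the NULL check until the requested stream direction is known. The shape they converge on can be summed up with a small helper (hypothetical, for illustration only; the in-tree code keeps the checks inline):

    static struct snd_rawmidi_substream *
    rawmidi_substream_for(struct snd_rawmidi_file *rfile, int stream)
    {
            switch (stream) {
            case SNDRV_RAWMIDI_STREAM_OUTPUT:
                    return rfile->output;   /* NULL on input-only devices */
            case SNDRV_RAWMIDI_STREAM_INPUT:
                    return rfile->input;    /* NULL on output-only devices */
            }
            return NULL;                    /* unknown direction: caller returns -EINVAL */
    }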
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7a111a1b5836..b0c8c79848a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -1647,7 +1647,8 @@ static void azx_check_snoop_available(struct azx *chip) | |||
| 1647 | */ | 1647 | */ |
| 1648 | u8 val; | 1648 | u8 val; |
| 1649 | pci_read_config_byte(chip->pci, 0x42, &val); | 1649 | pci_read_config_byte(chip->pci, 0x42, &val); |
| 1650 | if (!(val & 0x80) && chip->pci->revision == 0x30) | 1650 | if (!(val & 0x80) && (chip->pci->revision == 0x30 || |
| 1651 | chip->pci->revision == 0x20)) | ||
| 1651 | snoop = false; | 1652 | snoop = false; |
| 1652 | } | 1653 | } |
| 1653 | 1654 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index aef1f52db7d9..fc77bf7a1544 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -6370,6 +6370,8 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 6370 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | 6370 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ |
| 6371 | { } | 6371 | { } |
| 6372 | }, | 6372 | }, |
| 6373 | .chained = true, | ||
| 6374 | .chain_id = ALC269_FIXUP_HEADSET_MIC | ||
| 6373 | }, | 6375 | }, |
| 6374 | }; | 6376 | }; |
| 6375 | 6377 | ||
| @@ -6573,6 +6575,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6573 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6575 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| 6574 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6576 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| 6575 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6577 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
| 6578 | SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | ||
| 6576 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), | 6579 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
| 6577 | SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 6580 | SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| 6578 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), | 6581 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), |
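The ALC269 changes chain the fixup entry above into ALC269_FIXUP_HEADSET_MIC so the headset-mic handling runs after the pin override, and add one more ThinkCentre Station PCI SSID to the existing mic-location quirk. For reference, a chained entry in this table generally has the following shape (the enum name is a placeholder; only the pin line and the .chained/.chain_id fields mirror the hunk above):

    [ALC2XX_FIXUP_EXAMPLE_HEADSET_MIC] = {
            .type = HDA_FIXUP_PINS,
            .v.pins = (const struct hda_pintbl[]) {
                    { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
                    { }
            },
            .chained = true,
            .chain_id = ALC269_FIXUP_HEADSET_MIC
    },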
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c index 6d7cde56a355..e2cf55c53ea8 100644 --- a/sound/usb/line6/midi.c +++ b/sound/usb/line6/midi.c | |||
| @@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data, | |||
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | usb_fill_int_urb(urb, line6->usbdev, | 127 | usb_fill_int_urb(urb, line6->usbdev, |
| 128 | usb_sndbulkpipe(line6->usbdev, | 128 | usb_sndintpipe(line6->usbdev, |
| 129 | line6->properties->ep_ctrl_w), | 129 | line6->properties->ep_ctrl_w), |
| 130 | transfer_buffer, length, midi_sent, line6, | 130 | transfer_buffer, length, midi_sent, line6, |
| 131 | line6->interval); | 131 | line6->interval); |
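send_midi_async() submits an interrupt URB, so the pipe passed to usb_fill_int_urb() has to be built with the interrupt helper; pairing it with usb_sndbulkpipe(), as before this hunk, trips the USB core's endpoint/pipe type check. As a general reminder (variable names illustrative), the fill helpers are meant to be used in matched pairs:

    usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep), buf, len, done_cb, ctx);
    usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, ep), buf, len, done_cb, ctx, interval);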
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile | |||
| @@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ | |||
| 31 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ | 31 | -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ |
| 32 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include | 32 | -I$(srctree)/tools/objtool/arch/$(ARCH)/include |
| 33 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed | 33 | WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed |
| 34 | CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) | 34 | CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) |
| 35 | LDFLAGS += -lelf $(LIBSUBCMD) | 35 | LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) |
| 36 | 36 | ||
| 37 | # Allow old libelf to be used: | 37 | # Allow old libelf to be used: |
| 38 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) | 38 | elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index cb166be4918d..4ea385be528f 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
| @@ -138,6 +138,7 @@ static u32 handle[] = { | |||
| 138 | }; | 138 | }; |
| 139 | 139 | ||
| 140 | static unsigned long dimm_fail_cmd_flags[NUM_DCR]; | 140 | static unsigned long dimm_fail_cmd_flags[NUM_DCR]; |
| 141 | static int dimm_fail_cmd_code[NUM_DCR]; | ||
| 141 | 142 | ||
| 142 | struct nfit_test_fw { | 143 | struct nfit_test_fw { |
| 143 | enum intel_fw_update_state state; | 144 | enum intel_fw_update_state state; |
| @@ -892,8 +893,11 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) | |||
| 892 | if (i >= ARRAY_SIZE(handle)) | 893 | if (i >= ARRAY_SIZE(handle)) |
| 893 | return -ENXIO; | 894 | return -ENXIO; |
| 894 | 895 | ||
| 895 | if ((1 << func) & dimm_fail_cmd_flags[i]) | 896 | if ((1 << func) & dimm_fail_cmd_flags[i]) { |
| 897 | if (dimm_fail_cmd_code[i]) | ||
| 898 | return dimm_fail_cmd_code[i]; | ||
| 896 | return -EIO; | 899 | return -EIO; |
| 900 | } | ||
| 897 | 901 | ||
| 898 | return i; | 902 | return i; |
| 899 | } | 903 | } |
| @@ -1162,12 +1166,12 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state) | |||
| 1162 | 1166 | ||
| 1163 | static void put_dimms(void *data) | 1167 | static void put_dimms(void *data) |
| 1164 | { | 1168 | { |
| 1165 | struct device **dimm_dev = data; | 1169 | struct nfit_test *t = data; |
| 1166 | int i; | 1170 | int i; |
| 1167 | 1171 | ||
| 1168 | for (i = 0; i < NUM_DCR; i++) | 1172 | for (i = 0; i < t->num_dcr; i++) |
| 1169 | if (dimm_dev[i]) | 1173 | if (t->dimm_dev[i]) |
| 1170 | device_unregister(dimm_dev[i]); | 1174 | device_unregister(t->dimm_dev[i]); |
| 1171 | } | 1175 | } |
| 1172 | 1176 | ||
| 1173 | static struct class *nfit_test_dimm; | 1177 | static struct class *nfit_test_dimm; |
| @@ -1176,13 +1180,11 @@ static int dimm_name_to_id(struct device *dev) | |||
| 1176 | { | 1180 | { |
| 1177 | int dimm; | 1181 | int dimm; |
| 1178 | 1182 | ||
| 1179 | if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 | 1183 | if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) |
| 1180 | || dimm >= NUM_DCR || dimm < 0) | ||
| 1181 | return -ENXIO; | 1184 | return -ENXIO; |
| 1182 | return dimm; | 1185 | return dimm; |
| 1183 | } | 1186 | } |
| 1184 | 1187 | ||
| 1185 | |||
| 1186 | static ssize_t handle_show(struct device *dev, struct device_attribute *attr, | 1188 | static ssize_t handle_show(struct device *dev, struct device_attribute *attr, |
| 1187 | char *buf) | 1189 | char *buf) |
| 1188 | { | 1190 | { |
| @@ -1191,7 +1193,7 @@ static ssize_t handle_show(struct device *dev, struct device_attribute *attr, | |||
| 1191 | if (dimm < 0) | 1193 | if (dimm < 0) |
| 1192 | return dimm; | 1194 | return dimm; |
| 1193 | 1195 | ||
| 1194 | return sprintf(buf, "%#x", handle[dimm]); | 1196 | return sprintf(buf, "%#x\n", handle[dimm]); |
| 1195 | } | 1197 | } |
| 1196 | DEVICE_ATTR_RO(handle); | 1198 | DEVICE_ATTR_RO(handle); |
| 1197 | 1199 | ||
| @@ -1225,8 +1227,39 @@ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, | |||
| 1225 | } | 1227 | } |
| 1226 | static DEVICE_ATTR_RW(fail_cmd); | 1228 | static DEVICE_ATTR_RW(fail_cmd); |
| 1227 | 1229 | ||
| 1230 | static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, | ||
| 1231 | char *buf) | ||
| 1232 | { | ||
| 1233 | int dimm = dimm_name_to_id(dev); | ||
| 1234 | |||
| 1235 | if (dimm < 0) | ||
| 1236 | return dimm; | ||
| 1237 | |||
| 1238 | return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); | ||
| 1239 | } | ||
| 1240 | |||
| 1241 | static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr, | ||
| 1242 | const char *buf, size_t size) | ||
| 1243 | { | ||
| 1244 | int dimm = dimm_name_to_id(dev); | ||
| 1245 | unsigned long val; | ||
| 1246 | ssize_t rc; | ||
| 1247 | |||
| 1248 | if (dimm < 0) | ||
| 1249 | return dimm; | ||
| 1250 | |||
| 1251 | rc = kstrtol(buf, 0, &val); | ||
| 1252 | if (rc) | ||
| 1253 | return rc; | ||
| 1254 | |||
| 1255 | dimm_fail_cmd_code[dimm] = val; | ||
| 1256 | return size; | ||
| 1257 | } | ||
| 1258 | static DEVICE_ATTR_RW(fail_cmd_code); | ||
| 1259 | |||
| 1228 | static struct attribute *nfit_test_dimm_attributes[] = { | 1260 | static struct attribute *nfit_test_dimm_attributes[] = { |
| 1229 | &dev_attr_fail_cmd.attr, | 1261 | &dev_attr_fail_cmd.attr, |
| 1262 | &dev_attr_fail_cmd_code.attr, | ||
| 1230 | &dev_attr_handle.attr, | 1263 | &dev_attr_handle.attr, |
| 1231 | NULL, | 1264 | NULL, |
| 1232 | }; | 1265 | }; |
| @@ -1240,6 +1273,23 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { | |||
| 1240 | NULL, | 1273 | NULL, |
| 1241 | }; | 1274 | }; |
| 1242 | 1275 | ||
| 1276 | static int nfit_test_dimm_init(struct nfit_test *t) | ||
| 1277 | { | ||
| 1278 | int i; | ||
| 1279 | |||
| 1280 | if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) | ||
| 1281 | return -ENOMEM; | ||
| 1282 | for (i = 0; i < t->num_dcr; i++) { | ||
| 1283 | t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, | ||
| 1284 | &t->pdev.dev, 0, NULL, | ||
| 1285 | nfit_test_dimm_attribute_groups, | ||
| 1286 | "test_dimm%d", i + t->dcr_idx); | ||
| 1287 | if (!t->dimm_dev[i]) | ||
| 1288 | return -ENOMEM; | ||
| 1289 | } | ||
| 1290 | return 0; | ||
| 1291 | } | ||
| 1292 | |||
| 1243 | static void smart_init(struct nfit_test *t) | 1293 | static void smart_init(struct nfit_test *t) |
| 1244 | { | 1294 | { |
| 1245 | int i; | 1295 | int i; |
| @@ -1335,17 +1385,8 @@ static int nfit_test0_alloc(struct nfit_test *t) | |||
| 1335 | if (!t->_fit) | 1385 | if (!t->_fit) |
| 1336 | return -ENOMEM; | 1386 | return -ENOMEM; |
| 1337 | 1387 | ||
| 1338 | if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) | 1388 | if (nfit_test_dimm_init(t)) |
| 1339 | return -ENOMEM; | 1389 | return -ENOMEM; |
| 1340 | for (i = 0; i < NUM_DCR; i++) { | ||
| 1341 | t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, | ||
| 1342 | &t->pdev.dev, 0, NULL, | ||
| 1343 | nfit_test_dimm_attribute_groups, | ||
| 1344 | "test_dimm%d", i); | ||
| 1345 | if (!t->dimm_dev[i]) | ||
| 1346 | return -ENOMEM; | ||
| 1347 | } | ||
| 1348 | |||
| 1349 | smart_init(t); | 1390 | smart_init(t); |
| 1350 | return ars_state_init(&t->pdev.dev, &t->ars_state); | 1391 | return ars_state_init(&t->pdev.dev, &t->ars_state); |
| 1351 | } | 1392 | } |
| @@ -1377,6 +1418,8 @@ static int nfit_test1_alloc(struct nfit_test *t) | |||
| 1377 | if (!t->spa_set[1]) | 1418 | if (!t->spa_set[1]) |
| 1378 | return -ENOMEM; | 1419 | return -ENOMEM; |
| 1379 | 1420 | ||
| 1421 | if (nfit_test_dimm_init(t)) | ||
| 1422 | return -ENOMEM; | ||
| 1380 | smart_init(t); | 1423 | smart_init(t); |
| 1381 | return ars_state_init(&t->pdev.dev, &t->ars_state); | 1424 | return ars_state_init(&t->pdev.dev, &t->ars_state); |
| 1382 | } | 1425 | } |
| @@ -2222,6 +2265,9 @@ static void nfit_test1_setup(struct nfit_test *t) | |||
| 2222 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); | 2265 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); |
| 2223 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); | 2266 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); |
| 2224 | set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); | 2267 | set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); |
| 2268 | set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); | ||
| 2269 | set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
| 2270 | set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
| 2225 | } | 2271 | } |
| 2226 | 2272 | ||
| 2227 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, | 2273 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, |
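Beyond the existing fail_cmd bitmask, each emulated DIMM now exposes a fail_cmd_code attribute so a test can choose which errno get_dimm() reports instead of the blanket -EIO, and nfit_test_dimm_init() registers the test_dimm devices for both test buses. A hypothetical user-space helper that arms the injection could look like this (the sysfs path assumes the nfit_test_dimm class name and device numbering used by the test module; adjust as needed):

    #include <stdio.h>

    /* Make DSM function 'func' on test_dimm0 fail with 'err' (e.g. -13 for -EACCES). */
    static int inject_dimm_failure(unsigned int func, int err)
    {
            FILE *f = fopen("/sys/class/nfit_test_dimm/test_dimm0/fail_cmd", "w");

            if (!f)
                    return -1;
            fprintf(f, "%#x\n", 1u << func);
            fclose(f);

            f = fopen("/sys/class/nfit_test_dimm/test_dimm0/fail_cmd_code", "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", err);
            fclose(f);
            return 0;
    }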
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile index 4e6d09fb166f..5c7d7001ad37 100644 --- a/tools/testing/selftests/filesystems/Makefile +++ b/tools/testing/selftests/filesystems/Makefile | |||
| @@ -1,8 +1,6 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | TEST_PROGS := dnotify_test devpts_pts | ||
| 3 | all: $(TEST_PROGS) | ||
| 4 | 2 | ||
| 5 | include ../lib.mk | 3 | TEST_GEN_PROGS := devpts_pts |
| 4 | TEST_GEN_PROGS_EXTENDED := dnotify_test | ||
| 6 | 5 | ||
| 7 | clean: | 6 | include ../lib.mk |
| 8 | rm -fr $(TEST_PROGS) | ||
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index dc44de904797..2ddcc96ae456 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
| @@ -4,17 +4,18 @@ top_srcdir = ../../../../ | |||
| 4 | UNAME_M := $(shell uname -m) | 4 | UNAME_M := $(shell uname -m) |
| 5 | 5 | ||
| 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c | 6 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c |
| 7 | LIBKVM_x86_64 = lib/x86.c | 7 | LIBKVM_x86_64 = lib/x86.c lib/vmx.c |
| 8 | 8 | ||
| 9 | TEST_GEN_PROGS_x86_64 = set_sregs_test | 9 | TEST_GEN_PROGS_x86_64 = set_sregs_test |
| 10 | TEST_GEN_PROGS_x86_64 += sync_regs_test | 10 | TEST_GEN_PROGS_x86_64 += sync_regs_test |
| 11 | TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test | ||
| 11 | 12 | ||
| 12 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) | 13 | TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) |
| 13 | LIBKVM += $(LIBKVM_$(UNAME_M)) | 14 | LIBKVM += $(LIBKVM_$(UNAME_M)) |
| 14 | 15 | ||
| 15 | INSTALL_HDR_PATH = $(top_srcdir)/usr | 16 | INSTALL_HDR_PATH = $(top_srcdir)/usr |
| 16 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 17 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
| 17 | CFLAGS += -O2 -g -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) | 18 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) |
| 18 | 19 | ||
| 19 | # After inclusion, $(OUTPUT) is defined and | 20 | # After inclusion, $(OUTPUT) is defined and |
| 20 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 21 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 57974ad46373..637b7017b6ee 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h | |||
| @@ -112,24 +112,27 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, | |||
| 112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, | 112 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, |
| 113 | vm_paddr_t paddr_min, uint32_t memslot); | 113 | vm_paddr_t paddr_min, uint32_t memslot); |
| 114 | 114 | ||
| 115 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); | 115 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void); |
| 116 | void vcpu_set_cpuid( | 116 | void vcpu_set_cpuid( |
| 117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); | 117 | struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); |
| 118 | 118 | ||
| 119 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void); | ||
| 120 | struct kvm_cpuid_entry2 * | 119 | struct kvm_cpuid_entry2 * |
| 121 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 120 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); |
| 122 | uint32_t index); | ||
| 123 | 121 | ||
| 124 | static inline struct kvm_cpuid_entry2 * | 122 | static inline struct kvm_cpuid_entry2 * |
| 125 | find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) | 123 | kvm_get_supported_cpuid_entry(uint32_t function) |
| 126 | { | 124 | { |
| 127 | return find_cpuid_index_entry(cpuid, function, 0); | 125 | return kvm_get_supported_cpuid_index(function, 0); |
| 128 | } | 126 | } |
| 129 | 127 | ||
| 130 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); | 128 | struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); |
| 131 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); | 129 | void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); |
| 132 | 130 | ||
| 131 | typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, | ||
| 132 | vm_paddr_t vmxon_paddr, | ||
| 133 | vm_vaddr_t vmcs_vaddr, | ||
| 134 | vm_paddr_t vmcs_paddr); | ||
| 135 | |||
| 133 | struct kvm_userspace_memory_region * | 136 | struct kvm_userspace_memory_region * |
| 134 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, | 137 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, |
| 135 | uint64_t end); | 138 | uint64_t end); |
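The helper API in kvm_util.h is reworked so callers no longer allocate a kvm_cpuid2 themselves: kvm_get_supported_cpuid() hands back a cached copy of KVM_GET_SUPPORTED_CPUID, and kvm_get_supported_cpuid_entry()/_index() look entries up in it. A typical capability probe with the new interface, as a hedged sketch (the skip path is illustrative):

    struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

    if (!(entry->ecx & CPUID_VMX)) {
            fprintf(stderr, "nested VMX not enabled, skipping test\n");
            exit(0);
    }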
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h new file mode 100644 index 000000000000..6ed8499807fd --- /dev/null +++ b/tools/testing/selftests/kvm/include/vmx.h | |||
| @@ -0,0 +1,494 @@ | |||
| 1 | /* | ||
| 2 | * tools/testing/selftests/kvm/include/vmx.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef SELFTEST_KVM_VMX_H | ||
| 11 | #define SELFTEST_KVM_VMX_H | ||
| 12 | |||
| 13 | #include <stdint.h> | ||
| 14 | #include "x86.h" | ||
| 15 | |||
| 16 | #define CPUID_VMX_BIT 5 | ||
| 17 | |||
| 18 | #define CPUID_VMX (1 << 5) | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Definitions of Primary Processor-Based VM-Execution Controls. | ||
| 22 | */ | ||
| 23 | #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 | ||
| 24 | #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 | ||
| 25 | #define CPU_BASED_HLT_EXITING 0x00000080 | ||
| 26 | #define CPU_BASED_INVLPG_EXITING 0x00000200 | ||
| 27 | #define CPU_BASED_MWAIT_EXITING 0x00000400 | ||
| 28 | #define CPU_BASED_RDPMC_EXITING 0x00000800 | ||
| 29 | #define CPU_BASED_RDTSC_EXITING 0x00001000 | ||
| 30 | #define CPU_BASED_CR3_LOAD_EXITING 0x00008000 | ||
| 31 | #define CPU_BASED_CR3_STORE_EXITING 0x00010000 | ||
| 32 | #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 | ||
| 33 | #define CPU_BASED_CR8_STORE_EXITING 0x00100000 | ||
| 34 | #define CPU_BASED_TPR_SHADOW 0x00200000 | ||
| 35 | #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 | ||
| 36 | #define CPU_BASED_MOV_DR_EXITING 0x00800000 | ||
| 37 | #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 | ||
| 38 | #define CPU_BASED_USE_IO_BITMAPS 0x02000000 | ||
| 39 | #define CPU_BASED_MONITOR_TRAP 0x08000000 | ||
| 40 | #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 | ||
| 41 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | ||
| 42 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | ||
| 43 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | ||
| 44 | |||
| 45 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Definitions of Secondary Processor-Based VM-Execution Controls. | ||
| 49 | */ | ||
| 50 | #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 | ||
| 51 | #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 | ||
| 52 | #define SECONDARY_EXEC_DESC 0x00000004 | ||
| 53 | #define SECONDARY_EXEC_RDTSCP 0x00000008 | ||
| 54 | #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 | ||
| 55 | #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 | ||
| 56 | #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 | ||
| 57 | #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 | ||
| 58 | #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 | ||
| 59 | #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 | ||
| 60 | #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 | ||
| 61 | #define SECONDARY_EXEC_RDRAND_EXITING 0x00000800 | ||
| 62 | #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 | ||
| 63 | #define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 | ||
| 64 | #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 | ||
| 65 | #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 | ||
| 66 | #define SECONDARY_EXEC_ENABLE_PML 0x00020000 | ||
| 67 | #define SECONDARY_EPT_VE 0x00040000 | ||
| 68 | #define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000 | ||
| 69 | #define SECONDARY_EXEC_TSC_SCALING 0x02000000 | ||
| 70 | |||
| 71 | #define PIN_BASED_EXT_INTR_MASK 0x00000001 | ||
| 72 | #define PIN_BASED_NMI_EXITING 0x00000008 | ||
| 73 | #define PIN_BASED_VIRTUAL_NMIS 0x00000020 | ||
| 74 | #define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 | ||
| 75 | #define PIN_BASED_POSTED_INTR 0x00000080 | ||
| 76 | |||
| 77 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 | ||
| 78 | |||
| 79 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 | ||
| 80 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | ||
| 81 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 | ||
| 82 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | ||
| 83 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 | ||
| 84 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 | ||
| 85 | #define VM_EXIT_SAVE_IA32_EFER 0x00100000 | ||
| 86 | #define VM_EXIT_LOAD_IA32_EFER 0x00200000 | ||
| 87 | #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 | ||
| 88 | |||
| 89 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff | ||
| 90 | |||
| 91 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 | ||
| 92 | #define VM_ENTRY_IA32E_MODE 0x00000200 | ||
| 93 | #define VM_ENTRY_SMM 0x00000400 | ||
| 94 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | ||
| 95 | #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 | ||
| 96 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 | ||
| 97 | #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 | ||
| 98 | |||
| 99 | #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff | ||
| 100 | |||
| 101 | #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f | ||
| 102 | #define VMX_MISC_SAVE_EFER_LMA 0x00000020 | ||
| 103 | |||
| 104 | #define EXIT_REASON_FAILED_VMENTRY 0x80000000 | ||
| 105 | #define EXIT_REASON_EXCEPTION_NMI 0 | ||
| 106 | #define EXIT_REASON_EXTERNAL_INTERRUPT 1 | ||
| 107 | #define EXIT_REASON_TRIPLE_FAULT 2 | ||
| 108 | #define EXIT_REASON_PENDING_INTERRUPT 7 | ||
| 109 | #define EXIT_REASON_NMI_WINDOW 8 | ||
| 110 | #define EXIT_REASON_TASK_SWITCH 9 | ||
| 111 | #define EXIT_REASON_CPUID 10 | ||
| 112 | #define EXIT_REASON_HLT 12 | ||
| 113 | #define EXIT_REASON_INVD 13 | ||
| 114 | #define EXIT_REASON_INVLPG 14 | ||
| 115 | #define EXIT_REASON_RDPMC 15 | ||
| 116 | #define EXIT_REASON_RDTSC 16 | ||
| 117 | #define EXIT_REASON_VMCALL 18 | ||
| 118 | #define EXIT_REASON_VMCLEAR 19 | ||
| 119 | #define EXIT_REASON_VMLAUNCH 20 | ||
| 120 | #define EXIT_REASON_VMPTRLD 21 | ||
| 121 | #define EXIT_REASON_VMPTRST 22 | ||
| 122 | #define EXIT_REASON_VMREAD 23 | ||
| 123 | #define EXIT_REASON_VMRESUME 24 | ||
| 124 | #define EXIT_REASON_VMWRITE 25 | ||
| 125 | #define EXIT_REASON_VMOFF 26 | ||
| 126 | #define EXIT_REASON_VMON 27 | ||
| 127 | #define EXIT_REASON_CR_ACCESS 28 | ||
| 128 | #define EXIT_REASON_DR_ACCESS 29 | ||
| 129 | #define EXIT_REASON_IO_INSTRUCTION 30 | ||
| 130 | #define EXIT_REASON_MSR_READ 31 | ||
| 131 | #define EXIT_REASON_MSR_WRITE 32 | ||
| 132 | #define EXIT_REASON_INVALID_STATE 33 | ||
| 133 | #define EXIT_REASON_MWAIT_INSTRUCTION 36 | ||
| 134 | #define EXIT_REASON_MONITOR_INSTRUCTION 39 | ||
| 135 | #define EXIT_REASON_PAUSE_INSTRUCTION 40 | ||
| 136 | #define EXIT_REASON_MCE_DURING_VMENTRY 41 | ||
| 137 | #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 | ||
| 138 | #define EXIT_REASON_APIC_ACCESS 44 | ||
| 139 | #define EXIT_REASON_EOI_INDUCED 45 | ||
| 140 | #define EXIT_REASON_EPT_VIOLATION 48 | ||
| 141 | #define EXIT_REASON_EPT_MISCONFIG 49 | ||
| 142 | #define EXIT_REASON_INVEPT 50 | ||
| 143 | #define EXIT_REASON_RDTSCP 51 | ||
| 144 | #define EXIT_REASON_PREEMPTION_TIMER 52 | ||
| 145 | #define EXIT_REASON_INVVPID 53 | ||
| 146 | #define EXIT_REASON_WBINVD 54 | ||
| 147 | #define EXIT_REASON_XSETBV 55 | ||
| 148 | #define EXIT_REASON_APIC_WRITE 56 | ||
| 149 | #define EXIT_REASON_INVPCID 58 | ||
| 150 | #define EXIT_REASON_PML_FULL 62 | ||
| 151 | #define EXIT_REASON_XSAVES 63 | ||
| 152 | #define EXIT_REASON_XRSTORS 64 | ||
| 153 | #define LAST_EXIT_REASON 64 | ||
| 154 | |||
| 155 | enum vmcs_field { | ||
| 156 | VIRTUAL_PROCESSOR_ID = 0x00000000, | ||
| 157 | POSTED_INTR_NV = 0x00000002, | ||
| 158 | GUEST_ES_SELECTOR = 0x00000800, | ||
| 159 | GUEST_CS_SELECTOR = 0x00000802, | ||
| 160 | GUEST_SS_SELECTOR = 0x00000804, | ||
| 161 | GUEST_DS_SELECTOR = 0x00000806, | ||
| 162 | GUEST_FS_SELECTOR = 0x00000808, | ||
| 163 | GUEST_GS_SELECTOR = 0x0000080a, | ||
| 164 | GUEST_LDTR_SELECTOR = 0x0000080c, | ||
| 165 | GUEST_TR_SELECTOR = 0x0000080e, | ||
| 166 | GUEST_INTR_STATUS = 0x00000810, | ||
| 167 | GUEST_PML_INDEX = 0x00000812, | ||
| 168 | HOST_ES_SELECTOR = 0x00000c00, | ||
| 169 | HOST_CS_SELECTOR = 0x00000c02, | ||
| 170 | HOST_SS_SELECTOR = 0x00000c04, | ||
| 171 | HOST_DS_SELECTOR = 0x00000c06, | ||
| 172 | HOST_FS_SELECTOR = 0x00000c08, | ||
| 173 | HOST_GS_SELECTOR = 0x00000c0a, | ||
| 174 | HOST_TR_SELECTOR = 0x00000c0c, | ||
| 175 | IO_BITMAP_A = 0x00002000, | ||
| 176 | IO_BITMAP_A_HIGH = 0x00002001, | ||
| 177 | IO_BITMAP_B = 0x00002002, | ||
| 178 | IO_BITMAP_B_HIGH = 0x00002003, | ||
| 179 | MSR_BITMAP = 0x00002004, | ||
| 180 | MSR_BITMAP_HIGH = 0x00002005, | ||
| 181 | VM_EXIT_MSR_STORE_ADDR = 0x00002006, | ||
| 182 | VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, | ||
| 183 | VM_EXIT_MSR_LOAD_ADDR = 0x00002008, | ||
| 184 | VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, | ||
| 185 | VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, | ||
| 186 | VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, | ||
| 187 | PML_ADDRESS = 0x0000200e, | ||
| 188 | PML_ADDRESS_HIGH = 0x0000200f, | ||
| 189 | TSC_OFFSET = 0x00002010, | ||
| 190 | TSC_OFFSET_HIGH = 0x00002011, | ||
| 191 | VIRTUAL_APIC_PAGE_ADDR = 0x00002012, | ||
| 192 | VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, | ||
| 193 | APIC_ACCESS_ADDR = 0x00002014, | ||
| 194 | APIC_ACCESS_ADDR_HIGH = 0x00002015, | ||
| 195 | POSTED_INTR_DESC_ADDR = 0x00002016, | ||
| 196 | POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, | ||
| 197 | EPT_POINTER = 0x0000201a, | ||
| 198 | EPT_POINTER_HIGH = 0x0000201b, | ||
| 199 | EOI_EXIT_BITMAP0 = 0x0000201c, | ||
| 200 | EOI_EXIT_BITMAP0_HIGH = 0x0000201d, | ||
| 201 | EOI_EXIT_BITMAP1 = 0x0000201e, | ||
| 202 | EOI_EXIT_BITMAP1_HIGH = 0x0000201f, | ||
| 203 | EOI_EXIT_BITMAP2 = 0x00002020, | ||
| 204 | EOI_EXIT_BITMAP2_HIGH = 0x00002021, | ||
| 205 | EOI_EXIT_BITMAP3 = 0x00002022, | ||
| 206 | EOI_EXIT_BITMAP3_HIGH = 0x00002023, | ||
| 207 | VMREAD_BITMAP = 0x00002026, | ||
| 208 | VMREAD_BITMAP_HIGH = 0x00002027, | ||
| 209 | VMWRITE_BITMAP = 0x00002028, | ||
| 210 | VMWRITE_BITMAP_HIGH = 0x00002029, | ||
| 211 | XSS_EXIT_BITMAP = 0x0000202C, | ||
| 212 | XSS_EXIT_BITMAP_HIGH = 0x0000202D, | ||
| 213 | TSC_MULTIPLIER = 0x00002032, | ||
| 214 | TSC_MULTIPLIER_HIGH = 0x00002033, | ||
| 215 | GUEST_PHYSICAL_ADDRESS = 0x00002400, | ||
| 216 | GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, | ||
| 217 | VMCS_LINK_POINTER = 0x00002800, | ||
| 218 | VMCS_LINK_POINTER_HIGH = 0x00002801, | ||
| 219 | GUEST_IA32_DEBUGCTL = 0x00002802, | ||
| 220 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | ||
| 221 | GUEST_IA32_PAT = 0x00002804, | ||
| 222 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
| 223 | GUEST_IA32_EFER = 0x00002806, | ||
| 224 | GUEST_IA32_EFER_HIGH = 0x00002807, | ||
| 225 | GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, | ||
| 226 | GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, | ||
| 227 | GUEST_PDPTR0 = 0x0000280a, | ||
| 228 | GUEST_PDPTR0_HIGH = 0x0000280b, | ||
| 229 | GUEST_PDPTR1 = 0x0000280c, | ||
| 230 | GUEST_PDPTR1_HIGH = 0x0000280d, | ||
| 231 | GUEST_PDPTR2 = 0x0000280e, | ||
| 232 | GUEST_PDPTR2_HIGH = 0x0000280f, | ||
| 233 | GUEST_PDPTR3 = 0x00002810, | ||
| 234 | GUEST_PDPTR3_HIGH = 0x00002811, | ||
| 235 | GUEST_BNDCFGS = 0x00002812, | ||
| 236 | GUEST_BNDCFGS_HIGH = 0x00002813, | ||
| 237 | HOST_IA32_PAT = 0x00002c00, | ||
| 238 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
| 239 | HOST_IA32_EFER = 0x00002c02, | ||
| 240 | HOST_IA32_EFER_HIGH = 0x00002c03, | ||
| 241 | HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, | ||
| 242 | HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, | ||
| 243 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | ||
| 244 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | ||
| 245 | EXCEPTION_BITMAP = 0x00004004, | ||
| 246 | PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, | ||
| 247 | PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, | ||
| 248 | CR3_TARGET_COUNT = 0x0000400a, | ||
| 249 | VM_EXIT_CONTROLS = 0x0000400c, | ||
| 250 | VM_EXIT_MSR_STORE_COUNT = 0x0000400e, | ||
| 251 | VM_EXIT_MSR_LOAD_COUNT = 0x00004010, | ||
| 252 | VM_ENTRY_CONTROLS = 0x00004012, | ||
| 253 | VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, | ||
| 254 | VM_ENTRY_INTR_INFO_FIELD = 0x00004016, | ||
| 255 | VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, | ||
| 256 | VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, | ||
| 257 | TPR_THRESHOLD = 0x0000401c, | ||
| 258 | SECONDARY_VM_EXEC_CONTROL = 0x0000401e, | ||
| 259 | PLE_GAP = 0x00004020, | ||
| 260 | PLE_WINDOW = 0x00004022, | ||
| 261 | VM_INSTRUCTION_ERROR = 0x00004400, | ||
| 262 | VM_EXIT_REASON = 0x00004402, | ||
| 263 | VM_EXIT_INTR_INFO = 0x00004404, | ||
| 264 | VM_EXIT_INTR_ERROR_CODE = 0x00004406, | ||
| 265 | IDT_VECTORING_INFO_FIELD = 0x00004408, | ||
| 266 | IDT_VECTORING_ERROR_CODE = 0x0000440a, | ||
| 267 | VM_EXIT_INSTRUCTION_LEN = 0x0000440c, | ||
| 268 | VMX_INSTRUCTION_INFO = 0x0000440e, | ||
| 269 | GUEST_ES_LIMIT = 0x00004800, | ||
| 270 | GUEST_CS_LIMIT = 0x00004802, | ||
| 271 | GUEST_SS_LIMIT = 0x00004804, | ||
| 272 | GUEST_DS_LIMIT = 0x00004806, | ||
| 273 | GUEST_FS_LIMIT = 0x00004808, | ||
| 274 | GUEST_GS_LIMIT = 0x0000480a, | ||
| 275 | GUEST_LDTR_LIMIT = 0x0000480c, | ||
| 276 | GUEST_TR_LIMIT = 0x0000480e, | ||
| 277 | GUEST_GDTR_LIMIT = 0x00004810, | ||
| 278 | GUEST_IDTR_LIMIT = 0x00004812, | ||
| 279 | GUEST_ES_AR_BYTES = 0x00004814, | ||
| 280 | GUEST_CS_AR_BYTES = 0x00004816, | ||
| 281 | GUEST_SS_AR_BYTES = 0x00004818, | ||
| 282 | GUEST_DS_AR_BYTES = 0x0000481a, | ||
| 283 | GUEST_FS_AR_BYTES = 0x0000481c, | ||
| 284 | GUEST_GS_AR_BYTES = 0x0000481e, | ||
| 285 | GUEST_LDTR_AR_BYTES = 0x00004820, | ||
| 286 | GUEST_TR_AR_BYTES = 0x00004822, | ||
| 287 | GUEST_INTERRUPTIBILITY_INFO = 0x00004824, | ||
| 288 | GUEST_ACTIVITY_STATE = 0X00004826, | ||
| 289 | GUEST_SYSENTER_CS = 0x0000482A, | ||
| 290 | VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, | ||
| 291 | HOST_IA32_SYSENTER_CS = 0x00004c00, | ||
| 292 | CR0_GUEST_HOST_MASK = 0x00006000, | ||
| 293 | CR4_GUEST_HOST_MASK = 0x00006002, | ||
| 294 | CR0_READ_SHADOW = 0x00006004, | ||
| 295 | CR4_READ_SHADOW = 0x00006006, | ||
| 296 | CR3_TARGET_VALUE0 = 0x00006008, | ||
| 297 | CR3_TARGET_VALUE1 = 0x0000600a, | ||
| 298 | CR3_TARGET_VALUE2 = 0x0000600c, | ||
| 299 | CR3_TARGET_VALUE3 = 0x0000600e, | ||
| 300 | EXIT_QUALIFICATION = 0x00006400, | ||
| 301 | GUEST_LINEAR_ADDRESS = 0x0000640a, | ||
| 302 | GUEST_CR0 = 0x00006800, | ||
| 303 | GUEST_CR3 = 0x00006802, | ||
| 304 | GUEST_CR4 = 0x00006804, | ||
| 305 | GUEST_ES_BASE = 0x00006806, | ||
| 306 | GUEST_CS_BASE = 0x00006808, | ||
| 307 | GUEST_SS_BASE = 0x0000680a, | ||
| 308 | GUEST_DS_BASE = 0x0000680c, | ||
| 309 | GUEST_FS_BASE = 0x0000680e, | ||
| 310 | GUEST_GS_BASE = 0x00006810, | ||
| 311 | GUEST_LDTR_BASE = 0x00006812, | ||
| 312 | GUEST_TR_BASE = 0x00006814, | ||
| 313 | GUEST_GDTR_BASE = 0x00006816, | ||
| 314 | GUEST_IDTR_BASE = 0x00006818, | ||
| 315 | GUEST_DR7 = 0x0000681a, | ||
| 316 | GUEST_RSP = 0x0000681c, | ||
| 317 | GUEST_RIP = 0x0000681e, | ||
| 318 | GUEST_RFLAGS = 0x00006820, | ||
| 319 | GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, | ||
| 320 | GUEST_SYSENTER_ESP = 0x00006824, | ||
| 321 | GUEST_SYSENTER_EIP = 0x00006826, | ||
| 322 | HOST_CR0 = 0x00006c00, | ||
| 323 | HOST_CR3 = 0x00006c02, | ||
| 324 | HOST_CR4 = 0x00006c04, | ||
| 325 | HOST_FS_BASE = 0x00006c06, | ||
| 326 | HOST_GS_BASE = 0x00006c08, | ||
| 327 | HOST_TR_BASE = 0x00006c0a, | ||
| 328 | HOST_GDTR_BASE = 0x00006c0c, | ||
| 329 | HOST_IDTR_BASE = 0x00006c0e, | ||
| 330 | HOST_IA32_SYSENTER_ESP = 0x00006c10, | ||
| 331 | HOST_IA32_SYSENTER_EIP = 0x00006c12, | ||
| 332 | HOST_RSP = 0x00006c14, | ||
| 333 | HOST_RIP = 0x00006c16, | ||
| 334 | }; | ||
| 335 | |||
| 336 | struct vmx_msr_entry { | ||
| 337 | uint32_t index; | ||
| 338 | uint32_t reserved; | ||
| 339 | uint64_t value; | ||
| 340 | } __attribute__ ((aligned(16))); | ||
| 341 | |||
| 342 | static inline int vmxon(uint64_t phys) | ||
| 343 | { | ||
| 344 | uint8_t ret; | ||
| 345 | |||
| 346 | __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" | ||
| 347 | : [ret]"=rm"(ret) | ||
| 348 | : [pa]"m"(phys) | ||
| 349 | : "cc", "memory"); | ||
| 350 | |||
| 351 | return ret; | ||
| 352 | } | ||
| 353 | |||
| 354 | static inline void vmxoff(void) | ||
| 355 | { | ||
| 356 | __asm__ __volatile__("vmxoff"); | ||
| 357 | } | ||
| 358 | |||
| 359 | static inline int vmclear(uint64_t vmcs_pa) | ||
| 360 | { | ||
| 361 | uint8_t ret; | ||
| 362 | |||
| 363 | __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" | ||
| 364 | : [ret]"=rm"(ret) | ||
| 365 | : [pa]"m"(vmcs_pa) | ||
| 366 | : "cc", "memory"); | ||
| 367 | |||
| 368 | return ret; | ||
| 369 | } | ||
| 370 | |||
| 371 | static inline int vmptrld(uint64_t vmcs_pa) | ||
| 372 | { | ||
| 373 | uint8_t ret; | ||
| 374 | |||
| 375 | __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" | ||
| 376 | : [ret]"=rm"(ret) | ||
| 377 | : [pa]"m"(vmcs_pa) | ||
| 378 | : "cc", "memory"); | ||
| 379 | |||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * No guest state (e.g. GPRs) is established by this vmlaunch. | ||
| 385 | */ | ||
| 386 | static inline int vmlaunch(void) | ||
| 387 | { | ||
| 388 | int ret; | ||
| 389 | |||
| 390 | __asm__ __volatile__("push %%rbp;" | ||
| 391 | "push %%rcx;" | ||
| 392 | "push %%rdx;" | ||
| 393 | "push %%rsi;" | ||
| 394 | "push %%rdi;" | ||
| 395 | "push $0;" | ||
| 396 | "vmwrite %%rsp, %[host_rsp];" | ||
| 397 | "lea 1f(%%rip), %%rax;" | ||
| 398 | "vmwrite %%rax, %[host_rip];" | ||
| 399 | "vmlaunch;" | ||
| 400 | "incq (%%rsp);" | ||
| 401 | "1: pop %%rax;" | ||
| 402 | "pop %%rdi;" | ||
| 403 | "pop %%rsi;" | ||
| 404 | "pop %%rdx;" | ||
| 405 | "pop %%rcx;" | ||
| 406 | "pop %%rbp;" | ||
| 407 | : [ret]"=&a"(ret) | ||
| 408 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
| 409 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
| 410 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
| 411 | "r11", "r12", "r13", "r14", "r15"); | ||
| 412 | return ret; | ||
| 413 | } | ||
| 414 | |||
| 415 | /* | ||
| 416 | * No guest state (e.g. GPRs) is established by this vmresume. | ||
| 417 | */ | ||
| 418 | static inline int vmresume(void) | ||
| 419 | { | ||
| 420 | int ret; | ||
| 421 | |||
| 422 | __asm__ __volatile__("push %%rbp;" | ||
| 423 | "push %%rcx;" | ||
| 424 | "push %%rdx;" | ||
| 425 | "push %%rsi;" | ||
| 426 | "push %%rdi;" | ||
| 427 | "push $0;" | ||
| 428 | "vmwrite %%rsp, %[host_rsp];" | ||
| 429 | "lea 1f(%%rip), %%rax;" | ||
| 430 | "vmwrite %%rax, %[host_rip];" | ||
| 431 | "vmresume;" | ||
| 432 | "incq (%%rsp);" | ||
| 433 | "1: pop %%rax;" | ||
| 434 | "pop %%rdi;" | ||
| 435 | "pop %%rsi;" | ||
| 436 | "pop %%rdx;" | ||
| 437 | "pop %%rcx;" | ||
| 438 | "pop %%rbp;" | ||
| 439 | : [ret]"=&a"(ret) | ||
| 440 | : [host_rsp]"r"((uint64_t)HOST_RSP), | ||
| 441 | [host_rip]"r"((uint64_t)HOST_RIP) | ||
| 442 | : "memory", "cc", "rbx", "r8", "r9", "r10", | ||
| 443 | "r11", "r12", "r13", "r14", "r15"); | ||
| 444 | return ret; | ||
| 445 | } | ||
| 446 | |||
| 447 | static inline int vmread(uint64_t encoding, uint64_t *value) | ||
| 448 | { | ||
| 449 | uint64_t tmp; | ||
| 450 | uint8_t ret; | ||
| 451 | |||
| 452 | __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" | ||
| 453 | : [value]"=rm"(tmp), [ret]"=rm"(ret) | ||
| 454 | : [encoding]"r"(encoding) | ||
| 455 | : "cc", "memory"); | ||
| 456 | |||
| 457 | *value = tmp; | ||
| 458 | return ret; | ||
| 459 | } | ||
| 460 | |||
| 461 | /* | ||
| 462 | * A wrapper around vmread that ignores errors and returns zero if the | ||
| 463 | * vmread instruction fails. | ||
| 464 | */ | ||
| 465 | static inline uint64_t vmreadz(uint64_t encoding) | ||
| 466 | { | ||
| 467 | uint64_t value = 0; | ||
| 468 | vmread(encoding, &value); | ||
| 469 | return value; | ||
| 470 | } | ||
| 471 | |||
| 472 | static inline int vmwrite(uint64_t encoding, uint64_t value) | ||
| 473 | { | ||
| 474 | uint8_t ret; | ||
| 475 | |||
| 476 | __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" | ||
| 477 | : [ret]"=rm"(ret) | ||
| 478 | : [value]"rm"(value), [encoding]"r"(encoding) | ||
| 479 | : "cc", "memory"); | ||
| 480 | |||
| 481 | return ret; | ||
| 482 | } | ||
| 483 | |||
| 484 | static inline uint32_t vmcs_revision(void) | ||
| 485 | { | ||
| 486 | return rdmsr(MSR_IA32_VMX_BASIC); | ||
| 487 | } | ||
| 488 | |||
| 489 | void prepare_for_vmx_operation(void); | ||
| 490 | void prepare_vmcs(void *guest_rip, void *guest_rsp); | ||
| 491 | struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid, | ||
| 492 | vmx_guest_code_t guest_code); | ||
| 493 | |||
| 494 | #endif /* !SELFTEST_KVM_VMX_H */ | ||
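The vmlaunch()/vmresume() wrappers above use a small trick to report failure: a zero is pushed on the stack, HOST_RSP and HOST_RIP are written so that a VM exit resumes at the pop, and the incq only executes if the instruction falls through without entering the guest, so the popped return value is 0 after a successful entry and exit, and 1 if VMLAUNCH/VMRESUME itself failed. A hedged usage sketch with the other wrappers from this header (assumes a current VMCS has already been loaded and prepared):

    uint64_t reason;

    if (vmlaunch()) {
            /* VMLAUNCH failed; VM_INSTRUCTION_ERROR says why. */
            reason = vmreadz(VM_INSTRUCTION_ERROR);
    } else {
            /* Back from L2 via a VM exit. */
            reason = vmreadz(VM_EXIT_REASON);
    }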
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 7ca1bb40c498..2cedfda181d4 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||

| @@ -378,7 +378,7 @@ int kvm_memcmp_hva_gva(void *hva, | |||
| 378 | * complicated. This function uses a reasonable default length for | 378 | * complicated. This function uses a reasonable default length for |
| 379 | * the array and performs the appropriate allocation. | 379 | * the array and performs the appropriate allocation. |
| 380 | */ | 380 | */ |
| 381 | struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | 381 | static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) |
| 382 | { | 382 | { |
| 383 | struct kvm_cpuid2 *cpuid; | 383 | struct kvm_cpuid2 *cpuid; |
| 384 | int nent = 100; | 384 | int nent = 100; |
| @@ -402,17 +402,21 @@ struct kvm_cpuid2 *allocate_kvm_cpuid2(void) | |||
| 402 | * Input Args: None | 402 | * Input Args: None |
| 403 | * | 403 | * |
| 404 | * Output Args: | 404 | * Output Args: |
| 405 | * cpuid - The supported KVM CPUID | ||
| 406 | * | 405 | * |
| 407 | * Return: void | 406 | * Return: The supported KVM CPUID |
| 408 | * | 407 | * |
| 409 | * Get the guest CPUID supported by KVM. | 408 | * Get the guest CPUID supported by KVM. |
| 410 | */ | 409 | */ |
| 411 | void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | 410 | struct kvm_cpuid2 *kvm_get_supported_cpuid(void) |
| 412 | { | 411 | { |
| 412 | static struct kvm_cpuid2 *cpuid; | ||
| 413 | int ret; | 413 | int ret; |
| 414 | int kvm_fd; | 414 | int kvm_fd; |
| 415 | 415 | ||
| 416 | if (cpuid) | ||
| 417 | return cpuid; | ||
| 418 | |||
| 419 | cpuid = allocate_kvm_cpuid2(); | ||
| 416 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | 420 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); |
| 417 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 421 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", |
| 418 | KVM_DEV_PATH, kvm_fd, errno); | 422 | KVM_DEV_PATH, kvm_fd, errno); |
| @@ -422,6 +426,7 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
| 422 | ret, errno); | 426 | ret, errno); |
| 423 | 427 | ||
| 424 | close(kvm_fd); | 428 | close(kvm_fd); |
| 429 | return cpuid; | ||
| 425 | } | 430 | } |
| 426 | 431 | ||
| 427 | /* Locate a cpuid entry. | 432 | /* Locate a cpuid entry. |
| @@ -435,12 +440,13 @@ void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) | |||
| 435 | * Return: A pointer to the cpuid entry. Never returns NULL. | 440 | * Return: A pointer to the cpuid entry. Never returns NULL. |
| 436 | */ | 441 | */ |
| 437 | struct kvm_cpuid_entry2 * | 442 | struct kvm_cpuid_entry2 * |
| 438 | find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, | 443 | kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) |
| 439 | uint32_t index) | ||
| 440 | { | 444 | { |
| 445 | struct kvm_cpuid2 *cpuid; | ||
| 441 | struct kvm_cpuid_entry2 *entry = NULL; | 446 | struct kvm_cpuid_entry2 *entry = NULL; |
| 442 | int i; | 447 | int i; |
| 443 | 448 | ||
| 449 | cpuid = kvm_get_supported_cpuid(); | ||
| 444 | for (i = 0; i < cpuid->nent; i++) { | 450 | for (i = 0; i < cpuid->nent; i++) { |
| 445 | if (cpuid->entries[i].function == function && | 451 | if (cpuid->entries[i].function == function && |
| 446 | cpuid->entries[i].index == index) { | 452 | cpuid->entries[i].index == index) { |
| @@ -1435,7 +1441,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, | |||
| 1435 | sparsebit_idx_t pg; | 1441 | sparsebit_idx_t pg; |
| 1436 | 1442 | ||
| 1437 | TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " | 1443 | TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " |
| 1438 | "not divisable by page size.\n" | 1444 | "not divisible by page size.\n" |
| 1439 | " paddr_min: 0x%lx page_size: 0x%x", | 1445 | " paddr_min: 0x%lx page_size: 0x%x", |
| 1440 | paddr_min, vm->page_size); | 1446 | paddr_min, vm->page_size); |
| 1441 | 1447 | ||
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 0c5cf3e0cb6f..b132bc95d183 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c | |||
| @@ -121,7 +121,7 @@ | |||
| 121 | * avoided by moving the setting of the nodes mask bits into | 121 | * avoided by moving the setting of the nodes mask bits into |
| 122 | * the previous nodes num_after setting. | 122 | * the previous nodes num_after setting. |
| 123 | * | 123 | * |
| 124 | * + Node starting index is evenly divisable by the number of bits | 124 | * + Node starting index is evenly divisible by the number of bits |
| 125 | * within a nodes mask member. | 125 | * within a nodes mask member. |
| 126 | * | 126 | * |
| 127 | * + Nodes never represent a range of bits that wrap around the | 127 | * + Nodes never represent a range of bits that wrap around the |
| @@ -1741,7 +1741,7 @@ void sparsebit_validate_internal(struct sparsebit *s) | |||
| 1741 | 1741 | ||
| 1742 | /* Validate node index is divisible by the mask size */ | 1742 | /* Validate node index is divisible by the mask size */ |
| 1743 | if (nodep->idx % MASK_BITS) { | 1743 | if (nodep->idx % MASK_BITS) { |
| 1744 | fprintf(stderr, "Node index not divisable by " | 1744 | fprintf(stderr, "Node index not divisible by " |
| 1745 | "mask size,\n" | 1745 | "mask size,\n" |
| 1746 | " nodep: %p nodep->idx: 0x%lx " | 1746 | " nodep: %p nodep->idx: 0x%lx " |
| 1747 | "MASK_BITS: %lu\n", | 1747 | "MASK_BITS: %lu\n", |
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/vmx.c new file mode 100644 index 000000000000..0231bc0aae7b --- /dev/null +++ b/tools/testing/selftests/kvm/lib/vmx.c | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /* | ||
| 2 | * tools/testing/selftests/kvm/lib/vmx.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #define _GNU_SOURCE /* for program_invocation_name */ | ||
| 10 | |||
| 11 | #include "test_util.h" | ||
| 12 | #include "kvm_util.h" | ||
| 13 | #include "x86.h" | ||
| 14 | #include "vmx.h" | ||
| 15 | |||
| 16 | /* Create a default VM for VMX tests. | ||
| 17 | * | ||
| 18 | * Input Args: | ||
| 19 | * vcpuid - The id of the single VCPU to add to the VM. | ||
| 20 | * guest_code - The vCPU's entry point | ||
| 21 | * | ||
| 22 | * Output Args: None | ||
| 23 | * | ||
| 24 | * Return: | ||
| 25 | * Pointer to opaque structure that describes the created VM. | ||
| 26 | */ | ||
| 27 | struct kvm_vm * | ||
| 28 | vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code) | ||
| 29 | { | ||
| 30 | struct kvm_cpuid2 *cpuid; | ||
| 31 | struct kvm_vm *vm; | ||
| 32 | vm_vaddr_t vmxon_vaddr; | ||
| 33 | vm_paddr_t vmxon_paddr; | ||
| 34 | vm_vaddr_t vmcs_vaddr; | ||
| 35 | vm_paddr_t vmcs_paddr; | ||
| 36 | |||
| 37 | vm = vm_create_default(vcpuid, (void *) guest_code); | ||
| 38 | |||
| 39 | /* Enable nesting in CPUID */ | ||
| 40 | vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); | ||
| 41 | |||
| 42 | /* Setup of a region of guest memory for the vmxon region. */ | ||
| 43 | vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
| 44 | vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr); | ||
| 45 | |||
| 46 | /* Setup of a region of guest memory for a vmcs. */ | ||
| 47 | vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); | ||
| 48 | vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr); | ||
| 49 | |||
| 50 | vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr, | ||
| 51 | vmcs_paddr); | ||
| 52 | |||
| 53 | return vm; | ||
| 54 | } | ||
| 55 | |||
| 56 | void prepare_for_vmx_operation(void) | ||
| 57 | { | ||
| 58 | uint64_t feature_control; | ||
| 59 | uint64_t required; | ||
| 60 | unsigned long cr0; | ||
| 61 | unsigned long cr4; | ||
| 62 | |||
| 63 | /* | ||
| 64 | * Ensure bits in CR0 and CR4 are valid in VMX operation: | ||
| 65 | * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. | ||
| 66 | * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx. | ||
| 67 | */ | ||
| 68 | __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory"); | ||
| 69 | cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1); | ||
| 70 | cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0); | ||
| 71 | __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory"); | ||
| 72 | |||
| 73 | __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory"); | ||
| 74 | cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1); | ||
| 75 | cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0); | ||
| 76 | /* Enable VMX operation */ | ||
| 77 | cr4 |= X86_CR4_VMXE; | ||
| 78 | __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory"); | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: | ||
| 82 | * Bit 0: Lock bit. If clear, VMXON causes a #GP. | ||
| 83 | * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON | ||
| 84 | * outside of SMX causes a #GP. | ||
| 85 | */ | ||
| 86 | required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; | ||
| 87 | required |= FEATURE_CONTROL_LOCKED; | ||
| 88 | feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); | ||
| 89 | if ((feature_control & required) != required) | ||
| 90 | wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required); | ||
| 91 | } | ||
| 92 | |||
| 93 | /* | ||
| 94 | * Initialize the control fields to the most basic settings possible. | ||
| 95 | */ | ||
| 96 | static inline void init_vmcs_control_fields(void) | ||
| 97 | { | ||
| 98 | vmwrite(VIRTUAL_PROCESSOR_ID, 0); | ||
| 99 | vmwrite(POSTED_INTR_NV, 0); | ||
| 100 | |||
| 101 | vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS)); | ||
| 102 | vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS)); | ||
| 103 | vmwrite(EXCEPTION_BITMAP, 0); | ||
| 104 | vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); | ||
| 105 | vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ | ||
| 106 | vmwrite(CR3_TARGET_COUNT, 0); | ||
| 107 | vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) | | ||
| 108 | VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */ | ||
| 109 | vmwrite(VM_EXIT_MSR_STORE_COUNT, 0); | ||
| 110 | vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0); | ||
| 111 | vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) | | ||
| 112 | VM_ENTRY_IA32E_MODE); /* 64-bit guest */ | ||
| 113 | vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0); | ||
| 114 | vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0); | ||
| 115 | vmwrite(TPR_THRESHOLD, 0); | ||
| 116 | vmwrite(SECONDARY_VM_EXEC_CONTROL, 0); | ||
| 117 | |||
| 118 | vmwrite(CR0_GUEST_HOST_MASK, 0); | ||
| 119 | vmwrite(CR4_GUEST_HOST_MASK, 0); | ||
| 120 | vmwrite(CR0_READ_SHADOW, get_cr0()); | ||
| 121 | vmwrite(CR4_READ_SHADOW, get_cr4()); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* | ||
| 125 | * Initialize the host state fields based on the current host state, with | ||
| 126 | * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch | ||
| 127 | * or vmresume. | ||
| 128 | */ | ||
| 129 | static inline void init_vmcs_host_state(void) | ||
| 130 | { | ||
| 131 | uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); | ||
| 132 | |||
| 133 | vmwrite(HOST_ES_SELECTOR, get_es()); | ||
| 134 | vmwrite(HOST_CS_SELECTOR, get_cs()); | ||
| 135 | vmwrite(HOST_SS_SELECTOR, get_ss()); | ||
| 136 | vmwrite(HOST_DS_SELECTOR, get_ds()); | ||
| 137 | vmwrite(HOST_FS_SELECTOR, get_fs()); | ||
| 138 | vmwrite(HOST_GS_SELECTOR, get_gs()); | ||
| 139 | vmwrite(HOST_TR_SELECTOR, get_tr()); | ||
| 140 | |||
| 141 | if (exit_controls & VM_EXIT_LOAD_IA32_PAT) | ||
| 142 | vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT)); | ||
| 143 | if (exit_controls & VM_EXIT_LOAD_IA32_EFER) | ||
| 144 | vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER)); | ||
| 145 | if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) | ||
| 146 | vmwrite(HOST_IA32_PERF_GLOBAL_CTRL, | ||
| 147 | rdmsr(MSR_CORE_PERF_GLOBAL_CTRL)); | ||
| 148 | |||
| 149 | vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS)); | ||
| 150 | |||
| 151 | vmwrite(HOST_CR0, get_cr0()); | ||
| 152 | vmwrite(HOST_CR3, get_cr3()); | ||
| 153 | vmwrite(HOST_CR4, get_cr4()); | ||
| 154 | vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE)); | ||
| 155 | vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE)); | ||
| 156 | vmwrite(HOST_TR_BASE, | ||
| 157 | get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr()))); | ||
| 158 | vmwrite(HOST_GDTR_BASE, get_gdt_base()); | ||
| 159 | vmwrite(HOST_IDTR_BASE, get_idt_base()); | ||
| 160 | vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP)); | ||
| 161 | vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP)); | ||
| 162 | } | ||
| 163 | |||
| 164 | /* | ||
| 165 | * Initialize the guest state fields essentially as a clone of | ||
| 166 | * the host state fields. Some host state fields have fixed | ||
| 167 | * values, and we set the corresponding guest state fields accordingly. | ||
| 168 | */ | ||
| 169 | static inline void init_vmcs_guest_state(void *rip, void *rsp) | ||
| 170 | { | ||
| 171 | vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR)); | ||
| 172 | vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR)); | ||
| 173 | vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR)); | ||
| 174 | vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR)); | ||
| 175 | vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR)); | ||
| 176 | vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR)); | ||
| 177 | vmwrite(GUEST_LDTR_SELECTOR, 0); | ||
| 178 | vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR)); | ||
| 179 | vmwrite(GUEST_INTR_STATUS, 0); | ||
| 180 | vmwrite(GUEST_PML_INDEX, 0); | ||
| 181 | |||
| 182 | vmwrite(VMCS_LINK_POINTER, -1ll); | ||
| 183 | vmwrite(GUEST_IA32_DEBUGCTL, 0); | ||
| 184 | vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT)); | ||
| 185 | vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER)); | ||
| 186 | vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL, | ||
| 187 | vmreadz(HOST_IA32_PERF_GLOBAL_CTRL)); | ||
| 188 | |||
| 189 | vmwrite(GUEST_ES_LIMIT, -1); | ||
| 190 | vmwrite(GUEST_CS_LIMIT, -1); | ||
| 191 | vmwrite(GUEST_SS_LIMIT, -1); | ||
| 192 | vmwrite(GUEST_DS_LIMIT, -1); | ||
| 193 | vmwrite(GUEST_FS_LIMIT, -1); | ||
| 194 | vmwrite(GUEST_GS_LIMIT, -1); | ||
| 195 | vmwrite(GUEST_LDTR_LIMIT, -1); | ||
| 196 | vmwrite(GUEST_TR_LIMIT, 0x67); | ||
| 197 | vmwrite(GUEST_GDTR_LIMIT, 0xffff); | ||
| 198 | vmwrite(GUEST_IDTR_LIMIT, 0xffff); | ||
| 199 | vmwrite(GUEST_ES_AR_BYTES, | ||
| 200 | vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
| 201 | vmwrite(GUEST_CS_AR_BYTES, 0xa09b); | ||
| 202 | vmwrite(GUEST_SS_AR_BYTES, 0xc093); | ||
| 203 | vmwrite(GUEST_DS_AR_BYTES, | ||
| 204 | vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
| 205 | vmwrite(GUEST_FS_AR_BYTES, | ||
| 206 | vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
| 207 | vmwrite(GUEST_GS_AR_BYTES, | ||
| 208 | vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093); | ||
| 209 | vmwrite(GUEST_LDTR_AR_BYTES, 0x10000); | ||
| 210 | vmwrite(GUEST_TR_AR_BYTES, 0x8b); | ||
| 211 | vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); | ||
| 212 | vmwrite(GUEST_ACTIVITY_STATE, 0); | ||
| 213 | vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS)); | ||
| 214 | vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0); | ||
| 215 | |||
| 216 | vmwrite(GUEST_CR0, vmreadz(HOST_CR0)); | ||
| 217 | vmwrite(GUEST_CR3, vmreadz(HOST_CR3)); | ||
| 218 | vmwrite(GUEST_CR4, vmreadz(HOST_CR4)); | ||
| 219 | vmwrite(GUEST_ES_BASE, 0); | ||
| 220 | vmwrite(GUEST_CS_BASE, 0); | ||
| 221 | vmwrite(GUEST_SS_BASE, 0); | ||
| 222 | vmwrite(GUEST_DS_BASE, 0); | ||
| 223 | vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE)); | ||
| 224 | vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE)); | ||
| 225 | vmwrite(GUEST_LDTR_BASE, 0); | ||
| 226 | vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE)); | ||
| 227 | vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); | ||
| 228 | vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); | ||
| 229 | vmwrite(GUEST_DR7, 0x400); | ||
| 230 | vmwrite(GUEST_RSP, (uint64_t)rsp); | ||
| 231 | vmwrite(GUEST_RIP, (uint64_t)rip); | ||
| 232 | vmwrite(GUEST_RFLAGS, 2); | ||
| 233 | vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); | ||
| 234 | vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); | ||
| 235 | vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP)); | ||
| 236 | } | ||
| 237 | |||
| 238 | void prepare_vmcs(void *guest_rip, void *guest_rsp) | ||
| 239 | { | ||
| 240 | init_vmcs_control_fields(); | ||
| 241 | init_vmcs_host_state(); | ||
| 242 | init_vmcs_guest_state(guest_rip, guest_rsp); | ||
| 243 | } | ||
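vm_create_default_vmx() only allocates the vmxon and vmcs pages and passes their addresses to the guest; the guest itself is expected to call the helpers above in order. A sketch of that flow inside an L1 guest, under the assumption that l2_guest_code and l2_stack_top are provided by the test (error handling via GUEST_ASSERT or similar is omitted):

    static void l1_guest_code(vm_vaddr_t vmxon_vaddr, vm_paddr_t vmxon_paddr,
                              vm_vaddr_t vmcs_vaddr, vm_paddr_t vmcs_paddr)
    {
            prepare_for_vmx_operation();

            /* The VMXON region and the VMCS must begin with the VMCS revision id. */
            *(uint32_t *)vmxon_vaddr = vmcs_revision();
            if (vmxon(vmxon_paddr))
                    return;

            *(uint32_t *)vmcs_vaddr = vmcs_revision();
            if (vmclear(vmcs_paddr) || vmptrld(vmcs_paddr))
                    return;

            prepare_vmcs(l2_guest_code, l2_stack_top);
            vmlaunch();
    }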
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c new file mode 100644 index 000000000000..8f7f62093add --- /dev/null +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | |||
| @@ -0,0 +1,231 @@ | |||
| 1 | /* | ||
| 2 | * gtests/tests/vmx_tsc_adjust_test.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2018, Google LLC. | ||
| 5 | * | ||
| 6 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 7 | * | ||
| 8 | * | ||
| 9 | * IA32_TSC_ADJUST test | ||
| 10 | * | ||
| 11 | * According to the SDM, "if an execution of WRMSR to the | ||
| 12 | * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC, | ||
| 13 | * the logical processor also adds (or subtracts) value X from the | ||
| 14 | * IA32_TSC_ADJUST MSR." | ||
| 15 | * | ||
| 16 | * Note that when L1 doesn't intercept writes to IA32_TSC, a | ||
| 17 | * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC | ||
| 18 | * value. | ||
| 19 | * | ||
| 20 | * This test verifies that this unusual case is handled correctly. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include "test_util.h" | ||
| 24 | #include "kvm_util.h" | ||
| 25 | #include "x86.h" | ||
| 26 | #include "vmx.h" | ||
| 27 | |||
| 28 | #include <string.h> | ||
| 29 | #include <sys/ioctl.h> | ||
| 30 | |||
| 31 | #ifndef MSR_IA32_TSC_ADJUST | ||
| 32 | #define MSR_IA32_TSC_ADJUST 0x3b | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #define PAGE_SIZE 4096 | ||
| 36 | #define VCPU_ID 5 | ||
| 37 | |||
| 38 | #define TSC_ADJUST_VALUE (1ll << 32) | ||
| 39 | #define TSC_OFFSET_VALUE -(1ll << 48) | ||
| 40 | |||
| 41 | enum { | ||
| 42 | PORT_ABORT = 0x1000, | ||
| 43 | PORT_REPORT, | ||
| 44 | PORT_DONE, | ||
| 45 | }; | ||
| 46 | |||
| 47 | struct vmx_page { | ||
| 48 | vm_vaddr_t virt; | ||
| 49 | vm_paddr_t phys; | ||
| 50 | }; | ||
| 51 | |||
| 52 | enum { | ||
| 53 | VMXON_PAGE = 0, | ||
| 54 | VMCS_PAGE, | ||
| 55 | MSR_BITMAP_PAGE, | ||
| 56 | |||
| 57 | NUM_VMX_PAGES, | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct kvm_single_msr { | ||
| 61 | struct kvm_msrs header; | ||
| 62 | struct kvm_msr_entry entry; | ||
| 63 | } __attribute__((packed)); | ||
| 64 | |||
| 65 | /* The virtual machine object. */ | ||
| 66 | static struct kvm_vm *vm; | ||
| 67 | |||
| 68 | /* Array of vmx_page descriptors that is shared with the guest. */ | ||
| 69 | struct vmx_page *vmx_pages; | ||
| 70 | |||
| 71 | #define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg)) | ||
| 72 | static void do_exit_to_l0(uint16_t port, unsigned long arg) | ||
| 73 | { | ||
| 74 | __asm__ __volatile__("in %[port], %%al" | ||
| 75 | : | ||
| 76 | : [port]"d"(port), "D"(arg) | ||
| 77 | : "rax"); | ||
| 78 | } | ||
| 79 | |||
| 80 | |||
| 81 | #define GUEST_ASSERT(_condition) do { \ | ||
| 82 | if (!(_condition)) \ | ||
| 83 | exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \ | ||
| 84 | } while (0) | ||
| 85 | |||
| 86 | static void check_ia32_tsc_adjust(int64_t max) | ||
| 87 | { | ||
| 88 | int64_t adjust; | ||
| 89 | |||
| 90 | adjust = rdmsr(MSR_IA32_TSC_ADJUST); | ||
| 91 | exit_to_l0(PORT_REPORT, adjust); | ||
| 92 | GUEST_ASSERT(adjust <= max); | ||
| 93 | } | ||
| 94 | |||
| 95 | static void l2_guest_code(void) | ||
| 96 | { | ||
| 97 | uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; | ||
| 98 | |||
| 99 | wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); | ||
| 100 | check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); | ||
| 101 | |||
| 102 | /* Exit to L1 */ | ||
| 103 | __asm__ __volatile__("vmcall"); | ||
| 104 | } | ||
| 105 | |||
| 106 | static void l1_guest_code(struct vmx_page *vmx_pages) | ||
| 107 | { | ||
| 108 | #define L2_GUEST_STACK_SIZE 64 | ||
| 109 | unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; | ||
| 110 | uint32_t control; | ||
| 111 | uintptr_t save_cr3; | ||
| 112 | |||
| 113 | GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE); | ||
| 114 | wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE); | ||
| 115 | check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); | ||
| 116 | |||
| 117 | prepare_for_vmx_operation(); | ||
| 118 | |||
| 119 | /* Enter VMX root operation. */ | ||
| 120 | *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision(); | ||
| 121 | GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys)); | ||
| 122 | |||
| 123 | /* Load a VMCS. */ | ||
| 124 | *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision(); | ||
| 125 | GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys)); | ||
| 126 | GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys)); | ||
| 127 | |||
| 128 | /* Prepare the VMCS for L2 execution. */ | ||
| 129 | prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); | ||
| 130 | control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); | ||
| 131 | control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING; | ||
| 132 | vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); | ||
| 133 | vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys); | ||
| 134 | vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE); | ||
| 135 | |||
| 136 | /* Jump into L2. First, test failure to load guest CR3. */ | ||
| 137 | save_cr3 = vmreadz(GUEST_CR3); | ||
| 138 | vmwrite(GUEST_CR3, -1ull); | ||
| 139 | GUEST_ASSERT(!vmlaunch()); | ||
| 140 | GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == | ||
| 141 | (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE)); | ||
| 142 | check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); | ||
| 143 | vmwrite(GUEST_CR3, save_cr3); | ||
| 144 | |||
| 145 | GUEST_ASSERT(!vmlaunch()); | ||
| 146 | GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); | ||
| 147 | |||
| 148 | check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); | ||
| 149 | |||
| 150 | exit_to_l0(PORT_DONE, 0); | ||
| 151 | } | ||
| 152 | |||
| 153 | static void allocate_vmx_page(struct vmx_page *page) | ||
| 154 | { | ||
| 155 | vm_vaddr_t virt; | ||
| 156 | |||
| 157 | virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0); | ||
| 158 | memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE); | ||
| 159 | |||
| 160 | page->virt = virt; | ||
| 161 | page->phys = addr_gva2gpa(vm, virt); | ||
| 162 | } | ||
| 163 | |||
| 164 | static vm_vaddr_t allocate_vmx_pages(void) | ||
| 165 | { | ||
| 166 | vm_vaddr_t vmx_pages_vaddr; | ||
| 167 | int i; | ||
| 168 | |||
| 169 | vmx_pages_vaddr = vm_vaddr_alloc( | ||
| 170 | vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0); | ||
| 171 | |||
| 172 | vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr); | ||
| 173 | |||
| 174 | for (i = 0; i < NUM_VMX_PAGES; i++) | ||
| 175 | allocate_vmx_page(&vmx_pages[i]); | ||
| 176 | |||
| 177 | return vmx_pages_vaddr; | ||
| 178 | } | ||
| 179 | |||
| 180 | void report(int64_t val) | ||
| 181 | { | ||
| 182 | printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", | ||
| 183 | val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); | ||
| 184 | } | ||
| 185 | |||
| 186 | int main(int argc, char *argv[]) | ||
| 187 | { | ||
| 188 | vm_vaddr_t vmx_pages_vaddr; | ||
| 189 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | ||
| 190 | |||
| 191 | if (!(entry->ecx & CPUID_VMX)) { | ||
| 192 | printf("nested VMX not enabled, skipping test"); | ||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); | ||
| 197 | |||
| 198 | /* Allocate VMX pages and shared descriptors (vmx_pages). */ | ||
| 199 | vmx_pages_vaddr = allocate_vmx_pages(); | ||
| 200 | vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr); | ||
| 201 | |||
| 202 | for (;;) { | ||
| 203 | volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); | ||
| 204 | struct kvm_regs regs; | ||
| 205 | |||
| 206 | vcpu_run(vm, VCPU_ID); | ||
| 207 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | ||
| 208 | "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n", | ||
| 209 | run->exit_reason, | ||
| 210 | exit_reason_str(run->exit_reason)); | ||
| 211 | |||
| 212 | vcpu_regs_get(vm, VCPU_ID, ®s); | ||
| 213 | |||
| 214 | switch (run->io.port) { | ||
| 215 | case PORT_ABORT: | ||
| 216 | TEST_ASSERT(false, "%s", (const char *) regs.rdi); | ||
| 217 | /* NOT REACHED */ | ||
| 218 | case PORT_REPORT: | ||
| 219 | report(regs.rdi); | ||
| 220 | break; | ||
| 221 | case PORT_DONE: | ||
| 222 | goto done; | ||
| 223 | default: | ||
| 224 | TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 228 | done: | ||
| 229 | kvm_vm_free(vm); | ||
| 230 | return 0; | ||
| 231 | } | ||
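The bounds checked by the test above follow directly from the SDM rule quoted in the file header: a direct WRMSR to IA32_TSC applies the same delta to IA32_TSC_ADJUST. L1 writes back its own TSC minus TSC_ADJUST_VALUE, and L2, whose IA32_TSC writes are not intercepted and therefore land on L1's TSC, reconstructs L1's TSC from its offset view and repeats the write, so IA32_TSC_ADJUST should reach roughly -1 and then -2 times TSC_ADJUST_VALUE; the test uses "<=" rather than "==" because the TSC keeps advancing between each read and write. A small host-side model of that bookkeeping (illustrative only, not part of the selftest):

	/* Illustrative model only; the constant matches the test, and elapsed
	 * cycles between reads and writes are ignored (which is why the test
	 * checks "<=" rather than "=="). */
	#include <stdint.h>
	#include <stdio.h>

	#define TSC_ADJUST_VALUE (1ll << 32)

	static int64_t tsc = 12345;	/* arbitrary starting L1 TSC */
	static int64_t tsc_adjust;	/* IA32_TSC_ADJUST starts at 0 */

	/* SDM rule: WRMSR(IA32_TSC, v) adds (v - TSC) to IA32_TSC_ADJUST too. */
	static void wrmsr_tsc(int64_t v)
	{
		tsc_adjust += v - tsc;
		tsc = v;
	}

	int main(void)
	{
		/* L1's write in l1_guest_code(). */
		wrmsr_tsc(tsc - TSC_ADJUST_VALUE);
		printf("after L1 write: %lld (bound: -1 * TSC_ADJUST_VALUE)\n",
		       (long long)tsc_adjust);

		/* L2's write in l2_guest_code(): it reconstructs L1's TSC as
		 * rdtsc() - TSC_OFFSET_VALUE and, since IA32_TSC writes are not
		 * intercepted, the write lands on L1's TSC and the rule fires
		 * again. */
		wrmsr_tsc(tsc - TSC_ADJUST_VALUE);
		printf("after L2 write: %lld (bound: -2 * TSC_ADJUST_VALUE)\n",
		       (long long)tsc_adjust);

		return 0;
	}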
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 785fc18a16b4..8f1e13d2e547 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
| @@ -5,7 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g | |||
| 5 | CFLAGS += -I../../../../usr/include/ | 5 | CFLAGS += -I../../../../usr/include/ |
| 6 | 6 | ||
| 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh | 7 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh |
| 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh | 8 | TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh |
| 9 | TEST_GEN_FILES = socket | 9 | TEST_GEN_FILES = socket |
| 10 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy | 10 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy |
| 11 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 11 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |
