author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-02-08 19:18:14 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-02-08 19:18:14 -0500 |
commit | 56c7303e62996fc7b49aea1fd967ccdf94f3a5d9 (patch) | |
tree | 9e5ff563595eb3d5d274a293d87fb811d168e762 | |
parent | 6e978b22efa1db9f6e71b24440b5f1d93e968ee3 (diff) | |
parent | 565ebe8073f84ced436a18e76a5ba8e6bb73dfb3 (diff) |
Merge back earlier cpufreq changes for v4.11.
350 files changed, 4042 insertions, 2435 deletions
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt index 4bc7287806de..978463a7c81e 100644 --- a/Documentation/cpu-freq/core.txt +++ b/Documentation/cpu-freq/core.txt | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | Dominik Brodowski <linux@brodo.de> | 9 | Dominik Brodowski <linux@brodo.de> |
10 | David Kimdon <dwhedon@debian.org> | 10 | David Kimdon <dwhedon@debian.org> |
11 | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
12 | Viresh Kumar <viresh.kumar@linaro.org> | ||
11 | 13 | ||
12 | 14 | ||
13 | 15 | ||
@@ -36,10 +38,11 @@ speed limits (like LCD drivers on ARM architecture). Additionally, the | |||
36 | kernel "constant" loops_per_jiffy is updated on frequency changes | 38 | kernel "constant" loops_per_jiffy is updated on frequency changes |
37 | here. | 39 | here. |
38 | 40 | ||
39 | Reference counting is done by cpufreq_get_cpu and cpufreq_put_cpu, | 41 | Reference counting of the cpufreq policies is done by cpufreq_cpu_get |
40 | which make sure that the cpufreq processor driver is correctly | 42 | and cpufreq_cpu_put, which make sure that the cpufreq driver is |
41 | registered with the core, and will not be unloaded until | 43 | correctly registered with the core, and will not be unloaded until |
42 | cpufreq_put_cpu is called. | 44 | cpufreq_cpu_put is called. That also ensures that the respective cpufreq |
45 | policy doesn't get freed while being used. | ||
43 | 46 | ||
44 | 2. CPUFreq notifiers | 47 | 2. CPUFreq notifiers |
45 | ==================== | 48 | ==================== |
@@ -69,18 +72,16 @@ CPUFreq policy notifier is called twice for a policy transition: | |||
69 | The phase is specified in the second argument to the notifier. | 72 | The phase is specified in the second argument to the notifier. |
70 | 73 | ||
71 | The third argument, a void *pointer, points to a struct cpufreq_policy | 74 | The third argument, a void *pointer, points to a struct cpufreq_policy |
72 | consisting of five values: cpu, min, max, policy and max_cpu_freq. min | 75 | consisting of several values, including min, max (the lower and upper |
73 | and max are the lower and upper frequencies (in kHz) of the new | 76 | frequencies (in kHz) of the new policy). |
74 | policy, policy the new policy, cpu the number of the affected CPU; and | ||
75 | max_cpu_freq the maximum supported CPU frequency. This value is given | ||
76 | for informational purposes only. | ||
77 | 77 | ||
78 | 78 | ||
79 | 2.2 CPUFreq transition notifiers | 79 | 2.2 CPUFreq transition notifiers |
80 | -------------------------------- | 80 | -------------------------------- |
81 | 81 | ||
82 | These are notified twice when the CPUfreq driver switches the CPU core | 82 | These are notified twice for each online CPU in the policy, when the |
83 | frequency and this change has any external implications. | 83 | CPUfreq driver switches the CPU core frequency and this change has |
84 | any external implications. | ||
84 | 85 | ||
85 | The second argument specifies the phase - CPUFREQ_PRECHANGE or | 86 | The second argument specifies the phase - CPUFREQ_PRECHANGE or |
86 | CPUFREQ_POSTCHANGE. | 87 | CPUFREQ_POSTCHANGE. |
@@ -90,6 +91,7 @@ values: | |||
90 | cpu - number of the affected CPU | 91 | cpu - number of the affected CPU |
91 | old - old frequency | 92 | old - old frequency |
92 | new - new frequency | 93 | new - new frequency |
94 | flags - flags of the cpufreq driver | ||
93 | 95 | ||
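For illustration, a minimal sketch (not part of this patch) of a transition notifier that consumes the cpu/old/new/flags values listed above; my_transition and my_nb are placeholder names:

    #include <linux/cpufreq.h>
    #include <linux/module.h>

    static int my_transition(struct notifier_block *nb,
                             unsigned long phase, void *data)
    {
            struct cpufreq_freqs *freqs = data;

            /* phase is CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE */
            if (phase == CPUFREQ_POSTCHANGE)
                    pr_info("cpu%u: %u kHz -> %u kHz\n",
                            freqs->cpu, freqs->old, freqs->new);
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_transition,
    };

    static int __init my_notifier_init(void)
    {
            return cpufreq_register_notifier(&my_nb,
                                             CPUFREQ_TRANSITION_NOTIFIER);
    }
    module_init(my_notifier_init);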
94 | 3. CPUFreq Table Generation with Operating Performance Point (OPP) | 96 | 3. CPUFreq Table Generation with Operating Performance Point (OPP) |
95 | ================================================================== | 97 | ================================================================== |
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt index 772b94fde264..f71e6be26b83 100644 --- a/Documentation/cpu-freq/cpu-drivers.txt +++ b/Documentation/cpu-freq/cpu-drivers.txt | |||
@@ -9,6 +9,8 @@ | |||
9 | 9 | ||
10 | 10 | ||
11 | Dominik Brodowski <linux@brodo.de> | 11 | Dominik Brodowski <linux@brodo.de> |
12 | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
13 | Viresh Kumar <viresh.kumar@linaro.org> | ||
12 | 14 | ||
13 | 15 | ||
14 | 16 | ||
@@ -49,49 +51,65 @@ using cpufreq_register_driver() | |||
49 | 51 | ||
50 | What shall this struct cpufreq_driver contain? | 52 | What shall this struct cpufreq_driver contain? |
51 | 53 | ||
52 | cpufreq_driver.name - The name of this driver. | 54 | .name - The name of this driver. |
53 | 55 | ||
54 | cpufreq_driver.init - A pointer to the per-CPU initialization | 56 | .init - A pointer to the per-policy initialization function. |
55 | function. | ||
56 | 57 | ||
57 | cpufreq_driver.verify - A pointer to a "verification" function. | 58 | .verify - A pointer to a "verification" function. |
58 | 59 | ||
59 | cpufreq_driver.setpolicy _or_ | 60 | .setpolicy _or_ .fast_switch _or_ .target _or_ .target_index - See |
60 | cpufreq_driver.target/ | 61 | below on the differences. |
61 | target_index - See below on the differences. | ||
62 | 62 | ||
63 | And optionally | 63 | And optionally |
64 | 64 | ||
65 | cpufreq_driver.exit - A pointer to a per-CPU cleanup | 65 | .flags - Hints for the cpufreq core. |
66 | function called during CPU_POST_DEAD | ||
67 | phase of cpu hotplug process. | ||
68 | 66 | ||
69 | cpufreq_driver.stop_cpu - A pointer to a per-CPU stop function | 67 | .driver_data - cpufreq driver specific data. |
70 | called during CPU_DOWN_PREPARE phase of | ||
71 | cpu hotplug process. | ||
72 | 68 | ||
73 | cpufreq_driver.resume - A pointer to a per-CPU resume function | 69 | .resolve_freq - Returns the most appropriate frequency for a target |
74 | which is called with interrupts disabled | 70 | frequency. Doesn't change the frequency though. |
75 | and _before_ the pre-suspend frequency | ||
76 | and/or policy is restored by a call to | ||
77 | ->target/target_index or ->setpolicy. | ||
78 | 71 | ||
79 | cpufreq_driver.attr - A pointer to a NULL-terminated list of | 72 | .get_intermediate and target_intermediate - Used to switch to stable |
80 | "struct freq_attr" which allow to | 73 | frequency while changing CPU frequency. |
81 | export values to sysfs. | ||
82 | 74 | ||
83 | cpufreq_driver.get_intermediate | 75 | .get - Returns current frequency of the CPU. |
84 | and target_intermediate Used to switch to stable frequency while | 76 | |
85 | changing CPU frequency. | 77 | .bios_limit - Returns HW/BIOS max frequency limitations for the CPU. |
78 | |||
79 | .exit - A pointer to a per-policy cleanup function called during | ||
80 | CPU_POST_DEAD phase of cpu hotplug process. | ||
81 | |||
82 | .stop_cpu - A pointer to a per-policy stop function called during | ||
83 | CPU_DOWN_PREPARE phase of cpu hotplug process. | ||
84 | |||
85 | .suspend - A pointer to a per-policy suspend function which is called | ||
86 | with interrupts disabled and _after_ the governor is stopped for the | ||
87 | policy. | ||
88 | |||
89 | .resume - A pointer to a per-policy resume function which is called | ||
90 | with interrupts disabled and _before_ the governor is started again. | ||
91 | |||
92 | .ready - A pointer to a per-policy ready function which is called after | ||
93 | the policy is fully initialized. | ||
94 | |||
95 | .attr - A pointer to a NULL-terminated list of "struct freq_attr" which | ||
96 | allows exporting values to sysfs. | ||
97 | |||
98 | .boost_enabled - If set, boost frequencies are enabled. | ||
99 | |||
100 | .set_boost - A pointer to a per-policy function to enable/disable boost | ||
101 | frequencies. | ||
86 | 102 | ||
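To show how these fields typically fit together, here is a hedged sketch of a driver declaration built on generic helpers from the cpufreq core; the my_* callbacks are placeholders a real driver would supply:

    #include <linux/cpufreq.h>
    #include <linux/module.h>

    static struct cpufreq_driver my_driver = {
            .name         = "my-cpufreq",
            .flags        = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
            .init         = my_cpu_init,
            .exit         = my_cpu_exit,
            .verify       = cpufreq_generic_frequency_table_verify,
            .target_index = my_target_index,
            .get          = cpufreq_generic_get,
            .attr         = cpufreq_generic_attr,
    };

    static int __init my_driver_module_init(void)
    {
            return cpufreq_register_driver(&my_driver);
    }
    module_init(my_driver_module_init);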
87 | 103 | ||
88 | 1.2 Per-CPU Initialization | 104 | 1.2 Per-CPU Initialization |
89 | -------------------------- | 105 | -------------------------- |
90 | 106 | ||
91 | Whenever a new CPU is registered with the device model, or after the | 107 | Whenever a new CPU is registered with the device model, or after the |
92 | cpufreq driver registers itself, the per-CPU initialization function | 108 | cpufreq driver registers itself, the per-policy initialization function |
93 | cpufreq_driver.init is called. It takes a struct cpufreq_policy | 109 | cpufreq_driver.init is called if no cpufreq policy existed for the CPU. |
94 | *policy as argument. What to do now? | 110 | Note that the .init() and .exit() routines are called only once for the |
111 | policy and not for each CPU managed by the policy. It takes a struct | ||
112 | cpufreq_policy *policy as argument. What to do now? | ||
95 | 113 | ||
96 | If necessary, activate the CPUfreq support on your CPU. | 114 | If necessary, activate the CPUfreq support on your CPU. |
97 | 115 | ||
@@ -117,47 +135,45 @@ policy->governor must contain the "default policy" for | |||
117 | cpufreq_driver.setpolicy or | 135 | cpufreq_driver.setpolicy or |
118 | cpufreq_driver.target/target_index is called | 136 | cpufreq_driver.target/target_index is called |
119 | with these values. | 137 | with these values. |
138 | policy->cpus Update this with the mask of the | ||
139 | (online + offline) CPUs that do DVFS | ||
140 | along with this CPU (i.e. that share | ||
141 | clock/voltage rails with it). | ||
120 | 142 | ||
121 | For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the | 143 | For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the |
122 | frequency table helpers might be helpful. See the section 2 for more information | 144 | frequency table helpers might be helpful. See the section 2 for more information |
123 | on them. | 145 | on them. |
124 | 146 | ||
125 | SMP systems normally have same clock source for a group of cpus. For these the | ||
126 | .init() would be called only once for the first online cpu. Here the .init() | ||
127 | routine must initialize policy->cpus with mask of all possible cpus (Online + | ||
128 | Offline) that share the clock. Then the core would copy this mask onto | ||
129 | policy->related_cpus and will reset policy->cpus to carry only online cpus. | ||
130 | |||
131 | 147 | ||
132 | 1.3 verify | 148 | 1.3 verify |
133 | ------------ | 149 | ---------- |
134 | 150 | ||
135 | When the user decides a new policy (consisting of | 151 | When the user decides a new policy (consisting of |
136 | "policy,governor,min,max") shall be set, this policy must be validated | 152 | "policy,governor,min,max") shall be set, this policy must be validated |
137 | so that incompatible values can be corrected. For verifying these | 153 | so that incompatible values can be corrected. For verifying these |
138 | values, a frequency table helper and/or the | 154 | values, the cpufreq_verify_within_limits(struct cpufreq_policy *policy, |
139 | cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned | 155 | unsigned int min_freq, unsigned int max_freq) function might be helpful. |
140 | int min_freq, unsigned int max_freq) function might be helpful. See | 156 | See section 2 for details on frequency table helpers. |
141 | section 2 for details on frequency table helpers. | ||
142 | 157 | ||
143 | You need to make sure that at least one valid frequency (or operating | 158 | You need to make sure that at least one valid frequency (or operating |
144 | range) is within policy->min and policy->max. If necessary, increase | 159 | range) is within policy->min and policy->max. If necessary, increase |
145 | policy->max first, and only if this is no solution, decrease policy->min. | 160 | policy->max first, and only if this is no solution, decrease policy->min. |
146 | 161 | ||
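As a hedged sketch (assuming <linux/cpufreq.h>), a ->verify() callback for a driver without a frequency table can simply clamp the requested limits to the hardware bounds; drivers with a table would typically use cpufreq_frequency_table_verify() instead (see section 2):

    static int my_verify(struct cpufreq_policy *policy)
    {
            /* keep policy->min/max within the hardware limits */
            cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                         policy->cpuinfo.max_freq);
            return 0;
    }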
147 | 162 | ||
148 | 1.4 target/target_index or setpolicy? | 163 | 1.4 target or target_index or setpolicy or fast_switch? |
149 | ---------------------------- | 164 | ------------------------------------------------------- |
150 | 165 | ||
151 | Most cpufreq drivers or even most cpu frequency scaling algorithms | 166 | Most cpufreq drivers or even most cpu frequency scaling algorithms |
152 | only allow the CPU to be set to one frequency. For these, you use the | 167 | only allow the CPU frequency to be set to predefined fixed values. For |
153 | ->target/target_index call. | 168 | these, you use the ->target(), ->target_index() or ->fast_switch() |
169 | callbacks. | ||
154 | 170 | ||
155 | Some cpufreq-capable processors switch the frequency between certain | 171 | Some cpufreq capable processors switch the frequency between certain |
156 | limits on their own. These shall use the ->setpolicy call | 172 | limits on their own. These shall use the ->setpolicy() callback. |
157 | 173 | ||
158 | 174 | ||
159 | 1.5. target/target_index | 175 | 1.5. target/target_index |
160 | ------------- | 176 | ------------------------ |
161 | 177 | ||
162 | The target_index call has two arguments: struct cpufreq_policy *policy, | 178 | The target_index call has two arguments: struct cpufreq_policy *policy, |
163 | and unsigned int index (into the exposed frequency table). | 179 | and unsigned int index (into the exposed frequency table). |
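A minimal, hypothetical ->target_index() might look as follows; my_set_rate() stands in for the hardware-specific frequency programming and is not a kernel API:

    static int my_target_index(struct cpufreq_policy *policy,
                               unsigned int index)
    {
            unsigned int new_freq = policy->freq_table[index].frequency;

            /* program the clock; the core sends the notifications */
            return my_set_rate(policy->cpu, new_freq);
    }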
@@ -186,9 +202,20 @@ actual frequency must be determined using the following rules: | |||
186 | Here again the frequency table helper might assist you - see section 2 | 202 | Here again the frequency table helper might assist you - see section 2 |
187 | for details. | 203 | for details. |
188 | 204 | ||
205 | 1.6. fast_switch | ||
206 | ---------------- | ||
189 | 207 | ||
190 | 1.6 setpolicy | 208 | This function is used for frequency switching from the scheduler's context. |
191 | --------------- | 209 | Not all drivers are expected to implement it, as sleeping from within |
210 | this callback isn't allowed. This callback must be highly optimized to | ||
211 | do switching as fast as possible. | ||
212 | |||
213 | This function has two arguments: struct cpufreq_policy *policy and | ||
214 | unsigned int target_frequency. | ||
215 | |||
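Since sleeping is not allowed here, a hypothetical implementation would do little more than a fast register write; my_write_freq_register() is a placeholder:

    static unsigned int my_fast_switch(struct cpufreq_policy *policy,
                                       unsigned int target_freq)
    {
            /* must not sleep: clamp and program the new frequency */
            unsigned int freq = clamp(target_freq, policy->min, policy->max);

            my_write_freq_register(policy->cpu, freq);
            return freq;    /* frequency actually set, in kHz */
    }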
216 | |||
217 | 1.7 setpolicy | ||
218 | ------------- | ||
192 | 219 | ||
193 | The setpolicy call only takes a struct cpufreq_policy *policy as | 220 | The setpolicy call only takes a struct cpufreq_policy *policy as |
194 | argument. You need to set the lower limit of the in-processor or | 221 | argument. You need to set the lower limit of the in-processor or |
@@ -198,7 +225,7 @@ setting when policy->policy is CPUFREQ_POLICY_PERFORMANCE, and a | |||
198 | powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check | 225 | powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check |
199 | the reference implementation in drivers/cpufreq/longrun.c | 226 | the reference implementation in drivers/cpufreq/longrun.c |
200 | 227 | ||
201 | 1.7 get_intermediate and target_intermediate | 228 | 1.8 get_intermediate and target_intermediate |
202 | -------------------------------------------- | 229 | -------------------------------------------- |
203 | 230 | ||
204 | Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION unset. | 231 | Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION unset. |
@@ -222,42 +249,36 @@ failures as core would send notifications for that. | |||
222 | 249 | ||
223 | As most cpufreq processors only allow for being set to a few specific | 250 | As most cpufreq processors only allow for being set to a few specific |
224 | frequencies, a "frequency table" with some functions might assist in | 251 | frequencies, a "frequency table" with some functions might assist in |
225 | some work of the processor driver. Such a "frequency table" consists | 252 | some work of the processor driver. Such a "frequency table" consists of |
226 | of an array of struct cpufreq_frequency_table entries, with any value in | 253 | an array of struct cpufreq_frequency_table entries, with driver specific |
227 | "driver_data" you want to use, and the corresponding frequency in | 254 | values in "driver_data", the corresponding frequency in "frequency" and |
228 | "frequency". At the end of the table, you need to add a | 255 | flags set. At the end of the table, you need to add a |
229 | cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. And | 256 | cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. |
230 | if you want to skip one entry in the table, set the frequency to | 257 | And if you want to skip one entry in the table, set the frequency to |
231 | CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending | 258 | CPUFREQ_ENTRY_INVALID. The entries don't need to be sorted in any |
232 | order. | 259 | particular order, but if they are, the cpufreq core will do DVFS a bit |
233 | 260 | more quickly for them, as the search for the best match is faster. | |
234 | By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy, | 261 | |
235 | struct cpufreq_frequency_table *table); | 262 | By calling cpufreq_table_validate_and_show(), the cpuinfo.min_freq and |
236 | the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and | 263 | cpuinfo.max_freq values are detected, and policy->min and policy->max |
237 | policy->min and policy->max are set to the same values. This is | 264 | are set to the same values. This is helpful for the per-CPU |
238 | helpful for the per-CPU initialization stage. | 265 | initialization stage. |
239 | 266 | ||
240 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | 267 | cpufreq_frequency_table_verify() assures that at least one valid |
241 | struct cpufreq_frequency_table *table); | 268 | frequency is within policy->min and policy->max, and all other criteria |
242 | assures that at least one valid frequency is within policy->min and | 269 | are met. This is helpful for the ->verify call. |
243 | policy->max, and all other criteria are met. This is helpful for the | 270 | |
244 | ->verify call. | 271 | cpufreq_frequency_table_target() is the corresponding frequency table |
245 | 272 | helper for the ->target stage. Just pass the values to this function, | |
246 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | 273 | and this function returns the index of the frequency table entry which |
247 | unsigned int target_freq, | 274 | contains the frequency the CPU shall be set to. |
248 | unsigned int relation); | ||
249 | |||
250 | is the corresponding frequency table helper for the ->target | ||
251 | stage. Just pass the values to this function, and this function | ||
252 | returns the number of the frequency table entry which contains | ||
253 | the frequency the CPU shall be set to. | ||
254 | 275 | ||
255 | The following macros can be used as iterators over cpufreq_frequency_table: | 276 | The following macros can be used as iterators over cpufreq_frequency_table: |
256 | 277 | ||
257 | cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency | 278 | cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency |
258 | table. | 279 | table. |
259 | 280 | ||
260 | cpufreq-for_each_valid_entry(pos, table) - iterates over all entries, | 281 | cpufreq_for_each_valid_entry(pos, table) - iterates over all entries, |
261 | excluding CPUFREQ_ENTRY_INVALID frequencies. | 282 | excluding CPUFREQ_ENTRY_INVALID frequencies. |
262 | Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and | 283 | Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and |
263 | "table" - the cpufreq_frequency_table * you want to iterate over. | 284 | "table" - the cpufreq_frequency_table * you want to iterate over. |
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt index 3c355f6ad834..2bbe207354ed 100644 --- a/Documentation/cpu-freq/cpufreq-stats.txt +++ b/Documentation/cpu-freq/cpufreq-stats.txt | |||
@@ -34,10 +34,10 @@ cpufreq stats provides following statistics (explained in detail below). | |||
34 | - total_trans | 34 | - total_trans |
35 | - trans_table | 35 | - trans_table |
36 | 36 | ||
37 | All the statistics will be from the time the stats driver has been inserted | 37 | All the statistics will be from the time the stats driver has been inserted |
38 | to the time when a read of a particular statistic is done. Obviously, stats | 38 | (or the time the stats were reset) to the time when a read of a particular |
39 | driver will not have any information about the frequency transitions before | 39 | statistic is done. Obviously, stats driver will not have any information |
40 | the stats driver insertion. | 40 | about the frequency transitions before the stats driver insertion. |
41 | 41 | ||
42 | -------------------------------------------------------------------------------- | 42 | -------------------------------------------------------------------------------- |
43 | <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l | 43 | <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l |
@@ -110,25 +110,13 @@ Config Main Menu | |||
110 | CPU Frequency scaling ---> | 110 | CPU Frequency scaling ---> |
111 | [*] CPU Frequency scaling | 111 | [*] CPU Frequency scaling |
112 | [*] CPU frequency translation statistics | 112 | [*] CPU frequency translation statistics |
113 | [*] CPU frequency translation statistics details | ||
114 | 113 | ||
115 | 114 | ||
116 | "CPU Frequency scaling" (CONFIG_CPU_FREQ) should be enabled to configure | 115 | "CPU Frequency scaling" (CONFIG_CPU_FREQ) should be enabled to configure |
117 | cpufreq-stats. | 116 | cpufreq-stats. |
118 | 117 | ||
119 | "CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT) provides the | 118 | "CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT) provides the |
120 | basic statistics which includes time_in_state and total_trans. | 119 | statistics, which include time_in_state, total_trans and trans_table. |
121 | 120 | ||
122 | "CPU frequency translation statistics details" (CONFIG_CPU_FREQ_STAT_DETAILS) | 121 | Once this option is enabled and your CPU supports cpufrequency, you |
123 | provides fine grained cpufreq stats by trans_table. The reason for having a | ||
124 | separate config option for trans_table is: | ||
125 | - trans_table goes against the traditional /sysfs rule of one value per | ||
126 | interface. It provides a whole bunch of value in a 2 dimensional matrix | ||
127 | form. | ||
128 | |||
129 | Once these two options are enabled and your CPU supports cpufrequency, you | ||
130 | will be able to see the CPU frequency statistics in /sysfs. | 122 | will be able to see the CPU frequency statistics in /sysfs. |
131 | |||
132 | |||
133 | |||
134 | |||
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index c15aa75f5227..61b3184b6c24 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt | |||
@@ -10,6 +10,8 @@ | |||
10 | 10 | ||
11 | Dominik Brodowski <linux@brodo.de> | 11 | Dominik Brodowski <linux@brodo.de> |
12 | some additions and corrections by Nico Golde <nico@ngolde.de> | 12 | some additions and corrections by Nico Golde <nico@ngolde.de> |
13 | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
14 | Viresh Kumar <viresh.kumar@linaro.org> | ||
13 | 15 | ||
14 | 16 | ||
15 | 17 | ||
@@ -28,32 +30,27 @@ Contents: | |||
28 | 2.3 Userspace | 30 | 2.3 Userspace |
29 | 2.4 Ondemand | 31 | 2.4 Ondemand |
30 | 2.5 Conservative | 32 | 2.5 Conservative |
33 | 2.6 Schedutil | ||
31 | 34 | ||
32 | 3. The Governor Interface in the CPUfreq Core | 35 | 3. The Governor Interface in the CPUfreq Core |
33 | 36 | ||
37 | 4. References | ||
34 | 38 | ||
35 | 39 | ||
36 | 1. What Is A CPUFreq Governor? | 40 | 1. What Is A CPUFreq Governor? |
37 | ============================== | 41 | ============================== |
38 | 42 | ||
39 | Most cpufreq drivers (except the intel_pstate and longrun) or even most | 43 | Most cpufreq drivers (except the intel_pstate and longrun) or even most |
40 | cpu frequency scaling algorithms only offer the CPU to be set to one | 44 | cpu frequency scaling algorithms only allow the CPU frequency to be set |
41 | frequency. In order to offer dynamic frequency scaling, the cpufreq | 45 | to predefined fixed values. In order to offer dynamic frequency |
42 | core must be able to tell these drivers of a "target frequency". So | 46 | scaling, the cpufreq core must be able to tell these drivers of a |
43 | these specific drivers will be transformed to offer a "->target/target_index" | 47 | "target frequency". So these specific drivers will be transformed to |
44 | call instead of the existing "->setpolicy" call. For "longrun", all | 48 | offer a "->target/target_index/fast_switch()" call instead of the |
45 | stays the same, though. | 49 | "->setpolicy()" call. For set_policy drivers, all stays the same, |
50 | though. | ||
46 | 51 | ||
47 | How to decide what frequency within the CPUfreq policy should be used? | 52 | How to decide what frequency within the CPUfreq policy should be used? |
48 | That's done using "cpufreq governors". Two are already in this patch | 53 | That's done using "cpufreq governors". |
49 | -- they're the already existing "powersave" and "performance" which | ||
50 | set the frequency statically to the lowest or highest frequency, | ||
51 | respectively. At least two more such governors will be ready for | ||
52 | addition in the near future, but likely many more as there are various | ||
53 | different theories and models about dynamic frequency scaling | ||
54 | around. Using such a generic interface as cpufreq offers to scaling | ||
55 | governors, these can be tested extensively, and the best one can be | ||
56 | selected for each specific use. | ||
57 | 54 | ||
58 | Basically, it's the following flow graph: | 55 | Basically, it's the following flow graph: |
59 | 56 | ||
@@ -71,7 +68,7 @@ CPU can be set to switch independently | CPU can only be set | |||
71 | / the limits of policy->{min,max} | 68 | / the limits of policy->{min,max} |
72 | / \ | 69 | / \ |
73 | / \ | 70 | / \ |
74 | Using the ->setpolicy call, Using the ->target/target_index call, | 71 | Using the ->setpolicy call, Using the ->target/target_index/fast_switch call, |
75 | the limits and the the frequency closest | 72 | the limits and the the frequency closest |
76 | "policy" is set. to target_freq is set. | 73 | "policy" is set. to target_freq is set. |
77 | It is assured that it | 74 | It is assured that it |
@@ -109,114 +106,159 @@ directory. | |||
109 | 2.4 Ondemand | 106 | 2.4 Ondemand |
110 | ------------ | 107 | ------------ |
111 | 108 | ||
112 | The CPUfreq governor "ondemand" sets the CPU depending on the | 109 | The CPUfreq governor "ondemand" sets the CPU frequency depending on the |
113 | current usage. To do this the CPU must have the capability to | 110 | current system load. Load estimation is triggered by the scheduler |
114 | switch the frequency very quickly. There are a number of sysfs file | 111 | through the update_util_data->func hook; when triggered, cpufreq checks |
115 | accessible parameters: | 112 | the CPU-usage statistics over the last period and the governor sets the |
116 | 113 | CPU frequency accordingly. The CPU must have the capability to switch the | |
117 | sampling_rate: measured in uS (10^-6 seconds), this is how often you | 114 | frequency very quickly. |
118 | want the kernel to look at the CPU usage and to make decisions on | 115 | |
119 | what to do about the frequency. Typically this is set to values of | 116 | Sysfs files: |
120 | around '10000' or more. It's default value is (cmp. with users-guide.txt): | 117 | |
121 | transition_latency * 1000 | 118 | * sampling_rate: |
122 | Be aware that transition latency is in ns and sampling_rate is in us, so you | 119 | |
123 | get the same sysfs value by default. | 120 | Measured in uS (10^-6 seconds), this is how often you want the kernel |
124 | Sampling rate should always get adjusted considering the transition latency | 121 | to look at the CPU usage and to make decisions on what to do about the |
125 | To set the sampling rate 750 times as high as the transition latency | 122 | frequency. Typically this is set to values of around '10000' or more. |
126 | in the bash (as said, 1000 is default), do: | 123 | Its default value is (cmp. with users-guide.txt): transition_latency |
127 | echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \ | 124 | * 1000. Be aware that transition latency is in ns and sampling_rate |
128 | >ondemand/sampling_rate | 125 | is in us, so you get the same sysfs value by default. Sampling rate |
129 | 126 | should always get adjusted considering the transition latency. To set | |
130 | sampling_rate_min: | 127 | the sampling rate 750 times as high as the transition latency, in |
131 | The sampling rate is limited by the HW transition latency: | 128 | bash (as said, 1000 is default) do: |
132 | transition_latency * 100 | 129 | |
133 | Or by kernel restrictions: | 130 | $ echo $(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate |
134 | If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed. | 131 | |
135 | If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is used, the | 132 | * sampling_rate_min: |
136 | limits depend on the CONFIG_HZ option: | 133 | |
137 | HZ=1000: min=20000us (20ms) | 134 | The sampling rate is limited by the HW transition latency: |
138 | HZ=250: min=80000us (80ms) | 135 | transition_latency * 100 |
139 | HZ=100: min=200000us (200ms) | 136 | |
140 | The highest value of kernel and HW latency restrictions is shown and | 137 | Or by kernel restrictions: |
141 | used as the minimum sampling rate. | 138 | - If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed. |
142 | 139 | - If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is | |
143 | up_threshold: defines what the average CPU usage between the samplings | 140 | used, the limits depend on the CONFIG_HZ option: |
144 | of 'sampling_rate' needs to be for the kernel to make a decision on | 141 | HZ=1000: min=20000us (20ms) |
145 | whether it should increase the frequency. For example when it is set | 142 | HZ=250: min=80000us (80ms) |
146 | to its default value of '95' it means that between the checking | 143 | HZ=100: min=200000us (200ms) |
147 | intervals the CPU needs to be on average more than 95% in use to then | 144 | |
148 | decide that the CPU frequency needs to be increased. | 145 | The highest value of kernel and HW latency restrictions is shown and |
149 | 146 | used as the minimum sampling rate. | |
150 | ignore_nice_load: this parameter takes a value of '0' or '1'. When | 147 | |
151 | set to '0' (its default), all processes are counted towards the | 148 | * up_threshold: |
152 | 'cpu utilisation' value. When set to '1', the processes that are | 149 | |
153 | run with a 'nice' value will not count (and thus be ignored) in the | 150 | This defines what the average CPU usage between the samplings of |
154 | overall usage calculation. This is useful if you are running a CPU | 151 | 'sampling_rate' needs to be for the kernel to make a decision on |
155 | intensive calculation on your laptop that you do not care how long it | 152 | whether it should increase the frequency. For example when it is set |
156 | takes to complete as you can 'nice' it and prevent it from taking part | 153 | to its default value of '95' it means that between the checking |
157 | in the deciding process of whether to increase your CPU frequency. | 154 | intervals the CPU needs to be on average more than 95% in use to then |
158 | 155 | decide that the CPU frequency needs to be increased. | |
159 | sampling_down_factor: this parameter controls the rate at which the | 156 | |
160 | kernel makes a decision on when to decrease the frequency while running | 157 | * ignore_nice_load: |
161 | at top speed. When set to 1 (the default) decisions to reevaluate load | 158 | |
162 | are made at the same interval regardless of current clock speed. But | 159 | This parameter takes a value of '0' or '1'. When set to '0' (its |
163 | when set to greater than 1 (e.g. 100) it acts as a multiplier for the | 160 | default), all processes are counted towards the 'cpu utilisation' |
164 | scheduling interval for reevaluating load when the CPU is at its top | 161 | value. When set to '1', the processes that are run with a 'nice' |
165 | speed due to high load. This improves performance by reducing the overhead | 162 | value will not count (and thus be ignored) in the overall usage |
166 | of load evaluation and helping the CPU stay at its top speed when truly | 163 | calculation. This is useful if you are running a CPU intensive |
167 | busy, rather than shifting back and forth in speed. This tunable has no | 164 | calculation on your laptop that you do not care how long it takes to |
168 | effect on behavior at lower speeds/lower CPU loads. | 165 | complete as you can 'nice' it and prevent it from taking part in the |
169 | 166 | deciding process of whether to increase your CPU frequency. | |
170 | powersave_bias: this parameter takes a value between 0 to 1000. It | 167 | |
171 | defines the percentage (times 10) value of the target frequency that | 168 | * sampling_down_factor: |
172 | will be shaved off of the target. For example, when set to 100 -- 10%, | 169 | |
173 | when ondemand governor would have targeted 1000 MHz, it will target | 170 | This parameter controls the rate at which the kernel makes a decision |
174 | 1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0 | 171 | on when to decrease the frequency while running at top speed. When set |
175 | (disabled) by default. | 172 | to 1 (the default) decisions to reevaluate load are made at the same |
176 | When AMD frequency sensitivity powersave bias driver -- | 173 | interval regardless of current clock speed. But when set to greater |
177 | drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter | 174 | than 1 (e.g. 100) it acts as a multiplier for the scheduling interval |
178 | defines the workload frequency sensitivity threshold in which a lower | 175 | for reevaluating load when the CPU is at its top speed due to high |
179 | frequency is chosen instead of ondemand governor's original target. | 176 | load. This improves performance by reducing the overhead of load |
180 | The frequency sensitivity is a hardware reported (on AMD Family 16h | 177 | evaluation and helping the CPU stay at its top speed when truly busy, |
181 | Processors and above) value between 0 to 100% that tells software how | 178 | rather than shifting back and forth in speed. This tunable has no |
182 | the performance of the workload running on a CPU will change when | 179 | effect on behavior at lower speeds/lower CPU loads. |
183 | frequency changes. A workload with sensitivity of 0% (memory/IO-bound) | 180 | |
184 | will not perform any better on higher core frequency, whereas a | 181 | * powersave_bias: |
185 | workload with sensitivity of 100% (CPU-bound) will perform better | 182 | |
186 | higher the frequency. When the driver is loaded, this is set to 400 | 183 | This parameter takes a value between 0 to 1000. It defines the |
187 | by default -- for CPUs running workloads with sensitivity value below | 184 | percentage (times 10) value of the target frequency that will be |
188 | 40%, a lower frequency is chosen. Unloading the driver or writing 0 | 185 | shaved off of the target. For example, when set to 100 -- 10%, when |
189 | will disable this feature. | 186 | ondemand governor would have targeted 1000 MHz, it will target |
187 | 1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0 | ||
188 | (disabled) by default. | ||
189 | |||
190 | When AMD frequency sensitivity powersave bias driver -- | ||
191 | drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter | ||
192 | defines the workload frequency sensitivity threshold in which a lower | ||
193 | frequency is chosen instead of ondemand governor's original target. | ||
194 | The frequency sensitivity is a hardware reported (on AMD Family 16h | ||
195 | Processors and above) value between 0 to 100% that tells software how | ||
196 | the performance of the workload running on a CPU will change when | ||
197 | frequency changes. A workload with sensitivity of 0% (memory/IO-bound) | ||
198 | will not perform any better on higher core frequency, whereas a | ||
199 | workload with sensitivity of 100% (CPU-bound) will perform better | ||
200 | the higher the frequency. When the driver is loaded, this is set to 400 by | ||
201 | default -- for CPUs running workloads with sensitivity value below | ||
202 | 40%, a lower frequency is chosen. Unloading the driver or writing 0 | ||
203 | will disable this feature. | ||
190 | 204 | ||
191 | 205 | ||
192 | 2.5 Conservative | 206 | 2.5 Conservative |
193 | ---------------- | 207 | ---------------- |
194 | 208 | ||
195 | The CPUfreq governor "conservative", much like the "ondemand" | 209 | The CPUfreq governor "conservative", much like the "ondemand" |
196 | governor, sets the CPU depending on the current usage. It differs in | 210 | governor, sets the CPU frequency depending on the current usage. It |
197 | behaviour in that it gracefully increases and decreases the CPU speed | 211 | differs in behaviour in that it gracefully increases and decreases the |
198 | rather than jumping to max speed the moment there is any load on the | 212 | CPU speed rather than jumping to max speed the moment there is any load |
199 | CPU. This behaviour more suitable in a battery powered environment. | 213 | on the CPU. This behaviour is more suitable in a battery powered |
200 | The governor is tweaked in the same manner as the "ondemand" governor | 214 | environment. The governor is tweaked in the same manner as the |
201 | through sysfs with the addition of: | 215 | "ondemand" governor through sysfs with the addition of: |
202 | 216 | ||
203 | freq_step: this describes what percentage steps the cpu freq should be | 217 | * freq_step: |
204 | increased and decreased smoothly by. By default the cpu frequency will | 218 | |
205 | increase in 5% chunks of your maximum cpu frequency. You can change this | 219 | This describes what percentage steps the cpu freq should be increased |
206 | value to anywhere between 0 and 100 where '0' will effectively lock your | 220 | and decreased smoothly by. By default the cpu frequency will increase |
207 | CPU at a speed regardless of its load whilst '100' will, in theory, make | 221 | in 5% chunks of your maximum cpu frequency. You can change this value |
208 | it behave identically to the "ondemand" governor. | 222 | to anywhere between 0 and 100 where '0' will effectively lock your CPU |
209 | 223 | at a speed regardless of its load whilst '100' will, in theory, make | |
210 | down_threshold: same as the 'up_threshold' found for the "ondemand" | 224 | it behave identically to the "ondemand" governor. |
211 | governor but for the opposite direction. For example when set to its | 225 | |
212 | default value of '20' it means that if the CPU usage needs to be below | 226 | * down_threshold: |
213 | 20% between samples to have the frequency decreased. | 227 | |
214 | 228 | Same as the 'up_threshold' found for the "ondemand" governor but for | |
215 | sampling_down_factor: similar functionality as in "ondemand" governor. | 229 | the opposite direction. For example when set to its default value of |
216 | But in "conservative", it controls the rate at which the kernel makes | 230 | '20' it means that if the CPU usage needs to be below 20% between |
217 | a decision on when to decrease the frequency while running in any | 231 | samples to have the frequency decreased. |
218 | speed. Load for frequency increase is still evaluated every | 232 | |
219 | sampling rate. | 233 | * sampling_down_factor: |
234 | |||
235 | Similar functionality as in "ondemand" governor. But in | ||
236 | "conservative", it controls the rate at which the kernel makes a | ||
237 | decision on when to decrease the frequency while running in any speed. | ||
238 | Load for frequency increase is still evaluated every sampling rate. | ||
239 | |||
240 | |||
241 | 2.6 Schedutil | ||
242 | ------------- | ||
243 | |||
244 | The "schedutil" governor aims at better integration with the Linux | ||
245 | kernel scheduler. Load estimation is achieved through the scheduler's | ||
246 | Per-Entity Load Tracking (PELT) mechanism, which also provides | ||
247 | information about the recent load [1]. This governor currently does | ||
248 | load based DVFS only for tasks managed by CFS. RT and DL scheduler tasks | ||
249 | are always run at the highest frequency. Unlike all the other | ||
250 | governors, the code is located under the kernel/sched/ directory. | ||
251 | |||
252 | Sysfs files: | ||
253 | |||
254 | * rate_limit_us: | ||
255 | |||
256 | This contains a value in microseconds. The governor waits for | ||
257 | rate_limit_us time before reevaluating the load again, after it has | ||
258 | evaluated the load once. | ||
259 | |||
260 | For an in-depth comparison with the other governors refer to [2]. | ||
261 | |||
220 | 262 | ||
221 | 3. The Governor Interface in the CPUfreq Core | 263 | 3. The Governor Interface in the CPUfreq Core |
222 | ============================================= | 264 | ============================================= |
@@ -225,26 +267,10 @@ A new governor must register itself with the CPUfreq core using | |||
225 | "cpufreq_register_governor". The struct cpufreq_governor, which has to | 267 | "cpufreq_register_governor". The struct cpufreq_governor, which has to |
226 | be passed to that function, must contain the following values: | 268 | be passed to that function, must contain the following values: |
227 | 269 | ||
228 | governor->name - A unique name for this governor | 270 | governor->name - A unique name for this governor. |
229 | governor->governor - The governor callback function | 271 | governor->owner - .THIS_MODULE for the governor module (if appropriate). |
230 | governor->owner - .THIS_MODULE for the governor module (if | ||
231 | appropriate) | ||
232 | |||
233 | The governor->governor callback is called with the current (or to-be-set) | ||
234 | cpufreq_policy struct for that CPU, and an unsigned int event. The | ||
235 | following events are currently defined: | ||
236 | |||
237 | CPUFREQ_GOV_START: This governor shall start its duty for the CPU | ||
238 | policy->cpu | ||
239 | CPUFREQ_GOV_STOP: This governor shall end its duty for the CPU | ||
240 | policy->cpu | ||
241 | CPUFREQ_GOV_LIMITS: The limits for CPU policy->cpu have changed to | ||
242 | policy->min and policy->max. | ||
243 | |||
244 | If you need other "events" externally of your driver, _only_ use the | ||
245 | cpufreq_governor_l(unsigned int cpu, unsigned int event) call to the | ||
246 | CPUfreq core to ensure proper locking. | ||
247 | 272 | ||
273 | plus a set of hooks to the functions implementing the governor's logic. | ||
248 | 274 | ||
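For illustration, a hedged sketch of such a registration; the my_gov_* hooks are placeholders for the governor's logic:

    static struct cpufreq_governor my_governor = {
            .name   = "mygov",
            .owner  = THIS_MODULE,
            .init   = my_gov_init,
            .exit   = my_gov_exit,
            .start  = my_gov_start,
            .stop   = my_gov_stop,
            .limits = my_gov_limits,
    };

    static int __init my_gov_module_init(void)
    {
            return cpufreq_register_governor(&my_governor);
    }
    module_init(my_gov_module_init);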
249 | The CPUfreq governor may call the CPU processor driver using one of | 275 | The CPUfreq governor may call the CPU processor driver using one of |
250 | these two functions: | 276 | these two functions: |
@@ -258,12 +284,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
258 | unsigned int relation); | 284 | unsigned int relation); |
259 | 285 | ||
260 | target_freq must be within policy->min and policy->max, of course. | 286 | target_freq must be within policy->min and policy->max, of course. |
261 | What's the difference between these two functions? When your governor | 287 | What's the difference between these two functions? When your governor is |
262 | still is in a direct code path of a call to governor->governor, the | 288 | in a direct code path of a call to governor callbacks, like |
263 | per-CPU cpufreq lock is still held in the cpufreq core, and there's | 289 | governor->start(), the policy->rwsem is still held in the cpufreq core, |
264 | no need to lock it again (in fact, this would cause a deadlock). So | 290 | and there's no need to lock it again (in fact, this would cause a |
265 | use __cpufreq_driver_target only in these cases. In all other cases | 291 | deadlock). So use __cpufreq_driver_target only in these cases. In all |
266 | (for example, when there's a "daemonized" function that wakes up | 292 | other cases (for example, when there's a "daemonized" function that |
267 | every second), use cpufreq_driver_target to lock the cpufreq per-CPU | 293 | wakes up every second), use cpufreq_driver_target to take policy->rwsem |
268 | lock before the command is passed to the cpufreq processor driver. | 294 | before the command is passed to the cpufreq driver. |
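A hedged sketch of that rule; both target functions are real, the my_gov_* wrappers are placeholders:

    /* called by the core with policy->rwsem already held */
    static void my_gov_limits(struct cpufreq_policy *policy)
    {
            __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
    }

    /* e.g. from a kthread that wakes up periodically: take the lock */
    static void my_gov_work(struct cpufreq_policy *policy, unsigned int freq)
    {
            cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
    }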
295 | |||
296 | 4. References | ||
297 | ============= | ||
298 | |||
299 | [1] Per-entity load tracking: https://lwn.net/Articles/531853/ | ||
300 | [2] Improvements in CPU frequency management: https://lwn.net/Articles/682391/ | ||
269 | 301 | ||
diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt index dc024ab4054f..ef1d39247b05 100644 --- a/Documentation/cpu-freq/index.txt +++ b/Documentation/cpu-freq/index.txt | |||
@@ -18,16 +18,29 @@ | |||
18 | 18 | ||
19 | Documents in this directory: | 19 | Documents in this directory: |
20 | ---------------------------- | 20 | ---------------------------- |
21 | |||
22 | amd-powernow.txt - AMD powernow driver specific file. | ||
23 | |||
24 | boost.txt - Frequency boosting support. | ||
25 | |||
21 | core.txt - General description of the CPUFreq core and | 26 | core.txt - General description of the CPUFreq core and |
22 | of CPUFreq notifiers | 27 | of CPUFreq notifiers. |
28 | |||
29 | cpu-drivers.txt - How to implement a new cpufreq processor driver. | ||
23 | 30 | ||
24 | cpu-drivers.txt - How to implement a new cpufreq processor driver | 31 | cpufreq-nforce2.txt - nVidia nForce2 platform specific file. |
32 | |||
33 | cpufreq-stats.txt - General description of sysfs cpufreq stats. | ||
25 | 34 | ||
26 | governors.txt - What are cpufreq governors and how to | 35 | governors.txt - What are cpufreq governors and how to |
27 | implement them? | 36 | implement them? |
28 | 37 | ||
29 | index.txt - File index, Mailing list and Links (this document) | 38 | index.txt - File index, Mailing list and Links (this document) |
30 | 39 | ||
40 | intel-pstate.txt - Intel pstate cpufreq driver specific file. | ||
41 | |||
42 | pcc-cpufreq.txt - PCC cpufreq driver specific file. | ||
43 | |||
31 | user-guide.txt - User Guide to CPUFreq | 44 | user-guide.txt - User Guide to CPUFreq |
32 | 45 | ||
33 | 46 | ||
@@ -35,9 +48,7 @@ Mailing List | |||
35 | ------------ | 48 | ------------ |
36 | There is a CPU frequency changing CVS commit and general list where | 49 | There is a CPU frequency changing CVS commit and general list where |
37 | you can report bugs, problems or submit patches. To post a message, | 50 | you can report bugs, problems or submit patches. To post a message, |
38 | send an email to linux-pm@vger.kernel.org, to subscribe go to | 51 | send an email to linux-pm@vger.kernel.org. |
39 | http://vger.kernel.org/vger-lists.html#linux-pm and follow the | ||
40 | instructions there. | ||
41 | 52 | ||
42 | Links | 53 | Links |
43 | ----- | 54 | ----- |
@@ -48,7 +59,7 @@ how to access the CVS repository: | |||
48 | * http://cvs.arm.linux.org.uk/ | 59 | * http://cvs.arm.linux.org.uk/ |
49 | 60 | ||
50 | the CPUFreq Mailing list: | 61 | the CPUFreq Mailing list: |
51 | * http://vger.kernel.org/vger-lists.html#cpufreq | 62 | * http://vger.kernel.org/vger-lists.html#linux-pm |
52 | 63 | ||
53 | Clock and voltage scaling for the SA-1100: | 64 | Clock and voltage scaling for the SA-1100: |
54 | * http://www.lartmaker.nl/projects/scaling | 65 | * http://www.lartmaker.nl/projects/scaling |
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt index 1953994ef5e6..3fdcdfd968ba 100644 --- a/Documentation/cpu-freq/intel-pstate.txt +++ b/Documentation/cpu-freq/intel-pstate.txt | |||
@@ -85,6 +85,21 @@ Sysfs will show : | |||
85 | Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual | 85 | Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual |
86 | Volume 3: System Programming Guide" to understand ratios. | 86 | Volume 3: System Programming Guide" to understand ratios. |
87 | 87 | ||
88 | There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/ | ||
89 | that can be used for controlling the operation mode of the driver: | ||
90 | |||
91 | status: Three settings are possible: | ||
92 | "off" - The driver is not in use at this time. | ||
93 | "active" - The driver works as a P-state governor (default). | ||
94 | "passive" - The driver works as a regular cpufreq one and collaborates | ||
95 | with the generic cpufreq governors (it sets P-states as | ||
96 | requested by those governors). | ||
97 | The current setting is returned by reads from this attribute. Writing one | ||
98 | of the above strings to it changes the operation mode as indicated by that | ||
99 | string, if possible. If HW-managed P-states (HWP) are enabled, it is not | ||
100 | possible to change the driver's operation mode and attempts to write to | ||
101 | this attribute will fail. | ||
102 | |||
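For example, switching the driver to passive mode from a shell (a sketch; this works only when HWP is not enabled):

    # cat /sys/devices/system/cpu/intel_pstate/status
    active
    # echo passive > /sys/devices/system/cpu/intel_pstate/status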
88 | cpufreq sysfs for Intel P-State | 103 | cpufreq sysfs for Intel P-State |
89 | 104 | ||
90 | Since this driver registers with cpufreq, cpufreq sysfs is also presented. | 105 | Since this driver registers with cpufreq, cpufreq sysfs is also presented. |
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt index 109e97bbab77..107f6fdd7d14 100644 --- a/Documentation/cpu-freq/user-guide.txt +++ b/Documentation/cpu-freq/user-guide.txt | |||
@@ -18,7 +18,7 @@ | |||
18 | Contents: | 18 | Contents: |
19 | --------- | 19 | --------- |
20 | 1. Supported Architectures and Processors | 20 | 1. Supported Architectures and Processors |
21 | 1.1 ARM | 21 | 1.1 ARM and ARM64 |
22 | 1.2 x86 | 22 | 1.2 x86 |
23 | 1.3 sparc64 | 23 | 1.3 sparc64 |
24 | 1.4 ppc | 24 | 1.4 ppc |
@@ -37,16 +37,10 @@ Contents: | |||
37 | 1. Supported Architectures and Processors | 37 | 1. Supported Architectures and Processors |
38 | ========================================= | 38 | ========================================= |
39 | 39 | ||
40 | 1.1 ARM | 40 | 1.1 ARM and ARM64 |
41 | ------- | 41 | ----------------- |
42 | |||
43 | The following ARM processors are supported by cpufreq: | ||
44 | |||
45 | ARM Integrator | ||
46 | ARM-SA1100 | ||
47 | ARM-SA1110 | ||
48 | Intel PXA | ||
49 | 42 | ||
43 | Almost all ARM and ARM64 platforms support CPU frequency scaling. | ||
50 | 44 | ||
51 | 1.2 x86 | 45 | 1.2 x86 |
52 | ------- | 46 | ------- |
@@ -69,6 +63,7 @@ Transmeta Crusoe | |||
69 | Transmeta Efficeon | 63 | Transmeta Efficeon |
70 | VIA Cyrix 3 / C3 | 64 | VIA Cyrix 3 / C3 |
71 | various processors on some ACPI 2.0-compatible systems [*] | 65 | various processors on some ACPI 2.0-compatible systems [*] |
66 | And many more | ||
72 | 67 | ||
73 | [*] Only if "ACPI Processor Performance States" are available | 68 | [*] Only if "ACPI Processor Performance States" are available |
74 | to the ACPI<->BIOS interface. | 69 | to the ACPI<->BIOS interface. |
@@ -147,10 +142,19 @@ mounted it at /sys, the cpufreq interface is located in a subdirectory | |||
147 | "cpufreq" within the cpu-device directory | 142 | "cpufreq" within the cpu-device directory |
148 | (e.g. /sys/devices/system/cpu/cpu0/cpufreq/ for the first CPU). | 143 | (e.g. /sys/devices/system/cpu/cpu0/cpufreq/ for the first CPU). |
149 | 144 | ||
145 | affected_cpus : List of Online CPUs that require software | ||
146 | coordination of frequency. | ||
147 | |||
148 | cpuinfo_cur_freq : Current frequency of the CPU as obtained from | ||
149 | the hardware, in KHz. This is the frequency | ||
150 | the CPU actually runs at. | ||
151 | |||
150 | cpuinfo_min_freq : this file shows the minimum operating | 152 | cpuinfo_min_freq : this file shows the minimum operating |
151 | frequency the processor can run at(in kHz) | 153 | frequency the processor can run at(in kHz) |
154 | |||
152 | cpuinfo_max_freq : this file shows the maximum operating | 155 | cpuinfo_max_freq : this file shows the maximum operating |
153 | frequency the processor can run at(in kHz) | 156 | frequency the processor can run at(in kHz) |
157 | |||
154 | cpuinfo_transition_latency The time it takes on this CPU to | 158 | cpuinfo_transition_latency The time it takes on this CPU to |
155 | switch between two frequencies in nano | 159 | switch between two frequencies in nano |
156 | seconds. If unknown or known to be | 160 | seconds. If unknown or known to be |
@@ -163,25 +167,30 @@ cpuinfo_transition_latency The time it takes on this CPU to | |||
163 | userspace daemon. Make sure to not | 167 | userspace daemon. Make sure to not |
164 | switch the frequency too often | 168 | switch the frequency too often |
165 | resulting in performance loss. | 169 | resulting in performance loss. |
166 | scaling_driver : this file shows what cpufreq driver is | 170 | |
167 | used to set the frequency on this CPU | 171 | related_cpus : List of Online + Offline CPUs that need software |
172 | coordination of frequency. | ||
173 | |||
174 | scaling_available_frequencies : List of available frequencies, in KHz. | ||
168 | 175 | ||
169 | scaling_available_governors : this file shows the CPUfreq governors | 176 | scaling_available_governors : this file shows the CPUfreq governors |
170 | available in this kernel. You can see the | 177 | available in this kernel. You can see the |
171 | currently activated governor in | 178 | currently activated governor in |
172 | 179 | ||
180 | scaling_cur_freq : Current frequency of the CPU as determined by | ||
181 | the governor and cpufreq core, in KHz. This is | ||
182 | the frequency the kernel thinks the CPU runs | ||
183 | at. | ||
184 | |||
185 | scaling_driver : this file shows what cpufreq driver is | ||
186 | used to set the frequency on this CPU | ||
187 | |||
173 | scaling_governor, and by "echoing" the name of another | 188 | scaling_governor, and by "echoing" the name of another |
174 | governor you can change it. Please note | 189 | governor you can change it. Please note |
175 | that some governors won't load - they only | 190 | that some governors won't load - they only |
176 | work on some specific architectures or | 191 | work on some specific architectures or |
177 | processors. | 192 | processors. |
178 | 193 | ||
179 | cpuinfo_cur_freq : Current frequency of the CPU as obtained from | ||
180 | the hardware, in KHz. This is the frequency | ||
181 | the CPU actually runs at. | ||
182 | |||
183 | scaling_available_frequencies : List of available frequencies, in KHz. | ||
184 | |||
185 | scaling_min_freq and | 194 | scaling_min_freq and |
186 | scaling_max_freq show the current "policy limits" (in | 195 | scaling_max_freq show the current "policy limits" (in |
187 | kHz). By echoing new values into these | 196 | kHz). By echoing new values into these |
@@ -190,16 +199,11 @@ scaling_max_freq show the current "policy limits" (in | |||
190 | first set scaling_max_freq, then | 199 | first set scaling_max_freq, then |
191 | scaling_min_freq. | 200 | scaling_min_freq. |
192 | 201 | ||
193 | affected_cpus : List of Online CPUs that require software | 202 | scaling_setspeed This can be read to get the value currently |
194 | coordination of frequency. | 203 | programmed by the governor. This can be written to |
195 | 204 | change the current frequency for a group of | |
196 | related_cpus : List of Online + Offline CPUs that need software | 205 | CPUs, represented by a policy. This is supported |
197 | coordination of frequency. | 206 | currently only by the userspace governor. |
198 | |||
199 | scaling_cur_freq : Current frequency of the CPU as determined by | ||
200 | the governor and cpufreq core, in KHz. This is | ||
201 | the frequency the kernel thinks the CPU runs | ||
202 | at. | ||
203 | 207 | ||
204 | bios_limit : If the BIOS tells the OS to limit a CPU to | 208 | bios_limit : If the BIOS tells the OS to limit a CPU to |
205 | lower frequencies, the user can read out the | 209 | lower frequencies, the user can read out the |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt index 0dcb7c7d3e40..944657684d73 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | |||
@@ -15,6 +15,9 @@ Properties: | |||
15 | Second cell specifies the irq distribution mode to cores | 15 | Second cell specifies the irq distribution mode to cores |
16 | 0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3 | 16 | 0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3 |
17 | 17 | ||
18 | The second cell in the interrupts property is deprecated and may be ignored by | ||
19 | the kernel. | ||
20 | |||
18 | intc accessed via the special ARC AUX register interface, hence "reg" property | 21 | intc accessed via the special ARC AUX register interface, hence "reg" property |
19 | is not specified. | 22 | is not specified. |
20 | 23 | ||
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt index c010fafc66a8..c7194e87d5f4 100644 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt | |||
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node.. | |||
7 | * Ethernet controller node | 7 | * Ethernet controller node |
8 | 8 | ||
9 | Required properties: | 9 | Required properties: |
10 | - compatible: Should be "mediatek,mt7623-eth" | 10 | - compatible: Should be "mediatek,mt2701-eth" |
11 | - reg: Address and length of the register set for the device | 11 | - reg: Address and length of the register set for the device |
12 | - interrupts: Should contain the three frame engines interrupts in numeric | 12 | - interrupts: Should contain the three frame engines interrupts in numeric |
13 | order. These are fe_int0, fe_int1 and fe_int2. | 13 | order. These are fe_int0, fe_int1 and fe_int2. |
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index ff1bc4b1bb3b..fb5056b22685 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt | |||
@@ -19,8 +19,9 @@ Optional Properties: | |||
19 | specifications. If neither of these are specified, the default is to | 19 | specifications. If neither of these are specified, the default is to |
20 | assume clause 22. | 20 | assume clause 22. |
21 | 21 | ||
22 | If the phy's identifier is known then the list may contain an entry | 22 | If the PHY reports an incorrect ID (or none at all) then the |
23 | of the form: "ethernet-phy-idAAAA.BBBB" where | 23 | "compatible" list may contain an entry with the correct PHY ID in the |
24 | form: "ethernet-phy-idAAAA.BBBB" where | ||
24 | AAAA - The value of the 16 bit Phy Identifier 1 register as | 25 | AAAA - The value of the 16 bit Phy Identifier 1 register as |
25 | 4 hex digits. This is the chip vendor OUI bits 3:18 | 26 | 4 hex digits. This is the chip vendor OUI bits 3:18 |
26 | BBBB - The value of the 16 bit Phy Identifier 2 register as | 27 | BBBB - The value of the 16 bit Phy Identifier 2 register as |
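For illustration only (not from the patch): the compatible entry is just the two 16-bit identifier registers printed as four hex digits each. A sketch of the mapping, with hypothetical register values:

#include <stdio.h>

static void phy_id_compatible(unsigned int id1, unsigned int id2,
			      char *buf, size_t len)
{
	/* AAAA = PHY Identifier 1, BBBB = PHY Identifier 2 */
	snprintf(buf, len, "ethernet-phy-id%04x.%04x",
		 id1 & 0xffff, id2 & 0xffff);
}

int main(void)
{
	char compat[32];

	phy_id_compatible(0x0141, 0x0cc2, compat, sizeof(compat));
	puts(compat);	/* prints: ethernet-phy-id0141.0cc2 */
	return 0;
}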
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 72624a16b792..c94b4675d021 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt | |||
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise | |||
212 | snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table. | 212 | snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table. |
213 | It's slow but very precise. | 213 | It's slow but very precise. |
214 | 214 | ||
215 | Table 1-2: Contents of the status files (as of 4.1) | 215 | Table 1-2: Contents of the status files (as of 4.8) |
216 | .............................................................................. | 216 | .............................................................................. |
217 | Field Content | 217 | Field Content |
218 | Name filename of the executable | 218 | Name filename of the executable |
219 | Umask file mode creation mask | ||
219 | State state (R is running, S is sleeping, D is sleeping | 220 | State state (R is running, S is sleeping, D is sleeping |
220 | in an uninterruptible wait, Z is zombie, | 221 | in an uninterruptible wait, Z is zombie, |
221 | T is traced or stopped) | 222 | T is traced or stopped) |
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1) | |||
226 | TracerPid PID of process tracing this process (0 if not) | 227 | TracerPid PID of process tracing this process (0 if not) |
227 | Uid Real, effective, saved set, and file system UIDs | 228 | Uid Real, effective, saved set, and file system UIDs |
228 | Gid Real, effective, saved set, and file system GIDs | 229 | Gid Real, effective, saved set, and file system GIDs |
229 | Umask file mode creation mask | ||
230 | FDSize number of file descriptor slots currently allocated | 230 | FDSize number of file descriptor slots currently allocated |
231 | Groups supplementary group list | 231 | Groups supplementary group list |
232 | NStgid descendant namespace thread group ID hierarchy | 232 | NStgid descendant namespace thread group ID hierarchy |
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1) | |||
236 | VmPeak peak virtual memory size | 236 | VmPeak peak virtual memory size |
237 | VmSize total program size | 237 | VmSize total program size |
238 | VmLck locked memory size | 238 | VmLck locked memory size |
239 | VmPin pinned memory size | ||
239 | VmHWM peak resident set size ("high water mark") | 240 | VmHWM peak resident set size ("high water mark") |
240 | VmRSS size of memory portions. It contains the three | 241 | VmRSS size of memory portions. It contains the three |
241 | following parts (VmRSS = RssAnon + RssFile + RssShmem) | 242 | following parts (VmRSS = RssAnon + RssFile + RssShmem) |
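The identity stated above, VmRSS = RssAnon + RssFile + RssShmem, can be checked from userspace by scanning the status file. A small sketch (assumes a kernel recent enough to expose the three Rss* fields):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[128];
	long rss = 0, anon = 0, file = 0, shmem = 0;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* non-matching lines leave the variables untouched */
		sscanf(line, "VmRSS: %ld", &rss);
		sscanf(line, "RssAnon: %ld", &anon);
		sscanf(line, "RssFile: %ld", &file);
		sscanf(line, "RssShmem: %ld", &shmem);
	}
	fclose(f);
	printf("VmRSS=%ld kB, parts sum to %ld kB\n", rss, anon + file + shmem);
	return 0;
}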
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt index 8a39ce45d8a0..008ecb588317 100644 --- a/Documentation/power/states.txt +++ b/Documentation/power/states.txt | |||
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write | |||
35 | The default suspend mode (ie. the one to be used without writing anything into | 35 | The default suspend mode (ie. the one to be used without writing anything into |
36 | /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or | 36 | /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or |
37 | "s2idle", but it can be overridden by the value of the "mem_sleep_default" | 37 | "s2idle", but it can be overridden by the value of the "mem_sleep_default" |
38 | parameter in the kernel command line. On some ACPI-based systems, depending on | 38 | parameter in the kernel command line. |
39 | the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM | ||
40 | is supported. | ||
41 | 39 | ||
42 | The properties of all of the sleep states are described below. | 40 | The properties of all of the sleep states are described below. |
43 | 41 | ||
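The mem_sleep interface referenced here is a plain sysfs file: reading it lists the available modes with the current one in brackets, writing selects a mode. For illustration (a sketch, needs root; "s2idle" and "deep" are the documented tokens):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/mem_sleep", "r+");
	char cur[64];

	if (!f)
		return 1;
	if (fgets(cur, sizeof(cur), f))
		printf("modes: %s", cur);	/* e.g. "s2idle [deep]" */
	fseek(f, 0, SEEK_SET);			/* reposition before writing */
	fputs("s2idle", f);			/* select suspend-to-idle */
	return fclose(f) ? 1 : 0;
}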
diff --git a/MAINTAINERS b/MAINTAINERS index 26edd832c64e..5f10c28b2e15 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -3567,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/ | |||
3567 | F: include/uapi/rdma/cxgb3-abi.h | 3567 | F: include/uapi/rdma/cxgb3-abi.h |
3568 | 3568 | ||
3569 | CXGB4 ETHERNET DRIVER (CXGB4) | 3569 | CXGB4 ETHERNET DRIVER (CXGB4) |
3570 | M: Hariprasad S <hariprasad@chelsio.com> | 3570 | M: Ganesh Goudar <ganeshgr@chelsio.com> |
3571 | L: netdev@vger.kernel.org | 3571 | L: netdev@vger.kernel.org |
3572 | W: http://www.chelsio.com | 3572 | W: http://www.chelsio.com |
3573 | S: Supported | 3573 | S: Supported |
@@ -4100,12 +4100,18 @@ F: drivers/gpu/drm/bridge/ | |||
4100 | 4100 | ||
4101 | DRM DRIVER FOR BOCHS VIRTUAL GPU | 4101 | DRM DRIVER FOR BOCHS VIRTUAL GPU |
4102 | M: Gerd Hoffmann <kraxel@redhat.com> | 4102 | M: Gerd Hoffmann <kraxel@redhat.com> |
4103 | S: Odd Fixes | 4103 | L: virtualization@lists.linux-foundation.org |
4104 | T: git git://git.kraxel.org/linux drm-qemu | ||
4105 | S: Maintained | ||
4104 | F: drivers/gpu/drm/bochs/ | 4106 | F: drivers/gpu/drm/bochs/ |
4105 | 4107 | ||
4106 | DRM DRIVER FOR QEMU'S CIRRUS DEVICE | 4108 | DRM DRIVER FOR QEMU'S CIRRUS DEVICE |
4107 | M: Dave Airlie <airlied@redhat.com> | 4109 | M: Dave Airlie <airlied@redhat.com> |
4108 | S: Odd Fixes | 4110 | M: Gerd Hoffmann <kraxel@redhat.com> |
4111 | L: virtualization@lists.linux-foundation.org | ||
4112 | T: git git://git.kraxel.org/linux drm-qemu | ||
4113 | S: Obsolete | ||
4114 | W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ | ||
4109 | F: drivers/gpu/drm/cirrus/ | 4115 | F: drivers/gpu/drm/cirrus/ |
4110 | 4116 | ||
4111 | RADEON and AMDGPU DRM DRIVERS | 4117 | RADEON and AMDGPU DRM DRIVERS |
@@ -4147,7 +4153,7 @@ F: Documentation/gpu/i915.rst | |||
4147 | INTEL GVT-g DRIVERS (Intel GPU Virtualization) | 4153 | INTEL GVT-g DRIVERS (Intel GPU Virtualization) |
4148 | M: Zhenyu Wang <zhenyuw@linux.intel.com> | 4154 | M: Zhenyu Wang <zhenyuw@linux.intel.com> |
4149 | M: Zhi Wang <zhi.a.wang@intel.com> | 4155 | M: Zhi Wang <zhi.a.wang@intel.com> |
4150 | L: igvt-g-dev@lists.01.org | 4156 | L: intel-gvt-dev@lists.freedesktop.org |
4151 | L: intel-gfx@lists.freedesktop.org | 4157 | L: intel-gfx@lists.freedesktop.org |
4152 | W: https://01.org/igvt-g | 4158 | W: https://01.org/igvt-g |
4153 | T: git https://github.com/01org/gvt-linux.git | 4159 | T: git https://github.com/01org/gvt-linux.git |
@@ -4298,7 +4304,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt | |||
4298 | 4304 | ||
4299 | DRM DRIVER FOR QXL VIRTUAL GPU | 4305 | DRM DRIVER FOR QXL VIRTUAL GPU |
4300 | M: Dave Airlie <airlied@redhat.com> | 4306 | M: Dave Airlie <airlied@redhat.com> |
4301 | S: Odd Fixes | 4307 | M: Gerd Hoffmann <kraxel@redhat.com> |
4308 | L: virtualization@lists.linux-foundation.org | ||
4309 | T: git git://git.kraxel.org/linux drm-qemu | ||
4310 | S: Maintained | ||
4302 | F: drivers/gpu/drm/qxl/ | 4311 | F: drivers/gpu/drm/qxl/ |
4303 | F: include/uapi/drm/qxl_drm.h | 4312 | F: include/uapi/drm/qxl_drm.h |
4304 | 4313 | ||
@@ -13092,6 +13101,7 @@ M: David Airlie <airlied@linux.ie> | |||
13092 | M: Gerd Hoffmann <kraxel@redhat.com> | 13101 | M: Gerd Hoffmann <kraxel@redhat.com> |
13093 | L: dri-devel@lists.freedesktop.org | 13102 | L: dri-devel@lists.freedesktop.org |
13094 | L: virtualization@lists.linux-foundation.org | 13103 | L: virtualization@lists.linux-foundation.org |
13104 | T: git git://git.kraxel.org/linux drm-qemu | ||
13095 | S: Maintained | 13105 | S: Maintained |
13096 | F: drivers/gpu/drm/virtio/ | 13106 | F: drivers/gpu/drm/virtio/ |
13097 | F: include/uapi/linux/virtio_gpu.h | 13107 | F: include/uapi/linux/virtio_gpu.h |
@@ -13443,6 +13453,7 @@ F: arch/x86/ | |||
13443 | 13453 | ||
13444 | X86 PLATFORM DRIVERS | 13454 | X86 PLATFORM DRIVERS |
13445 | M: Darren Hart <dvhart@infradead.org> | 13455 | M: Darren Hart <dvhart@infradead.org> |
13456 | M: Andy Shevchenko <andy@infradead.org> | ||
13446 | L: platform-driver-x86@vger.kernel.org | 13457 | L: platform-driver-x86@vger.kernel.org |
13447 | T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git | 13458 | T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git |
13448 | S: Maintained | 13459 | S: Maintained |
@@ -13614,6 +13625,7 @@ F: drivers/net/hamradio/z8530.h | |||
13614 | 13625 | ||
13615 | ZBUD COMPRESSED PAGE ALLOCATOR | 13626 | ZBUD COMPRESSED PAGE ALLOCATOR |
13616 | M: Seth Jennings <sjenning@redhat.com> | 13627 | M: Seth Jennings <sjenning@redhat.com> |
13628 | M: Dan Streetman <ddstreet@ieee.org> | ||
13617 | L: linux-mm@kvack.org | 13629 | L: linux-mm@kvack.org |
13618 | S: Maintained | 13630 | S: Maintained |
13619 | F: mm/zbud.c | 13631 | F: mm/zbud.c |
@@ -13669,6 +13681,7 @@ F: Documentation/vm/zsmalloc.txt | |||
13669 | 13681 | ||
13670 | ZSWAP COMPRESSED SWAP CACHING | 13682 | ZSWAP COMPRESSED SWAP CACHING |
13671 | M: Seth Jennings <sjenning@redhat.com> | 13683 | M: Seth Jennings <sjenning@redhat.com> |
13684 | M: Dan Streetman <ddstreet@ieee.org> | ||
13672 | L: linux-mm@kvack.org | 13685 | L: linux-mm@kvack.org |
13673 | S: Maintained | 13686 | S: Maintained |
13674 | F: mm/zswap.c | 13687 | F: mm/zswap.c |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 10 | 2 | PATCHLEVEL = 10 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Anniversary Edition | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
8 | # To see a list of typical targets execute "make help" | 8 | # To see a list of typical targets execute "make help" |
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h index a36e8601114d..d5da2115d78a 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h | |||
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops) | |||
26 | " lp 1f \n" | 26 | " lp 1f \n" |
27 | " nop \n" | 27 | " nop \n" |
28 | "1: \n" | 28 | "1: \n" |
29 | : : "r"(loops)); | 29 | : |
30 | : "r"(loops) | ||
31 | : "lp_count"); | ||
30 | } | 32 | } |
31 | 33 | ||
32 | extern void __bad_udelay(void); | 34 | extern void __bad_udelay(void); |
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 689dd867fdff..8b90d25a15cc 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S | |||
@@ -71,14 +71,14 @@ ENTRY(stext) | |||
71 | GET_CPU_ID r5 | 71 | GET_CPU_ID r5 |
72 | cmp r5, 0 | 72 | cmp r5, 0 |
73 | mov.nz r0, r5 | 73 | mov.nz r0, r5 |
74 | #ifdef CONFIG_ARC_SMP_HALT_ON_RESET | 74 | bz .Lmaster_proceed |
75 | ; Non-Master can proceed as system would be booted sufficiently | 75 | |
76 | jnz first_lines_of_secondary | ||
77 | #else | ||
78 | ; Non-Masters wait for Master to boot enough and bring them up | 76 | ; Non-Masters wait for Master to boot enough and bring them up |
79 | jnz arc_platform_smp_wait_to_boot | 77 | ; when they resume, tail-call to entry point |
80 | #endif | 78 | mov blink, @first_lines_of_secondary |
81 | ; Master falls thru | 79 | j arc_platform_smp_wait_to_boot |
80 | |||
81 | .Lmaster_proceed: | ||
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | ; Clear BSS before updating any globals | 84 | ; Clear BSS before updating any globals |
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 9274f8ade8c7..9f6b68fd4f3b 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void) | |||
93 | READ_BCR(ARC_REG_MCIP_BCR, mp); | 93 | READ_BCR(ARC_REG_MCIP_BCR, mp); |
94 | 94 | ||
95 | sprintf(smp_cpuinfo_buf, | 95 | sprintf(smp_cpuinfo_buf, |
96 | "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n", | 96 | "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", |
97 | mp.ver, mp.num_cores, | 97 | mp.ver, mp.num_cores, |
98 | IS_AVAIL1(mp.ipi, "IPI "), | 98 | IS_AVAIL1(mp.ipi, "IPI "), |
99 | IS_AVAIL1(mp.idu, "IDU "), | 99 | IS_AVAIL1(mp.idu, "IDU "), |
100 | IS_AVAIL1(mp.llm, "LLM "), | ||
101 | IS_AVAIL1(mp.dbg, "DEBUG "), | 100 | IS_AVAIL1(mp.dbg, "DEBUG "), |
102 | IS_AVAIL1(mp.gfrc, "GFRC")); | 101 | IS_AVAIL1(mp.gfrc, "GFRC")); |
103 | 102 | ||
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data) | |||
175 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | 174 | raw_spin_unlock_irqrestore(&mcip_lock, flags); |
176 | } | 175 | } |
177 | 176 | ||
178 | #ifdef CONFIG_SMP | ||
179 | static int | 177 | static int |
180 | idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, | 178 | idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, |
181 | bool force) | 179 | bool force) |
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, | |||
205 | 203 | ||
206 | return IRQ_SET_MASK_OK; | 204 | return IRQ_SET_MASK_OK; |
207 | } | 205 | } |
208 | #endif | 206 | |
207 | static void idu_irq_enable(struct irq_data *data) | ||
208 | { | ||
209 | /* | ||
210 | * By default send all common interrupts to all available online CPUs. | ||
211 | * The affinity of common interrupts in IDU must be set manually since | ||
212 | * in some cases the kernel will not call irq_set_affinity() by itself: | ||
213 | * 1. When the kernel is not configured with support of SMP. | ||
214 | * 2. When the kernel is configured with support of SMP but the | ||
215 | * upper interrupt controller does not support setting the affinity | ||
216 | * and cannot propagate it to IDU. | ||
217 | */ | ||
218 | idu_irq_set_affinity(data, cpu_online_mask, false); | ||
219 | idu_irq_unmask(data); | ||
220 | } | ||
209 | 221 | ||
210 | static struct irq_chip idu_irq_chip = { | 222 | static struct irq_chip idu_irq_chip = { |
211 | .name = "MCIP IDU Intc", | 223 | .name = "MCIP IDU Intc", |
212 | .irq_mask = idu_irq_mask, | 224 | .irq_mask = idu_irq_mask, |
213 | .irq_unmask = idu_irq_unmask, | 225 | .irq_unmask = idu_irq_unmask, |
226 | .irq_enable = idu_irq_enable, | ||
214 | #ifdef CONFIG_SMP | 227 | #ifdef CONFIG_SMP |
215 | .irq_set_affinity = idu_irq_set_affinity, | 228 | .irq_set_affinity = idu_irq_set_affinity, |
216 | #endif | 229 | #endif |
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n, | |||
243 | const u32 *intspec, unsigned int intsize, | 256 | const u32 *intspec, unsigned int intsize, |
244 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | 257 | irq_hw_number_t *out_hwirq, unsigned int *out_type) |
245 | { | 258 | { |
246 | irq_hw_number_t hwirq = *out_hwirq = intspec[0]; | 259 | /* |
247 | int distri = intspec[1]; | 260 | * Ignore value of interrupt distribution mode for common interrupts in |
248 | unsigned long flags; | 261 | * IDU which resides in intspec[1] since setting an affinity using value |
249 | 262 | * from Device Tree is deprecated in ARC. | |
263 | */ | ||
264 | *out_hwirq = intspec[0]; | ||
250 | *out_type = IRQ_TYPE_NONE; | 265 | *out_type = IRQ_TYPE_NONE; |
251 | 266 | ||
252 | /* XXX: validate distribution scheme again online cpu mask */ | ||
253 | if (distri == 0) { | ||
254 | /* 0 - Round Robin to all cpus, otherwise 1 bit per core */ | ||
255 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
256 | idu_set_dest(hwirq, BIT(num_online_cpus()) - 1); | ||
257 | idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR); | ||
258 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
259 | } else { | ||
260 | /* | ||
261 | * DEST based distribution for Level Triggered intr can only | ||
262 | * have 1 CPU, so generalize it to always contain 1 cpu | ||
263 | */ | ||
264 | int cpu = ffs(distri); | ||
265 | |||
266 | if (cpu != fls(distri)) | ||
267 | pr_warn("IDU irq %lx distri mode set to cpu %x\n", | ||
268 | hwirq, cpu); | ||
269 | |||
270 | raw_spin_lock_irqsave(&mcip_lock, flags); | ||
271 | idu_set_dest(hwirq, cpu); | ||
272 | idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST); | ||
273 | raw_spin_unlock_irqrestore(&mcip_lock, flags); | ||
274 | } | ||
275 | |||
276 | return 0; | 267 | return 0; |
277 | } | 268 | } |
278 | 269 | ||
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 88674d972c9d..2afbafadb6ab 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
90 | */ | 90 | */ |
91 | static volatile int wake_flag; | 91 | static volatile int wake_flag; |
92 | 92 | ||
93 | #ifdef CONFIG_ISA_ARCOMPACT | ||
94 | |||
95 | #define __boot_read(f) f | ||
96 | #define __boot_write(f, v) f = v | ||
97 | |||
98 | #else | ||
99 | |||
100 | #define __boot_read(f) arc_read_uncached_32(&f) | ||
101 | #define __boot_write(f, v) arc_write_uncached_32(&f, v) | ||
102 | |||
103 | #endif | ||
104 | |||
93 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) | 105 | static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) |
94 | { | 106 | { |
95 | BUG_ON(cpu == 0); | 107 | BUG_ON(cpu == 0); |
96 | wake_flag = cpu; | 108 | |
109 | __boot_write(wake_flag, cpu); | ||
97 | } | 110 | } |
98 | 111 | ||
99 | void arc_platform_smp_wait_to_boot(int cpu) | 112 | void arc_platform_smp_wait_to_boot(int cpu) |
100 | { | 113 | { |
101 | while (wake_flag != cpu) | 114 | /* for halt-on-reset, we've waited already */ |
115 | if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET)) | ||
116 | return; | ||
117 | |||
118 | while (__boot_read(wake_flag) != cpu) | ||
102 | ; | 119 | ; |
103 | 120 | ||
104 | wake_flag = 0; | 121 | __boot_write(wake_flag, 0); |
105 | __asm__ __volatile__("j @first_lines_of_secondary \n"); | ||
106 | } | 122 | } |
107 | 123 | ||
108 | |||
109 | const char *arc_platform_smp_cpuinfo(void) | 124 | const char *arc_platform_smp_cpuinfo(void) |
110 | { | 125 | { |
111 | return plat_smp_ops.info ? : ""; | 126 | return plat_smp_ops.info ? : ""; |
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index abd961f3e763..91ebe382147f 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c | |||
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, | |||
241 | if (state.fault) | 241 | if (state.fault) |
242 | goto fault; | 242 | goto fault; |
243 | 243 | ||
244 | /* clear any remnants of delay slot */ | ||
244 | if (delay_mode(regs)) { | 245 | if (delay_mode(regs)) { |
245 | regs->ret = regs->bta; | 246 | regs->ret = regs->bta & ~1U; |
246 | regs->status32 &= ~STATUS_DE_MASK; | 247 | regs->status32 &= ~STATUS_DE_MASK; |
247 | } else { | 248 | } else { |
248 | regs->ret += state.instr_len; | 249 | regs->ret += state.instr_len; |
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 79c415c33f69..809f0bf3042a 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig | |||
@@ -24,7 +24,7 @@ CONFIG_ARM_APPENDED_DTB=y | |||
24 | CONFIG_ARM_ATAG_DTB_COMPAT=y | 24 | CONFIG_ARM_ATAG_DTB_COMPAT=y |
25 | CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M" | 25 | CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M" |
26 | CONFIG_CPU_FREQ=y | 26 | CONFIG_CPU_FREQ=y |
27 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 27 | CONFIG_CPU_FREQ_STAT=y |
28 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 28 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
29 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 29 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
30 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | 30 | CONFIG_CPU_FREQ_GOV_USERSPACE=m |
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index 361686a362f1..69a4bd13eea5 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig | |||
@@ -58,7 +58,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 | |||
58 | CONFIG_ARM_APPENDED_DTB=y | 58 | CONFIG_ARM_APPENDED_DTB=y |
59 | CONFIG_ARM_ATAG_DTB_COMPAT=y | 59 | CONFIG_ARM_ATAG_DTB_COMPAT=y |
60 | CONFIG_CPU_FREQ=y | 60 | CONFIG_CPU_FREQ=y |
61 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 61 | CONFIG_CPU_FREQ_STAT=y |
62 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 62 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
63 | CONFIG_CPU_IDLE=y | 63 | CONFIG_CPU_IDLE=y |
64 | CONFIG_ARM_KIRKWOOD_CPUIDLE=y | 64 | CONFIG_ARM_KIRKWOOD_CPUIDLE=y |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 028d2b70e3b5..8d7b17f52750 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
@@ -132,7 +132,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y | |||
132 | CONFIG_KEXEC=y | 132 | CONFIG_KEXEC=y |
133 | CONFIG_EFI=y | 133 | CONFIG_EFI=y |
134 | CONFIG_CPU_FREQ=y | 134 | CONFIG_CPU_FREQ=y |
135 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 135 | CONFIG_CPU_FREQ_STAT=y |
136 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 136 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
137 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 137 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
138 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | 138 | CONFIG_CPU_FREQ_GOV_USERSPACE=m |
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index f7f6039419aa..4b598da0d086 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig | |||
@@ -44,7 +44,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 | |||
44 | CONFIG_ARM_APPENDED_DTB=y | 44 | CONFIG_ARM_APPENDED_DTB=y |
45 | CONFIG_ARM_ATAG_DTB_COMPAT=y | 45 | CONFIG_ARM_ATAG_DTB_COMPAT=y |
46 | CONFIG_CPU_FREQ=y | 46 | CONFIG_CPU_FREQ=y |
47 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 47 | CONFIG_CPU_FREQ_STAT=y |
48 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 48 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
49 | CONFIG_CPU_IDLE=y | 49 | CONFIG_CPU_IDLE=y |
50 | CONFIG_ARM_KIRKWOOD_CPUIDLE=y | 50 | CONFIG_ARM_KIRKWOOD_CPUIDLE=y |
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index e4314b1227a3..271dc7e78e43 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig | |||
@@ -97,7 +97,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 | |||
97 | CONFIG_CMDLINE="root=/dev/ram0 ro" | 97 | CONFIG_CMDLINE="root=/dev/ram0 ro" |
98 | CONFIG_KEXEC=y | 98 | CONFIG_KEXEC=y |
99 | CONFIG_CPU_FREQ=y | 99 | CONFIG_CPU_FREQ=y |
100 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 100 | CONFIG_CPU_FREQ_STAT=y |
101 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 101 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
102 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 102 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
103 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | 103 | CONFIG_CPU_FREQ_GOV_USERSPACE=m |
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 1b0f8ae36fb3..adeaecd831a4 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig | |||
@@ -38,7 +38,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 | |||
38 | CONFIG_ARM_APPENDED_DTB=y | 38 | CONFIG_ARM_APPENDED_DTB=y |
39 | CONFIG_KEXEC=y | 39 | CONFIG_KEXEC=y |
40 | CONFIG_CPU_FREQ=y | 40 | CONFIG_CPU_FREQ=y |
41 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 41 | CONFIG_CPU_FREQ_STAT=y |
42 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y | 42 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y |
43 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | 43 | CONFIG_CPU_FREQ_GOV_USERSPACE=y |
44 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y | 44 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y |
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 23e9e13bd2aa..655e65f38f31 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * for more details. | 11 | * for more details. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/acpi.h> | ||
14 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
15 | #include <linux/cpumask.h> | 16 | #include <linux/cpumask.h> |
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = { | |||
209 | 210 | ||
210 | static int __init register_cpufreq_notifier(void) | 211 | static int __init register_cpufreq_notifier(void) |
211 | { | 212 | { |
212 | if (cap_parsing_failed) | 213 | /* |
214 | * On ACPI-based systems we need to use the default CPU capacity | ||
215 | * until we have the necessary code to parse the CPU capacity, so | ||
216 | * skip registering the cpufreq notifier. | ||
217 | */ | ||
218 | if (!acpi_disabled || cap_parsing_failed) | ||
213 | return -EINVAL; | 219 | return -EINVAL; |
214 | 220 | ||
215 | if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { | 221 | if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { |
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 1c2a5e264fc7..e93c9494503a 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h | |||
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v) | |||
139 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) | 139 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) |
140 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | 140 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) |
141 | #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) | 141 | #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) |
142 | 142 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | |
143 | 143 | ||
144 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) | 144 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) |
145 | #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) | 145 | #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) |
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |||
161 | return c; | 161 | return c; |
162 | } | 162 | } |
163 | 163 | ||
164 | static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) | ||
165 | { | ||
166 | long long c, old; | ||
167 | |||
168 | c = atomic64_read(v); | ||
169 | for (;;) { | ||
170 | if (unlikely(c == u)) | ||
171 | break; | ||
172 | old = atomic64_cmpxchg(v, c, c + i); | ||
173 | if (likely(old == c)) | ||
174 | break; | ||
175 | c = old; | ||
176 | } | ||
177 | return c != u; | ||
178 | } | ||
179 | |||
180 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
181 | { | ||
182 | long long c, old, dec; | ||
183 | |||
184 | c = atomic64_read(v); | ||
185 | for (;;) { | ||
186 | dec = c - 1; | ||
187 | if (unlikely(dec < 0)) | ||
188 | break; | ||
189 | old = atomic64_cmpxchg((v), c, dec); | ||
190 | if (likely(old == c)) | ||
191 | break; | ||
192 | c = old; | ||
193 | } | ||
194 | return dec; | ||
195 | } | ||
196 | |||
164 | #define ATOMIC_OP(op) \ | 197 | #define ATOMIC_OP(op) \ |
165 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | 198 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ |
166 | { \ | 199 | { \ |
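The new frv helpers follow the classic compare-and-swap retry loop. A portable miniature of the same add-unless pattern, written with C11 atomics purely for illustration (the names below are not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static bool add_unless(_Atomic long long *v, long long i, long long u)
{
	long long c = atomic_load(v);

	while (c != u) {
		/* on failure, c is reloaded with the value observed in *v */
		if (atomic_compare_exchange_weak(v, &c, c + i))
			return true;	/* counter was bumped */
	}
	return false;	/* counter already held the forbidden value u */
}

int main(void)
{
	_Atomic long long refs = 1;

	/* inc_not_zero(v) is add_unless(v, 1, 0): take a reference
	 * only while the object is still live, e.g. in lookup paths */
	return add_unless(&refs, 1, 0) ? 0 : 1;
}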
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 5da76e0e120f..bed745596d86 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig | |||
@@ -40,7 +40,6 @@ CONFIG_PM_STD_PARTITION="/dev/hda3" | |||
40 | CONFIG_CPU_FREQ=y | 40 | CONFIG_CPU_FREQ=y |
41 | CONFIG_CPU_FREQ_DEBUG=y | 41 | CONFIG_CPU_FREQ_DEBUG=y |
42 | CONFIG_CPU_FREQ_STAT=m | 42 | CONFIG_CPU_FREQ_STAT=m |
43 | CONFIG_CPU_FREQ_STAT_DETAILS=y | ||
44 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 43 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
45 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 44 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
46 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | 45 | CONFIG_CPU_FREQ_GOV_USERSPACE=m |
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h index 393d311735c8..67e333aa7629 100644 --- a/arch/mn10300/include/asm/switch_to.h +++ b/arch/mn10300/include/asm/switch_to.h | |||
@@ -16,7 +16,7 @@ | |||
16 | struct task_struct; | 16 | struct task_struct; |
17 | struct thread_struct; | 17 | struct thread_struct; |
18 | 18 | ||
19 | #if !defined(CONFIG_LAZY_SAVE_FPU) | 19 | #if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU) |
20 | struct fpu_state_struct; | 20 | struct fpu_state_struct; |
21 | extern asmlinkage void fpu_save(struct fpu_state_struct *); | 21 | extern asmlinkage void fpu_save(struct fpu_state_struct *); |
22 | #define switch_fpu(prev, next) \ | 22 | #define switch_fpu(prev, next) \ |
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h index 3f9406d9b9d6..da87943328a5 100644 --- a/arch/parisc/include/asm/bitops.h +++ b/arch/parisc/include/asm/bitops.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | #include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ | 9 | #include <asm/types.h> |
10 | #include <asm/byteorder.h> | 10 | #include <asm/byteorder.h> |
11 | #include <asm/barrier.h> | 11 | #include <asm/barrier.h> |
12 | #include <linux/atomic.h> | 12 | #include <linux/atomic.h> |
@@ -17,6 +17,12 @@ | |||
17 | * to include/asm-i386/bitops.h or kerneldoc | 17 | * to include/asm-i386/bitops.h or kerneldoc |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #if __BITS_PER_LONG == 64 | ||
21 | #define SHIFT_PER_LONG 6 | ||
22 | #else | ||
23 | #define SHIFT_PER_LONG 5 | ||
24 | #endif | ||
25 | |||
20 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) | 26 | #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) |
21 | 27 | ||
22 | 28 | ||
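With SHIFT_PER_LONG now private to this header, it is worth spelling out how the two macros combine; a sketch of the generic pattern, compiling in the context of the header above (the locking the real parisc helpers wrap around the store is omitted here):

/* word index = nr / BITS_PER_LONG, bit within word = nr % BITS_PER_LONG */
static inline void __set_bit_sketch(unsigned long nr, unsigned long *addr)
{
	unsigned long *word = addr + (nr >> SHIFT_PER_LONG);
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);

	*word |= mask;
}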
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h index e0a23c7bdd43..07fa7e50bdc0 100644 --- a/arch/parisc/include/uapi/asm/bitsperlong.h +++ b/arch/parisc/include/uapi/asm/bitsperlong.h | |||
@@ -3,10 +3,8 @@ | |||
3 | 3 | ||
4 | #if defined(__LP64__) | 4 | #if defined(__LP64__) |
5 | #define __BITS_PER_LONG 64 | 5 | #define __BITS_PER_LONG 64 |
6 | #define SHIFT_PER_LONG 6 | ||
7 | #else | 6 | #else |
8 | #define __BITS_PER_LONG 32 | 7 | #define __BITS_PER_LONG 32 |
9 | #define SHIFT_PER_LONG 5 | ||
10 | #endif | 8 | #endif |
11 | 9 | ||
12 | #include <asm-generic/bitsperlong.h> | 10 | #include <asm-generic/bitsperlong.h> |
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h index e78403b129ef..928e1bbac98f 100644 --- a/arch/parisc/include/uapi/asm/swab.h +++ b/arch/parisc/include/uapi/asm/swab.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _PARISC_SWAB_H | 1 | #ifndef _PARISC_SWAB_H |
2 | #define _PARISC_SWAB_H | 2 | #define _PARISC_SWAB_H |
3 | 3 | ||
4 | #include <asm/bitsperlong.h> | ||
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/compiler.h> | 6 | #include <linux/compiler.h> |
6 | 7 | ||
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) | |||
38 | } | 39 | } |
39 | #define __arch_swab32 __arch_swab32 | 40 | #define __arch_swab32 __arch_swab32 |
40 | 41 | ||
41 | #if BITS_PER_LONG > 32 | 42 | #if __BITS_PER_LONG > 32 |
42 | /* | 43 | /* |
43 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. | 44 | ** From "PA-RISC 2.0 Architecture", HP Professional Books. |
44 | ** See Appendix I page 8 , "Endian Byte Swapping". | 45 | ** See Appendix I page 8 , "Endian Byte Swapping". |
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x) | |||
61 | return x; | 62 | return x; |
62 | } | 63 | } |
63 | #define __arch_swab64 __arch_swab64 | 64 | #define __arch_swab64 __arch_swab64 |
64 | #endif /* BITS_PER_LONG > 32 */ | 65 | #endif /* __BITS_PER_LONG > 32 */ |
65 | 66 | ||
66 | #endif /* _PARISC_SWAB_H */ | 67 | #endif /* _PARISC_SWAB_H */ |
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 3ce91a3df27f..1d2d69dd6409 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig | |||
@@ -62,7 +62,6 @@ CONFIG_MPC8610_HPCD=y | |||
62 | CONFIG_GEF_SBC610=y | 62 | CONFIG_GEF_SBC610=y |
63 | CONFIG_CPU_FREQ=y | 63 | CONFIG_CPU_FREQ=y |
64 | CONFIG_CPU_FREQ_STAT=m | 64 | CONFIG_CPU_FREQ_STAT=m |
65 | CONFIG_CPU_FREQ_STAT_DETAILS=y | ||
66 | CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y | 65 | CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y |
67 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | 66 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y |
68 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 67 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 7447ba509c30..12020b55887b 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target, | |||
963 | if (target == current) | 963 | if (target == current) |
964 | save_fpu_regs(); | 964 | save_fpu_regs(); |
965 | 965 | ||
966 | if (MACHINE_HAS_VX) | ||
967 | convert_vx_to_fp(fprs, target->thread.fpu.vxrs); | ||
968 | else | ||
969 | memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); | ||
970 | |||
966 | /* If setting FPC, must validate it first. */ | 971 | /* If setting FPC, must validate it first. */ |
967 | if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { | 972 | if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { |
968 | u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; | 973 | u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; |
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target, | |||
1067 | if (target == current) | 1072 | if (target == current) |
1068 | save_fpu_regs(); | 1073 | save_fpu_regs(); |
1069 | 1074 | ||
1075 | for (i = 0; i < __NUM_VXRS_LOW; i++) | ||
1076 | vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); | ||
1077 | |||
1070 | rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); | 1078 | rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); |
1071 | if (rc == 0) | 1079 | if (rc == 0) |
1072 | for (i = 0; i < __NUM_VXRS_LOW; i++) | 1080 | for (i = 0; i < __NUM_VXRS_LOW; i++) |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 7a1897c51c54..d56ef26d4681 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm, | |||
202 | return pgste; | 202 | return pgste; |
203 | } | 203 | } |
204 | 204 | ||
205 | static inline void ptep_xchg_commit(struct mm_struct *mm, | 205 | static inline pte_t ptep_xchg_commit(struct mm_struct *mm, |
206 | unsigned long addr, pte_t *ptep, | 206 | unsigned long addr, pte_t *ptep, |
207 | pgste_t pgste, pte_t old, pte_t new) | 207 | pgste_t pgste, pte_t old, pte_t new) |
208 | { | 208 | { |
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm, | |||
220 | } else { | 220 | } else { |
221 | *ptep = new; | 221 | *ptep = new; |
222 | } | 222 | } |
223 | return old; | ||
223 | } | 224 | } |
224 | 225 | ||
225 | pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, | 226 | pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, |
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, | |||
231 | preempt_disable(); | 232 | preempt_disable(); |
232 | pgste = ptep_xchg_start(mm, addr, ptep); | 233 | pgste = ptep_xchg_start(mm, addr, ptep); |
233 | old = ptep_flush_direct(mm, addr, ptep); | 234 | old = ptep_flush_direct(mm, addr, ptep); |
234 | ptep_xchg_commit(mm, addr, ptep, pgste, old, new); | 235 | old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); |
235 | preempt_enable(); | 236 | preempt_enable(); |
236 | return old; | 237 | return old; |
237 | } | 238 | } |
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, | |||
246 | preempt_disable(); | 247 | preempt_disable(); |
247 | pgste = ptep_xchg_start(mm, addr, ptep); | 248 | pgste = ptep_xchg_start(mm, addr, ptep); |
248 | old = ptep_flush_lazy(mm, addr, ptep); | 249 | old = ptep_flush_lazy(mm, addr, ptep); |
249 | ptep_xchg_commit(mm, addr, ptep, pgste, old, new); | 250 | old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); |
250 | preempt_enable(); | 251 | preempt_enable(); |
251 | return old; | 252 | return old; |
252 | } | 253 | } |
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 9bdcf72ec06a..2fce54d9c388 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig | |||
@@ -25,7 +25,7 @@ CONFIG_SH_SH7785LCR=y | |||
25 | CONFIG_NO_HZ=y | 25 | CONFIG_NO_HZ=y |
26 | CONFIG_HIGH_RES_TIMERS=y | 26 | CONFIG_HIGH_RES_TIMERS=y |
27 | CONFIG_CPU_FREQ=y | 27 | CONFIG_CPU_FREQ=y |
28 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 28 | CONFIG_CPU_FREQ_STAT=y |
29 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y | 29 | CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y |
30 | CONFIG_SH_CPU_FREQ=y | 30 | CONFIG_SH_CPU_FREQ=y |
31 | CONFIG_HEARTBEAT=y | 31 | CONFIG_HEARTBEAT=y |
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index d89b7011667c..e279572824b1 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c | |||
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target, | |||
111 | const void *kbuf, const void __user *ubuf) | 111 | const void *kbuf, const void __user *ubuf) |
112 | { | 112 | { |
113 | int ret; | 113 | int ret; |
114 | struct pt_regs regs; | 114 | struct pt_regs regs = *task_pt_regs(target); |
115 | 115 | ||
116 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, | 116 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, |
117 | sizeof(regs)); | 117 | sizeof(regs)); |
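This hunk and the s390 regset fixes above share one pattern: when user_regset_copyin() may legally cover only part of the structure, the destination must be seeded with the task's current state first, or the uncovered tail is stack garbage. In miniature (a sketch with plain C stand-ins, not the kernel helpers):

#include <stdio.h>
#include <string.h>

struct regs { long r[4]; };

/* partial update: copy only `count` bytes from `src` at offset `pos` */
static void copyin(struct regs *dst, const void *src, size_t pos, size_t count)
{
	memcpy((char *)dst + pos, src, count);
}

int main(void)
{
	struct regs live = { { 1, 2, 3, 4 } };
	struct regs scratch = live;	/* the fix: seed from live state */
	long newval = 42;

	copyin(&scratch, &newval, 0, sizeof(newval));	/* user sets r[0] only */
	printf("r = {%ld, %ld, %ld, %ld}\n",
	       scratch.r[0], scratch.r[1], scratch.r[2], scratch.r[3]);
	return 0;	/* prints {42, 2, 3, 4}; unseeded, r[1..3] were junk */
}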
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 82b0b5710979..b0399e8f6d27 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c | |||
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address, | |||
852 | 852 | ||
853 | ACPI_FUNCTION_TRACE(tb_install_and_load_table); | 853 | ACPI_FUNCTION_TRACE(tb_install_and_load_table); |
854 | 854 | ||
855 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
856 | |||
857 | /* Install the table and load it into the namespace */ | 855 | /* Install the table and load it into the namespace */ |
858 | 856 | ||
859 | status = acpi_tb_install_standard_table(address, flags, TRUE, | 857 | status = acpi_tb_install_standard_table(address, flags, TRUE, |
860 | override, &i); | 858 | override, &i); |
861 | if (ACPI_FAILURE(status)) { | 859 | if (ACPI_FAILURE(status)) { |
862 | goto unlock_and_exit; | 860 | goto exit; |
863 | } | 861 | } |
864 | 862 | ||
865 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
866 | status = acpi_tb_load_table(i, acpi_gbl_root_node); | 863 | status = acpi_tb_load_table(i, acpi_gbl_root_node); |
867 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
868 | 864 | ||
869 | unlock_and_exit: | 865 | exit: |
870 | *table_index = i; | 866 | *table_index = i; |
871 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
872 | return_ACPI_STATUS(status); | 867 | return_ACPI_STATUS(status); |
873 | } | 868 | } |
874 | 869 | ||
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 5fdf251a9f97..01e1b3d63fc0 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address, | |||
217 | goto release_and_exit; | 217 | goto release_and_exit; |
218 | } | 218 | } |
219 | 219 | ||
220 | /* Acquire the table lock */ | ||
221 | |||
222 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
223 | |||
220 | if (reload) { | 224 | if (reload) { |
221 | /* | 225 | /* |
222 | * Validate the incoming table signature. | 226 | * Validate the incoming table signature. |
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address, | |||
244 | new_table_desc.signature.integer)); | 248 | new_table_desc.signature.integer)); |
245 | 249 | ||
246 | status = AE_BAD_SIGNATURE; | 250 | status = AE_BAD_SIGNATURE; |
247 | goto release_and_exit; | 251 | goto unlock_and_exit; |
248 | } | 252 | } |
249 | 253 | ||
250 | /* Check if table is already registered */ | 254 | /* Check if table is already registered */ |
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address, | |||
279 | /* Table is still loaded, this is an error */ | 283 | /* Table is still loaded, this is an error */ |
280 | 284 | ||
281 | status = AE_ALREADY_EXISTS; | 285 | status = AE_ALREADY_EXISTS; |
282 | goto release_and_exit; | 286 | goto unlock_and_exit; |
283 | } else { | 287 | } else { |
284 | /* | 288 | /* |
285 | * Table was unloaded, allow it to be reloaded. | 289 | * Table was unloaded, allow it to be reloaded. |
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address, | |||
290 | * indicate the re-installation. | 294 | * indicate the re-installation. |
291 | */ | 295 | */ |
292 | acpi_tb_uninstall_table(&new_table_desc); | 296 | acpi_tb_uninstall_table(&new_table_desc); |
297 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
293 | *table_index = i; | 298 | *table_index = i; |
294 | return_ACPI_STATUS(AE_OK); | 299 | return_ACPI_STATUS(AE_OK); |
295 | } | 300 | } |
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address, | |||
303 | 308 | ||
304 | /* Invoke table handler if present */ | 309 | /* Invoke table handler if present */ |
305 | 310 | ||
311 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
306 | if (acpi_gbl_table_handler) { | 312 | if (acpi_gbl_table_handler) { |
307 | (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL, | 313 | (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL, |
308 | new_table_desc.pointer, | 314 | new_table_desc.pointer, |
309 | acpi_gbl_table_handler_context); | 315 | acpi_gbl_table_handler_context); |
310 | } | 316 | } |
317 | (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); | ||
318 | |||
319 | unlock_and_exit: | ||
320 | |||
321 | /* Release the table lock */ | ||
322 | |||
323 | (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); | ||
311 | 324 | ||
312 | release_and_exit: | 325 | release_and_exit: |
313 | 326 | ||
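The reshuffled locking boils down to one rule: the global table handler is invoked with ACPI_MTX_TABLES dropped, then the mutex is retaken before the shared exit label. A miniature of that shape using pthreads stand-ins (a sketch only, not ACPICA code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tables_lock = PTHREAD_MUTEX_INITIALIZER;

static void install_with_handler(void (*handler)(void *), void *ctx)
{
	pthread_mutex_lock(&tables_lock);
	/* ... validate and install the table under the lock ... */
	pthread_mutex_unlock(&tables_lock);

	if (handler)
		handler(ctx);	/* may itself take tables_lock safely now */

	pthread_mutex_lock(&tables_lock);
	/* ... shared unlock_and_exit path ... */
	pthread_mutex_unlock(&tables_lock);
}

int main(void)
{
	install_with_handler(NULL, NULL);
	puts("done");
	return 0;
}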
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index f0b4a981b8d3..18b72eec3507 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -75,10 +75,8 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb, | |||
75 | struct acpi_processor *pr; | 75 | struct acpi_processor *pr; |
76 | unsigned int ppc = 0; | 76 | unsigned int ppc = 0; |
77 | 77 | ||
78 | if (event == CPUFREQ_START && ignore_ppc <= 0) { | 78 | if (ignore_ppc < 0) |
79 | ignore_ppc = 0; | 79 | ignore_ppc = 0; |
80 | return 0; | ||
81 | } | ||
82 | 80 | ||
83 | if (ignore_ppc) | 81 | if (ignore_ppc) |
84 | return 0; | 82 | return 0; |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 9b6cebe227a0..54abb26b7366 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void) | |||
674 | if (acpi_sleep_state_supported(i)) | 674 | if (acpi_sleep_state_supported(i)) |
675 | sleep_states[i] = 1; | 675 | sleep_states[i] = 1; |
676 | 676 | ||
677 | /* | ||
678 | * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and | ||
679 | * the default suspend mode was not selected from the command line. | ||
680 | */ | ||
681 | if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 && | ||
682 | mem_sleep_default > PM_SUSPEND_MEM) | ||
683 | mem_sleep_default = PM_SUSPEND_FREEZE; | ||
684 | |||
685 | suspend_set_ops(old_suspend_ordering ? | 677 | suspend_set_ops(old_suspend_ordering ? |
686 | &acpi_suspend_ops_old : &acpi_suspend_ops); | 678 | &acpi_suspend_ops_old : &acpi_suspend_ops); |
687 | freeze_set_ops(&acpi_freeze_ops); | 679 | freeze_set_ops(&acpi_freeze_ops); |
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 02ded25c82e4..7f48156cbc0c 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { | |||
305 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"), | 305 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"), |
306 | }, | 306 | }, |
307 | }, | 307 | }, |
308 | { | ||
309 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */ | ||
310 | /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */ | ||
311 | .callback = video_detect_force_native, | ||
312 | .ident = "HP Pavilion dv6", | ||
313 | .matches = { | ||
314 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
315 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"), | ||
316 | }, | ||
317 | }, | ||
318 | |||
319 | { }, | 308 | { }, |
320 | }; | 309 | }; |
321 | 310 | ||
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 8ab8ea1253e6..dacb6a8418aa 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev, | |||
408 | sprintf(buf, "%s", zone->name); | 408 | sprintf(buf, "%s", zone->name); |
409 | 409 | ||
410 | /* MMOP_ONLINE_KERNEL */ | 410 | /* MMOP_ONLINE_KERNEL */ |
411 | zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL); | 411 | zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift); |
412 | if (zone_shift) { | 412 | if (zone_shift) { |
413 | strcat(buf, " "); | 413 | strcat(buf, " "); |
414 | strcat(buf, (zone + zone_shift)->name); | 414 | strcat(buf, (zone + zone_shift)->name); |
415 | } | 415 | } |
416 | 416 | ||
417 | /* MMOP_ONLINE_MOVABLE */ | 417 | /* MMOP_ONLINE_MOVABLE */ |
418 | zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE); | 418 | zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift); |
419 | if (zone_shift) { | 419 | if (zone_shift) { |
420 | strcat(buf, " "); | 420 | strcat(buf, " "); |
421 | strcat(buf, (zone + zone_shift)->name); | 421 | strcat(buf, (zone + zone_shift)->name); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b2bdfa81f929..265f1a7072e9 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -197,13 +197,13 @@ struct blkfront_info | |||
197 | /* Number of pages per ring buffer. */ | 197 | /* Number of pages per ring buffer. */ |
198 | unsigned int nr_ring_pages; | 198 | unsigned int nr_ring_pages; |
199 | struct request_queue *rq; | 199 | struct request_queue *rq; |
200 | unsigned int feature_flush; | 200 | unsigned int feature_flush:1; |
201 | unsigned int feature_fua; | 201 | unsigned int feature_fua:1; |
202 | unsigned int feature_discard:1; | 202 | unsigned int feature_discard:1; |
203 | unsigned int feature_secdiscard:1; | 203 | unsigned int feature_secdiscard:1; |
204 | unsigned int feature_persistent:1; | ||
204 | unsigned int discard_granularity; | 205 | unsigned int discard_granularity; |
205 | unsigned int discard_alignment; | 206 | unsigned int discard_alignment; |
206 | unsigned int feature_persistent:1; | ||
207 | /* Number of 4KB segments handled */ | 207 | /* Number of 4KB segments handled */ |
208 | unsigned int max_indirect_segments; | 208 | unsigned int max_indirect_segments; |
209 | int is_ready; | 209 | int is_ready; |
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) | |||
2223 | } | 2223 | } |
2224 | else | 2224 | else |
2225 | grants = info->max_indirect_segments; | 2225 | grants = info->max_indirect_segments; |
2226 | psegs = grants / GRANTS_PER_PSEG; | 2226 | psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG); |
2227 | 2227 | ||
2228 | err = fill_grant_buffer(rinfo, | 2228 | err = fill_grant_buffer(rinfo, |
2229 | (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info)); | 2229 | (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info)); |
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2323 | blkfront_setup_discard(info); | 2323 | blkfront_setup_discard(info); |
2324 | 2324 | ||
2325 | info->feature_persistent = | 2325 | info->feature_persistent = |
2326 | xenbus_read_unsigned(info->xbdev->otherend, | 2326 | !!xenbus_read_unsigned(info->xbdev->otherend, |
2327 | "feature-persistent", 0); | 2327 | "feature-persistent", 0); |
2328 | 2328 | ||
2329 | indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, | 2329 | indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, |
2330 | "feature-max-indirect-segments", 0); | 2330 | "feature-max-indirect-segments", 0); |
2331 | info->max_indirect_segments = min(indirect_segments, | 2331 | if (indirect_segments > xen_blkif_max_segments) |
2332 | xen_blkif_max_segments); | 2332 | indirect_segments = xen_blkif_max_segments; |
2333 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) | ||
2334 | indirect_segments = 0; | ||
2335 | info->max_indirect_segments = indirect_segments; | ||
2333 | } | 2336 | } |
2334 | 2337 | ||
2335 | /* | 2338 | /* |
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void) | |||
2652 | if (!xen_domain()) | 2655 | if (!xen_domain()) |
2653 | return -ENODEV; | 2656 | return -ENODEV; |
2654 | 2657 | ||
2658 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) | ||
2659 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
2660 | |||
2655 | if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { | 2661 | if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { |
2656 | pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", | 2662 | pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", |
2657 | xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); | 2663 | xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); |
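One of the hunks above swaps a plain division for DIV_ROUND_UP, the stock kernel.h macro: with a grant count that is not a multiple of GRANTS_PER_PSEG, truncating division silently drops the tail. Illustrative numbers only:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int grants = 5, per_pseg = 2;	/* hypothetical values */

	printf("truncating: %u segments\n", grants / per_pseg);		/* 2 */
	printf("rounded up: %u segments\n", DIV_ROUND_UP(grants, per_pseg)); /* 3 */
	return 0;
}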
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d8b164a7c4e5..15adef473d42 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -37,14 +37,6 @@ config CPU_FREQ_STAT | |||
37 | 37 | ||
38 | If in doubt, say N. | 38 | If in doubt, say N. |
39 | 39 | ||
40 | config CPU_FREQ_STAT_DETAILS | ||
41 | bool "CPU frequency transition statistics details" | ||
42 | depends on CPU_FREQ_STAT | ||
43 | help | ||
44 | Show detailed CPU frequency transition table in sysfs. | ||
45 | |||
46 | If in doubt, say N. | ||
47 | |||
48 | choice | 40 | choice |
49 | prompt "Default CPUFreq governor" | 41 | prompt "Default CPUFreq governor" |
50 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ | 42 | default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cc475eff90b3..408479540566 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1078,15 +1078,11 @@ err_free_policy: | |||
1078 | return NULL; | 1078 | return NULL; |
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) | 1081 | static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) |
1082 | { | 1082 | { |
1083 | struct kobject *kobj; | 1083 | struct kobject *kobj; |
1084 | struct completion *cmp; | 1084 | struct completion *cmp; |
1085 | 1085 | ||
1086 | if (notify) | ||
1087 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | ||
1088 | CPUFREQ_REMOVE_POLICY, policy); | ||
1089 | |||
1090 | down_write(&policy->rwsem); | 1086 | down_write(&policy->rwsem); |
1091 | cpufreq_stats_free_table(policy); | 1087 | cpufreq_stats_free_table(policy); |
1092 | kobj = &policy->kobj; | 1088 | kobj = &policy->kobj; |
@@ -1104,7 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) | |||
1104 | pr_debug("wait complete\n"); | 1100 | pr_debug("wait complete\n"); |
1105 | } | 1101 | } |
1106 | 1102 | ||
1107 | static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) | 1103 | static void cpufreq_policy_free(struct cpufreq_policy *policy) |
1108 | { | 1104 | { |
1109 | unsigned long flags; | 1105 | unsigned long flags; |
1110 | int cpu; | 1106 | int cpu; |
@@ -1117,7 +1113,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) | |||
1117 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | 1113 | per_cpu(cpufreq_cpu_data, cpu) = NULL; |
1118 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1114 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1119 | 1115 | ||
1120 | cpufreq_policy_put_kobj(policy, notify); | 1116 | cpufreq_policy_put_kobj(policy); |
1121 | free_cpumask_var(policy->real_cpus); | 1117 | free_cpumask_var(policy->real_cpus); |
1122 | free_cpumask_var(policy->related_cpus); | 1118 | free_cpumask_var(policy->related_cpus); |
1123 | free_cpumask_var(policy->cpus); | 1119 | free_cpumask_var(policy->cpus); |
@@ -1244,17 +1240,12 @@ static int cpufreq_online(unsigned int cpu) | |||
1244 | goto out_exit_policy; | 1240 | goto out_exit_policy; |
1245 | 1241 | ||
1246 | cpufreq_stats_create_table(policy); | 1242 | cpufreq_stats_create_table(policy); |
1247 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | ||
1248 | CPUFREQ_CREATE_POLICY, policy); | ||
1249 | 1243 | ||
1250 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1244 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1251 | list_add(&policy->policy_list, &cpufreq_policy_list); | 1245 | list_add(&policy->policy_list, &cpufreq_policy_list); |
1252 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1246 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1253 | } | 1247 | } |
1254 | 1248 | ||
1255 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | ||
1256 | CPUFREQ_START, policy); | ||
1257 | |||
1258 | ret = cpufreq_init_policy(policy); | 1249 | ret = cpufreq_init_policy(policy); |
1259 | if (ret) { | 1250 | if (ret) { |
1260 | pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", | 1251 | pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", |
@@ -1282,7 +1273,7 @@ out_exit_policy: | |||
1282 | if (cpufreq_driver->exit) | 1273 | if (cpufreq_driver->exit) |
1283 | cpufreq_driver->exit(policy); | 1274 | cpufreq_driver->exit(policy); |
1284 | out_free_policy: | 1275 | out_free_policy: |
1285 | cpufreq_policy_free(policy, !new_policy); | 1276 | cpufreq_policy_free(policy); |
1286 | return ret; | 1277 | return ret; |
1287 | } | 1278 | } |
1288 | 1279 | ||
@@ -1403,7 +1394,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | |||
1403 | remove_cpu_dev_symlink(policy, dev); | 1394 | remove_cpu_dev_symlink(policy, dev); |
1404 | 1395 | ||
1405 | if (cpumask_empty(policy->real_cpus)) | 1396 | if (cpumask_empty(policy->real_cpus)) |
1406 | cpufreq_policy_free(policy, true); | 1397 | cpufreq_policy_free(policy); |
1407 | } | 1398 | } |
1408 | 1399 | ||
1409 | /** | 1400 | /** |
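[Editor's note] The cpufreq.c hunks above stop delivering CPUFREQ_CREATE_POLICY, CPUFREQ_REMOVE_POLICY and CPUFREQ_START to policy notifiers, which is why cpufreq_policy_free() and cpufreq_policy_put_kobj() lose their notify flag (the ppc_cbe_cpufreq_pmi.c hunk further down drops its CPUFREQ_START special case for the same reason). For orientation, here is a minimal sketch of how a policy notifier is registered on the CPUFREQ_POLICY_NOTIFIER chain; the callback name and the event it handles are illustrative, not part of this merge:

    /* Minimal sketch of a cpufreq policy notifier; "my_policy_cb" is a
     * hypothetical name.  After this merge, CPUFREQ_CREATE_POLICY,
     * CPUFREQ_REMOVE_POLICY and CPUFREQ_START are no longer sent from
     * the paths shown above. */
    #include <linux/cpufreq.h>
    #include <linux/notifier.h>

    static int my_policy_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
    {
            struct cpufreq_policy *policy = data;

            if (event == CPUFREQ_ADJUST)
                    pr_debug("policy for CPU%u: %u..%u kHz\n",
                             policy->cpu, policy->min, policy->max);
            return NOTIFY_OK;
    }

    static struct notifier_block my_policy_nb = {
            .notifier_call = my_policy_cb,
    };

    /* registered e.g. from a module init path with:
     * cpufreq_register_notifier(&my_policy_nb, CPUFREQ_POLICY_NOTIFIER); */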
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index ac284e66839c..18abd454da43 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -25,9 +25,7 @@ struct cpufreq_stats { | |||
25 | unsigned int last_index; | 25 | unsigned int last_index; |
26 | u64 *time_in_state; | 26 | u64 *time_in_state; |
27 | unsigned int *freq_table; | 27 | unsigned int *freq_table; |
28 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
29 | unsigned int *trans_table; | 28 | unsigned int *trans_table; |
30 | #endif | ||
31 | }; | 29 | }; |
32 | 30 | ||
33 | static int cpufreq_stats_update(struct cpufreq_stats *stats) | 31 | static int cpufreq_stats_update(struct cpufreq_stats *stats) |
@@ -46,9 +44,7 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) | |||
46 | unsigned int count = stats->max_state; | 44 | unsigned int count = stats->max_state; |
47 | 45 | ||
48 | memset(stats->time_in_state, 0, count * sizeof(u64)); | 46 | memset(stats->time_in_state, 0, count * sizeof(u64)); |
49 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
50 | memset(stats->trans_table, 0, count * count * sizeof(int)); | 47 | memset(stats->trans_table, 0, count * count * sizeof(int)); |
51 | #endif | ||
52 | stats->last_time = get_jiffies_64(); | 48 | stats->last_time = get_jiffies_64(); |
53 | stats->total_trans = 0; | 49 | stats->total_trans = 0; |
54 | } | 50 | } |
@@ -84,7 +80,6 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, | |||
84 | return count; | 80 | return count; |
85 | } | 81 | } |
86 | 82 | ||
87 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
88 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | 83 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) |
89 | { | 84 | { |
90 | struct cpufreq_stats *stats = policy->stats; | 85 | struct cpufreq_stats *stats = policy->stats; |
@@ -129,7 +124,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | |||
129 | return len; | 124 | return len; |
130 | } | 125 | } |
131 | cpufreq_freq_attr_ro(trans_table); | 126 | cpufreq_freq_attr_ro(trans_table); |
132 | #endif | ||
133 | 127 | ||
134 | cpufreq_freq_attr_ro(total_trans); | 128 | cpufreq_freq_attr_ro(total_trans); |
135 | cpufreq_freq_attr_ro(time_in_state); | 129 | cpufreq_freq_attr_ro(time_in_state); |
@@ -139,9 +133,7 @@ static struct attribute *default_attrs[] = { | |||
139 | &total_trans.attr, | 133 | &total_trans.attr, |
140 | &time_in_state.attr, | 134 | &time_in_state.attr, |
141 | &reset.attr, | 135 | &reset.attr, |
142 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
143 | &trans_table.attr, | 136 | &trans_table.attr, |
144 | #endif | ||
145 | NULL | 137 | NULL |
146 | }; | 138 | }; |
147 | static struct attribute_group stats_attr_group = { | 139 | static struct attribute_group stats_attr_group = { |
@@ -200,9 +192,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) | |||
200 | 192 | ||
201 | alloc_size = count * sizeof(int) + count * sizeof(u64); | 193 | alloc_size = count * sizeof(int) + count * sizeof(u64); |
202 | 194 | ||
203 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
204 | alloc_size += count * count * sizeof(int); | 195 | alloc_size += count * count * sizeof(int); |
205 | #endif | ||
206 | 196 | ||
207 | /* Allocate memory for time_in_state/freq_table/trans_table in one go */ | 197 | /* Allocate memory for time_in_state/freq_table/trans_table in one go */ |
208 | stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL); | 198 | stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL); |
@@ -211,9 +201,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) | |||
211 | 201 | ||
212 | stats->freq_table = (unsigned int *)(stats->time_in_state + count); | 202 | stats->freq_table = (unsigned int *)(stats->time_in_state + count); |
213 | 203 | ||
214 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
215 | stats->trans_table = stats->freq_table + count; | 204 | stats->trans_table = stats->freq_table + count; |
216 | #endif | ||
217 | 205 | ||
218 | stats->max_state = count; | 206 | stats->max_state = count; |
219 | 207 | ||
@@ -259,8 +247,6 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy, | |||
259 | cpufreq_stats_update(stats); | 247 | cpufreq_stats_update(stats); |
260 | 248 | ||
261 | stats->last_index = new_index; | 249 | stats->last_index = new_index; |
262 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | ||
263 | stats->trans_table[old_index * stats->max_state + new_index]++; | 250 | stats->trans_table[old_index * stats->max_state + new_index]++; |
264 | #endif | ||
265 | stats->total_trans++; | 251 | stats->total_trans++; |
266 | } | 252 | } |
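[Editor's note] With CONFIG_CPU_FREQ_STAT_DETAILS removed, trans_table is now always carved out of the same single kzalloc() buffer as time_in_state and freq_table, and transitions are indexed as old_index * max_state + new_index. A self-contained userspace sketch of the same carving arithmetic (names and the state count are illustrative):

    /* Userspace sketch of the single-allocation layout above: one buffer
     * holds count u64 counters, then count ints, then a count x count
     * transition matrix. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int count = 4;   /* number of P-states, for example */
            size_t alloc_size = count * sizeof(uint64_t)          /* time_in_state */
                              + count * sizeof(unsigned int)      /* freq_table */
                              + count * count * sizeof(unsigned int); /* trans_table */

            uint64_t *time_in_state = calloc(1, alloc_size);
            unsigned int *freq_table = (unsigned int *)(time_in_state + count);
            unsigned int *trans_table = freq_table + count;

            /* record one transition from state 1 to state 3 */
            trans_table[1 * count + 3]++;
            printf("1->3 transitions: %u\n", trans_table[1 * count + 3]);
            free(time_in_state);
            return 0;
    }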
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 86e36544925f..eb0f7fb71685 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -358,6 +358,8 @@ static struct pstate_funcs pstate_funcs __read_mostly; | |||
358 | static int hwp_active __read_mostly; | 358 | static int hwp_active __read_mostly; |
359 | static bool per_cpu_limits __read_mostly; | 359 | static bool per_cpu_limits __read_mostly; |
360 | 360 | ||
361 | static bool driver_registered __read_mostly; | ||
362 | |||
361 | #ifdef CONFIG_ACPI | 363 | #ifdef CONFIG_ACPI |
362 | static bool acpi_ppc; | 364 | static bool acpi_ppc; |
363 | #endif | 365 | #endif |
@@ -394,6 +396,7 @@ static struct perf_limits *limits = &performance_limits; | |||
394 | static struct perf_limits *limits = &powersave_limits; | 396 | static struct perf_limits *limits = &powersave_limits; |
395 | #endif | 397 | #endif |
396 | 398 | ||
399 | static DEFINE_MUTEX(intel_pstate_driver_lock); | ||
397 | static DEFINE_MUTEX(intel_pstate_limits_lock); | 400 | static DEFINE_MUTEX(intel_pstate_limits_lock); |
398 | 401 | ||
399 | #ifdef CONFIG_ACPI | 402 | #ifdef CONFIG_ACPI |
@@ -538,7 +541,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) | |||
538 | 541 | ||
539 | acpi_processor_unregister_performance(policy->cpu); | 542 | acpi_processor_unregister_performance(policy->cpu); |
540 | } | 543 | } |
541 | |||
542 | #else | 544 | #else |
543 | static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) | 545 | static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
544 | { | 546 | { |
@@ -873,7 +875,10 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) | |||
873 | 875 | ||
874 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); | 876 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); |
875 | hw_min = HWP_LOWEST_PERF(cap); | 877 | hw_min = HWP_LOWEST_PERF(cap); |
876 | hw_max = HWP_HIGHEST_PERF(cap); | 878 | if (limits->no_turbo) |
879 | hw_max = HWP_GUARANTEED_PERF(cap); | ||
880 | else | ||
881 | hw_max = HWP_HIGHEST_PERF(cap); | ||
877 | range = hw_max - hw_min; | 882 | range = hw_max - hw_min; |
878 | 883 | ||
879 | max_perf_pct = perf_limits->max_perf_pct; | 884 | max_perf_pct = perf_limits->max_perf_pct; |
@@ -887,11 +892,6 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) | |||
887 | 892 | ||
888 | adj_range = max_perf_pct * range / 100; | 893 | adj_range = max_perf_pct * range / 100; |
889 | max = hw_min + adj_range; | 894 | max = hw_min + adj_range; |
890 | if (limits->no_turbo) { | ||
891 | hw_max = HWP_GUARANTEED_PERF(cap); | ||
892 | if (hw_max < max) | ||
893 | max = hw_max; | ||
894 | } | ||
895 | 895 | ||
896 | value &= ~HWP_MAX_PERF(~0L); | 896 | value &= ~HWP_MAX_PERF(~0L); |
897 | value |= HWP_MAX_PERF(max); | 897 | value |= HWP_MAX_PERF(max); |
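[Editor's note] The two hunks above move the no_turbo clamp to the point where hw_max is chosen, so the percentage scaling now operates on the guaranteed-performance range directly instead of clamping afterwards. A worked example of that arithmetic, with illustrative ratio values:

    /* Worked example of the HWP clamping above: with no_turbo set the
     * range tops out at the guaranteed ratio instead of the highest one.
     * All ratio values below are made up for illustration. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int hw_min = 8, guaranteed = 32, highest = 40;
            int no_turbo = 1, max_perf_pct = 75;
            unsigned int hw_max = no_turbo ? guaranteed : highest;
            unsigned int range = hw_max - hw_min;             /* 24 */
            unsigned int max = hw_min + max_perf_pct * range / 100;

            printf("HWP max ratio = %u\n", max);              /* 8 + 18 = 26 */
            return 0;
    }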
@@ -1007,37 +1007,59 @@ static int pid_param_get(void *data, u64 *val) | |||
1007 | } | 1007 | } |
1008 | DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); | 1008 | DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n"); |
1009 | 1009 | ||
1010 | static struct dentry *debugfs_parent; | ||
1011 | |||
1010 | struct pid_param { | 1012 | struct pid_param { |
1011 | char *name; | 1013 | char *name; |
1012 | void *value; | 1014 | void *value; |
1015 | struct dentry *dentry; | ||
1013 | }; | 1016 | }; |
1014 | 1017 | ||
1015 | static struct pid_param pid_files[] = { | 1018 | static struct pid_param pid_files[] = { |
1016 | {"sample_rate_ms", &pid_params.sample_rate_ms}, | 1019 | {"sample_rate_ms", &pid_params.sample_rate_ms, }, |
1017 | {"d_gain_pct", &pid_params.d_gain_pct}, | 1020 | {"d_gain_pct", &pid_params.d_gain_pct, }, |
1018 | {"i_gain_pct", &pid_params.i_gain_pct}, | 1021 | {"i_gain_pct", &pid_params.i_gain_pct, }, |
1019 | {"deadband", &pid_params.deadband}, | 1022 | {"deadband", &pid_params.deadband, }, |
1020 | {"setpoint", &pid_params.setpoint}, | 1023 | {"setpoint", &pid_params.setpoint, }, |
1021 | {"p_gain_pct", &pid_params.p_gain_pct}, | 1024 | {"p_gain_pct", &pid_params.p_gain_pct, }, |
1022 | {NULL, NULL} | 1025 | {NULL, NULL, } |
1023 | }; | 1026 | }; |
1024 | 1027 | ||
1025 | static void __init intel_pstate_debug_expose_params(void) | 1028 | static void intel_pstate_debug_expose_params(void) |
1026 | { | 1029 | { |
1027 | struct dentry *debugfs_parent; | 1030 | int i; |
1028 | int i = 0; | ||
1029 | 1031 | ||
1030 | debugfs_parent = debugfs_create_dir("pstate_snb", NULL); | 1032 | debugfs_parent = debugfs_create_dir("pstate_snb", NULL); |
1031 | if (IS_ERR_OR_NULL(debugfs_parent)) | 1033 | if (IS_ERR_OR_NULL(debugfs_parent)) |
1032 | return; | 1034 | return; |
1033 | while (pid_files[i].name) { | 1035 | |
1034 | debugfs_create_file(pid_files[i].name, 0660, | 1036 | for (i = 0; pid_files[i].name; i++) { |
1035 | debugfs_parent, pid_files[i].value, | 1037 | struct dentry *dentry; |
1036 | &fops_pid_param); | 1038 | |
1037 | i++; | 1039 | dentry = debugfs_create_file(pid_files[i].name, 0660, |
1040 | debugfs_parent, pid_files[i].value, | ||
1041 | &fops_pid_param); | ||
1042 | if (!IS_ERR(dentry)) | ||
1043 | pid_files[i].dentry = dentry; | ||
1038 | } | 1044 | } |
1039 | } | 1045 | } |
1040 | 1046 | ||
1047 | static void intel_pstate_debug_hide_params(void) | ||
1048 | { | ||
1049 | int i; | ||
1050 | |||
1051 | if (IS_ERR_OR_NULL(debugfs_parent)) | ||
1052 | return; | ||
1053 | |||
1054 | for (i = 0; pid_files[i].name; i++) { | ||
1055 | debugfs_remove(pid_files[i].dentry); | ||
1056 | pid_files[i].dentry = NULL; | ||
1057 | } | ||
1058 | |||
1059 | debugfs_remove(debugfs_parent); | ||
1060 | debugfs_parent = NULL; | ||
1061 | } | ||
1062 | |||
1041 | /************************** debugfs end ************************/ | 1063 | /************************** debugfs end ************************/ |
1042 | 1064 | ||
1043 | /************************** sysfs begin ************************/ | 1065 | /************************** sysfs begin ************************/ |
@@ -1048,6 +1070,34 @@ static void __init intel_pstate_debug_expose_params(void) | |||
1048 | return sprintf(buf, "%u\n", limits->object); \ | 1070 | return sprintf(buf, "%u\n", limits->object); \ |
1049 | } | 1071 | } |
1050 | 1072 | ||
1073 | static ssize_t intel_pstate_show_status(char *buf); | ||
1074 | static int intel_pstate_update_status(const char *buf, size_t size); | ||
1075 | |||
1076 | static ssize_t show_status(struct kobject *kobj, | ||
1077 | struct attribute *attr, char *buf) | ||
1078 | { | ||
1079 | ssize_t ret; | ||
1080 | |||
1081 | mutex_lock(&intel_pstate_driver_lock); | ||
1082 | ret = intel_pstate_show_status(buf); | ||
1083 | mutex_unlock(&intel_pstate_driver_lock); | ||
1084 | |||
1085 | return ret; | ||
1086 | } | ||
1087 | |||
1088 | static ssize_t store_status(struct kobject *a, struct attribute *b, | ||
1089 | const char *buf, size_t count) | ||
1090 | { | ||
1091 | char *p = memchr(buf, '\n', count); | ||
1092 | int ret; | ||
1093 | |||
1094 | mutex_lock(&intel_pstate_driver_lock); | ||
1095 | ret = intel_pstate_update_status(buf, p ? p - buf : count); | ||
1096 | mutex_unlock(&intel_pstate_driver_lock); | ||
1097 | |||
1098 | return ret < 0 ? ret : count; | ||
1099 | } | ||
1100 | |||
1051 | static ssize_t show_turbo_pct(struct kobject *kobj, | 1101 | static ssize_t show_turbo_pct(struct kobject *kobj, |
1052 | struct attribute *attr, char *buf) | 1102 | struct attribute *attr, char *buf) |
1053 | { | 1103 | { |
@@ -1055,12 +1105,22 @@ static ssize_t show_turbo_pct(struct kobject *kobj, | |||
1055 | int total, no_turbo, turbo_pct; | 1105 | int total, no_turbo, turbo_pct; |
1056 | uint32_t turbo_fp; | 1106 | uint32_t turbo_fp; |
1057 | 1107 | ||
1108 | mutex_lock(&intel_pstate_driver_lock); | ||
1109 | |||
1110 | if (!driver_registered) { | ||
1111 | mutex_unlock(&intel_pstate_driver_lock); | ||
1112 | return -EAGAIN; | ||
1113 | } | ||
1114 | |||
1058 | cpu = all_cpu_data[0]; | 1115 | cpu = all_cpu_data[0]; |
1059 | 1116 | ||
1060 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | 1117 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; |
1061 | no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; | 1118 | no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; |
1062 | turbo_fp = div_fp(no_turbo, total); | 1119 | turbo_fp = div_fp(no_turbo, total); |
1063 | turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); | 1120 | turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); |
1121 | |||
1122 | mutex_unlock(&intel_pstate_driver_lock); | ||
1123 | |||
1064 | return sprintf(buf, "%u\n", turbo_pct); | 1124 | return sprintf(buf, "%u\n", turbo_pct); |
1065 | } | 1125 | } |
1066 | 1126 | ||
@@ -1070,8 +1130,18 @@ static ssize_t show_num_pstates(struct kobject *kobj, | |||
1070 | struct cpudata *cpu; | 1130 | struct cpudata *cpu; |
1071 | int total; | 1131 | int total; |
1072 | 1132 | ||
1133 | mutex_lock(&intel_pstate_driver_lock); | ||
1134 | |||
1135 | if (!driver_registered) { | ||
1136 | mutex_unlock(&intel_pstate_driver_lock); | ||
1137 | return -EAGAIN; | ||
1138 | } | ||
1139 | |||
1073 | cpu = all_cpu_data[0]; | 1140 | cpu = all_cpu_data[0]; |
1074 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | 1141 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; |
1142 | |||
1143 | mutex_unlock(&intel_pstate_driver_lock); | ||
1144 | |||
1075 | return sprintf(buf, "%u\n", total); | 1145 | return sprintf(buf, "%u\n", total); |
1076 | } | 1146 | } |
1077 | 1147 | ||
@@ -1080,12 +1150,21 @@ static ssize_t show_no_turbo(struct kobject *kobj, | |||
1080 | { | 1150 | { |
1081 | ssize_t ret; | 1151 | ssize_t ret; |
1082 | 1152 | ||
1153 | mutex_lock(&intel_pstate_driver_lock); | ||
1154 | |||
1155 | if (!driver_registered) { | ||
1156 | mutex_unlock(&intel_pstate_driver_lock); | ||
1157 | return -EAGAIN; | ||
1158 | } | ||
1159 | |||
1083 | update_turbo_state(); | 1160 | update_turbo_state(); |
1084 | if (limits->turbo_disabled) | 1161 | if (limits->turbo_disabled) |
1085 | ret = sprintf(buf, "%u\n", limits->turbo_disabled); | 1162 | ret = sprintf(buf, "%u\n", limits->turbo_disabled); |
1086 | else | 1163 | else |
1087 | ret = sprintf(buf, "%u\n", limits->no_turbo); | 1164 | ret = sprintf(buf, "%u\n", limits->no_turbo); |
1088 | 1165 | ||
1166 | mutex_unlock(&intel_pstate_driver_lock); | ||
1167 | |||
1089 | return ret; | 1168 | return ret; |
1090 | } | 1169 | } |
1091 | 1170 | ||
@@ -1099,12 +1178,20 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
1099 | if (ret != 1) | 1178 | if (ret != 1) |
1100 | return -EINVAL; | 1179 | return -EINVAL; |
1101 | 1180 | ||
1181 | mutex_lock(&intel_pstate_driver_lock); | ||
1182 | |||
1183 | if (!driver_registered) { | ||
1184 | mutex_unlock(&intel_pstate_driver_lock); | ||
1185 | return -EAGAIN; | ||
1186 | } | ||
1187 | |||
1102 | mutex_lock(&intel_pstate_limits_lock); | 1188 | mutex_lock(&intel_pstate_limits_lock); |
1103 | 1189 | ||
1104 | update_turbo_state(); | 1190 | update_turbo_state(); |
1105 | if (limits->turbo_disabled) { | 1191 | if (limits->turbo_disabled) { |
1106 | pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); | 1192 | pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); |
1107 | mutex_unlock(&intel_pstate_limits_lock); | 1193 | mutex_unlock(&intel_pstate_limits_lock); |
1194 | mutex_unlock(&intel_pstate_driver_lock); | ||
1108 | return -EPERM; | 1195 | return -EPERM; |
1109 | } | 1196 | } |
1110 | 1197 | ||
@@ -1114,6 +1201,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
1114 | 1201 | ||
1115 | intel_pstate_update_policies(); | 1202 | intel_pstate_update_policies(); |
1116 | 1203 | ||
1204 | mutex_unlock(&intel_pstate_driver_lock); | ||
1205 | |||
1117 | return count; | 1206 | return count; |
1118 | } | 1207 | } |
1119 | 1208 | ||
@@ -1127,6 +1216,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
1127 | if (ret != 1) | 1216 | if (ret != 1) |
1128 | return -EINVAL; | 1217 | return -EINVAL; |
1129 | 1218 | ||
1219 | mutex_lock(&intel_pstate_driver_lock); | ||
1220 | |||
1221 | if (!driver_registered) { | ||
1222 | mutex_unlock(&intel_pstate_driver_lock); | ||
1223 | return -EAGAIN; | ||
1224 | } | ||
1225 | |||
1130 | mutex_lock(&intel_pstate_limits_lock); | 1226 | mutex_lock(&intel_pstate_limits_lock); |
1131 | 1227 | ||
1132 | limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); | 1228 | limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); |
@@ -1142,6 +1238,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
1142 | 1238 | ||
1143 | intel_pstate_update_policies(); | 1239 | intel_pstate_update_policies(); |
1144 | 1240 | ||
1241 | mutex_unlock(&intel_pstate_driver_lock); | ||
1242 | |||
1145 | return count; | 1243 | return count; |
1146 | } | 1244 | } |
1147 | 1245 | ||
@@ -1155,6 +1253,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
1155 | if (ret != 1) | 1253 | if (ret != 1) |
1156 | return -EINVAL; | 1254 | return -EINVAL; |
1157 | 1255 | ||
1256 | mutex_lock(&intel_pstate_driver_lock); | ||
1257 | |||
1258 | if (!driver_registered) { | ||
1259 | mutex_unlock(&intel_pstate_driver_lock); | ||
1260 | return -EAGAIN; | ||
1261 | } | ||
1262 | |||
1158 | mutex_lock(&intel_pstate_limits_lock); | 1263 | mutex_lock(&intel_pstate_limits_lock); |
1159 | 1264 | ||
1160 | limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); | 1265 | limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); |
@@ -1170,12 +1275,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
1170 | 1275 | ||
1171 | intel_pstate_update_policies(); | 1276 | intel_pstate_update_policies(); |
1172 | 1277 | ||
1278 | mutex_unlock(&intel_pstate_driver_lock); | ||
1279 | |||
1173 | return count; | 1280 | return count; |
1174 | } | 1281 | } |
1175 | 1282 | ||
1176 | show_one(max_perf_pct, max_perf_pct); | 1283 | show_one(max_perf_pct, max_perf_pct); |
1177 | show_one(min_perf_pct, min_perf_pct); | 1284 | show_one(min_perf_pct, min_perf_pct); |
1178 | 1285 | ||
1286 | define_one_global_rw(status); | ||
1179 | define_one_global_rw(no_turbo); | 1287 | define_one_global_rw(no_turbo); |
1180 | define_one_global_rw(max_perf_pct); | 1288 | define_one_global_rw(max_perf_pct); |
1181 | define_one_global_rw(min_perf_pct); | 1289 | define_one_global_rw(min_perf_pct); |
@@ -1183,6 +1291,7 @@ define_one_global_ro(turbo_pct); | |||
1183 | define_one_global_ro(num_pstates); | 1291 | define_one_global_ro(num_pstates); |
1184 | 1292 | ||
1185 | static struct attribute *intel_pstate_attributes[] = { | 1293 | static struct attribute *intel_pstate_attributes[] = { |
1294 | &status.attr, | ||
1186 | &no_turbo.attr, | 1295 | &no_turbo.attr, |
1187 | &turbo_pct.attr, | 1296 | &turbo_pct.attr, |
1188 | &num_pstates.attr, | 1297 | &num_pstates.attr, |
@@ -1364,48 +1473,71 @@ static int core_get_max_pstate_physical(void) | |||
1364 | return (value >> 8) & 0xFF; | 1473 | return (value >> 8) & 0xFF; |
1365 | } | 1474 | } |
1366 | 1475 | ||
1476 | static int core_get_tdp_ratio(u64 plat_info) | ||
1477 | { | ||
1478 | /* Check how many TDP levels present */ | ||
1479 | if (plat_info & 0x600000000) { | ||
1480 | u64 tdp_ctrl; | ||
1481 | u64 tdp_ratio; | ||
1482 | int tdp_msr; | ||
1483 | int err; | ||
1484 | |||
1485 | /* Get the TDP level (0, 1, 2) to get ratios */ | ||
1486 | err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); | ||
1487 | if (err) | ||
1488 | return err; | ||
1489 | |||
1490 | /* TDP MSR are continuous starting at 0x648 */ | ||
1491 | tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); | ||
1492 | err = rdmsrl_safe(tdp_msr, &tdp_ratio); | ||
1493 | if (err) | ||
1494 | return err; | ||
1495 | |||
1496 | /* For level 1 and 2, bits[23:16] contain the ratio */ | ||
1497 | if (tdp_ctrl & 0x03) | ||
1498 | tdp_ratio >>= 16; | ||
1499 | |||
1500 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ | ||
1501 | pr_debug("tdp_ratio %x\n", (int)tdp_ratio); | ||
1502 | |||
1503 | return (int)tdp_ratio; | ||
1504 | } | ||
1505 | |||
1506 | return -ENXIO; | ||
1507 | } | ||
1508 | |||
1367 | static int core_get_max_pstate(void) | 1509 | static int core_get_max_pstate(void) |
1368 | { | 1510 | { |
1369 | u64 tar; | 1511 | u64 tar; |
1370 | u64 plat_info; | 1512 | u64 plat_info; |
1371 | int max_pstate; | 1513 | int max_pstate; |
1514 | int tdp_ratio; | ||
1372 | int err; | 1515 | int err; |
1373 | 1516 | ||
1374 | rdmsrl(MSR_PLATFORM_INFO, plat_info); | 1517 | rdmsrl(MSR_PLATFORM_INFO, plat_info); |
1375 | max_pstate = (plat_info >> 8) & 0xFF; | 1518 | max_pstate = (plat_info >> 8) & 0xFF; |
1376 | 1519 | ||
1520 | tdp_ratio = core_get_tdp_ratio(plat_info); | ||
1521 | if (tdp_ratio <= 0) | ||
1522 | return max_pstate; | ||
1523 | |||
1524 | if (hwp_active) { | ||
1525 | /* Turbo activation ratio is not used on HWP platforms */ | ||
1526 | return tdp_ratio; | ||
1527 | } | ||
1528 | |||
1377 | err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); | 1529 | err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); |
1378 | if (!err) { | 1530 | if (!err) { |
1531 | int tar_levels; | ||
1532 | |||
1379 | /* Do some sanity checking for safety */ | 1533 | /* Do some sanity checking for safety */ |
1380 | if (plat_info & 0x600000000) { | 1534 | tar_levels = tar & 0xff; |
1381 | u64 tdp_ctrl; | 1535 | if (tdp_ratio - 1 == tar_levels) { |
1382 | u64 tdp_ratio; | 1536 | max_pstate = tar_levels; |
1383 | int tdp_msr; | 1537 | pr_debug("max_pstate=TAC %x\n", max_pstate); |
1384 | |||
1385 | err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); | ||
1386 | if (err) | ||
1387 | goto skip_tar; | ||
1388 | |||
1389 | tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3); | ||
1390 | err = rdmsrl_safe(tdp_msr, &tdp_ratio); | ||
1391 | if (err) | ||
1392 | goto skip_tar; | ||
1393 | |||
1394 | /* For level 1 and 2, bits[23:16] contain the ratio */ | ||
1395 | if (tdp_ctrl) | ||
1396 | tdp_ratio >>= 16; | ||
1397 | |||
1398 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ | ||
1399 | if (tdp_ratio - 1 == tar) { | ||
1400 | max_pstate = tar; | ||
1401 | pr_debug("max_pstate=TAC %x\n", max_pstate); | ||
1402 | } else { | ||
1403 | goto skip_tar; | ||
1404 | } | ||
1405 | } | 1538 | } |
1406 | } | 1539 | } |
1407 | 1540 | ||
1408 | skip_tar: | ||
1409 | return max_pstate; | 1541 | return max_pstate; |
1410 | } | 1542 | } |
1411 | 1543 | ||
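[Editor's note] The refactor above pulls the ConfigTDP handling out of core_get_max_pstate() into core_get_tdp_ratio(). The 0x600000000 mask tests bits 34:33 of MSR_PLATFORM_INFO (which, per Intel's documentation, report the number of configurable TDP levels), and the level selected in MSR_CONFIG_TDP_CONTROL picks one of the consecutive MSRs starting at MSR_CONFIG_TDP_NOMINAL. A userspace sketch of the bit arithmetic, with made-up MSR values:

    /* Sketch of the mask logic in core_get_tdp_ratio(); the MSR values
     * below are invented for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t plat_info = 1ULL << 33;  /* pretend one extra TDP level */
            uint64_t tdp_ctrl = 0x1;          /* pretend level 1 is selected */
            uint64_t tdp_ratio = 0x00180000;  /* ratio 0x18 in bits 23:16 */

            if (plat_info & 0x600000000ULL) { /* bits 34:33: TDP levels present */
                    if (tdp_ctrl & 0x03)      /* levels 1 and 2 keep it in 23:16 */
                            tdp_ratio >>= 16;
                    tdp_ratio &= 0xff;        /* ratios are 8 bits wide */
                    printf("tdp_ratio = 0x%llx\n",
                           (unsigned long long)tdp_ratio);
            }
            return 0;
    }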
@@ -2035,7 +2167,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
2035 | limits = &performance_limits; | 2167 | limits = &performance_limits; |
2036 | perf_limits = limits; | 2168 | perf_limits = limits; |
2037 | } | 2169 | } |
2038 | if (policy->max >= policy->cpuinfo.max_freq) { | 2170 | if (policy->max >= policy->cpuinfo.max_freq && |
2171 | !limits->no_turbo) { | ||
2039 | pr_debug("set performance\n"); | 2172 | pr_debug("set performance\n"); |
2040 | intel_pstate_set_performance_limits(perf_limits); | 2173 | intel_pstate_set_performance_limits(perf_limits); |
2041 | goto out; | 2174 | goto out; |
@@ -2071,12 +2204,37 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
2071 | 2204 | ||
2072 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | 2205 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) |
2073 | { | 2206 | { |
2207 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | ||
2208 | struct perf_limits *perf_limits; | ||
2209 | |||
2210 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
2211 | perf_limits = &performance_limits; | ||
2212 | else | ||
2213 | perf_limits = &powersave_limits; | ||
2214 | |||
2215 | update_turbo_state(); | ||
2216 | policy->cpuinfo.max_freq = perf_limits->turbo_disabled || | ||
2217 | perf_limits->no_turbo ? | ||
2218 | cpu->pstate.max_freq : | ||
2219 | cpu->pstate.turbo_freq; | ||
2220 | |||
2074 | cpufreq_verify_within_cpu_limits(policy); | 2221 | cpufreq_verify_within_cpu_limits(policy); |
2075 | 2222 | ||
2076 | if (policy->policy != CPUFREQ_POLICY_POWERSAVE && | 2223 | if (policy->policy != CPUFREQ_POLICY_POWERSAVE && |
2077 | policy->policy != CPUFREQ_POLICY_PERFORMANCE) | 2224 | policy->policy != CPUFREQ_POLICY_PERFORMANCE) |
2078 | return -EINVAL; | 2225 | return -EINVAL; |
2079 | 2226 | ||
2227 | /* When per-CPU limits are used, sysfs limits are not used */ | ||
2228 | if (!per_cpu_limits) { | ||
2229 | unsigned int max_freq, min_freq; | ||
2230 | |||
2231 | max_freq = policy->cpuinfo.max_freq * | ||
2232 | limits->max_sysfs_pct / 100; | ||
2233 | min_freq = policy->cpuinfo.max_freq * | ||
2234 | limits->min_sysfs_pct / 100; | ||
2235 | cpufreq_verify_within_limits(policy, min_freq, max_freq); | ||
2236 | } | ||
2237 | |||
2080 | return 0; | 2238 | return 0; |
2081 | } | 2239 | } |
2082 | 2240 | ||
@@ -2287,6 +2445,111 @@ static struct cpufreq_driver intel_cpufreq = { | |||
2287 | 2445 | ||
2288 | static struct cpufreq_driver *intel_pstate_driver = &intel_pstate; | 2446 | static struct cpufreq_driver *intel_pstate_driver = &intel_pstate; |
2289 | 2447 | ||
2448 | static void intel_pstate_driver_cleanup(void) | ||
2449 | { | ||
2450 | unsigned int cpu; | ||
2451 | |||
2452 | get_online_cpus(); | ||
2453 | for_each_online_cpu(cpu) { | ||
2454 | if (all_cpu_data[cpu]) { | ||
2455 | if (intel_pstate_driver == &intel_pstate) | ||
2456 | intel_pstate_clear_update_util_hook(cpu); | ||
2457 | |||
2458 | kfree(all_cpu_data[cpu]); | ||
2459 | all_cpu_data[cpu] = NULL; | ||
2460 | } | ||
2461 | } | ||
2462 | put_online_cpus(); | ||
2463 | } | ||
2464 | |||
2465 | static int intel_pstate_register_driver(void) | ||
2466 | { | ||
2467 | int ret; | ||
2468 | |||
2469 | ret = cpufreq_register_driver(intel_pstate_driver); | ||
2470 | if (ret) { | ||
2471 | intel_pstate_driver_cleanup(); | ||
2472 | return ret; | ||
2473 | } | ||
2474 | |||
2475 | mutex_lock(&intel_pstate_limits_lock); | ||
2476 | driver_registered = true; | ||
2477 | mutex_unlock(&intel_pstate_limits_lock); | ||
2478 | |||
2479 | if (intel_pstate_driver == &intel_pstate && !hwp_active && | ||
2480 | pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load) | ||
2481 | intel_pstate_debug_expose_params(); | ||
2482 | |||
2483 | return 0; | ||
2484 | } | ||
2485 | |||
2486 | static int intel_pstate_unregister_driver(void) | ||
2487 | { | ||
2488 | if (hwp_active) | ||
2489 | return -EBUSY; | ||
2490 | |||
2491 | if (intel_pstate_driver == &intel_pstate && !hwp_active && | ||
2492 | pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load) | ||
2493 | intel_pstate_debug_hide_params(); | ||
2494 | |||
2495 | mutex_lock(&intel_pstate_limits_lock); | ||
2496 | driver_registered = false; | ||
2497 | mutex_unlock(&intel_pstate_limits_lock); | ||
2498 | |||
2499 | cpufreq_unregister_driver(intel_pstate_driver); | ||
2500 | intel_pstate_driver_cleanup(); | ||
2501 | |||
2502 | return 0; | ||
2503 | } | ||
2504 | |||
2505 | static ssize_t intel_pstate_show_status(char *buf) | ||
2506 | { | ||
2507 | if (!driver_registered) | ||
2508 | return sprintf(buf, "off\n"); | ||
2509 | |||
2510 | return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ? | ||
2511 | "active" : "passive"); | ||
2512 | } | ||
2513 | |||
2514 | static int intel_pstate_update_status(const char *buf, size_t size) | ||
2515 | { | ||
2516 | int ret; | ||
2517 | |||
2518 | if (size == 3 && !strncmp(buf, "off", size)) | ||
2519 | return driver_registered ? | ||
2520 | intel_pstate_unregister_driver() : -EINVAL; | ||
2521 | |||
2522 | if (size == 6 && !strncmp(buf, "active", size)) { | ||
2523 | if (driver_registered) { | ||
2524 | if (intel_pstate_driver == &intel_pstate) | ||
2525 | return 0; | ||
2526 | |||
2527 | ret = intel_pstate_unregister_driver(); | ||
2528 | if (ret) | ||
2529 | return ret; | ||
2530 | } | ||
2531 | |||
2532 | intel_pstate_driver = &intel_pstate; | ||
2533 | return intel_pstate_register_driver(); | ||
2534 | } | ||
2535 | |||
2536 | if (size == 7 && !strncmp(buf, "passive", size)) { | ||
2537 | if (driver_registered) { | ||
2538 | if (intel_pstate_driver != &intel_pstate) | ||
2539 | return 0; | ||
2540 | |||
2541 | ret = intel_pstate_unregister_driver(); | ||
2542 | if (ret) | ||
2543 | return ret; | ||
2544 | } | ||
2545 | |||
2546 | intel_pstate_driver = &intel_cpufreq; | ||
2547 | return intel_pstate_register_driver(); | ||
2548 | } | ||
2549 | |||
2550 | return -EINVAL; | ||
2551 | } | ||
2552 | |||
2290 | static int no_load __initdata; | 2553 | static int no_load __initdata; |
2291 | static int no_hwp __initdata; | 2554 | static int no_hwp __initdata; |
2292 | static int hwp_only __initdata; | 2555 | static int hwp_only __initdata; |
@@ -2474,9 +2737,9 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = { | |||
2474 | 2737 | ||
2475 | static int __init intel_pstate_init(void) | 2738 | static int __init intel_pstate_init(void) |
2476 | { | 2739 | { |
2477 | int cpu, rc = 0; | ||
2478 | const struct x86_cpu_id *id; | 2740 | const struct x86_cpu_id *id; |
2479 | struct cpu_defaults *cpu_def; | 2741 | struct cpu_defaults *cpu_def; |
2742 | int rc = 0; | ||
2480 | 2743 | ||
2481 | if (no_load) | 2744 | if (no_load) |
2482 | return -ENODEV; | 2745 | return -ENODEV; |
@@ -2508,45 +2771,29 @@ hwp_cpu_matched: | |||
2508 | if (intel_pstate_platform_pwr_mgmt_exists()) | 2771 | if (intel_pstate_platform_pwr_mgmt_exists()) |
2509 | return -ENODEV; | 2772 | return -ENODEV; |
2510 | 2773 | ||
2774 | if (!hwp_active && hwp_only) | ||
2775 | return -ENOTSUPP; | ||
2776 | |||
2511 | pr_info("Intel P-state driver initializing\n"); | 2777 | pr_info("Intel P-state driver initializing\n"); |
2512 | 2778 | ||
2513 | all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); | 2779 | all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); |
2514 | if (!all_cpu_data) | 2780 | if (!all_cpu_data) |
2515 | return -ENOMEM; | 2781 | return -ENOMEM; |
2516 | 2782 | ||
2517 | if (!hwp_active && hwp_only) | ||
2518 | goto out; | ||
2519 | |||
2520 | intel_pstate_request_control_from_smm(); | 2783 | intel_pstate_request_control_from_smm(); |
2521 | 2784 | ||
2522 | rc = cpufreq_register_driver(intel_pstate_driver); | ||
2523 | if (rc) | ||
2524 | goto out; | ||
2525 | |||
2526 | if (intel_pstate_driver == &intel_pstate && !hwp_active && | ||
2527 | pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load) | ||
2528 | intel_pstate_debug_expose_params(); | ||
2529 | |||
2530 | intel_pstate_sysfs_expose_params(); | 2785 | intel_pstate_sysfs_expose_params(); |
2531 | 2786 | ||
2787 | mutex_lock(&intel_pstate_driver_lock); | ||
2788 | rc = intel_pstate_register_driver(); | ||
2789 | mutex_unlock(&intel_pstate_driver_lock); | ||
2790 | if (rc) | ||
2791 | return rc; | ||
2792 | |||
2532 | if (hwp_active) | 2793 | if (hwp_active) |
2533 | pr_info("HWP enabled\n"); | 2794 | pr_info("HWP enabled\n"); |
2534 | 2795 | ||
2535 | return rc; | 2796 | return 0; |
2536 | out: | ||
2537 | get_online_cpus(); | ||
2538 | for_each_online_cpu(cpu) { | ||
2539 | if (all_cpu_data[cpu]) { | ||
2540 | if (intel_pstate_driver == &intel_pstate) | ||
2541 | intel_pstate_clear_update_util_hook(cpu); | ||
2542 | |||
2543 | kfree(all_cpu_data[cpu]); | ||
2544 | } | ||
2545 | } | ||
2546 | |||
2547 | put_online_cpus(); | ||
2548 | vfree(all_cpu_data); | ||
2549 | return -ENODEV; | ||
2550 | } | 2797 | } |
2551 | device_initcall(intel_pstate_init); | 2798 | device_initcall(intel_pstate_init); |
2552 | 2799 | ||
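[Editor's note] The headline change in intel_pstate above is the new global "status" sysfs attribute: every global show/store handler now takes intel_pstate_driver_lock and returns -EAGAIN while the driver is unregistered, and intel_pstate_update_status() accepts exactly the strings "off", "active" and "passive" to unregister or re-register the driver at run time (refused with -EBUSY while HWP is active). A minimal userspace sketch of switching modes; it needs root, and the sysfs path is the standard intel_pstate directory created by the attribute group above:

    /* Userspace sketch: flip intel_pstate between active and passive.
     * The strings match those parsed by intel_pstate_update_status(). */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/devices/system/cpu/intel_pstate/status";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fputs("passive\n", f);   /* or "active" / "off" */
            fclose(f);
            return 0;
    }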
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 37671b545880..3ff5160451b4 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c | |||
@@ -144,6 +144,7 @@ static struct powernv_pstate_info { | |||
144 | unsigned int max; | 144 | unsigned int max; |
145 | unsigned int nominal; | 145 | unsigned int nominal; |
146 | unsigned int nr_pstates; | 146 | unsigned int nr_pstates; |
147 | bool wof_enabled; | ||
147 | } powernv_pstate_info; | 148 | } powernv_pstate_info; |
148 | 149 | ||
149 | /* Use following macros for conversions between pstate_id and index */ | 150 | /* Use following macros for conversions between pstate_id and index */ |
@@ -203,6 +204,7 @@ static int init_powernv_pstates(void) | |||
203 | const __be32 *pstate_ids, *pstate_freqs; | 204 | const __be32 *pstate_ids, *pstate_freqs; |
204 | u32 len_ids, len_freqs; | 205 | u32 len_ids, len_freqs; |
205 | u32 pstate_min, pstate_max, pstate_nominal; | 206 | u32 pstate_min, pstate_max, pstate_nominal; |
207 | u32 pstate_turbo, pstate_ultra_turbo; | ||
206 | 208 | ||
207 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); | 209 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); |
208 | if (!power_mgt) { | 210 | if (!power_mgt) { |
@@ -225,8 +227,29 @@ static int init_powernv_pstates(void) | |||
225 | pr_warn("ibm,pstate-nominal not found\n"); | 227 | pr_warn("ibm,pstate-nominal not found\n"); |
226 | return -ENODEV; | 228 | return -ENODEV; |
227 | } | 229 | } |
230 | |||
231 | if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo", | ||
232 | &pstate_ultra_turbo)) { | ||
233 | powernv_pstate_info.wof_enabled = false; | ||
234 | goto next; | ||
235 | } | ||
236 | |||
237 | if (of_property_read_u32(power_mgt, "ibm,pstate-turbo", | ||
238 | &pstate_turbo)) { | ||
239 | powernv_pstate_info.wof_enabled = false; | ||
240 | goto next; | ||
241 | } | ||
242 | |||
243 | if (pstate_turbo == pstate_ultra_turbo) | ||
244 | powernv_pstate_info.wof_enabled = false; | ||
245 | else | ||
246 | powernv_pstate_info.wof_enabled = true; | ||
247 | |||
248 | next: | ||
228 | pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, | 249 | pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, |
229 | pstate_nominal, pstate_max); | 250 | pstate_nominal, pstate_max); |
251 | pr_info("Workload Optimized Frequency is %s in the platform\n", | ||
252 | (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled"); | ||
230 | 253 | ||
231 | pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); | 254 | pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); |
232 | if (!pstate_ids) { | 255 | if (!pstate_ids) { |
@@ -268,6 +291,13 @@ static int init_powernv_pstates(void) | |||
268 | powernv_pstate_info.nominal = i; | 291 | powernv_pstate_info.nominal = i; |
269 | else if (id == pstate_min) | 292 | else if (id == pstate_min) |
270 | powernv_pstate_info.min = i; | 293 | powernv_pstate_info.min = i; |
294 | |||
295 | if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { | ||
296 | int j; | ||
297 | |||
298 | for (j = i - 1; j >= (int)powernv_pstate_info.max; j--) | ||
299 | powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ; | ||
300 | } | ||
271 | } | 301 | } |
272 | 302 | ||
273 | /* End of list marker entry */ | 303 | /* End of list marker entry */ |
@@ -305,9 +335,12 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, | |||
305 | struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = | 335 | struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = |
306 | __ATTR_RO(cpuinfo_nominal_freq); | 336 | __ATTR_RO(cpuinfo_nominal_freq); |
307 | 337 | ||
338 | #define SCALING_BOOST_FREQS_ATTR_INDEX 2 | ||
339 | |||
308 | static struct freq_attr *powernv_cpu_freq_attr[] = { | 340 | static struct freq_attr *powernv_cpu_freq_attr[] = { |
309 | &cpufreq_freq_attr_scaling_available_freqs, | 341 | &cpufreq_freq_attr_scaling_available_freqs, |
310 | &cpufreq_freq_attr_cpuinfo_nominal_freq, | 342 | &cpufreq_freq_attr_cpuinfo_nominal_freq, |
343 | &cpufreq_freq_attr_scaling_boost_freqs, | ||
311 | NULL, | 344 | NULL, |
312 | }; | 345 | }; |
313 | 346 | ||
@@ -1013,11 +1046,22 @@ static int __init powernv_cpufreq_init(void) | |||
1013 | register_reboot_notifier(&powernv_cpufreq_reboot_nb); | 1046 | register_reboot_notifier(&powernv_cpufreq_reboot_nb); |
1014 | opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); | 1047 | opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); |
1015 | 1048 | ||
1049 | if (powernv_pstate_info.wof_enabled) | ||
1050 | powernv_cpufreq_driver.boost_enabled = true; | ||
1051 | else | ||
1052 | powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL; | ||
1053 | |||
1016 | rc = cpufreq_register_driver(&powernv_cpufreq_driver); | 1054 | rc = cpufreq_register_driver(&powernv_cpufreq_driver); |
1017 | if (!rc) | 1055 | if (rc) { |
1018 | return 0; | 1056 | pr_info("Failed to register the cpufreq driver (%d)\n", rc); |
1057 | goto cleanup_notifiers; | ||
1058 | } | ||
1019 | 1059 | ||
1020 | pr_info("Failed to register the cpufreq driver (%d)\n", rc); | 1060 | if (powernv_pstate_info.wof_enabled) |
1061 | cpufreq_enable_boost_support(); | ||
1062 | |||
1063 | return 0; | ||
1064 | cleanup_notifiers: | ||
1021 | unregister_all_notifiers(); | 1065 | unregister_all_notifiers(); |
1022 | clean_chip_info(); | 1066 | clean_chip_info(); |
1023 | out: | 1067 | out: |
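[Editor's note] The powernv changes above read two extra device-tree properties and enable frequency boost only when "ibm,pstate-turbo" and "ibm,pstate-ultra-turbo" are both present and differ; pstates between the two are then flagged CPUFREQ_BOOST_FREQ. A condensed sketch of that decision, with of_property_read_u32() stubbed out by a hypothetical read_u32():

    /* Sketch of the WOF detection above: boost is available only when
     * the turbo and ultra-turbo pstates both exist and are distinct.
     * read_u32() is an illustrative stand-in for of_property_read_u32(). */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool read_u32(const char *prop, unsigned int *val)
    {
            /* stub: pretend both properties exist with distinct values */
            *val = !strcmp(prop, "ibm,pstate-turbo") ? 10 : 14;
            return true;
    }

    int main(void)
    {
            unsigned int turbo, ultra_turbo;
            bool wof_enabled =
                    read_u32("ibm,pstate-ultra-turbo", &ultra_turbo) &&
                    read_u32("ibm,pstate-turbo", &turbo) &&
                    turbo != ultra_turbo;

            printf("Workload Optimized Frequency %s\n",
                   wof_enabled ? "enabled" : "disabled");
            return 0;
    }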
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c index dc112481a408..eeaa92251512 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | |||
@@ -100,9 +100,6 @@ static int pmi_notifier(struct notifier_block *nb, | |||
100 | /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY | 100 | /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY |
101 | * policy events?) | 101 | * policy events?) |
102 | */ | 102 | */ |
103 | if (event == CPUFREQ_START) | ||
104 | return 0; | ||
105 | |||
106 | node = cbe_cpu_to_node(policy->cpu); | 103 | node = cbe_cpu_to_node(policy->cpu); |
107 | 104 | ||
108 | pr_debug("got notified, event=%lu, node=%u\n", event, node); | 105 | pr_debug("got notified, event=%lu, node=%u\n", event, node); |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 86bf3b84ada5..a07ae9e37930 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) | |||
1723 | } | 1723 | } |
1724 | 1724 | ||
1725 | /** | 1725 | /** |
1726 | * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip | 1726 | * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip |
1727 | * @gpiochip: the gpiochip to add the irqchip to | 1727 | * @gpiochip: the gpiochip to add the irqchip to |
1728 | * @irqchip: the irqchip to add to the gpiochip | 1728 | * @irqchip: the irqchip to add to the gpiochip |
1729 | * @first_irq: if not dynamically assigned, the base (first) IRQ to | 1729 | * @first_irq: if not dynamically assigned, the base (first) IRQ to |
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) | |||
1749 | * the pins on the gpiochip can generate a unique IRQ. Everything else | 1749 | * the pins on the gpiochip can generate a unique IRQ. Everything else |
1750 | * need to be open coded. | 1750 | * need to be open coded. |
1751 | */ | 1751 | */ |
1752 | int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, | 1752 | int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, |
1753 | struct irq_chip *irqchip, | 1753 | struct irq_chip *irqchip, |
1754 | unsigned int first_irq, | 1754 | unsigned int first_irq, |
1755 | irq_flow_handler_t handler, | 1755 | irq_flow_handler_t handler, |
1756 | unsigned int type, | 1756 | unsigned int type, |
1757 | bool nested, | 1757 | bool nested, |
1758 | struct lock_class_key *lock_key) | 1758 | struct lock_class_key *lock_key) |
1759 | { | 1759 | { |
1760 | struct device_node *of_node; | 1760 | struct device_node *of_node; |
1761 | bool irq_base_set = false; | 1761 | bool irq_base_set = false; |
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, | |||
1840 | 1840 | ||
1841 | return 0; | 1841 | return 0; |
1842 | } | 1842 | } |
1843 | EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add); | 1843 | EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key); |
1844 | 1844 | ||
1845 | #else /* CONFIG_GPIOLIB_IRQCHIP */ | 1845 | #else /* CONFIG_GPIOLIB_IRQCHIP */ |
1846 | 1846 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 29d6d84d1c28..41e41f90265d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
83 | } | 83 | } |
84 | break; | 84 | break; |
85 | } | 85 | } |
86 | |||
87 | if (!(*out_ring && (*out_ring)->adev)) { | ||
88 | DRM_ERROR("Ring %d is not initialized on IP %d\n", | ||
89 | ring, ip_type); | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
86 | return 0; | 93 | return 0; |
87 | } | 94 | } |
88 | 95 | ||
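[Editor's note] The amdgpu_cs.c check above rejects a command submission that targets a ring which was never initialized, instead of letting a later dereference crash: the condition !(*out_ring && (*out_ring)->adev) is true when either the ring pointer or its device back-pointer is NULL. A tiny userspace sketch of the same guard, with stand-in struct names:

    /* Sketch of the guard added above: reject when either the object
     * or its back-pointer is missing.  Types are illustrative. */
    #include <stdio.h>

    struct adev { int id; };
    struct ring { struct adev *adev; };

    static int check_ring(struct ring *r)
    {
            if (!(r && r->adev))     /* NULL ring or uninitialized ring */
                    return -1;       /* the kernel code returns -EINVAL */
            return 0;
    }

    int main(void)
    {
            struct ring uninit = { 0 };
            printf("%d\n", check_ring(&uninit));   /* -1: adev not set */
            return 0;
    }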
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9999dc71b998..ccb5e02e7b20 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, | |||
2512 | 2512 | ||
2513 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | 2513 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); |
2514 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | 2514 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); |
2515 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2516 | ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
2515 | 2517 | ||
2516 | return 0; | 2518 | return 0; |
2517 | } | 2519 | } |
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2537 | int32_t hot_y) | 2539 | int32_t hot_y) |
2538 | { | 2540 | { |
2539 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2541 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2540 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2541 | struct drm_gem_object *obj; | 2542 | struct drm_gem_object *obj; |
2542 | struct amdgpu_bo *aobj; | 2543 | struct amdgpu_bo *aobj; |
2543 | int ret; | 2544 | int ret; |
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2578 | 2579 | ||
2579 | dce_v10_0_lock_cursor(crtc, true); | 2580 | dce_v10_0_lock_cursor(crtc, true); |
2580 | 2581 | ||
2581 | if (hot_x != amdgpu_crtc->cursor_hot_x || | 2582 | if (width != amdgpu_crtc->cursor_width || |
2583 | height != amdgpu_crtc->cursor_height || | ||
2584 | hot_x != amdgpu_crtc->cursor_hot_x || | ||
2582 | hot_y != amdgpu_crtc->cursor_hot_y) { | 2585 | hot_y != amdgpu_crtc->cursor_hot_y) { |
2583 | int x, y; | 2586 | int x, y; |
2584 | 2587 | ||
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2587 | 2590 | ||
2588 | dce_v10_0_cursor_move_locked(crtc, x, y); | 2591 | dce_v10_0_cursor_move_locked(crtc, x, y); |
2589 | 2592 | ||
2590 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2591 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2592 | } | ||
2593 | |||
2594 | if (width != amdgpu_crtc->cursor_width || | ||
2595 | height != amdgpu_crtc->cursor_height) { | ||
2596 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2597 | (width - 1) << 16 | (height - 1)); | ||
2598 | amdgpu_crtc->cursor_width = width; | 2593 | amdgpu_crtc->cursor_width = width; |
2599 | amdgpu_crtc->cursor_height = height; | 2594 | amdgpu_crtc->cursor_height = height; |
2595 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2596 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2600 | } | 2597 | } |
2601 | 2598 | ||
2602 | dce_v10_0_show_cursor(crtc); | 2599 | dce_v10_0_show_cursor(crtc); |
@@ -2620,7 +2617,6 @@ unpin: | |||
2620 | static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) | 2617 | static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) |
2621 | { | 2618 | { |
2622 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2619 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2623 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2624 | 2620 | ||
2625 | if (amdgpu_crtc->cursor_bo) { | 2621 | if (amdgpu_crtc->cursor_bo) { |
2626 | dce_v10_0_lock_cursor(crtc, true); | 2622 | dce_v10_0_lock_cursor(crtc, true); |
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) | |||
2628 | dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, | 2624 | dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, |
2629 | amdgpu_crtc->cursor_y); | 2625 | amdgpu_crtc->cursor_y); |
2630 | 2626 | ||
2631 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2632 | (amdgpu_crtc->cursor_width - 1) << 16 | | ||
2633 | (amdgpu_crtc->cursor_height - 1)); | ||
2634 | |||
2635 | dce_v10_0_show_cursor(crtc); | 2627 | dce_v10_0_show_cursor(crtc); |
2636 | 2628 | ||
2637 | dce_v10_0_lock_cursor(crtc, false); | 2629 | dce_v10_0_lock_cursor(crtc, false); |
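[Editor's note] The same cursor rework repeats below for dce_v11_0, dce_v6_0 and dce_v8_0: CUR_SIZE is now programmed on every locked cursor move rather than only when the size changes, so cursor_set2() can fold the width/height update into the hotspot branch and cursor_reset() no longer reprograms the register separately. The register packs (width - 1) in the upper half-word and (height - 1) in the lower one; a worked example:

    /* Worked example of the CUR_SIZE packing used above:
     * a 64x64 cursor encodes as 0x003f003f. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int width = 64, height = 64;
            unsigned int cur_size = ((width - 1) << 16) | (height - 1);

            printf("CUR_SIZE = 0x%08x\n", cur_size);   /* 0x003f003f */
            return 0;
    }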
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 2006abbbfb62..a7af5b33a5e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, | |||
2532 | 2532 | ||
2533 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | 2533 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); |
2534 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | 2534 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); |
2535 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2536 | ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
2535 | 2537 | ||
2536 | return 0; | 2538 | return 0; |
2537 | } | 2539 | } |
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2557 | int32_t hot_y) | 2559 | int32_t hot_y) |
2558 | { | 2560 | { |
2559 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2561 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2560 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2561 | struct drm_gem_object *obj; | 2562 | struct drm_gem_object *obj; |
2562 | struct amdgpu_bo *aobj; | 2563 | struct amdgpu_bo *aobj; |
2563 | int ret; | 2564 | int ret; |
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2598 | 2599 | ||
2599 | dce_v11_0_lock_cursor(crtc, true); | 2600 | dce_v11_0_lock_cursor(crtc, true); |
2600 | 2601 | ||
2601 | if (hot_x != amdgpu_crtc->cursor_hot_x || | 2602 | if (width != amdgpu_crtc->cursor_width || |
2603 | height != amdgpu_crtc->cursor_height || | ||
2604 | hot_x != amdgpu_crtc->cursor_hot_x || | ||
2602 | hot_y != amdgpu_crtc->cursor_hot_y) { | 2605 | hot_y != amdgpu_crtc->cursor_hot_y) { |
2603 | int x, y; | 2606 | int x, y; |
2604 | 2607 | ||
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2607 | 2610 | ||
2608 | dce_v11_0_cursor_move_locked(crtc, x, y); | 2611 | dce_v11_0_cursor_move_locked(crtc, x, y); |
2609 | 2612 | ||
2610 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2611 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2612 | } | ||
2613 | |||
2614 | if (width != amdgpu_crtc->cursor_width || | ||
2615 | height != amdgpu_crtc->cursor_height) { | ||
2616 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2617 | (width - 1) << 16 | (height - 1)); | ||
2618 | amdgpu_crtc->cursor_width = width; | 2613 | amdgpu_crtc->cursor_width = width; |
2619 | amdgpu_crtc->cursor_height = height; | 2614 | amdgpu_crtc->cursor_height = height; |
2615 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2616 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2620 | } | 2617 | } |
2621 | 2618 | ||
2622 | dce_v11_0_show_cursor(crtc); | 2619 | dce_v11_0_show_cursor(crtc); |
@@ -2640,7 +2637,6 @@ unpin: | |||
2640 | static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) | 2637 | static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) |
2641 | { | 2638 | { |
2642 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2639 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2643 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2644 | 2640 | ||
2645 | if (amdgpu_crtc->cursor_bo) { | 2641 | if (amdgpu_crtc->cursor_bo) { |
2646 | dce_v11_0_lock_cursor(crtc, true); | 2642 | dce_v11_0_lock_cursor(crtc, true); |
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) | |||
2648 | dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, | 2644 | dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, |
2649 | amdgpu_crtc->cursor_y); | 2645 | amdgpu_crtc->cursor_y); |
2650 | 2646 | ||
2651 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2652 | (amdgpu_crtc->cursor_width - 1) << 16 | | ||
2653 | (amdgpu_crtc->cursor_height - 1)); | ||
2654 | |||
2655 | dce_v11_0_show_cursor(crtc); | 2647 | dce_v11_0_show_cursor(crtc); |
2656 | 2648 | ||
2657 | dce_v11_0_lock_cursor(crtc, false); | 2649 | dce_v11_0_lock_cursor(crtc, false); |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b4e4ec630e8c..39df6a50637f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, | |||
1859 | struct amdgpu_device *adev = crtc->dev->dev_private; | 1859 | struct amdgpu_device *adev = crtc->dev->dev_private; |
1860 | int xorigin = 0, yorigin = 0; | 1860 | int xorigin = 0, yorigin = 0; |
1861 | 1861 | ||
1862 | int w = amdgpu_crtc->cursor_width; | ||
1863 | |||
1862 | amdgpu_crtc->cursor_x = x; | 1864 | amdgpu_crtc->cursor_x = x; |
1863 | amdgpu_crtc->cursor_y = y; | 1865 | amdgpu_crtc->cursor_y = y; |
1864 | 1866 | ||
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, | |||
1878 | 1880 | ||
1879 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | 1881 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); |
1880 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | 1882 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); |
1883 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
1884 | ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
1881 | 1885 | ||
1882 | return 0; | 1886 | return 0; |
1883 | } | 1887 | } |
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
1903 | int32_t hot_y) | 1907 | int32_t hot_y) |
1904 | { | 1908 | { |
1905 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 1909 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
1906 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
1907 | struct drm_gem_object *obj; | 1910 | struct drm_gem_object *obj; |
1908 | struct amdgpu_bo *aobj; | 1911 | struct amdgpu_bo *aobj; |
1909 | int ret; | 1912 | int ret; |
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
1944 | 1947 | ||
1945 | dce_v6_0_lock_cursor(crtc, true); | 1948 | dce_v6_0_lock_cursor(crtc, true); |
1946 | 1949 | ||
1947 | if (hot_x != amdgpu_crtc->cursor_hot_x || | 1950 | if (width != amdgpu_crtc->cursor_width || |
1951 | height != amdgpu_crtc->cursor_height || | ||
1952 | hot_x != amdgpu_crtc->cursor_hot_x || | ||
1948 | hot_y != amdgpu_crtc->cursor_hot_y) { | 1953 | hot_y != amdgpu_crtc->cursor_hot_y) { |
1949 | int x, y; | 1954 | int x, y; |
1950 | 1955 | ||
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
1953 | 1958 | ||
1954 | dce_v6_0_cursor_move_locked(crtc, x, y); | 1959 | dce_v6_0_cursor_move_locked(crtc, x, y); |
1955 | 1960 | ||
1956 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
1957 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
1958 | } | ||
1959 | |||
1960 | if (width != amdgpu_crtc->cursor_width || | ||
1961 | height != amdgpu_crtc->cursor_height) { | ||
1962 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
1963 | (width - 1) << 16 | (height - 1)); | ||
1964 | amdgpu_crtc->cursor_width = width; | 1961 | amdgpu_crtc->cursor_width = width; |
1965 | amdgpu_crtc->cursor_height = height; | 1962 | amdgpu_crtc->cursor_height = height; |
1963 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
1964 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
1966 | } | 1965 | } |
1967 | 1966 | ||
1968 | dce_v6_0_show_cursor(crtc); | 1967 | dce_v6_0_show_cursor(crtc); |
@@ -1986,7 +1985,6 @@ unpin: | |||
1986 | static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) | 1985 | static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) |
1987 | { | 1986 | { |
1988 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 1987 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
1989 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
1990 | 1988 | ||
1991 | if (amdgpu_crtc->cursor_bo) { | 1989 | if (amdgpu_crtc->cursor_bo) { |
1992 | dce_v6_0_lock_cursor(crtc, true); | 1990 | dce_v6_0_lock_cursor(crtc, true); |
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) | |||
1994 | dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, | 1992 | dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, |
1995 | amdgpu_crtc->cursor_y); | 1993 | amdgpu_crtc->cursor_y); |
1996 | 1994 | ||
1997 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
1998 | (amdgpu_crtc->cursor_width - 1) << 16 | | ||
1999 | (amdgpu_crtc->cursor_height - 1)); | ||
2000 | |||
2001 | dce_v6_0_show_cursor(crtc); | 1995 | dce_v6_0_show_cursor(crtc); |
2002 | dce_v6_0_lock_cursor(crtc, false); | 1996 | dce_v6_0_lock_cursor(crtc, false); |
2003 | } | 1997 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 584abe834a3c..28102bb1704d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, | |||
2363 | 2363 | ||
2364 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | 2364 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); |
2365 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | 2365 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); |
2366 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2367 | ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
2366 | 2368 | ||
2367 | return 0; | 2369 | return 0; |
2368 | } | 2370 | } |
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2388 | int32_t hot_y) | 2390 | int32_t hot_y) |
2389 | { | 2391 | { |
2390 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2392 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2391 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2392 | struct drm_gem_object *obj; | 2393 | struct drm_gem_object *obj; |
2393 | struct amdgpu_bo *aobj; | 2394 | struct amdgpu_bo *aobj; |
2394 | int ret; | 2395 | int ret; |
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2429 | 2430 | ||
2430 | dce_v8_0_lock_cursor(crtc, true); | 2431 | dce_v8_0_lock_cursor(crtc, true); |
2431 | 2432 | ||
2432 | if (hot_x != amdgpu_crtc->cursor_hot_x || | 2433 | if (width != amdgpu_crtc->cursor_width || |
2434 | height != amdgpu_crtc->cursor_height || | ||
2435 | hot_x != amdgpu_crtc->cursor_hot_x || | ||
2433 | hot_y != amdgpu_crtc->cursor_hot_y) { | 2436 | hot_y != amdgpu_crtc->cursor_hot_y) { |
2434 | int x, y; | 2437 | int x, y; |
2435 | 2438 | ||
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2438 | 2441 | ||
2439 | dce_v8_0_cursor_move_locked(crtc, x, y); | 2442 | dce_v8_0_cursor_move_locked(crtc, x, y); |
2440 | 2443 | ||
2441 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2442 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2443 | } | ||
2444 | |||
2445 | if (width != amdgpu_crtc->cursor_width || | ||
2446 | height != amdgpu_crtc->cursor_height) { | ||
2447 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2448 | (width - 1) << 16 | (height - 1)); | ||
2449 | amdgpu_crtc->cursor_width = width; | 2444 | amdgpu_crtc->cursor_width = width; |
2450 | amdgpu_crtc->cursor_height = height; | 2445 | amdgpu_crtc->cursor_height = height; |
2446 | amdgpu_crtc->cursor_hot_x = hot_x; | ||
2447 | amdgpu_crtc->cursor_hot_y = hot_y; | ||
2451 | } | 2448 | } |
2452 | 2449 | ||
2453 | dce_v8_0_show_cursor(crtc); | 2450 | dce_v8_0_show_cursor(crtc); |
@@ -2471,7 +2468,6 @@ unpin: | |||
2471 | static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) | 2468 | static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) |
2472 | { | 2469 | { |
2473 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2470 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2474 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2475 | 2471 | ||
2476 | if (amdgpu_crtc->cursor_bo) { | 2472 | if (amdgpu_crtc->cursor_bo) { |
2477 | dce_v8_0_lock_cursor(crtc, true); | 2473 | dce_v8_0_lock_cursor(crtc, true); |
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) | |||
2479 | dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, | 2475 | dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, |
2480 | amdgpu_crtc->cursor_y); | 2476 | amdgpu_crtc->cursor_y); |
2481 | 2477 | ||
2482 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2483 | (amdgpu_crtc->cursor_width - 1) << 16 | | ||
2484 | (amdgpu_crtc->cursor_height - 1)); | ||
2485 | |||
2486 | dce_v8_0_show_cursor(crtc); | 2478 | dce_v8_0_show_cursor(crtc); |
2487 | 2479 | ||
2488 | dce_v8_0_lock_cursor(crtc, false); | 2480 | dce_v8_0_lock_cursor(crtc, false); |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 762f8e82ceb7..e9a176891e13 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c | |||
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = | |||
627 | 627 | ||
628 | static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) | 628 | static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) |
629 | { | 629 | { |
630 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
631 | |||
632 | kfree(amdgpu_encoder->enc_priv); | ||
633 | drm_encoder_cleanup(encoder); | 630 | drm_encoder_cleanup(encoder); |
634 | kfree(amdgpu_encoder); | 631 | kfree(encoder); |
635 | } | 632 | } |
636 | 633 | ||
637 | static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { | 634 | static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { |
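Note: the destroy callback above had been treating the virtual encoder as a struct amdgpu_encoder. A minimal sketch of the corrected pairing, assuming (as the fix implies) that the virtual display path allocates a bare drm_encoder and never populates enc_priv:

    /* hypothetical allocation side, for illustration only */
    static struct drm_encoder *virtual_encoder_alloc_sketch(void)
    {
            return kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
    }

    static void virtual_encoder_destroy_sketch(struct drm_encoder *encoder)
    {
            drm_encoder_cleanup(encoder);
            kfree(encoder);         /* must mirror the allocation above */
    }

Freeing to_amdgpu_encoder(encoder)->enc_priv from a bare drm_encoder allocation would read, and free, past the end of the object.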
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 45a573e63d4a..e2b0b1646f99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin"); | |||
44 | MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); | 44 | MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); |
45 | MODULE_FIRMWARE("radeon/verde_mc.bin"); | 45 | MODULE_FIRMWARE("radeon/verde_mc.bin"); |
46 | MODULE_FIRMWARE("radeon/oland_mc.bin"); | 46 | MODULE_FIRMWARE("radeon/oland_mc.bin"); |
47 | MODULE_FIRMWARE("radeon/si58_mc.bin"); | ||
47 | 48 | ||
48 | #define MC_SEQ_MISC0__MT__MASK 0xf0000000 | 49 | #define MC_SEQ_MISC0__MT__MASK 0xf0000000 |
49 | #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 | 50 | #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 |
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) | |||
113 | const char *chip_name; | 114 | const char *chip_name; |
114 | char fw_name[30]; | 115 | char fw_name[30]; |
115 | int err; | 116 | int err; |
117 | bool is_58_fw = false; | ||
116 | 118 | ||
117 | DRM_DEBUG("\n"); | 119 | DRM_DEBUG("\n"); |
118 | 120 | ||
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) | |||
135 | default: BUG(); | 137 | default: BUG(); |
136 | } | 138 | } |
137 | 139 | ||
138 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | 140 | /* this memory configuration requires special firmware */ |
141 | if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) | ||
142 | is_58_fw = true; | ||
143 | |||
144 | if (is_58_fw) | ||
145 | snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); | ||
146 | else | ||
147 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | ||
139 | err = request_firmware(&adev->mc.fw, fw_name, adev->dev); | 148 | err = request_firmware(&adev->mc.fw, fw_name, adev->dev); |
140 | if (err) | 149 | if (err) |
141 | goto out; | 150 | goto out; |
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) | |||
463 | WREG32(mmVM_CONTEXT1_CNTL, | 472 | WREG32(mmVM_CONTEXT1_CNTL, |
464 | VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | | 473 | VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | |
465 | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | | 474 | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | |
466 | ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | | 475 | ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); |
467 | VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | 476 | if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) |
468 | VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | | 477 | gmc_v6_0_set_fault_enable_default(adev, false); |
469 | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | 478 | else |
470 | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | | 479 | gmc_v6_0_set_fault_enable_default(adev, true); |
471 | VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
472 | VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | | ||
473 | VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
474 | VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | | ||
475 | VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
476 | VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | | ||
477 | VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
478 | VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); | ||
479 | 480 | ||
480 | gmc_v6_0_gart_flush_gpu_tlb(adev, 0); | 481 | gmc_v6_0_gart_flush_gpu_tlb(adev, 0); |
481 | dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", | 482 | dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", |
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle) | |||
754 | { | 755 | { |
755 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 756 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
756 | 757 | ||
757 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | 758 | if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) |
759 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | ||
760 | else | ||
761 | return 0; | ||
758 | } | 762 | } |
759 | 763 | ||
760 | static int gmc_v6_0_sw_init(void *handle) | 764 | static int gmc_v6_0_sw_init(void *handle) |
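Note: the firmware selection above keys off the memory-configuration ID in the top byte of MC_SEQ_MISC0. A worked sketch of the check (the register value is illustrative):

    /* MC_SEQ_MISC0 = 0x58xxxxxx  ->  (val & 0xff000000) >> 24 == 0x58,
     * so the driver loads "radeon/si58_mc.bin" instead of the per-chip
     * "<chip>_mc.bin" image. */
    static bool needs_si58_fw_sketch(struct amdgpu_device *adev)
    {
            return ((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58;
    }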
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 10bedfac27b8..6e150db8f380 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); | |||
64 | MODULE_FIRMWARE("radeon/oland_k_smc.bin"); | 64 | MODULE_FIRMWARE("radeon/oland_k_smc.bin"); |
65 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); | 65 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); |
66 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); | 66 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); |
67 | MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); | ||
67 | 68 | ||
68 | union power_info { | 69 | union power_info { |
69 | struct _ATOM_POWERPLAY_INFO info; | 70 | struct _ATOM_POWERPLAY_INFO info; |
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
3487 | (adev->pdev->device == 0x6817) || | 3488 | (adev->pdev->device == 0x6817) || |
3488 | (adev->pdev->device == 0x6806)) | 3489 | (adev->pdev->device == 0x6806)) |
3489 | max_mclk = 120000; | 3490 | max_mclk = 120000; |
3490 | } else if (adev->asic_type == CHIP_OLAND) { | ||
3491 | if ((adev->pdev->revision == 0xC7) || | ||
3492 | (adev->pdev->revision == 0x80) || | ||
3493 | (adev->pdev->revision == 0x81) || | ||
3494 | (adev->pdev->revision == 0x83) || | ||
3495 | (adev->pdev->revision == 0x87) || | ||
3496 | (adev->pdev->device == 0x6604) || | ||
3497 | (adev->pdev->device == 0x6605)) { | ||
3498 | max_sclk = 75000; | ||
3499 | max_mclk = 80000; | ||
3500 | } | ||
3501 | } else if (adev->asic_type == CHIP_HAINAN) { | 3491 | } else if (adev->asic_type == CHIP_HAINAN) { |
3502 | if ((adev->pdev->revision == 0x81) || | 3492 | if ((adev->pdev->revision == 0x81) || |
3503 | (adev->pdev->revision == 0x83) || | 3493 | (adev->pdev->revision == 0x83) || |
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
3506 | (adev->pdev->device == 0x6665) || | 3496 | (adev->pdev->device == 0x6665) || |
3507 | (adev->pdev->device == 0x6667)) { | 3497 | (adev->pdev->device == 0x6667)) { |
3508 | max_sclk = 75000; | 3498 | max_sclk = 75000; |
3509 | max_mclk = 80000; | ||
3510 | } | 3499 | } |
3511 | } | 3500 | } |
3512 | /* Apply dpm quirks */ | 3501 | /* Apply dpm quirks */ |
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) | |||
7713 | ((adev->pdev->device == 0x6660) || | 7702 | ((adev->pdev->device == 0x6660) || |
7714 | (adev->pdev->device == 0x6663) || | 7703 | (adev->pdev->device == 0x6663) || |
7715 | (adev->pdev->device == 0x6665) || | 7704 | (adev->pdev->device == 0x6665) || |
7716 | (adev->pdev->device == 0x6667))) || | 7705 | (adev->pdev->device == 0x6667)))) |
7717 | ((adev->pdev->revision == 0xc3) && | ||
7718 | (adev->pdev->device == 0x6665))) | ||
7719 | chip_name = "hainan_k"; | 7706 | chip_name = "hainan_k"; |
7707 | else if ((adev->pdev->revision == 0xc3) && | ||
7708 | (adev->pdev->device == 0x6665)) | ||
7709 | chip_name = "banks_k_2"; | ||
7720 | else | 7710 | else |
7721 | chip_name = "hainan"; | 7711 | chip_name = "hainan"; |
7722 | break; | 7712 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 96444e4d862a..7fb9137dd89b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -40,13 +40,14 @@ | |||
40 | #include "smu/smu_7_0_1_sh_mask.h" | 40 | #include "smu/smu_7_0_1_sh_mask.h" |
41 | 41 | ||
42 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); | 42 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); |
43 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev); | ||
44 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); | 43 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); |
45 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); | 44 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); |
46 | static int uvd_v4_2_start(struct amdgpu_device *adev); | 45 | static int uvd_v4_2_start(struct amdgpu_device *adev); |
47 | static void uvd_v4_2_stop(struct amdgpu_device *adev); | 46 | static void uvd_v4_2_stop(struct amdgpu_device *adev); |
48 | static int uvd_v4_2_set_clockgating_state(void *handle, | 47 | static int uvd_v4_2_set_clockgating_state(void *handle, |
49 | enum amd_clockgating_state state); | 48 | enum amd_clockgating_state state); |
49 | static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, | ||
50 | bool sw_mode); | ||
50 | /** | 51 | /** |
51 | * uvd_v4_2_ring_get_rptr - get read pointer | 52 | * uvd_v4_2_ring_get_rptr - get read pointer |
52 | * | 53 | * |
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle) | |||
140 | 141 | ||
141 | return r; | 142 | return r; |
142 | } | 143 | } |
143 | 144 | static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, | |
145 | bool enable); | ||
144 | /** | 146 | /** |
145 | * uvd_v4_2_hw_init - start and test UVD block | 147 | * uvd_v4_2_hw_init - start and test UVD block |
146 | * | 148 | * |
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle) | |||
155 | uint32_t tmp; | 157 | uint32_t tmp; |
156 | int r; | 158 | int r; |
157 | 159 | ||
158 | uvd_v4_2_init_cg(adev); | 160 | uvd_v4_2_enable_mgcg(adev, true); |
159 | uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); | ||
160 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); | 161 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
161 | r = uvd_v4_2_start(adev); | 162 | r = uvd_v4_2_start(adev); |
162 | if (r) | 163 | if (r) |
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) | |||
266 | struct amdgpu_ring *ring = &adev->uvd.ring; | 267 | struct amdgpu_ring *ring = &adev->uvd.ring; |
267 | uint32_t rb_bufsz; | 268 | uint32_t rb_bufsz; |
268 | int i, j, r; | 269 | int i, j, r; |
269 | |||
270 | /* disable byte swapping */ | 270 | /* disable byte swapping */ |
271 | u32 lmi_swap_cntl = 0; | 271 | u32 lmi_swap_cntl = 0; |
272 | u32 mp_swap_cntl = 0; | 272 | u32 mp_swap_cntl = 0; |
273 | 273 | ||
274 | WREG32(mmUVD_CGC_GATE, 0); | ||
275 | uvd_v4_2_set_dcm(adev, true); | ||
276 | |||
274 | uvd_v4_2_mc_resume(adev); | 277 | uvd_v4_2_mc_resume(adev); |
275 | 278 | ||
276 | /* disable interrupt */ | 279 | /* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) | |||
406 | 409 | ||
407 | /* Unstall UMC and register bus */ | 410 | /* Unstall UMC and register bus */ |
408 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | 411 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); |
412 | |||
413 | uvd_v4_2_set_dcm(adev, false); | ||
409 | } | 414 | } |
410 | 415 | ||
411 | /** | 416 | /** |
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, | |||
619 | WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); | 624 | WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); |
620 | } | 625 | } |
621 | 626 | ||
622 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev) | ||
623 | { | ||
624 | bool hw_mode = true; | ||
625 | |||
626 | if (hw_mode) { | ||
627 | uvd_v4_2_set_dcm(adev, false); | ||
628 | } else { | ||
629 | u32 tmp = RREG32(mmUVD_CGC_CTRL); | ||
630 | tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; | ||
631 | WREG32(mmUVD_CGC_CTRL, tmp); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | static bool uvd_v4_2_is_idle(void *handle) | 627 | static bool uvd_v4_2_is_idle(void *handle) |
636 | { | 628 | { |
637 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 629 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, | |||
685 | static int uvd_v4_2_set_clockgating_state(void *handle, | 677 | static int uvd_v4_2_set_clockgating_state(void *handle, |
686 | enum amd_clockgating_state state) | 678 | enum amd_clockgating_state state) |
687 | { | 679 | { |
688 | bool gate = false; | ||
689 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
690 | |||
691 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
692 | return 0; | ||
693 | |||
694 | if (state == AMD_CG_STATE_GATE) | ||
695 | gate = true; | ||
696 | |||
697 | uvd_v4_2_enable_mgcg(adev, gate); | ||
698 | |||
699 | return 0; | 680 | return 0; |
700 | } | 681 | } |
701 | 682 | ||
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, | |||
711 | */ | 692 | */ |
712 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 693 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
713 | 694 | ||
714 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
715 | return 0; | ||
716 | |||
717 | if (state == AMD_PG_STATE_GATE) { | 695 | if (state == AMD_PG_STATE_GATE) { |
718 | uvd_v4_2_stop(adev); | 696 | uvd_v4_2_stop(adev); |
719 | return 0; | 697 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 5fb0b7f5c065..37ca685e5a9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -43,9 +43,13 @@ | |||
43 | 43 | ||
44 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 | 44 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 |
45 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 | 45 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 |
46 | #define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 | ||
47 | |||
46 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 | 48 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 |
47 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 | 49 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 |
48 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 | 50 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 |
51 | #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 | ||
52 | |||
49 | #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 | 53 | #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 |
50 | 54 | ||
51 | #define VCE_V3_0_FW_SIZE (384 * 1024) | 55 | #define VCE_V3_0_FW_SIZE (384 * 1024) |
@@ -54,6 +58,9 @@ | |||
54 | 58 | ||
55 | #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) | 59 | #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) |
56 | 60 | ||
61 | #define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ | ||
62 | | GRBM_GFX_INDEX__VCE_ALL_PIPE) | ||
63 | |||
57 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); | 64 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); |
58 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); | 65 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); |
59 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); | 66 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); |
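Note: a worked expansion of the new macro, following directly from the #defines above (VCE_INSTANCE shift 0x04, ALL_PIPE 0x07):

    WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));  /* (0 << 4) | 0x07 == 0x07 */
    WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));  /* (1 << 4) | 0x07 == 0x17 */
    WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);  /* 0xE0000000 */

The hunks below switch every instance selection to this pattern: target one VCE instance (with all pipes selected) for the per-instance programming, then write the default value (presumably the broadcast selection) rather than leaving a single instance targeted.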
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, | |||
175 | WREG32(mmVCE_UENC_CLOCK_GATING_2, data); | 182 | WREG32(mmVCE_UENC_CLOCK_GATING_2, data); |
176 | 183 | ||
177 | data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); | 184 | data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); |
178 | data &= ~0xffc00000; | 185 | data &= ~0x3ff; |
179 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); | 186 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); |
180 | 187 | ||
181 | data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); | 188 | data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); |
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
249 | if (adev->vce.harvest_config & (1 << idx)) | 256 | if (adev->vce.harvest_config & (1 << idx)) |
250 | continue; | 257 | continue; |
251 | 258 | ||
252 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); | 259 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); |
253 | vce_v3_0_mc_resume(adev, idx); | 260 | vce_v3_0_mc_resume(adev, idx); |
254 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); | 261 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); |
255 | 262 | ||
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
273 | } | 280 | } |
274 | } | 281 | } |
275 | 282 | ||
276 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); | 283 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); |
277 | mutex_unlock(&adev->grbm_idx_mutex); | 284 | mutex_unlock(&adev->grbm_idx_mutex); |
278 | 285 | ||
279 | return 0; | 286 | return 0; |
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) | |||
288 | if (adev->vce.harvest_config & (1 << idx)) | 295 | if (adev->vce.harvest_config & (1 << idx)) |
289 | continue; | 296 | continue; |
290 | 297 | ||
291 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); | 298 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); |
292 | 299 | ||
293 | if (adev->asic_type >= CHIP_STONEY) | 300 | if (adev->asic_type >= CHIP_STONEY) |
294 | WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); | 301 | WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); |
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) | |||
306 | vce_v3_0_set_vce_sw_clock_gating(adev, false); | 313 | vce_v3_0_set_vce_sw_clock_gating(adev, false); |
307 | } | 314 | } |
308 | 315 | ||
309 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); | 316 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); |
310 | mutex_unlock(&adev->grbm_idx_mutex); | 317 | mutex_unlock(&adev->grbm_idx_mutex); |
311 | 318 | ||
312 | return 0; | 319 | return 0; |
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) | |||
586 | * The VCE team suggests using bits 3-6 for the busy status check | 593 | * The VCE team suggests using bits 3-6 for the busy status check |
587 | */ | 594 | */ |
588 | mutex_lock(&adev->grbm_idx_mutex); | 595 | mutex_lock(&adev->grbm_idx_mutex); |
589 | WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); | 596 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); |
590 | if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { | 597 | if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { |
591 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); | 598 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); |
592 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); | 599 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); |
593 | } | 600 | } |
594 | WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); | 601 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); |
595 | if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { | 602 | if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { |
596 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); | 603 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); |
597 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); | 604 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); |
598 | } | 605 | } |
599 | WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); | 606 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); |
600 | mutex_unlock(&adev->grbm_idx_mutex); | 607 | mutex_unlock(&adev->grbm_idx_mutex); |
601 | 608 | ||
602 | if (srbm_soft_reset) { | 609 | if (srbm_soft_reset) { |
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, | |||
734 | if (adev->vce.harvest_config & (1 << i)) | 741 | if (adev->vce.harvest_config & (1 << i)) |
735 | continue; | 742 | continue; |
736 | 743 | ||
737 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); | 744 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); |
738 | 745 | ||
739 | if (enable) { | 746 | if (enable) { |
740 | /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ | 747 | /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ |
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, | |||
753 | vce_v3_0_set_vce_sw_clock_gating(adev, enable); | 760 | vce_v3_0_set_vce_sw_clock_gating(adev, enable); |
754 | } | 761 | } |
755 | 762 | ||
756 | WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); | 763 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); |
757 | mutex_unlock(&adev->grbm_idx_mutex); | 764 | mutex_unlock(&adev->grbm_idx_mutex); |
758 | 765 | ||
759 | return 0; | 766 | return 0; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b0c63c5f54c9..6bb79c94cb9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | |||
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
200 | cgs_set_clockgating_state( | 200 | cgs_set_clockgating_state( |
201 | hwmgr->device, | 201 | hwmgr->device, |
202 | AMD_IP_BLOCK_TYPE_VCE, | 202 | AMD_IP_BLOCK_TYPE_VCE, |
203 | AMD_CG_STATE_UNGATE); | 203 | AMD_CG_STATE_GATE); |
204 | cgs_set_powergating_state( | 204 | cgs_set_powergating_state( |
205 | hwmgr->device, | 205 | hwmgr->device, |
206 | AMD_IP_BLOCK_TYPE_VCE, | 206 | AMD_IP_BLOCK_TYPE_VCE, |
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
218 | cgs_set_clockgating_state( | 218 | cgs_set_clockgating_state( |
219 | hwmgr->device, | 219 | hwmgr->device, |
220 | AMD_IP_BLOCK_TYPE_VCE, | 220 | AMD_IP_BLOCK_TYPE_VCE, |
221 | AMD_PG_STATE_GATE); | 221 | AMD_PG_STATE_UNGATE); |
222 | cz_dpm_update_vce_dpm(hwmgr); | 222 | cz_dpm_update_vce_dpm(hwmgr); |
223 | cz_enable_disable_vce_dpm(hwmgr, true); | 223 | cz_enable_disable_vce_dpm(hwmgr, true); |
224 | return 0; | 224 | return 0; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 4b14f259a147..0fb4e8c8f5e1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) | |||
1402 | cz_hwmgr->vce_dpm.hard_min_clk, | 1402 | cz_hwmgr->vce_dpm.hard_min_clk, |
1403 | PPSMC_MSG_SetEclkHardMin)); | 1403 | PPSMC_MSG_SetEclkHardMin)); |
1404 | } else { | 1404 | } else { |
1405 | /*EPR# 419220 -HW limitation to to */ | 1405 | /*Program HardMin based on the vce_arbiter.ecclk */ |
1406 | cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; | 1406 | if (hwmgr->vce_arbiter.ecclk == 0) { |
1407 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | 1407 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1408 | PPSMC_MSG_SetEclkHardMin, | 1408 | PPSMC_MSG_SetEclkHardMin, 0); |
1409 | cz_get_eclk_level(hwmgr, | 1409 | /* disable ECLK DPM 0. Otherwise VCE could hang if |
1410 | cz_hwmgr->vce_dpm.hard_min_clk, | 1410 | * switching SCLK from DPM 0 to 6/7 */ |
1411 | PPSMC_MSG_SetEclkHardMin)); | 1411 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1412 | 1412 | PPSMC_MSG_SetEclkSoftMin, 1); | |
1413 | } else { | ||
1414 | cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; | ||
1415 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
1416 | PPSMC_MSG_SetEclkHardMin, | ||
1417 | cz_get_eclk_level(hwmgr, | ||
1418 | cz_hwmgr->vce_dpm.hard_min_clk, | ||
1419 | PPSMC_MSG_SetEclkHardMin)); | ||
1420 | } | ||
1413 | } | 1421 | } |
1414 | return 0; | 1422 | return 0; |
1415 | } | 1423 | } |
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 908011d2c8f5..7abda94fc2cf 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h | |||
@@ -113,6 +113,7 @@ struct ast_private { | |||
113 | struct ttm_bo_kmap_obj cache_kmap; | 113 | struct ttm_bo_kmap_obj cache_kmap; |
114 | int next_cursor; | 114 | int next_cursor; |
115 | bool support_wide_screen; | 115 | bool support_wide_screen; |
116 | bool DisableP2A; | ||
116 | 117 | ||
117 | enum ast_tx_chip tx_chip_type; | 118 | enum ast_tx_chip tx_chip_type; |
118 | u8 dp501_maxclk; | 119 | u8 dp501_maxclk; |
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f75c6421db62..533e762d036d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c | |||
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | |||
124 | } else | 124 | } else |
125 | *need_post = false; | 125 | *need_post = false; |
126 | 126 | ||
127 | /* Check P2A Access */ | ||
128 | ast->DisableP2A = true; | ||
129 | data = ast_read32(ast, 0xf004); | ||
130 | if (data != 0xFFFFFFFF) | ||
131 | ast->DisableP2A = false; | ||
132 | |||
127 | /* Check if we support wide screen */ | 133 | /* Check if we support wide screen */ |
128 | switch (ast->chip) { | 134 | switch (ast->chip) { |
129 | case AST1180: | 135 | case AST1180: |
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) | |||
140 | ast->support_wide_screen = true; | 146 | ast->support_wide_screen = true; |
141 | else { | 147 | else { |
142 | ast->support_wide_screen = false; | 148 | ast->support_wide_screen = false; |
143 | /* Read SCU7c (silicon revision register) */ | 149 | if (ast->DisableP2A == false) { |
144 | ast_write32(ast, 0xf004, 0x1e6e0000); | 150 | /* Read SCU7c (silicon revision register) */ |
145 | ast_write32(ast, 0xf000, 0x1); | 151 | ast_write32(ast, 0xf004, 0x1e6e0000); |
146 | data = ast_read32(ast, 0x1207c); | 152 | ast_write32(ast, 0xf000, 0x1); |
147 | data &= 0x300; | 153 | data = ast_read32(ast, 0x1207c); |
148 | if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ | 154 | data &= 0x300; |
149 | ast->support_wide_screen = true; | 155 | if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ |
150 | if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ | 156 | ast->support_wide_screen = true; |
151 | ast->support_wide_screen = true; | 157 | if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ |
158 | ast->support_wide_screen = true; | ||
159 | } | ||
152 | } | 160 | } |
153 | break; | 161 | break; |
154 | } | 162 | } |
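Note: the probe added above treats an all-ones read through the P2A window as "bridge disabled" (a disabled MMIO window would typically read back as 0xFFFFFFFF). An equivalent one-line form of the check:

    ast->DisableP2A = (ast_read32(ast, 0xf004) == 0xFFFFFFFF);

Everything that goes through the P2A window below (SCU silicon revision, DRAM probing, POST-time DRAM init) is then gated on this flag.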
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) | |||
216 | uint32_t data, data2; | 224 | uint32_t data, data2; |
217 | uint32_t denum, num, div, ref_pll; | 225 | uint32_t denum, num, div, ref_pll; |
218 | 226 | ||
219 | ast_write32(ast, 0xf004, 0x1e6e0000); | 227 | if (ast->DisableP2A) |
220 | ast_write32(ast, 0xf000, 0x1); | 228 | { |
221 | |||
222 | |||
223 | ast_write32(ast, 0x10000, 0xfc600309); | ||
224 | |||
225 | do { | ||
226 | if (pci_channel_offline(dev->pdev)) | ||
227 | return -EIO; | ||
228 | } while (ast_read32(ast, 0x10000) != 0x01); | ||
229 | data = ast_read32(ast, 0x10004); | ||
230 | |||
231 | if (data & 0x40) | ||
232 | ast->dram_bus_width = 16; | 229 | ast->dram_bus_width = 16; |
230 | ast->dram_type = AST_DRAM_1Gx16; | ||
231 | ast->mclk = 396; | ||
232 | } | ||
233 | else | 233 | else |
234 | ast->dram_bus_width = 32; | 234 | { |
235 | ast_write32(ast, 0xf004, 0x1e6e0000); | ||
236 | ast_write32(ast, 0xf000, 0x1); | ||
237 | data = ast_read32(ast, 0x10004); | ||
238 | |||
239 | if (data & 0x40) | ||
240 | ast->dram_bus_width = 16; | ||
241 | else | ||
242 | ast->dram_bus_width = 32; | ||
243 | |||
244 | if (ast->chip == AST2300 || ast->chip == AST2400) { | ||
245 | switch (data & 0x03) { | ||
246 | case 0: | ||
247 | ast->dram_type = AST_DRAM_512Mx16; | ||
248 | break; | ||
249 | default: | ||
250 | case 1: | ||
251 | ast->dram_type = AST_DRAM_1Gx16; | ||
252 | break; | ||
253 | case 2: | ||
254 | ast->dram_type = AST_DRAM_2Gx16; | ||
255 | break; | ||
256 | case 3: | ||
257 | ast->dram_type = AST_DRAM_4Gx16; | ||
258 | break; | ||
259 | } | ||
260 | } else { | ||
261 | switch (data & 0x0c) { | ||
262 | case 0: | ||
263 | case 4: | ||
264 | ast->dram_type = AST_DRAM_512Mx16; | ||
265 | break; | ||
266 | case 8: | ||
267 | if (data & 0x40) | ||
268 | ast->dram_type = AST_DRAM_1Gx16; | ||
269 | else | ||
270 | ast->dram_type = AST_DRAM_512Mx32; | ||
271 | break; | ||
272 | case 0xc: | ||
273 | ast->dram_type = AST_DRAM_1Gx32; | ||
274 | break; | ||
275 | } | ||
276 | } | ||
235 | 277 | ||
236 | if (ast->chip == AST2300 || ast->chip == AST2400) { | 278 | data = ast_read32(ast, 0x10120); |
237 | switch (data & 0x03) { | 279 | data2 = ast_read32(ast, 0x10170); |
238 | case 0: | 280 | if (data2 & 0x2000) |
239 | ast->dram_type = AST_DRAM_512Mx16; | 281 | ref_pll = 14318; |
240 | break; | 282 | else |
241 | default: | 283 | ref_pll = 12000; |
242 | case 1: | 284 | |
243 | ast->dram_type = AST_DRAM_1Gx16; | 285 | denum = data & 0x1f; |
244 | break; | 286 | num = (data & 0x3fe0) >> 5; |
245 | case 2: | 287 | data = (data & 0xc000) >> 14; |
246 | ast->dram_type = AST_DRAM_2Gx16; | 288 | switch (data) { |
247 | break; | ||
248 | case 3: | 289 | case 3: |
249 | ast->dram_type = AST_DRAM_4Gx16; | 290 | div = 0x4; |
250 | break; | ||
251 | } | ||
252 | } else { | ||
253 | switch (data & 0x0c) { | ||
254 | case 0: | ||
255 | case 4: | ||
256 | ast->dram_type = AST_DRAM_512Mx16; | ||
257 | break; | 291 | break; |
258 | case 8: | 292 | case 2: |
259 | if (data & 0x40) | 293 | case 1: |
260 | ast->dram_type = AST_DRAM_1Gx16; | 294 | div = 0x2; |
261 | else | ||
262 | ast->dram_type = AST_DRAM_512Mx32; | ||
263 | break; | 295 | break; |
264 | case 0xc: | 296 | default: |
265 | ast->dram_type = AST_DRAM_1Gx32; | 297 | div = 0x1; |
266 | break; | 298 | break; |
267 | } | 299 | } |
300 | ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
268 | } | 301 | } |
269 | |||
270 | data = ast_read32(ast, 0x10120); | ||
271 | data2 = ast_read32(ast, 0x10170); | ||
272 | if (data2 & 0x2000) | ||
273 | ref_pll = 14318; | ||
274 | else | ||
275 | ref_pll = 12000; | ||
276 | |||
277 | denum = data & 0x1f; | ||
278 | num = (data & 0x3fe0) >> 5; | ||
279 | data = (data & 0xc000) >> 14; | ||
280 | switch (data) { | ||
281 | case 3: | ||
282 | div = 0x4; | ||
283 | break; | ||
284 | case 2: | ||
285 | case 1: | ||
286 | div = 0x2; | ||
287 | break; | ||
288 | default: | ||
289 | div = 0x1; | ||
290 | break; | ||
291 | } | ||
292 | ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); | ||
293 | return 0; | 302 | return 0; |
294 | } | 303 | } |
295 | 304 | ||
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 810c51d92b99..5331ee1df086 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c | |||
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev) | |||
379 | ast_open_key(ast); | 379 | ast_open_key(ast); |
380 | ast_set_def_ext_reg(dev); | 380 | ast_set_def_ext_reg(dev); |
381 | 381 | ||
382 | if (ast->chip == AST2300 || ast->chip == AST2400) | 382 | if (ast->DisableP2A == false) |
383 | ast_init_dram_2300(dev); | 383 | { |
384 | else | 384 | if (ast->chip == AST2300 || ast->chip == AST2400) |
385 | ast_init_dram_reg(dev); | 385 | ast_init_dram_2300(dev); |
386 | else | ||
387 | ast_init_dram_reg(dev); | ||
386 | 388 | ||
387 | ast_init_3rdtx(dev); | 389 | ast_init_3rdtx(dev); |
390 | } | ||
391 | else | ||
392 | { | ||
393 | if (ast->tx_chip_type != AST_TX_NONE) | ||
394 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ | ||
395 | } | ||
388 | } | 396 | } |
389 | 397 | ||
390 | /* AST 2300 DRAM settings */ | 398 | /* AST 2300 DRAM settings */ |
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index eb9bf8786c24..18eefdcbf1ba 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | |||
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, | |||
1382 | 1382 | ||
1383 | pm_runtime_enable(dev); | 1383 | pm_runtime_enable(dev); |
1384 | 1384 | ||
1385 | pm_runtime_get_sync(dev); | ||
1385 | phy_power_on(dp->phy); | 1386 | phy_power_on(dp->phy); |
1386 | 1387 | ||
1387 | analogix_dp_init_dp(dp); | 1388 | analogix_dp_init_dp(dp); |
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, | |||
1414 | goto err_disable_pm_runtime; | 1415 | goto err_disable_pm_runtime; |
1415 | } | 1416 | } |
1416 | 1417 | ||
1418 | phy_power_off(dp->phy); | ||
1419 | pm_runtime_put(dev); | ||
1420 | |||
1417 | return 0; | 1421 | return 0; |
1418 | 1422 | ||
1419 | err_disable_pm_runtime: | 1423 | err_disable_pm_runtime: |
1424 | |||
1425 | phy_power_off(dp->phy); | ||
1426 | pm_runtime_put(dev); | ||
1420 | pm_runtime_disable(dev); | 1427 | pm_runtime_disable(dev); |
1421 | 1428 | ||
1422 | return ret; | 1429 | return ret; |
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index 04b3c161dfae..7f4cc6e172ab 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig | |||
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU | |||
7 | This is a KMS driver for emulated cirrus device in qemu. | 7 | This is a KMS driver for emulated cirrus device in qemu. |
8 | It is *NOT* intended for real cirrus devices. This requires | 8 | It is *NOT* intended for real cirrus devices. This requires |
9 | the modesetting userspace X.org driver. | 9 | the modesetting userspace X.org driver. |
10 | |||
11 | Cirrus is obsolete, the hardware was designed in the 90ies | ||
12 | and can't keep up with todays needs. More background: | ||
13 | https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ | ||
14 | |||
15 | Better alternatives are: | ||
16 | - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) | ||
17 | - qxl (DRM_QXL, qemu -vga qxl, works best with spice) | ||
18 | - virtio (DRM_VIRTIO_GPU, qemu -vga virtio) | ||
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 60697482b94c..50f5cf7b69d1 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, | |||
291 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); | 291 | EXPORT_SYMBOL(drm_atomic_get_crtc_state); |
292 | 292 | ||
293 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, | 293 | static void set_out_fence_for_crtc(struct drm_atomic_state *state, |
294 | struct drm_crtc *crtc, s64 __user *fence_ptr) | 294 | struct drm_crtc *crtc, s32 __user *fence_ptr) |
295 | { | 295 | { |
296 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; | 296 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; |
297 | } | 297 | } |
298 | 298 | ||
299 | static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, | 299 | static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, |
300 | struct drm_crtc *crtc) | 300 | struct drm_crtc *crtc) |
301 | { | 301 | { |
302 | s64 __user *fence_ptr; | 302 | s32 __user *fence_ptr; |
303 | 303 | ||
304 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; | 304 | fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; |
305 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; | 305 | state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; |
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
512 | state->color_mgmt_changed |= replaced; | 512 | state->color_mgmt_changed |= replaced; |
513 | return ret; | 513 | return ret; |
514 | } else if (property == config->prop_out_fence_ptr) { | 514 | } else if (property == config->prop_out_fence_ptr) { |
515 | s64 __user *fence_ptr = u64_to_user_ptr(val); | 515 | s32 __user *fence_ptr = u64_to_user_ptr(val); |
516 | 516 | ||
517 | if (!fence_ptr) | 517 | if (!fence_ptr) |
518 | return 0; | 518 | return 0; |
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); | |||
1915 | */ | 1915 | */ |
1916 | 1916 | ||
1917 | struct drm_out_fence_state { | 1917 | struct drm_out_fence_state { |
1918 | s64 __user *out_fence_ptr; | 1918 | s32 __user *out_fence_ptr; |
1919 | struct sync_file *sync_file; | 1919 | struct sync_file *sync_file; |
1920 | int fd; | 1920 | int fd; |
1921 | }; | 1921 | }; |
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, | |||
1952 | return 0; | 1952 | return 0; |
1953 | 1953 | ||
1954 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 1954 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
1955 | u64 __user *fence_ptr; | 1955 | s32 __user *fence_ptr; |
1956 | 1956 | ||
1957 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); | 1957 | fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); |
1958 | 1958 | ||
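Note: the s64 -> s32 change matters because the kernel writes a file descriptor (a 32-bit int) back through this user pointer; with s64 it would store 8 bytes and clobber whatever userspace placed next to the variable. An illustrative userspace view using libdrm (the request and property variables here are hypothetical):

    int32_t out_fence_fd = -1;

    /* hand the kernel a pointer to a 32-bit slot */
    drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
                             (uint64_t)(uintptr_t)&out_fence_fd);
    /* after a successful atomic commit, out_fence_fd holds the sync_file fd */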
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ac6a35212501..e6b19bc9021a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev, | |||
1460 | return NULL; | 1460 | return NULL; |
1461 | 1461 | ||
1462 | mode->type |= DRM_MODE_TYPE_USERDEF; | 1462 | mode->type |= DRM_MODE_TYPE_USERDEF; |
1463 | /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */ | ||
1464 | if (cmd->xres == 1366 && mode->hdisplay == 1368) { | ||
1465 | mode->hdisplay = 1366; | ||
1466 | mode->hsync_start--; | ||
1467 | mode->hsync_end--; | ||
1468 | drm_mode_set_name(mode); | ||
1469 | } | ||
1463 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 1470 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
1464 | return mode; | 1471 | return mode; |
1465 | } | 1472 | } |
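Note: the fixup exists because the common 1366x768 panel width is not a multiple of the 8-pixel character cell that GTF/CVT timing generation works in, so a requested width of 1366 comes back as 1368. A worked sketch of the rounding, assuming round-to-nearest cell granularity:

    int cells = (1366 + 8 / 2) / 8;    /* 171 */
    int hdisplay = cells * 8;          /* 1368; the hunk above forces it back
                                          to 1366 and pulls hsync_start and
                                          hsync_end in by one to match */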
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index ac953f037be7..cf8f0128c161 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) | |||
143 | } | 143 | } |
144 | 144 | ||
145 | if (dev->mode_config.delayed_event) { | 145 | if (dev->mode_config.delayed_event) { |
146 | /* | ||
147 | * FIXME: | ||
148 | * | ||
149 | * Use short (1s) delay to handle the initial delayed event. | ||
150 | * This delay should not be needed, but Optimus/nouveau will | ||
151 | * fail in a mysterious way if the delayed event is handled as | ||
152 | * soon as possible like it is done in | ||
153 | * drm_helper_probe_single_connector_modes() in case the poll | ||
154 | * was enabled before. | ||
155 | */ | ||
146 | poll = true; | 156 | poll = true; |
147 | delay = 0; | 157 | delay = HZ; |
148 | } | 158 | } |
149 | 159 | ||
150 | if (poll) | 160 | if (poll) |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96e8f08..fe0e85b41310 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c | |||
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, | |||
116 | struct list_head list; | 116 | struct list_head list; |
117 | bool found; | 117 | bool found; |
118 | 118 | ||
119 | /* | ||
120 | * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick | ||
121 | * drm_mm into giving out a low IOVA after address space | ||
122 | * rollover. This needs a proper fix. | ||
123 | */ | ||
119 | ret = drm_mm_insert_node_in_range(&mmu->mm, node, | 124 | ret = drm_mm_insert_node_in_range(&mmu->mm, node, |
120 | size, 0, mmu->last_iova, ~0UL, | 125 | size, 0, mmu->last_iova, ~0UL, |
121 | DRM_MM_SEARCH_DEFAULT); | 126 | mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); |
122 | 127 | ||
123 | if (ret != -ENOSPC) | 128 | if (ret != -ENOSPC) |
124 | break; | 129 | break; |
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f3117fe8..75eeb831ed6a 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
@@ -46,7 +46,8 @@ enum decon_flag_bits { | |||
46 | BIT_CLKS_ENABLED, | 46 | BIT_CLKS_ENABLED, |
47 | BIT_IRQS_ENABLED, | 47 | BIT_IRQS_ENABLED, |
48 | BIT_WIN_UPDATED, | 48 | BIT_WIN_UPDATED, |
49 | BIT_SUSPENDED | 49 | BIT_SUSPENDED, |
50 | BIT_REQUEST_UPDATE | ||
50 | }; | 51 | }; |
51 | 52 | ||
52 | struct decon_context { | 53 | struct decon_context { |
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) | |||
141 | m->crtc_vsync_end = m->crtc_vsync_start + 1; | 142 | m->crtc_vsync_end = m->crtc_vsync_start + 1; |
142 | } | 143 | } |
143 | 144 | ||
144 | decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); | ||
145 | |||
146 | /* enable clock gate */ | ||
147 | val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; | ||
148 | writel(val, ctx->addr + DECON_CMU); | ||
149 | |||
150 | if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) | 145 | if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) |
151 | decon_setup_trigger(ctx); | 146 | decon_setup_trigger(ctx); |
152 | 147 | ||
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, | |||
315 | 310 | ||
316 | /* window enable */ | 311 | /* window enable */ |
317 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); | 312 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); |
313 | set_bit(BIT_REQUEST_UPDATE, &ctx->flags); | ||
318 | } | 314 | } |
319 | 315 | ||
320 | static void decon_disable_plane(struct exynos_drm_crtc *crtc, | 316 | static void decon_disable_plane(struct exynos_drm_crtc *crtc, |
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, | |||
327 | return; | 323 | return; |
328 | 324 | ||
329 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); | 325 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); |
326 | set_bit(BIT_REQUEST_UPDATE, &ctx->flags); | ||
330 | } | 327 | } |
331 | 328 | ||
332 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | 329 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) |
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | |||
340 | for (i = ctx->first_win; i < WINDOWS_NR; i++) | 337 | for (i = ctx->first_win; i < WINDOWS_NR; i++) |
341 | decon_shadow_protect_win(ctx, i, false); | 338 | decon_shadow_protect_win(ctx, i, false); |
342 | 339 | ||
343 | /* standalone update */ | 340 | if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) |
344 | decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); | 341 | decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); |
345 | 342 | ||
346 | if (ctx->out_type & IFTYPE_I80) | 343 | if (ctx->out_type & IFTYPE_I80) |
347 | set_bit(BIT_WIN_UPDATED, &ctx->flags); | 344 | set_bit(BIT_WIN_UPDATED, &ctx->flags); |
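Note: the request/consume pattern introduced above, shown in isolation — producers latch a request with set_bit(), and the flush path applies it at most once via the atomic test_and_clear_bit(), so a flush with no plane changes no longer kicks a standalone update:

    set_bit(BIT_REQUEST_UPDATE, &ctx->flags);        /* plane update/disable */

    if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))    /* flush */
            decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);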
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea6..f7bce8603958 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c | |||
@@ -37,13 +37,6 @@ | |||
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | #include "gvt.h" | 38 | #include "gvt.h" |
39 | 39 | ||
40 | #define MB_TO_BYTES(mb) ((mb) << 20ULL) | ||
41 | #define BYTES_TO_MB(b) ((b) >> 20ULL) | ||
42 | |||
43 | #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) | ||
44 | #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) | ||
45 | #define HOST_FENCE 4 | ||
46 | |||
47 | static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) | 40 | static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) |
48 | { | 41 | { |
49 | struct intel_gvt *gvt = vgpu->gvt; | 42 | struct intel_gvt *gvt = vgpu->gvt; |
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | |||
165 | POSTING_READ(fence_reg_lo); | 158 | POSTING_READ(fence_reg_lo); |
166 | } | 159 | } |
167 | 160 | ||
161 | static void _clear_vgpu_fence(struct intel_vgpu *vgpu) | ||
162 | { | ||
163 | int i; | ||
164 | |||
165 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) | ||
166 | intel_vgpu_write_fence(vgpu, i, 0); | ||
167 | } | ||
168 | |||
168 | static void free_vgpu_fence(struct intel_vgpu *vgpu) | 169 | static void free_vgpu_fence(struct intel_vgpu *vgpu) |
169 | { | 170 | { |
170 | struct intel_gvt *gvt = vgpu->gvt; | 171 | struct intel_gvt *gvt = vgpu->gvt; |
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) | |||
178 | intel_runtime_pm_get(dev_priv); | 179 | intel_runtime_pm_get(dev_priv); |
179 | 180 | ||
180 | mutex_lock(&dev_priv->drm.struct_mutex); | 181 | mutex_lock(&dev_priv->drm.struct_mutex); |
182 | _clear_vgpu_fence(vgpu); | ||
181 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { | 183 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { |
182 | reg = vgpu->fence.regs[i]; | 184 | reg = vgpu->fence.regs[i]; |
183 | intel_vgpu_write_fence(vgpu, i, 0); | ||
184 | list_add_tail(®->link, | 185 | list_add_tail(®->link, |
185 | &dev_priv->mm.fence_list); | 186 | &dev_priv->mm.fence_list); |
186 | } | 187 | } |
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) | |||
208 | continue; | 209 | continue; |
209 | list_del(pos); | 210 | list_del(pos); |
210 | vgpu->fence.regs[i] = reg; | 211 | vgpu->fence.regs[i] = reg; |
211 | intel_vgpu_write_fence(vgpu, i, 0); | ||
212 | if (++i == vgpu_fence_sz(vgpu)) | 212 | if (++i == vgpu_fence_sz(vgpu)) |
213 | break; | 213 | break; |
214 | } | 214 | } |
215 | if (i != vgpu_fence_sz(vgpu)) | 215 | if (i != vgpu_fence_sz(vgpu)) |
216 | goto out_free_fence; | 216 | goto out_free_fence; |
217 | 217 | ||
218 | _clear_vgpu_fence(vgpu); | ||
219 | |||
218 | mutex_unlock(&dev_priv->drm.struct_mutex); | 220 | mutex_unlock(&dev_priv->drm.struct_mutex); |
219 | intel_runtime_pm_put(dev_priv); | 221 | intel_runtime_pm_put(dev_priv); |
220 | return 0; | 222 | return 0; |
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) | |||
314 | } | 316 | } |
315 | 317 | ||
316 | /** | 318 | /** |
319 | * intel_vgpu_reset_resource - reset resource state owned by a vGPU | ||
320 | * @vgpu: a vGPU | ||
321 | * | ||
322 | * This function is used to reset resource state owned by a vGPU. | ||
323 | * | ||
324 | */ | ||
325 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) | ||
326 | { | ||
327 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
328 | |||
329 | intel_runtime_pm_get(dev_priv); | ||
330 | _clear_vgpu_fence(vgpu); | ||
331 | intel_runtime_pm_put(dev_priv); | ||
332 | } | ||
333 | |||
334 | /** | ||
317 | * intel_alloc_vgpu_resource - allocate HW resource for a vGPU | 335 | * intel_alloc_vgpu_resource - allocate HW resource for a vGPU |
318 | * @vgpu: vGPU | 336 | * @vgpu: vGPU |
319 | * @param: vGPU creation params | 337 | * @param: vGPU creation params |
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 711c31c8d8b4..4a6a2ed65732 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c | |||
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
282 | } | 282 | } |
283 | return 0; | 283 | return 0; |
284 | } | 284 | } |
285 | |||
286 | /** | ||
287 | * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU | ||
288 | * | ||
289 | * @vgpu: a vGPU | ||
290 | * @primary: is the vGPU presented as primary | ||
291 | * | ||
292 | */ | ||
293 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | ||
294 | bool primary) | ||
295 | { | ||
296 | struct intel_gvt *gvt = vgpu->gvt; | ||
297 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
298 | u16 *gmch_ctl; | ||
299 | int i; | ||
300 | |||
301 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, | ||
302 | info->cfg_space_size); | ||
303 | |||
304 | if (!primary) { | ||
305 | vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = | ||
306 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
307 | vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = | ||
308 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
309 | } | ||
310 | |||
311 | /* Show guest that there isn't any stolen memory.*/ | ||
312 | gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); | ||
313 | *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); | ||
314 | |||
315 | intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, | ||
316 | gvt_aperture_pa_base(gvt), true); | ||
317 | |||
318 | vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO | ||
319 | | PCI_COMMAND_MEMORY | ||
320 | | PCI_COMMAND_MASTER); | ||
321 | /* | ||
322 | * Clear the bar upper 32bit and let guest to assign the new value | ||
323 | */ | ||
324 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); | ||
325 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); | ||
326 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); | ||
327 | |||
328 | for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { | ||
329 | vgpu->cfg_space.bar[i].size = pci_resource_len( | ||
330 | gvt->dev_priv->drm.pdev, i * 2); | ||
331 | vgpu->cfg_space.bar[i].tracked = false; | ||
332 | } | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * intel_vgpu_reset_cfg_space - reset vGPU configuration space | ||
337 | * | ||
338 | * @vgpu: a vGPU | ||
339 | * | ||
340 | */ | ||
341 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) | ||
342 | { | ||
343 | u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; | ||
344 | bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != | ||
345 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
346 | |||
347 | if (cmd & PCI_COMMAND_MEMORY) { | ||
348 | trap_gttmmio(vgpu, false); | ||
349 | map_aperture(vgpu, false); | ||
350 | } | ||
351 | |||
352 | /** | ||
353 | * Currently we only do such reset when vGPU is not | ||
354 | * owned by any VM, so we simply restore entire cfg | ||
355 | * space to default value. | ||
356 | */ | ||
357 | intel_vgpu_init_cfg_space(vgpu, primary); | ||
358 | } | ||
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d26a092c70e8..e4563984cb1e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -481,7 +481,6 @@ struct parser_exec_state { | |||
481 | (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) | 481 | (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) |
482 | 482 | ||
483 | static unsigned long bypass_scan_mask = 0; | 483 | static unsigned long bypass_scan_mask = 0; |
484 | static bool bypass_batch_buffer_scan = true; | ||
485 | 484 | ||
486 | /* ring ALL, type = 0 */ | 485 | /* ring ALL, type = 0 */ |
487 | static struct sub_op_bits sub_op_mi[] = { | 486 | static struct sub_op_bits sub_op_mi[] = { |
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) | |||
1525 | { | 1524 | { |
1526 | struct intel_gvt *gvt = s->vgpu->gvt; | 1525 | struct intel_gvt *gvt = s->vgpu->gvt; |
1527 | 1526 | ||
1528 | if (bypass_batch_buffer_scan) | ||
1529 | return 0; | ||
1530 | |||
1531 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { | 1527 | if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { |
1532 | /* BDW decides privilege based on address space */ | 1528 | /* BDW decides privilege based on address space */ |
1533 | if (cmd_val(s, 0) & (1 << 8)) | 1529 | if (cmd_val(s, 0) & (1 << 8)) |
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f6495c..34083731669d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) | |||
364 | #define get_desc_from_elsp_dwords(ed, i) \ | 364 | #define get_desc_from_elsp_dwords(ed, i) \ |
365 | ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) | 365 | ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) |
366 | 366 | ||
367 | |||
368 | #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) | ||
369 | #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) | ||
370 | static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, | ||
371 | unsigned long add, int gmadr_bytes) | ||
372 | { | ||
373 | if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) | ||
374 | return -1; | ||
375 | |||
376 | *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & | ||
377 | BATCH_BUFFER_ADDR_MASK; | ||
378 | if (gmadr_bytes == 8) { | ||
379 | *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = | ||
380 | add & BATCH_BUFFER_ADDR_HIGH_MASK; | ||
381 | } | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | 367 | static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) |
387 | { | 368 | { |
388 | int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; | 369 | const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; |
370 | struct intel_shadow_bb_entry *entry_obj; | ||
389 | 371 | ||
390 | /* pin the gem object to ggtt */ | 372 | /* pin the gem object to ggtt */ |
391 | if (!list_empty(&workload->shadow_bb)) { | 373 | list_for_each_entry(entry_obj, &workload->shadow_bb, list) { |
392 | struct intel_shadow_bb_entry *entry_obj = | 374 | struct i915_vma *vma; |
393 | list_first_entry(&workload->shadow_bb, | ||
394 | struct intel_shadow_bb_entry, | ||
395 | list); | ||
396 | struct intel_shadow_bb_entry *temp; | ||
397 | 375 | ||
398 | list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, | 376 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); |
399 | list) { | 377 | if (IS_ERR(vma)) { |
400 | struct i915_vma *vma; | 378 | gvt_err("Cannot pin\n"); |
401 | 379 | return; | |
402 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, | ||
403 | 4, 0); | ||
404 | if (IS_ERR(vma)) { | ||
405 | gvt_err("Cannot pin\n"); | ||
406 | return; | ||
407 | } | ||
408 | |||
409 | /* FIXME: we are not tracking our pinned VMA leaving it | ||
410 | * up to the core to fix up the stray pin_count upon | ||
411 | * free. | ||
412 | */ | ||
413 | |||
414 | /* update the relocate gma with shadow batch buffer*/ | ||
415 | set_gma_to_bb_cmd(entry_obj, | ||
416 | i915_ggtt_offset(vma), | ||
417 | gmadr_bytes); | ||
418 | } | 380 | } |
381 | |||
382 | /* FIXME: we are not tracking our pinned VMA leaving it | ||
383 | * up to the core to fix up the stray pin_count upon | ||
384 | * free. | ||
385 | */ | ||
386 | |||
387 | /* update the relocate gma with shadow batch buffer*/ | ||
388 | entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); | ||
389 | if (gmadr_bytes == 8) | ||
390 | entry_obj->bb_start_cmd_va[2] = 0; | ||
419 | } | 391 | } |
420 | } | 392 | } |
421 | 393 | ||
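The rewritten loop walks every shadow batch buffer with a plain list_for_each_entry(); the old list_first_entry() plus list_for_each_entry_safe() combination is only needed when entries may be deleted mid-walk, which this function never does. With bb_start_cmd_va typed as u32 * (see the scheduler.h hunk below), the GMA fixup also becomes a direct dword store instead of the byte-offset arithmetic in the removed set_gma_to_bb_cmd(). The two list idioms, with hypothetical item/head names:

    struct item *it, *tmp;

    /* Read-only walk: the cursor is the only iteration state. */
    list_for_each_entry(it, &head, link)
            use(it);

    /* Deleting as we go: tmp caches the next node before 'it' is freed. */
    list_for_each_entry_safe(it, tmp, &head, link) {
            list_del(&it->link);
            kfree(it);
    }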
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) | |||
826 | INIT_LIST_HEAD(&vgpu->workload_q_head[i]); | 798 | INIT_LIST_HEAD(&vgpu->workload_q_head[i]); |
827 | } | 799 | } |
828 | 800 | ||
829 | vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", | 801 | vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", |
830 | sizeof(struct intel_vgpu_workload), 0, | 802 | sizeof(struct intel_vgpu_workload), 0, |
831 | SLAB_HWCACHE_ALIGN, | 803 | SLAB_HWCACHE_ALIGN, |
832 | NULL); | 804 | NULL); |
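The slab cache loses the spaces in its name: cache names are exported verbatim through the whitespace-delimited /proc/slabinfo, where embedded spaces break parsers such as slabtop, and under SLUB the name also becomes a /sys/kernel/slab/<name> entry. The call as it now stands (error handling sketched):

    vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                                        sizeof(struct intel_vgpu_workload),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
    if (!vgpu->workloads)
            return -ENOMEM;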
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5b2ce2..47dec4acf7ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type) | |||
240 | static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) | 240 | static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) |
241 | { | 241 | { |
242 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; | 242 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; |
243 | u64 pte; | ||
244 | 243 | ||
245 | #ifdef readq | 244 | return readq(addr); |
246 | pte = readq(addr); | ||
247 | #else | ||
248 | pte = ioread32(addr); | ||
249 | pte |= (u64)ioread32(addr + 4) << 32; | ||
250 | #endif | ||
251 | return pte; | ||
252 | } | 245 | } |
253 | 246 | ||
254 | static void write_pte64(struct drm_i915_private *dev_priv, | 247 | static void write_pte64(struct drm_i915_private *dev_priv, |
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, | |||
256 | { | 249 | { |
257 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; | 250 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; |
258 | 251 | ||
259 | #ifdef writeq | ||
260 | writeq(pte, addr); | 252 | writeq(pte, addr); |
261 | #else | 253 | |
262 | iowrite32((u32)pte, addr); | ||
263 | iowrite32(pte >> 32, addr + 4); | ||
264 | #endif | ||
265 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 254 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
266 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 255 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
267 | } | 256 | } |
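Dropping the #ifdef readq fallbacks assumes native 64-bit MMIO accessors, which holds on the 64-bit kernels GVT-g targets. Had a 32-bit build mattered, the idiomatic replacement for the open-coded pair of 32-bit accesses would be one of the generic helper headers; a sketch with a simplified signature:

    /* Provides readq()/writeq() built from two 32-bit ops (low dword first). */
    #include <linux/io-64-nonatomic-lo-hi.h>

    static u64 read_pte64(void __iomem *gsm, unsigned long index)
    {
            return readq(gsm + index * sizeof(u64));
    }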
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) | |||
1380 | info->gtt_entry_size; | 1369 | info->gtt_entry_size; |
1381 | mem = kzalloc(mm->has_shadow_page_table ? | 1370 | mem = kzalloc(mm->has_shadow_page_table ? |
1382 | mm->page_table_entry_size * 2 | 1371 | mm->page_table_entry_size * 2 |
1383 | : mm->page_table_entry_size, | 1372 | : mm->page_table_entry_size, GFP_KERNEL); |
1384 | GFP_ATOMIC); | ||
1385 | if (!mem) | 1373 | if (!mem) |
1386 | return -ENOMEM; | 1374 | return -ENOMEM; |
1387 | mm->virtual_page_table = mem; | 1375 | mm->virtual_page_table = mem; |
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, | |||
1532 | struct intel_vgpu_mm *mm; | 1520 | struct intel_vgpu_mm *mm; |
1533 | int ret; | 1521 | int ret; |
1534 | 1522 | ||
1535 | mm = kzalloc(sizeof(*mm), GFP_ATOMIC); | 1523 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); |
1536 | if (!mm) { | 1524 | if (!mm) { |
1537 | ret = -ENOMEM; | 1525 | ret = -ENOMEM; |
1538 | goto fail; | 1526 | goto fail; |
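Several GFP_ATOMIC allocations become GFP_KERNEL in this file. These paths run in ordinary process context, so the allocator may sleep and reclaim memory, which makes GFP_KERNEL both correct and far less likely to fail; GFP_ATOMIC is meant only for contexts that cannot sleep (interrupt handlers, spinlocked sections). In short:

    /* Process context: sleeping and reclaim are allowed. */
    mm = kzalloc(sizeof(*mm), GFP_KERNEL);

    /* Interrupt context or under a spinlock: must not sleep. */
    e = kmalloc(sizeof(*e), GFP_ATOMIC);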
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1886 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1874 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
1887 | int page_entry_num = GTT_PAGE_SIZE >> | 1875 | int page_entry_num = GTT_PAGE_SIZE >> |
1888 | vgpu->gvt->device_info.gtt_entry_size_shift; | 1876 | vgpu->gvt->device_info.gtt_entry_size_shift; |
1889 | struct page *scratch_pt; | 1877 | void *scratch_pt; |
1890 | unsigned long mfn; | 1878 | unsigned long mfn; |
1891 | int i; | 1879 | int i; |
1892 | void *p; | ||
1893 | 1880 | ||
1894 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) | 1881 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) |
1895 | return -EINVAL; | 1882 | return -EINVAL; |
1896 | 1883 | ||
1897 | scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); | 1884 | scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); |
1898 | if (!scratch_pt) { | 1885 | if (!scratch_pt) { |
1899 | gvt_err("fail to allocate scratch page\n"); | 1886 | gvt_err("fail to allocate scratch page\n"); |
1900 | return -ENOMEM; | 1887 | return -ENOMEM; |
1901 | } | 1888 | } |
1902 | 1889 | ||
1903 | p = kmap_atomic(scratch_pt); | 1890 | mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); |
1904 | mfn = intel_gvt_hypervisor_virt_to_mfn(p); | ||
1905 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 1891 | if (mfn == INTEL_GVT_INVALID_ADDR) { |
1906 | gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); | 1892 | gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); |
1907 | kunmap_atomic(p); | 1893 | free_page((unsigned long)scratch_pt); |
1908 | __free_page(scratch_pt); | ||
1909 | return -EFAULT; | 1894 | return -EFAULT; |
1910 | } | 1895 | } |
1911 | gtt->scratch_pt[type].page_mfn = mfn; | 1896 | gtt->scratch_pt[type].page_mfn = mfn; |
1912 | gtt->scratch_pt[type].page = scratch_pt; | 1897 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); |
1913 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", | 1898 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", |
1914 | vgpu->id, type, mfn); | 1899 | vgpu->id, type, mfn); |
1915 | 1900 | ||
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1918 | * scratch_pt[type] indicate the scratch pt/scratch page used by the | 1903 | * scratch_pt[type] indicate the scratch pt/scratch page used by the |
1919 | * 'type' pt. | 1904 | * 'type' pt. |
1920 | * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by | 1905 | * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by |
1921 | * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self | 1906 | * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself |
1922 | * is GTT_TYPE_PPGTT_PTE_PT, and fully filled with the scratch page mfn. | 1907 | * is GTT_TYPE_PPGTT_PTE_PT, and fully filled with the scratch page mfn. |
1923 | */ | 1908 | */ |
1924 | if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { | 1909 | if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { |
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
1936 | se.val64 |= PPAT_CACHED_INDEX; | 1921 | se.val64 |= PPAT_CACHED_INDEX; |
1937 | 1922 | ||
1938 | for (i = 0; i < page_entry_num; i++) | 1923 | for (i = 0; i < page_entry_num; i++) |
1939 | ops->set_entry(p, &se, i, false, 0, vgpu); | 1924 | ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); |
1940 | } | 1925 | } |
1941 | 1926 | ||
1942 | kunmap_atomic(p); | ||
1943 | |||
1944 | return 0; | 1927 | return 0; |
1945 | } | 1928 | } |
1946 | 1929 | ||
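Switching from alloc_page() plus kmap_atomic() to get_zeroed_page() simplifies the whole function: get_zeroed_page() returns the kernel virtual address of a zeroed lowmem page that can be used directly, so the map/unmap pair disappears, and virt_to_page() recovers the struct page where one still needs to be stored. The pattern in isolation:

    struct page *pg;
    void *va;

    va = (void *)get_zeroed_page(GFP_KERNEL);    /* zeroed lowmem page */
    if (!va)
            return -ENOMEM;
    pg = virt_to_page(va);                       /* struct page, if needed */
    /* ... use va directly, no kmap_atomic()/kunmap_atomic() ... */
    free_page((unsigned long)va);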
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, | |||
2208 | int intel_gvt_init_gtt(struct intel_gvt *gvt) | 2191 | int intel_gvt_init_gtt(struct intel_gvt *gvt) |
2209 | { | 2192 | { |
2210 | int ret; | 2193 | int ret; |
2211 | void *page_addr; | 2194 | void *page; |
2212 | 2195 | ||
2213 | gvt_dbg_core("init gtt\n"); | 2196 | gvt_dbg_core("init gtt\n"); |
2214 | 2197 | ||
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
2221 | return -ENODEV; | 2204 | return -ENODEV; |
2222 | } | 2205 | } |
2223 | 2206 | ||
2224 | gvt->gtt.scratch_ggtt_page = | 2207 | page = (void *)get_zeroed_page(GFP_KERNEL); |
2225 | alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); | 2208 | if (!page) { |
2226 | if (!gvt->gtt.scratch_ggtt_page) { | ||
2227 | gvt_err("fail to allocate scratch ggtt page\n"); | 2209 | gvt_err("fail to allocate scratch ggtt page\n"); |
2228 | return -ENOMEM; | 2210 | return -ENOMEM; |
2229 | } | 2211 | } |
2212 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
2230 | 2213 | ||
2231 | page_addr = page_address(gvt->gtt.scratch_ggtt_page); | 2214 | gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); |
2232 | |||
2233 | gvt->gtt.scratch_ggtt_mfn = | ||
2234 | intel_gvt_hypervisor_virt_to_mfn(page_addr); | ||
2235 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { | 2215 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { |
2236 | gvt_err("fail to translate scratch ggtt page\n"); | 2216 | gvt_err("fail to translate scratch ggtt page\n"); |
2237 | __free_page(gvt->gtt.scratch_ggtt_page); | 2217 | __free_page(gvt->gtt.scratch_ggtt_page); |
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | |||
2297 | for (offset = 0; offset < num_entries; offset++) | 2277 | for (offset = 0; offset < num_entries; offset++) |
2298 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); | 2278 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); |
2299 | } | 2279 | } |
2280 | |||
2281 | /** | ||
2282 | * intel_vgpu_reset_gtt - reset all GTT related status | ||
2283 | * @vgpu: a vGPU | ||
2284 | * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset | ||
2285 | * | ||
2286 | * This function is called from the vfio core to reset all | ||
2287 | * GTT related status, including GGTT, PPGTT and scratch pages. | ||
2288 | * | ||
2289 | */ | ||
2290 | void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) | ||
2291 | { | ||
2292 | int i; | ||
2293 | |||
2294 | ppgtt_free_all_shadow_page(vgpu); | ||
2295 | if (!dmlr) | ||
2296 | return; | ||
2297 | |||
2298 | intel_vgpu_reset_ggtt(vgpu); | ||
2299 | |||
2300 | /* clear scratch page for security */ | ||
2301 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { | ||
2302 | if (vgpu->gtt.scratch_pt[i].page != NULL) | ||
2303 | memset(page_address(vgpu->gtt.scratch_pt[i].page), | ||
2304 | 0, PAGE_SIZE); | ||
2305 | } | ||
2306 | } | ||
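The new helper makes the reset depth explicit: a plain GT reset (dmlr == false) only drops the shadow PPGTT pages, while a device-model-level reset also re-initializes the GGTT and zeroes the scratch pages so no stale guest data can leak to the vGPU's next user. Expected usage, as wired up by intel_gvt_reset_vgpu_locked() in vgpu.c below:

    intel_vgpu_reset_gtt(vgpu, false);   /* GT reset: shadow PPGTT only    */
    intel_vgpu_reset_gtt(vgpu, true);    /* DMLR: also GGTT + scratch wipe */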
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index b315ab3593ec..f88eb5e89bea 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); | |||
208 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); | 208 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); |
209 | 209 | ||
210 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); | 210 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); |
211 | extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); | ||
211 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); | 212 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); |
212 | 213 | ||
213 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, | 214 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, |
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c3d2fd..e6bf5c533fbe 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c | |||
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) | |||
201 | intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); | 201 | intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); |
202 | intel_gvt_clean_vgpu_types(gvt); | 202 | intel_gvt_clean_vgpu_types(gvt); |
203 | 203 | ||
204 | idr_destroy(&gvt->vgpu_idr); | ||
205 | |||
204 | kfree(dev_priv->gvt); | 206 | kfree(dev_priv->gvt); |
205 | dev_priv->gvt = NULL; | 207 | dev_priv->gvt = NULL; |
206 | } | 208 | } |
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) | |||
237 | 239 | ||
238 | gvt_dbg_core("init gvt device\n"); | 240 | gvt_dbg_core("init gvt device\n"); |
239 | 241 | ||
242 | idr_init(&gvt->vgpu_idr); | ||
243 | |||
240 | mutex_init(&gvt->lock); | 244 | mutex_init(&gvt->lock); |
241 | gvt->dev_priv = dev_priv; | 245 | gvt->dev_priv = dev_priv; |
242 | 246 | ||
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) | |||
244 | 248 | ||
245 | ret = intel_gvt_setup_mmio_info(gvt); | 249 | ret = intel_gvt_setup_mmio_info(gvt); |
246 | if (ret) | 250 | if (ret) |
247 | return ret; | 251 | goto out_clean_idr; |
248 | 252 | ||
249 | ret = intel_gvt_load_firmware(gvt); | 253 | ret = intel_gvt_load_firmware(gvt); |
250 | if (ret) | 254 | if (ret) |
@@ -313,6 +317,8 @@ out_free_firmware: | |||
313 | intel_gvt_free_firmware(gvt); | 317 | intel_gvt_free_firmware(gvt); |
314 | out_clean_mmio_info: | 318 | out_clean_mmio_info: |
315 | intel_gvt_clean_mmio_info(gvt); | 319 | intel_gvt_clean_mmio_info(gvt); |
320 | out_clean_idr: | ||
321 | idr_destroy(&gvt->vgpu_idr); | ||
316 | kfree(gvt); | 322 | kfree(gvt); |
317 | return ret; | 323 | return ret; |
318 | } | 324 | } |
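The vgpu_idr now has a complete lifecycle: idr_init() in device init, idr_destroy() both on the new out_clean_idr error path and in intel_gvt_clean_device(). The general pattern, with hypothetical names and bounds:

    struct idr ids;
    int id;

    idr_init(&ids);
    id = idr_alloc(&ids, some_ptr, 1, 16, GFP_KERNEL);  /* id in [1, 16) */
    if (id < 0)
            goto out_destroy;
    /* ... use id ... */
    idr_remove(&ids, id);
    out_destroy:
    idr_destroy(&ids);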
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0af17016f33f..e227caf5859e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params { | |||
323 | 323 | ||
324 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, | 324 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, |
325 | struct intel_vgpu_creation_params *param); | 325 | struct intel_vgpu_creation_params *param); |
326 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); | ||
326 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); | 327 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); |
327 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | 328 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, |
328 | u32 fence, u64 value); | 329 | u32 fence, u64 value); |
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); | |||
375 | struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | 376 | struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, |
376 | struct intel_vgpu_type *type); | 377 | struct intel_vgpu_type *type); |
377 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); | 378 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); |
379 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | ||
380 | unsigned int engine_mask); | ||
378 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); | 381 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); |
379 | 382 | ||
380 | 383 | ||
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, | |||
411 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, | 414 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, |
412 | unsigned long *g_index); | 415 | unsigned long *g_index); |
413 | 416 | ||
417 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | ||
418 | bool primary); | ||
419 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); | ||
420 | |||
414 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, | 421 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, |
415 | void *p_data, unsigned int bytes); | 422 | void *p_data, unsigned int bytes); |
416 | 423 | ||
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); | |||
424 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); | 431 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); |
425 | 432 | ||
426 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); | 433 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); |
427 | int setup_vgpu_mmio(struct intel_vgpu *vgpu); | ||
428 | void populate_pvinfo_page(struct intel_vgpu *vgpu); | 434 | void populate_pvinfo_page(struct intel_vgpu *vgpu); |
429 | 435 | ||
430 | struct intel_gvt_ops { | 436 | struct intel_gvt_ops { |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312..ab2ea157da4c 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, | |||
93 | static int new_mmio_info(struct intel_gvt *gvt, | 93 | static int new_mmio_info(struct intel_gvt *gvt, |
94 | u32 offset, u32 flags, u32 size, | 94 | u32 offset, u32 flags, u32 size, |
95 | u32 addr_mask, u32 ro_mask, u32 device, | 95 | u32 addr_mask, u32 ro_mask, u32 device, |
96 | void *read, void *write) | 96 | int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), |
97 | int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) | ||
97 | { | 98 | { |
98 | struct intel_gvt_mmio_info *info, *p; | 99 | struct intel_gvt_mmio_info *info, *p; |
99 | u32 start, end, i; | 100 | u32 start, end, i; |
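Typing the read/write parameters as real function pointers instead of void * means a handler with the wrong signature — such as the bool-returning sbi_ctl_mmio_write() fixed later in this file — becomes a compile-time diagnostic instead of latent undefined behavior. A typedef would express the same prototype more compactly (sketch; the typedef name is hypothetical):

    typedef int (*gvt_mmio_func)(struct intel_vgpu *vgpu, unsigned int offset,
                                 void *p_data, unsigned int bytes);

    static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags,
                             u32 size, u32 addr_mask, u32 ro_mask, u32 device,
                             gvt_mmio_func read, gvt_mmio_func write);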
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, | |||
219 | default: | 220 | default: |
220 | /*should not hit here*/ | 221 | /*should not hit here*/ |
221 | gvt_err("invalid forcewake offset 0x%x\n", offset); | 222 | gvt_err("invalid forcewake offset 0x%x\n", offset); |
222 | return 1; | 223 | return -EINVAL; |
223 | } | 224 | } |
224 | } else { | 225 | } else { |
225 | ack_reg_offset = FORCEWAKE_ACK_HSW_REG; | 226 | ack_reg_offset = FORCEWAKE_ACK_HSW_REG; |
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, | |||
230 | return 0; | 231 | return 0; |
231 | } | 232 | } |
232 | 233 | ||
233 | static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, | ||
234 | void *p_data, unsigned int bytes, unsigned long bitmap) | ||
235 | { | ||
236 | struct intel_gvt_workload_scheduler *scheduler = | ||
237 | &vgpu->gvt->scheduler; | ||
238 | |||
239 | vgpu->resetting = true; | ||
240 | |||
241 | intel_vgpu_stop_schedule(vgpu); | ||
242 | /* | ||
243 | * The current_vgpu will set to NULL after stopping the | ||
244 | * scheduler when the reset is triggered by current vgpu. | ||
245 | */ | ||
246 | if (scheduler->current_vgpu == NULL) { | ||
247 | mutex_unlock(&vgpu->gvt->lock); | ||
248 | intel_gvt_wait_vgpu_idle(vgpu); | ||
249 | mutex_lock(&vgpu->gvt->lock); | ||
250 | } | ||
251 | |||
252 | intel_vgpu_reset_execlist(vgpu, bitmap); | ||
253 | |||
254 | /* full GPU reset */ | ||
255 | if (bitmap == 0xff) { | ||
256 | mutex_unlock(&vgpu->gvt->lock); | ||
257 | intel_vgpu_clean_gtt(vgpu); | ||
258 | mutex_lock(&vgpu->gvt->lock); | ||
259 | setup_vgpu_mmio(vgpu); | ||
260 | populate_pvinfo_page(vgpu); | ||
261 | intel_vgpu_init_gtt(vgpu); | ||
262 | } | ||
263 | |||
264 | vgpu->resetting = false; | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 234 | static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
270 | void *p_data, unsigned int bytes) | 235 | void *p_data, unsigned int bytes) |
271 | { | 236 | { |
237 | unsigned int engine_mask = 0; | ||
272 | u32 data; | 238 | u32 data; |
273 | u64 bitmap = 0; | ||
274 | 239 | ||
275 | write_vreg(vgpu, offset, p_data, bytes); | 240 | write_vreg(vgpu, offset, p_data, bytes); |
276 | data = vgpu_vreg(vgpu, offset); | 241 | data = vgpu_vreg(vgpu, offset); |
277 | 242 | ||
278 | if (data & GEN6_GRDOM_FULL) { | 243 | if (data & GEN6_GRDOM_FULL) { |
279 | gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); | 244 | gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); |
280 | bitmap = 0xff; | 245 | engine_mask = ALL_ENGINES; |
281 | } | 246 | } else { |
282 | if (data & GEN6_GRDOM_RENDER) { | 247 | if (data & GEN6_GRDOM_RENDER) { |
283 | gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); | 248 | gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); |
284 | bitmap |= (1 << RCS); | 249 | engine_mask |= (1 << RCS); |
285 | } | 250 | } |
286 | if (data & GEN6_GRDOM_MEDIA) { | 251 | if (data & GEN6_GRDOM_MEDIA) { |
287 | gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); | 252 | gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); |
288 | bitmap |= (1 << VCS); | 253 | engine_mask |= (1 << VCS); |
289 | } | 254 | } |
290 | if (data & GEN6_GRDOM_BLT) { | 255 | if (data & GEN6_GRDOM_BLT) { |
291 | gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); | 256 | gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); |
292 | bitmap |= (1 << BCS); | 257 | engine_mask |= (1 << BCS); |
293 | } | 258 | } |
294 | if (data & GEN6_GRDOM_VECS) { | 259 | if (data & GEN6_GRDOM_VECS) { |
295 | gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); | 260 | gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); |
296 | bitmap |= (1 << VECS); | 261 | engine_mask |= (1 << VECS); |
297 | } | 262 | } |
298 | if (data & GEN8_GRDOM_MEDIA2) { | 263 | if (data & GEN8_GRDOM_MEDIA2) { |
299 | gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); | 264 | gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); |
300 | if (HAS_BSD2(vgpu->gvt->dev_priv)) | 265 | if (HAS_BSD2(vgpu->gvt->dev_priv)) |
301 | bitmap |= (1 << VCS2); | 266 | engine_mask |= (1 << VCS2); |
267 | } | ||
302 | } | 268 | } |
303 | return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); | 269 | |
270 | intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); | ||
271 | |||
272 | return 0; | ||
304 | } | 273 | } |
305 | 274 | ||
306 | static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | 275 | static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, |
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
974 | return 0; | 943 | return 0; |
975 | } | 944 | } |
976 | 945 | ||
977 | static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 946 | static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
978 | void *p_data, unsigned int bytes) | 947 | void *p_data, unsigned int bytes) |
979 | { | 948 | { |
980 | u32 data; | 949 | u32 data; |
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1366 | static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, | 1335 | static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, |
1367 | unsigned int offset, void *p_data, unsigned int bytes) | 1336 | unsigned int offset, void *p_data, unsigned int bytes) |
1368 | { | 1337 | { |
1369 | int rc = 0; | ||
1370 | unsigned int id = 0; | 1338 | unsigned int id = 0; |
1371 | 1339 | ||
1372 | write_vreg(vgpu, offset, p_data, bytes); | 1340 | write_vreg(vgpu, offset, p_data, bytes); |
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, | |||
1389 | id = VECS; | 1357 | id = VECS; |
1390 | break; | 1358 | break; |
1391 | default: | 1359 | default: |
1392 | rc = -EINVAL; | 1360 | return -EINVAL; |
1393 | break; | ||
1394 | } | 1361 | } |
1395 | set_bit(id, (void *)vgpu->tlb_handle_pending); | 1362 | set_bit(id, (void *)vgpu->tlb_handle_pending); |
1396 | 1363 | ||
1397 | return rc; | 1364 | return 0; |
1398 | } | 1365 | } |
1399 | 1366 | ||
1400 | static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | 1367 | static int ring_reset_ctl_write(struct intel_vgpu *vgpu, |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07ae487..3f656e3a6e5a 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, | |||
230 | return NULL; | 230 | return NULL; |
231 | } | 231 | } |
232 | 232 | ||
233 | static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, | 233 | static ssize_t available_instances_show(struct kobject *kobj, |
234 | char *buf) | 234 | struct device *dev, char *buf) |
235 | { | 235 | { |
236 | struct intel_vgpu_type *type; | 236 | struct intel_vgpu_type *type; |
237 | unsigned int num = 0; | 237 | unsigned int num = 0; |
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, | |||
269 | type->fence); | 269 | type->fence); |
270 | } | 270 | } |
271 | 271 | ||
272 | static MDEV_TYPE_ATTR_RO(available_instance); | 272 | static MDEV_TYPE_ATTR_RO(available_instances); |
273 | static MDEV_TYPE_ATTR_RO(device_api); | 273 | static MDEV_TYPE_ATTR_RO(device_api); |
274 | static MDEV_TYPE_ATTR_RO(description); | 274 | static MDEV_TYPE_ATTR_RO(description); |
275 | 275 | ||
276 | static struct attribute *type_attrs[] = { | 276 | static struct attribute *type_attrs[] = { |
277 | &mdev_type_attr_available_instance.attr, | 277 | &mdev_type_attr_available_instances.attr, |
278 | &mdev_type_attr_device_api.attr, | 278 | &mdev_type_attr_device_api.attr, |
279 | &mdev_type_attr_description.attr, | 279 | &mdev_type_attr_description.attr, |
280 | NULL, | 280 | NULL, |
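The rename is ABI-relevant: MDEV_TYPE_ATTR_RO(name) defines mdev_type_attr_<name>, and the macro argument becomes the sysfs file name under mdev_supported_types/<type>/. The vfio-mdev interface documents that attribute as available_instances (plural), and management stacks read it by exactly that string:

    /* Exposes .../mdev_supported_types/<type>/available_instances */
    static MDEV_TYPE_ATTR_RO(available_instances);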
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
398 | struct intel_vgpu_type *type; | 398 | struct intel_vgpu_type *type; |
399 | struct device *pdev; | 399 | struct device *pdev; |
400 | void *gvt; | 400 | void *gvt; |
401 | int ret; | ||
401 | 402 | ||
402 | pdev = mdev_parent_dev(mdev); | 403 | pdev = mdev_parent_dev(mdev); |
403 | gvt = kdev_to_i915(pdev)->gvt; | 404 | gvt = kdev_to_i915(pdev)->gvt; |
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
406 | if (!type) { | 407 | if (!type) { |
407 | gvt_err("failed to find type %s to create\n", | 408 | gvt_err("failed to find type %s to create\n", |
408 | kobject_name(kobj)); | 409 | kobject_name(kobj)); |
409 | return -EINVAL; | 410 | ret = -EINVAL; |
411 | goto out; | ||
410 | } | 412 | } |
411 | 413 | ||
412 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); | 414 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); |
413 | if (IS_ERR_OR_NULL(vgpu)) { | 415 | if (IS_ERR_OR_NULL(vgpu)) { |
414 | gvt_err("create intel vgpu failed\n"); | 416 | ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); |
415 | return -EINVAL; | 417 | gvt_err("failed to create intel vgpu: %d\n", ret); |
418 | goto out; | ||
416 | } | 419 | } |
417 | 420 | ||
418 | INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); | 421 | INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); |
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
422 | 425 | ||
423 | gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", | 426 | gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", |
424 | dev_name(mdev_dev(mdev))); | 427 | dev_name(mdev_dev(mdev))); |
425 | return 0; | 428 | ret = 0; |
429 | |||
430 | out: | ||
431 | return ret; | ||
426 | } | 432 | } |
427 | 433 | ||
428 | static int intel_vgpu_remove(struct mdev_device *mdev) | 434 | static int intel_vgpu_remove(struct mdev_device *mdev) |
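Returning PTR_ERR(vgpu) instead of a blanket -EINVAL preserves the real failure reason for the mdev core and the log message. The general pattern for APIs that can fail either with an ERR_PTR-encoded errno or with NULL:

    struct foo *f = foo_create();        /* hypothetical constructor */

    if (IS_ERR(f))
            return PTR_ERR(f);           /* e.g. -ENOMEM, -EINVAL, ... */
    if (!f)
            return -EFAULT;              /* NULL is a distinct failure */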
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a1946..4df078bc5d04 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
125 | if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) | 125 | if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) |
126 | goto err; | 126 | goto err; |
127 | 127 | ||
128 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); | ||
129 | if (!mmio && !vgpu->mmio.disable_warn_untrack) { | ||
130 | gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", | ||
131 | vgpu->id, offset, bytes, *(u32 *)p_data); | ||
132 | |||
133 | if (offset == 0x206c) { | ||
134 | gvt_err("------------------------------------------\n"); | ||
135 | gvt_err("vgpu%d: likely triggers a gfx reset\n", | ||
136 | vgpu->id); | ||
137 | gvt_err("------------------------------------------\n"); | ||
138 | vgpu->mmio.disable_warn_untrack = true; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { | 128 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { |
143 | if (WARN_ON(!IS_ALIGNED(offset, bytes))) | 129 | if (WARN_ON(!IS_ALIGNED(offset, bytes))) |
144 | goto err; | 130 | goto err; |
145 | } | 131 | } |
146 | 132 | ||
133 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); | ||
147 | if (mmio) { | 134 | if (mmio) { |
148 | if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { | 135 | if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { |
149 | if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) | 136 | if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) |
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
152 | goto err; | 139 | goto err; |
153 | } | 140 | } |
154 | ret = mmio->read(vgpu, offset, p_data, bytes); | 141 | ret = mmio->read(vgpu, offset, p_data, bytes); |
155 | } else | 142 | } else { |
156 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); | 143 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); |
157 | 144 | ||
145 | if (!vgpu->mmio.disable_warn_untrack) { | ||
146 | gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", | ||
147 | vgpu->id, offset, bytes, *(u32 *)p_data); | ||
148 | |||
149 | if (offset == 0x206c) { | ||
150 | gvt_err("------------------------------------------\n"); | ||
151 | gvt_err("vgpu%d: likely triggers a gfx reset\n", | ||
152 | vgpu->id); | ||
153 | gvt_err("------------------------------------------\n"); | ||
154 | vgpu->mmio.disable_warn_untrack = true; | ||
155 | } | ||
156 | } | ||
157 | } | ||
158 | |||
158 | if (ret) | 159 | if (ret) |
159 | goto err; | 160 | goto err; |
160 | 161 | ||
@@ -302,3 +303,56 @@ err: | |||
302 | mutex_unlock(&gvt->lock); | 303 | mutex_unlock(&gvt->lock); |
303 | return ret; | 304 | return ret; |
304 | } | 305 | } |
306 | |||
307 | |||
308 | /** | ||
309 | * intel_vgpu_reset_mmio - reset virtual MMIO space | ||
310 | * @vgpu: a vGPU | ||
311 | * | ||
312 | */ | ||
313 | void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) | ||
314 | { | ||
315 | struct intel_gvt *gvt = vgpu->gvt; | ||
316 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
317 | |||
318 | memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); | ||
319 | memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); | ||
320 | |||
321 | vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; | ||
322 | |||
323 | /* set bits 0:2 (Core C-State) to C0 */ | ||
324 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * intel_vgpu_init_mmio - init MMIO space | ||
329 | * @vgpu: a vGPU | ||
330 | * | ||
331 | * Returns: | ||
332 | * Zero on success, negative error code if failed | ||
333 | */ | ||
334 | int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) | ||
335 | { | ||
336 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; | ||
337 | |||
338 | vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); | ||
339 | if (!vgpu->mmio.vreg) | ||
340 | return -ENOMEM; | ||
341 | |||
342 | vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; | ||
343 | |||
344 | intel_vgpu_reset_mmio(vgpu); | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | /** | ||
350 | * intel_vgpu_clean_mmio - clean MMIO space | ||
351 | * @vgpu: a vGPU | ||
352 | * | ||
353 | */ | ||
354 | void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) | ||
355 | { | ||
356 | vfree(vgpu->mmio.vreg); | ||
357 | vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; | ||
358 | } | ||
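Separating init from reset means a device reset now just re-copies the firmware MMIO snapshot into the existing register files instead of freeing and reallocating them; sreg is an offset into the same vzalloc'd block as vreg, so intel_vgpu_clean_mmio() releases both with a single vfree(). The lifecycle in brief:

    intel_vgpu_init_mmio(vgpu);    /* once: vzalloc + initial snapshot  */
    intel_vgpu_reset_mmio(vgpu);   /* per reset: re-copy firmware state */
    intel_vgpu_clean_mmio(vgpu);   /* teardown: vfree the shared block  */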
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 87d5b5e366a3..3bc620f56f35 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h | |||
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, | |||
86 | *offset; \ | 86 | *offset; \ |
87 | }) | 87 | }) |
88 | 88 | ||
89 | int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); | ||
90 | void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); | ||
91 | void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); | ||
92 | |||
89 | int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); | 93 | int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); |
90 | 94 | ||
91 | int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, | 95 | int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, |
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921770c6..d9fb41ab7119 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) | |||
36 | vgpu->id)) | 36 | vgpu->id)) |
37 | return -EINVAL; | 37 | return -EINVAL; |
38 | 38 | ||
39 | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | | 39 | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | |
40 | GFP_DMA32 | __GFP_ZERO, | 40 | __GFP_ZERO, |
41 | INTEL_GVT_OPREGION_PORDER); | 41 | get_order(INTEL_GVT_OPREGION_SIZE)); |
42 | 42 | ||
43 | if (!vgpu_opregion(vgpu)->va) | 43 | if (!vgpu_opregion(vgpu)->va) |
44 | return -ENOMEM; | 44 | return -ENOMEM; |
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) | |||
97 | if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { | 97 | if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { |
98 | map_vgpu_opregion(vgpu, false); | 98 | map_vgpu_opregion(vgpu, false); |
99 | free_pages((unsigned long)vgpu_opregion(vgpu)->va, | 99 | free_pages((unsigned long)vgpu_opregion(vgpu)->va, |
100 | INTEL_GVT_OPREGION_PORDER); | 100 | get_order(INTEL_GVT_OPREGION_SIZE)); |
101 | 101 | ||
102 | vgpu_opregion(vgpu)->va = NULL; | 102 | vgpu_opregion(vgpu)->va = NULL; |
103 | } | 103 | } |
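Deriving the allocation order from the size removes a constant that could silently drift out of sync: get_order() rounds a byte count up to the nearest page-allocation order, so get_order(INTEL_GVT_OPREGION_SIZE) is 1 with 4 KiB pages and the allocation and free always agree. Equivalent sketch:

    /* 2 pages => order 1: __get_free_pages() returns 2^1 contiguous pages. */
    va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                  get_order(INTEL_GVT_OPREGION_SIZE));
    /* ... use va ... */
    free_pages((unsigned long)va, get_order(INTEL_GVT_OPREGION_SIZE));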
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02..fbd023a16f18 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h | |||
@@ -50,8 +50,7 @@ | |||
50 | #define INTEL_GVT_OPREGION_PARM 0x204 | 50 | #define INTEL_GVT_OPREGION_PARM 0x204 |
51 | 51 | ||
52 | #define INTEL_GVT_OPREGION_PAGES 2 | 52 | #define INTEL_GVT_OPREGION_PAGES 2 |
53 | #define INTEL_GVT_OPREGION_PORDER 1 | 53 | #define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) |
54 | #define INTEL_GVT_OPREGION_SIZE (2 * 4096) | ||
55 | 54 | ||
56 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) | 55 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) |
57 | 56 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db242250235..e91885dffeff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
350 | { | 350 | { |
351 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 351 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
352 | struct intel_vgpu_workload *workload; | 352 | struct intel_vgpu_workload *workload; |
353 | struct intel_vgpu *vgpu; | ||
353 | int event; | 354 | int event; |
354 | 355 | ||
355 | mutex_lock(&gvt->lock); | 356 | mutex_lock(&gvt->lock); |
356 | 357 | ||
357 | workload = scheduler->current_workload[ring_id]; | 358 | workload = scheduler->current_workload[ring_id]; |
359 | vgpu = workload->vgpu; | ||
358 | 360 | ||
359 | if (!workload->status && !workload->vgpu->resetting) { | 361 | if (!workload->status && !vgpu->resetting) { |
360 | wait_event(workload->shadow_ctx_status_wq, | 362 | wait_event(workload->shadow_ctx_status_wq, |
361 | !atomic_read(&workload->shadow_ctx_active)); | 363 | !atomic_read(&workload->shadow_ctx_active)); |
362 | 364 | ||
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
364 | 366 | ||
365 | for_each_set_bit(event, workload->pending_events, | 367 | for_each_set_bit(event, workload->pending_events, |
366 | INTEL_GVT_EVENT_MAX) | 368 | INTEL_GVT_EVENT_MAX) |
367 | intel_vgpu_trigger_virtual_event(workload->vgpu, | 369 | intel_vgpu_trigger_virtual_event(vgpu, event); |
368 | event); | ||
369 | } | 370 | } |
370 | 371 | ||
371 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", | 372 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", |
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
373 | 374 | ||
374 | scheduler->current_workload[ring_id] = NULL; | 375 | scheduler->current_workload[ring_id] = NULL; |
375 | 376 | ||
376 | atomic_dec(&workload->vgpu->running_workload_num); | ||
377 | |||
378 | list_del_init(&workload->list); | 377 | list_del_init(&workload->list); |
379 | workload->complete(workload); | 378 | workload->complete(workload); |
380 | 379 | ||
380 | atomic_dec(&vgpu->running_workload_num); | ||
381 | wake_up(&scheduler->workload_complete_wq); | 381 | wake_up(&scheduler->workload_complete_wq); |
382 | mutex_unlock(&gvt->lock); | 382 | mutex_unlock(&gvt->lock); |
383 | } | 383 | } |
@@ -459,11 +459,11 @@ complete: | |||
459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", | 459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", |
460 | workload, workload->status); | 460 | workload, workload->status); |
461 | 461 | ||
462 | complete_current_workload(gvt, ring_id); | ||
463 | |||
464 | if (workload->req) | 462 | if (workload->req) |
465 | i915_gem_request_put(fetch_and_zero(&workload->req)); | 463 | i915_gem_request_put(fetch_and_zero(&workload->req)); |
466 | 464 | ||
465 | complete_current_workload(gvt, ring_id); | ||
466 | |||
467 | if (need_force_wake) | 467 | if (need_force_wake) |
468 | intel_uncore_forcewake_put(gvt->dev_priv, | 468 | intel_uncore_forcewake_put(gvt->dev_priv, |
469 | FORCEWAKE_ALL); | 469 | FORCEWAKE_ALL); |
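Both scheduler hunks close the same use-after-free window: workload->complete() typically returns the workload to its slab cache, so any later workload-> dereference would touch freed memory. Caching the vgpu pointer up front, and putting the request before completing, keeps every access after the free on still-live objects. Assuming complete() does free the workload, the safe ordering is:

    vgpu = workload->vgpu;           /* cache before workload can be freed */
    i915_gem_request_put(fetch_and_zero(&workload->req));
    workload->complete(workload);    /* may kmem_cache_free(workload)      */
    atomic_dec(&vgpu->running_workload_num);   /* safe: cached pointer     */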
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28bff51..2833dfa8c9ae 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { | |||
113 | struct drm_i915_gem_object *obj; | 113 | struct drm_i915_gem_object *obj; |
114 | void *va; | 114 | void *va; |
115 | unsigned long len; | 115 | unsigned long len; |
116 | void *bb_start_cmd_va; | 116 | u32 *bb_start_cmd_va; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | #define workload_q_head(vgpu, ring_id) \ | 119 | #define workload_q_head(vgpu, ring_id) \ |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d5777..7295bc8e12fb 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
@@ -35,79 +35,6 @@ | |||
35 | #include "gvt.h" | 35 | #include "gvt.h" |
36 | #include "i915_pvinfo.h" | 36 | #include "i915_pvinfo.h" |
37 | 37 | ||
38 | static void clean_vgpu_mmio(struct intel_vgpu *vgpu) | ||
39 | { | ||
40 | vfree(vgpu->mmio.vreg); | ||
41 | vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; | ||
42 | } | ||
43 | |||
44 | int setup_vgpu_mmio(struct intel_vgpu *vgpu) | ||
45 | { | ||
46 | struct intel_gvt *gvt = vgpu->gvt; | ||
47 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
48 | |||
49 | if (vgpu->mmio.vreg) | ||
50 | memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); | ||
51 | else { | ||
52 | vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); | ||
53 | if (!vgpu->mmio.vreg) | ||
54 | return -ENOMEM; | ||
55 | } | ||
56 | |||
57 | vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; | ||
58 | |||
59 | memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); | ||
60 | memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); | ||
61 | |||
62 | vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; | ||
63 | |||
64 | /* set the bit 0:2(Core C-State ) to C0 */ | ||
65 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, | ||
70 | struct intel_vgpu_creation_params *param) | ||
71 | { | ||
72 | struct intel_gvt *gvt = vgpu->gvt; | ||
73 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
74 | u16 *gmch_ctl; | ||
75 | int i; | ||
76 | |||
77 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, | ||
78 | info->cfg_space_size); | ||
79 | |||
80 | if (!param->primary) { | ||
81 | vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = | ||
82 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
83 | vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = | ||
84 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
85 | } | ||
86 | |||
87 | /* Show guest that there isn't any stolen memory.*/ | ||
88 | gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); | ||
89 | *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); | ||
90 | |||
91 | intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, | ||
92 | gvt_aperture_pa_base(gvt), true); | ||
93 | |||
94 | vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO | ||
95 | | PCI_COMMAND_MEMORY | ||
96 | | PCI_COMMAND_MASTER); | ||
97 | /* | ||
98 | * Clear the bar upper 32bit and let guest to assign the new value | ||
99 | */ | ||
100 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); | ||
101 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); | ||
102 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); | ||
103 | |||
104 | for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { | ||
105 | vgpu->cfg_space.bar[i].size = pci_resource_len( | ||
106 | gvt->dev_priv->drm.pdev, i * 2); | ||
107 | vgpu->cfg_space.bar[i].tracked = false; | ||
108 | } | ||
109 | } | ||
110 | |||
111 | void populate_pvinfo_page(struct intel_vgpu *vgpu) | 38 | void populate_pvinfo_page(struct intel_vgpu *vgpu) |
112 | { | 39 | { |
113 | /* setup the ballooning information */ | 40 | /* setup the ballooning information */ |
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
177 | if (low_avail / min_low == 0) | 104 | if (low_avail / min_low == 0) |
178 | break; | 105 | break; |
179 | gvt->types[i].low_gm_size = min_low; | 106 | gvt->types[i].low_gm_size = min_low; |
180 | gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; | 107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); |
181 | gvt->types[i].fence = 4; | 108 | gvt->types[i].fence = 4; |
182 | gvt->types[i].max_instance = low_avail / min_low; | 109 | gvt->types[i].max_instance = low_avail / min_low; |
183 | gvt->types[i].avail_instance = gvt->types[i].max_instance; | 110 | gvt->types[i].avail_instance = gvt->types[i].max_instance; |
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
217 | */ | 144 | */ |
218 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - | 145 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - |
219 | gvt->gm.vgpu_allocated_low_gm_size; | 146 | gvt->gm.vgpu_allocated_low_gm_size; |
220 | high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - | 147 | high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - |
221 | gvt->gm.vgpu_allocated_high_gm_size; | 148 | gvt->gm.vgpu_allocated_high_gm_size; |
222 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - | 149 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - |
223 | gvt->fence.vgpu_allocated_fence_num; | 150 | gvt->fence.vgpu_allocated_fence_num; |
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) | |||
268 | intel_vgpu_clean_gtt(vgpu); | 195 | intel_vgpu_clean_gtt(vgpu); |
269 | intel_gvt_hypervisor_detach_vgpu(vgpu); | 196 | intel_gvt_hypervisor_detach_vgpu(vgpu); |
270 | intel_vgpu_free_resource(vgpu); | 197 | intel_vgpu_free_resource(vgpu); |
271 | clean_vgpu_mmio(vgpu); | 198 | intel_vgpu_clean_mmio(vgpu); |
272 | vfree(vgpu); | 199 | vfree(vgpu); |
273 | 200 | ||
274 | intel_gvt_update_vgpu_types(gvt); | 201 | intel_gvt_update_vgpu_types(gvt); |
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
300 | vgpu->gvt = gvt; | 227 | vgpu->gvt = gvt; |
301 | bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); | 228 | bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); |
302 | 229 | ||
303 | setup_vgpu_cfg_space(vgpu, param); | 230 | intel_vgpu_init_cfg_space(vgpu, param->primary); |
304 | 231 | ||
305 | ret = setup_vgpu_mmio(vgpu); | 232 | ret = intel_vgpu_init_mmio(vgpu); |
306 | if (ret) | 233 | if (ret) |
307 | goto out_free_vgpu; | 234 | goto out_clean_idr; |
308 | 235 | ||
309 | ret = intel_vgpu_alloc_resource(vgpu, param); | 236 | ret = intel_vgpu_alloc_resource(vgpu, param); |
310 | if (ret) | 237 | if (ret) |
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: | |||
354 | out_clean_vgpu_resource: | 281 | out_clean_vgpu_resource: |
355 | intel_vgpu_free_resource(vgpu); | 282 | intel_vgpu_free_resource(vgpu); |
356 | out_clean_vgpu_mmio: | 283 | out_clean_vgpu_mmio: |
357 | clean_vgpu_mmio(vgpu); | 284 | intel_vgpu_clean_mmio(vgpu); |
285 | out_clean_idr: | ||
286 | idr_remove(&gvt->vgpu_idr, vgpu->id); | ||
358 | out_free_vgpu: | 287 | out_free_vgpu: |
359 | vfree(vgpu); | 288 | vfree(vgpu); |
360 | mutex_unlock(&gvt->lock); | 289 | mutex_unlock(&gvt->lock); |
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
398 | } | 327 | } |
399 | 328 | ||
400 | /** | 329 | /** |
401 | * intel_gvt_reset_vgpu - reset a virtual GPU | 330 | * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset |
331 | * @vgpu: virtual GPU | ||
332 | * @dmlr: vGPU Device Model Level Reset or GT Reset | ||
333 | * @engine_mask: engines to reset for GT reset | ||
334 | * | ||
335 | * This function is called when the user wants to reset a virtual GPU through | ||
336 | * device model reset or GT reset. The caller should hold the gvt lock. | ||
337 | * | ||
338 | * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset | ||
339 | * the whole vGPU to its default state, as when it was created. This vGPU function | ||
340 | * is required for both functionality and security concerns. The ultimate goal | ||
341 | * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines. When we | ||
342 | * assign a vGPU to a virtual machine we must issue such a reset first. | ||
343 | * | ||
344 | * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines | ||
345 | * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec. | ||
346 | * Unlike the FLR, a GT reset only resets particular resources of a vGPU per | ||
347 | * the reset request. The guest driver can issue a GT reset by programming the | ||
348 | * virtual GDRST register to reset specific virtual GPU engine or all | ||
349 | * engines. | ||
350 | * | ||
351 | * The parameter dmlr identifies whether we will do a DMLR or a GT reset. | ||
352 | * The parameter engine_mask specifies the engines that need to be | ||
353 | * reset. If the value ALL_ENGINES is given for engine_mask, it means | ||
354 | * the caller requests a full GT reset, i.e. we will reset all virtual | ||
355 | * GPU engines. For FLR, engine_mask is ignored. | ||
356 | */ | ||
357 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | ||
358 | unsigned int engine_mask) | ||
359 | { | ||
360 | struct intel_gvt *gvt = vgpu->gvt; | ||
361 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | ||
362 | |||
363 | gvt_dbg_core("------------------------------------------\n"); | ||
364 | gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", | ||
365 | vgpu->id, dmlr, engine_mask); | ||
366 | vgpu->resetting = true; | ||
367 | |||
368 | intel_vgpu_stop_schedule(vgpu); | ||
369 | /* | ||
370 | * The current_vgpu will set to NULL after stopping the | ||
371 | * scheduler when the reset is triggered by current vgpu. | ||
372 | */ | ||
373 | if (scheduler->current_vgpu == NULL) { | ||
374 | mutex_unlock(&gvt->lock); | ||
375 | intel_gvt_wait_vgpu_idle(vgpu); | ||
376 | mutex_lock(&gvt->lock); | ||
377 | } | ||
378 | |||
379 | intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); | ||
380 | |||
381 | /* full GPU reset or device model level reset */ | ||
382 | if (engine_mask == ALL_ENGINES || dmlr) { | ||
383 | intel_vgpu_reset_gtt(vgpu, dmlr); | ||
384 | intel_vgpu_reset_resource(vgpu); | ||
385 | intel_vgpu_reset_mmio(vgpu); | ||
386 | populate_pvinfo_page(vgpu); | ||
387 | |||
388 | if (dmlr) | ||
389 | intel_vgpu_reset_cfg_space(vgpu); | ||
390 | } | ||
391 | |||
392 | vgpu->resetting = false; | ||
393 | gvt_dbg_core("reset vgpu%d done\n", vgpu->id); | ||
394 | gvt_dbg_core("------------------------------------------\n"); | ||
395 | } | ||
396 | |||
397 | /** | ||
398 | * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) | ||
402 | * @vgpu: virtual GPU | 399 | * @vgpu: virtual GPU |
403 | * | 400 | * |
404 | * This function is called when user wants to reset a virtual GPU. | 401 | * This function is called when user wants to reset a virtual GPU. |
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
406 | */ | 403 | */ |
407 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) | 404 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) |
408 | { | 405 | { |
406 | mutex_lock(&vgpu->gvt->lock); | ||
407 | intel_gvt_reset_vgpu_locked(vgpu, true, 0); | ||
408 | mutex_unlock(&vgpu->gvt->lock); | ||
409 | } | 409 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 445fec9c2841..b2c4a0b8a627 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev) | |||
2378 | 2378 | ||
2379 | assert_forcewakes_inactive(dev_priv); | 2379 | assert_forcewakes_inactive(dev_priv); |
2380 | 2380 | ||
2381 | if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) | 2381 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
2382 | intel_hpd_poll_init(dev_priv); | 2382 | intel_hpd_poll_init(dev_priv); |
2383 | 2383 | ||
2384 | DRM_DEBUG_KMS("Device suspended\n"); | 2384 | DRM_DEBUG_KMS("Device suspended\n"); |
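The single-character i915_drv.c fix is a De Morgan slip: no device is both Valleyview and Cherryview, so !IS_VALLEYVIEW(p) || !IS_CHERRYVIEW(p) is always true and HPD polling was being set up even on VLV/CHV, where it must be skipped. "Neither VLV nor CHV" is the conjunction:

    /* !(VLV || CHV)  ==  !VLV && !CHV */
    if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
            intel_hpd_poll_init(dev_priv);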
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 243224aeabf8..69bc3b0c4390 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1977,6 +1977,11 @@ struct drm_i915_private { | |||
1977 | 1977 | ||
1978 | struct i915_frontbuffer_tracking fb_tracking; | 1978 | struct i915_frontbuffer_tracking fb_tracking; |
1979 | 1979 | ||
1980 | struct intel_atomic_helper { | ||
1981 | struct llist_head free_list; | ||
1982 | struct work_struct free_work; | ||
1983 | } atomic_helper; | ||
1984 | |||
1980 | u16 orig_clock; | 1985 | u16 orig_clock; |
1981 | 1986 | ||
1982 | bool mchbar_need_disable; | 1987 | bool mchbar_need_disable; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dd7fc662859..4b23a7814713 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
595 | struct drm_i915_gem_pwrite *args, | 595 | struct drm_i915_gem_pwrite *args, |
596 | struct drm_file *file) | 596 | struct drm_file *file) |
597 | { | 597 | { |
598 | struct drm_device *dev = obj->base.dev; | ||
599 | void *vaddr = obj->phys_handle->vaddr + args->offset; | 598 | void *vaddr = obj->phys_handle->vaddr + args->offset; |
600 | char __user *user_data = u64_to_user_ptr(args->data_ptr); | 599 | char __user *user_data = u64_to_user_ptr(args->data_ptr); |
601 | int ret; | ||
602 | 600 | ||
603 | /* We manually control the domain here and pretend that it | 601 | /* We manually control the domain here and pretend that it |
604 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. | 602 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. |
605 | */ | 603 | */ |
606 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
607 | ret = i915_gem_object_wait(obj, | ||
608 | I915_WAIT_INTERRUPTIBLE | | ||
609 | I915_WAIT_LOCKED | | ||
610 | I915_WAIT_ALL, | ||
611 | MAX_SCHEDULE_TIMEOUT, | ||
612 | to_rps_client(file)); | ||
613 | if (ret) | ||
614 | return ret; | ||
615 | |||
616 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); | 604 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
617 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 605 | if (copy_from_user(vaddr, user_data, args->size)) |
618 | unsigned long unwritten; | 606 | return -EFAULT; |
619 | |||
620 | /* The physical object once assigned is fixed for the lifetime | ||
621 | * of the obj, so we can safely drop the lock and continue | ||
622 | * to access vaddr. | ||
623 | */ | ||
624 | mutex_unlock(&dev->struct_mutex); | ||
625 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
626 | mutex_lock(&dev->struct_mutex); | ||
627 | if (unwritten) { | ||
628 | ret = -EFAULT; | ||
629 | goto out; | ||
630 | } | ||
631 | } | ||
632 | 607 | ||
633 | drm_clflush_virt_range(vaddr, args->size); | 608 | drm_clflush_virt_range(vaddr, args->size); |
634 | i915_gem_chipset_flush(to_i915(dev)); | 609 | i915_gem_chipset_flush(to_i915(obj->base.dev)); |
635 | 610 | ||
636 | out: | ||
637 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 611 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
638 | return ret; | 612 | return 0; |
639 | } | 613 | } |
640 | 614 | ||
641 | void *i915_gem_object_alloc(struct drm_device *dev) | 615 | void *i915_gem_object_alloc(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814b015c..d534a316a16e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -199,6 +199,7 @@ found: | |||
199 | } | 199 | } |
200 | 200 | ||
201 | /* Unbinding will emit any required flushes */ | 201 | /* Unbinding will emit any required flushes */ |
202 | ret = 0; | ||
202 | while (!list_empty(&eviction_list)) { | 203 | while (!list_empty(&eviction_list)) { |
203 | vma = list_first_entry(&eviction_list, | 204 | vma = list_first_entry(&eviction_list, |
204 | struct i915_vma, | 205 | struct i915_vma, |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a792dcb902b5..e924a9516079 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | |||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
187 | 187 | ||
188 | trace_i915_vma_bind(vma, bind_flags); | ||
188 | ret = vma->vm->bind_vma(vma, cache_level, bind_flags); | 189 | ret = vma->vm->bind_vma(vma, cache_level, bind_flags); |
189 | if (ret) | 190 | if (ret) |
190 | return ret; | 191 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 86ecec5601d4..588470eb8d39 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) | |||
499 | struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); | 499 | struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); |
500 | struct edid *edid; | 500 | struct edid *edid; |
501 | struct i2c_adapter *i2c; | 501 | struct i2c_adapter *i2c; |
502 | bool ret = false; | ||
502 | 503 | ||
503 | BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); | 504 | BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); |
504 | 505 | ||
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) | |||
515 | */ | 516 | */ |
516 | if (!is_digital) { | 517 | if (!is_digital) { |
517 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); | 518 | DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); |
518 | return true; | 519 | ret = true; |
520 | } else { | ||
521 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||
519 | } | 522 | } |
520 | |||
521 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); | ||
522 | } else { | 523 | } else { |
523 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); | 524 | DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); |
524 | } | 525 | } |
525 | 526 | ||
526 | kfree(edid); | 527 | kfree(edid); |
527 | 528 | ||
528 | return false; | 529 | return ret; |
529 | } | 530 | } |
530 | 531 | ||
531 | static enum drm_connector_status | 532 | static enum drm_connector_status |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3dc8724df400..f0b9aa7a0483 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2251,6 +2251,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) | |||
2251 | intel_fill_fb_ggtt_view(&view, fb, rotation); | 2251 | intel_fill_fb_ggtt_view(&view, fb, rotation); |
2252 | vma = i915_gem_object_to_ggtt(obj, &view); | 2252 | vma = i915_gem_object_to_ggtt(obj, &view); |
2253 | 2253 | ||
2254 | if (WARN_ON_ONCE(!vma)) | ||
2255 | return; | ||
2256 | |||
2254 | i915_vma_unpin_fence(vma); | 2257 | i915_vma_unpin_fence(vma); |
2255 | i915_gem_object_unpin_from_display_plane(vma); | 2258 | i915_gem_object_unpin_from_display_plane(vma); |
2256 | } | 2259 | } |
@@ -2585,8 +2588,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, | |||
2585 | * We only keep the x/y offsets, so push all of the | 2588 | * We only keep the x/y offsets, so push all of the |
2586 | * gtt offset into the x/y offsets. | 2589 | * gtt offset into the x/y offsets. |
2587 | */ | 2590 | */ |
2588 | _intel_adjust_tile_offset(&x, &y, tile_size, | 2591 | _intel_adjust_tile_offset(&x, &y, |
2589 | tile_width, tile_height, pitch_tiles, | 2592 | tile_width, tile_height, |
2593 | tile_size, pitch_tiles, | ||
2590 | gtt_offset_rotated * tile_size, 0); | 2594 | gtt_offset_rotated * tile_size, 0); |
2591 | 2595 | ||
2592 | gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; | 2596 | gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; |
@@ -2967,6 +2971,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) | |||
2967 | unsigned int rotation = plane_state->base.rotation; | 2971 | unsigned int rotation = plane_state->base.rotation; |
2968 | int ret; | 2972 | int ret; |
2969 | 2973 | ||
2974 | if (!plane_state->base.visible) | ||
2975 | return 0; | ||
2976 | |||
2970 | /* Rotate src coordinates to match rotated GTT view */ | 2977 | /* Rotate src coordinates to match rotated GTT view */ |
2971 | if (drm_rotation_90_or_270(rotation)) | 2978 | if (drm_rotation_90_or_270(rotation)) |
2972 | drm_rect_rotate(&plane_state->base.src, | 2979 | drm_rect_rotate(&plane_state->base.src, |
@@ -6846,6 +6853,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
6846 | } | 6853 | } |
6847 | 6854 | ||
6848 | state = drm_atomic_state_alloc(crtc->dev); | 6855 | state = drm_atomic_state_alloc(crtc->dev); |
6856 | if (!state) { | ||
6857 | DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", | ||
6858 | crtc->base.id, crtc->name); | ||
6859 | return; | ||
6860 | } | ||
6861 | |||
6849 | state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; | 6862 | state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; |
6850 | 6863 | ||
6851 | /* Everything's already locked, -EDEADLK can't happen. */ | 6864 | /* Everything's already locked, -EDEADLK can't happen. */ |
@@ -11243,6 +11256,7 @@ found: | |||
11243 | } | 11256 | } |
11244 | 11257 | ||
11245 | old->restore_state = restore_state; | 11258 | old->restore_state = restore_state; |
11259 | drm_atomic_state_put(state); | ||
11246 | 11260 | ||
11247 | /* let the connector get through one full cycle before testing */ | 11261 | /* let the connector get through one full cycle before testing */ |
11248 | intel_wait_for_vblank(dev_priv, intel_crtc->pipe); | 11262 | intel_wait_for_vblank(dev_priv, intel_crtc->pipe); |
@@ -14512,8 +14526,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, | |||
14512 | break; | 14526 | break; |
14513 | 14527 | ||
14514 | case FENCE_FREE: | 14528 | case FENCE_FREE: |
14515 | drm_atomic_state_put(&state->base); | 14529 | { |
14516 | break; | 14530 | struct intel_atomic_helper *helper = |
14531 | &to_i915(state->base.dev)->atomic_helper; | ||
14532 | |||
14533 | if (llist_add(&state->freed, &helper->free_list)) | ||
14534 | schedule_work(&helper->free_work); | ||
14535 | break; | ||
14536 | } | ||
14517 | } | 14537 | } |
14518 | 14538 | ||
14519 | return NOTIFY_DONE; | 14539 | return NOTIFY_DONE; |
@@ -16392,6 +16412,18 @@ fail: | |||
16392 | drm_modeset_acquire_fini(&ctx); | 16412 | drm_modeset_acquire_fini(&ctx); |
16393 | } | 16413 | } |
16394 | 16414 | ||
16415 | static void intel_atomic_helper_free_state(struct work_struct *work) | ||
16416 | { | ||
16417 | struct drm_i915_private *dev_priv = | ||
16418 | container_of(work, typeof(*dev_priv), atomic_helper.free_work); | ||
16419 | struct intel_atomic_state *state, *next; | ||
16420 | struct llist_node *freed; | ||
16421 | |||
16422 | freed = llist_del_all(&dev_priv->atomic_helper.free_list); | ||
16423 | llist_for_each_entry_safe(state, next, freed, freed) | ||
16424 | drm_atomic_state_put(&state->base); | ||
16425 | } | ||
16426 | |||
16395 | int intel_modeset_init(struct drm_device *dev) | 16427 | int intel_modeset_init(struct drm_device *dev) |
16396 | { | 16428 | { |
16397 | struct drm_i915_private *dev_priv = to_i915(dev); | 16429 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -16411,6 +16443,9 @@ int intel_modeset_init(struct drm_device *dev) | |||
16411 | 16443 | ||
16412 | dev->mode_config.funcs = &intel_mode_funcs; | 16444 | dev->mode_config.funcs = &intel_mode_funcs; |
16413 | 16445 | ||
16446 | INIT_WORK(&dev_priv->atomic_helper.free_work, | ||
16447 | intel_atomic_helper_free_state); | ||
16448 | |||
16414 | intel_init_quirks(dev); | 16449 | intel_init_quirks(dev); |
16415 | 16450 | ||
16416 | intel_init_pm(dev_priv); | 16451 | intel_init_pm(dev_priv); |
@@ -17024,7 +17059,8 @@ void intel_display_resume(struct drm_device *dev) | |||
17024 | 17059 | ||
17025 | if (ret) | 17060 | if (ret) |
17026 | DRM_ERROR("Restoring old state failed with %i\n", ret); | 17061 | DRM_ERROR("Restoring old state failed with %i\n", ret); |
17027 | drm_atomic_state_put(state); | 17062 | if (state) |
17063 | drm_atomic_state_put(state); | ||
17028 | } | 17064 | } |
17029 | 17065 | ||
17030 | void intel_modeset_gem_init(struct drm_device *dev) | 17066 | void intel_modeset_gem_init(struct drm_device *dev) |
@@ -17094,6 +17130,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
17094 | { | 17130 | { |
17095 | struct drm_i915_private *dev_priv = to_i915(dev); | 17131 | struct drm_i915_private *dev_priv = to_i915(dev); |
17096 | 17132 | ||
17133 | flush_work(&dev_priv->atomic_helper.free_work); | ||
17134 | WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); | ||
17135 | |||
17097 | intel_disable_gt_powersave(dev_priv); | 17136 | intel_disable_gt_powersave(dev_priv); |
17098 | 17137 | ||
17099 | /* | 17138 | /* |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd132c216a67..cd72ae171eeb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -370,6 +370,8 @@ struct intel_atomic_state { | |||
370 | struct skl_wm_values wm_results; | 370 | struct skl_wm_values wm_results; |
371 | 371 | ||
372 | struct i915_sw_fence commit_ready; | 372 | struct i915_sw_fence commit_ready; |
373 | |||
374 | struct llist_node freed; | ||
373 | }; | 375 | }; |
374 | 376 | ||
375 | struct intel_plane_state { | 377 | struct intel_plane_state { |
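The intel_display.c and intel_drv.h hunks above defer the freeing of an intel_atomic_state: the FENCE_FREE callback can fire in a context where drm_atomic_state_put() is not safe to call, so states are pushed onto a lock-free llist and a worker drains the list. A stripped-down sketch of the same producer/worker idiom, with illustrative names and a plain kfree() standing in for the real release:

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct llist_node node;
	/* payload ... */
};

static LLIST_HEAD(free_list);
static void free_worker(struct work_struct *work);
static DECLARE_WORK(free_work, free_worker);

static void free_worker(struct work_struct *work)
{
	struct deferred_item *item, *next;
	struct llist_node *freed = llist_del_all(&free_list);

	llist_for_each_entry_safe(item, next, freed, node)
		kfree(item);
}

/* Safe from atomic context: llist_add() is lock-free and returns true
 * only when the list was empty, so the worker is scheduled exactly once
 * per batch -- the same trick the FENCE_FREE hunk relies on. */
static void defer_free(struct deferred_item *item)
{
	if (llist_add(&item->node, &free_list))
		schedule_work(&free_work);
}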
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index beb08982dc0b..8cf2d80f2254 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev) | |||
742 | { | 742 | { |
743 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 743 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
744 | 744 | ||
745 | if (!ifbdev) | ||
746 | return; | ||
747 | |||
745 | ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); | 748 | ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); |
746 | } | 749 | } |
747 | 750 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d4961fa20c73..beabc17e7c8a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, | |||
979 | uint32_t *batch, | 979 | uint32_t *batch, |
980 | uint32_t index) | 980 | uint32_t index) |
981 | { | 981 | { |
982 | struct drm_i915_private *dev_priv = engine->i915; | ||
983 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); | 982 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); |
984 | 983 | ||
985 | /* | ||
986 | * WaDisableLSQCROPERFforOCL:kbl | ||
987 | * This WA is implemented in skl_init_clock_gating() but since | ||
988 | * this batch updates GEN8_L3SQCREG4 with default value we need to | ||
989 | * set this bit here to retain the WA during flush. | ||
990 | */ | ||
991 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) | ||
992 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; | ||
993 | |||
994 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | | 984 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | |
995 | MI_SRM_LRM_GLOBAL_GTT)); | 985 | MI_SRM_LRM_GLOBAL_GTT)); |
996 | wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); | 986 | wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637dc1fdf..91cb4c422ad5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) | |||
1095 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | 1095 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
1096 | HDC_FENCE_DEST_SLM_DISABLE); | 1096 | HDC_FENCE_DEST_SLM_DISABLE); |
1097 | 1097 | ||
1098 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
1099 | * involving this register should also be added to WA batch as required. | ||
1100 | */ | ||
1101 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) | ||
1102 | /* WaDisableLSQCROPERFforOCL:kbl */ | ||
1103 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
1104 | GEN8_LQSC_RO_PERF_DIS); | ||
1105 | |||
1106 | /* WaToEnableHwFixForPushConstHWBug:kbl */ | 1098 | /* WaToEnableHwFixForPushConstHWBug:kbl */ |
1107 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) | 1099 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) |
1108 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1100 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 14ff87686a36..686a580c711a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
345 | { | 345 | { |
346 | struct adreno_platform_config *config = pdev->dev.platform_data; | 346 | struct adreno_platform_config *config = pdev->dev.platform_data; |
347 | struct msm_gpu *gpu = &adreno_gpu->base; | 347 | struct msm_gpu *gpu = &adreno_gpu->base; |
348 | struct msm_mmu *mmu; | ||
349 | int ret; | 348 | int ret; |
350 | 349 | ||
351 | adreno_gpu->funcs = funcs; | 350 | adreno_gpu->funcs = funcs; |
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
385 | return ret; | 384 | return ret; |
386 | } | 385 | } |
387 | 386 | ||
388 | mmu = gpu->aspace->mmu; | 387 | if (gpu->aspace && gpu->aspace->mmu) { |
389 | if (mmu) { | 388 | struct msm_mmu *mmu = gpu->aspace->mmu; |
390 | ret = mmu->funcs->attach(mmu, iommu_ports, | 389 | ret = mmu->funcs->attach(mmu, iommu_ports, |
391 | ARRAY_SIZE(iommu_ports)); | 390 | ARRAY_SIZE(iommu_ports)); |
392 | if (ret) | 391 | if (ret) |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5f6cd8745dbc..c396d459a9d0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st | |||
119 | 119 | ||
120 | static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) | 120 | static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) |
121 | { | 121 | { |
122 | int i; | ||
123 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 122 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
124 | struct drm_plane *plane; | ||
125 | struct drm_plane_state *plane_state; | ||
126 | |||
127 | for_each_plane_in_state(state, plane, plane_state, i) | ||
128 | mdp5_plane_complete_commit(plane, plane_state); | ||
129 | 123 | ||
130 | if (mdp5_kms->smp) | 124 | if (mdp5_kms->smp) |
131 | mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); | 125 | mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17b0cc101171..cdfc63d90c7b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | |||
@@ -104,8 +104,6 @@ struct mdp5_plane_state { | |||
104 | 104 | ||
105 | /* assigned by crtc blender */ | 105 | /* assigned by crtc blender */ |
106 | enum mdp_mixer_stage_id stage; | 106 | enum mdp_mixer_stage_id stage; |
107 | |||
108 | bool pending : 1; | ||
109 | }; | 107 | }; |
110 | #define to_mdp5_plane_state(x) \ | 108 | #define to_mdp5_plane_state(x) \ |
111 | container_of(x, struct mdp5_plane_state, base) | 109 | container_of(x, struct mdp5_plane_state, base) |
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); | |||
232 | void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); | 230 | void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); |
233 | 231 | ||
234 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); | 232 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); |
235 | void mdp5_plane_complete_commit(struct drm_plane *plane, | ||
236 | struct drm_plane_state *state); | ||
237 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); | 233 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); |
238 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); | 234 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); |
239 | 235 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index c099da7bc212..25d9d0a97156 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, | |||
179 | drm_printf(p, "\tzpos=%u\n", pstate->zpos); | 179 | drm_printf(p, "\tzpos=%u\n", pstate->zpos); |
180 | drm_printf(p, "\talpha=%u\n", pstate->alpha); | 180 | drm_printf(p, "\talpha=%u\n", pstate->alpha); |
181 | drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); | 181 | drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); |
182 | drm_printf(p, "\tpending=%u\n", pstate->pending); | ||
183 | } | 182 | } |
184 | 183 | ||
185 | static void mdp5_plane_reset(struct drm_plane *plane) | 184 | static void mdp5_plane_reset(struct drm_plane *plane) |
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) | |||
220 | if (mdp5_state && mdp5_state->base.fb) | 219 | if (mdp5_state && mdp5_state->base.fb) |
221 | drm_framebuffer_reference(mdp5_state->base.fb); | 220 | drm_framebuffer_reference(mdp5_state->base.fb); |
222 | 221 | ||
223 | mdp5_state->pending = false; | ||
224 | |||
225 | return &mdp5_state->base; | 222 | return &mdp5_state->base; |
226 | } | 223 | } |
227 | 224 | ||
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
288 | DBG("%s: check (%d -> %d)", plane->name, | 285 | DBG("%s: check (%d -> %d)", plane->name, |
289 | plane_enabled(old_state), plane_enabled(state)); | 286 | plane_enabled(old_state), plane_enabled(state)); |
290 | 287 | ||
291 | /* We don't allow faster-than-vblank updates.. if we did add this | ||
292 | * some day, we would need to disallow in cases where hwpipe | ||
293 | * changes | ||
294 | */ | ||
295 | if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) | ||
296 | return -EBUSY; | ||
297 | |||
298 | max_width = config->hw->lm.max_width << 16; | 288 | max_width = config->hw->lm.max_width << 16; |
299 | max_height = config->hw->lm.max_height << 16; | 289 | max_height = config->hw->lm.max_height << 16; |
300 | 290 | ||
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, | |||
370 | struct drm_plane_state *old_state) | 360 | struct drm_plane_state *old_state) |
371 | { | 361 | { |
372 | struct drm_plane_state *state = plane->state; | 362 | struct drm_plane_state *state = plane->state; |
373 | struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); | ||
374 | 363 | ||
375 | DBG("%s: update", plane->name); | 364 | DBG("%s: update", plane->name); |
376 | 365 | ||
377 | mdp5_state->pending = true; | ||
378 | |||
379 | if (plane_enabled(state)) { | 366 | if (plane_enabled(state)) { |
380 | int ret; | 367 | int ret; |
381 | 368 | ||
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) | |||
851 | return pstate->hwpipe->flush_mask; | 838 | return pstate->hwpipe->flush_mask; |
852 | } | 839 | } |
853 | 840 | ||
854 | /* called after vsync in thread context */ | ||
855 | void mdp5_plane_complete_commit(struct drm_plane *plane, | ||
856 | struct drm_plane_state *state) | ||
857 | { | ||
858 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); | ||
859 | |||
860 | pstate->pending = false; | ||
861 | } | ||
862 | |||
863 | /* initialize plane */ | 841 | /* initialize plane */ |
864 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) | 842 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) |
865 | { | 843 | { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8bc59c7e261..8098677a3916 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj) | |||
294 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 294 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
295 | 295 | ||
296 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { | 296 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { |
297 | if (!priv->aspace[id]) | ||
298 | continue; | ||
297 | msm_gem_unmap_vma(priv->aspace[id], | 299 | msm_gem_unmap_vma(priv->aspace[id], |
298 | &msm_obj->domain[id], msm_obj->sgt); | 300 | &msm_obj->domain[id], msm_obj->sgt); |
299 | } | 301 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cef08da1da4e..6a157763dfc3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev) | |||
411 | return ret; | 411 | return ret; |
412 | 412 | ||
413 | /* enable polling for external displays */ | 413 | /* enable polling for external displays */ |
414 | drm_kms_helper_poll_enable(dev); | 414 | if (!dev->mode_config.poll_enabled) |
415 | drm_kms_helper_poll_enable(dev); | ||
415 | 416 | ||
416 | /* enable hotplug interrupts */ | 417 | /* enable hotplug interrupts */ |
417 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 418 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 59348fc41c77..bc85a45f91cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev) | |||
773 | pci_set_master(pdev); | 773 | pci_set_master(pdev); |
774 | 774 | ||
775 | ret = nouveau_do_resume(drm_dev, true); | 775 | ret = nouveau_do_resume(drm_dev, true); |
776 | drm_kms_helper_poll_enable(drm_dev); | 776 | |
777 | if (!drm_dev->mode_config.poll_enabled) | ||
778 | drm_kms_helper_poll_enable(drm_dev); | ||
779 | |||
777 | /* do magic */ | 780 | /* do magic */ |
778 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); | 781 | nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); |
779 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); | 782 | vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d5ed5bfdacb..42c1fa53d431 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -165,6 +165,8 @@ struct nouveau_drm { | |||
165 | struct backlight_device *backlight; | 165 | struct backlight_device *backlight; |
166 | struct list_head bl_connectors; | 166 | struct list_head bl_connectors; |
167 | struct work_struct hpd_work; | 167 | struct work_struct hpd_work; |
168 | struct work_struct fbcon_work; | ||
169 | int fbcon_new_state; | ||
168 | #ifdef CONFIG_ACPI | 170 | #ifdef CONFIG_ACPI |
169 | struct notifier_block acpi_nb; | 171 | struct notifier_block acpi_nb; |
170 | #endif | 172 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2f2a3dcd4ad7..fa2d0a978ccc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { | |||
470 | .fb_probe = nouveau_fbcon_create, | 470 | .fb_probe = nouveau_fbcon_create, |
471 | }; | 471 | }; |
472 | 472 | ||
473 | static void | ||
474 | nouveau_fbcon_set_suspend_work(struct work_struct *work) | ||
475 | { | ||
476 | struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); | ||
477 | int state = READ_ONCE(drm->fbcon_new_state); | ||
478 | |||
479 | if (state == FBINFO_STATE_RUNNING) | ||
480 | pm_runtime_get_sync(drm->dev->dev); | ||
481 | |||
482 | console_lock(); | ||
483 | if (state == FBINFO_STATE_RUNNING) | ||
484 | nouveau_fbcon_accel_restore(drm->dev); | ||
485 | drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | ||
486 | if (state != FBINFO_STATE_RUNNING) | ||
487 | nouveau_fbcon_accel_save_disable(drm->dev); | ||
488 | console_unlock(); | ||
489 | |||
490 | if (state == FBINFO_STATE_RUNNING) { | ||
491 | pm_runtime_mark_last_busy(drm->dev->dev); | ||
492 | pm_runtime_put_sync(drm->dev->dev); | ||
493 | } | ||
494 | } | ||
495 | |||
473 | void | 496 | void |
474 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | 497 | nouveau_fbcon_set_suspend(struct drm_device *dev, int state) |
475 | { | 498 | { |
476 | struct nouveau_drm *drm = nouveau_drm(dev); | 499 | struct nouveau_drm *drm = nouveau_drm(dev); |
477 | if (drm->fbcon) { | 500 | |
478 | console_lock(); | 501 | if (!drm->fbcon) |
479 | if (state == FBINFO_STATE_RUNNING) | 502 | return; |
480 | nouveau_fbcon_accel_restore(dev); | 503 | |
481 | drm_fb_helper_set_suspend(&drm->fbcon->helper, state); | 504 | drm->fbcon_new_state = state; |
482 | if (state != FBINFO_STATE_RUNNING) | 505 | /* Since runtime resume can happen as a result of a sysfs operation, |
483 | nouveau_fbcon_accel_save_disable(dev); | 506 | * it's possible we already have the console locked. So handle fbcon |
484 | console_unlock(); | 507 | * init/deinit from a seperate work thread |
485 | } | 508 | */ |
509 | schedule_work(&drm->fbcon_work); | ||
486 | } | 510 | } |
487 | 511 | ||
488 | int | 512 | int |
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
502 | return -ENOMEM; | 526 | return -ENOMEM; |
503 | 527 | ||
504 | drm->fbcon = fbcon; | 528 | drm->fbcon = fbcon; |
529 | INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); | ||
505 | 530 | ||
506 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); | 531 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); |
507 | 532 | ||
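The nouveau hunks move the console_lock()-protected fbcon suspend/resume into a work item: nouveau_fbcon_set_suspend() now only records the requested state and schedules drm->fbcon_work, so a runtime resume triggered from a path that already holds the console lock cannot deadlock. The handoff in isolation, with a hypothetical apply_state() and state_work assumed initialized via INIT_WORK(&state_work, state_worker):

#include <linux/console.h>
#include <linux/workqueue.h>

static int new_state;
static struct work_struct state_work;

static void request_state(int state)
{
	/* publish the requested state before kicking the worker; the
	 * worker reads the latest value when it runs */
	WRITE_ONCE(new_state, state);
	schedule_work(&state_work);
}

static void state_worker(struct work_struct *work)
{
	int state = READ_ONCE(new_state);

	console_lock();
	apply_state(state);	/* hypothetical: the real code suspends/resumes fbcon */
	console_unlock();
}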
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 00ea0002b539..e0c143b865f3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -366,11 +366,10 @@ static void | |||
366 | radeon_pci_shutdown(struct pci_dev *pdev) | 366 | radeon_pci_shutdown(struct pci_dev *pdev) |
367 | { | 367 | { |
368 | /* if we are running in a VM, make sure the device is | 368 | /* if we are running in a VM, make sure the device is |
369 | * torn down properly on reboot/shutdown. | 369 | * torn down properly on reboot/shutdown |
370 | * unfortunately we can't detect certain | ||
371 | * hypervisors so just do this all the time. | ||
372 | */ | 370 | */ |
373 | radeon_pci_remove(pdev); | 371 | if (radeon_device_is_virtual()) |
372 | radeon_pci_remove(pdev); | ||
374 | } | 373 | } |
375 | 374 | ||
376 | static int radeon_pmops_suspend(struct device *dev) | 375 | static int radeon_pmops_suspend(struct device *dev) |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e8a38d296855..414776811e71 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin"); | |||
114 | MODULE_FIRMWARE("radeon/hainan_rlc.bin"); | 114 | MODULE_FIRMWARE("radeon/hainan_rlc.bin"); |
115 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); | 115 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); |
116 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); | 116 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); |
117 | MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); | ||
118 | |||
119 | MODULE_FIRMWARE("radeon/si58_mc.bin"); | ||
117 | 120 | ||
118 | static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); | 121 | static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); |
119 | static void si_pcie_gen3_enable(struct radeon_device *rdev); | 122 | static void si_pcie_gen3_enable(struct radeon_device *rdev); |
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1650 | int err; | 1653 | int err; |
1651 | int new_fw = 0; | 1654 | int new_fw = 0; |
1652 | bool new_smc = false; | 1655 | bool new_smc = false; |
1656 | bool si58_fw = false; | ||
1657 | bool banks2_fw = false; | ||
1653 | 1658 | ||
1654 | DRM_DEBUG("\n"); | 1659 | DRM_DEBUG("\n"); |
1655 | 1660 | ||
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1727 | ((rdev->pdev->device == 0x6660) || | 1732 | ((rdev->pdev->device == 0x6660) || |
1728 | (rdev->pdev->device == 0x6663) || | 1733 | (rdev->pdev->device == 0x6663) || |
1729 | (rdev->pdev->device == 0x6665) || | 1734 | (rdev->pdev->device == 0x6665) || |
1730 | (rdev->pdev->device == 0x6667))) || | 1735 | (rdev->pdev->device == 0x6667)))) |
1731 | ((rdev->pdev->revision == 0xc3) && | ||
1732 | (rdev->pdev->device == 0x6665))) | ||
1733 | new_smc = true; | 1736 | new_smc = true; |
1737 | else if ((rdev->pdev->revision == 0xc3) && | ||
1738 | (rdev->pdev->device == 0x6665)) | ||
1739 | banks2_fw = true; | ||
1734 | new_chip_name = "hainan"; | 1740 | new_chip_name = "hainan"; |
1735 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; | 1741 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; |
1736 | me_req_size = SI_PM4_UCODE_SIZE * 4; | 1742 | me_req_size = SI_PM4_UCODE_SIZE * 4; |
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1742 | default: BUG(); | 1748 | default: BUG(); |
1743 | } | 1749 | } |
1744 | 1750 | ||
1751 | /* this memory configuration requires special firmware */ | ||
1752 | if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) | ||
1753 | si58_fw = true; | ||
1754 | |||
1745 | DRM_INFO("Loading %s Microcode\n", new_chip_name); | 1755 | DRM_INFO("Loading %s Microcode\n", new_chip_name); |
1746 | 1756 | ||
1747 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); | 1757 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); |
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1845 | } | 1855 | } |
1846 | } | 1856 | } |
1847 | 1857 | ||
1848 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); | 1858 | if (si58_fw) |
1859 | snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); | ||
1860 | else | ||
1861 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); | ||
1849 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); | 1862 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
1850 | if (err) { | 1863 | if (err) { |
1851 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); | 1864 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); |
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1876 | } | 1889 | } |
1877 | } | 1890 | } |
1878 | 1891 | ||
1879 | if (new_smc) | 1892 | if (banks2_fw) |
1893 | snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); | ||
1894 | else if (new_smc) | ||
1880 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); | 1895 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); |
1881 | else | 1896 | else |
1882 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); | 1897 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); |
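The si.c changes add two special-case firmware images: banks_k_2_smc.bin for the 0xc3/0x6665 part (previously lumped into new_smc) and si58_mc.bin when bits 31:24 of MC_SEQ_MISC0 read 0x58. Condensed, the SMC selection reduces to roughly the following sketch (hainan shown; the real code formats the chip name into fw_name, and rdev->smc_fw / request_firmware() are used as elsewhere in the driver):

/* Condensed sketch of the SMC firmware selection above. */
const char *smc_name;

if (banks2_fw)
	smc_name = "radeon/banks_k_2_smc.bin";
else if (new_smc)
	smc_name = "radeon/hainan_k_smc.bin";	/* "%s_k_smc.bin" */
else
	smc_name = "radeon/hainan_smc.bin";	/* "%s_smc.bin" */

err = request_firmware(&rdev->smc_fw, smc_name, rdev->dev);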
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 13ba73fd9b68..2944916f7102 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
3008 | (rdev->pdev->device == 0x6817) || | 3008 | (rdev->pdev->device == 0x6817) || |
3009 | (rdev->pdev->device == 0x6806)) | 3009 | (rdev->pdev->device == 0x6806)) |
3010 | max_mclk = 120000; | 3010 | max_mclk = 120000; |
3011 | } else if (rdev->family == CHIP_OLAND) { | ||
3012 | if ((rdev->pdev->revision == 0xC7) || | ||
3013 | (rdev->pdev->revision == 0x80) || | ||
3014 | (rdev->pdev->revision == 0x81) || | ||
3015 | (rdev->pdev->revision == 0x83) || | ||
3016 | (rdev->pdev->revision == 0x87) || | ||
3017 | (rdev->pdev->device == 0x6604) || | ||
3018 | (rdev->pdev->device == 0x6605)) { | ||
3019 | max_sclk = 75000; | ||
3020 | max_mclk = 80000; | ||
3021 | } | ||
3022 | } else if (rdev->family == CHIP_HAINAN) { | 3011 | } else if (rdev->family == CHIP_HAINAN) { |
3023 | if ((rdev->pdev->revision == 0x81) || | 3012 | if ((rdev->pdev->revision == 0x81) || |
3024 | (rdev->pdev->revision == 0x83) || | 3013 | (rdev->pdev->revision == 0x83) || |
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
3027 | (rdev->pdev->device == 0x6665) || | 3016 | (rdev->pdev->device == 0x6665) || |
3028 | (rdev->pdev->device == 0x6667)) { | 3017 | (rdev->pdev->device == 0x6667)) { |
3029 | max_sclk = 75000; | 3018 | max_sclk = 75000; |
3030 | max_mclk = 80000; | ||
3031 | } | 3019 | } |
3032 | } | 3020 | } |
3033 | /* Apply dpm quirks */ | 3021 | /* Apply dpm quirks */ |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index a0fd3e66bc4b..7aadce1f7e7a 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, | |||
839 | 839 | ||
840 | } | 840 | } |
841 | 841 | ||
842 | __drm_atomic_helper_crtc_destroy_state(state); | 842 | drm_atomic_helper_crtc_destroy_state(crtc, state); |
843 | } | 843 | } |
844 | 844 | ||
845 | static const struct drm_crtc_funcs vc4_crtc_funcs = { | 845 | static const struct drm_crtc_funcs vc4_crtc_funcs = { |
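The vc4 one-liner matters because the two helpers differ in ownership: __drm_atomic_helper_crtc_destroy_state() only releases the references held by the state, while drm_atomic_helper_crtc_destroy_state() also frees the state itself, so the old call leaked the allocation on every destroy. The core helper is roughly:

/* Rough shape of the helper now being called; the __ variant cleans up
 * the state's references but deliberately does not free the memory. */
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
					  struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(state);
}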
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index db920771bfb5..ab3016982466 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
594 | args->shader_rec_count); | 594 | args->shader_rec_count); |
595 | struct vc4_bo *bo; | 595 | struct vc4_bo *bo; |
596 | 596 | ||
597 | if (uniforms_offset < shader_rec_offset || | 597 | if (shader_rec_offset < args->bin_cl_size || |
598 | uniforms_offset < shader_rec_offset || | ||
598 | exec_size < uniforms_offset || | 599 | exec_size < uniforms_offset || |
599 | args->shader_rec_count >= (UINT_MAX / | 600 | args->shader_rec_count >= (UINT_MAX / |
600 | sizeof(struct vc4_shader_state)) || | 601 | sizeof(struct vc4_shader_state)) || |
601 | temp_size < exec_size) { | 602 | temp_size < exec_size) { |
602 | DRM_ERROR("overflow in exec arguments\n"); | 603 | DRM_ERROR("overflow in exec arguments\n"); |
604 | ret = -EINVAL; | ||
603 | goto fail; | 605 | goto fail; |
604 | } | 606 | } |
605 | 607 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 08886a309757..5cdd003605f5 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c | |||
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec, | |||
461 | } | 461 | } |
462 | 462 | ||
463 | ret = vc4_full_res_bounds_check(exec, *obj, surf); | 463 | ret = vc4_full_res_bounds_check(exec, *obj, surf); |
464 | if (!ret) | 464 | if (ret) |
465 | return ret; | 465 | return ret; |
466 | 466 | ||
467 | return 0; | 467 | return 0; |
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index dd21f950e129..cde9f3758106 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c | |||
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, | |||
331 | info->fbops = &virtio_gpufb_ops; | 331 | info->fbops = &virtio_gpufb_ops; |
332 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | 332 | info->pixmap.flags = FB_PIXMAP_SYSTEM; |
333 | 333 | ||
334 | info->screen_base = obj->vmap; | 334 | info->screen_buffer = obj->vmap; |
335 | info->screen_size = obj->gem_base.size; | 335 | info->screen_size = obj->gem_base.size; |
336 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | 336 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
337 | drm_fb_helper_fill_var(info, &vfbdev->helper, | 337 | drm_fb_helper_fill_var(info, &vfbdev->helper, |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 686971263bef..45d6771fac8c 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev) | |||
962 | goto err_clk_dis; | 962 | goto err_clk_dis; |
963 | } | 963 | } |
964 | 964 | ||
965 | ret = i2c_add_adapter(&id->adap); | ||
966 | if (ret < 0) | ||
967 | goto err_clk_dis; | ||
968 | |||
969 | /* | 965 | /* |
970 | * Cadence I2C controller has a bug wherein it generates | 966 | * Cadence I2C controller has a bug wherein it generates |
971 | * an invalid read transaction after HW timeout in master receiver mode. | 967 | * an invalid read transaction after HW timeout in master receiver mode. |
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) | |||
975 | */ | 971 | */ |
976 | cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); | 972 | cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); |
977 | 973 | ||
974 | ret = i2c_add_adapter(&id->adap); | ||
975 | if (ret < 0) | ||
976 | goto err_clk_dis; | ||
977 | |||
978 | dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", | 978 | dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", |
979 | id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); | 979 | id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); |
980 | 980 | ||
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index c62b7cd475f8..3310f2e0dbd3 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/of.h> | 29 | #include <linux/of.h> |
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/pinctrl/consumer.h> | ||
31 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
32 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev) | |||
636 | return 0; | 637 | return 0; |
637 | } | 638 | } |
638 | 639 | ||
640 | #ifdef CONFIG_PM_SLEEP | ||
641 | static int lpi2c_imx_suspend(struct device *dev) | ||
642 | { | ||
643 | pinctrl_pm_select_sleep_state(dev); | ||
644 | |||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | static int lpi2c_imx_resume(struct device *dev) | ||
649 | { | ||
650 | pinctrl_pm_select_default_state(dev); | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | #endif | ||
655 | |||
656 | static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume); | ||
657 | |||
639 | static struct platform_driver lpi2c_imx_driver = { | 658 | static struct platform_driver lpi2c_imx_driver = { |
640 | .probe = lpi2c_imx_probe, | 659 | .probe = lpi2c_imx_probe, |
641 | .remove = lpi2c_imx_remove, | 660 | .remove = lpi2c_imx_remove, |
642 | .driver = { | 661 | .driver = { |
643 | .name = DRIVER_NAME, | 662 | .name = DRIVER_NAME, |
644 | .of_match_table = lpi2c_imx_of_match, | 663 | .of_match_table = lpi2c_imx_of_match, |
664 | .pm = &imx_lpi2c_pm, | ||
645 | }, | 665 | }, |
646 | }; | 666 | }; |
647 | 667 | ||
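In the lpi2c hunk, SIMPLE_DEV_PM_OPS() packs the two callbacks into a struct dev_pm_ops covering all system-sleep transitions (suspend/resume, freeze/thaw, poweroff/restore); the CONFIG_PM_SLEEP guard avoids defined-but-unused warnings when sleep support is compiled out. The macro is roughly equivalent to:

/* Rough open-coded equivalent of the SIMPLE_DEV_PM_OPS() line above. */
static const struct dev_pm_ops imx_lpi2c_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(lpi2c_imx_suspend, lpi2c_imx_resume)
};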
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e7dcfac877ca..3e70a9c5d79d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, | |||
2811 | if (!src_addr || !src_addr->sa_family) { | 2811 | if (!src_addr || !src_addr->sa_family) { |
2812 | src_addr = (struct sockaddr *) &id->route.addr.src_addr; | 2812 | src_addr = (struct sockaddr *) &id->route.addr.src_addr; |
2813 | src_addr->sa_family = dst_addr->sa_family; | 2813 | src_addr->sa_family = dst_addr->sa_family; |
2814 | if (dst_addr->sa_family == AF_INET6) { | 2814 | if (IS_ENABLED(CONFIG_IPV6) && |
2815 | dst_addr->sa_family == AF_INET6) { | ||
2815 | struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; | 2816 | struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; |
2816 | struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; | 2817 | struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; |
2817 | src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; | 2818 | src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; |
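IS_ENABLED(CONFIG_IPV6) in the cma.c hunk evaluates to a compile-time constant (1 for =y or =m, 0 otherwise), so the AF_INET6 branch is still type-checked but dead-code-eliminated on kernels built without IPv6, and at runtime the guard keeps the CM from synthesizing an IPv6 source address the stack cannot service. The idiom in isolation:

/* Compile-time guard: the branch body must still compile, but the
 * compiler drops it entirely when CONFIG_IPV6 is disabled. */
if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
	/* IPv6-only handling, e.g. copying sin6_scope_id */
}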
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 1e62a5f0cb28..4609b921f899 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
134 | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); | 134 | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); |
135 | 135 | ||
136 | if (access & IB_ACCESS_ON_DEMAND) { | 136 | if (access & IB_ACCESS_ON_DEMAND) { |
137 | put_pid(umem->pid); | ||
137 | ret = ib_umem_odp_get(context, umem); | 138 | ret = ib_umem_odp_get(context, umem); |
138 | if (ret) { | 139 | if (ret) { |
139 | kfree(umem); | 140 | kfree(umem); |
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
149 | 150 | ||
150 | page_list = (struct page **) __get_free_page(GFP_KERNEL); | 151 | page_list = (struct page **) __get_free_page(GFP_KERNEL); |
151 | if (!page_list) { | 152 | if (!page_list) { |
153 | put_pid(umem->pid); | ||
152 | kfree(umem); | 154 | kfree(umem); |
153 | return ERR_PTR(-ENOMEM); | 155 | return ERR_PTR(-ENOMEM); |
154 | } | 156 | } |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 9d5fe1853da4..6262dc035f3c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev, | |||
1135 | 1135 | ||
1136 | memset(props, 0, sizeof(struct ib_port_attr)); | 1136 | memset(props, 0, sizeof(struct ib_port_attr)); |
1137 | props->max_mtu = IB_MTU_4096; | 1137 | props->max_mtu = IB_MTU_4096; |
1138 | if (netdev->mtu >= 4096) | 1138 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); |
1139 | props->active_mtu = IB_MTU_4096; | ||
1140 | else if (netdev->mtu >= 2048) | ||
1141 | props->active_mtu = IB_MTU_2048; | ||
1142 | else if (netdev->mtu >= 1024) | ||
1143 | props->active_mtu = IB_MTU_1024; | ||
1144 | else if (netdev->mtu >= 512) | ||
1145 | props->active_mtu = IB_MTU_512; | ||
1146 | else | ||
1147 | props->active_mtu = IB_MTU_256; | ||
1148 | 1139 | ||
1149 | if (!netif_carrier_ok(netdev)) | 1140 | if (!netif_carrier_ok(netdev)) |
1150 | props->state = IB_PORT_DOWN; | 1141 | props->state = IB_PORT_DOWN; |
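Both cxgb3 here and cxgb4 further down replace the same open-coded MTU ladder with ib_mtu_int_to_enum(). Judging from the ladder it replaces, the helper maps an integer MTU to the largest enum ib_mtu not exceeding it; a sketch reconstructed from the deleted code (the in-tree helper lives in the rdma core headers):

/* Sketch of ib_mtu_int_to_enum(), reconstructed from the ladder above. */
static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}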
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index f1510cc76d2d..9398143d7c5e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |||
1804 | skb_trim(skb, dlen); | 1804 | skb_trim(skb, dlen); |
1805 | mutex_lock(&ep->com.mutex); | 1805 | mutex_lock(&ep->com.mutex); |
1806 | 1806 | ||
1807 | /* update RX credits */ | ||
1808 | update_rx_credits(ep, dlen); | ||
1809 | |||
1810 | switch (ep->com.state) { | 1807 | switch (ep->com.state) { |
1811 | case MPA_REQ_SENT: | 1808 | case MPA_REQ_SENT: |
1809 | update_rx_credits(ep, dlen); | ||
1812 | ep->rcv_seq += dlen; | 1810 | ep->rcv_seq += dlen; |
1813 | disconnect = process_mpa_reply(ep, skb); | 1811 | disconnect = process_mpa_reply(ep, skb); |
1814 | break; | 1812 | break; |
1815 | case MPA_REQ_WAIT: | 1813 | case MPA_REQ_WAIT: |
1814 | update_rx_credits(ep, dlen); | ||
1816 | ep->rcv_seq += dlen; | 1815 | ep->rcv_seq += dlen; |
1817 | disconnect = process_mpa_request(ep, skb); | 1816 | disconnect = process_mpa_request(ep, skb); |
1818 | break; | 1817 | break; |
1819 | case FPDU_MODE: { | 1818 | case FPDU_MODE: { |
1820 | struct c4iw_qp_attributes attrs; | 1819 | struct c4iw_qp_attributes attrs; |
1820 | |||
1821 | update_rx_credits(ep, dlen); | ||
1821 | BUG_ON(!ep->com.qp); | 1822 | BUG_ON(!ep->com.qp); |
1822 | if (status) | 1823 | if (status) |
1823 | pr_err("%s Unexpected streaming data." \ | 1824 | pr_err("%s Unexpected streaming data." \ |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 19c6477af19f..bec82a600d77 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, | |||
505 | } | 505 | } |
506 | 506 | ||
507 | /* | 507 | /* |
508 | * Special cqe for drain WR completions... | ||
509 | */ | ||
510 | if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { | ||
511 | *cookie = CQE_DRAIN_COOKIE(hw_cqe); | ||
512 | *cqe = *hw_cqe; | ||
513 | goto skip_cqe; | ||
514 | } | ||
515 | |||
516 | /* | ||
508 | * Gotta tweak READ completions: | 517 | * Gotta tweak READ completions: |
509 | * 1) the cqe doesn't contain the sq_wptr from the wr. | 518 | * 1) the cqe doesn't contain the sq_wptr from the wr. |
510 | * 2) opcode not reflected from the wr. | 519 | * 2) opcode not reflected from the wr. |
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
753 | c4iw_invalidate_mr(qhp->rhp, | 762 | c4iw_invalidate_mr(qhp->rhp, |
754 | CQE_WRID_FR_STAG(&cqe)); | 763 | CQE_WRID_FR_STAG(&cqe)); |
755 | break; | 764 | break; |
765 | case C4IW_DRAIN_OPCODE: | ||
766 | wc->opcode = IB_WC_SEND; | ||
767 | break; | ||
756 | default: | 768 | default: |
757 | printk(KERN_ERR MOD "Unexpected opcode %d " | 769 | printk(KERN_ERR MOD "Unexpected opcode %d " |
758 | "in the CQE received for QPID=0x%0x\n", | 770 | "in the CQE received for QPID=0x%0x\n", |
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
817 | } | 829 | } |
818 | } | 830 | } |
819 | out: | 831 | out: |
820 | if (wq) { | 832 | if (wq) |
821 | if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) { | ||
822 | if (t4_sq_empty(wq)) | ||
823 | complete(&qhp->sq_drained); | ||
824 | if (t4_rq_empty(wq)) | ||
825 | complete(&qhp->rq_drained); | ||
826 | } | ||
827 | spin_unlock(&qhp->lock); | 833 | spin_unlock(&qhp->lock); |
828 | } | ||
829 | return ret; | 834 | return ret; |
830 | } | 835 | } |
831 | 836 | ||
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 516b0ae6dc3f..40c0e7b9fc6e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
846 | } | 846 | } |
847 | } | 847 | } |
848 | 848 | ||
849 | rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); | ||
850 | if (!rdev->free_workq) { | ||
851 | err = -ENOMEM; | ||
852 | goto err_free_status_page; | ||
853 | } | ||
854 | |||
849 | rdev->status_page->db_off = 0; | 855 | rdev->status_page->db_off = 0; |
850 | 856 | ||
851 | return 0; | 857 | return 0; |
858 | err_free_status_page: | ||
859 | free_page((unsigned long)rdev->status_page); | ||
852 | destroy_ocqp_pool: | 860 | destroy_ocqp_pool: |
853 | c4iw_ocqp_pool_destroy(rdev); | 861 | c4iw_ocqp_pool_destroy(rdev); |
854 | destroy_rqtpool: | 862 | destroy_rqtpool: |
@@ -862,6 +870,7 @@ destroy_resource: | |||
862 | 870 | ||
863 | static void c4iw_rdev_close(struct c4iw_rdev *rdev) | 871 | static void c4iw_rdev_close(struct c4iw_rdev *rdev) |
864 | { | 872 | { |
873 | destroy_workqueue(rdev->free_workq); | ||
865 | kfree(rdev->wr_log); | 874 | kfree(rdev->wr_log); |
866 | free_page((unsigned long)rdev->status_page); | 875 | free_page((unsigned long)rdev->status_page); |
867 | c4iw_pblpool_destroy(rdev); | 876 | c4iw_pblpool_destroy(rdev); |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 4788e1a46fde..8cd4d054a87e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/kref.h> | 45 | #include <linux/kref.h> |
46 | #include <linux/timer.h> | 46 | #include <linux/timer.h> |
47 | #include <linux/io.h> | 47 | #include <linux/io.h> |
48 | #include <linux/workqueue.h> | ||
48 | 49 | ||
49 | #include <asm/byteorder.h> | 50 | #include <asm/byteorder.h> |
50 | 51 | ||
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext { | |||
107 | struct list_head qpids; | 108 | struct list_head qpids; |
108 | struct list_head cqids; | 109 | struct list_head cqids; |
109 | struct mutex lock; | 110 | struct mutex lock; |
111 | struct kref kref; | ||
110 | }; | 112 | }; |
111 | 113 | ||
112 | enum c4iw_rdev_flags { | 114 | enum c4iw_rdev_flags { |
@@ -183,6 +185,7 @@ struct c4iw_rdev { | |||
183 | atomic_t wr_log_idx; | 185 | atomic_t wr_log_idx; |
184 | struct wr_log_entry *wr_log; | 186 | struct wr_log_entry *wr_log; |
185 | int wr_log_size; | 187 | int wr_log_size; |
188 | struct workqueue_struct *free_workq; | ||
186 | }; | 189 | }; |
187 | 190 | ||
188 | static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) | 191 | static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) |
@@ -480,8 +483,8 @@ struct c4iw_qp { | |||
480 | wait_queue_head_t wait; | 483 | wait_queue_head_t wait; |
481 | struct timer_list timer; | 484 | struct timer_list timer; |
482 | int sq_sig_all; | 485 | int sq_sig_all; |
483 | struct completion rq_drained; | 486 | struct work_struct free_work; |
484 | struct completion sq_drained; | 487 | struct c4iw_ucontext *ucontext; |
485 | }; | 488 | }; |
486 | 489 | ||
487 | static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) | 490 | static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) |
@@ -495,6 +498,7 @@ struct c4iw_ucontext { | |||
495 | u32 key; | 498 | u32 key; |
496 | spinlock_t mmap_lock; | 499 | spinlock_t mmap_lock; |
497 | struct list_head mmaps; | 500 | struct list_head mmaps; |
501 | struct kref kref; | ||
498 | }; | 502 | }; |
499 | 503 | ||
500 | static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) | 504 | static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) |
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) | |||
502 | return container_of(c, struct c4iw_ucontext, ibucontext); | 506 | return container_of(c, struct c4iw_ucontext, ibucontext); |
503 | } | 507 | } |
504 | 508 | ||
509 | void _c4iw_free_ucontext(struct kref *kref); | ||
510 | |||
511 | static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext) | ||
512 | { | ||
513 | kref_put(&ucontext->kref, _c4iw_free_ucontext); | ||
514 | } | ||
515 | |||
516 | static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext) | ||
517 | { | ||
518 | kref_get(&ucontext->kref); | ||
519 | } | ||
520 | |||
505 | struct c4iw_mm_entry { | 521 | struct c4iw_mm_entry { |
506 | struct list_head entry; | 522 | struct list_head entry; |
507 | u64 addr; | 523 | u64 addr; |
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state) | |||
615 | return IB_QPS_ERR; | 631 | return IB_QPS_ERR; |
616 | } | 632 | } |
617 | 633 | ||
634 | #define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN | ||
635 | |||
618 | static inline u32 c4iw_ib_to_tpt_access(int a) | 636 | static inline u32 c4iw_ib_to_tpt_access(int a) |
619 | { | 637 | { |
620 | return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | | 638 | return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log; | |||
997 | extern int db_fc_threshold; | 1015 | extern int db_fc_threshold; |
998 | extern int db_coalescing_threshold; | 1016 | extern int db_coalescing_threshold; |
999 | extern int use_dsgl; | 1017 | extern int use_dsgl; |
1000 | void c4iw_drain_rq(struct ib_qp *qp); | ||
1001 | void c4iw_drain_sq(struct ib_qp *qp); | ||
1002 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); | 1018 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); |
1003 | 1019 | ||
1004 | #endif | 1020 | #endif |
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 49b51b7e0fd7..3345e1c312f7 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, | |||
93 | return -ENOSYS; | 93 | return -ENOSYS; |
94 | } | 94 | } |
95 | 95 | ||
96 | static int c4iw_dealloc_ucontext(struct ib_ucontext *context) | 96 | void _c4iw_free_ucontext(struct kref *kref) |
97 | { | 97 | { |
98 | struct c4iw_dev *rhp = to_c4iw_dev(context->device); | 98 | struct c4iw_ucontext *ucontext; |
99 | struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); | 99 | struct c4iw_dev *rhp; |
100 | struct c4iw_mm_entry *mm, *tmp; | 100 | struct c4iw_mm_entry *mm, *tmp; |
101 | 101 | ||
102 | PDBG("%s context %p\n", __func__, context); | 102 | ucontext = container_of(kref, struct c4iw_ucontext, kref); |
103 | rhp = to_c4iw_dev(ucontext->ibucontext.device); | ||
104 | |||
105 | PDBG("%s ucontext %p\n", __func__, ucontext); | ||
103 | list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) | 106 | list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) |
104 | kfree(mm); | 107 | kfree(mm); |
105 | c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); | 108 | c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); |
106 | kfree(ucontext); | 109 | kfree(ucontext); |
110 | } | ||
111 | |||
112 | static int c4iw_dealloc_ucontext(struct ib_ucontext *context) | ||
113 | { | ||
114 | struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); | ||
115 | |||
116 | PDBG("%s context %p\n", __func__, context); | ||
117 | c4iw_put_ucontext(ucontext); | ||
107 | return 0; | 118 | return 0; |
108 | } | 119 | } |
109 | 120 | ||
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, | |||
127 | c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); | 138 | c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); |
128 | INIT_LIST_HEAD(&context->mmaps); | 139 | INIT_LIST_HEAD(&context->mmaps); |
129 | spin_lock_init(&context->mmap_lock); | 140 | spin_lock_init(&context->mmap_lock); |
141 | kref_init(&context->kref); | ||
130 | 142 | ||
131 | if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { | 143 | if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { |
132 | if (!warned++) | 144 | if (!warned++) |
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, | |||
361 | 373 | ||
362 | memset(props, 0, sizeof(struct ib_port_attr)); | 374 | memset(props, 0, sizeof(struct ib_port_attr)); |
363 | props->max_mtu = IB_MTU_4096; | 375 | props->max_mtu = IB_MTU_4096; |
364 | if (netdev->mtu >= 4096) | 376 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); |
365 | props->active_mtu = IB_MTU_4096; | ||
366 | else if (netdev->mtu >= 2048) | ||
367 | props->active_mtu = IB_MTU_2048; | ||
368 | else if (netdev->mtu >= 1024) | ||
369 | props->active_mtu = IB_MTU_1024; | ||
370 | else if (netdev->mtu >= 512) | ||
371 | props->active_mtu = IB_MTU_512; | ||
372 | else | ||
373 | props->active_mtu = IB_MTU_256; | ||
374 | 377 | ||
375 | if (!netif_carrier_ok(netdev)) | 378 | if (!netif_carrier_ok(netdev)) |
376 | props->state = IB_PORT_DOWN; | 379 | props->state = IB_PORT_DOWN; |
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
607 | dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; | 610 | dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; |
608 | dev->ibdev.get_port_immutable = c4iw_port_immutable; | 611 | dev->ibdev.get_port_immutable = c4iw_port_immutable; |
609 | dev->ibdev.get_dev_fw_str = get_dev_fw_str; | 612 | dev->ibdev.get_dev_fw_str = get_dev_fw_str; |
610 | dev->ibdev.drain_sq = c4iw_drain_sq; | ||
611 | dev->ibdev.drain_rq = c4iw_drain_rq; | ||
612 | 613 | ||
613 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); | 614 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); |
614 | if (!dev->ibdev.iwcm) | 615 | if (!dev->ibdev.iwcm) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cda5542e13a2..04c1c382dedb 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | |||
715 | return 0; | 715 | return 0; |
716 | } | 716 | } |
717 | 717 | ||
718 | static void _free_qp(struct kref *kref) | 718 | static void free_qp_work(struct work_struct *work) |
719 | { | ||
720 | struct c4iw_ucontext *ucontext; | ||
721 | struct c4iw_qp *qhp; | ||
722 | struct c4iw_dev *rhp; | ||
723 | |||
724 | qhp = container_of(work, struct c4iw_qp, free_work); | ||
725 | ucontext = qhp->ucontext; | ||
726 | rhp = qhp->rhp; | ||
727 | |||
728 | PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext); | ||
729 | destroy_qp(&rhp->rdev, &qhp->wq, | ||
730 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | ||
731 | |||
732 | if (ucontext) | ||
733 | c4iw_put_ucontext(ucontext); | ||
734 | kfree(qhp); | ||
735 | } | ||
736 | |||
737 | static void queue_qp_free(struct kref *kref) | ||
719 | { | 738 | { |
720 | struct c4iw_qp *qhp; | 739 | struct c4iw_qp *qhp; |
721 | 740 | ||
722 | qhp = container_of(kref, struct c4iw_qp, kref); | 741 | qhp = container_of(kref, struct c4iw_qp, kref); |
723 | PDBG("%s qhp %p\n", __func__, qhp); | 742 | PDBG("%s qhp %p\n", __func__, qhp); |
724 | kfree(qhp); | 743 | queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); |
725 | } | 744 | } |
726 | 745 | ||
727 | void c4iw_qp_add_ref(struct ib_qp *qp) | 746 | void c4iw_qp_add_ref(struct ib_qp *qp) |
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp) | |||
733 | void c4iw_qp_rem_ref(struct ib_qp *qp) | 752 | void c4iw_qp_rem_ref(struct ib_qp *qp) |
734 | { | 753 | { |
735 | PDBG("%s ib_qp %p\n", __func__, qp); | 754 | PDBG("%s ib_qp %p\n", __func__, qp); |
736 | kref_put(&to_c4iw_qp(qp)->kref, _free_qp); | 755 | kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); |
737 | } | 756 | } |
738 | 757 | ||
739 | static void add_to_fc_list(struct list_head *head, struct list_head *entry) | 758 | static void add_to_fc_list(struct list_head *head, struct list_head *entry) |
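Deferring the actual free to free_qp_work() matters because the last kref_put() can happen in a context that must not sleep, while destroy_qp() may block. A generic sketch of the kref-to-workqueue hand-off, with hypothetical obj/obj_release names (not the driver's code):

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct obj {
		struct kref kref;
		struct work_struct free_work;
	};

	static void obj_free_work(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, free_work);

		/* runs in process context: sleeping cleanup is safe here */
		kfree(o);
	}

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, kref);

		/* release may run in atomic context: only schedule work */
		schedule_work(&o->free_work);
	}

	/* dropping a reference: kref_put(&o->kref, obj_release); */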
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) | |||
776 | return 0; | 795 | return 0; |
777 | } | 796 | } |
778 | 797 | ||
798 | static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) | ||
799 | { | ||
800 | struct t4_cqe cqe = {}; | ||
801 | struct c4iw_cq *schp; | ||
802 | unsigned long flag; | ||
803 | struct t4_cq *cq; | ||
804 | |||
805 | schp = to_c4iw_cq(qhp->ibqp.send_cq); | ||
806 | cq = &schp->cq; | ||
807 | |||
808 | cqe.u.drain_cookie = wr->wr_id; | ||
809 | cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | | ||
810 | CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | | ||
811 | CQE_TYPE_V(1) | | ||
812 | CQE_SWCQE_V(1) | | ||
813 | CQE_QPID_V(qhp->wq.sq.qid)); | ||
814 | |||
815 | spin_lock_irqsave(&schp->lock, flag); | ||
816 | cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); | ||
817 | cq->sw_queue[cq->sw_pidx] = cqe; | ||
818 | t4_swcq_produce(cq); | ||
819 | spin_unlock_irqrestore(&schp->lock, flag); | ||
820 | |||
821 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | ||
822 | (*schp->ibcq.comp_handler)(&schp->ibcq, | ||
823 | schp->ibcq.cq_context); | ||
824 | spin_unlock_irqrestore(&schp->comp_handler_lock, flag); | ||
825 | } | ||
826 | |||
827 | static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) | ||
828 | { | ||
829 | struct t4_cqe cqe = {}; | ||
830 | struct c4iw_cq *rchp; | ||
831 | unsigned long flag; | ||
832 | struct t4_cq *cq; | ||
833 | |||
834 | rchp = to_c4iw_cq(qhp->ibqp.recv_cq); | ||
835 | cq = &rchp->cq; | ||
836 | |||
837 | cqe.u.drain_cookie = wr->wr_id; | ||
838 | cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | | ||
839 | CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | | ||
840 | CQE_TYPE_V(0) | | ||
841 | CQE_SWCQE_V(1) | | ||
842 | CQE_QPID_V(qhp->wq.sq.qid)); | ||
843 | |||
844 | spin_lock_irqsave(&rchp->lock, flag); | ||
845 | cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); | ||
846 | cq->sw_queue[cq->sw_pidx] = cqe; | ||
847 | t4_swcq_produce(cq); | ||
848 | spin_unlock_irqrestore(&rchp->lock, flag); | ||
849 | |||
850 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | ||
851 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | ||
852 | rchp->ibcq.cq_context); | ||
853 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | ||
854 | } | ||
855 | |||
779 | int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 856 | int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
780 | struct ib_send_wr **bad_wr) | 857 | struct ib_send_wr **bad_wr) |
781 | { | 858 | { |
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
794 | spin_lock_irqsave(&qhp->lock, flag); | 871 | spin_lock_irqsave(&qhp->lock, flag); |
795 | if (t4_wq_in_error(&qhp->wq)) { | 872 | if (t4_wq_in_error(&qhp->wq)) { |
796 | spin_unlock_irqrestore(&qhp->lock, flag); | 873 | spin_unlock_irqrestore(&qhp->lock, flag); |
797 | *bad_wr = wr; | 874 | complete_sq_drain_wr(qhp, wr); |
798 | return -EINVAL; | 875 | return err; |
799 | } | 876 | } |
800 | num_wrs = t4_sq_avail(&qhp->wq); | 877 | num_wrs = t4_sq_avail(&qhp->wq); |
801 | if (num_wrs == 0) { | 878 | if (num_wrs == 0) { |
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
937 | spin_lock_irqsave(&qhp->lock, flag); | 1014 | spin_lock_irqsave(&qhp->lock, flag); |
938 | if (t4_wq_in_error(&qhp->wq)) { | 1015 | if (t4_wq_in_error(&qhp->wq)) { |
939 | spin_unlock_irqrestore(&qhp->lock, flag); | 1016 | spin_unlock_irqrestore(&qhp->lock, flag); |
940 | *bad_wr = wr; | 1017 | complete_rq_drain_wr(qhp, wr); |
941 | return -EINVAL; | 1018 | return err; |
942 | } | 1019 | } |
943 | num_wrs = t4_rq_avail(&qhp->wq); | 1020 | num_wrs = t4_rq_avail(&qhp->wq); |
944 | if (num_wrs == 0) { | 1021 | if (num_wrs == 0) { |
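Completing posts against an errored QP as software flush CQEs, instead of failing them with -EINVAL, is what lets the generic drain logic make progress: the core drain helpers post one marker WR and block until its flush completion arrives. A condensed sketch of that consumer side, assuming the usual ib_drain_sq()-style pattern:

	struct drain_cqe {
		struct ib_cqe cqe;
		struct completion done;
	};

	static void drain_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct drain_cqe *d =
			container_of(wc->wr_cqe, struct drain_cqe, cqe);

		complete(&d->done);	/* flush completion observed */
	}

	/* after moving the QP to ERROR: post a marker WR whose wr_cqe
	 * points at &d->cqe, then wait_for_completion(&d->done). */

This is also why the driver-private drain_sq/drain_rq hooks are deleted from c4iw_register_device() above, along with the sq_drained/rq_drained completions later in this file.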
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1550 | } | 1627 | } |
1551 | break; | 1628 | break; |
1552 | case C4IW_QP_STATE_CLOSING: | 1629 | case C4IW_QP_STATE_CLOSING: |
1553 | if (!internal) { | 1630 | |
1631 | /* | ||
1632 | * Allow kernel users to move to ERROR for qp draining. | ||
1633 | */ | ||
1634 | if (!internal && (qhp->ibqp.uobject || attrs->next_state != | ||
1635 | C4IW_QP_STATE_ERROR)) { | ||
1554 | ret = -EINVAL; | 1636 | ret = -EINVAL; |
1555 | goto out; | 1637 | goto out; |
1556 | } | 1638 | } |
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
1643 | struct c4iw_dev *rhp; | 1725 | struct c4iw_dev *rhp; |
1644 | struct c4iw_qp *qhp; | 1726 | struct c4iw_qp *qhp; |
1645 | struct c4iw_qp_attributes attrs; | 1727 | struct c4iw_qp_attributes attrs; |
1646 | struct c4iw_ucontext *ucontext; | ||
1647 | 1728 | ||
1648 | qhp = to_c4iw_qp(ib_qp); | 1729 | qhp = to_c4iw_qp(ib_qp); |
1649 | rhp = qhp->rhp; | 1730 | rhp = qhp->rhp; |
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) | |||
1663 | spin_unlock_irq(&rhp->lock); | 1744 | spin_unlock_irq(&rhp->lock); |
1664 | free_ird(rhp, qhp->attr.max_ird); | 1745 | free_ird(rhp, qhp->attr.max_ird); |
1665 | 1746 | ||
1666 | ucontext = ib_qp->uobject ? | ||
1667 | to_c4iw_ucontext(ib_qp->uobject->context) : NULL; | ||
1668 | destroy_qp(&rhp->rdev, &qhp->wq, | ||
1669 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); | ||
1670 | |||
1671 | c4iw_qp_rem_ref(ib_qp); | 1747 | c4iw_qp_rem_ref(ib_qp); |
1672 | 1748 | ||
1673 | PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); | 1749 | PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); |
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
1763 | qhp->attr.max_ird = 0; | 1839 | qhp->attr.max_ird = 0; |
1764 | qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; | 1840 | qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; |
1765 | spin_lock_init(&qhp->lock); | 1841 | spin_lock_init(&qhp->lock); |
1766 | init_completion(&qhp->sq_drained); | ||
1767 | init_completion(&qhp->rq_drained); | ||
1768 | mutex_init(&qhp->mutex); | 1842 | mutex_init(&qhp->mutex); |
1769 | init_waitqueue_head(&qhp->wait); | 1843 | init_waitqueue_head(&qhp->wait); |
1770 | kref_init(&qhp->kref); | 1844 | kref_init(&qhp->kref); |
1845 | INIT_WORK(&qhp->free_work, free_qp_work); | ||
1771 | 1846 | ||
1772 | ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); | 1847 | ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); |
1773 | if (ret) | 1848 | if (ret) |
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
1854 | ma_sync_key_mm->len = PAGE_SIZE; | 1929 | ma_sync_key_mm->len = PAGE_SIZE; |
1855 | insert_mmap(ucontext, ma_sync_key_mm); | 1930 | insert_mmap(ucontext, ma_sync_key_mm); |
1856 | } | 1931 | } |
1932 | |||
1933 | c4iw_get_ucontext(ucontext); | ||
1934 | qhp->ucontext = ucontext; | ||
1857 | } | 1935 | } |
1858 | qhp->ibqp.qp_num = qhp->wq.sq.qid; | 1936 | qhp->ibqp.qp_num = qhp->wq.sq.qid; |
1859 | init_timer(&(qhp->timer)); | 1937 | init_timer(&(qhp->timer)); |
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1958 | init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; | 2036 | init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; |
1959 | return 0; | 2037 | return 0; |
1960 | } | 2038 | } |
1961 | |||
1962 | static void move_qp_to_err(struct c4iw_qp *qp) | ||
1963 | { | ||
1964 | struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR }; | ||
1965 | |||
1966 | (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
1967 | } | ||
1968 | |||
1969 | void c4iw_drain_sq(struct ib_qp *ibqp) | ||
1970 | { | ||
1971 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); | ||
1972 | unsigned long flag; | ||
1973 | bool need_to_wait; | ||
1974 | |||
1975 | move_qp_to_err(qp); | ||
1976 | spin_lock_irqsave(&qp->lock, flag); | ||
1977 | need_to_wait = !t4_sq_empty(&qp->wq); | ||
1978 | spin_unlock_irqrestore(&qp->lock, flag); | ||
1979 | |||
1980 | if (need_to_wait) | ||
1981 | wait_for_completion(&qp->sq_drained); | ||
1982 | } | ||
1983 | |||
1984 | void c4iw_drain_rq(struct ib_qp *ibqp) | ||
1985 | { | ||
1986 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); | ||
1987 | unsigned long flag; | ||
1988 | bool need_to_wait; | ||
1989 | |||
1990 | move_qp_to_err(qp); | ||
1991 | spin_lock_irqsave(&qp->lock, flag); | ||
1992 | need_to_wait = !t4_rq_empty(&qp->wq); | ||
1993 | spin_unlock_irqrestore(&qp->lock, flag); | ||
1994 | |||
1995 | if (need_to_wait) | ||
1996 | wait_for_completion(&qp->rq_drained); | ||
1997 | } | ||
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 862381aa83c8..640d22148a3e 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
@@ -179,6 +179,7 @@ struct t4_cqe { | |||
179 | __be32 wrid_hi; | 179 | __be32 wrid_hi; |
180 | __be32 wrid_low; | 180 | __be32 wrid_low; |
181 | } gen; | 181 | } gen; |
182 | u64 drain_cookie; | ||
182 | } u; | 183 | } u; |
183 | __be64 reserved; | 184 | __be64 reserved; |
184 | __be64 bits_type_ts; | 185 | __be64 bits_type_ts; |
@@ -238,6 +239,7 @@ struct t4_cqe { | |||
238 | /* generic accessor macros */ | 239 | /* generic accessor macros */ |
239 | #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) | 240 | #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) |
240 | #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) | 241 | #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) |
242 | #define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie) | ||
241 | 243 | ||
242 | /* macros for flit 3 of the cqe */ | 244 | /* macros for flit 3 of the cqe */ |
243 | #define CQE_GENBIT_S 63 | 245 | #define CQE_GENBIT_S 63 |
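The new drain_cookie member overlays the CQE union so a software flush CQE can carry the consumer's original wr_id. A hedged sketch of the poll-side use (the matching cq.c hunk is not part of this excerpt):

	/* in the CQ poll loop, for a software drain CQE: */
	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
		wc->wr_id = CQE_DRAIN_COOKIE(hw_cqe);
		wc->status = IB_WC_WR_FLUSH_ERR;
	}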
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 29e97df9e1a7..4c000d60d5c6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev, | |||
100 | memset(props, 0, sizeof(*props)); | 100 | memset(props, 0, sizeof(*props)); |
101 | 101 | ||
102 | props->max_mtu = IB_MTU_4096; | 102 | props->max_mtu = IB_MTU_4096; |
103 | if (netdev->mtu >= 4096) | 103 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); |
104 | props->active_mtu = IB_MTU_4096; | ||
105 | else if (netdev->mtu >= 2048) | ||
106 | props->active_mtu = IB_MTU_2048; | ||
107 | else if (netdev->mtu >= 1024) | ||
108 | props->active_mtu = IB_MTU_1024; | ||
109 | else if (netdev->mtu >= 512) | ||
110 | props->active_mtu = IB_MTU_512; | ||
111 | else | ||
112 | props->active_mtu = IB_MTU_256; | ||
113 | 104 | ||
114 | props->lid = 1; | 105 | props->lid = 1; |
115 | if (netif_carrier_ok(iwdev->netdev)) | 106 | if (netif_carrier_ok(iwdev->netdev)) |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index aff9fb14768b..5a31f3c6a421 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr | |||
478 | memset(props, 0, sizeof(*props)); | 478 | memset(props, 0, sizeof(*props)); |
479 | 479 | ||
480 | props->max_mtu = IB_MTU_4096; | 480 | props->max_mtu = IB_MTU_4096; |
481 | 481 | props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); | |
482 | if (netdev->mtu >= 4096) | ||
483 | props->active_mtu = IB_MTU_4096; | ||
484 | else if (netdev->mtu >= 2048) | ||
485 | props->active_mtu = IB_MTU_2048; | ||
486 | else if (netdev->mtu >= 1024) | ||
487 | props->active_mtu = IB_MTU_1024; | ||
488 | else if (netdev->mtu >= 512) | ||
489 | props->active_mtu = IB_MTU_512; | ||
490 | else | ||
491 | props->active_mtu = IB_MTU_256; | ||
492 | 482 | ||
493 | props->lid = 1; | 483 | props->lid = 1; |
494 | props->lmc = 0; | 484 | props->lmc = 0; |
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7b74d09a8217..3ac8aa5ef37d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c | |||
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) | |||
576 | return 0; | 576 | return 0; |
577 | } | 577 | } |
578 | 578 | ||
579 | void qedr_unaffiliated_event(void *context, | 579 | void qedr_unaffiliated_event(void *context, u8 event_code) |
580 | u8 event_code) | ||
581 | { | 580 | { |
582 | pr_err("unaffiliated event not implemented yet\n"); | 581 | pr_err("unaffiliated event not implemented yet\n"); |
583 | } | 582 | } |
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, | |||
792 | if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) | 791 | if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) |
793 | goto sysfs_err; | 792 | goto sysfs_err; |
794 | 793 | ||
794 | if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) | ||
795 | qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); | ||
796 | |||
795 | DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); | 797 | DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); |
796 | return dev; | 798 | return dev; |
797 | 799 | ||
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev) | |||
824 | ib_dealloc_device(&dev->ibdev); | 826 | ib_dealloc_device(&dev->ibdev); |
825 | } | 827 | } |
826 | 828 | ||
827 | static int qedr_close(struct qedr_dev *dev) | 829 | static void qedr_close(struct qedr_dev *dev) |
828 | { | 830 | { |
829 | qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); | 831 | if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) |
830 | 832 | qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); | |
831 | return 0; | ||
832 | } | 833 | } |
833 | 834 | ||
834 | static void qedr_shutdown(struct qedr_dev *dev) | 835 | static void qedr_shutdown(struct qedr_dev *dev) |
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev) | |||
837 | qedr_remove(dev); | 838 | qedr_remove(dev); |
838 | } | 839 | } |
839 | 840 | ||
841 | static void qedr_open(struct qedr_dev *dev) | ||
842 | { | ||
843 | if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) | ||
844 | qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); | ||
845 | } | ||
846 | |||
840 | static void qedr_mac_address_change(struct qedr_dev *dev) | 847 | static void qedr_mac_address_change(struct qedr_dev *dev) |
841 | { | 848 | { |
842 | union ib_gid *sgid = &dev->sgid_tbl[0]; | 849 | union ib_gid *sgid = &dev->sgid_tbl[0]; |
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) | |||
863 | 870 | ||
864 | ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); | 871 | ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); |
865 | 872 | ||
866 | qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); | 873 | qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); |
867 | 874 | ||
868 | if (rc) | 875 | if (rc) |
869 | DP_ERR(dev, "Error updating mac filter\n"); | 876 | DP_ERR(dev, "Error updating mac filter\n"); |
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) | |||
877 | { | 884 | { |
878 | switch (event) { | 885 | switch (event) { |
879 | case QEDE_UP: | 886 | case QEDE_UP: |
880 | qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); | 887 | qedr_open(dev); |
881 | break; | 888 | break; |
882 | case QEDE_DOWN: | 889 | case QEDE_DOWN: |
883 | qedr_close(dev); | 890 | qedr_close(dev); |
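QEDE_UP/QEDE_DOWN notifications can repeat, so the new enet_state bit turns them into edge-triggered IB events: only the caller that actually flips the bit dispatches PORT_ACTIVE or PORT_ERR. An illustrative wrapper (notify_link() is hypothetical, not part of the patch):

	static void notify_link(struct qedr_dev *dev, bool up)
	{
		if (up) {
			if (!test_and_set_bit(QEDR_ENET_STATE_BIT,
					      &dev->enet_state))
				qedr_ib_dispatch_event(dev, QEDR_PORT,
						       IB_EVENT_PORT_ACTIVE);
		} else {
			if (test_and_clear_bit(QEDR_ENET_STATE_BIT,
					       &dev->enet_state))
				qedr_ib_dispatch_event(dev, QEDR_PORT,
						       IB_EVENT_PORT_ERR);
		}
	}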
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 620badd7d4fb..bb32e4792ec9 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
@@ -113,6 +113,8 @@ struct qedr_device_attr { | |||
113 | struct qed_rdma_events events; | 113 | struct qed_rdma_events events; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | #define QEDR_ENET_STATE_BIT (0) | ||
117 | |||
116 | struct qedr_dev { | 118 | struct qedr_dev { |
117 | struct ib_device ibdev; | 119 | struct ib_device ibdev; |
118 | struct qed_dev *cdev; | 120 | struct qed_dev *cdev; |
@@ -153,6 +155,8 @@ struct qedr_dev { | |||
153 | struct qedr_cq *gsi_sqcq; | 155 | struct qedr_cq *gsi_sqcq; |
154 | struct qedr_cq *gsi_rqcq; | 156 | struct qedr_cq *gsi_rqcq; |
155 | struct qedr_qp *gsi_qp; | 157 | struct qedr_qp *gsi_qp; |
158 | |||
159 | unsigned long enet_state; | ||
156 | }; | 160 | }; |
157 | 161 | ||
158 | #define QEDR_MAX_SQ_PBL (0x8000) | 162 | #define QEDR_MAX_SQ_PBL (0x8000) |
@@ -188,6 +192,7 @@ struct qedr_dev { | |||
188 | #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) | 192 | #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) |
189 | 193 | ||
190 | #define QEDR_MAX_PORT (1) | 194 | #define QEDR_MAX_PORT (1) |
195 | #define QEDR_PORT (1) | ||
191 | 196 | ||
192 | #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | 197 | #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) |
193 | 198 | ||
@@ -251,9 +256,6 @@ struct qedr_cq { | |||
251 | 256 | ||
252 | u16 icid; | 257 | u16 icid; |
253 | 258 | ||
254 | /* Lock to protect completion handler */ | ||
255 | spinlock_t comp_handler_lock; | ||
256 | |||
257 | /* Lock to protect multiple CQs */ | 259 | /* Lock to protect multiple CQs */ |
258 | spinlock_t cq_lock; | 260 | spinlock_t cq_lock; |
259 | u8 arm_flags; | 261 | u8 arm_flags; |
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 63890ebb72bd..a9a8d8745d2e 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c | |||
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) | |||
87 | qedr_inc_sw_gsi_cons(&qp->sq); | 87 | qedr_inc_sw_gsi_cons(&qp->sq); |
88 | spin_unlock_irqrestore(&qp->q_lock, flags); | 88 | spin_unlock_irqrestore(&qp->q_lock, flags); |
89 | 89 | ||
90 | if (cq->ibcq.comp_handler) { | 90 | if (cq->ibcq.comp_handler) |
91 | spin_lock_irqsave(&cq->comp_handler_lock, flags); | ||
92 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); | 91 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); |
93 | spin_unlock_irqrestore(&cq->comp_handler_lock, flags); | ||
94 | } | ||
95 | } | 92 | } |
96 | 93 | ||
97 | void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, | 94 | void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, |
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, | |||
113 | 110 | ||
114 | spin_unlock_irqrestore(&qp->q_lock, flags); | 111 | spin_unlock_irqrestore(&qp->q_lock, flags); |
115 | 112 | ||
116 | if (cq->ibcq.comp_handler) { | 113 | if (cq->ibcq.comp_handler) |
117 | spin_lock_irqsave(&cq->comp_handler_lock, flags); | ||
118 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); | 114 | (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); |
119 | spin_unlock_irqrestore(&cq->comp_handler_lock, flags); | ||
120 | } | ||
121 | } | 115 | } |
122 | 116 | ||
123 | static void qedr_destroy_gsi_cq(struct qedr_dev *dev, | 117 | static void qedr_destroy_gsi_cq(struct qedr_dev *dev, |
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev, | |||
404 | } | 398 | } |
405 | 399 | ||
406 | if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) | 400 | if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) |
407 | packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; | ||
408 | else | ||
409 | packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; | 401 | packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; |
402 | else | ||
403 | packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; | ||
410 | 404 | ||
411 | packet->roce_mode = roce_mode; | 405 | packet->roce_mode = roce_mode; |
412 | memcpy(packet->header.vaddr, ud_header_buffer, header_size); | 406 | memcpy(packet->header.vaddr, ud_header_buffer, header_size); |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 57c8de208077..c7d6c9a783bd 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, | |||
471 | struct ib_ucontext *context, struct ib_udata *udata) | 471 | struct ib_ucontext *context, struct ib_udata *udata) |
472 | { | 472 | { |
473 | struct qedr_dev *dev = get_qedr_dev(ibdev); | 473 | struct qedr_dev *dev = get_qedr_dev(ibdev); |
474 | struct qedr_ucontext *uctx = NULL; | ||
475 | struct qedr_alloc_pd_uresp uresp; | ||
476 | struct qedr_pd *pd; | 474 | struct qedr_pd *pd; |
477 | u16 pd_id; | 475 | u16 pd_id; |
478 | int rc; | 476 | int rc; |
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, | |||
489 | if (!pd) | 487 | if (!pd) |
490 | return ERR_PTR(-ENOMEM); | 488 | return ERR_PTR(-ENOMEM); |
491 | 489 | ||
492 | dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); | 490 | rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); |
491 | if (rc) | ||
492 | goto err; | ||
493 | 493 | ||
494 | uresp.pd_id = pd_id; | ||
495 | pd->pd_id = pd_id; | 494 | pd->pd_id = pd_id; |
496 | 495 | ||
497 | if (udata && context) { | 496 | if (udata && context) { |
497 | struct qedr_alloc_pd_uresp uresp; | ||
498 | |||
499 | uresp.pd_id = pd_id; | ||
500 | |||
498 | rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | 501 | rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); |
499 | if (rc) | 502 | if (rc) { |
500 | DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); | 503 | DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); |
501 | uctx = get_qedr_ucontext(context); | 504 | dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); |
502 | uctx->pd = pd; | 505 | goto err; |
503 | pd->uctx = uctx; | 506 | } |
507 | |||
508 | pd->uctx = get_qedr_ucontext(context); | ||
509 | pd->uctx->pd = pd; | ||
504 | } | 510 | } |
505 | 511 | ||
506 | return &pd->ibpd; | 512 | return &pd->ibpd; |
513 | |||
514 | err: | ||
515 | kfree(pd); | ||
516 | return ERR_PTR(rc); | ||
507 | } | 517 | } |
508 | 518 | ||
509 | int qedr_dealloc_pd(struct ib_pd *ibpd) | 519 | int qedr_dealloc_pd(struct ib_pd *ibpd) |
@@ -1600,7 +1610,7 @@ err0: | |||
1600 | return ERR_PTR(-EFAULT); | 1610 | return ERR_PTR(-EFAULT); |
1601 | } | 1611 | } |
1602 | 1612 | ||
1603 | enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) | 1613 | static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) |
1604 | { | 1614 | { |
1605 | switch (qp_state) { | 1615 | switch (qp_state) { |
1606 | case QED_ROCE_QP_STATE_RESET: | 1616 | case QED_ROCE_QP_STATE_RESET: |
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) | |||
1621 | return IB_QPS_ERR; | 1631 | return IB_QPS_ERR; |
1622 | } | 1632 | } |
1623 | 1633 | ||
1624 | enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) | 1634 | static enum qed_roce_qp_state qedr_get_state_from_ibqp( |
1635 | enum ib_qp_state qp_state) | ||
1625 | { | 1636 | { |
1626 | switch (qp_state) { | 1637 | switch (qp_state) { |
1627 | case IB_QPS_RESET: | 1638 | case IB_QPS_RESET: |
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev, | |||
1657 | int status = 0; | 1668 | int status = 0; |
1658 | 1669 | ||
1659 | if (new_state == qp->state) | 1670 | if (new_state == qp->state) |
1660 | return 1; | 1671 | return 0; |
1661 | 1672 | ||
1662 | switch (qp->state) { | 1673 | switch (qp->state) { |
1663 | case QED_ROCE_QP_STATE_RESET: | 1674 | case QED_ROCE_QP_STATE_RESET: |
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev, | |||
1733 | /* ERR->XXX */ | 1744 | /* ERR->XXX */ |
1734 | switch (new_state) { | 1745 | switch (new_state) { |
1735 | case QED_ROCE_QP_STATE_RESET: | 1746 | case QED_ROCE_QP_STATE_RESET: |
1747 | if ((qp->rq.prod != qp->rq.cons) || | ||
1748 | (qp->sq.prod != qp->sq.cons)) { | ||
1749 | DP_NOTICE(dev, | ||
1750 | "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", | ||
1751 | qp->rq.prod, qp->rq.cons, qp->sq.prod, | ||
1752 | qp->sq.cons); | ||
1753 | status = -EINVAL; | ||
1754 | } | ||
1736 | break; | 1755 | break; |
1737 | default: | 1756 | default: |
1738 | status = -EINVAL; | 1757 | status = -EINVAL; |
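qedr_update_qp_state() follows the usual kernel convention its callers assume: 0 on success, negative errno on failure. Returning 1 for a same-state modify was therefore propagated up as an error; in addition, the ERR->RESET leg now refuses the transition while the send or receive ring still holds unconsumed entries:

	/* convention assumed by the callers of qedr_update_qp_state(): */
	if (new_state == qp->state)
		return 0;	/* no-op transition, not an error */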
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1865 | qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); | 1884 | qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); |
1866 | DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", | 1885 | DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", |
1867 | qp_params.remote_mac_addr); | 1886 | qp_params.remote_mac_addr); |
1868 | ; | ||
1869 | 1887 | ||
1870 | qp_params.mtu = qp->mtu; | 1888 | qp_params.mtu = qp->mtu; |
1871 | qp_params.lb_indication = false; | 1889 | qp_params.lb_indication = false; |
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp, | |||
2016 | 2034 | ||
2017 | qp_attr->qp_state = qedr_get_ibqp_state(params.state); | 2035 | qp_attr->qp_state = qedr_get_ibqp_state(params.state); |
2018 | qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); | 2036 | qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); |
2019 | qp_attr->path_mtu = iboe_get_mtu(params.mtu); | 2037 | qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); |
2020 | qp_attr->path_mig_state = IB_MIG_MIGRATED; | 2038 | qp_attr->path_mig_state = IB_MIG_MIGRATED; |
2021 | qp_attr->rq_psn = params.rq_psn; | 2039 | qp_attr->rq_psn = params.rq_psn; |
2022 | qp_attr->sq_psn = params.sq_psn; | 2040 | qp_attr->sq_psn = params.sq_psn; |
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp, | |||
2028 | qp_attr->cap.max_recv_wr = qp->rq.max_wr; | 2046 | qp_attr->cap.max_recv_wr = qp->rq.max_wr; |
2029 | qp_attr->cap.max_send_sge = qp->sq.max_sges; | 2047 | qp_attr->cap.max_send_sge = qp->sq.max_sges; |
2030 | qp_attr->cap.max_recv_sge = qp->rq.max_sges; | 2048 | qp_attr->cap.max_recv_sge = qp->rq.max_sges; |
2031 | qp_attr->cap.max_inline_data = qp->max_inline_data; | 2049 | qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; |
2032 | qp_init_attr->cap = qp_attr->cap; | 2050 | qp_init_attr->cap = qp_attr->cap; |
2033 | 2051 | ||
2034 | memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], | 2052 | memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], |
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr) | |||
2302 | return rc; | 2320 | return rc; |
2303 | } | 2321 | } |
2304 | 2322 | ||
2305 | struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) | 2323 | static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, |
2324 | int max_page_list_len) | ||
2306 | { | 2325 | { |
2307 | struct qedr_pd *pd = get_qedr_pd(ibpd); | 2326 | struct qedr_pd *pd = get_qedr_pd(ibpd); |
2308 | struct qedr_dev *dev = get_qedr_dev(ibpd->device); | 2327 | struct qedr_dev *dev = get_qedr_dev(ibpd->device); |
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp, | |||
2704 | return 0; | 2723 | return 0; |
2705 | } | 2724 | } |
2706 | 2725 | ||
2707 | enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) | 2726 | static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) |
2708 | { | 2727 | { |
2709 | switch (opcode) { | 2728 | switch (opcode) { |
2710 | case IB_WR_RDMA_WRITE: | 2729 | case IB_WR_RDMA_WRITE: |
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) | |||
2729 | } | 2748 | } |
2730 | } | 2749 | } |
2731 | 2750 | ||
2732 | inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) | 2751 | static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) |
2733 | { | 2752 | { |
2734 | int wq_is_full, err_wr, pbl_is_full; | 2753 | int wq_is_full, err_wr, pbl_is_full; |
2735 | struct qedr_dev *dev = qp->dev; | 2754 | struct qedr_dev *dev = qp->dev; |
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) | |||
2766 | return true; | 2785 | return true; |
2767 | } | 2786 | } |
2768 | 2787 | ||
2769 | int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 2788 | static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
2770 | struct ib_send_wr **bad_wr) | 2789 | struct ib_send_wr **bad_wr) |
2771 | { | 2790 | { |
2772 | struct qedr_dev *dev = get_qedr_dev(ibqp->device); | 2791 | struct qedr_dev *dev = get_qedr_dev(ibqp->device); |
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, | |||
3234 | IB_WC_SUCCESS, 0); | 3253 | IB_WC_SUCCESS, 0); |
3235 | break; | 3254 | break; |
3236 | case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: | 3255 | case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: |
3237 | DP_ERR(dev, | 3256 | if (qp->state != QED_ROCE_QP_STATE_ERR) |
3238 | "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", | 3257 | DP_ERR(dev, |
3239 | cq->icid, qp->icid); | 3258 | "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", |
3259 | cq->icid, qp->icid); | ||
3240 | cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, | 3260 | cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, |
3241 | IB_WC_WR_FLUSH_ERR, 1); | 3261 | IB_WC_WR_FLUSH_ERR, 1); |
3242 | break; | 3262 | break; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 231a1ce1f4be..bd8fbd3d2032 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | |||
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, | |||
1029 | if (ret) { | 1029 | if (ret) { |
1030 | dev_err(&pdev->dev, "failed to allocate interrupts\n"); | 1030 | dev_err(&pdev->dev, "failed to allocate interrupts\n"); |
1031 | ret = -ENOMEM; | 1031 | ret = -ENOMEM; |
1032 | goto err_netdevice; | 1032 | goto err_free_cq_ring; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | /* Allocate UAR table. */ | 1035 | /* Allocate UAR table. */ |
@@ -1092,8 +1092,6 @@ err_free_uar_table: | |||
1092 | err_free_intrs: | 1092 | err_free_intrs: |
1093 | pvrdma_free_irq(dev); | 1093 | pvrdma_free_irq(dev); |
1094 | pvrdma_disable_msi_all(dev); | 1094 | pvrdma_disable_msi_all(dev); |
1095 | err_netdevice: | ||
1096 | unregister_netdevice_notifier(&dev->nb_netdev); | ||
1097 | err_free_cq_ring: | 1095 | err_free_cq_ring: |
1098 | pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); | 1096 | pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); |
1099 | err_free_async_ring: | 1097 | err_free_async_ring: |
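Retargeting the goto restores the unwind invariant that each error label undoes only what has already succeeded, in reverse order: at this point the netdevice notifier has not been registered yet, so jumping to the old err_netdevice label would have unregistered a notifier that was never set up. A condensed illustration of the idiom, with hypothetical step names:

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto undo_a;	/* never undo_b: b did not happen */
	return 0;
undo_a:
	undo_step_a();
out:
	return ret;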
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 54891370d18a..c2aa52638dcb 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | |||
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, | |||
306 | union pvrdma_cmd_resp rsp; | 306 | union pvrdma_cmd_resp rsp; |
307 | struct pvrdma_cmd_create_uc *cmd = &req.create_uc; | 307 | struct pvrdma_cmd_create_uc *cmd = &req.create_uc; |
308 | struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; | 308 | struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; |
309 | struct pvrdma_alloc_ucontext_resp uresp; | 309 | struct pvrdma_alloc_ucontext_resp uresp = {0}; |
310 | int ret; | 310 | int ret; |
311 | void *ptr; | 311 | void *ptr; |
312 | 312 | ||
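The = {0} initializer matters because uresp is later copied to user space wholesale; without it, struct padding and any field the kernel never assigns would leak uninitialized stack bytes. Either of these is a safe pattern:

	struct pvrdma_alloc_ucontext_resp uresp = {0};
	/* or, equivalently: */
	memset(&uresp, 0, sizeof(uresp));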
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 342e78163613..4abdeb359fb4 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c | |||
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev) | |||
555 | } | 555 | } |
556 | 556 | ||
557 | spin_lock_bh(&dev_list_lock); | 557 | spin_lock_bh(&dev_list_lock); |
558 | list_add_tail(&rxe_dev_list, &rxe->list); | 558 | list_add_tail(&rxe->list, &rxe_dev_list); |
559 | spin_unlock_bh(&dev_list_lock); | 559 | spin_unlock_bh(&dev_list_lock); |
560 | return rxe; | 560 | return rxe; |
561 | } | 561 | } |
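list_add_tail(new, head) inserts new before head, i.e. at the tail of the list that head anchors; with the arguments swapped, the global rxe_dev_list head itself was being spliced onto each device's node. A toy illustration of the correct call (names hypothetical):

	#include <linux/list.h>

	static LIST_HEAD(dev_list);		/* global list head */

	struct demo_dev {
		struct list_head list;		/* per-device node */
	};

	static void demo_register(struct demo_dev *d)
	{
		list_add_tail(&d->list, &dev_list);	/* node first, head second */
	}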
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 486d576e55bc..44b2108253bd 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c | |||
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp) | |||
813 | del_timer_sync(&qp->rnr_nak_timer); | 813 | del_timer_sync(&qp->rnr_nak_timer); |
814 | 814 | ||
815 | rxe_cleanup_task(&qp->req.task); | 815 | rxe_cleanup_task(&qp->req.task); |
816 | if (qp_type(qp) == IB_QPT_RC) | 816 | rxe_cleanup_task(&qp->comp.task); |
817 | rxe_cleanup_task(&qp->comp.task); | ||
818 | 817 | ||
819 | /* flush out any receive wr's or pending requests */ | 818 | /* flush out any receive wr's or pending requests */ |
820 | __rxe_do_task(&qp->req.task); | 819 | __rxe_do_task(&qp->req.task); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9104e6b8cac9..e71af717e71b 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, | |||
651 | SHOST_DIX_GUARD_CRC); | 651 | SHOST_DIX_GUARD_CRC); |
652 | } | 652 | } |
653 | 653 | ||
654 | /* | ||
655 | * Limit the sg_tablesize and max_sectors based on the device | ||
656 | * max fastreg page list length. | ||
657 | */ | ||
658 | shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, | ||
659 | ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); | ||
660 | |||
661 | if (iscsi_host_add(shost, | 654 | if (iscsi_host_add(shost, |
662 | ib_conn->device->ib_device->dma_device)) { | 655 | ib_conn->device->ib_device->dma_device)) { |
663 | mutex_unlock(&iser_conn->state_mutex); | 656 | mutex_unlock(&iser_conn->state_mutex); |
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, | |||
679 | max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; | 672 | max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; |
680 | shost->max_sectors = min(iser_max_sectors, max_fr_sectors); | 673 | shost->max_sectors = min(iser_max_sectors, max_fr_sectors); |
681 | 674 | ||
675 | iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", | ||
676 | iser_conn, shost->sg_tablesize, | ||
677 | shost->max_sectors); | ||
678 | |||
682 | if (cmds_max > max_cmds) { | 679 | if (cmds_max > max_cmds) { |
683 | iser_info("cmds_max changed from %u to %u\n", | 680 | iser_info("cmds_max changed from %u to %u\n", |
684 | cmds_max, max_cmds); | 681 | cmds_max, max_cmds); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 0be6a7c5ddb5..9d0b22ad58c1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -496,7 +496,6 @@ struct ib_conn { | |||
496 | * @rx_descs: rx buffers array (cyclic buffer) | 496 | * @rx_descs: rx buffers array (cyclic buffer) |
497 | * @num_rx_descs: number of rx descriptors | 497 | * @num_rx_descs: number of rx descriptors |
498 | * @scsi_sg_tablesize: scsi host sg_tablesize | 498 | * @scsi_sg_tablesize: scsi host sg_tablesize |
499 | * @scsi_max_sectors: scsi host max sectors | ||
500 | */ | 499 | */ |
501 | struct iser_conn { | 500 | struct iser_conn { |
502 | struct ib_conn ib_conn; | 501 | struct ib_conn ib_conn; |
@@ -519,7 +518,6 @@ struct iser_conn { | |||
519 | struct iser_rx_desc *rx_descs; | 518 | struct iser_rx_desc *rx_descs; |
520 | u32 num_rx_descs; | 519 | u32 num_rx_descs; |
521 | unsigned short scsi_sg_tablesize; | 520 | unsigned short scsi_sg_tablesize; |
522 | unsigned int scsi_max_sectors; | ||
523 | bool snd_w_inv; | 521 | bool snd_w_inv; |
524 | }; | 522 | }; |
525 | 523 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 8ae7a3beddb7..6a9d1cb548ee 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, | |||
707 | sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, | 707 | sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, |
708 | device->ib_device->attrs.max_fast_reg_page_list_len); | 708 | device->ib_device->attrs.max_fast_reg_page_list_len); |
709 | 709 | ||
710 | if (sg_tablesize > sup_sg_tablesize) { | 710 | iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); |
711 | sg_tablesize = sup_sg_tablesize; | ||
712 | iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; | ||
713 | } else { | ||
714 | iser_conn->scsi_max_sectors = max_sectors; | ||
715 | } | ||
716 | |||
717 | iser_conn->scsi_sg_tablesize = sg_tablesize; | ||
718 | |||
719 | iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", | ||
720 | iser_conn, iser_conn->scsi_sg_tablesize, | ||
721 | iser_conn->scsi_max_sectors); | ||
722 | } | 711 | } |
723 | 712 | ||
724 | /** | 713 | /** |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 8ddc07123193..79bf48477ddb 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, | |||
371 | struct srp_fr_desc *d; | 371 | struct srp_fr_desc *d; |
372 | struct ib_mr *mr; | 372 | struct ib_mr *mr; |
373 | int i, ret = -EINVAL; | 373 | int i, ret = -EINVAL; |
374 | enum ib_mr_type mr_type; | ||
374 | 375 | ||
375 | if (pool_size <= 0) | 376 | if (pool_size <= 0) |
376 | goto err; | 377 | goto err; |
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, | |||
384 | spin_lock_init(&pool->lock); | 385 | spin_lock_init(&pool->lock); |
385 | INIT_LIST_HEAD(&pool->free_list); | 386 | INIT_LIST_HEAD(&pool->free_list); |
386 | 387 | ||
388 | if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) | ||
389 | mr_type = IB_MR_TYPE_SG_GAPS; | ||
390 | else | ||
391 | mr_type = IB_MR_TYPE_MEM_REG; | ||
392 | |||
387 | for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { | 393 | for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { |
388 | mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, | 394 | mr = ib_alloc_mr(pd, mr_type, max_page_list_len); |
389 | max_page_list_len); | ||
390 | if (IS_ERR(mr)) { | 395 | if (IS_ERR(mr)) { |
391 | ret = PTR_ERR(mr); | 396 | ret = PTR_ERR(mr); |
392 | if (ret == -ENOMEM) | 397 | if (ret == -ENOMEM) |
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void) | |||
3694 | indirect_sg_entries = cmd_sg_entries; | 3699 | indirect_sg_entries = cmd_sg_entries; |
3695 | } | 3700 | } |
3696 | 3701 | ||
3702 | if (indirect_sg_entries > SG_MAX_SEGMENTS) { | ||
3703 | pr_warn("Clamping indirect_sg_entries to %u\n", | ||
3704 | SG_MAX_SEGMENTS); | ||
3705 | indirect_sg_entries = SG_MAX_SEGMENTS; | ||
3706 | } | ||
3707 | |||
3697 | srp_remove_wq = create_workqueue("srp_remove"); | 3708 | srp_remove_wq = create_workqueue("srp_remove"); |
3698 | if (!srp_remove_wq) { | 3709 | if (!srp_remove_wq) { |
3699 | ret = -ENOMEM; | 3710 | ret = -ENOMEM; |
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 1a1d99704fe6..296f1411fe84 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c | |||
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others) | |||
11297 | ((CAPI_MSG *) msg)->header.ncci = 0; | 11297 | ((CAPI_MSG *) msg)->header.ncci = 0; |
11298 | ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; | 11298 | ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; |
11299 | ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; | 11299 | ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; |
11300 | PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); | 11300 | ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff; |
11301 | ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8; | ||
11301 | ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; | 11302 | ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; |
11302 | w = api_put(notify_plci->appl, (CAPI_MSG *) msg); | 11303 | w = api_put(notify_plci->appl, (CAPI_MSG *) msg); |
11303 | if (w != _QUEUE_FULL) | 11304 | if (w != _QUEUE_FULL) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 82821ee0d57f..01175dac0db6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev) | |||
5291 | if (start_readonly && mddev->ro == 0) | 5291 | if (start_readonly && mddev->ro == 0) |
5292 | mddev->ro = 2; /* read-only, but switch on first write */ | 5292 | mddev->ro = 2; /* read-only, but switch on first write */ |
5293 | 5293 | ||
5294 | /* | ||
5295 | * NOTE: some pers->run(), for example r5l_recovery_log(), wakes | ||
5296 | * up mddev->thread. It is important to initialize critical | ||
5297 | * resources for mddev->thread BEFORE calling pers->run(). | ||
5298 | */ | ||
5294 | err = pers->run(mddev); | 5299 | err = pers->run(mddev); |
5295 | if (err) | 5300 | if (err) |
5296 | pr_warn("md: pers->run() failed ...\n"); | 5301 | pr_warn("md: pers->run() failed ...\n"); |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0e8ed2c327b0..302dea3296ba 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -162,6 +162,8 @@ struct r5l_log { | |||
162 | 162 | ||
163 | /* to submit async io_units, to fulfill ordering of flush */ | 163 | /* to submit async io_units, to fulfill ordering of flush */ |
164 | struct work_struct deferred_io_work; | 164 | struct work_struct deferred_io_work; |
165 | /* to disable write back while in degraded mode */ | ||
166 | struct work_struct disable_writeback_work; | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | /* | 169 | /* |
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work) | |||
611 | r5l_do_submit_io(log, io); | 613 | r5l_do_submit_io(log, io); |
612 | } | 614 | } |
613 | 615 | ||
616 | static void r5c_disable_writeback_async(struct work_struct *work) | ||
617 | { | ||
618 | struct r5l_log *log = container_of(work, struct r5l_log, | ||
619 | disable_writeback_work); | ||
620 | struct mddev *mddev = log->rdev->mddev; | ||
621 | |||
622 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | ||
623 | return; | ||
624 | pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", | ||
625 | mdname(mddev)); | ||
626 | mddev_suspend(mddev); | ||
627 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | ||
628 | mddev_resume(mddev); | ||
629 | } | ||
630 | |||
614 | static void r5l_submit_current_io(struct r5l_log *log) | 631 | static void r5l_submit_current_io(struct r5l_log *log) |
615 | { | 632 | { |
616 | struct r5l_io_unit *io = log->current_io; | 633 | struct r5l_io_unit *io = log->current_io; |
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log) | |||
1393 | next_checkpoint = r5c_calculate_new_cp(conf); | 1410 | next_checkpoint = r5c_calculate_new_cp(conf); |
1394 | spin_unlock_irq(&log->io_list_lock); | 1411 | spin_unlock_irq(&log->io_list_lock); |
1395 | 1412 | ||
1396 | BUG_ON(reclaimable < 0); | ||
1397 | |||
1398 | if (reclaimable == 0 || !write_super) | 1413 | if (reclaimable == 0 || !write_super) |
1399 | return; | 1414 | return; |
1400 | 1415 | ||
@@ -2062,7 +2077,7 @@ static int | |||
2062 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | 2077 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, |
2063 | struct r5l_recovery_ctx *ctx) | 2078 | struct r5l_recovery_ctx *ctx) |
2064 | { | 2079 | { |
2065 | struct stripe_head *sh, *next; | 2080 | struct stripe_head *sh; |
2066 | struct mddev *mddev = log->rdev->mddev; | 2081 | struct mddev *mddev = log->rdev->mddev; |
2067 | struct page *page; | 2082 | struct page *page; |
2068 | sector_t next_checkpoint = MaxSector; | 2083 | sector_t next_checkpoint = MaxSector; |
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
2076 | 2091 | ||
2077 | WARN_ON(list_empty(&ctx->cached_list)); | 2092 | WARN_ON(list_empty(&ctx->cached_list)); |
2078 | 2093 | ||
2079 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { | 2094 | list_for_each_entry(sh, &ctx->cached_list, lru) { |
2080 | struct r5l_meta_block *mb; | 2095 | struct r5l_meta_block *mb; |
2081 | int i; | 2096 | int i; |
2082 | int offset; | 2097 | int offset; |
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
2126 | ctx->pos = write_pos; | 2141 | ctx->pos = write_pos; |
2127 | ctx->seq += 1; | 2142 | ctx->seq += 1; |
2128 | next_checkpoint = sh->log_start; | 2143 | next_checkpoint = sh->log_start; |
2129 | list_del_init(&sh->lru); | ||
2130 | raid5_release_stripe(sh); | ||
2131 | } | 2144 | } |
2132 | log->next_checkpoint = next_checkpoint; | 2145 | log->next_checkpoint = next_checkpoint; |
2133 | __free_page(page); | 2146 | __free_page(page); |
2134 | return 0; | 2147 | return 0; |
2135 | } | 2148 | } |
2136 | 2149 | ||
2150 | static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, | ||
2151 | struct r5l_recovery_ctx *ctx) | ||
2152 | { | ||
2153 | struct mddev *mddev = log->rdev->mddev; | ||
2154 | struct r5conf *conf = mddev->private; | ||
2155 | struct stripe_head *sh, *next; | ||
2156 | |||
2157 | if (ctx->data_only_stripes == 0) | ||
2158 | return; | ||
2159 | |||
2160 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; | ||
2161 | |||
2162 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { | ||
2163 | r5c_make_stripe_write_out(sh); | ||
2164 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2165 | list_del_init(&sh->lru); | ||
2166 | raid5_release_stripe(sh); | ||
2167 | } | ||
2168 | |||
2169 | md_wakeup_thread(conf->mddev->thread); | ||
2170 | /* reuse conf->wait_for_quiescent in recovery */ | ||
2171 | wait_event(conf->wait_for_quiescent, | ||
2172 | atomic_read(&conf->active_stripes) == 0); | ||
2173 | |||
2174 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | ||
2175 | } | ||
2176 | |||
2137 | static int r5l_recovery_log(struct r5l_log *log) | 2177 | static int r5l_recovery_log(struct r5l_log *log) |
2138 | { | 2178 | { |
2139 | struct mddev *mddev = log->rdev->mddev; | 2179 | struct mddev *mddev = log->rdev->mddev; |
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log) | |||
2160 | pos = ctx.pos; | 2200 | pos = ctx.pos; |
2161 | ctx.seq += 10000; | 2201 | ctx.seq += 10000; |
2162 | 2202 | ||
2163 | if (ctx.data_only_stripes == 0) { | ||
2164 | log->next_checkpoint = ctx.pos; | ||
2165 | r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); | ||
2166 | ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); | ||
2167 | } | ||
2168 | 2203 | ||
2169 | if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) | 2204 | if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) |
2170 | pr_debug("md/raid:%s: starting from clean shutdown\n", | 2205 | pr_debug("md/raid:%s: starting from clean shutdown\n", |
2171 | mdname(mddev)); | 2206 | mdname(mddev)); |
2172 | else { | 2207 | else |
2173 | pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", | 2208 | pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", |
2174 | mdname(mddev), ctx.data_only_stripes, | 2209 | mdname(mddev), ctx.data_only_stripes, |
2175 | ctx.data_parity_stripes); | 2210 | ctx.data_parity_stripes); |
2176 | 2211 | ||
2177 | if (ctx.data_only_stripes > 0) | 2212 | if (ctx.data_only_stripes == 0) { |
2178 | if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { | 2213 | log->next_checkpoint = ctx.pos; |
2179 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", | 2214 | r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); |
2180 | mdname(mddev)); | 2215 | ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); |
2181 | return -EIO; | 2216 | } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { |
2182 | } | 2217 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", |
2218 | mdname(mddev)); | ||
2219 | return -EIO; | ||
2183 | } | 2220 | } |
2184 | 2221 | ||
2185 | log->log_start = ctx.pos; | 2222 | log->log_start = ctx.pos; |
2186 | log->seq = ctx.seq; | 2223 | log->seq = ctx.seq; |
2187 | log->last_checkpoint = pos; | 2224 | log->last_checkpoint = pos; |
2188 | r5l_write_super(log, pos); | 2225 | r5l_write_super(log, pos); |
2226 | |||
2227 | r5c_recovery_flush_data_only_stripes(log, &ctx); | ||
2189 | return 0; | 2228 | return 0; |
2190 | } | 2229 | } |
2191 | 2230 | ||
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, | |||
2247 | val > R5C_JOURNAL_MODE_WRITE_BACK) | 2286 | val > R5C_JOURNAL_MODE_WRITE_BACK) |
2248 | return -EINVAL; | 2287 | return -EINVAL; |
2249 | 2288 | ||
2289 | if (raid5_calc_degraded(conf) > 0 && | ||
2290 | val == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2291 | return -EINVAL; | ||
2292 | |||
2250 | mddev_suspend(mddev); | 2293 | mddev_suspend(mddev); |
2251 | conf->log->r5c_journal_mode = val; | 2294 | conf->log->r5c_journal_mode = val; |
2252 | mddev_resume(mddev); | 2295 | mddev_resume(mddev); |
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf, | |||
2301 | set_bit(STRIPE_R5C_CACHING, &sh->state); | 2344 | set_bit(STRIPE_R5C_CACHING, &sh->state); |
2302 | } | 2345 | } |
2303 | 2346 | ||
2347 | /* | ||
2348 | * When run in degraded mode, the array is set to write-through mode. | ||
2349 | * This check helps drain pending writes safely in the transition to | ||
2350 | * write-through mode. | ||
2351 | */ | ||
2352 | if (s->failed) { | ||
2353 | r5c_make_stripe_write_out(sh); | ||
2354 | return -EAGAIN; | ||
2355 | } | ||
2356 | |||
2304 | for (i = disks; i--; ) { | 2357 | for (i = disks; i--; ) { |
2305 | dev = &sh->dev[i]; | 2358 | dev = &sh->dev[i]; |
2306 | /* if non-overwrite, use writing-out phase */ | 2359 | /* if non-overwrite, use writing-out phase */ |
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh) | |||
2351 | struct page *p = sh->dev[i].orig_page; | 2404 | struct page *p = sh->dev[i].orig_page; |
2352 | 2405 | ||
2353 | sh->dev[i].orig_page = sh->dev[i].page; | 2406 | sh->dev[i].orig_page = sh->dev[i].page; |
2407 | clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); | ||
2408 | |||
2354 | if (!using_disk_info_extra_page) | 2409 | if (!using_disk_info_extra_page) |
2355 | put_page(p); | 2410 | put_page(p); |
2356 | } | 2411 | } |
@@ -2555,6 +2610,19 @@ ioerr: | |||
2555 | return ret; | 2610 | return ret; |
2556 | } | 2611 | } |
2557 | 2612 | ||
2613 | void r5c_update_on_rdev_error(struct mddev *mddev) | ||
2614 | { | ||
2615 | struct r5conf *conf = mddev->private; | ||
2616 | struct r5l_log *log = conf->log; | ||
2617 | |||
2618 | if (!log) | ||
2619 | return; | ||
2620 | |||
2621 | if (raid5_calc_degraded(conf) > 0 && | ||
2622 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) | ||
2623 | schedule_work(&log->disable_writeback_work); | ||
2624 | } | ||
2625 | |||
2558 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | 2626 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) |
2559 | { | 2627 | { |
2560 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 2628 | struct request_queue *q = bdev_get_queue(rdev->bdev); |
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | |||
2627 | spin_lock_init(&log->no_space_stripes_lock); | 2695 | spin_lock_init(&log->no_space_stripes_lock); |
2628 | 2696 | ||
2629 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); | 2697 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); |
2698 | INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); | ||
2630 | 2699 | ||
2631 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 2700 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; |
2632 | INIT_LIST_HEAD(&log->stripe_in_journal_list); | 2701 | INIT_LIST_HEAD(&log->stripe_in_journal_list); |
@@ -2659,6 +2728,7 @@ io_kc: | |||
2659 | 2728 | ||
2660 | void r5l_exit_log(struct r5l_log *log) | 2729 | void r5l_exit_log(struct r5l_log *log) |
2661 | { | 2730 | { |
2731 | flush_work(&log->disable_writeback_work); | ||
2662 | md_unregister_thread(&log->reclaim_thread); | 2732 | md_unregister_thread(&log->reclaim_thread); |
2663 | mempool_destroy(log->meta_pool); | 2733 | mempool_destroy(log->meta_pool); |
2664 | bioset_free(log->bs); | 2734 | bioset_free(log->bs); |
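Taken together, the raid5-cache hunks hand the write-back disable off to a work item because the rdev failure path runs in atomic context, while the mode flip must sleep inside mddev_suspend()/mddev_resume(); the flush_work() added to r5l_exit_log() guarantees the work cannot run against a freed log. A generic sketch of that lifecycle, with hypothetical demo_* names:

	#include <linux/workqueue.h>

	static void demo_disable_work(struct work_struct *work)
	{
		/* process context: may sleep, suspend array, flip mode, resume */
	}

	static DECLARE_WORK(demo_work, demo_disable_work);

	static void demo_on_rdev_error(void)
	{
		schedule_work(&demo_work);	/* atomic context: only schedule */
	}

	static void demo_teardown(void)
	{
		flush_work(&demo_work);		/* must finish before freeing state */
	}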
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36c13e4be9c9..3c7e106c12a2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, | |||
556 | * of the two sections, and some non-in_sync devices may | 556 | * of the two sections, and some non-in_sync devices may |
557 | * be insync in the section most affected by failed devices. | 557 | * be insync in the section most affected by failed devices. |
558 | */ | 558 | */ |
559 | static int calc_degraded(struct r5conf *conf) | 559 | int raid5_calc_degraded(struct r5conf *conf) |
560 | { | 560 | { |
561 | int degraded, degraded2; | 561 | int degraded, degraded2; |
562 | int i; | 562 | int i; |
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf) | |||
619 | if (conf->mddev->reshape_position == MaxSector) | 619 | if (conf->mddev->reshape_position == MaxSector) |
620 | return conf->mddev->degraded > conf->max_degraded; | 620 | return conf->mddev->degraded > conf->max_degraded; |
621 | 621 | ||
622 | degraded = calc_degraded(conf); | 622 | degraded = raid5_calc_degraded(conf); |
623 | if (degraded > conf->max_degraded) | 623 | if (degraded > conf->max_degraded) |
624 | return 1; | 624 | return 1; |
625 | return 0; | 625 | return 0; |
@@ -1015,7 +1015,17 @@ again: | |||
1015 | 1015 | ||
1016 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) | 1016 | if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) |
1017 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); | 1017 | WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); |
1018 | sh->dev[i].vec.bv_page = sh->dev[i].page; | 1018 | |
1019 | if (!op_is_write(op) && | ||
1020 | test_bit(R5_InJournal, &sh->dev[i].flags)) | ||
1021 | /* | ||
1022 | * issuing read for a page in journal, this | ||
1023 | * must be preparing for prexor in rmw; read | ||
1024 | * the data into orig_page | ||
1025 | */ | ||
1026 | sh->dev[i].vec.bv_page = sh->dev[i].orig_page; | ||
1027 | else | ||
1028 | sh->dev[i].vec.bv_page = sh->dev[i].page; | ||
1019 | bi->bi_vcnt = 1; | 1029 | bi->bi_vcnt = 1; |
1020 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; | 1030 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
1021 | bi->bi_io_vec[0].bv_offset = 0; | 1031 | bi->bi_io_vec[0].bv_offset = 0; |
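Reads of R5_InJournal devices are steered into orig_page (and flagged R5_OrigPageUPTDODATE in the next hunk) because the rmw path needs both copies: page holds the new, journaled contents, while orig_page receives the old on-disk data for the prexor step. The underlying parity update, reduced to one byte for illustration:

	/* RAID5 read-modify-write parity update (illustrative; the real
	 * code xors whole 4K pages, not single bytes):
	 */
	static unsigned char rmw_parity(unsigned char old_parity,
					unsigned char old_data,
					unsigned char new_data)
	{
		return old_parity ^ old_data ^ new_data;
	}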
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi) | |||
2380 | } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) | 2390 | } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
2381 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); | 2391 | clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); |
2382 | 2392 | ||
2393 | if (test_bit(R5_InJournal, &sh->dev[i].flags)) | ||
2394 | /* | ||
2395 | * end read for a page in journal, this | ||
2396 | * must be preparing for prexor in rmw | ||
2397 | */ | ||
2398 | set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); | ||
2399 | |||
2383 | if (atomic_read(&rdev->read_errors)) | 2400 | if (atomic_read(&rdev->read_errors)) |
2384 | atomic_set(&rdev->read_errors, 0); | 2401 | atomic_set(&rdev->read_errors, 0); |
2385 | } else { | 2402 | } else { |
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2538 | 2555 | ||
2539 | spin_lock_irqsave(&conf->device_lock, flags); | 2556 | spin_lock_irqsave(&conf->device_lock, flags); |
2540 | clear_bit(In_sync, &rdev->flags); | 2557 | clear_bit(In_sync, &rdev->flags); |
2541 | mddev->degraded = calc_degraded(conf); | 2558 | mddev->degraded = raid5_calc_degraded(conf); |
2542 | spin_unlock_irqrestore(&conf->device_lock, flags); | 2559 | spin_unlock_irqrestore(&conf->device_lock, flags); |
2543 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 2560 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
2544 | 2561 | ||
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) | |||
2552 | bdevname(rdev->bdev, b), | 2569 | bdevname(rdev->bdev, b), |
2553 | mdname(mddev), | 2570 | mdname(mddev), |
2554 | conf->raid_disks - mddev->degraded); | 2571 | conf->raid_disks - mddev->degraded); |
2572 | r5c_update_on_rdev_error(mddev); | ||
2555 | } | 2573 | } |
2556 | 2574 | ||
2557 | /* | 2575 | /* |
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) | |||
2880 | return r_sector; | 2898 | return r_sector; |
2881 | } | 2899 | } |
2882 | 2900 | ||
2901 | /* | ||
2902 | * There are cases where we want handle_stripe_dirtying() and | ||
2903 | * schedule_reconstruction() to delay towrite to some dev of a stripe. | ||
2904 | * | ||
2905 | * This function checks whether we want to delay the towrite. Specifically, | ||
2906 | * we delay the towrite when: | ||
2907 | * | ||
2908 | * 1. degraded stripe has a non-overwrite to the missing dev, AND this | ||
2909 | * stripe has data in journal (for other devices). | ||
2910 | * | ||
2911 | * In this case, when reading data for the non-overwrite dev, it is | ||
2912 | * necessary to handle complex rmw of write back cache (prexor with | ||
2913 | * orig_page, and xor with page). To keep read path simple, we would | ||
2914 | * like to flush data in journal to RAID disks first, so complex rmw | ||
2915 | * is handled in the write path (handle_stripe_dirtying). | ||
2916 | * | ||
2917 | */ | ||
2918 | static inline bool delay_towrite(struct r5dev *dev, | ||
2919 | struct stripe_head_state *s) | ||
2920 | { | ||
2921 | return !test_bit(R5_OVERWRITE, &dev->flags) && | ||
2922 | !test_bit(R5_Insync, &dev->flags) && s->injournal; | ||
2923 | } | ||
2924 | |||
2883 | static void | 2925 | static void |
2884 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | 2926 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
2885 | int rcw, int expand) | 2927 | int rcw, int expand) |
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2900 | for (i = disks; i--; ) { | 2942 | for (i = disks; i--; ) { |
2901 | struct r5dev *dev = &sh->dev[i]; | 2943 | struct r5dev *dev = &sh->dev[i]; |
2902 | 2944 | ||
2903 | if (dev->towrite) { | 2945 | if (dev->towrite && !delay_towrite(dev, s)) { |
2904 | set_bit(R5_LOCKED, &dev->flags); | 2946 | set_bit(R5_LOCKED, &dev->flags); |
2905 | set_bit(R5_Wantdrain, &dev->flags); | 2947 | set_bit(R5_Wantdrain, &dev->flags); |
2906 | if (!expand) | 2948 | if (!expand) |
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx) | |||
3295 | return rv; | 3337 | return rv; |
3296 | } | 3338 | } |
3297 | 3339 | ||
3298 | /* fetch_block - checks the given member device to see if its data needs | ||
3299 | * to be read or computed to satisfy a request. | ||
3300 | * | ||
3301 | * Returns 1 when no more member devices need to be checked, otherwise returns | ||
3302 | * 0 to tell the loop in handle_stripe_fill to continue | ||
3303 | */ | ||
3304 | |||
3305 | static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | 3340 | static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, |
3306 | int disk_idx, int disks) | 3341 | int disk_idx, int disks) |
3307 | { | 3342 | { |
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3392 | return 0; | 3427 | return 0; |
3393 | } | 3428 | } |
3394 | 3429 | ||
3430 | /* fetch_block - checks the given member device to see if its data needs | ||
3431 | * to be read or computed to satisfy a request. | ||
3432 | * | ||
3433 | * Returns 1 when no more member devices need to be checked, otherwise returns | ||
3434 | * 0 to tell the loop in handle_stripe_fill to continue | ||
3435 | */ | ||
3395 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, | 3436 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, |
3396 | int disk_idx, int disks) | 3437 | int disk_idx, int disks) |
3397 | { | 3438 | { |
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
3478 | * midst of changing due to a write | 3519 | * midst of changing due to a write |
3479 | */ | 3520 | */ |
3480 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && | 3521 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && |
3481 | !sh->reconstruct_state) | 3522 | !sh->reconstruct_state) { |
3523 | |||
3524 | /* | ||
3525 | * For degraded stripe with data in journal, do not handle | ||
3526 | * read requests yet, instead, flush the stripe to raid | ||
3527 | * disks first, this avoids handling complex rmw of write | ||
3528 | * back cache (prexor with orig_page, and then xor with | ||
3529 | * page) in the read path | ||
3530 | */ | ||
3531 | if (s->injournal && s->failed) { | ||
3532 | if (test_bit(STRIPE_R5C_CACHING, &sh->state)) | ||
3533 | r5c_make_stripe_write_out(sh); | ||
3534 | goto out; | ||
3535 | } | ||
3536 | |||
3482 | for (i = disks; i--; ) | 3537 | for (i = disks; i--; ) |
3483 | if (fetch_block(sh, s, i, disks)) | 3538 | if (fetch_block(sh, s, i, disks)) |
3484 | break; | 3539 | break; |
3540 | } | ||
3541 | out: | ||
3485 | set_bit(STRIPE_HANDLE, &sh->state); | 3542 | set_bit(STRIPE_HANDLE, &sh->state); |
3486 | } | 3543 | } |
3487 | 3544 | ||
@@ -3594,6 +3651,21 @@ unhash: | |||
3594 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); | 3651 | break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); |
3595 | } | 3652 | } |
3596 | 3653 | ||
3654 | /* | ||
3655 | * For RMW in write back cache, we need extra page in prexor to store the | ||
3656 | * old data. This page is stored in dev->orig_page. | ||
3657 | * | ||
3658 | * This function checks whether we have data for prexor. The exact logic | ||
3659 | * is: | ||
3660 | * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE) | ||
3661 | */ | ||
3662 | static inline bool uptodate_for_rmw(struct r5dev *dev) | ||
3663 | { | ||
3664 | return (test_bit(R5_UPTODATE, &dev->flags)) && | ||
3665 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3666 | test_bit(R5_OrigPageUPTDODATE, &dev->flags)); | ||
3667 | } | ||
3668 | |||
3597 | static int handle_stripe_dirtying(struct r5conf *conf, | 3669 | static int handle_stripe_dirtying(struct r5conf *conf, |
3598 | struct stripe_head *sh, | 3670 | struct stripe_head *sh, |
3599 | struct stripe_head_state *s, | 3671 | struct stripe_head_state *s, |
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3622 | } else for (i = disks; i--; ) { | 3694 | } else for (i = disks; i--; ) { |
3623 | /* would I have to read this buffer for read_modify_write */ | 3695 | /* would I have to read this buffer for read_modify_write */ |
3624 | struct r5dev *dev = &sh->dev[i]; | 3696 | struct r5dev *dev = &sh->dev[i]; |
3625 | if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || | 3697 | if (((dev->towrite && !delay_towrite(dev, s)) || |
3698 | i == sh->pd_idx || i == sh->qd_idx || | ||
3626 | test_bit(R5_InJournal, &dev->flags)) && | 3699 | test_bit(R5_InJournal, &dev->flags)) && |
3627 | !test_bit(R5_LOCKED, &dev->flags) && | 3700 | !test_bit(R5_LOCKED, &dev->flags) && |
3628 | !((test_bit(R5_UPTODATE, &dev->flags) && | 3701 | !(uptodate_for_rmw(dev) || |
3629 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3630 | dev->page != dev->orig_page)) || | ||
3631 | test_bit(R5_Wantcompute, &dev->flags))) { | 3702 | test_bit(R5_Wantcompute, &dev->flags))) { |
3632 | if (test_bit(R5_Insync, &dev->flags)) | 3703 | if (test_bit(R5_Insync, &dev->flags)) |
3633 | rmw++; | 3704 | rmw++; |
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3639 | i != sh->pd_idx && i != sh->qd_idx && | 3710 | i != sh->pd_idx && i != sh->qd_idx && |
3640 | !test_bit(R5_LOCKED, &dev->flags) && | 3711 | !test_bit(R5_LOCKED, &dev->flags) && |
3641 | !(test_bit(R5_UPTODATE, &dev->flags) || | 3712 | !(test_bit(R5_UPTODATE, &dev->flags) || |
3642 | test_bit(R5_InJournal, &dev->flags) || | ||
3643 | test_bit(R5_Wantcompute, &dev->flags))) { | 3713 | test_bit(R5_Wantcompute, &dev->flags))) { |
3644 | if (test_bit(R5_Insync, &dev->flags)) | 3714 | if (test_bit(R5_Insync, &dev->flags)) |
3645 | rcw++; | 3715 | rcw++; |
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3689 | 3759 | ||
3690 | for (i = disks; i--; ) { | 3760 | for (i = disks; i--; ) { |
3691 | struct r5dev *dev = &sh->dev[i]; | 3761 | struct r5dev *dev = &sh->dev[i]; |
3692 | if ((dev->towrite || | 3762 | if (((dev->towrite && !delay_towrite(dev, s)) || |
3693 | i == sh->pd_idx || i == sh->qd_idx || | 3763 | i == sh->pd_idx || i == sh->qd_idx || |
3694 | test_bit(R5_InJournal, &dev->flags)) && | 3764 | test_bit(R5_InJournal, &dev->flags)) && |
3695 | !test_bit(R5_LOCKED, &dev->flags) && | 3765 | !test_bit(R5_LOCKED, &dev->flags) && |
3696 | !((test_bit(R5_UPTODATE, &dev->flags) && | 3766 | !(uptodate_for_rmw(dev) || |
3697 | (!test_bit(R5_InJournal, &dev->flags) || | ||
3698 | dev->page != dev->orig_page)) || | ||
3699 | test_bit(R5_Wantcompute, &dev->flags)) && | 3767 | test_bit(R5_Wantcompute, &dev->flags)) && |
3700 | test_bit(R5_Insync, &dev->flags)) { | 3768 | test_bit(R5_Insync, &dev->flags)) { |
3701 | if (test_bit(STRIPE_PREREAD_ACTIVE, | 3769 | if (test_bit(STRIPE_PREREAD_ACTIVE, |
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf, | |||
3722 | i != sh->pd_idx && i != sh->qd_idx && | 3790 | i != sh->pd_idx && i != sh->qd_idx && |
3723 | !test_bit(R5_LOCKED, &dev->flags) && | 3791 | !test_bit(R5_LOCKED, &dev->flags) && |
3724 | !(test_bit(R5_UPTODATE, &dev->flags) || | 3792 | !(test_bit(R5_UPTODATE, &dev->flags) || |
3725 | test_bit(R5_InJournal, &dev->flags) || | ||
3726 | test_bit(R5_Wantcompute, &dev->flags))) { | 3793 | test_bit(R5_Wantcompute, &dev->flags))) { |
3727 | rcw++; | 3794 | rcw++; |
3728 | if (test_bit(R5_Insync, &dev->flags) && | 3795 | if (test_bit(R5_Insync, &dev->flags) && |
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev) | |||
7025 | /* | 7092 | /* |
7026 | * 0 for a fully functional array, 1 or 2 for a degraded array. | 7093 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
7027 | */ | 7094 | */ |
7028 | mddev->degraded = calc_degraded(conf); | 7095 | mddev->degraded = raid5_calc_degraded(conf); |
7029 | 7096 | ||
7030 | if (has_failed(conf)) { | 7097 | if (has_failed(conf)) { |
7031 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", | 7098 | pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", |
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev) | |||
7272 | } | 7339 | } |
7273 | } | 7340 | } |
7274 | spin_lock_irqsave(&conf->device_lock, flags); | 7341 | spin_lock_irqsave(&conf->device_lock, flags); |
7275 | mddev->degraded = calc_degraded(conf); | 7342 | mddev->degraded = raid5_calc_degraded(conf); |
7276 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7343 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7277 | print_raid5_conf(conf); | 7344 | print_raid5_conf(conf); |
7278 | return count; | 7345 | return count; |
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev) | |||
7632 | * pre and post number of devices. | 7699 | * pre and post number of devices. |
7633 | */ | 7700 | */ |
7634 | spin_lock_irqsave(&conf->device_lock, flags); | 7701 | spin_lock_irqsave(&conf->device_lock, flags); |
7635 | mddev->degraded = calc_degraded(conf); | 7702 | mddev->degraded = raid5_calc_degraded(conf); |
7636 | spin_unlock_irqrestore(&conf->device_lock, flags); | 7703 | spin_unlock_irqrestore(&conf->device_lock, flags); |
7637 | } | 7704 | } |
7638 | mddev->raid_disks = conf->raid_disks; | 7705 | mddev->raid_disks = conf->raid_disks; |
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
7720 | } else { | 7787 | } else { |
7721 | int d; | 7788 | int d; |
7722 | spin_lock_irq(&conf->device_lock); | 7789 | spin_lock_irq(&conf->device_lock); |
7723 | mddev->degraded = calc_degraded(conf); | 7790 | mddev->degraded = raid5_calc_degraded(conf); |
7724 | spin_unlock_irq(&conf->device_lock); | 7791 | spin_unlock_irq(&conf->device_lock); |
7725 | for (d = conf->raid_disks ; | 7792 | for (d = conf->raid_disks ; |
7726 | d < conf->raid_disks - mddev->delta_disks; | 7793 | d < conf->raid_disks - mddev->delta_disks; |
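The write-back-cache rmw logic above hinges on two small predicates, delay_towrite() and uptodate_for_rmw(). A minimal userspace sketch of the latter, with plain bitmasks standing in for the r5dev flag word (the flag names are real, including the kernel's own UPTDODATE spelling; the bit encodings here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative encodings; the kernel's enum r5dev_flags differs. */
    #define R5_UPTODATE          (1u << 0)
    #define R5_InJournal         (1u << 1)
    #define R5_OrigPageUPTDODATE (1u << 2)  /* spelled as in the kernel */

    /* Mirrors uptodate_for_rmw(): a block can feed prexor only if its
     * page is up to date and, when it lives in the journal, orig_page
     * also holds the latest data from the raid disk. */
    static bool uptodate_for_rmw(unsigned int flags)
    {
        return (flags & R5_UPTODATE) &&
               (!(flags & R5_InJournal) ||
                (flags & R5_OrigPageUPTDODATE));
    }

    int main(void)
    {
        printf("%d\n", uptodate_for_rmw(R5_UPTODATE));                /* 1 */
        printf("%d\n", uptodate_for_rmw(R5_UPTODATE | R5_InJournal)); /* 0 */
        printf("%d\n", uptodate_for_rmw(R5_UPTODATE | R5_InJournal |
                                        R5_OrigPageUPTDODATE));       /* 1 */
        return 0;
    }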
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index ed8e1362ab36..1440fa26e296 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -322,6 +322,11 @@ enum r5dev_flags { | |||
322 | * data and parity being written are in the journal | 322 | * data and parity being written are in the journal |
323 | * device | 323 | * device |
324 | */ | 324 | */ |
325 | R5_OrigPageUPTDODATE, /* with write back cache, we read old data into | ||
326 | * dev->orig_page for prexor. When this flag is | ||
327 | * set, orig_page contains latest data in the | ||
328 | * raid disk. | ||
329 | */ | ||
325 | }; | 330 | }; |
326 | 331 | ||
327 | /* | 332 | /* |
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, | |||
753 | extern struct stripe_head * | 758 | extern struct stripe_head * |
754 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, | 759 | raid5_get_active_stripe(struct r5conf *conf, sector_t sector, |
755 | int previous, int noblock, int noquiesce); | 760 | int previous, int noblock, int noquiesce); |
761 | extern int raid5_calc_degraded(struct r5conf *conf); | ||
756 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); | 762 | extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); |
757 | extern void r5l_exit_log(struct r5l_log *log); | 763 | extern void r5l_exit_log(struct r5l_log *log); |
758 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); | 764 | extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); |
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num); | |||
781 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); | 787 | extern void r5c_check_stripe_cache_usage(struct r5conf *conf); |
782 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); | 788 | extern void r5c_check_cached_full_stripe(struct r5conf *conf); |
783 | extern struct md_sysfs_entry r5c_journal_mode; | 789 | extern struct md_sysfs_entry r5c_journal_mode; |
790 | extern void r5c_update_on_rdev_error(struct mddev *mddev); | ||
784 | #endif | 791 | #endif |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 0ea4efb3de66..ebb5e391b800 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
@@ -30,8 +30,9 @@ | |||
30 | 30 | ||
31 | #include "cec-priv.h" | 31 | #include "cec-priv.h" |
32 | 32 | ||
33 | static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx); | 33 | static void cec_fill_msg_report_features(struct cec_adapter *adap, |
34 | static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx); | 34 | struct cec_msg *msg, |
35 | unsigned int la_idx); | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * 400 ms is the time it takes for one 16 byte message to be | 38 | * 400 ms is the time it takes for one 16 byte message to be |
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data) | |||
288 | 289 | ||
289 | /* Mark it as an error */ | 290 | /* Mark it as an error */ |
290 | data->msg.tx_ts = ktime_get_ns(); | 291 | data->msg.tx_ts = ktime_get_ns(); |
291 | data->msg.tx_status = CEC_TX_STATUS_ERROR | | 292 | data->msg.tx_status |= CEC_TX_STATUS_ERROR | |
292 | CEC_TX_STATUS_MAX_RETRIES; | 293 | CEC_TX_STATUS_MAX_RETRIES; |
294 | data->msg.tx_error_cnt++; | ||
293 | data->attempts = 0; | 295 | data->attempts = 0; |
294 | data->msg.tx_error_cnt = 1; | ||
295 | /* Queue transmitted message for monitoring purposes */ | 296 | /* Queue transmitted message for monitoring purposes */ |
296 | cec_queue_msg_monitor(data->adap, &data->msg, 1); | 297 | cec_queue_msg_monitor(data->adap, &data->msg, 1); |
297 | 298 | ||
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = { | |||
851 | [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, | 852 | [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, |
852 | [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, | 853 | [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, |
853 | [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, | 854 | [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, |
854 | [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST, | 855 | [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST, |
855 | [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, | 856 | [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, |
856 | }; | 857 | }; |
857 | 858 | ||
@@ -1250,30 +1251,49 @@ configured: | |||
1250 | for (i = 1; i < las->num_log_addrs; i++) | 1251 | for (i = 1; i < las->num_log_addrs; i++) |
1251 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | 1252 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; |
1252 | } | 1253 | } |
1254 | for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) | ||
1255 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | ||
1253 | adap->is_configured = true; | 1256 | adap->is_configured = true; |
1254 | adap->is_configuring = false; | 1257 | adap->is_configuring = false; |
1255 | cec_post_state_event(adap); | 1258 | cec_post_state_event(adap); |
1256 | mutex_unlock(&adap->lock); | ||
1257 | 1259 | ||
1260 | /* | ||
1261 | * Now post the Report Features and Report Physical Address broadcast | ||
1262 | * messages. Note that these are non-blocking transmits, meaning that | ||
1263 | * they are just queued up and once adap->lock is unlocked the main | ||
1264 | * thread will kick in and start transmitting these. | ||
1265 | * | ||
1266 | * If after this function is done (but before one or more of these | ||
1267 | * messages are actually transmitted) the CEC adapter is unconfigured, | ||
1268 | * then any remaining messages will be dropped by the main thread. | ||
1269 | */ | ||
1258 | for (i = 0; i < las->num_log_addrs; i++) { | 1270 | for (i = 0; i < las->num_log_addrs; i++) { |
1271 | struct cec_msg msg = {}; | ||
1272 | |||
1259 | if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || | 1273 | if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || |
1260 | (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) | 1274 | (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) |
1261 | continue; | 1275 | continue; |
1262 | 1276 | ||
1263 | /* | 1277 | msg.msg[0] = (las->log_addr[i] << 4) | 0x0f; |
1264 | * Report Features must come first according | 1278 | |
1265 | * to CEC 2.0 | 1279 | /* Report Features must come first according to CEC 2.0 */ |
1266 | */ | 1280 | if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED && |
1267 | if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED) | 1281 | adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) { |
1268 | cec_report_features(adap, i); | 1282 | cec_fill_msg_report_features(adap, &msg, i); |
1269 | cec_report_phys_addr(adap, i); | 1283 | cec_transmit_msg_fh(adap, &msg, NULL, false); |
1284 | } | ||
1285 | |||
1286 | /* Report Physical Address */ | ||
1287 | cec_msg_report_physical_addr(&msg, adap->phys_addr, | ||
1288 | las->primary_device_type[i]); | ||
1289 | dprintk(2, "config: la %d pa %x.%x.%x.%x\n", | ||
1290 | las->log_addr[i], | ||
1291 | cec_phys_addr_exp(adap->phys_addr)); | ||
1292 | cec_transmit_msg_fh(adap, &msg, NULL, false); | ||
1270 | } | 1293 | } |
1271 | for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) | ||
1272 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | ||
1273 | mutex_lock(&adap->lock); | ||
1274 | adap->kthread_config = NULL; | 1294 | adap->kthread_config = NULL; |
1275 | mutex_unlock(&adap->lock); | ||
1276 | complete(&adap->config_completion); | 1295 | complete(&adap->config_completion); |
1296 | mutex_unlock(&adap->lock); | ||
1277 | return 0; | 1297 | return 0; |
1278 | 1298 | ||
1279 | unconfigure: | 1299 | unconfigure: |
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs); | |||
1526 | 1546 | ||
1527 | /* High-level core CEC message handling */ | 1547 | /* High-level core CEC message handling */ |
1528 | 1548 | ||
1529 | /* Transmit the Report Features message */ | 1549 | /* Fill in the Report Features message */ |
1530 | static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx) | 1550 | static void cec_fill_msg_report_features(struct cec_adapter *adap, |
1551 | struct cec_msg *msg, | ||
1552 | unsigned int la_idx) | ||
1531 | { | 1553 | { |
1532 | struct cec_msg msg = { }; | ||
1533 | const struct cec_log_addrs *las = &adap->log_addrs; | 1554 | const struct cec_log_addrs *las = &adap->log_addrs; |
1534 | const u8 *features = las->features[la_idx]; | 1555 | const u8 *features = las->features[la_idx]; |
1535 | bool op_is_dev_features = false; | 1556 | bool op_is_dev_features = false; |
1536 | unsigned int idx; | 1557 | unsigned int idx; |
1537 | 1558 | ||
1538 | /* This is 2.0 and up only */ | ||
1539 | if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0) | ||
1540 | return 0; | ||
1541 | |||
1542 | /* Report Features */ | 1559 | /* Report Features */ |
1543 | msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; | 1560 | msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; |
1544 | msg.len = 4; | 1561 | msg->len = 4; |
1545 | msg.msg[1] = CEC_MSG_REPORT_FEATURES; | 1562 | msg->msg[1] = CEC_MSG_REPORT_FEATURES; |
1546 | msg.msg[2] = adap->log_addrs.cec_version; | 1563 | msg->msg[2] = adap->log_addrs.cec_version; |
1547 | msg.msg[3] = las->all_device_types[la_idx]; | 1564 | msg->msg[3] = las->all_device_types[la_idx]; |
1548 | 1565 | ||
1549 | /* Write RC Profiles first, then Device Features */ | 1566 | /* Write RC Profiles first, then Device Features */ |
1550 | for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { | 1567 | for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { |
1551 | msg.msg[msg.len++] = features[idx]; | 1568 | msg->msg[msg->len++] = features[idx]; |
1552 | if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { | 1569 | if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { |
1553 | if (op_is_dev_features) | 1570 | if (op_is_dev_features) |
1554 | break; | 1571 | break; |
1555 | op_is_dev_features = true; | 1572 | op_is_dev_features = true; |
1556 | } | 1573 | } |
1557 | } | 1574 | } |
1558 | return cec_transmit_msg(adap, &msg, false); | ||
1559 | } | ||
1560 | |||
1561 | /* Transmit the Report Physical Address message */ | ||
1562 | static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx) | ||
1563 | { | ||
1564 | const struct cec_log_addrs *las = &adap->log_addrs; | ||
1565 | struct cec_msg msg = { }; | ||
1566 | |||
1567 | /* Report Physical Address */ | ||
1568 | msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; | ||
1569 | cec_msg_report_physical_addr(&msg, adap->phys_addr, | ||
1570 | las->primary_device_type[la_idx]); | ||
1571 | dprintk(2, "config: la %d pa %x.%x.%x.%x\n", | ||
1572 | las->log_addr[la_idx], | ||
1573 | cec_phys_addr_exp(adap->phys_addr)); | ||
1574 | return cec_transmit_msg(adap, &msg, false); | ||
1575 | } | 1575 | } |
1576 | 1576 | ||
1577 | /* Transmit the Feature Abort message */ | 1577 | /* Transmit the Feature Abort message */ |
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
1777 | } | 1777 | } |
1778 | 1778 | ||
1779 | case CEC_MSG_GIVE_FEATURES: | 1779 | case CEC_MSG_GIVE_FEATURES: |
1780 | if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) | 1780 | if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0) |
1781 | return cec_report_features(adap, la_idx); | 1781 | return cec_feature_abort(adap, msg); |
1782 | return 0; | 1782 | cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx); |
1783 | return cec_transmit_msg(adap, &tx_cec_msg, false); | ||
1783 | 1784 | ||
1784 | default: | 1785 | default: |
1785 | /* | 1786 | /* |
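The refactor above splits message construction out of the transmit helpers so the configuration thread can queue Report Features and Report Physical Address without dropping adap->lock. The caller pattern, condensed from the hunk (all identifiers as in the patch; a sketch, not a drop-in):

    struct cec_msg msg = {};

    /* Header byte: initiator logical address in the high nibble,
     * broadcast destination (0xf) in the low nibble. */
    msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;

    /* Report Features is CEC 2.0+ only and must precede
     * Report Physical Address. */
    if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
        adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
            cec_fill_msg_report_features(adap, &msg, i);
            cec_transmit_msg_fh(adap, &msg, NULL, false); /* non-blocking */
    }

The cec_msg_size fix is independent: after the header and opcode, Report Current Latency carries a physical address (2 bytes), video latency (1) and low-latency mode (1), so its minimum length is 6, not 7; the seventh byte, audio output delay, is optional.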
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index bc5e8cfe7ca2..8f11d7e45993 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c | |||
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h, | |||
719 | skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, | 719 | skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, |
720 | ETH_ALEN); | 720 | ETH_ALEN); |
721 | skb_pull(h->priv->ule_skb, ETH_ALEN); | 721 | skb_pull(h->priv->ule_skb, ETH_ALEN); |
722 | } else { | ||
723 | /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */ | ||
724 | eth_zero_addr(dest_addr); | ||
722 | } | 725 | } |
723 | 726 | ||
724 | /* Handle ULE Extension Headers. */ | 727 | /* Handle ULE Extension Headers. */ |
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h, | |||
750 | if (!h->priv->ule_bridged) { | 753 | if (!h->priv->ule_bridged) { |
751 | skb_push(h->priv->ule_skb, ETH_HLEN); | 754 | skb_push(h->priv->ule_skb, ETH_HLEN); |
752 | h->ethh = (struct ethhdr *)h->priv->ule_skb->data; | 755 | h->ethh = (struct ethhdr *)h->priv->ule_skb->data; |
753 | if (!h->priv->ule_dbit) { | 756 | memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN); |
754 | /* | 757 | eth_zero_addr(h->ethh->h_source); |
755 | * dest_addr buffer is only valid if | ||
756 | * h->priv->ule_dbit == 0 | ||
757 | */ | ||
758 | memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN); | ||
759 | eth_zero_addr(h->ethh->h_source); | ||
760 | } else /* zeroize source and dest */ | ||
761 | memset(h->ethh, 0, ETH_ALEN * 2); | ||
762 | |||
763 | h->ethh->h_proto = htons(h->priv->ule_sndu_type); | 758 | h->ethh->h_proto = htons(h->priv->ule_sndu_type); |
764 | } | 759 | } |
765 | /* else: skb is in correct state; nothing to do. */ | 760 | /* else: skb is in correct state; nothing to do. */ |
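Zero-filling dest_addr up front is what lets the bridged-frame branch above drop its inner ule_dbit test: the buffer is now valid either way. The resulting shape, assembled from the two hunks (a sketch, not a drop-in):

    u8 dest_addr[ETH_ALEN];

    if (!h->priv->ule_dbit)
            /* D-bit clear: a destination MAC follows in the SNDU. */
            skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                      ETH_ALEN);
    else
            /* D-bit set: no address present, use all-zeros. */
            eth_zero_addr(dest_addr);

    /* ... later, unconditionally: */
    memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
    eth_zero_addr(h->ethh->h_source);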
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index b31fa6fae009..b979ea148251 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig | |||
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3 | |||
655 | config VIDEO_S5K4ECGX | 655 | config VIDEO_S5K4ECGX |
656 | tristate "Samsung S5K4ECGX sensor support" | 656 | tristate "Samsung S5K4ECGX sensor support" |
657 | depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API | 657 | depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API |
658 | select CRC32 | ||
658 | ---help--- | 659 | ---help--- |
659 | This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M | 660 | This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M |
660 | camera sensor with an embedded SoC image signal processor. | 661 | camera sensor with an embedded SoC image signal processor. |
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 59872b31f832..f4e92bdfe192 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c | |||
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = { | |||
2741 | * I2C Driver | 2741 | * I2C Driver |
2742 | */ | 2742 | */ |
2743 | 2743 | ||
2744 | #ifdef CONFIG_PM | 2744 | static int __maybe_unused smiapp_suspend(struct device *dev) |
2745 | |||
2746 | static int smiapp_suspend(struct device *dev) | ||
2747 | { | 2745 | { |
2748 | struct i2c_client *client = to_i2c_client(dev); | 2746 | struct i2c_client *client = to_i2c_client(dev); |
2749 | struct v4l2_subdev *subdev = i2c_get_clientdata(client); | 2747 | struct v4l2_subdev *subdev = i2c_get_clientdata(client); |
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev) | |||
2768 | return 0; | 2766 | return 0; |
2769 | } | 2767 | } |
2770 | 2768 | ||
2771 | static int smiapp_resume(struct device *dev) | 2769 | static int __maybe_unused smiapp_resume(struct device *dev) |
2772 | { | 2770 | { |
2773 | struct i2c_client *client = to_i2c_client(dev); | 2771 | struct i2c_client *client = to_i2c_client(dev); |
2774 | struct v4l2_subdev *subdev = i2c_get_clientdata(client); | 2772 | struct v4l2_subdev *subdev = i2c_get_clientdata(client); |
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev) | |||
2783 | return rval; | 2781 | return rval; |
2784 | } | 2782 | } |
2785 | 2783 | ||
2786 | #else | ||
2787 | |||
2788 | #define smiapp_suspend NULL | ||
2789 | #define smiapp_resume NULL | ||
2790 | |||
2791 | #endif /* CONFIG_PM */ | ||
2792 | |||
2793 | static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) | 2784 | static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) |
2794 | { | 2785 | { |
2795 | struct smiapp_hwconfig *hwcfg; | 2786 | struct smiapp_hwconfig *hwcfg; |
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client, | |||
2913 | if (IS_ERR(sensor->xshutdown)) | 2904 | if (IS_ERR(sensor->xshutdown)) |
2914 | return PTR_ERR(sensor->xshutdown); | 2905 | return PTR_ERR(sensor->xshutdown); |
2915 | 2906 | ||
2916 | pm_runtime_enable(&client->dev); | 2907 | rval = smiapp_power_on(&client->dev); |
2917 | 2908 | if (rval < 0) | |
2918 | rval = pm_runtime_get_sync(&client->dev); | 2909 | return rval; |
2919 | if (rval < 0) { | ||
2920 | rval = -ENODEV; | ||
2921 | goto out_power_off; | ||
2922 | } | ||
2923 | 2910 | ||
2924 | rval = smiapp_identify_module(sensor); | 2911 | rval = smiapp_identify_module(sensor); |
2925 | if (rval) { | 2912 | if (rval) { |
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client, | |||
3100 | if (rval < 0) | 3087 | if (rval < 0) |
3101 | goto out_media_entity_cleanup; | 3088 | goto out_media_entity_cleanup; |
3102 | 3089 | ||
3090 | pm_runtime_set_active(&client->dev); | ||
3091 | pm_runtime_get_noresume(&client->dev); | ||
3092 | pm_runtime_enable(&client->dev); | ||
3103 | pm_runtime_set_autosuspend_delay(&client->dev, 1000); | 3093 | pm_runtime_set_autosuspend_delay(&client->dev, 1000); |
3104 | pm_runtime_use_autosuspend(&client->dev); | 3094 | pm_runtime_use_autosuspend(&client->dev); |
3105 | pm_runtime_put_autosuspend(&client->dev); | 3095 | pm_runtime_put_autosuspend(&client->dev); |
@@ -3113,8 +3103,7 @@ out_cleanup: | |||
3113 | smiapp_cleanup(sensor); | 3103 | smiapp_cleanup(sensor); |
3114 | 3104 | ||
3115 | out_power_off: | 3105 | out_power_off: |
3116 | pm_runtime_put(&client->dev); | 3106 | smiapp_power_off(&client->dev); |
3117 | pm_runtime_disable(&client->dev); | ||
3118 | 3107 | ||
3119 | return rval; | 3108 | return rval; |
3120 | } | 3109 | } |
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client) | |||
3127 | 3116 | ||
3128 | v4l2_async_unregister_subdev(subdev); | 3117 | v4l2_async_unregister_subdev(subdev); |
3129 | 3118 | ||
3130 | pm_runtime_suspend(&client->dev); | ||
3131 | pm_runtime_disable(&client->dev); | 3119 | pm_runtime_disable(&client->dev); |
3120 | if (!pm_runtime_status_suspended(&client->dev)) | ||
3121 | smiapp_power_off(&client->dev); | ||
3122 | pm_runtime_set_suspended(&client->dev); | ||
3132 | 3123 | ||
3133 | for (i = 0; i < sensor->ssds_used; i++) { | 3124 | for (i = 0; i < sensor->ssds_used; i++) { |
3134 | v4l2_device_unregister_subdev(&sensor->ssds[i].sd); | 3125 | v4l2_device_unregister_subdev(&sensor->ssds[i].sd); |
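The probe path above is a common runtime-PM conversion: power the device by hand while probing, and hand it to the PM core, already marked active, only once registration can no longer fail. The idiom, condensed from the hunks with error paths trimmed (a sketch, not the full driver):

    rval = smiapp_power_on(&client->dev);      /* manual power-up */
    if (rval < 0)
            return rval;

    /* ... smiapp_identify_module(), subdev registration ... */

    pm_runtime_set_active(&client->dev);    /* status = actual state */
    pm_runtime_get_noresume(&client->dev);  /* hold a ref, no resume call */
    pm_runtime_enable(&client->dev);
    pm_runtime_set_autosuspend_delay(&client->dev, 1000);
    pm_runtime_use_autosuspend(&client->dev);
    pm_runtime_put_autosuspend(&client->dev);  /* may idle the sensor */

The __maybe_unused annotations serve the same goal as the removed #ifdef CONFIG_PM block: the suspend/resume handlers compile, and are silently discarded, even when power management is configured out.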
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3a0fe8cc64e9..48646a7f3fb0 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c | |||
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd) | |||
291 | tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); | 291 | tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); |
292 | tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); | 292 | tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); |
293 | 293 | ||
294 | /* Svideo should enable YCrCb output and disable GPCL output | 294 | /* |
295 | * For Composite and TV, it should be the reverse | 295 | * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For |
296 | * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK | ||
297 | * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the | ||
298 | * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set | ||
299 | * INTREQ/GPCL/VBLK to logic 1. | ||
296 | */ | 300 | */ |
297 | val = tvp5150_read(sd, TVP5150_MISC_CTL); | 301 | val = tvp5150_read(sd, TVP5150_MISC_CTL); |
298 | if (val < 0) { | 302 | if (val < 0) { |
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd) | |||
301 | } | 305 | } |
302 | 306 | ||
303 | if (decoder->input == TVP5150_SVIDEO) | 307 | if (decoder->input == TVP5150_SVIDEO) |
304 | val = (val & ~0x40) | 0x10; | 308 | val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK; |
305 | else | 309 | else |
306 | val = (val & ~0x10) | 0x40; | 310 | val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL; |
307 | tvp5150_write(sd, TVP5150_MISC_CTL, val); | 311 | tvp5150_write(sd, TVP5150_MISC_CTL, val); |
308 | }; | 312 | }; |
309 | 313 | ||
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = { | |||
455 | },{ /* Automatic offset and AGC enabled */ | 459 | },{ /* Automatic offset and AGC enabled */ |
456 | TVP5150_ANAL_CHL_CTL, 0x15 | 460 | TVP5150_ANAL_CHL_CTL, 0x15 |
457 | },{ /* Activate YCrCb output 0x9 or 0xd ? */ | 461 | },{ /* Activate YCrCb output 0x9 or 0xd ? */ |
458 | TVP5150_MISC_CTL, 0x6f | 462 | TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL | |
463 | TVP5150_MISC_CTL_INTREQ_OE | | ||
464 | TVP5150_MISC_CTL_YCBCR_OE | | ||
465 | TVP5150_MISC_CTL_SYNC_OE | | ||
466 | TVP5150_MISC_CTL_VBLANK | | ||
467 | TVP5150_MISC_CTL_CLOCK_OE, | ||
459 | },{ /* Activates video std autodetection for all standards */ | 468 | },{ /* Activates video std autodetection for all standards */ |
460 | TVP5150_AUTOSW_MSK, 0x0 | 469 | TVP5150_AUTOSW_MSK, 0x0 |
461 | },{ /* Default format: 0x47. For 4:2:2: 0x40 */ | 470 | },{ /* Default format: 0x47. For 4:2:2: 0x40 */ |
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, | |||
861 | 870 | ||
862 | f = &format->format; | 871 | f = &format->format; |
863 | 872 | ||
864 | tvp5150_reset(sd, 0); | ||
865 | |||
866 | f->width = decoder->rect.width; | 873 | f->width = decoder->rect.width; |
867 | f->height = decoder->rect.height / 2; | 874 | f->height = decoder->rect.height / 2; |
868 | 875 | ||
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = { | |||
1051 | static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) | 1058 | static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) |
1052 | { | 1059 | { |
1053 | struct tvp5150 *decoder = to_tvp5150(sd); | 1060 | struct tvp5150 *decoder = to_tvp5150(sd); |
1054 | /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ | 1061 | int val; |
1055 | int val = 0x09; | ||
1056 | |||
1057 | /* Output format: 8-bit 4:2:2 YUV with discrete sync */ | ||
1058 | if (decoder->mbus_type == V4L2_MBUS_PARALLEL) | ||
1059 | val = 0x0d; | ||
1060 | 1062 | ||
1061 | /* Initializes TVP5150 to its default values */ | 1063 | /* Enable or disable the video output signals. */ |
1062 | /* # set PCLK (27MHz) */ | 1064 | val = tvp5150_read(sd, TVP5150_MISC_CTL); |
1063 | tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); | 1065 | if (val < 0) |
1066 | return val; | ||
1067 | |||
1068 | val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE | | ||
1069 | TVP5150_MISC_CTL_CLOCK_OE); | ||
1070 | |||
1071 | if (enable) { | ||
1072 | /* | ||
1073 | * Enable the YCbCr and clock outputs. In discrete sync mode | ||
1074 | * (non-BT.656) additionally enable the sync outputs. | ||
1075 | */ | ||
1076 | val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE; | ||
1077 | if (decoder->mbus_type == V4L2_MBUS_PARALLEL) | ||
1078 | val |= TVP5150_MISC_CTL_SYNC_OE; | ||
1079 | } | ||
1064 | 1080 | ||
1065 | if (enable) | 1081 | tvp5150_write(sd, TVP5150_MISC_CTL, val); |
1066 | tvp5150_write(sd, TVP5150_MISC_CTL, val); | ||
1067 | else | ||
1068 | tvp5150_write(sd, TVP5150_MISC_CTL, 0x00); | ||
1069 | 1082 | ||
1070 | return 0; | 1083 | return 0; |
1071 | } | 1084 | } |
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c, | |||
1524 | res = core->hdl.error; | 1537 | res = core->hdl.error; |
1525 | goto err; | 1538 | goto err; |
1526 | } | 1539 | } |
1527 | v4l2_ctrl_handler_setup(&core->hdl); | ||
1528 | 1540 | ||
1529 | /* Default is no cropping */ | 1541 | /* Default is no cropping */ |
1530 | core->rect.top = 0; | 1542 | core->rect.top = 0; |
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c, | |||
1535 | core->rect.left = 0; | 1547 | core->rect.left = 0; |
1536 | core->rect.width = TVP5150_H_MAX; | 1548 | core->rect.width = TVP5150_H_MAX; |
1537 | 1549 | ||
1550 | tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */ | ||
1551 | |||
1538 | res = v4l2_async_register_subdev(sd); | 1552 | res = v4l2_async_register_subdev(sd); |
1539 | if (res < 0) | 1553 | if (res < 0) |
1540 | goto err; | 1554 | goto err; |
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h index 25a994944918..30a48c28d05a 100644 --- a/drivers/media/i2c/tvp5150_reg.h +++ b/drivers/media/i2c/tvp5150_reg.h | |||
@@ -9,6 +9,15 @@ | |||
9 | #define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ | 9 | #define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ |
10 | #define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ | 10 | #define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ |
11 | #define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ | 11 | #define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ |
12 | #define TVP5150_MISC_CTL_VBLK_GPCL BIT(7) | ||
13 | #define TVP5150_MISC_CTL_GPCL BIT(6) | ||
14 | #define TVP5150_MISC_CTL_INTREQ_OE BIT(5) | ||
15 | #define TVP5150_MISC_CTL_HVLK BIT(4) | ||
16 | #define TVP5150_MISC_CTL_YCBCR_OE BIT(3) | ||
17 | #define TVP5150_MISC_CTL_SYNC_OE BIT(2) | ||
18 | #define TVP5150_MISC_CTL_VBLANK BIT(1) | ||
19 | #define TVP5150_MISC_CTL_CLOCK_OE BIT(0) | ||
20 | |||
12 | #define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ | 21 | #define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ |
13 | 22 | ||
14 | /* Reserved 05h */ | 23 | /* Reserved 05h */ |
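The named bits make the old magic numbers in tvp5150.c auditable; a standalone, compilable check that the init value composed in the tvp5150.c hunk above equals the 0x6f the driver used to hard-code:

    #include <assert.h>

    #define BIT(nr) (1u << (nr))
    #define TVP5150_MISC_CTL_GPCL      BIT(6)  /* 0x40 */
    #define TVP5150_MISC_CTL_INTREQ_OE BIT(5)  /* 0x20 */
    #define TVP5150_MISC_CTL_YCBCR_OE  BIT(3)  /* 0x08 */
    #define TVP5150_MISC_CTL_SYNC_OE   BIT(2)  /* 0x04 */
    #define TVP5150_MISC_CTL_VBLANK    BIT(1)  /* 0x02 */
    #define TVP5150_MISC_CTL_CLOCK_OE  BIT(0)  /* 0x01 */

    int main(void)
    {
        /* The named-bit composition equals the old magic 0x6f. */
        assert((TVP5150_MISC_CTL_GPCL | TVP5150_MISC_CTL_INTREQ_OE |
                TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
                TVP5150_MISC_CTL_VBLANK | TVP5150_MISC_CTL_CLOCK_OE)
               == 0x6f);
        return 0;
    }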
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c index 979634000597..d5c911c09e2b 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.c +++ b/drivers/media/pci/cobalt/cobalt-driver.c | |||
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev) | |||
308 | static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) | 308 | static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) |
309 | { | 309 | { |
310 | free_irq(pci_dev->irq, (void *)cobalt); | 310 | free_irq(pci_dev->irq, (void *)cobalt); |
311 | 311 | pci_free_irq_vectors(pci_dev); | |
312 | if (cobalt->msi_enabled) | ||
313 | pci_disable_msi(pci_dev); | ||
314 | } | 312 | } |
315 | 313 | ||
316 | static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, | 314 | static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, |
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, | |||
387 | from being generated. */ | 385 | from being generated. */ |
388 | cobalt_set_interrupt(cobalt, false); | 386 | cobalt_set_interrupt(cobalt, false); |
389 | 387 | ||
390 | if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { | 388 | if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) { |
391 | cobalt_err("Could not enable MSI\n"); | 389 | cobalt_err("Could not enable MSI\n"); |
392 | cobalt->msi_enabled = false; | ||
393 | ret = -EIO; | 390 | ret = -EIO; |
394 | goto err_release; | 391 | goto err_release; |
395 | } | 392 | } |
396 | msi_config_show(cobalt, pci_dev); | 393 | msi_config_show(cobalt, pci_dev); |
397 | cobalt->msi_enabled = true; | ||
398 | 394 | ||
399 | /* Register IRQ */ | 395 | /* Register IRQ */ |
400 | if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, | 396 | if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, |
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h index ed00dc9d9399..00f773ec359a 100644 --- a/drivers/media/pci/cobalt/cobalt-driver.h +++ b/drivers/media/pci/cobalt/cobalt-driver.h | |||
@@ -287,8 +287,6 @@ struct cobalt { | |||
287 | u32 irq_none; | 287 | u32 irq_none; |
288 | u32 irq_full_fifo; | 288 | u32 irq_full_fifo; |
289 | 289 | ||
290 | bool msi_enabled; | ||
291 | |||
292 | /* omnitek dma */ | 290 | /* omnitek dma */ |
293 | int dma_channels; | 291 | int dma_channels; |
294 | int first_fifo_channel; | 292 | int first_fifo_channel; |
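pci_alloc_irq_vectors() records what it handed out, which is why the msi_enabled flag can go away: pci_free_irq_vectors() is always safe to call after a successful allocation. The modern pairing, sketched (pdev is the usual struct pci_dev pointer; the driver itself uses pdev->irq, which is equivalent to pci_irq_vector(pdev, 0) here):

    int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
    if (nvec < 1)
            return -EIO;    /* no MSI vector available */

    /* request_irq() on pci_irq_vector(pdev, 0), run the device ... */

    free_irq(pci_irq_vector(pdev, 0), cobalt);
    pci_free_irq_vectors(pdev);     /* no driver-side bookkeeping */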
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index 07fa08be9e99..d54ebe7e0215 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c | |||
@@ -97,14 +97,13 @@ struct pctv452e_state { | |||
97 | u8 c; /* transaction counter, wraps around... */ | 97 | u8 c; /* transaction counter, wraps around... */ |
98 | u8 initialized; /* set to 1 if 0x15 has been sent */ | 98 | u8 initialized; /* set to 1 if 0x15 has been sent */ |
99 | u16 last_rc_key; | 99 | u16 last_rc_key; |
100 | |||
101 | unsigned char data[80]; | ||
102 | }; | 100 | }; |
103 | 101 | ||
104 | static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, | 102 | static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, |
105 | unsigned int write_len, unsigned int read_len) | 103 | unsigned int write_len, unsigned int read_len) |
106 | { | 104 | { |
107 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 105 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
106 | u8 *buf; | ||
108 | u8 id; | 107 | u8 id; |
109 | unsigned int rlen; | 108 | unsigned int rlen; |
110 | int ret; | 109 | int ret; |
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, | |||
114 | return -EIO; | 113 | return -EIO; |
115 | } | 114 | } |
116 | 115 | ||
117 | mutex_lock(&state->ca_mutex); | 116 | buf = kmalloc(64, GFP_KERNEL); |
117 | if (!buf) | ||
118 | return -ENOMEM; | ||
119 | |||
118 | id = state->c++; | 120 | id = state->c++; |
119 | 121 | ||
120 | state->data[0] = SYNC_BYTE_OUT; | 122 | buf[0] = SYNC_BYTE_OUT; |
121 | state->data[1] = id; | 123 | buf[1] = id; |
122 | state->data[2] = cmd; | 124 | buf[2] = cmd; |
123 | state->data[3] = write_len; | 125 | buf[3] = write_len; |
124 | 126 | ||
125 | memcpy(state->data + 4, data, write_len); | 127 | memcpy(buf + 4, data, write_len); |
126 | 128 | ||
127 | rlen = (read_len > 0) ? 64 : 0; | 129 | rlen = (read_len > 0) ? 64 : 0; |
128 | ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, | 130 | ret = dvb_usb_generic_rw(d, buf, 4 + write_len, |
129 | state->data, rlen, /* delay_ms */ 0); | 131 | buf, rlen, /* delay_ms */ 0); |
130 | if (0 != ret) | 132 | if (0 != ret) |
131 | goto failed; | 133 | goto failed; |
132 | 134 | ||
133 | ret = -EIO; | 135 | ret = -EIO; |
134 | if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) | 136 | if (SYNC_BYTE_IN != buf[0] || id != buf[1]) |
135 | goto failed; | 137 | goto failed; |
136 | 138 | ||
137 | memcpy(data, state->data + 4, read_len); | 139 | memcpy(data, buf + 4, read_len); |
138 | 140 | ||
139 | mutex_unlock(&state->ca_mutex); | 141 | kfree(buf); |
140 | return 0; | 142 | return 0; |
141 | 143 | ||
142 | failed: | 144 | failed: |
143 | err("CI error %d; %02X %02X %02X -> %*ph.", | 145 | err("CI error %d; %02X %02X %02X -> %*ph.", |
144 | ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); | 146 | ret, SYNC_BYTE_OUT, id, cmd, 3, buf); |
145 | 147 | ||
146 | mutex_unlock(&state->ca_mutex); | 148 | kfree(buf); |
147 | return ret; | 149 | return ret; |
148 | } | 150 | } |
149 | 151 | ||
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr, | |||
410 | u8 *rcv_buf, u8 rcv_len) | 412 | u8 *rcv_buf, u8 rcv_len) |
411 | { | 413 | { |
412 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 414 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
415 | u8 *buf; | ||
413 | u8 id; | 416 | u8 id; |
414 | int ret; | 417 | int ret; |
415 | 418 | ||
416 | mutex_lock(&state->ca_mutex); | 419 | buf = kmalloc(64, GFP_KERNEL); |
420 | if (!buf) | ||
421 | return -ENOMEM; | ||
422 | |||
417 | id = state->c++; | 423 | id = state->c++; |
418 | 424 | ||
419 | ret = -EINVAL; | 425 | ret = -EINVAL; |
420 | if (snd_len > 64 - 7 || rcv_len > 64 - 7) | 426 | if (snd_len > 64 - 7 || rcv_len > 64 - 7) |
421 | goto failed; | 427 | goto failed; |
422 | 428 | ||
423 | state->data[0] = SYNC_BYTE_OUT; | 429 | buf[0] = SYNC_BYTE_OUT; |
424 | state->data[1] = id; | 430 | buf[1] = id; |
425 | state->data[2] = PCTV_CMD_I2C; | 431 | buf[2] = PCTV_CMD_I2C; |
426 | state->data[3] = snd_len + 3; | 432 | buf[3] = snd_len + 3; |
427 | state->data[4] = addr << 1; | 433 | buf[4] = addr << 1; |
428 | state->data[5] = snd_len; | 434 | buf[5] = snd_len; |
429 | state->data[6] = rcv_len; | 435 | buf[6] = rcv_len; |
430 | 436 | ||
431 | memcpy(state->data + 7, snd_buf, snd_len); | 437 | memcpy(buf + 7, snd_buf, snd_len); |
432 | 438 | ||
433 | ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, | 439 | ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, |
434 | state->data, /* rcv_len */ 64, | 440 | buf, /* rcv_len */ 64, |
435 | /* delay_ms */ 0); | 441 | /* delay_ms */ 0); |
436 | if (ret < 0) | 442 | if (ret < 0) |
437 | goto failed; | 443 | goto failed; |
438 | 444 | ||
439 | /* TT USB protocol error. */ | 445 | /* TT USB protocol error. */ |
440 | ret = -EIO; | 446 | ret = -EIO; |
441 | if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) | 447 | if (SYNC_BYTE_IN != buf[0] || id != buf[1]) |
442 | goto failed; | 448 | goto failed; |
443 | 449 | ||
444 | /* I2C device didn't respond as expected. */ | 450 | /* I2C device didn't respond as expected. */ |
445 | ret = -EREMOTEIO; | 451 | ret = -EREMOTEIO; |
446 | if (state->data[5] < snd_len || state->data[6] < rcv_len) | 452 | if (buf[5] < snd_len || buf[6] < rcv_len) |
447 | goto failed; | 453 | goto failed; |
448 | 454 | ||
449 | memcpy(rcv_buf, state->data + 7, rcv_len); | 455 | memcpy(rcv_buf, buf + 7, rcv_len); |
450 | mutex_unlock(&state->ca_mutex); | ||
451 | 456 | ||
457 | kfree(buf); | ||
452 | return rcv_len; | 458 | return rcv_len; |
453 | 459 | ||
454 | failed: | 460 | failed: |
455 | err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", | 461 | err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", |
456 | ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, | 462 | ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, |
457 | 7, state->data); | 463 | 7, buf); |
458 | 464 | ||
459 | mutex_unlock(&state->ca_mutex); | 465 | kfree(buf); |
460 | return ret; | 466 | return ret; |
461 | } | 467 | } |
462 | 468 | ||
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter) | |||
505 | static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | 511 | static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) |
506 | { | 512 | { |
507 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 513 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
508 | u8 *rx; | 514 | u8 *b0, *rx; |
509 | int ret; | 515 | int ret; |
510 | 516 | ||
511 | info("%s: %d\n", __func__, i); | 517 | info("%s: %d\n", __func__, i); |
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | |||
516 | if (state->initialized) | 522 | if (state->initialized) |
517 | return 0; | 523 | return 0; |
518 | 524 | ||
519 | rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); | 525 | b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL); |
520 | if (!rx) | 526 | if (!b0) |
521 | return -ENOMEM; | 527 | return -ENOMEM; |
522 | 528 | ||
523 | mutex_lock(&state->ca_mutex); | 529 | rx = b0 + 5; |
530 | |||
524 | /* hmm, where should this go? */ | 531 |
525 | ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); | 532 | ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); |
526 | if (ret != 0) | 533 | if (ret != 0) |
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | |||
528 | __func__, ret); | 535 | __func__, ret); |
529 | 536 | ||
530 | /* this is a one-time initialization, don't know where to put it */ | 537 |
531 | state->data[0] = 0xaa; | 538 | b0[0] = 0xaa; |
532 | state->data[1] = state->c++; | 539 | b0[1] = state->c++; |
533 | state->data[2] = PCTV_CMD_RESET; | 540 | b0[2] = PCTV_CMD_RESET; |
534 | state->data[3] = 1; | 541 | b0[3] = 1; |
535 | state->data[4] = 0; | 542 | b0[4] = 0; |
536 | /* reset board */ | 543 | /* reset board */ |
537 | ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); | 544 | ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0); |
538 | if (ret) | 545 | if (ret) |
539 | goto ret; | 546 | goto ret; |
540 | 547 | ||
541 | state->data[1] = state->c++; | 548 | b0[1] = state->c++; |
542 | state->data[4] = 1; | 549 | b0[4] = 1; |
543 | /* reset board (again?) */ | 550 | /* reset board (again?) */ |
544 | ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); | 551 | ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0); |
545 | if (ret) | 552 | if (ret) |
546 | goto ret; | 553 | goto ret; |
547 | 554 | ||
548 | state->initialized = 1; | 555 | state->initialized = 1; |
549 | 556 | ||
550 | ret: | 557 | ret: |
551 | mutex_unlock(&state->ca_mutex); | 558 | kfree(b0); |
552 | kfree(rx); | ||
553 | return ret; | 559 | return ret; |
554 | } | 560 | } |
555 | 561 | ||
556 | static int pctv452e_rc_query(struct dvb_usb_device *d) | 562 | static int pctv452e_rc_query(struct dvb_usb_device *d) |
557 | { | 563 | { |
558 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 564 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
565 | u8 *b, *rx; | ||
559 | int ret, i; | 566 | int ret, i; |
560 | u8 id; | 567 | u8 id; |
561 | 568 | ||
562 | mutex_lock(&state->ca_mutex); | 569 | b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL); |
570 | if (!b) | ||
571 | return -ENOMEM; | ||
572 | |||
573 | rx = b + CMD_BUFFER_SIZE; | ||
574 | |||
563 | id = state->c++; | 575 | id = state->c++; |
564 | 576 | ||
565 | /* prepare command header */ | 577 | /* prepare command header */ |
566 | state->data[0] = SYNC_BYTE_OUT; | 578 | b[0] = SYNC_BYTE_OUT; |
567 | state->data[1] = id; | 579 | b[1] = id; |
568 | state->data[2] = PCTV_CMD_IR; | 580 | b[2] = PCTV_CMD_IR; |
569 | state->data[3] = 0; | 581 | b[3] = 0; |
570 | 582 | ||
571 | /* send ir request */ | 583 | /* send ir request */ |
572 | ret = dvb_usb_generic_rw(d, state->data, 4, | 584 | ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); |
573 | state->data, PCTV_ANSWER_LEN, 0); | ||
574 | if (ret != 0) | 585 | if (ret != 0) |
575 | goto ret; | 586 | goto ret; |
576 | 587 | ||
577 | if (debug > 3) { | 588 | if (debug > 3) { |
578 | info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); | 589 | info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx); |
579 | for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) | 590 | for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) |
580 | info(" %02x", state->data[i + 3]); | 591 | info(" %02x", rx[i+3]); |
581 | 592 | ||
582 | info("\n"); | 593 | info("\n"); |
583 | } | 594 | } |
584 | 595 | ||
585 | if ((state->data[3] == 9) && (state->data[12] & 0x01)) { | 596 | if ((rx[3] == 9) && (rx[12] & 0x01)) { |
586 | /* got a "press" event */ | 597 | /* got a "press" event */ |
587 | state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); | 598 | state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]); |
588 | if (debug > 2) | 599 | if (debug > 2) |
589 | info("%s: cmd=0x%02x sys=0x%02x\n", | 600 | info("%s: cmd=0x%02x sys=0x%02x\n", |
590 | __func__, state->data[6], state->data[7]); | 601 | __func__, rx[6], rx[7]); |
591 | 602 | ||
592 | rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); | 603 | rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); |
593 | } else if (state->last_rc_key) { | 604 | } else if (state->last_rc_key) { |
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d) | |||
595 | state->last_rc_key = 0; | 606 | state->last_rc_key = 0; |
596 | } | 607 | } |
597 | ret: | 608 | ret: |
598 | mutex_unlock(&state->ca_mutex); | 609 | kfree(b); |
599 | return ret; | 610 | return ret; |
600 | } | 611 | } |
601 | 612 | ||
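All four pctv452e helpers follow the same conversion: the shared, ca_mutex-guarded state->data array becomes a per-call heap buffer, which removes the per-call locking and keeps the USB core supplied with freshly allocated, DMA-safe memory. The common shape (a sketch; 64 is the device's fixed transfer size as used in the hunks above):

    u8 *buf = kmalloc(64, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    /* build the command in buf[], then:
     *   ret = dvb_usb_generic_rw(d, buf, wlen, buf, rlen, 0);
     * validate the echoed sync byte and id, copy results out of buf */

    kfree(buf);
    return ret;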
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index a0547dbf9806..76382c858c35 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card, | |||
330 | struct ms_id_register id_reg; | 330 | struct ms_id_register id_reg; |
331 | 331 | ||
332 | if (!(*mrq)) { | 332 | if (!(*mrq)) { |
333 | memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, | 333 | memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, |
334 | sizeof(struct ms_id_register)); | 334 | sizeof(struct ms_id_register)); |
335 | *mrq = &card->current_mrq; | 335 | *mrq = &card->current_mrq; |
336 | return 0; | 336 | return 0; |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index b44306b886cb..73db08558e4d 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev) | |||
3354 | 3354 | ||
3355 | if (!slot) | 3355 | if (!slot) |
3356 | continue; | 3356 | continue; |
3357 | if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { | 3357 | if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) |
3358 | dw_mci_set_ios(slot->mmc, &slot->mmc->ios); | 3358 | dw_mci_set_ios(slot->mmc, &slot->mmc->ios); |
3359 | dw_mci_setup_bus(slot, true); | 3359 | |
3360 | } | 3360 | /* Force setup bus to guarantee available clock output */ |
3361 | dw_mci_setup_bus(slot, true); | ||
3361 | } | 3362 | } |
3362 | 3363 | ||
3363 | /* Now that slots are all setup, we can enable card detect */ | 3364 | /* Now that slots are all setup, we can enable card detect */ |
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 7be393c96b1a..cf7c18947189 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c | |||
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev, | |||
161 | 161 | ||
162 | dev->irq = pdev->irq; | 162 | dev->irq = pdev->irq; |
163 | priv->base = addr; | 163 | priv->base = addr; |
164 | priv->device = &pdev->dev; | ||
164 | 165 | ||
165 | if (!c_can_pci_data->freq) { | 166 | if (!c_can_pci_data->freq) { |
166 | dev_err(&pdev->dev, "no clock frequency defined\n"); | 167 | dev_err(&pdev->dev, "no clock frequency defined\n"); |
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 680d1ff07a55..6749b1829469 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c | |||
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev) | |||
948 | netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, | 948 | netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, |
949 | HECC_DEF_NAPI_WEIGHT); | 949 | HECC_DEF_NAPI_WEIGHT); |
950 | 950 | ||
951 | clk_enable(priv->clk); | 951 | err = clk_prepare_enable(priv->clk); |
952 | if (err) { | ||
953 | dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); | ||
954 | goto probe_exit_clk; | ||
955 | } | ||
956 | |||
952 | err = register_candev(ndev); | 957 | err = register_candev(ndev); |
953 | if (err) { | 958 | if (err) { |
954 | dev_err(&pdev->dev, "register_candev() failed\n"); | 959 | dev_err(&pdev->dev, "register_candev() failed\n"); |
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev) | |||
981 | struct ti_hecc_priv *priv = netdev_priv(ndev); | 986 | struct ti_hecc_priv *priv = netdev_priv(ndev); |
982 | 987 | ||
983 | unregister_candev(ndev); | 988 | unregister_candev(ndev); |
984 | clk_disable(priv->clk); | 989 | clk_disable_unprepare(priv->clk); |
985 | clk_put(priv->clk); | 990 | clk_put(priv->clk); |
986 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 991 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
987 | iounmap(priv->base); | 992 | iounmap(priv->base); |
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state) | |||
1006 | hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); | 1011 | hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); |
1007 | priv->can.state = CAN_STATE_SLEEPING; | 1012 | priv->can.state = CAN_STATE_SLEEPING; |
1008 | 1013 | ||
1009 | clk_disable(priv->clk); | 1014 | clk_disable_unprepare(priv->clk); |
1010 | 1015 | ||
1011 | return 0; | 1016 | return 0; |
1012 | } | 1017 | } |
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev) | |||
1015 | { | 1020 | { |
1016 | struct net_device *dev = platform_get_drvdata(pdev); | 1021 | struct net_device *dev = platform_get_drvdata(pdev); |
1017 | struct ti_hecc_priv *priv = netdev_priv(dev); | 1022 | struct ti_hecc_priv *priv = netdev_priv(dev); |
1023 | int err; | ||
1018 | 1024 | ||
1019 | clk_enable(priv->clk); | 1025 | err = clk_prepare_enable(priv->clk); |
1026 | if (err) | ||
1027 | return err; | ||
1020 | 1028 | ||
1021 | hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); | 1029 | hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); |
1022 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | 1030 | priv->can.state = CAN_STATE_ERROR_ACTIVE; |
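The ti_hecc hunks above replace bare clk_enable()/clk_disable() with the common-clock prepare/unprepare variants and start checking the return value: clk_prepare() may sleep and may fail, so probe and resume need an error leg, and every clk_prepare_enable() must be balanced by clk_disable_unprepare() on remove and suspend. A minimal sketch of the pairing, assuming a hypothetical driver with a single functional clock:

    #include <linux/clk.h>
    #include <linux/device.h>

    /* Hypothetical helpers showing the pairing used in the ti_hecc hunks. */
    static int example_clk_on(struct device *dev, struct clk *clk)
    {
            int err;

            err = clk_prepare_enable(clk);  /* prepare (may sleep), then enable */
            if (err)
                    dev_err(dev, "clk_prepare_enable() failed: %d\n", err);
            return err;
    }

    static void example_clk_off(struct clk *clk)
    {
            clk_disable_unprepare(clk);     /* disable, then unprepare */
    }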
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 5b7ba25e0065..8a280e7d66bd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
@@ -891,6 +891,8 @@ | |||
891 | #define PCS_V1_WINDOW_SELECT 0x03fc | 891 | #define PCS_V1_WINDOW_SELECT 0x03fc |
892 | #define PCS_V2_WINDOW_DEF 0x9060 | 892 | #define PCS_V2_WINDOW_DEF 0x9060 |
893 | #define PCS_V2_WINDOW_SELECT 0x9064 | 893 | #define PCS_V2_WINDOW_SELECT 0x9064 |
894 | #define PCS_V2_RV_WINDOW_DEF 0x1060 | ||
895 | #define PCS_V2_RV_WINDOW_SELECT 0x1064 | ||
894 | 896 | ||
895 | /* PCS register entry bit positions and sizes */ | 897 | /* PCS register entry bit positions and sizes */ |
896 | #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 | 898 | #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index aaf0350076a9..a7d16db5c4b2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, | |||
1151 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); | 1151 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); |
1152 | 1152 | ||
1153 | spin_lock_irqsave(&pdata->xpcs_lock, flags); | 1153 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1154 | XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); | 1154 | XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); |
1155 | mmd_data = XPCS16_IOREAD(pdata, offset); | 1155 | mmd_data = XPCS16_IOREAD(pdata, offset); |
1156 | spin_unlock_irqrestore(&pdata->xpcs_lock, flags); | 1156 | spin_unlock_irqrestore(&pdata->xpcs_lock, flags); |
1157 | 1157 | ||
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, | |||
1183 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); | 1183 | offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); |
1184 | 1184 | ||
1185 | spin_lock_irqsave(&pdata->xpcs_lock, flags); | 1185 | spin_lock_irqsave(&pdata->xpcs_lock, flags); |
1186 | XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); | 1186 | XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); |
1187 | XPCS16_IOWRITE(pdata, offset, mmd_data); | 1187 | XPCS16_IOWRITE(pdata, offset, mmd_data); |
1188 | spin_unlock_irqrestore(&pdata->xpcs_lock, flags); | 1188 | spin_unlock_irqrestore(&pdata->xpcs_lock, flags); |
1189 | } | 1189 | } |
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) | |||
3407 | 3407 | ||
3408 | /* Flush Tx queues */ | 3408 | /* Flush Tx queues */ |
3409 | ret = xgbe_flush_tx_queues(pdata); | 3409 | ret = xgbe_flush_tx_queues(pdata); |
3410 | if (ret) | 3410 | if (ret) { |
3411 | netdev_err(pdata->netdev, "error flushing TX queues\n"); | ||
3411 | return ret; | 3412 | return ret; |
3413 | } | ||
3412 | 3414 | ||
3413 | /* | 3415 | /* |
3414 | * Initialize DMA related features | 3416 | * Initialize DMA related features |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 9943629fcbf9..1c87cc204075 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) | |||
1070 | 1070 | ||
1071 | DBGPR("-->xgbe_start\n"); | 1071 | DBGPR("-->xgbe_start\n"); |
1072 | 1072 | ||
1073 | hw_if->init(pdata); | 1073 | ret = hw_if->init(pdata); |
1074 | if (ret) | ||
1075 | return ret; | ||
1074 | 1076 | ||
1075 | xgbe_napi_enable(pdata, 1); | 1077 | xgbe_napi_enable(pdata, 1); |
1076 | 1078 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index e76b7f65b805..c2730f15bd8b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c | |||
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
265 | struct xgbe_prv_data *pdata; | 265 | struct xgbe_prv_data *pdata; |
266 | struct device *dev = &pdev->dev; | 266 | struct device *dev = &pdev->dev; |
267 | void __iomem * const *iomap_table; | 267 | void __iomem * const *iomap_table; |
268 | struct pci_dev *rdev; | ||
268 | unsigned int ma_lo, ma_hi; | 269 | unsigned int ma_lo, ma_hi; |
269 | unsigned int reg; | 270 | unsigned int reg; |
270 | int bar_mask; | 271 | int bar_mask; |
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
326 | if (netif_msg_probe(pdata)) | 327 | if (netif_msg_probe(pdata)) |
327 | dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); | 328 | dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); |
328 | 329 | ||
330 | /* Set the PCS indirect addressing definition registers */ | ||
331 | rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); | ||
332 | if (rdev && | ||
333 | (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) { | ||
334 | pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; | ||
335 | pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; | ||
336 | } else { | ||
337 | pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; | ||
338 | pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; | ||
339 | } | ||
340 | pci_dev_put(rdev); | ||
341 | |||
329 | /* Configure the PCS indirect addressing support */ | 342 | /* Configure the PCS indirect addressing support */ |
330 | reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); | 343 | reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); |
331 | pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); | 344 | pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); |
332 | pdata->xpcs_window <<= 6; | 345 | pdata->xpcs_window <<= 6; |
333 | pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); | 346 | pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); |
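The probe hunk above picks the PCS window registers at runtime: if device 00.0 on domain 0, bus 0 is AMD's 0x15d0 root complex, the hardware needs the relocated RV register pair; otherwise the original V2 offsets apply. pci_get_domain_bus_and_slot() takes a reference on the device it returns, balanced here by pci_dev_put(), and pci_dev_put(NULL) is a no-op, so the lookup-failed case needs no separate branch. A condensed sketch of the check (the 0x15d0 device ID is taken from the hunk itself):

    #include <linux/pci.h>

    /* Sketch: detect the root complex variant that needs the RV window
     * registers. pci_dev_put(NULL) is safe, so no NULL check is needed.
     */
    static bool example_needs_rv_windows(void)
    {
            struct pci_dev *rdev;
            bool rv;

            rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
            rv = rdev && rdev->vendor == PCI_VENDOR_ID_AMD &&
                 rdev->device == 0x15d0;
            pci_dev_put(rdev);
            return rv;
    }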
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index f52a9bd05bac..00108815b55e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h | |||
@@ -955,6 +955,8 @@ struct xgbe_prv_data { | |||
955 | 955 | ||
956 | /* XPCS indirect addressing lock */ | 956 | /* XPCS indirect addressing lock */ |
957 | spinlock_t xpcs_lock; | 957 | spinlock_t xpcs_lock; |
958 | unsigned int xpcs_window_def_reg; | ||
959 | unsigned int xpcs_window_sel_reg; | ||
958 | unsigned int xpcs_window; | 960 | unsigned int xpcs_window; |
959 | unsigned int xpcs_window_size; | 961 | unsigned int xpcs_window_size; |
960 | unsigned int xpcs_window_mask; | 962 | unsigned int xpcs_window_mask; |
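With the window-select register now a per-device field, the xgbe-dev.c accessors above program pdata->xpcs_window_sel_reg instead of the fixed PCS_V2_WINDOW_SELECT. The access pattern is the usual windowed indirect read: map a window, then access through it, with both steps under xpcs_lock so no other CPU can remap the window in between. Condensed from the read hunk above (XPCS32_IOWRITE/XPCS16_IOREAD are the driver's own accessors):

            spin_lock_irqsave(&pdata->xpcs_lock, flags);
            /* Select the window, then read through it; the lock must span
             * both steps or a concurrent accessor could remap the window
             * between the select write and the data read.
             */
            XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
            mmd_data = XPCS16_IOREAD(pdata, offset);
            spin_unlock_irqrestore(&pdata->xpcs_lock, flags);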
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c8f525574d68..7dcc907a449d 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx) | |||
685 | return -ENOMEM; | 685 | return -ENOMEM; |
686 | } | 686 | } |
687 | 687 | ||
688 | alx_reinit_rings(alx); | ||
689 | |||
690 | return 0; | 688 | return 0; |
691 | } | 689 | } |
692 | 690 | ||
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx) | |||
703 | if (alx->qnapi[0] && alx->qnapi[0]->rxq) | 701 | if (alx->qnapi[0] && alx->qnapi[0]->rxq) |
704 | kfree(alx->qnapi[0]->rxq->bufs); | 702 | kfree(alx->qnapi[0]->rxq->bufs); |
705 | 703 | ||
706 | if (!alx->descmem.virt) | 704 | if (alx->descmem.virt) |
707 | dma_free_coherent(&alx->hw.pdev->dev, | 705 | dma_free_coherent(&alx->hw.pdev->dev, |
708 | alx->descmem.size, | 706 | alx->descmem.size, |
709 | alx->descmem.virt, | 707 | alx->descmem.virt, |
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx) | |||
984 | alx_free_rings(alx); | 982 | alx_free_rings(alx); |
985 | alx_free_napis(alx); | 983 | alx_free_napis(alx); |
986 | alx_disable_advanced_intr(alx); | 984 | alx_disable_advanced_intr(alx); |
985 | alx_init_intr(alx, false); | ||
987 | 986 | ||
988 | err = alx_alloc_napis(alx); | 987 | err = alx_alloc_napis(alx); |
989 | if (err) | 988 | if (err) |
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) | |||
1241 | if (err) | 1240 | if (err) |
1242 | goto out_free_rings; | 1241 | goto out_free_rings; |
1243 | 1242 | ||
1243 | /* must be called after alx_request_irq because the chip stops working | ||
1244 | * if we copy the DMA addresses in alx_init_ring_ptrs twice when the | ||
1245 | * request for MSI-X interrupts has failed | ||
1246 | */ | ||
1247 | alx_reinit_rings(alx); | ||
1248 | |||
1244 | netif_set_real_num_tx_queues(alx->dev, alx->num_txq); | 1249 | netif_set_real_num_tx_queues(alx->dev, alx->num_txq); |
1245 | netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); | 1250 | netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); |
1246 | 1251 | ||
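Two of the alx hunks above are classic bug shapes. The free path tested !alx->descmem.virt, so the coherent descriptor memory was never actually released; the sense of the test is now corrected. And alx_reinit_rings() moves from alx_alloc_rings() into __alx_open() after alx_request_irq(), because (per the new comment) the chip misbehaves if the ring DMA addresses are written twice during an MSI-X fallback. The corrected free, for reference (the final dma handle argument is elided in the hunk and assumed here):

            /* Free only what was allocated; the old inverted test leaked
             * descmem on every teardown.
             */
            if (alx->descmem.virt)
                    dma_free_coherent(&alx->hw.pdev->dev, alx->descmem.size,
                                      alx->descmem.virt, alx->descmem.dma);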
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 3b14d5144228..c483618b57bd 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev) | |||
913 | priv->old_link = 0; | 913 | priv->old_link = 0; |
914 | priv->old_duplex = -1; | 914 | priv->old_duplex = -1; |
915 | priv->old_pause = -1; | 915 | priv->old_pause = -1; |
916 | } else { | ||
917 | phydev = NULL; | ||
916 | } | 918 | } |
917 | 919 | ||
918 | /* mask all interrupts and request them */ | 920 | /* mask all interrupts and request them */ |
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev) | |||
1083 | enet_dmac_writel(priv, priv->dma_chan_int_mask, | 1085 | enet_dmac_writel(priv, priv->dma_chan_int_mask, |
1084 | ENETDMAC_IRMASK, priv->tx_chan); | 1086 | ENETDMAC_IRMASK, priv->tx_chan); |
1085 | 1087 | ||
1086 | if (priv->has_phy) | 1088 | if (phydev) |
1087 | phy_start(phydev); | 1089 | phy_start(phydev); |
1088 | else | 1090 | else |
1089 | bcm_enet_adjust_link(dev); | 1091 | bcm_enet_adjust_link(dev); |
@@ -1126,7 +1128,7 @@ out_freeirq: | |||
1126 | free_irq(dev->irq, dev); | 1128 | free_irq(dev->irq, dev); |
1127 | 1129 | ||
1128 | out_phy_disconnect: | 1130 | out_phy_disconnect: |
1129 | if (priv->has_phy) | 1131 | if (phydev) |
1130 | phy_disconnect(phydev); | 1132 | phy_disconnect(phydev); |
1131 | 1133 | ||
1132 | return ret; | 1134 | return ret; |
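The bcm_enet_open() hunks above switch the later PHY calls from re-testing priv->has_phy to testing the local phydev pointer, which the new else branch guarantees is NULL when no PHY was connected. Keying off the pointer that phy_start()/phy_disconnect() will actually dereference removes any window where the flag and the pointer disagree. The resulting idiom, sketched with a hypothetical connect step:

            struct phy_device *phydev = NULL;

            if (priv->has_phy) {
                    phydev = phy_connect(dev, phy_id, adjust_link_cb,
                                         PHY_INTERFACE_MODE_MII); /* hypothetical args */
                    if (IS_ERR(phydev))
                            return PTR_ERR(phydev);
            }
            /* ... */
            if (phydev)     /* gate on the pointer itself, not a flag */
                    phy_start(phydev);
            else
                    bcm_enet_adjust_link(dev);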
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9608cb49a11c..4fcc6a84a087 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, | |||
1099 | { | 1099 | { |
1100 | #ifdef CONFIG_INET | 1100 | #ifdef CONFIG_INET |
1101 | struct tcphdr *th; | 1101 | struct tcphdr *th; |
1102 | int len, nw_off, tcp_opt_len; | 1102 | int len, nw_off, tcp_opt_len = 0; |
1103 | 1103 | ||
1104 | if (tcp_ts) | 1104 | if (tcp_ts) |
1105 | tcp_opt_len = 12; | 1105 | tcp_opt_len = 12; |
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) | |||
5314 | if ((link_info->support_auto_speeds | diff) != | 5314 | if ((link_info->support_auto_speeds | diff) != |
5315 | link_info->support_auto_speeds) { | 5315 | link_info->support_auto_speeds) { |
5316 | /* An advertised speed is no longer supported, so we need to | 5316 | /* An advertised speed is no longer supported, so we need to |
5317 | * update the advertisement settings. See bnxt_reset() for | 5317 | * update the advertisement settings. Caller holds RTNL |
5318 | * comments about the rtnl_lock() sequence below. | 5318 | * so we can modify link settings. |
5319 | */ | 5319 | */ |
5320 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5321 | rtnl_lock(); | ||
5322 | link_info->advertising = link_info->support_auto_speeds; | 5320 | link_info->advertising = link_info->support_auto_speeds; |
5323 | if (test_bit(BNXT_STATE_OPEN, &bp->state) && | 5321 | if (link_info->autoneg & BNXT_AUTONEG_SPEED) |
5324 | (link_info->autoneg & BNXT_AUTONEG_SPEED)) | ||
5325 | bnxt_hwrm_set_link_setting(bp, true, false); | 5322 | bnxt_hwrm_set_link_setting(bp, true, false); |
5326 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
5327 | rtnl_unlock(); | ||
5328 | } | 5323 | } |
5329 | return 0; | 5324 | return 0; |
5330 | } | 5325 | } |
@@ -6200,29 +6195,37 @@ bnxt_restart_timer: | |||
6200 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 6195 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
6201 | } | 6196 | } |
6202 | 6197 | ||
6203 | /* Only called from bnxt_sp_task() */ | 6198 | static void bnxt_rtnl_lock_sp(struct bnxt *bp) |
6204 | static void bnxt_reset(struct bnxt *bp, bool silent) | ||
6205 | { | 6199 | { |
6206 | /* bnxt_reset_task() calls bnxt_close_nic() which waits | 6200 | /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK |
6207 | * for BNXT_STATE_IN_SP_TASK to clear. | 6201 | * set. If the device is being closed, bnxt_close() may be holding |
6208 | * If there is a parallel dev_close(), bnxt_close() may be holding | ||
6209 | * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we | 6202 | * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we |
6210 | * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). | 6203 | * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). |
6211 | */ | 6204 | */ |
6212 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | 6205 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
6213 | rtnl_lock(); | 6206 | rtnl_lock(); |
6214 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | 6207 | } |
6215 | bnxt_reset_task(bp, silent); | 6208 | |
6209 | static void bnxt_rtnl_unlock_sp(struct bnxt *bp) | ||
6210 | { | ||
6216 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | 6211 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
6217 | rtnl_unlock(); | 6212 | rtnl_unlock(); |
6218 | } | 6213 | } |
6219 | 6214 | ||
6215 | /* Only called from bnxt_sp_task() */ | ||
6216 | static void bnxt_reset(struct bnxt *bp, bool silent) | ||
6217 | { | ||
6218 | bnxt_rtnl_lock_sp(bp); | ||
6219 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
6220 | bnxt_reset_task(bp, silent); | ||
6221 | bnxt_rtnl_unlock_sp(bp); | ||
6222 | } | ||
6223 | |||
6220 | static void bnxt_cfg_ntp_filters(struct bnxt *); | 6224 | static void bnxt_cfg_ntp_filters(struct bnxt *); |
6221 | 6225 | ||
6222 | static void bnxt_sp_task(struct work_struct *work) | 6226 | static void bnxt_sp_task(struct work_struct *work) |
6223 | { | 6227 | { |
6224 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); | 6228 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); |
6225 | int rc; | ||
6226 | 6229 | ||
6227 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | 6230 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
6228 | smp_mb__after_atomic(); | 6231 | smp_mb__after_atomic(); |
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work) | |||
6236 | 6239 | ||
6237 | if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) | 6240 | if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) |
6238 | bnxt_cfg_ntp_filters(bp); | 6241 | bnxt_cfg_ntp_filters(bp); |
6239 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | ||
6240 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, | ||
6241 | &bp->sp_event)) | ||
6242 | bnxt_hwrm_phy_qcaps(bp); | ||
6243 | |||
6244 | rc = bnxt_update_link(bp, true); | ||
6245 | if (rc) | ||
6246 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | ||
6247 | rc); | ||
6248 | } | ||
6249 | if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) | 6242 | if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) |
6250 | bnxt_hwrm_exec_fwd_req(bp); | 6243 | bnxt_hwrm_exec_fwd_req(bp); |
6251 | if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { | 6244 | if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { |
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work) | |||
6266 | bnxt_hwrm_tunnel_dst_port_free( | 6259 | bnxt_hwrm_tunnel_dst_port_free( |
6267 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); | 6260 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); |
6268 | } | 6261 | } |
6262 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | ||
6263 | bnxt_hwrm_port_qstats(bp); | ||
6264 | |||
6265 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
6266 | * must be the last functions to be called before exiting. | ||
6267 | */ | ||
6268 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | ||
6269 | int rc = 0; | ||
6270 | |||
6271 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, | ||
6272 | &bp->sp_event)) | ||
6273 | bnxt_hwrm_phy_qcaps(bp); | ||
6274 | |||
6275 | bnxt_rtnl_lock_sp(bp); | ||
6276 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
6277 | rc = bnxt_update_link(bp, true); | ||
6278 | bnxt_rtnl_unlock_sp(bp); | ||
6279 | if (rc) | ||
6280 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | ||
6281 | rc); | ||
6282 | } | ||
6283 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { | ||
6284 | bnxt_rtnl_lock_sp(bp); | ||
6285 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | ||
6286 | bnxt_get_port_module_status(bp); | ||
6287 | bnxt_rtnl_unlock_sp(bp); | ||
6288 | } | ||
6269 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | 6289 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) |
6270 | bnxt_reset(bp, false); | 6290 | bnxt_reset(bp, false); |
6271 | 6291 | ||
6272 | if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) | 6292 | if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) |
6273 | bnxt_reset(bp, true); | 6293 | bnxt_reset(bp, true); |
6274 | 6294 | ||
6275 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) | ||
6276 | bnxt_get_port_module_status(bp); | ||
6277 | |||
6278 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | ||
6279 | bnxt_hwrm_port_qstats(bp); | ||
6280 | |||
6281 | smp_mb__before_atomic(); | 6295 | smp_mb__before_atomic(); |
6282 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | 6296 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
6283 | } | 6297 | } |
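The bnxt_sp_task() rework above untangles a lock-ordering cycle: bnxt_close() takes RTNL and then waits for BNXT_STATE_IN_SP_TASK to clear, while the service task holds that bit and may itself need RTNL. bnxt_rtnl_lock_sp() therefore drops the bit before sleeping on rtnl_lock(), and every RTNL-protected action re-checks BNXT_STATE_OPEN afterwards, since the device may have been closed while the task was waiting. Stripped to its essentials:

            /* Service-task side: release the in-task bit before blocking on
             * a lock the closer may hold, then revalidate device state.
             */
            clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
            rtnl_lock();
            if (test_bit(BNXT_STATE_OPEN, &bp->state))
                    do_link_or_reset_work(bp);      /* hypothetical callee */
            set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
            rtnl_unlock();

This is also why the link-change and module-status handlers move to the end of bnxt_sp_task(): per the new comment they clear BNXT_STATE_IN_SP_TASK transiently, so anything ordered after them would race with a parallel close.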
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a6e7afa878be..c1b671667920 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, | |||
2948 | } | 2948 | } |
2949 | 2949 | ||
2950 | /* try reuse page */ | 2950 | /* try reuse page */ |
2951 | if (unlikely(page_count(page) != 1)) | 2951 | if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) |
2952 | return false; | 2952 | return false; |
2953 | 2953 | ||
2954 | /* change offset to the other half */ | 2954 | /* change offset to the other half */ |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index a831f947ca8c..309f5c66083c 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
1601 | netdev->netdev_ops = &ibmveth_netdev_ops; | 1601 | netdev->netdev_ops = &ibmveth_netdev_ops; |
1602 | netdev->ethtool_ops = &netdev_ethtool_ops; | 1602 | netdev->ethtool_ops = &netdev_ethtool_ops; |
1603 | SET_NETDEV_DEV(netdev, &dev->dev); | 1603 | SET_NETDEV_DEV(netdev, &dev->dev); |
1604 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | | 1604 | netdev->hw_features = NETIF_F_SG; |
1605 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1605 | if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { |
1606 | netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | ||
1607 | NETIF_F_RXCSUM; | ||
1608 | } | ||
1606 | 1609 | ||
1607 | netdev->features |= netdev->hw_features; | 1610 | netdev->features |= netdev->hw_features; |
1608 | 1611 | ||
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3dd87889e67e..1c29c86f8709 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev) | |||
2517 | } | 2517 | } |
2518 | 2518 | ||
2519 | const struct of_device_id of_mtk_match[] = { | 2519 | const struct of_device_id of_mtk_match[] = { |
2520 | { .compatible = "mediatek,mt7623-eth" }, | 2520 | { .compatible = "mediatek,mt2701-eth" }, |
2521 | {}, | 2521 | {}, |
2522 | }; | 2522 | }; |
2523 | MODULE_DEVICE_TABLE(of, of_mtk_match); | 2523 | MODULE_DEVICE_TABLE(of, of_mtk_match); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d9c9f86a30df..d5a9372ed84d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev, | |||
1732 | { | 1732 | { |
1733 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1733 | struct mlx4_en_priv *priv = netdev_priv(dev); |
1734 | 1734 | ||
1735 | memset(channel, 0, sizeof(*channel)); | ||
1736 | |||
1737 | channel->max_rx = MAX_RX_RINGS; | 1735 | channel->max_rx = MAX_RX_RINGS; |
1738 | channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; | 1736 | channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; |
1739 | 1737 | ||
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev, | |||
1752 | int xdp_count; | 1750 | int xdp_count; |
1753 | int err = 0; | 1751 | int err = 0; |
1754 | 1752 | ||
1755 | if (channel->other_count || channel->combined_count || | 1753 | if (!channel->tx_count || !channel->rx_count) |
1756 | channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP || | ||
1757 | channel->rx_count > MAX_RX_RINGS || | ||
1758 | !channel->tx_count || !channel->rx_count) | ||
1759 | return -EINVAL; | 1754 | return -EINVAL; |
1760 | 1755 | ||
1761 | tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); | 1756 | tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 33a399a8b5d5..5197817e4b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -543,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev, | |||
543 | struct ethtool_channels *ch) | 543 | struct ethtool_channels *ch) |
544 | { | 544 | { |
545 | struct mlx5e_priv *priv = netdev_priv(dev); | 545 | struct mlx5e_priv *priv = netdev_priv(dev); |
546 | int ncv = mlx5e_get_max_num_channels(priv->mdev); | ||
547 | unsigned int count = ch->combined_count; | 546 | unsigned int count = ch->combined_count; |
548 | bool arfs_enabled; | 547 | bool arfs_enabled; |
549 | bool was_opened; | 548 | bool was_opened; |
@@ -554,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev, | |||
554 | __func__); | 553 | __func__); |
555 | return -EINVAL; | 554 | return -EINVAL; |
556 | } | 555 | } |
557 | if (ch->rx_count || ch->tx_count) { | ||
558 | netdev_info(dev, "%s: separate rx/tx count not supported\n", | ||
559 | __func__); | ||
560 | return -EINVAL; | ||
561 | } | ||
562 | if (count > ncv) { | ||
563 | netdev_info(dev, "%s: count (%d) > max (%d)\n", | ||
564 | __func__, count, ncv); | ||
565 | return -EINVAL; | ||
566 | } | ||
567 | 556 | ||
568 | if (priv->params.num_channels == count) | 557 | if (priv->params.num_channels == count) |
569 | return 0; | 558 | return 0; |
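The mlx4 and mlx5 ethtool hunks above shed validation that the ethtool core now performs for every driver: the set_channels entry point checks the requested counts against the maxima reported by get_channels, and get_channels receives a zeroed struct, so drivers only keep their device-specific constraints. Assuming that core behavior, the driver-side check reduces to:

            /* Range checks against max_rx/max_tx/max_combined and the
             * memset of the get_channels reply are handled by the ethtool
             * core; only driver-specific constraints remain here.
             */
            if (!channel->tx_count || !channel->rx_count)
                    return -EINVAL;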
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0e2fb3ed1790..06d5e6fecb0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, | |||
193 | return false; | 193 | return false; |
194 | } | 194 | } |
195 | 195 | ||
196 | if (unlikely(page_is_pfmemalloc(dma_info->page))) | ||
197 | return false; | ||
198 | |||
196 | cache->page_cache[cache->tail] = *dma_info; | 199 | cache->page_cache[cache->tail] = *dma_info; |
197 | cache->tail = tail_next; | 200 | cache->tail = tail_next; |
198 | return true; | 201 | return true; |
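The gianfar and mlx5 hunks above fix the same leak of emergency memory in two RX page-recycling paths: pages served from the pfmemalloc reserves (handed out under memory pressure, e.g. for swap-over-network traffic) must be returned to the page allocator, not parked in a driver cache where ordinary traffic could reuse them indefinitely. The recycle predicate just grows one more condition; a sketch of the common shape as a hypothetical helper:

    #include <linux/mm.h>

    /* Hypothetical recycle check combining both conditions seen above:
     * the driver must hold the only reference, and the page must not
     * come from the pfmemalloc emergency reserves.
     */
    static bool example_can_recycle(struct page *page)
    {
            return page_count(page) == 1 && !page_is_pfmemalloc(page);
    }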
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 01d0efa9c5c7..9e494a446b7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, | |||
1172 | 1172 | ||
1173 | static int | 1173 | static int |
1174 | mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, | 1174 | mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, |
1175 | struct mlxsw_sp_nexthop_group *nh_grp) | 1175 | struct mlxsw_sp_nexthop_group *nh_grp, |
1176 | bool reallocate) | ||
1176 | { | 1177 | { |
1177 | u32 adj_index = nh_grp->adj_index; /* base */ | 1178 | u32 adj_index = nh_grp->adj_index; /* base */ |
1178 | struct mlxsw_sp_nexthop *nh; | 1179 | struct mlxsw_sp_nexthop *nh; |
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, | |||
1187 | continue; | 1188 | continue; |
1188 | } | 1189 | } |
1189 | 1190 | ||
1190 | if (nh->update) { | 1191 | if (nh->update || reallocate) { |
1191 | err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, | 1192 | err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, |
1192 | adj_index, nh); | 1193 | adj_index, nh); |
1193 | if (err) | 1194 | if (err) |
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, | |||
1248 | /* Nothing was added or removed, so no need to reallocate. Just | 1249 | /* Nothing was added or removed, so no need to reallocate. Just |
1249 | * update MAC on existing adjacency indexes. | 1250 | * update MAC on existing adjacency indexes. |
1250 | */ | 1251 | */ |
1251 | err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); | 1252 | err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, |
1253 | false); | ||
1252 | if (err) { | 1254 | if (err) { |
1253 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); | 1255 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); |
1254 | goto set_trap; | 1256 | goto set_trap; |
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, | |||
1276 | nh_grp->adj_index_valid = 1; | 1278 | nh_grp->adj_index_valid = 1; |
1277 | nh_grp->adj_index = adj_index; | 1279 | nh_grp->adj_index = adj_index; |
1278 | nh_grp->ecmp_size = ecmp_size; | 1280 | nh_grp->ecmp_size = ecmp_size; |
1279 | err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); | 1281 | err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true); |
1280 | if (err) { | 1282 | if (err) { |
1281 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); | 1283 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); |
1282 | goto set_trap; | 1284 | goto set_trap; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 8e5cb7605b0f..873ce2cd76ba 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
297 | list_del(&p_pkt->list_entry); | 297 | list_del(&p_pkt->list_entry); |
298 | b_last_packet = list_empty(&p_tx->active_descq); | 298 | b_last_packet = list_empty(&p_tx->active_descq); |
299 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); | 299 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); |
300 | if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { | 300 | if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { |
301 | struct qed_ooo_buffer *p_buffer; | 301 | struct qed_ooo_buffer *p_buffer; |
302 | 302 | ||
303 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; | 303 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
309 | b_last_frag = | 309 | b_last_frag = |
310 | p_tx->cur_completing_bd_idx == p_pkt->bd_used; | 310 | p_tx->cur_completing_bd_idx == p_pkt->bd_used; |
311 | tx_frag = p_pkt->bds_set[0].tx_frag; | 311 | tx_frag = p_pkt->bds_set[0].tx_frag; |
312 | if (p_ll2_conn->gsi_enable) | 312 | if (p_ll2_conn->conn.gsi_enable) |
313 | qed_ll2b_release_tx_gsi_packet(p_hwfn, | 313 | qed_ll2b_release_tx_gsi_packet(p_hwfn, |
314 | p_ll2_conn-> | 314 | p_ll2_conn-> |
315 | my_id, | 315 | my_id, |
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) | |||
378 | 378 | ||
379 | spin_unlock_irqrestore(&p_tx->lock, flags); | 379 | spin_unlock_irqrestore(&p_tx->lock, flags); |
380 | tx_frag = p_pkt->bds_set[0].tx_frag; | 380 | tx_frag = p_pkt->bds_set[0].tx_frag; |
381 | if (p_ll2_conn->gsi_enable) | 381 | if (p_ll2_conn->conn.gsi_enable) |
382 | qed_ll2b_complete_tx_gsi_packet(p_hwfn, | 382 | qed_ll2b_complete_tx_gsi_packet(p_hwfn, |
383 | p_ll2_conn->my_id, | 383 | p_ll2_conn->my_id, |
384 | p_pkt->cookie, | 384 | p_pkt->cookie, |
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
550 | 550 | ||
551 | list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); | 551 | list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); |
552 | 552 | ||
553 | if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { | 553 | if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { |
554 | struct qed_ooo_buffer *p_buffer; | 554 | struct qed_ooo_buffer *p_buffer; |
555 | 555 | ||
556 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; | 556 | p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; |
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, | |||
738 | rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, | 738 | rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, |
739 | p_buffer->vlan, bd_flags, | 739 | p_buffer->vlan, bd_flags, |
740 | l4_hdr_offset_w, | 740 | l4_hdr_offset_w, |
741 | p_ll2_conn->tx_dest, 0, | 741 | p_ll2_conn->conn.tx_dest, 0, |
742 | first_frag, | 742 | first_frag, |
743 | p_buffer->packet_length, | 743 | p_buffer->packet_length, |
744 | p_buffer, true); | 744 | p_buffer, true); |
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, | |||
858 | u16 buf_idx; | 858 | u16 buf_idx; |
859 | int rc = 0; | 859 | int rc = 0; |
860 | 860 | ||
861 | if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) | 861 | if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) |
862 | return rc; | 862 | return rc; |
863 | 863 | ||
864 | if (!rx_num_ooo_buffers) | 864 | if (!rx_num_ooo_buffers) |
@@ -901,7 +901,7 @@ static void | |||
901 | qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, | 901 | qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, |
902 | struct qed_ll2_info *p_ll2_conn) | 902 | struct qed_ll2_info *p_ll2_conn) |
903 | { | 903 | { |
904 | if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) | 904 | if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) |
905 | return; | 905 | return; |
906 | 906 | ||
907 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); | 907 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); |
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, | |||
913 | { | 913 | { |
914 | struct qed_ooo_buffer *p_buffer; | 914 | struct qed_ooo_buffer *p_buffer; |
915 | 915 | ||
916 | if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) | 916 | if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) |
917 | return; | 917 | return; |
918 | 918 | ||
919 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); | 919 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); |
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, | |||
945 | { | 945 | { |
946 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); | 946 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
947 | u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; | 947 | u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; |
948 | struct qed_ll2_info *ll2_info; | 948 | struct qed_ll2_conn ll2_info; |
949 | int rc; | 949 | int rc; |
950 | 950 | ||
951 | ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); | 951 | ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO; |
952 | if (!ll2_info) | 952 | ll2_info.mtu = params->mtu; |
953 | return -ENOMEM; | 953 | ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; |
954 | ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; | 954 | ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; |
955 | ll2_info->mtu = params->mtu; | 955 | ll2_info.tx_tc = OOO_LB_TC; |
956 | ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; | 956 | ll2_info.tx_dest = CORE_TX_DEST_LB; |
957 | ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; | 957 | |
958 | ll2_info->tx_tc = OOO_LB_TC; | 958 | rc = qed_ll2_acquire_connection(hwfn, &ll2_info, |
959 | ll2_info->tx_dest = CORE_TX_DEST_LB; | ||
960 | |||
961 | rc = qed_ll2_acquire_connection(hwfn, ll2_info, | ||
962 | QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, | 959 | QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, |
963 | handle); | 960 | handle); |
964 | kfree(ll2_info); | ||
965 | if (rc) { | 961 | if (rc) { |
966 | DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); | 962 | DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); |
967 | goto out; | 963 | goto out; |
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, | |||
1006 | struct qed_ll2_info *p_ll2_conn, | 1002 | struct qed_ll2_info *p_ll2_conn, |
1007 | u8 action_on_error) | 1003 | u8 action_on_error) |
1008 | { | 1004 | { |
1009 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; | 1005 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; |
1010 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; | 1006 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; |
1011 | struct core_rx_start_ramrod_data *p_ramrod = NULL; | 1007 | struct core_rx_start_ramrod_data *p_ramrod = NULL; |
1012 | struct qed_spq_entry *p_ent = NULL; | 1008 | struct qed_spq_entry *p_ent = NULL; |
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, | |||
1032 | p_ramrod->sb_index = p_rx->rx_sb_index; | 1028 | p_ramrod->sb_index = p_rx->rx_sb_index; |
1033 | p_ramrod->complete_event_flg = 1; | 1029 | p_ramrod->complete_event_flg = 1; |
1034 | 1030 | ||
1035 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); | 1031 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); |
1036 | DMA_REGPAIR_LE(p_ramrod->bd_base, | 1032 | DMA_REGPAIR_LE(p_ramrod->bd_base, |
1037 | p_rx->rxq_chain.p_phys_addr); | 1033 | p_rx->rxq_chain.p_phys_addr); |
1038 | cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); | 1034 | cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); |
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, | |||
1040 | DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, | 1036 | DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, |
1041 | qed_chain_get_pbl_phys(&p_rx->rcq_chain)); | 1037 | qed_chain_get_pbl_phys(&p_rx->rcq_chain)); |
1042 | 1038 | ||
1043 | p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; | 1039 | p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg; |
1044 | p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; | 1040 | p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en; |
1045 | p_ramrod->queue_id = p_ll2_conn->queue_id; | 1041 | p_ramrod->queue_id = p_ll2_conn->queue_id; |
1046 | p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 | 1042 | p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 |
1047 | : 1; | 1043 | : 1; |
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, | |||
1056 | } | 1052 | } |
1057 | 1053 | ||
1058 | p_ramrod->action_on_error.error_type = action_on_error; | 1054 | p_ramrod->action_on_error.error_type = action_on_error; |
1059 | p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; | 1055 | p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; |
1060 | return qed_spq_post(p_hwfn, p_ent, NULL); | 1056 | return qed_spq_post(p_hwfn, p_ent, NULL); |
1061 | } | 1057 | } |
1062 | 1058 | ||
1063 | static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | 1059 | static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, |
1064 | struct qed_ll2_info *p_ll2_conn) | 1060 | struct qed_ll2_info *p_ll2_conn) |
1065 | { | 1061 | { |
1066 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; | 1062 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; |
1067 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; | 1063 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; |
1068 | struct core_tx_start_ramrod_data *p_ramrod = NULL; | 1064 | struct core_tx_start_ramrod_data *p_ramrod = NULL; |
1069 | struct qed_spq_entry *p_ent = NULL; | 1065 | struct qed_spq_entry *p_ent = NULL; |
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | |||
1075 | if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) | 1071 | if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) |
1076 | return 0; | 1072 | return 0; |
1077 | 1073 | ||
1078 | if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) | 1074 | if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) |
1079 | p_ll2_conn->tx_stats_en = 0; | 1075 | p_ll2_conn->tx_stats_en = 0; |
1080 | else | 1076 | else |
1081 | p_ll2_conn->tx_stats_en = 1; | 1077 | p_ll2_conn->tx_stats_en = 1; |
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | |||
1096 | 1092 | ||
1097 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); | 1093 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); |
1098 | p_ramrod->sb_index = p_tx->tx_sb_index; | 1094 | p_ramrod->sb_index = p_tx->tx_sb_index; |
1099 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); | 1095 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); |
1100 | p_ramrod->stats_en = p_ll2_conn->tx_stats_en; | 1096 | p_ramrod->stats_en = p_ll2_conn->tx_stats_en; |
1101 | p_ramrod->stats_id = p_ll2_conn->tx_stats_id; | 1097 | p_ramrod->stats_id = p_ll2_conn->tx_stats_id; |
1102 | 1098 | ||
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | |||
1106 | p_ramrod->pbl_size = cpu_to_le16(pbl_size); | 1102 | p_ramrod->pbl_size = cpu_to_le16(pbl_size); |
1107 | 1103 | ||
1108 | memset(&pq_params, 0, sizeof(pq_params)); | 1104 | memset(&pq_params, 0, sizeof(pq_params)); |
1109 | pq_params.core.tc = p_ll2_conn->tx_tc; | 1105 | pq_params.core.tc = p_ll2_conn->conn.tx_tc; |
1110 | pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); | 1106 | pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); |
1111 | p_ramrod->qm_pq_id = cpu_to_le16(pq_id); | 1107 | p_ramrod->qm_pq_id = cpu_to_le16(pq_id); |
1112 | 1108 | ||
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | |||
1123 | DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); | 1119 | DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); |
1124 | } | 1120 | } |
1125 | 1121 | ||
1126 | p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; | 1122 | p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; |
1127 | return qed_spq_post(p_hwfn, p_ent, NULL); | 1123 | return qed_spq_post(p_hwfn, p_ent, NULL); |
1128 | } | 1124 | } |
1129 | 1125 | ||
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, | |||
1224 | 1220 | ||
1225 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, | 1221 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, |
1226 | "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", | 1222 | "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", |
1227 | p_ll2_info->conn_type, rx_num_desc); | 1223 | p_ll2_info->conn.conn_type, rx_num_desc); |
1228 | 1224 | ||
1229 | out: | 1225 | out: |
1230 | return rc; | 1226 | return rc; |
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, | |||
1262 | 1258 | ||
1263 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, | 1259 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, |
1264 | "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", | 1260 | "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", |
1265 | p_ll2_info->conn_type, tx_num_desc); | 1261 | p_ll2_info->conn.conn_type, tx_num_desc); |
1266 | 1262 | ||
1267 | out: | 1263 | out: |
1268 | if (rc) | 1264 | if (rc) |
@@ -1273,7 +1269,7 @@ out: | |||
1273 | } | 1269 | } |
1274 | 1270 | ||
1275 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, | 1271 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, |
1276 | struct qed_ll2_info *p_params, | 1272 | struct qed_ll2_conn *p_params, |
1277 | u16 rx_num_desc, | 1273 | u16 rx_num_desc, |
1278 | u16 tx_num_desc, | 1274 | u16 tx_num_desc, |
1279 | u8 *p_connection_handle) | 1275 | u8 *p_connection_handle) |
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, | |||
1302 | if (!p_ll2_info) | 1298 | if (!p_ll2_info) |
1303 | return -EBUSY; | 1299 | return -EBUSY; |
1304 | 1300 | ||
1305 | p_ll2_info->conn_type = p_params->conn_type; | 1301 | p_ll2_info->conn = *p_params; |
1306 | p_ll2_info->mtu = p_params->mtu; | ||
1307 | p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg; | ||
1308 | p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en; | ||
1309 | p_ll2_info->tx_tc = p_params->tx_tc; | ||
1310 | p_ll2_info->tx_dest = p_params->tx_dest; | ||
1311 | p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big; | ||
1312 | p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf; | ||
1313 | p_ll2_info->gsi_enable = p_params->gsi_enable; | ||
1314 | 1302 | ||
1315 | rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); | 1303 | rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); |
1316 | if (rc) | 1304 | if (rc) |
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, | |||
1371 | 1359 | ||
1372 | SET_FIELD(action_on_error, | 1360 | SET_FIELD(action_on_error, |
1373 | CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, | 1361 | CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, |
1374 | p_ll2_conn->ai_err_packet_too_big); | 1362 | p_ll2_conn->conn.ai_err_packet_too_big); |
1375 | SET_FIELD(action_on_error, | 1363 | SET_FIELD(action_on_error, |
1376 | CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf); | 1364 | CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf); |
1377 | 1365 | ||
1378 | return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); | 1366 | return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); |
1379 | } | 1367 | } |
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | |||
1600 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", | 1588 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", |
1601 | p_ll2->queue_id, | 1589 | p_ll2->queue_id, |
1602 | p_ll2->cid, | 1590 | p_ll2->cid, |
1603 | p_ll2->conn_type, | 1591 | p_ll2->conn.conn_type, |
1604 | prod_idx, | 1592 | prod_idx, |
1605 | first_frag_len, | 1593 | first_frag_len, |
1606 | num_of_bds, | 1594 | num_of_bds, |
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, | |||
1676 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), | 1664 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), |
1677 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", | 1665 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", |
1678 | p_ll2_conn->queue_id, | 1666 | p_ll2_conn->queue_id, |
1679 | p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod); | 1667 | p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod); |
1680 | } | 1668 | } |
1681 | 1669 | ||
1682 | int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, | 1670 | int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, |
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
1817 | qed_ll2_rxq_flush(p_hwfn, connection_handle); | 1805 | qed_ll2_rxq_flush(p_hwfn, connection_handle); |
1818 | } | 1806 | } |
1819 | 1807 | ||
1820 | if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) | 1808 | if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) |
1821 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); | 1809 | qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); |
1822 | 1810 | ||
1823 | return rc; | 1811 | return rc; |
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, | |||
1993 | 1981 | ||
1994 | static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) | 1982 | static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) |
1995 | { | 1983 | { |
1996 | struct qed_ll2_info ll2_info; | 1984 | struct qed_ll2_conn ll2_info; |
1997 | struct qed_ll2_buffer *buffer, *tmp_buffer; | 1985 | struct qed_ll2_buffer *buffer, *tmp_buffer; |
1998 | enum qed_ll2_conn_type conn_type; | 1986 | enum qed_ll2_conn_type conn_type; |
1999 | struct qed_ptt *p_ptt; | 1987 | struct qed_ptt *p_ptt; |
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) | |||
2041 | 2029 | ||
2042 | /* Prepare the temporary ll2 information */ | 2030 | /* Prepare the temporary ll2 information */ |
2043 | memset(&ll2_info, 0, sizeof(ll2_info)); | 2031 | memset(&ll2_info, 0, sizeof(ll2_info)); |
2032 | |||
2044 | ll2_info.conn_type = conn_type; | 2033 | ll2_info.conn_type = conn_type; |
2045 | ll2_info.mtu = params->mtu; | 2034 | ll2_info.mtu = params->mtu; |
2046 | ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; | 2035 | ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; |
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) | |||
2120 | } | 2109 | } |
2121 | 2110 | ||
2122 | ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); | 2111 | ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); |
2123 | |||
2124 | return 0; | 2112 | return 0; |
2125 | 2113 | ||
2126 | release_terminate_all: | 2114 | release_terminate_all: |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 6625a3ae5a33..31417928b635 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h | |||
@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue { | |||
112 | bool b_completing_packet; | 112 | bool b_completing_packet; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | struct qed_ll2_info { | 115 | struct qed_ll2_conn { |
116 | /* Lock protecting the state of LL2 */ | ||
117 | struct mutex mutex; | ||
118 | enum qed_ll2_conn_type conn_type; | 116 | enum qed_ll2_conn_type conn_type; |
119 | u32 cid; | ||
120 | u8 my_id; | ||
121 | u8 queue_id; | ||
122 | u8 tx_stats_id; | ||
123 | bool b_active; | ||
124 | u16 mtu; | 117 | u16 mtu; |
125 | u8 rx_drop_ttl0_flg; | 118 | u8 rx_drop_ttl0_flg; |
126 | u8 rx_vlan_removal_en; | 119 | u8 rx_vlan_removal_en; |
@@ -128,10 +121,21 @@ struct qed_ll2_info { | |||
128 | enum core_tx_dest tx_dest; | 121 | enum core_tx_dest tx_dest; |
129 | enum core_error_handle ai_err_packet_too_big; | 122 | enum core_error_handle ai_err_packet_too_big; |
130 | enum core_error_handle ai_err_no_buf; | 123 | enum core_error_handle ai_err_no_buf; |
124 | u8 gsi_enable; | ||
125 | }; | ||
126 | |||
127 | struct qed_ll2_info { | ||
128 | /* Lock protecting the state of LL2 */ | ||
129 | struct mutex mutex; | ||
130 | struct qed_ll2_conn conn; | ||
131 | u32 cid; | ||
132 | u8 my_id; | ||
133 | u8 queue_id; | ||
134 | u8 tx_stats_id; | ||
135 | bool b_active; | ||
131 | u8 tx_stats_en; | 136 | u8 tx_stats_en; |
132 | struct qed_ll2_rx_queue rx_queue; | 137 | struct qed_ll2_rx_queue rx_queue; |
133 | struct qed_ll2_tx_queue tx_queue; | 138 | struct qed_ll2_tx_queue tx_queue; |
134 | u8 gsi_enable; | ||
135 | }; | 139 | }; |
136 | 140 | ||
137 | /** | 141 | /** |
@@ -149,7 +153,7 @@ struct qed_ll2_info { | |||
149 | * @return 0 on success, failure otherwise | 153 | * @return 0 on success, failure otherwise |
150 | */ | 154 | */ |
151 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, | 155 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, |
152 | struct qed_ll2_info *p_params, | 156 | struct qed_ll2_conn *p_params, |
153 | u16 rx_num_desc, | 157 | u16 rx_num_desc, |
154 | u16 tx_num_desc, | 158 | u16 tx_num_desc, |
155 | u8 *p_connection_handle); | 159 | u8 *p_connection_handle); |
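The qed_ll2.h hunk above factors the caller-supplied parameters out of qed_ll2_info into a dedicated struct qed_ll2_conn, embedded as the conn member, and qed_ll2_acquire_connection() now takes the small struct directly. That is what lets qed_ll2_start_ooo() drop its kzalloc(): callers build the parameter block on the stack and the driver copies it with one struct assignment (p_ll2_info->conn = *p_params) instead of nine field-by-field copies. The shape of the refactor, reduced to a sketch:

    /* Reduced sketch of the split: caller-visible parameters in their own
     * struct, embedded inside the driver-private state.
     */
    struct example_conn {
            int conn_type;
            u16 mtu;
            /* ... remaining caller-set parameters ... */
    };

    struct example_info {
            struct example_conn conn;       /* copied in on acquire */
            u32 cid;                        /* driver-private from here on */
    };

    static void example_acquire(struct example_info *info,
                                const struct example_conn *params)
    {
            info->conn = *params;   /* one assignment replaces the removed
                                     * per-field copies */
    }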
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 2a16547c8966..2dbdb3298991 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, | |||
2632 | { | 2632 | { |
2633 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); | 2633 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
2634 | struct qed_roce_ll2_info *roce_ll2; | 2634 | struct qed_roce_ll2_info *roce_ll2; |
2635 | struct qed_ll2_info ll2_params; | 2635 | struct qed_ll2_conn ll2_params; |
2636 | int rc; | 2636 | int rc; |
2637 | 2637 | ||
2638 | if (!params) { | 2638 | if (!params) { |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 89ac1e3f6175..301f48755093 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = { | |||
179 | .get_mdio_data = ravb_get_mdio_data, | 179 | .get_mdio_data = ravb_get_mdio_data, |
180 | }; | 180 | }; |
181 | 181 | ||
182 | /* Free TX skb function for AVB-IP */ | ||
183 | static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) | ||
184 | { | ||
185 | struct ravb_private *priv = netdev_priv(ndev); | ||
186 | struct net_device_stats *stats = &priv->stats[q]; | ||
187 | struct ravb_tx_desc *desc; | ||
188 | int free_num = 0; | ||
189 | int entry; | ||
190 | u32 size; | ||
191 | |||
192 | for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { | ||
193 | bool txed; | ||
194 | |||
195 | entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * | ||
196 | NUM_TX_DESC); | ||
197 | desc = &priv->tx_ring[q][entry]; | ||
198 | txed = desc->die_dt == DT_FEMPTY; | ||
199 | if (free_txed_only && !txed) | ||
200 | break; | ||
201 | /* Descriptor type must be checked before all other reads */ | ||
202 | dma_rmb(); | ||
203 | size = le16_to_cpu(desc->ds_tagl) & TX_DS; | ||
204 | /* Free the original skb. */ | ||
205 | if (priv->tx_skb[q][entry / NUM_TX_DESC]) { | ||
206 | dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), | ||
207 | size, DMA_TO_DEVICE); | ||
208 | /* Last packet descriptor? */ | ||
209 | if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { | ||
210 | entry /= NUM_TX_DESC; | ||
211 | dev_kfree_skb_any(priv->tx_skb[q][entry]); | ||
212 | priv->tx_skb[q][entry] = NULL; | ||
213 | if (txed) | ||
214 | stats->tx_packets++; | ||
215 | } | ||
216 | free_num++; | ||
217 | } | ||
218 | if (txed) | ||
219 | stats->tx_bytes += size; | ||
220 | desc->die_dt = DT_EEMPTY; | ||
221 | } | ||
222 | return free_num; | ||
223 | } | ||
224 | |||
182 | /* Free skb's and DMA buffers for Ethernet AVB */ | 225 | /* Free skb's and DMA buffers for Ethernet AVB */ |
183 | static void ravb_ring_free(struct net_device *ndev, int q) | 226 | static void ravb_ring_free(struct net_device *ndev, int q) |
184 | { | 227 | { |
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
194 | kfree(priv->rx_skb[q]); | 237 | kfree(priv->rx_skb[q]); |
195 | priv->rx_skb[q] = NULL; | 238 | priv->rx_skb[q] = NULL; |
196 | 239 | ||
197 | /* Free TX skb ringbuffer */ | ||
198 | if (priv->tx_skb[q]) { | ||
199 | for (i = 0; i < priv->num_tx_ring[q]; i++) | ||
200 | dev_kfree_skb(priv->tx_skb[q][i]); | ||
201 | } | ||
202 | kfree(priv->tx_skb[q]); | ||
203 | priv->tx_skb[q] = NULL; | ||
204 | |||
205 | /* Free aligned TX buffers */ | 240 | /* Free aligned TX buffers */ |
206 | kfree(priv->tx_align[q]); | 241 | kfree(priv->tx_align[q]); |
207 | priv->tx_align[q] = NULL; | 242 | priv->tx_align[q] = NULL; |
208 | 243 | ||
209 | if (priv->rx_ring[q]) { | 244 | if (priv->rx_ring[q]) { |
245 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | ||
246 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; | ||
247 | |||
248 | if (!dma_mapping_error(ndev->dev.parent, | ||
249 | le32_to_cpu(desc->dptr))) | ||
250 | dma_unmap_single(ndev->dev.parent, | ||
251 | le32_to_cpu(desc->dptr), | ||
252 | PKT_BUF_SZ, | ||
253 | DMA_FROM_DEVICE); | ||
254 | } | ||
210 | ring_size = sizeof(struct ravb_ex_rx_desc) * | 255 | ring_size = sizeof(struct ravb_ex_rx_desc) * |
211 | (priv->num_rx_ring[q] + 1); | 256 | (priv->num_rx_ring[q] + 1); |
212 | dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], | 257 | dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], |
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
215 | } | 260 | } |
216 | 261 | ||
217 | if (priv->tx_ring[q]) { | 262 | if (priv->tx_ring[q]) { |
263 | ravb_tx_free(ndev, q, false); | ||
264 | |||
218 | ring_size = sizeof(struct ravb_tx_desc) * | 265 | ring_size = sizeof(struct ravb_tx_desc) * |
219 | (priv->num_tx_ring[q] * NUM_TX_DESC + 1); | 266 | (priv->num_tx_ring[q] * NUM_TX_DESC + 1); |
220 | dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], | 267 | dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], |
221 | priv->tx_desc_dma[q]); | 268 | priv->tx_desc_dma[q]); |
222 | priv->tx_ring[q] = NULL; | 269 | priv->tx_ring[q] = NULL; |
223 | } | 270 | } |
271 | |||
272 | /* Free TX skb ringbuffer. | ||
273 | * SKBs are freed by the ravb_tx_free() call above. | ||
274 | */ | ||
275 | kfree(priv->tx_skb[q]); | ||
276 | priv->tx_skb[q] = NULL; | ||
224 | } | 277 | } |
225 | 278 | ||
226 | /* Format skb and descriptor buffer for Ethernet AVB */ | 279 | /* Format skb and descriptor buffer for Ethernet AVB */ |
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev) | |||
431 | return 0; | 484 | return 0; |
432 | } | 485 | } |
433 | 486 | ||
434 | /* Free TX skb function for AVB-IP */ | ||
435 | static int ravb_tx_free(struct net_device *ndev, int q) | ||
436 | { | ||
437 | struct ravb_private *priv = netdev_priv(ndev); | ||
438 | struct net_device_stats *stats = &priv->stats[q]; | ||
439 | struct ravb_tx_desc *desc; | ||
440 | int free_num = 0; | ||
441 | int entry; | ||
442 | u32 size; | ||
443 | |||
444 | for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { | ||
445 | entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * | ||
446 | NUM_TX_DESC); | ||
447 | desc = &priv->tx_ring[q][entry]; | ||
448 | if (desc->die_dt != DT_FEMPTY) | ||
449 | break; | ||
450 | /* Descriptor type must be checked before all other reads */ | ||
451 | dma_rmb(); | ||
452 | size = le16_to_cpu(desc->ds_tagl) & TX_DS; | ||
453 | /* Free the original skb. */ | ||
454 | if (priv->tx_skb[q][entry / NUM_TX_DESC]) { | ||
455 | dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), | ||
456 | size, DMA_TO_DEVICE); | ||
457 | /* Last packet descriptor? */ | ||
458 | if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { | ||
459 | entry /= NUM_TX_DESC; | ||
460 | dev_kfree_skb_any(priv->tx_skb[q][entry]); | ||
461 | priv->tx_skb[q][entry] = NULL; | ||
462 | stats->tx_packets++; | ||
463 | } | ||
464 | free_num++; | ||
465 | } | ||
466 | stats->tx_bytes += size; | ||
467 | desc->die_dt = DT_EEMPTY; | ||
468 | } | ||
469 | return free_num; | ||
470 | } | ||
471 | |||
472 | static void ravb_get_tx_tstamp(struct net_device *ndev) | 487 | static void ravb_get_tx_tstamp(struct net_device *ndev) |
473 | { | 488 | { |
474 | struct ravb_private *priv = netdev_priv(ndev); | 489 | struct ravb_private *priv = netdev_priv(ndev); |
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
902 | spin_lock_irqsave(&priv->lock, flags); | 917 | spin_lock_irqsave(&priv->lock, flags); |
903 | /* Clear TX interrupt */ | 918 | /* Clear TX interrupt */ |
904 | ravb_write(ndev, ~mask, TIS); | 919 | ravb_write(ndev, ~mask, TIS); |
905 | ravb_tx_free(ndev, q); | 920 | ravb_tx_free(ndev, q, true); |
906 | netif_wake_subqueue(ndev, q); | 921 | netif_wake_subqueue(ndev, q); |
907 | mmiowb(); | 922 | mmiowb(); |
908 | spin_unlock_irqrestore(&priv->lock, flags); | 923 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1567 | 1582 | ||
1568 | priv->cur_tx[q] += NUM_TX_DESC; | 1583 | priv->cur_tx[q] += NUM_TX_DESC; |
1569 | if (priv->cur_tx[q] - priv->dirty_tx[q] > | 1584 | if (priv->cur_tx[q] - priv->dirty_tx[q] > |
1570 | (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) | 1585 | (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && |
1586 | !ravb_tx_free(ndev, q, true)) | ||
1571 | netif_stop_subqueue(ndev, q); | 1587 | netif_stop_subqueue(ndev, q); |
1572 | 1588 | ||
1573 | exit: | 1589 | exit: |
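
The ravb rework above routes every TX skb free through ravb_tx_free(), which reclaims descriptors by walking the free-running cur_tx/dirty_tx counters. A standalone sketch of that indexing scheme; RING_ENTRIES, busy[] and the loop are illustrative, not the driver's:

    /* cur_tx and dirty_tx only ever increase; the ring slot is
     * recovered with a modulo, and unsigned wraparound keeps
     * cur_tx - dirty_tx equal to the number of in-flight entries.
     */
    #include <stdio.h>

    #define RING_ENTRIES 4   /* num_tx_ring * NUM_TX_DESC in the driver */

    int main(void)
    {
        unsigned int cur_tx = 0, dirty_tx = 0;
        int busy[RING_ENTRIES] = { 0 };

        for (int pkt = 0; pkt < 6; pkt++) {
            if (cur_tx - dirty_tx == RING_ENTRIES) {
                /* consumer: reclaim the oldest completed entry */
                busy[dirty_tx % RING_ENTRIES] = 0;
                dirty_tx++;
            }
            busy[cur_tx % RING_ENTRIES] = 1;
            printf("pkt %d -> slot %u, in flight %u\n",
                   pkt, cur_tx % RING_ENTRIES, cur_tx - dirty_tx + 1);
            cur_tx++;
        }
        return 0;
    }

This is also why ravb_start_xmit() can test cur_tx - dirty_tx against the ring size before deciding to stop the subqueue.
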
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 082cd48db6a7..36942f5a6a53 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, | |||
351 | if (of_phy_is_fixed_link(np)) | 351 | if (of_phy_is_fixed_link(np)) |
352 | of_phy_deregister_fixed_link(np); | 352 | of_phy_deregister_fixed_link(np); |
353 | of_node_put(plat->phy_node); | 353 | of_node_put(plat->phy_node); |
354 | of_node_put(plat->mdio_node); | ||
354 | } | 355 | } |
355 | #else | 356 | #else |
356 | struct plat_stmmacenet_data * | 357 | struct plat_stmmacenet_data * |
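
The stmmac one-liner fixes a leaked device_node reference: OF helpers that return a node take a reference the caller owns. A minimal sketch of the acquire/release pairing; struct plat_data and the function names are stand-ins for the driver's own:

    #include <linux/of.h>

    struct plat_data {              /* stand-in for plat_stmmacenet_data */
        struct device_node *phy_node;
        struct device_node *mdio_node;
    };

    static void example_parse(struct device_node *np, struct plat_data *plat)
    {
        plat->phy_node  = of_parse_phandle(np, "phy-handle", 0); /* +1 ref */
        plat->mdio_node = of_get_child_by_name(np, "mdio");      /* +1 ref */
    }

    static void example_remove(struct plat_data *plat)
    {
        of_node_put(plat->phy_node);  /* every stored node gets a put; */
        of_node_put(plat->mdio_node); /* of_node_put(NULL) is a no-op  */
    }
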
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 8b6810bad54b..99d3df788ce8 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
@@ -69,7 +69,6 @@ struct gtp_dev { | |||
69 | struct socket *sock0; | 69 | struct socket *sock0; |
70 | struct socket *sock1u; | 70 | struct socket *sock1u; |
71 | 71 | ||
72 | struct net *net; | ||
73 | struct net_device *dev; | 72 | struct net_device *dev; |
74 | 73 | ||
75 | unsigned int hash_size; | 74 | unsigned int hash_size; |
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
316 | 315 | ||
317 | netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); | 316 | netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); |
318 | 317 | ||
319 | xnet = !net_eq(gtp->net, dev_net(gtp->dev)); | 318 | xnet = !net_eq(sock_net(sk), dev_net(gtp->dev)); |
320 | 319 | ||
321 | switch (udp_sk(sk)->encap_type) { | 320 | switch (udp_sk(sk)->encap_type) { |
322 | case UDP_ENCAP_GTP0: | 321 | case UDP_ENCAP_GTP0: |
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
612 | pktinfo.fl4.saddr, pktinfo.fl4.daddr, | 611 | pktinfo.fl4.saddr, pktinfo.fl4.daddr, |
613 | pktinfo.iph->tos, | 612 | pktinfo.iph->tos, |
614 | ip4_dst_hoplimit(&pktinfo.rt->dst), | 613 | ip4_dst_hoplimit(&pktinfo.rt->dst), |
615 | htons(IP_DF), | 614 | 0, |
616 | pktinfo.gtph_port, pktinfo.gtph_port, | 615 | pktinfo.gtph_port, pktinfo.gtph_port, |
617 | true, false); | 616 | true, false); |
618 | break; | 617 | break; |
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev) | |||
658 | static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); | 657 | static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); |
659 | static void gtp_hashtable_free(struct gtp_dev *gtp); | 658 | static void gtp_hashtable_free(struct gtp_dev *gtp); |
660 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, | 659 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, |
661 | int fd_gtp0, int fd_gtp1, struct net *src_net); | 660 | int fd_gtp0, int fd_gtp1); |
662 | 661 | ||
663 | static int gtp_newlink(struct net *src_net, struct net_device *dev, | 662 | static int gtp_newlink(struct net *src_net, struct net_device *dev, |
664 | struct nlattr *tb[], struct nlattr *data[]) | 663 | struct nlattr *tb[], struct nlattr *data[]) |
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, | |||
675 | fd0 = nla_get_u32(data[IFLA_GTP_FD0]); | 674 | fd0 = nla_get_u32(data[IFLA_GTP_FD0]); |
676 | fd1 = nla_get_u32(data[IFLA_GTP_FD1]); | 675 | fd1 = nla_get_u32(data[IFLA_GTP_FD1]); |
677 | 676 | ||
678 | err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); | 677 | err = gtp_encap_enable(dev, gtp, fd0, fd1); |
679 | if (err < 0) | 678 | if (err < 0) |
680 | goto out_err; | 679 | goto out_err; |
681 | 680 | ||
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) | |||
821 | } | 820 | } |
822 | 821 | ||
823 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, | 822 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, |
824 | int fd_gtp0, int fd_gtp1, struct net *src_net) | 823 | int fd_gtp0, int fd_gtp1) |
825 | { | 824 | { |
826 | struct udp_tunnel_sock_cfg tuncfg = {NULL}; | 825 | struct udp_tunnel_sock_cfg tuncfg = {NULL}; |
827 | struct socket *sock0, *sock1u; | 826 | struct socket *sock0, *sock1u; |
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, | |||
858 | 857 | ||
859 | gtp->sock0 = sock0; | 858 | gtp->sock0 = sock0; |
860 | gtp->sock1u = sock1u; | 859 | gtp->sock1u = sock1u; |
861 | gtp->net = src_net; | ||
862 | 860 | ||
863 | tuncfg.sk_user_data = gtp; | 861 | tuncfg.sk_user_data = gtp; |
864 | tuncfg.encap_rcv = gtp_encap_recv; | 862 | tuncfg.encap_rcv = gtp_encap_recv; |
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL"); | |||
1376 | MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); | 1374 | MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); |
1377 | MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); | 1375 | MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); |
1378 | MODULE_ALIAS_RTNL_LINK("gtp"); | 1376 | MODULE_ALIAS_RTNL_LINK("gtp"); |
1377 | MODULE_ALIAS_GENL_FAMILY("gtp"); | ||
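
Dropping the cached gtp->net in favour of sock_net(sk) makes the cross-namespace (xnet) test reflect the namespace the encap socket actually lives in at receive time, and lets the src_net parameter disappear from the setup path. A minimal sketch of the check, assuming kernel context; the function name is illustrative:

    #include <net/sock.h>
    #include <linux/netdevice.h>

    static bool example_is_cross_netns(struct sock *sk, struct net_device *dev)
    {
        /* true when decapsulated packets cross into another namespace */
        return !net_eq(sock_net(sk), dev_net(dev));
    }
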
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5c26653eceb5..402618565838 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
825 | return -EINVAL; | 825 | return -EINVAL; |
826 | 826 | ||
827 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, | 827 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, |
828 | macvtap_is_little_endian(q))) | 828 | macvtap_is_little_endian(q), true)) |
829 | BUG(); | 829 | BUG(); |
830 | 830 | ||
831 | if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != | 831 | if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != |
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index e741bf614c4e..b0492ef2cdaa 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c | |||
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver"); | |||
21 | MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); | 21 | MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); |
22 | MODULE_LICENSE("GPL"); | 22 | MODULE_LICENSE("GPL"); |
23 | 23 | ||
24 | static int bcm63xx_config_intr(struct phy_device *phydev) | ||
25 | { | ||
26 | int reg, err; | ||
27 | |||
28 | reg = phy_read(phydev, MII_BCM63XX_IR); | ||
29 | if (reg < 0) | ||
30 | return reg; | ||
31 | |||
32 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | ||
33 | reg &= ~MII_BCM63XX_IR_GMASK; | ||
34 | else | ||
35 | reg |= MII_BCM63XX_IR_GMASK; | ||
36 | |||
37 | err = phy_write(phydev, MII_BCM63XX_IR, reg); | ||
38 | return err; | ||
39 | } | ||
40 | |||
24 | static int bcm63xx_config_init(struct phy_device *phydev) | 41 | static int bcm63xx_config_init(struct phy_device *phydev) |
25 | { | 42 | { |
26 | int reg, err; | 43 | int reg, err; |
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = { | |||
55 | .config_aneg = genphy_config_aneg, | 72 | .config_aneg = genphy_config_aneg, |
56 | .read_status = genphy_read_status, | 73 | .read_status = genphy_read_status, |
57 | .ack_interrupt = bcm_phy_ack_intr, | 74 | .ack_interrupt = bcm_phy_ack_intr, |
58 | .config_intr = bcm_phy_config_intr, | 75 | .config_intr = bcm63xx_config_intr, |
59 | }, { | 76 | }, { |
60 | /* same phy as above, with just a different OUI */ | 77 | /* same phy as above, with just a different OUI */ |
61 | .phy_id = 0x002bdc00, | 78 | .phy_id = 0x002bdc00, |
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = { | |||
67 | .config_aneg = genphy_config_aneg, | 84 | .config_aneg = genphy_config_aneg, |
68 | .read_status = genphy_read_status, | 85 | .read_status = genphy_read_status, |
69 | .ack_interrupt = bcm_phy_ack_intr, | 86 | .ack_interrupt = bcm_phy_ack_intr, |
70 | .config_intr = bcm_phy_config_intr, | 87 | .config_intr = bcm63xx_config_intr, |
71 | } }; | 88 | } }; |
72 | 89 | ||
73 | module_phy_driver(bcm63xx_driver); | 90 | module_phy_driver(bcm63xx_driver); |
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 800b39f06279..a10d0e7fc5f7 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/phy.h> | 17 | #include <linux/phy.h> |
18 | 18 | ||
19 | #define TI_DP83848C_PHY_ID 0x20005ca0 | 19 | #define TI_DP83848C_PHY_ID 0x20005ca0 |
20 | #define TI_DP83620_PHY_ID 0x20005ce0 | ||
20 | #define NS_DP83848C_PHY_ID 0x20005c90 | 21 | #define NS_DP83848C_PHY_ID 0x20005c90 |
21 | #define TLK10X_PHY_ID 0x2000a210 | 22 | #define TLK10X_PHY_ID 0x2000a210 |
22 | #define TI_DP83822_PHY_ID 0x2000a240 | 23 | #define TI_DP83822_PHY_ID 0x2000a240 |
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev) | |||
77 | static struct mdio_device_id __maybe_unused dp83848_tbl[] = { | 78 | static struct mdio_device_id __maybe_unused dp83848_tbl[] = { |
78 | { TI_DP83848C_PHY_ID, 0xfffffff0 }, | 79 | { TI_DP83848C_PHY_ID, 0xfffffff0 }, |
79 | { NS_DP83848C_PHY_ID, 0xfffffff0 }, | 80 | { NS_DP83848C_PHY_ID, 0xfffffff0 }, |
81 | { TI_DP83620_PHY_ID, 0xfffffff0 }, | ||
80 | { TLK10X_PHY_ID, 0xfffffff0 }, | 82 | { TLK10X_PHY_ID, 0xfffffff0 }, |
81 | { TI_DP83822_PHY_ID, 0xfffffff0 }, | 83 | { TI_DP83822_PHY_ID, 0xfffffff0 }, |
82 | { } | 84 | { } |
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); | |||
106 | static struct phy_driver dp83848_driver[] = { | 108 | static struct phy_driver dp83848_driver[] = { |
107 | DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), | 109 | DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), |
108 | DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), | 110 | DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), |
111 | DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"), | ||
109 | DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), | 112 | DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), |
110 | DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), | 113 | DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), |
111 | }; | 114 | }; |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0b78210c0fa7..ed0d235cf850 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -1679,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = { | |||
1679 | .ack_interrupt = &marvell_ack_interrupt, | 1679 | .ack_interrupt = &marvell_ack_interrupt, |
1680 | .config_intr = &marvell_config_intr, | 1680 | .config_intr = &marvell_config_intr, |
1681 | .did_interrupt = &m88e1121_did_interrupt, | 1681 | .did_interrupt = &m88e1121_did_interrupt, |
1682 | .get_wol = &m88e1318_get_wol, | ||
1683 | .set_wol = &m88e1318_set_wol, | ||
1682 | .resume = &marvell_resume, | 1684 | .resume = &marvell_resume, |
1683 | .suspend = &marvell_suspend, | 1685 | .suspend = &marvell_suspend, |
1684 | .get_sset_count = marvell_get_sset_count, | 1686 | .get_sset_count = marvell_get_sset_count, |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9a77289109b7..e55809c5beb7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = { | |||
1008 | .get_stats = kszphy_get_stats, | 1008 | .get_stats = kszphy_get_stats, |
1009 | .suspend = genphy_suspend, | 1009 | .suspend = genphy_suspend, |
1010 | .resume = genphy_resume, | 1010 | .resume = genphy_resume, |
1011 | }, { | ||
1012 | .phy_id = PHY_ID_KSZ8795, | ||
1013 | .phy_id_mask = MICREL_PHY_ID_MASK, | ||
1014 | .name = "Micrel KSZ8795", | ||
1015 | .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), | ||
1016 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, | ||
1017 | .config_init = kszphy_config_init, | ||
1018 | .config_aneg = ksz8873mll_config_aneg, | ||
1019 | .read_status = ksz8873mll_read_status, | ||
1020 | .get_sset_count = kszphy_get_sset_count, | ||
1021 | .get_strings = kszphy_get_strings, | ||
1022 | .get_stats = kszphy_get_stats, | ||
1023 | .suspend = genphy_suspend, | ||
1024 | .resume = genphy_resume, | ||
1011 | } }; | 1025 | } }; |
1012 | 1026 | ||
1013 | module_phy_driver(ksphy_driver); | 1027 | module_phy_driver(ksphy_driver); |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 48da6e93c3f7..7cc1b7dcfe05 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/mii.h> | 29 | #include <linux/mii.h> |
30 | #include <linux/ethtool.h> | 30 | #include <linux/ethtool.h> |
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | #include <linux/phy_led_triggers.h> | ||
32 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
33 | #include <linux/workqueue.h> | 34 | #include <linux/workqueue.h> |
34 | #include <linux/mdio.h> | 35 | #include <linux/mdio.h> |
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev) | |||
649 | * phy_trigger_machine - trigger the state machine to run | 650 | * phy_trigger_machine - trigger the state machine to run |
650 | * | 651 | * |
651 | * @phydev: the phy_device struct | 652 | * @phydev: the phy_device struct |
653 | * @sync: indicate whether we should wait for the workqueue cancelation | ||
652 | * | 654 | * |
653 | * Description: There has been a change in state which requires that the | 655 | * Description: There has been a change in state which requires that the |
654 | * state machine runs. | 656 | * state machine runs. |
655 | */ | 657 | */ |
656 | 658 | ||
657 | static void phy_trigger_machine(struct phy_device *phydev) | 659 | static void phy_trigger_machine(struct phy_device *phydev, bool sync) |
658 | { | 660 | { |
659 | cancel_delayed_work_sync(&phydev->state_queue); | 661 | if (sync) |
662 | cancel_delayed_work_sync(&phydev->state_queue); | ||
663 | else | ||
664 | cancel_delayed_work(&phydev->state_queue); | ||
660 | queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); | 665 | queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); |
661 | } | 666 | } |
662 | 667 | ||
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev) | |||
693 | phydev->state = PHY_HALTED; | 698 | phydev->state = PHY_HALTED; |
694 | mutex_unlock(&phydev->lock); | 699 | mutex_unlock(&phydev->lock); |
695 | 700 | ||
696 | phy_trigger_machine(phydev); | 701 | phy_trigger_machine(phydev, false); |
697 | } | 702 | } |
698 | 703 | ||
699 | /** | 704 | /** |
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev) | |||
840 | } | 845 | } |
841 | 846 | ||
842 | /* reschedule state queue work to run as soon as possible */ | 847 | /* reschedule state queue work to run as soon as possible */ |
843 | phy_trigger_machine(phydev); | 848 | phy_trigger_machine(phydev, true); |
844 | return; | 849 | return; |
845 | 850 | ||
846 | ignore: | 851 | ignore: |
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev) | |||
942 | if (do_resume) | 947 | if (do_resume) |
943 | phy_resume(phydev); | 948 | phy_resume(phydev); |
944 | 949 | ||
945 | phy_trigger_machine(phydev); | 950 | phy_trigger_machine(phydev, true); |
946 | } | 951 | } |
947 | EXPORT_SYMBOL(phy_start); | 952 | EXPORT_SYMBOL(phy_start); |
948 | 953 | ||
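
The new sync parameter exists because phy_trigger_machine() can be reached from within the state-machine work item itself (the phy_error() path); a synchronous cancel there would wait on the very work it is running in. A minimal sketch of the rule with illustrative names; the other call sites run outside the work item and keep the synchronous cancel:

    #include <linux/workqueue.h>

    static void example_retrigger(struct delayed_work *dw, bool sync)
    {
        if (sync)
            cancel_delayed_work_sync(dw); /* only safe from outside dw */
        else
            cancel_delayed_work(dw);      /* safe from within dw itself */
        queue_delayed_work(system_power_efficient_wq, dw, 0);
    }
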
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index fa62bdf2f526..94ca42e630bb 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include <linux/leds.h> | 13 | #include <linux/leds.h> |
14 | #include <linux/phy.h> | 14 | #include <linux/phy.h> |
15 | #include <linux/phy_led_triggers.h> | ||
15 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
16 | 17 | ||
17 | static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, | 18 | static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, |
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy) | |||
102 | sizeof(struct phy_led_trigger) * | 103 | sizeof(struct phy_led_trigger) * |
103 | phy->phy_num_led_triggers, | 104 | phy->phy_num_led_triggers, |
104 | GFP_KERNEL); | 105 | GFP_KERNEL); |
105 | if (!phy->phy_led_triggers) | 106 | if (!phy->phy_led_triggers) { |
106 | return -ENOMEM; | 107 | err = -ENOMEM; |
108 | goto out_clear; | ||
109 | } | ||
107 | 110 | ||
108 | for (i = 0; i < phy->phy_num_led_triggers; i++) { | 111 | for (i = 0; i < phy->phy_num_led_triggers; i++) { |
109 | err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], | 112 | err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], |
@@ -120,6 +123,8 @@ out_unreg: | |||
120 | while (i--) | 123 | while (i--) |
121 | phy_led_trigger_unregister(&phy->phy_led_triggers[i]); | 124 | phy_led_trigger_unregister(&phy->phy_led_triggers[i]); |
122 | devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); | 125 | devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); |
126 | out_clear: | ||
127 | phy->phy_num_led_triggers = 0; | ||
123 | return err; | 128 | return err; |
124 | } | 129 | } |
125 | EXPORT_SYMBOL_GPL(phy_led_triggers_register); | 130 | EXPORT_SYMBOL_GPL(phy_led_triggers_register); |
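
The phy_led_triggers fix adds a second unwind label so a failed allocation also resets phy_num_led_triggers, keeping the count consistent with a freed or never-allocated trigger array. A standalone sketch of the unwind idiom; register_item()/unregister_item() are stand-ins:

    #include <stdlib.h>

    struct example {
        int nitems;
        int *items;
    };

    static int  register_item(int i)   { return i < 100 ? 0 : -1; }
    static void unregister_item(int i) { (void)i; }

    static int example_setup(struct example *ex, int n)
    {
        int i, err;

        ex->nitems = n;
        ex->items = malloc(sizeof(*ex->items) * n);
        if (!ex->items) {
            err = -1;
            goto out_clear;
        }

        for (i = 0; i < n; i++) {
            err = register_item(i);
            if (err)
                goto out_unreg;
            ex->items[i] = i;
        }
        return 0;

    out_unreg:
        while (i--)             /* undo only what succeeded */
            unregister_item(ex->items[i]);
        free(ex->items);
    out_clear:
        ex->nitems = 0;         /* never a nonzero count with no array */
        return err;
    }
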
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cd8e02c94be0..2cd10b26b650 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1360,7 +1360,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1360 | return -EINVAL; | 1360 | return -EINVAL; |
1361 | 1361 | ||
1362 | if (virtio_net_hdr_from_skb(skb, &gso, | 1362 | if (virtio_net_hdr_from_skb(skb, &gso, |
1363 | tun_is_little_endian(tun))) { | 1363 | tun_is_little_endian(tun), true)) { |
1364 | struct skb_shared_info *sinfo = skb_shinfo(skb); | 1364 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
1365 | pr_err("unexpected GSO type: " | 1365 | pr_err("unexpected GSO type: " |
1366 | "0x%x, gso_size %d, hdr_len %d\n", | 1366 | "0x%x, gso_size %d, hdr_len %d\n", |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index fe7b2886cb6b..86144f9a80ee 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = { | |||
531 | #define SAMSUNG_VENDOR_ID 0x04e8 | 531 | #define SAMSUNG_VENDOR_ID 0x04e8 |
532 | #define LENOVO_VENDOR_ID 0x17ef | 532 | #define LENOVO_VENDOR_ID 0x17ef |
533 | #define NVIDIA_VENDOR_ID 0x0955 | 533 | #define NVIDIA_VENDOR_ID 0x0955 |
534 | #define HP_VENDOR_ID 0x03f0 | ||
534 | 535 | ||
535 | static const struct usb_device_id products[] = { | 536 | static const struct usb_device_id products[] = { |
536 | /* BLACKLIST !! | 537 | /* BLACKLIST !! |
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = { | |||
677 | .driver_info = 0, | 678 | .driver_info = 0, |
678 | }, | 679 | }, |
679 | 680 | ||
681 | /* HP lt2523 (Novatel E371) - handled by qmi_wwan */ | ||
682 | { | ||
683 | USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM, | ||
684 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
685 | .driver_info = 0, | ||
686 | }, | ||
687 | |||
680 | /* AnyDATA ADU960S - handled by qmi_wwan */ | 688 | /* AnyDATA ADU960S - handled by qmi_wwan */ |
681 | { | 689 | { |
682 | USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, | 690 | USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6fe1cdb0174f..24d5272cdce5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = { | |||
654 | USB_CDC_PROTO_NONE), | 654 | USB_CDC_PROTO_NONE), |
655 | .driver_info = (unsigned long)&qmi_wwan_info, | 655 | .driver_info = (unsigned long)&qmi_wwan_info, |
656 | }, | 656 | }, |
657 | { /* HP lt2523 (Novatel E371) */ | ||
658 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, | ||
659 | USB_CLASS_COMM, | ||
660 | USB_CDC_SUBCLASS_ETHERNET, | ||
661 | USB_CDC_PROTO_NONE), | ||
662 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
663 | }, | ||
657 | { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ | 664 | { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ |
658 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), | 665 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), |
659 | .driver_info = (unsigned long)&qmi_wwan_info, | 666 | .driver_info = (unsigned long)&qmi_wwan_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f3b48ad90865..ad42295356dd 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #define NETNEXT_VERSION "08" | 32 | #define NETNEXT_VERSION "08" |
33 | 33 | ||
34 | /* Information for net */ | 34 | /* Information for net */ |
35 | #define NET_VERSION "6" | 35 | #define NET_VERSION "8" |
36 | 36 | ||
37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION | 37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION |
38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) | |||
1936 | napi_complete(napi); | 1936 | napi_complete(napi); |
1937 | if (!list_empty(&tp->rx_done)) | 1937 | if (!list_empty(&tp->rx_done)) |
1938 | napi_schedule(napi); | 1938 | napi_schedule(napi); |
1939 | else if (!skb_queue_empty(&tp->tx_queue) && | ||
1940 | !list_empty(&tp->tx_free)) | ||
1941 | napi_schedule(napi); | ||
1939 | } | 1942 | } |
1940 | 1943 | ||
1941 | return work_done; | 1944 | return work_done; |
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp) | |||
3155 | if (!netif_carrier_ok(netdev)) { | 3158 | if (!netif_carrier_ok(netdev)) { |
3156 | tp->rtl_ops.enable(tp); | 3159 | tp->rtl_ops.enable(tp); |
3157 | set_bit(RTL8152_SET_RX_MODE, &tp->flags); | 3160 | set_bit(RTL8152_SET_RX_MODE, &tp->flags); |
3161 | netif_stop_queue(netdev); | ||
3158 | napi_disable(&tp->napi); | 3162 | napi_disable(&tp->napi); |
3159 | netif_carrier_on(netdev); | 3163 | netif_carrier_on(netdev); |
3160 | rtl_start_rx(tp); | 3164 | rtl_start_rx(tp); |
3161 | napi_enable(&tp->napi); | 3165 | napi_enable(&tp->napi); |
3166 | netif_wake_queue(netdev); | ||
3167 | netif_info(tp, link, netdev, "carrier on\n"); | ||
3162 | } | 3168 | } |
3163 | } else { | 3169 | } else { |
3164 | if (netif_carrier_ok(netdev)) { | 3170 | if (netif_carrier_ok(netdev)) { |
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp) | |||
3166 | napi_disable(&tp->napi); | 3172 | napi_disable(&tp->napi); |
3167 | tp->rtl_ops.disable(tp); | 3173 | tp->rtl_ops.disable(tp); |
3168 | napi_enable(&tp->napi); | 3174 | napi_enable(&tp->napi); |
3175 | netif_info(tp, link, netdev, "carrier off\n"); | ||
3169 | } | 3176 | } |
3170 | } | 3177 | } |
3171 | } | 3178 | } |
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf) | |||
3515 | if (!netif_running(netdev)) | 3522 | if (!netif_running(netdev)) |
3516 | return 0; | 3523 | return 0; |
3517 | 3524 | ||
3525 | netif_stop_queue(netdev); | ||
3518 | napi_disable(&tp->napi); | 3526 | napi_disable(&tp->napi); |
3519 | clear_bit(WORK_ENABLE, &tp->flags); | 3527 | clear_bit(WORK_ENABLE, &tp->flags); |
3520 | usb_kill_urb(tp->intr_urb); | 3528 | usb_kill_urb(tp->intr_urb); |
3521 | cancel_delayed_work_sync(&tp->schedule); | 3529 | cancel_delayed_work_sync(&tp->schedule); |
3522 | if (netif_carrier_ok(netdev)) { | 3530 | if (netif_carrier_ok(netdev)) { |
3523 | netif_stop_queue(netdev); | ||
3524 | mutex_lock(&tp->control); | 3531 | mutex_lock(&tp->control); |
3525 | tp->rtl_ops.disable(tp); | 3532 | tp->rtl_ops.disable(tp); |
3526 | mutex_unlock(&tp->control); | 3533 | mutex_unlock(&tp->control); |
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf) | |||
3545 | if (netif_carrier_ok(netdev)) { | 3552 | if (netif_carrier_ok(netdev)) { |
3546 | mutex_lock(&tp->control); | 3553 | mutex_lock(&tp->control); |
3547 | tp->rtl_ops.enable(tp); | 3554 | tp->rtl_ops.enable(tp); |
3555 | rtl_start_rx(tp); | ||
3548 | rtl8152_set_rx_mode(netdev); | 3556 | rtl8152_set_rx_mode(netdev); |
3549 | mutex_unlock(&tp->control); | 3557 | mutex_unlock(&tp->control); |
3550 | netif_wake_queue(netdev); | ||
3551 | } | 3558 | } |
3552 | 3559 | ||
3553 | napi_enable(&tp->napi); | 3560 | napi_enable(&tp->napi); |
3561 | netif_wake_queue(netdev); | ||
3562 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); | ||
3563 | |||
3564 | if (!list_empty(&tp->rx_done)) | ||
3565 | napi_schedule(&tp->napi); | ||
3554 | 3566 | ||
3555 | return 0; | 3567 | return 0; |
3556 | } | 3568 | } |
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp) | |||
3572 | */ | 3584 | */ |
3573 | if (!sw_linking && tp->rtl_ops.in_nway(tp)) | 3585 | if (!sw_linking && tp->rtl_ops.in_nway(tp)) |
3574 | return true; | 3586 | return true; |
3587 | else if (!skb_queue_empty(&tp->tx_queue)) | ||
3588 | return true; | ||
3575 | else | 3589 | else |
3576 | return false; | 3590 | return false; |
3577 | } | 3591 | } |
@@ -3581,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | |||
3581 | struct net_device *netdev = tp->netdev; | 3595 | struct net_device *netdev = tp->netdev; |
3582 | int ret = 0; | 3596 | int ret = 0; |
3583 | 3597 | ||
3598 | set_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3599 | smp_mb__after_atomic(); | ||
3600 | |||
3584 | if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { | 3601 | if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { |
3585 | u32 rcr = 0; | 3602 | u32 rcr = 0; |
3586 | 3603 | ||
3587 | if (delay_autosuspend(tp)) { | 3604 | if (delay_autosuspend(tp)) { |
3605 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3606 | smp_mb__after_atomic(); | ||
3588 | ret = -EBUSY; | 3607 | ret = -EBUSY; |
3589 | goto out1; | 3608 | goto out1; |
3590 | } | 3609 | } |
@@ -3601,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | |||
3601 | if (!(ocp_data & RXFIFO_EMPTY)) { | 3620 | if (!(ocp_data & RXFIFO_EMPTY)) { |
3602 | rxdy_gated_en(tp, false); | 3621 | rxdy_gated_en(tp, false); |
3603 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); | 3622 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); |
3623 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3624 | smp_mb__after_atomic(); | ||
3604 | ret = -EBUSY; | 3625 | ret = -EBUSY; |
3605 | goto out1; | 3626 | goto out1; |
3606 | } | 3627 | } |
@@ -3620,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) | |||
3620 | } | 3641 | } |
3621 | } | 3642 | } |
3622 | 3643 | ||
3623 | set_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3624 | |||
3625 | out1: | 3644 | out1: |
3626 | return ret; | 3645 | return ret; |
3627 | } | 3646 | } |
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
3677 | if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { | 3696 | if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { |
3678 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3697 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
3679 | tp->rtl_ops.autosuspend_en(tp, false); | 3698 | tp->rtl_ops.autosuspend_en(tp, false); |
3680 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3681 | napi_disable(&tp->napi); | 3699 | napi_disable(&tp->napi); |
3682 | set_bit(WORK_ENABLE, &tp->flags); | 3700 | set_bit(WORK_ENABLE, &tp->flags); |
3683 | if (netif_carrier_ok(tp->netdev)) | 3701 | if (netif_carrier_ok(tp->netdev)) |
3684 | rtl_start_rx(tp); | 3702 | rtl_start_rx(tp); |
3685 | napi_enable(&tp->napi); | 3703 | napi_enable(&tp->napi); |
3704 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | ||
3705 | smp_mb__after_atomic(); | ||
3706 | if (!list_empty(&tp->rx_done)) | ||
3707 | napi_schedule(&tp->napi); | ||
3686 | } else { | 3708 | } else { |
3687 | tp->rtl_ops.up(tp); | 3709 | tp->rtl_ops.up(tp); |
3688 | netif_carrier_off(tp->netdev); | 3710 | netif_carrier_off(tp->netdev); |
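
Several r8152 hunks move the SELECTIVE_SUSPEND handling so the bit is set before the bail-out checks and cleared again on every early-out, each time followed by smp_mb__after_atomic() so concurrent paths see the flag before any dependent state is read. A minimal sketch of the pattern, assuming kernel context; the flag name and busy test are illustrative:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define EXAMPLE_SUSPENDING 0

    static int example_try_suspend(unsigned long *flags, bool busy)
    {
        set_bit(EXAMPLE_SUSPENDING, flags);
        smp_mb__after_atomic();     /* publish flag before testing state */

        if (busy) {
            clear_bit(EXAMPLE_SUSPENDING, flags);
            smp_mb__after_atomic(); /* publish the roll-back the same way */
            return -EBUSY;
        }
        return 0;                   /* proceed with suspend */
    }
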
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4a105006ca63..765c2d6358da 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444); | |||
48 | */ | 48 | */ |
49 | DECLARE_EWMA(pkt_len, 1, 64) | 49 | DECLARE_EWMA(pkt_len, 1, 64) |
50 | 50 | ||
51 | /* With mergeable buffers we align buffer address and use the low bits to | ||
52 | * encode its true size. Buffer size is up to 1 page so we need to align to | ||
53 | * square root of page size to ensure we reserve enough bits to encode the true | ||
54 | * size. | ||
55 | */ | ||
56 | #define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) | ||
57 | |||
51 | /* Minimum alignment for mergeable packet buffers. */ | 58 | /* Minimum alignment for mergeable packet buffers. */ |
52 | #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) | 59 | #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ |
60 | 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) | ||
53 | 61 | ||
54 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 62 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
55 | 63 | ||
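
The new MERGEABLE_BUFFER_MIN_ALIGN_SHIFT guarantees the buffer address has enough low zero bits to encode any truesize up to one page: with 4 KiB pages the alignment is 64 bytes, leaving 6 bits for sizes expressed in 64-byte units. A standalone sketch of the pack/unpack arithmetic, a simplification of the driver's ctx encoding:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1u << PAGE_SHIFT)
    #define ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)   /* 6 for 4 KiB pages */
    #define BUF_ALIGN   (1u << ALIGN_SHIFT)      /* 64 */

    static uintptr_t pack(void *buf, unsigned int truesize)
    {
        /* truesize is a multiple of BUF_ALIGN: 1..64 units -> 0..63 */
        return (uintptr_t)buf | (truesize / BUF_ALIGN - 1);
    }

    static void *unpack_addr(uintptr_t ctx)
    {
        return (void *)(ctx & ~(uintptr_t)(BUF_ALIGN - 1));
    }

    static unsigned int unpack_truesize(uintptr_t ctx)
    {
        return ((ctx & (BUF_ALIGN - 1)) + 1) * BUF_ALIGN;
    }

    int main(void)
    {
        static _Alignas(BUF_ALIGN) char buf[PAGE_SIZE];
        uintptr_t ctx = pack(buf, PAGE_SIZE); /* largest encodable size */

        printf("addr ok: %d, truesize: %u\n",
               unpack_addr(ctx) == (void *)buf, unpack_truesize(ctx));
        return 0;
    }
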
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) | |||
1104 | hdr = skb_vnet_hdr(skb); | 1112 | hdr = skb_vnet_hdr(skb); |
1105 | 1113 | ||
1106 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, | 1114 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
1107 | virtio_is_little_endian(vi->vdev))) | 1115 | virtio_is_little_endian(vi->vdev), false)) |
1108 | BUG(); | 1116 | BUG(); |
1109 | 1117 | ||
1110 | if (vi->mergeable_rx_bufs) | 1118 | if (vi->mergeable_rx_bufs) |
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) | |||
1707 | u16 xdp_qp = 0, curr_qp; | 1715 | u16 xdp_qp = 0, curr_qp; |
1708 | int i, err; | 1716 | int i, err; |
1709 | 1717 | ||
1718 | if (prog && prog->xdp_adjust_head) { | ||
1719 | netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); | ||
1720 | return -EOPNOTSUPP; | ||
1721 | } | ||
1722 | |||
1710 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | 1723 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
1711 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | 1724 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
1712 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | 1725 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi) | |||
1890 | put_page(vi->rq[i].alloc_frag.page); | 1903 | put_page(vi->rq[i].alloc_frag.page); |
1891 | } | 1904 | } |
1892 | 1905 | ||
1893 | static bool is_xdp_queue(struct virtnet_info *vi, int q) | 1906 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
1894 | { | 1907 | { |
1908 | /* For small receive mode always use kfree_skb variants */ | ||
1909 | if (!vi->mergeable_rx_bufs) | ||
1910 | return false; | ||
1911 | |||
1895 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | 1912 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) |
1896 | return false; | 1913 | return false; |
1897 | else if (q < vi->curr_queue_pairs) | 1914 | else if (q < vi->curr_queue_pairs) |
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi) | |||
1908 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1925 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1909 | struct virtqueue *vq = vi->sq[i].vq; | 1926 | struct virtqueue *vq = vi->sq[i].vq; |
1910 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 1927 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
1911 | if (!is_xdp_queue(vi, i)) | 1928 | if (!is_xdp_raw_buffer_queue(vi, i)) |
1912 | dev_kfree_skb(buf); | 1929 | dev_kfree_skb(buf); |
1913 | else | 1930 | else |
1914 | put_page(virt_to_head_page(buf)); | 1931 | put_page(virt_to_head_page(buf)); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ca7196c40060..50b62db213b0 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg) | |||
2268 | = container_of(p, struct vxlan_fdb, hlist); | 2268 | = container_of(p, struct vxlan_fdb, hlist); |
2269 | unsigned long timeout; | 2269 | unsigned long timeout; |
2270 | 2270 | ||
2271 | if (f->state & NUD_PERMANENT) | 2271 | if (f->state & (NUD_PERMANENT | NUD_NOARP)) |
2272 | continue; | 2272 | continue; |
2273 | 2273 | ||
2274 | timeout = f->used + vxlan->cfg.age_interval * HZ; | 2274 | timeout = f->used + vxlan->cfg.age_interval * HZ; |
@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev) | |||
2354 | } | 2354 | } |
2355 | 2355 | ||
2356 | /* Purge the forwarding table */ | 2356 | /* Purge the forwarding table */ |
2357 | static void vxlan_flush(struct vxlan_dev *vxlan) | 2357 | static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) |
2358 | { | 2358 | { |
2359 | unsigned int h; | 2359 | unsigned int h; |
2360 | 2360 | ||
@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan) | |||
2364 | hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { | 2364 | hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { |
2365 | struct vxlan_fdb *f | 2365 | struct vxlan_fdb *f |
2366 | = container_of(p, struct vxlan_fdb, hlist); | 2366 | = container_of(p, struct vxlan_fdb, hlist); |
2367 | if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) | ||
2368 | continue; | ||
2367 | /* the all_zeros_mac entry is deleted at vxlan_uninit */ | 2369 | /* the all_zeros_mac entry is deleted at vxlan_uninit */ |
2368 | if (!is_zero_ether_addr(f->eth_addr)) | 2370 | if (!is_zero_ether_addr(f->eth_addr)) |
2369 | vxlan_fdb_destroy(vxlan, f); | 2371 | vxlan_fdb_destroy(vxlan, f); |
@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev) | |||
2385 | 2387 | ||
2386 | del_timer_sync(&vxlan->age_timer); | 2388 | del_timer_sync(&vxlan->age_timer); |
2387 | 2389 | ||
2388 | vxlan_flush(vxlan); | 2390 | vxlan_flush(vxlan, false); |
2389 | vxlan_sock_release(vxlan); | 2391 | vxlan_sock_release(vxlan); |
2390 | 2392 | ||
2391 | return ret; | 2393 | return ret; |
@@ -2890,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2890 | memcpy(&vxlan->cfg, conf, sizeof(*conf)); | 2892 | memcpy(&vxlan->cfg, conf, sizeof(*conf)); |
2891 | if (!vxlan->cfg.dst_port) { | 2893 | if (!vxlan->cfg.dst_port) { |
2892 | if (conf->flags & VXLAN_F_GPE) | 2894 | if (conf->flags & VXLAN_F_GPE) |
2893 | vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ | 2895 | vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */ |
2894 | else | 2896 | else |
2895 | vxlan->cfg.dst_port = default_port; | 2897 | vxlan->cfg.dst_port = default_port; |
2896 | } | 2898 | } |
@@ -3058,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
3058 | struct vxlan_dev *vxlan = netdev_priv(dev); | 3060 | struct vxlan_dev *vxlan = netdev_priv(dev); |
3059 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | 3061 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
3060 | 3062 | ||
3063 | vxlan_flush(vxlan, true); | ||
3064 | |||
3061 | spin_lock(&vn->sock_lock); | 3065 | spin_lock(&vn->sock_lock); |
3062 | if (!hlist_unhashed(&vxlan->hlist)) | 3066 | if (!hlist_unhashed(&vxlan->hlist)) |
3063 | hlist_del_rcu(&vxlan->hlist); | 3067 | hlist_del_rcu(&vxlan->hlist); |
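
vxlan_flush() now distinguishes ifdown from dellink: the stop path (and, per the first hunk, the aging timer) skips static entries, while dellink purges the whole forwarding table. A minimal sketch of the filter, assuming kernel context:

    #include <linux/types.h>
    #include <net/neighbour.h>

    static bool example_should_flush(u16 state, bool do_all)
    {
        if (!do_all && (state & (NUD_PERMANENT | NUD_NOARP)))
            return false;   /* keep static entries across ifdown */
        return true;        /* dellink removes everything */
    }
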
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e30ffd29b7e9..579521327b03 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) | |||
221 | { | 221 | { |
222 | struct xenvif *vif = netdev_priv(dev); | 222 | struct xenvif *vif = netdev_priv(dev); |
223 | struct xenvif_queue *queue = NULL; | 223 | struct xenvif_queue *queue = NULL; |
224 | unsigned int num_queues = vif->num_queues; | ||
225 | unsigned long rx_bytes = 0; | 224 | unsigned long rx_bytes = 0; |
226 | unsigned long rx_packets = 0; | 225 | unsigned long rx_packets = 0; |
227 | unsigned long tx_bytes = 0; | 226 | unsigned long tx_bytes = 0; |
228 | unsigned long tx_packets = 0; | 227 | unsigned long tx_packets = 0; |
229 | unsigned int index; | 228 | unsigned int index; |
230 | 229 | ||
230 | spin_lock(&vif->lock); | ||
231 | if (vif->queues == NULL) | 231 | if (vif->queues == NULL) |
232 | goto out; | 232 | goto out; |
233 | 233 | ||
234 | /* Aggregate tx and rx stats from each queue */ | 234 | /* Aggregate tx and rx stats from each queue */ |
235 | for (index = 0; index < num_queues; ++index) { | 235 | for (index = 0; index < vif->num_queues; ++index) { |
236 | queue = &vif->queues[index]; | 236 | queue = &vif->queues[index]; |
237 | rx_bytes += queue->stats.rx_bytes; | 237 | rx_bytes += queue->stats.rx_bytes; |
238 | rx_packets += queue->stats.rx_packets; | 238 | rx_packets += queue->stats.rx_packets; |
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) | |||
241 | } | 241 | } |
242 | 242 | ||
243 | out: | 243 | out: |
244 | spin_unlock(&vif->lock); | ||
245 | |||
244 | vif->dev->stats.rx_bytes = rx_bytes; | 246 | vif->dev->stats.rx_bytes = rx_bytes; |
245 | vif->dev->stats.rx_packets = rx_packets; | 247 | vif->dev->stats.rx_packets = rx_packets; |
246 | vif->dev->stats.tx_bytes = tx_bytes; | 248 | vif->dev->stats.tx_bytes = tx_bytes; |
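
Taking vif->lock in the stats path pairs with the xenbus hunk below, which frees and NULLs the queue array under the same lock; without it the aggregation loop could race with queue teardown and walk freed memory. A minimal sketch of the pairing with illustrative types:

    #include <linux/spinlock.h>
    #include <linux/vmalloc.h>

    struct example_vif {
        spinlock_t lock;
        unsigned int num_queues;
        struct { unsigned long rx_bytes; } *queues;
    };

    static unsigned long example_sum_stats(struct example_vif *vif)
    {
        unsigned long total = 0;
        unsigned int i;

        spin_lock(&vif->lock);
        if (vif->queues)
            for (i = 0; i < vif->num_queues; i++)
                total += vif->queues[i].rx_bytes;
        spin_unlock(&vif->lock);
        return total;
    }

    static void example_teardown(struct example_vif *vif)
    {
        spin_lock(&vif->lock);   /* reader can no longer be mid-loop */
        vfree(vif->queues);
        vif->queues = NULL;
        vif->num_queues = 0;
        spin_unlock(&vif->lock);
    }
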
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 3124eaec9427..85b742e1c42f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be) | |||
493 | static void backend_disconnect(struct backend_info *be) | 493 | static void backend_disconnect(struct backend_info *be) |
494 | { | 494 | { |
495 | if (be->vif) { | 495 | if (be->vif) { |
496 | unsigned int queue_index; | ||
497 | |||
496 | xen_unregister_watchers(be->vif); | 498 | xen_unregister_watchers(be->vif); |
497 | #ifdef CONFIG_DEBUG_FS | 499 | #ifdef CONFIG_DEBUG_FS |
498 | xenvif_debugfs_delif(be->vif); | 500 | xenvif_debugfs_delif(be->vif); |
499 | #endif /* CONFIG_DEBUG_FS */ | 501 | #endif /* CONFIG_DEBUG_FS */ |
500 | xenvif_disconnect_data(be->vif); | 502 | xenvif_disconnect_data(be->vif); |
503 | for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) | ||
504 | xenvif_deinit_queue(&be->vif->queues[queue_index]); | ||
505 | |||
506 | spin_lock(&be->vif->lock); | ||
507 | vfree(be->vif->queues); | ||
508 | be->vif->num_queues = 0; | ||
509 | be->vif->queues = NULL; | ||
510 | spin_unlock(&be->vif->lock); | ||
511 | |||
501 | xenvif_disconnect_ctrl(be->vif); | 512 | xenvif_disconnect_ctrl(be->vif); |
502 | } | 513 | } |
503 | } | 514 | } |
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be) | |||
1034 | err: | 1045 | err: |
1035 | if (be->vif->num_queues > 0) | 1046 | if (be->vif->num_queues > 0) |
1036 | xenvif_disconnect_data(be->vif); /* Clean up existing queues */ | 1047 | xenvif_disconnect_data(be->vif); /* Clean up existing queues */ |
1048 | for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) | ||
1049 | xenvif_deinit_queue(&be->vif->queues[queue_index]); | ||
1037 | vfree(be->vif->queues); | 1050 | vfree(be->vif->queues); |
1038 | be->vif->queues = NULL; | 1051 | be->vif->queues = NULL; |
1039 | be->vif->num_queues = 0; | 1052 | be->vif->num_queues = 0; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a479cd99911d..8315fe73ecd0 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
321 | queue->rx.req_prod_pvt = req_prod; | 321 | queue->rx.req_prod_pvt = req_prod; |
322 | 322 | ||
323 | /* Not enough requests? Try again later. */ | 323 | /* Not enough requests? Try again later. */ |
324 | if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { | 324 | if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { |
325 | mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); | 325 | mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); |
326 | return; | 326 | return; |
327 | } | 327 | } |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index fcc9dcfdf675..e65041c640cb 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |||
1663 | return 0; | 1663 | return 0; |
1664 | 1664 | ||
1665 | freq->sg_table.sgl = freq->first_sgl; | 1665 | freq->sg_table.sgl = freq->first_sgl; |
1666 | ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, | 1666 | ret = sg_alloc_table_chained(&freq->sg_table, |
1667 | freq->sg_table.sgl); | 1667 | blk_rq_nr_phys_segments(rq), freq->sg_table.sgl); |
1668 | if (ret) | 1668 | if (ret) |
1669 | return -ENOMEM; | 1669 | return -ENOMEM; |
1670 | 1670 | ||
1671 | op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); | 1671 | op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); |
1672 | WARN_ON(op->nents > rq->nr_phys_segments); | 1672 | WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); |
1673 | dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | 1673 | dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; |
1674 | freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, | 1674 | freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, |
1675 | op->nents, dir); | 1675 | op->nents, dir); |
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 6f5074153dcd..be8c800078e2 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c | |||
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item) | |||
631 | { | 631 | { |
632 | struct nvmet_subsys *subsys = to_subsys(item); | 632 | struct nvmet_subsys *subsys = to_subsys(item); |
633 | 633 | ||
634 | nvmet_subsys_del_ctrls(subsys); | ||
634 | nvmet_subsys_put(subsys); | 635 | nvmet_subsys_put(subsys); |
635 | } | 636 | } |
636 | 637 | ||
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b1d66ed655c9..fc5ba2f9e15f 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work) | |||
200 | pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", | 200 | pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", |
201 | ctrl->cntlid, ctrl->kato); | 201 | ctrl->cntlid, ctrl->kato); |
202 | 202 | ||
203 | ctrl->ops->delete_ctrl(ctrl); | 203 | nvmet_ctrl_fatal_error(ctrl); |
204 | } | 204 | } |
205 | 205 | ||
206 | static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) | 206 | static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) |
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref) | |||
816 | list_del(&ctrl->subsys_entry); | 816 | list_del(&ctrl->subsys_entry); |
817 | mutex_unlock(&subsys->lock); | 817 | mutex_unlock(&subsys->lock); |
818 | 818 | ||
819 | flush_work(&ctrl->async_event_work); | ||
820 | cancel_work_sync(&ctrl->fatal_err_work); | ||
821 | |||
819 | ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); | 822 | ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); |
820 | nvmet_subsys_put(subsys); | 823 | nvmet_subsys_put(subsys); |
821 | 824 | ||
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref) | |||
935 | kfree(subsys); | 938 | kfree(subsys); |
936 | } | 939 | } |
937 | 940 | ||
941 | void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) | ||
942 | { | ||
943 | struct nvmet_ctrl *ctrl; | ||
944 | |||
945 | mutex_lock(&subsys->lock); | ||
946 | list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) | ||
947 | ctrl->ops->delete_ctrl(ctrl); | ||
948 | mutex_unlock(&subsys->lock); | ||
949 | } | ||
950 | |||
938 | void nvmet_subsys_put(struct nvmet_subsys *subsys) | 951 | void nvmet_subsys_put(struct nvmet_subsys *subsys) |
939 | { | 952 | { |
940 | kref_put(&subsys->ref, nvmet_subsys_free); | 953 | kref_put(&subsys->ref, nvmet_subsys_free); |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 173e842f19c9..ba57f9852bde 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, | |||
1314 | (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; | 1314 | (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; |
1315 | struct fcnvme_ls_disconnect_acc *acc = | 1315 | struct fcnvme_ls_disconnect_acc *acc = |
1316 | (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; | 1316 | (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; |
1317 | struct nvmet_fc_tgt_queue *queue; | 1317 | struct nvmet_fc_tgt_queue *queue = NULL; |
1318 | struct nvmet_fc_tgt_assoc *assoc; | 1318 | struct nvmet_fc_tgt_assoc *assoc; |
1319 | int ret = 0; | 1319 | int ret = 0; |
1320 | bool del_assoc = false; | 1320 | bool del_assoc = false; |
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, | |||
1348 | assoc = nvmet_fc_find_target_assoc(tgtport, | 1348 | assoc = nvmet_fc_find_target_assoc(tgtport, |
1349 | be64_to_cpu(rqst->associd.association_id)); | 1349 | be64_to_cpu(rqst->associd.association_id)); |
1350 | iod->assoc = assoc; | 1350 | iod->assoc = assoc; |
1351 | if (!assoc) | 1351 | if (assoc) { |
1352 | if (rqst->discon_cmd.scope == | ||
1353 | FCNVME_DISCONN_CONNECTION) { | ||
1354 | queue = nvmet_fc_find_target_queue(tgtport, | ||
1355 | be64_to_cpu( | ||
1356 | rqst->discon_cmd.id)); | ||
1357 | if (!queue) { | ||
1358 | nvmet_fc_tgt_a_put(assoc); | ||
1359 | ret = VERR_NO_CONN; | ||
1360 | } | ||
1361 | } | ||
1362 | } else | ||
1352 | ret = VERR_NO_ASSOC; | 1363 | ret = VERR_NO_ASSOC; |
1353 | } | 1364 | } |
1354 | 1365 | ||
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, | |||
1373 | FCNVME_LS_DISCONNECT); | 1384 | FCNVME_LS_DISCONNECT); |
1374 | 1385 | ||
1375 | 1386 | ||
1376 | if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { | 1387 | /* are we to delete a Connection ID (queue) */ |
1377 | queue = nvmet_fc_find_target_queue(tgtport, | 1388 | if (queue) { |
1378 | be64_to_cpu(rqst->discon_cmd.id)); | 1389 | int qid = queue->qid; |
1379 | if (queue) { | ||
1380 | int qid = queue->qid; | ||
1381 | 1390 | ||
1382 | nvmet_fc_delete_target_queue(queue); | 1391 | nvmet_fc_delete_target_queue(queue); |
1383 | 1392 | ||
1384 | /* release the get taken by find_target_queue */ | 1393 | /* release the get taken by find_target_queue */ |
1385 | nvmet_fc_tgt_q_put(queue); | 1394 | nvmet_fc_tgt_q_put(queue); |
1386 | 1395 | ||
1387 | /* tear association down if io queue terminated */ | 1396 | /* tear association down if io queue terminated */ |
1388 | if (!qid) | 1397 | if (!qid) |
1389 | del_assoc = true; | 1398 | del_assoc = true; |
1390 | } | ||
1391 | } | 1399 | } |
1392 | 1400 | ||
1393 | /* release get taken in nvmet_fc_find_target_assoc */ | 1401 | /* release get taken in nvmet_fc_find_target_assoc */ |
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 23d5eb1c944f..cc7ad06b43a7 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl); | |||
282 | struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, | 282 | struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, |
283 | enum nvme_subsys_type type); | 283 | enum nvme_subsys_type type); |
284 | void nvmet_subsys_put(struct nvmet_subsys *subsys); | 284 | void nvmet_subsys_put(struct nvmet_subsys *subsys); |
285 | void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys); | ||
285 | 286 | ||
286 | struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); | 287 | struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); |
287 | void nvmet_put_namespace(struct nvmet_ns *ns); | 288 | void nvmet_put_namespace(struct nvmet_ns *ns); |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 8c3760a78ac0..60990220bd83 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, | |||
438 | { | 438 | { |
439 | struct ib_recv_wr *bad_wr; | 439 | struct ib_recv_wr *bad_wr; |
440 | 440 | ||
441 | ib_dma_sync_single_for_device(ndev->device, | ||
442 | cmd->sge[0].addr, cmd->sge[0].length, | ||
443 | DMA_FROM_DEVICE); | ||
444 | |||
441 | if (ndev->srq) | 445 | if (ndev->srq) |
442 | return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); | 446 | return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); |
443 | return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); | 447 | return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); |
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) | |||
538 | first_wr = &rsp->send_wr; | 542 | first_wr = &rsp->send_wr; |
539 | 543 | ||
540 | nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); | 544 | nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); |
545 | |||
546 | ib_dma_sync_single_for_device(rsp->queue->dev->device, | ||
547 | rsp->send_sge.addr, rsp->send_sge.length, | ||
548 | DMA_TO_DEVICE); | ||
549 | |||
541 | if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { | 550 | if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { |
542 | pr_err("sending cmd response failed\n"); | 551 | pr_err("sending cmd response failed\n"); |
543 | nvmet_rdma_release_rsp(rsp); | 552 | nvmet_rdma_release_rsp(rsp); |
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, | |||
698 | cmd->n_rdma = 0; | 707 | cmd->n_rdma = 0; |
699 | cmd->req.port = queue->port; | 708 | cmd->req.port = queue->port; |
700 | 709 | ||
710 | |||
711 | ib_dma_sync_single_for_cpu(queue->dev->device, | ||
712 | cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, | ||
713 | DMA_FROM_DEVICE); | ||
714 | ib_dma_sync_single_for_cpu(queue->dev->device, | ||
715 | cmd->send_sge.addr, cmd->send_sge.length, | ||
716 | DMA_TO_DEVICE); | ||
717 | |||
701 | if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, | 718 | if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, |
702 | &queue->nvme_sq, &nvmet_rdma_ops)) | 719 | &queue->nvme_sq, &nvmet_rdma_ops)) |
703 | return; | 720 | return; |
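
The nvmet-rdma hunks bracket CPU access to the command and response buffers with DMA sync calls: sync-for-cpu before the CPU parses a received command or fills a response, sync-for-device before reposting the recv or posting the send. A minimal sketch of the ownership rule using the generic dma_* API (the driver uses the equivalent ib_dma_* wrappers):

    #include <linux/dma-mapping.h>

    static void example_rx_cycle(struct device *dev, dma_addr_t addr,
                                 size_t len)
    {
        /* device wrote into the buffer; claim it for the CPU */
        dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

        /* ... parse the command here; the CPU owns the buffer ... */

        /* done reading; hand ownership back before re-posting */
        dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
    }
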
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index dd6d4ccb41e4..3858b87fd0bb 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c | |||
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
293 | p->irq = PARPORT_IRQ_NONE; | 293 | p->irq = PARPORT_IRQ_NONE; |
294 | } | 294 | } |
295 | if (p->irq != PARPORT_IRQ_NONE) { | 295 | if (p->irq != PARPORT_IRQ_NONE) { |
296 | printk(", irq %d", p->irq); | 296 | pr_cont(", irq %d", p->irq); |
297 | 297 | ||
298 | if (p->dma == PARPORT_DMA_AUTO) { | 298 | if (p->dma == PARPORT_DMA_AUTO) { |
299 | p->dma = PARPORT_DMA_NONE; | 299 | p->dma = PARPORT_DMA_NONE; |
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
303 | is mandatory (see above) */ | 303 | is mandatory (see above) */ |
304 | p->dma = PARPORT_DMA_NONE; | 304 | p->dma = PARPORT_DMA_NONE; |
305 | 305 | ||
306 | printk(" ["); | 306 | pr_cont(" ["); |
307 | #define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} | 307 | #define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} |
308 | { | 308 | { |
309 | int f = 0; | 309 | int f = 0; |
310 | printmode(PCSPP); | 310 | printmode(PCSPP); |
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, | |||
315 | // printmode(DMA); | 315 | // printmode(DMA); |
316 | } | 316 | } |
317 | #undef printmode | 317 | #undef printmode |
318 | printk("]\n"); | 318 | pr_cont("]\n"); |
319 | 319 | ||
320 | if (p->irq != PARPORT_IRQ_NONE) { | 320 | if (p->irq != PARPORT_IRQ_NONE) { |
321 | if (request_irq (p->irq, parport_irq_handler, | 321 | if (request_irq (p->irq, parport_irq_handler, |
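
The parport change converts mid-line printk() continuations to pr_cont(); without the explicit continuation marker the fragments may be flushed as separate log lines. A minimal sketch of the resulting pattern, with illustrative values:

    #include <linux/printk.h>

    static void example_report(int irq, int modes)
    {
        printk(KERN_INFO "example port");  /* start of the line */
        if (irq >= 0)
            pr_cont(", irq %d", irq);      /* continue the same line */
        pr_cont(" [modes 0x%x]\n", modes); /* terminate it */
    }
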
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 37300634b7d2..c123488266ce 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, | |||
1092 | enum pin_config_param param = pinconf_to_config_param(*config); | 1092 | enum pin_config_param param = pinconf_to_config_param(*config); |
1093 | void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); | 1093 | void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); |
1094 | void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); | 1094 | void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); |
1095 | void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); | ||
1095 | unsigned long flags; | 1096 | unsigned long flags; |
1096 | u32 conf, pull, val, debounce; | 1097 | u32 conf, pull, val, debounce; |
1097 | u16 arg = 0; | 1098 | u16 arg = 0; |
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, | |||
1128 | return -EINVAL; | 1129 | return -EINVAL; |
1129 | 1130 | ||
1130 | raw_spin_lock_irqsave(&vg->lock, flags); | 1131 | raw_spin_lock_irqsave(&vg->lock, flags); |
1131 | debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); | 1132 | debounce = readl(db_reg); |
1132 | raw_spin_unlock_irqrestore(&vg->lock, flags); | 1133 | raw_spin_unlock_irqrestore(&vg->lock, flags); |
1133 | 1134 | ||
1134 | switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { | 1135 | switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { |
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | |||
1176 | unsigned int param, arg; | 1177 | unsigned int param, arg; |
1177 | void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); | 1178 | void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); |
1178 | void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); | 1179 | void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); |
1180 | void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); | ||
1179 | unsigned long flags; | 1181 | unsigned long flags; |
1180 | u32 conf, val, debounce; | 1182 | u32 conf, val, debounce; |
1181 | int i, ret = 0; | 1183 | int i, ret = 0; |
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, | |||
1238 | 1240 | ||
1239 | break; | 1241 | break; |
1240 | case PIN_CONFIG_INPUT_DEBOUNCE: | 1242 | case PIN_CONFIG_INPUT_DEBOUNCE: |
1241 | debounce = readl(byt_gpio_reg(vg, offset, | 1243 | debounce = readl(db_reg); |
1242 | BYT_DEBOUNCE_REG)); | 1244 | debounce &= ~BYT_DEBOUNCE_PULSE_MASK; |
1243 | conf &= ~BYT_DEBOUNCE_PULSE_MASK; | ||
1244 | 1245 | ||
1245 | switch (arg) { | 1246 | switch (arg) { |
1247 | case 0: | ||
1248 | conf &= BYT_DEBOUNCE_EN; | ||
1249 | break; | ||
1246 | case 375: | 1250 | case 375: |
1247 | conf |= BYT_DEBOUNCE_PULSE_375US; | 1251 | debounce |= BYT_DEBOUNCE_PULSE_375US; |
1248 | break; | 1252 | break; |
1249 | case 750: | 1253 | case 750: |
1250 | conf |= BYT_DEBOUNCE_PULSE_750US; | 1254 | debounce |= BYT_DEBOUNCE_PULSE_750US; |
1251 | break; | 1255 | break; |
1252 | case 1500: | 1256 | case 1500: |
1253 | conf |= BYT_DEBOUNCE_PULSE_1500US; | 1257 | debounce |= BYT_DEBOUNCE_PULSE_1500US; |
1254 | break; | 1258 | break; |
1255 | case 3000: | 1259 | case 3000: |
1256 | conf |= BYT_DEBOUNCE_PULSE_3MS; | 1260 | debounce |= BYT_DEBOUNCE_PULSE_3MS; |
1257 | break; | 1261 | break; |
1258 | case 6000: | 1262 | case 6000: |
1259 | conf |= BYT_DEBOUNCE_PULSE_6MS; | 1263 | debounce |= BYT_DEBOUNCE_PULSE_6MS; |
1260 | break; | 1264 | break; |
1261 | case 12000: | 1265 | case 12000: |
1262 | conf |= BYT_DEBOUNCE_PULSE_12MS; | 1266 | debounce |= BYT_DEBOUNCE_PULSE_12MS; |
1263 | break; | 1267 | break; |
1264 | case 24000: | 1268 | case 24000: |
1265 | conf |= BYT_DEBOUNCE_PULSE_24MS; | 1269 | debounce |= BYT_DEBOUNCE_PULSE_24MS; |
1266 | break; | 1270 | break; |
1267 | default: | 1271 | default: |
1268 | ret = -EINVAL; | 1272 | ret = -EINVAL; |
1269 | } | 1273 | } |
1270 | 1274 | ||
1275 | if (!ret) | ||
1276 | writel(debounce, db_reg); | ||
1271 | break; | 1277 | break; |
1272 | default: | 1278 | default: |
1273 | ret = -ENOTSUPP; | 1279 | ret = -ENOTSUPP; |
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) | |||
1617 | 1623 | ||
1618 | static void byt_gpio_irq_init_hw(struct byt_gpio *vg) | 1624 | static void byt_gpio_irq_init_hw(struct byt_gpio *vg) |
1619 | { | 1625 | { |
1626 | struct gpio_chip *gc = &vg->chip; | ||
1627 | struct device *dev = &vg->pdev->dev; | ||
1620 | void __iomem *reg; | 1628 | void __iomem *reg; |
1621 | u32 base, value; | 1629 | u32 base, value; |
1622 | int i; | 1630 | int i; |
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) | |||
1638 | } | 1646 | } |
1639 | 1647 | ||
1640 | value = readl(reg); | 1648 | value = readl(reg); |
1641 | if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && | 1649 | if (value & BYT_DIRECT_IRQ_EN) { |
1642 | !(value & BYT_DIRECT_IRQ_EN)) { | 1650 | clear_bit(i, gc->irq_valid_mask); |
1651 | dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i); | ||
1652 | } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) { | ||
1643 | byt_gpio_clear_triggering(vg, i); | 1653 | byt_gpio_clear_triggering(vg, i); |
1644 | dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); | 1654 | dev_dbg(dev, "disabling GPIO %d\n", i); |
1645 | } | 1655 | } |
1646 | } | 1656 | } |
1647 | 1657 | ||
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) | |||
1680 | gc->can_sleep = false; | 1690 | gc->can_sleep = false; |
1681 | gc->parent = &vg->pdev->dev; | 1691 | gc->parent = &vg->pdev->dev; |
1682 | gc->ngpio = vg->soc_data->npins; | 1692 | gc->ngpio = vg->soc_data->npins; |
1693 | gc->irq_need_valid_mask = true; | ||
1683 | 1694 | ||
1684 | #ifdef CONFIG_PM_SLEEP | 1695 | #ifdef CONFIG_PM_SLEEP |
1685 | vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, | 1696 | vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, |
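Two things happen in the Baytrail hunks: the debounce pulse field is now read-modify-written on BYT_DEBOUNCE_REG itself (the old code OR'd the pulse bits into the CONF0 value, where they mean something else), and pads wired as direct IRQs are excluded from the GPIO IRQ domain via irq_valid_mask. A compilable miniature of the register update, with a made-up field layout:

    #include <stdint.h>
    #include <stdio.h>

    #define DEBOUNCE_PULSE_MASK 0x7u  /* hypothetical field layout */

    /* Read-modify-write of a register field, mirroring what the fixed
     * driver does on BYT_DEBOUNCE_REG: clear the pulse field first so
     * stale bits cannot combine with the new value. */
    static uint32_t set_pulse(uint32_t reg, uint32_t pulse)
    {
        reg &= ~DEBOUNCE_PULSE_MASK;
        reg |= pulse & DEBOUNCE_PULSE_MASK;
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0x105;                  /* stale pulse bits set */
        printf("0x%x\n", set_pulse(reg, 0x3)); /* prints 0x103 */
        return 0;
    }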
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index 59cb7a6fc5be..901b356b09d7 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #define BXT_PAD_OWN 0x020 | 20 | #define BXT_PAD_OWN 0x020 |
21 | #define BXT_HOSTSW_OWN 0x080 | 21 | #define BXT_HOSTSW_OWN 0x080 |
22 | #define BXT_PADCFGLOCK 0x090 | 22 | #define BXT_PADCFGLOCK 0x060 |
23 | #define BXT_GPI_IE 0x110 | 23 | #define BXT_GPI_IE 0x110 |
24 | 24 | ||
25 | #define BXT_COMMUNITY(s, e) \ | 25 | #define BXT_COMMUNITY(s, e) \ |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 1e139672f1af..6df35dcb29ae 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, | |||
353 | return 0; | 353 | return 0; |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) | ||
357 | { | ||
358 | u32 value; | ||
359 | |||
360 | value = readl(padcfg0); | ||
361 | if (input) { | ||
362 | value &= ~PADCFG0_GPIORXDIS; | ||
363 | value |= PADCFG0_GPIOTXDIS; | ||
364 | } else { | ||
365 | value &= ~PADCFG0_GPIOTXDIS; | ||
366 | value |= PADCFG0_GPIORXDIS; | ||
367 | } | ||
368 | writel(value, padcfg0); | ||
369 | } | ||
370 | |||
356 | static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, | 371 | static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
357 | struct pinctrl_gpio_range *range, | 372 | struct pinctrl_gpio_range *range, |
358 | unsigned pin) | 373 | unsigned pin) |
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, | |||
375 | /* Disable SCI/SMI/NMI generation */ | 390 | /* Disable SCI/SMI/NMI generation */ |
376 | value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); | 391 | value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); |
377 | value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); | 392 | value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); |
378 | /* Disable TX buffer and enable RX (this will be input) */ | ||
379 | value &= ~PADCFG0_GPIORXDIS; | ||
380 | value |= PADCFG0_GPIOTXDIS; | ||
381 | writel(value, padcfg0); | 393 | writel(value, padcfg0); |
382 | 394 | ||
395 | /* Disable TX buffer and enable RX (this will be input) */ | ||
396 | __intel_gpio_set_direction(padcfg0, true); | ||
397 | |||
383 | raw_spin_unlock_irqrestore(&pctrl->lock, flags); | 398 | raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
384 | 399 | ||
385 | return 0; | 400 | return 0; |
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, | |||
392 | struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); | 407 | struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); |
393 | void __iomem *padcfg0; | 408 | void __iomem *padcfg0; |
394 | unsigned long flags; | 409 | unsigned long flags; |
395 | u32 value; | ||
396 | 410 | ||
397 | raw_spin_lock_irqsave(&pctrl->lock, flags); | 411 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
398 | 412 | ||
399 | padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); | 413 | padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); |
400 | 414 | __intel_gpio_set_direction(padcfg0, input); | |
401 | value = readl(padcfg0); | ||
402 | if (input) | ||
403 | value |= PADCFG0_GPIOTXDIS; | ||
404 | else | ||
405 | value &= ~PADCFG0_GPIOTXDIS; | ||
406 | writel(value, padcfg0); | ||
407 | 415 | ||
408 | raw_spin_unlock_irqrestore(&pctrl->lock, flags); | 416 | raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
409 | 417 | ||
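The pinctrl-intel refactor routes both the GPIO request path and the set_direction path through one helper, so the RX-disable and TX-disable bits always move as a pair; the old set_direction flipped only GPIOTXDIS and could leave the receiver disabled on a pad switched to input. A sketch of the same pairing over a plain value (bit positions assumed for illustration):

    #include <stdint.h>

    #define GPIORXDIS (1u << 9)  /* assumed bit positions */
    #define GPIOTXDIS (1u << 8)

    /* One helper, as in the patch: input means RX enabled + TX
     * disabled, output the reverse, so the bits never drift apart. */
    static uint32_t set_direction(uint32_t padcfg0, int input)
    {
        if (input) {
            padcfg0 &= ~GPIORXDIS;
            padcfg0 |= GPIOTXDIS;
        } else {
            padcfg0 &= ~GPIOTXDIS;
            padcfg0 |= GPIORXDIS;
        }
        return padcfg0;
    }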
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index c3928aa3fefa..e0bca4df2a2f 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c | |||
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; | |||
253 | static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; | 253 | static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; |
254 | static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; | 254 | static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; |
255 | static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; | 255 | static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; |
256 | static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; | 256 | static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) }; |
257 | static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), | 257 | static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; |
258 | PIN(GPIOAO_5, 0) }; | ||
259 | static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; | 258 | static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; |
260 | static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; | 259 | static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; |
261 | 260 | ||
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = { | |||
498 | GPIO_GROUP(GPIOAO_13, 0), | 497 | GPIO_GROUP(GPIOAO_13, 0), |
499 | 498 | ||
500 | /* bank AO */ | 499 | /* bank AO */ |
501 | GROUP(uart_tx_ao_b, 0, 26), | 500 | GROUP(uart_tx_ao_b, 0, 24), |
502 | GROUP(uart_rx_ao_b, 0, 25), | 501 | GROUP(uart_rx_ao_b, 0, 25), |
503 | GROUP(uart_tx_ao_a, 0, 12), | 502 | GROUP(uart_tx_ao_a, 0, 12), |
504 | GROUP(uart_rx_ao_a, 0, 11), | 503 | GROUP(uart_rx_ao_a, 0, 11), |
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 25694f7094c7..b69743b07a1d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c | |||
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; | |||
214 | static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; | 214 | static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; |
215 | static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; | 215 | static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; |
216 | static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; | 216 | static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; |
217 | static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; | 217 | static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) }; |
218 | static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), | 218 | static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; |
219 | PIN(GPIOAO_5, 0) }; | ||
220 | static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; | 219 | static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; |
221 | static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; | 220 | static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; |
222 | 221 | ||
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = { | |||
409 | GPIO_GROUP(GPIOAO_9, 0), | 408 | GPIO_GROUP(GPIOAO_9, 0), |
410 | 409 | ||
411 | /* bank AO */ | 410 | /* bank AO */ |
412 | GROUP(uart_tx_ao_b, 0, 26), | 411 | GROUP(uart_tx_ao_b, 0, 24), |
413 | GROUP(uart_rx_ao_b, 0, 25), | 412 | GROUP(uart_rx_ao_b, 0, 25), |
414 | GROUP(uart_tx_ao_a, 0, 12), | 413 | GROUP(uart_tx_ao_a, 0, 12), |
415 | GROUP(uart_rx_ao_a, 0, 11), | 414 | GROUP(uart_rx_ao_a, 0, 11), |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index c9a146948192..537b52055756 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc) | |||
202 | i = 128; | 202 | i = 128; |
203 | pin_num = AMD_GPIO_PINS_BANK2 + i; | 203 | pin_num = AMD_GPIO_PINS_BANK2 + i; |
204 | break; | 204 | break; |
205 | default: | ||
206 | return; | ||
205 | } | 207 | } |
206 | 208 | ||
207 | for (; i < pin_num; i++) { | 209 | for (; i < pin_num; i++) { |
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c index aa8bd9794683..96686336e3a3 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | |||
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | |||
561 | 0, 0, 0, 0}; | 561 | 0, 0, 0, 0}; |
562 | static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, | 562 | static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, |
563 | 41, 42, 45}; | 563 | 41, 42, 45}; |
564 | static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; | 564 | static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}; |
565 | static const unsigned i2c0_pins[] = {63, 64}; | 565 | static const unsigned i2c0_pins[] = {63, 64}; |
566 | static const int i2c0_muxvals[] = {0, 0}; | 566 | static const int i2c0_muxvals[] = {0, 0}; |
567 | static const unsigned i2c1_pins[] = {65, 66}; | 567 | static const unsigned i2c1_pins[] = {65, 66}; |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 410741acb3c9..f46ece2ce3c4 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
813 | case 8: | 813 | case 8: |
814 | case 7: | 814 | case 7: |
815 | case 6: | 815 | case 6: |
816 | case 1: | ||
816 | ideapad_input_report(priv, vpc_bit); | 817 | ideapad_input_report(priv, vpc_bit); |
817 | break; | 818 | break; |
818 | case 5: | 819 | case 5: |
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 1fc0de870ff8..361770568ad0 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c | |||
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev) | |||
77 | 77 | ||
78 | input_set_capability(input, EV_KEY, KEY_POWER); | 78 | input_set_capability(input, EV_KEY, KEY_POWER); |
79 | 79 | ||
80 | error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, | 80 | error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT, |
81 | DRIVER_NAME, input); | 81 | DRIVER_NAME, input); |
82 | if (error) { | 82 | if (error) { |
83 | dev_err(&pdev->dev, "Unable to request irq %d for mfld power" | 83 | dev_err(&pdev->dev, "Unable to request irq %d for mfld power" |
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 97b4c3a219c0..25f15df5c2d7 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c | |||
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void) | |||
326 | return 0; | 326 | return 0; |
327 | 327 | ||
328 | fail_platform_mux_register: | 328 | fail_platform_mux_register: |
329 | for (i--; i > 0 ; i--) | 329 | while (--i >= 0) |
330 | platform_device_unregister(priv->pdev_mux[i]); | 330 | platform_device_unregister(priv->pdev_mux[i]); |
331 | platform_device_unregister(priv->pdev_i2c); | 331 | platform_device_unregister(priv->pdev_i2c); |
332 | fail_alloc: | 332 | fail_alloc: |
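The mlx-platform fix is a classic unwind off-by-one: `for (i--; i > 0; i--)` stops before index 0, leaking the first registered mux device, while `while (--i >= 0)` tears down exactly the devices registered so far. A standalone miniature (the count and failure point are made up):

    #include <stdio.h>

    /* Miniature of the fixed unwind: if registration fails at index
     * i, everything in [0, i) must be torn down, including index 0. */
    int main(void)
    {
        int i, n = 4;

        for (i = 0; i < n; i++) {
            if (i == 2)                  /* simulate failure at index 2 */
                goto fail;
            printf("register %d\n", i);
        }
        return 0;
    fail:
        while (--i >= 0)                 /* visits 1, then 0 */
            printf("unregister %d\n", i);
        return 1;
    }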
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c index cbf4d83a7271..25b176996cb7 100644 --- a/drivers/platform/x86/surface3-wmi.c +++ b/drivers/platform/x86/surface3-wmi.c | |||
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle, | |||
139 | 139 | ||
140 | static int s3_wmi_check_platform_device(struct device *dev, void *data) | 140 | static int s3_wmi_check_platform_device(struct device *dev, void *data) |
141 | { | 141 | { |
142 | struct acpi_device *adev, *ts_adev; | 142 | struct acpi_device *adev, *ts_adev = NULL; |
143 | acpi_handle handle; | 143 | acpi_handle handle; |
144 | acpi_status status; | 144 | acpi_status status; |
145 | 145 | ||
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device) | |||
244 | return 0; | 244 | return 0; |
245 | } | 245 | } |
246 | 246 | ||
247 | #ifdef CONFIG_PM | 247 | static int __maybe_unused s3_wmi_resume(struct device *dev) |
248 | static int s3_wmi_resume(struct device *dev) | ||
249 | { | 248 | { |
250 | s3_wmi_send_lid_state(); | 249 | s3_wmi_send_lid_state(); |
251 | return 0; | 250 | return 0; |
252 | } | 251 | } |
253 | #endif | ||
254 | static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); | 252 | static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); |
255 | 253 | ||
256 | static struct platform_driver s3_wmi_driver = { | 254 | static struct platform_driver s3_wmi_driver = { |
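Dropping the #ifdef CONFIG_PM guard in favor of __maybe_unused keeps s3_wmi_resume() compiled and type-checked in every configuration; when SIMPLE_DEV_PM_OPS compiles the reference away, the attribute simply silences the unused-function warning. The general shape, as a sketch:

    #include <linux/pm.h>

    /* Sketch of the pattern: the function is always parsed, and
     * __maybe_unused suppresses the warning in builds where the
     * SIMPLE_DEV_PM_OPS reference below is compiled out. */
    static int __maybe_unused demo_resume(struct device *dev)
    {
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(demo_pm, NULL, demo_resume);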
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 0b09638fa39b..1f5d92a25a49 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) | |||
836 | struct bio *bio = rq->bio; | 836 | struct bio *bio = rq->bio; |
837 | sector_t sector = blk_rq_pos(rq); | 837 | sector_t sector = blk_rq_pos(rq); |
838 | unsigned int nr_sectors = blk_rq_sectors(rq); | 838 | unsigned int nr_sectors = blk_rq_sectors(rq); |
839 | unsigned int nr_bytes = blk_rq_bytes(rq); | ||
839 | int ret; | 840 | int ret; |
840 | 841 | ||
841 | if (sdkp->device->no_write_same) | 842 | if (sdkp->device->no_write_same) |
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) | |||
868 | 869 | ||
869 | cmd->transfersize = sdp->sector_size; | 870 | cmd->transfersize = sdp->sector_size; |
870 | cmd->allowed = SD_MAX_RETRIES; | 871 | cmd->allowed = SD_MAX_RETRIES; |
871 | return scsi_init_io(cmd); | 872 | |
873 | /* | ||
874 | * For WRITE SAME the data transferred via the DATA OUT buffer is | ||
875 | * different from the amount of data actually written to the target. | ||
876 | * | ||
877 | * We set up __data_len to the amount of data transferred via the | ||
878 | * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list | ||
879 | * to transfer a single sector of data first, but then reset it to | ||
880 | * the amount of data to be written right after so that the I/O path | ||
881 | * knows how much to actually write. | ||
882 | */ | ||
883 | rq->__data_len = sdp->sector_size; | ||
884 | ret = scsi_init_io(cmd); | ||
885 | rq->__data_len = nr_bytes; | ||
886 | return ret; | ||
872 | } | 887 | } |
873 | 888 | ||
874 | static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) | 889 | static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) |
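The sd.c comment carries the whole trick: WRITE SAME sends a single sector over the DATA OUT buffer but writes nr_bytes on the target, so __data_len is shrunk to one sector while the scatter/gather list is built and then restored for completion accounting. A userspace miniature of that shrink-map-restore dance (struct and sizes made up):

    #include <stdio.h>

    /* Miniature of the WRITE SAME sizing trick: map the payload with
     * a temporarily shrunken length, then restore the real length so
     * completion accounting covers the whole written range. */
    struct rq { unsigned long data_len; };

    static int map_payload(struct rq *rq)
    {
        printf("mapping %lu bytes for DATA OUT\n", rq->data_len);
        return 0;
    }

    int main(void)
    {
        struct rq rq = { .data_len = 8 * 512 };   /* 8 sectors requested */
        unsigned long nr_bytes = rq.data_len;
        int ret;

        rq.data_len = 512;        /* one sector travels on the wire */
        ret = map_payload(&rq);   /* stands in for scsi_init_io() */
        rq.data_len = nr_bytes;   /* I/O path sees the full size again */
        printf("completing %lu bytes, ret=%d\n", rq.data_len, ret);
        return ret;
    }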
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index c4a508a124dc..541af5946203 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c | |||
@@ -59,6 +59,14 @@ static LIST_HEAD(thermal_hwmon_list); | |||
59 | static DEFINE_MUTEX(thermal_hwmon_list_lock); | 59 | static DEFINE_MUTEX(thermal_hwmon_list_lock); |
60 | 60 | ||
61 | static ssize_t | 61 | static ssize_t |
62 | name_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
63 | { | ||
64 | struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); | ||
65 | return sprintf(buf, "%s\n", hwmon->type); | ||
66 | } | ||
67 | static DEVICE_ATTR_RO(name); | ||
68 | |||
69 | static ssize_t | ||
62 | temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) | 70 | temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) |
63 | { | 71 | { |
64 | int temperature; | 72 | int temperature; |
@@ -157,12 +165,15 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
157 | 165 | ||
158 | INIT_LIST_HEAD(&hwmon->tz_list); | 166 | INIT_LIST_HEAD(&hwmon->tz_list); |
159 | strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); | 167 | strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); |
160 | hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type, | 168 | hwmon->device = hwmon_device_register(NULL); |
161 | hwmon, NULL, NULL); | ||
162 | if (IS_ERR(hwmon->device)) { | 169 | if (IS_ERR(hwmon->device)) { |
163 | result = PTR_ERR(hwmon->device); | 170 | result = PTR_ERR(hwmon->device); |
164 | goto free_mem; | 171 | goto free_mem; |
165 | } | 172 | } |
173 | dev_set_drvdata(hwmon->device, hwmon); | ||
174 | result = device_create_file(hwmon->device, &dev_attr_name); | ||
175 | if (result) | ||
176 | goto free_mem; | ||
166 | 177 | ||
167 | register_sys_interface: | 178 | register_sys_interface: |
168 | temp = kzalloc(sizeof(*temp), GFP_KERNEL); | 179 | temp = kzalloc(sizeof(*temp), GFP_KERNEL); |
@@ -211,8 +222,10 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
211 | free_temp_mem: | 222 | free_temp_mem: |
212 | kfree(temp); | 223 | kfree(temp); |
213 | unregister_name: | 224 | unregister_name: |
214 | if (new_hwmon_device) | 225 | if (new_hwmon_device) { |
226 | device_remove_file(hwmon->device, &dev_attr_name); | ||
215 | hwmon_device_unregister(hwmon->device); | 227 | hwmon_device_unregister(hwmon->device); |
228 | } | ||
216 | free_mem: | 229 | free_mem: |
217 | if (new_hwmon_device) | 230 | if (new_hwmon_device) |
218 | kfree(hwmon); | 231 | kfree(hwmon); |
@@ -254,6 +267,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) | |||
254 | list_del(&hwmon->node); | 267 | list_del(&hwmon->node); |
255 | mutex_unlock(&thermal_hwmon_list_lock); | 268 | mutex_unlock(&thermal_hwmon_list_lock); |
256 | 269 | ||
270 | device_remove_file(hwmon->device, &dev_attr_name); | ||
257 | hwmon_device_unregister(hwmon->device); | 271 | hwmon_device_unregister(hwmon->device); |
258 | kfree(hwmon); | 272 | kfree(hwmon); |
259 | } | 273 | } |
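The thermal_hwmon change steps back from hwmon_device_register_with_info() to a bare hwmon device plus a hand-rolled name attribute, because the hwmon userspace ABI expects a readable name file. A sketch of the registration half, assuming dev_attr_name was declared with DEVICE_ATTR_RO(name) as in the hunk; teardown must run in reverse, removing the file before unregistering:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwmon.h>

    /* Sketch of the lifecycle the patch restores. Assumes a
     * dev_attr_name declared elsewhere via DEVICE_ATTR_RO(name). */
    static struct device *demo_register(void *drvdata)
    {
        struct device *hdev = hwmon_device_register(NULL);
        int err;

        if (IS_ERR(hdev))
            return hdev;
        dev_set_drvdata(hdev, drvdata);
        err = device_create_file(hdev, &dev_attr_name);
        if (err) {
            hwmon_device_unregister(hdev);
            return ERR_PTR(err);
        }
        return hdev;
    }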
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index c8823578a1b2..128d10282d16 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data, | |||
1270 | /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", | 1270 | /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", |
1271 | iommu_group_id(iommu_group), iommu_group); */ | 1271 | iommu_group_id(iommu_group), iommu_group); */ |
1272 | table_group = iommu_group_get_iommudata(iommu_group); | 1272 | table_group = iommu_group_get_iommudata(iommu_group); |
1273 | if (!table_group) { | ||
1274 | ret = -ENODEV; | ||
1275 | goto unlock_exit; | ||
1276 | } | ||
1273 | 1277 | ||
1274 | if (tce_groups_attached(container) && (!table_group->ops || | 1278 | if (tce_groups_attached(container) && (!table_group->ops || |
1275 | !table_group->ops->take_ownership || | 1279 | !table_group->ops->take_ownership || |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index bbbf588540ed..ce5e63d2c66a 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work) | |||
373 | 373 | ||
374 | static int vhost_vsock_start(struct vhost_vsock *vsock) | 374 | static int vhost_vsock_start(struct vhost_vsock *vsock) |
375 | { | 375 | { |
376 | struct vhost_virtqueue *vq; | ||
376 | size_t i; | 377 | size_t i; |
377 | int ret; | 378 | int ret; |
378 | 379 | ||
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) | |||
383 | goto err; | 384 | goto err; |
384 | 385 | ||
385 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { | 386 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { |
386 | struct vhost_virtqueue *vq = &vsock->vqs[i]; | 387 | vq = &vsock->vqs[i]; |
387 | 388 | ||
388 | mutex_lock(&vq->mutex); | 389 | mutex_lock(&vq->mutex); |
389 | 390 | ||
390 | if (!vhost_vq_access_ok(vq)) { | 391 | if (!vhost_vq_access_ok(vq)) { |
391 | ret = -EFAULT; | 392 | ret = -EFAULT; |
392 | mutex_unlock(&vq->mutex); | ||
393 | goto err_vq; | 393 | goto err_vq; |
394 | } | 394 | } |
395 | 395 | ||
396 | if (!vq->private_data) { | 396 | if (!vq->private_data) { |
397 | vq->private_data = vsock; | 397 | vq->private_data = vsock; |
398 | vhost_vq_init_access(vq); | 398 | ret = vhost_vq_init_access(vq); |
399 | if (ret) | ||
400 | goto err_vq; | ||
399 | } | 401 | } |
400 | 402 | ||
401 | mutex_unlock(&vq->mutex); | 403 | mutex_unlock(&vq->mutex); |
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) | |||
405 | return 0; | 407 | return 0; |
406 | 408 | ||
407 | err_vq: | 409 | err_vq: |
410 | vq->private_data = NULL; | ||
411 | mutex_unlock(&vq->mutex); | ||
412 | |||
408 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { | 413 | for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { |
409 | struct vhost_virtqueue *vq = &vsock->vqs[i]; | 414 | vq = &vsock->vqs[i]; |
410 | 415 | ||
411 | mutex_lock(&vq->mutex); | 416 | mutex_lock(&vq->mutex); |
412 | vq->private_data = NULL; | 417 | vq->private_data = NULL; |
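Two related fixes land in vhost_vsock_start(): the return value of vhost_vq_init_access() is finally checked, and vq is hoisted out of the loop so the err_vq label can clear private_data and drop the mutex held at the failure point before the full unwind runs, letting both failure sites share one exit. A standalone miniature of the unlock-before-unwind idiom (counts and the failure point are made up):

    #include <pthread.h>
    #include <stdio.h>

    /* Miniature of the fixed error path: the lock held at the point
     * of failure is released before the loop that unwinds all slots. */
    #define N 2

    static pthread_mutex_t lock[N] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };
    static void *priv[N];

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            pthread_mutex_lock(&lock[i]);
            if (i == 1)
                goto err;               /* fail with lock[1] held */
            priv[i] = &priv[i];
            pthread_mutex_unlock(&lock[i]);
        }
        return 0;
    err:
        priv[i] = NULL;
        pthread_mutex_unlock(&lock[i]);  /* matches the patch */
        for (i = 0; i < N; i++) {
            pthread_mutex_lock(&lock[i]);
            priv[i] = NULL;
            pthread_mutex_unlock(&lock[i]);
        }
        return 1;
    }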
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index f89245b8ba8e..68a113594808 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c | |||
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap) | |||
163 | 163 | ||
164 | int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) | 164 | int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) |
165 | { | 165 | { |
166 | int tooff = 0, fromoff = 0; | 166 | unsigned int tooff = 0, fromoff = 0; |
167 | int size; | 167 | size_t size; |
168 | 168 | ||
169 | if (to->start > from->start) | 169 | if (to->start > from->start) |
170 | fromoff = to->start - from->start; | 170 | fromoff = to->start - from->start; |
171 | else | 171 | else |
172 | tooff = from->start - to->start; | 172 | tooff = from->start - to->start; |
173 | size = to->len - tooff; | 173 | if (fromoff >= from->len || tooff >= to->len) |
174 | if (size > (int) (from->len - fromoff)) | 174 | return -EINVAL; |
175 | size = from->len - fromoff; | 175 | |
176 | if (size <= 0) | 176 | size = min_t(size_t, to->len - tooff, from->len - fromoff); |
177 | if (size == 0) | ||
177 | return -EINVAL; | 178 | return -EINVAL; |
178 | size *= sizeof(u16); | 179 | size *= sizeof(u16); |
179 | 180 | ||
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) | |||
187 | 188 | ||
188 | int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) | 189 | int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) |
189 | { | 190 | { |
190 | int tooff = 0, fromoff = 0; | 191 | unsigned int tooff = 0, fromoff = 0; |
191 | int size; | 192 | size_t size; |
192 | 193 | ||
193 | if (to->start > from->start) | 194 | if (to->start > from->start) |
194 | fromoff = to->start - from->start; | 195 | fromoff = to->start - from->start; |
195 | else | 196 | else |
196 | tooff = from->start - to->start; | 197 | tooff = from->start - to->start; |
197 | size = to->len - tooff; | 198 | if (fromoff >= from->len || tooff >= to->len) |
198 | if (size > (int) (from->len - fromoff)) | 199 | return -EINVAL; |
199 | size = from->len - fromoff; | 200 | |
200 | if (size <= 0) | 201 | size = min_t(size_t, to->len - tooff, from->len - fromoff); |
202 | if (size == 0) | ||
201 | return -EINVAL; | 203 | return -EINVAL; |
202 | size *= sizeof(u16); | 204 | size *= sizeof(u16); |
203 | 205 | ||
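The fbcmap lengths are unsigned, so in the old code `from->len - fromoff` could wrap to a huge value that the signed `size <= 0` test missed; the fix rejects offsets outside either map up front and clamps with min_t. A standalone miniature with concrete numbers showing the disjoint-range case:

    #include <stdio.h>

    /* Miniature of the fixed bounds check: reject offsets that fall
     * outside either colormap before computing the copy size, then
     * clamp to the smaller remainder. Field values are made up. */
    struct cmap { unsigned int start, len; };

    static long copy_size(const struct cmap *from, const struct cmap *to)
    {
        unsigned int tooff = 0, fromoff = 0, size;

        if (to->start > from->start)
            fromoff = to->start - from->start;
        else
            tooff = from->start - to->start;
        if (fromoff >= from->len || tooff >= to->len)
            return -1;                      /* -EINVAL in the driver */
        size = to->len - tooff;
        if (size > from->len - fromoff)
            size = from->len - fromoff;
        return size * sizeof(unsigned short);  /* entries are u16 */
    }

    int main(void)
    {
        struct cmap from = { .start = 0, .len = 16 };
        struct cmap to = { .start = 32, .len = 8 };

        printf("%ld\n", copy_size(&from, &to));  /* -1: disjoint ranges */
        return 0;
    }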
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index d47a2fcef818..c71fde5fe835 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #define pr_fmt(fmt) "virtio-mmio: " fmt | 59 | #define pr_fmt(fmt) "virtio-mmio: " fmt |
60 | 60 | ||
61 | #include <linux/acpi.h> | 61 | #include <linux/acpi.h> |
62 | #include <linux/dma-mapping.h> | ||
62 | #include <linux/highmem.h> | 63 | #include <linux/highmem.h> |
63 | #include <linux/interrupt.h> | 64 | #include <linux/interrupt.h> |
64 | #include <linux/io.h> | 65 | #include <linux/io.h> |
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
498 | struct virtio_mmio_device *vm_dev; | 499 | struct virtio_mmio_device *vm_dev; |
499 | struct resource *mem; | 500 | struct resource *mem; |
500 | unsigned long magic; | 501 | unsigned long magic; |
502 | int rc; | ||
501 | 503 | ||
502 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 504 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
503 | if (!mem) | 505 | if (!mem) |
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
547 | } | 549 | } |
548 | vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); | 550 | vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); |
549 | 551 | ||
550 | if (vm_dev->version == 1) | 552 | if (vm_dev->version == 1) { |
551 | writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); | 553 | writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); |
552 | 554 | ||
555 | rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); | ||
556 | /* | ||
557 | * In the legacy case, ensure our coherently-allocated virtio | ||
558 | * ring will be at an address expressable as a 32-bit PFN. | ||
559 | */ | ||
560 | if (!rc) | ||
561 | dma_set_coherent_mask(&pdev->dev, | ||
562 | DMA_BIT_MASK(32 + PAGE_SHIFT)); | ||
563 | } else { | ||
564 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | ||
565 | } | ||
566 | if (rc) | ||
567 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | ||
568 | if (rc) | ||
569 | dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); | ||
570 | |||
553 | platform_set_drvdata(pdev, vm_dev); | 571 | platform_set_drvdata(pdev, vm_dev); |
554 | 572 | ||
555 | return register_virtio_device(&vm_dev->vdev); | 573 | return register_virtio_device(&vm_dev->vdev); |
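The probe now negotiates DMA masks explicitly: modern devices get full 64-bit streaming and coherent masks, legacy (version 1) devices keep coherent allocations below 2^(32 + PAGE_SHIFT) because the ring address is programmed as a 32-bit page frame number, and everything degrades to 32 bits with only a warning. A condensed sketch of that cascade (the function name is made up, the calls are as in the hunk):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Sketch of the probe's mask cascade. A legacy virtio-mmio device
     * reports the ring by 32-bit PFN, hence the coherent limit. */
    static void demo_set_masks(struct device *dev, bool legacy)
    {
        int rc;

        if (legacy) {
            rc = dma_set_mask(dev, DMA_BIT_MASK(64));
            if (!rc)
                dma_set_coherent_mask(dev,
                                      DMA_BIT_MASK(32 + PAGE_SHIFT));
        } else {
            rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        }
        if (rc)
            rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (rc)
            dev_warn(dev, "no usable DMA mask, continuing anyway\n");
    }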
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 409aeaa49246..7e38ed79c3fc 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev) | |||
159 | if (xen_domain()) | 159 | if (xen_domain()) |
160 | return true; | 160 | return true; |
161 | 161 | ||
162 | /* | ||
163 | * On ARM-based machines, the DMA ops will do the right thing, | ||
164 | * so always use them with legacy devices. | ||
165 | */ | ||
166 | if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64)) | ||
167 | return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1); | ||
168 | |||
162 | return false; | 169 | return false; |
163 | } | 170 | } |
164 | 171 | ||
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index f905d6eeb048..f8afc6dcc29f 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, | |||
414 | if (map == SWIOTLB_MAP_ERROR) | 414 | if (map == SWIOTLB_MAP_ERROR) |
415 | return DMA_ERROR_CODE; | 415 | return DMA_ERROR_CODE; |
416 | 416 | ||
417 | dev_addr = xen_phys_to_bus(map); | ||
417 | xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), | 418 | xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), |
418 | dev_addr, map & ~PAGE_MASK, size, dir, attrs); | 419 | dev_addr, map & ~PAGE_MASK, size, dir, attrs); |
419 | dev_addr = xen_phys_to_bus(map); | ||
420 | 420 | ||
421 | /* | 421 | /* |
422 | * Ensure that the address returned is DMA'ble | 422 | * Ensure that the address returned is DMA'ble |
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | |||
575 | sg_dma_len(sgl) = 0; | 575 | sg_dma_len(sgl) = 0; |
576 | return 0; | 576 | return 0; |
577 | } | 577 | } |
578 | dev_addr = xen_phys_to_bus(map); | ||
578 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), | 579 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), |
579 | dev_addr, | 580 | dev_addr, |
580 | map & ~PAGE_MASK, | 581 | map & ~PAGE_MASK, |
581 | sg->length, | 582 | sg->length, |
582 | dir, | 583 | dir, |
583 | attrs); | 584 | attrs); |
584 | sg->dma_address = xen_phys_to_bus(map); | 585 | sg->dma_address = dev_addr; |
585 | } else { | 586 | } else { |
586 | /* we are not interested in the dma_addr returned by | 587 | /* we are not interested in the dma_addr returned by |
587 | * xen_dma_map_page, only in the potential cache flushes executed | 588 | * xen_dma_map_page, only in the potential cache flushes executed |
diff --git a/fs/Kconfig b/fs/Kconfig index c2a377cdda2b..83eab52fb3f6 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -38,6 +38,7 @@ config FS_DAX | |||
38 | bool "Direct Access (DAX) support" | 38 | bool "Direct Access (DAX) support" |
39 | depends on MMU | 39 | depends on MMU |
40 | depends on !(ARM || MIPS || SPARC) | 40 | depends on !(ARM || MIPS || SPARC) |
41 | select FS_IOMAP | ||
41 | help | 42 | help |
42 | Direct Access (DAX) can be used on memory-backed block devices. | 43 | Direct Access (DAX) can be used on memory-backed block devices. |
43 | If the block device supports DAX and the filesystem supports DAX, | 44 | If the block device supports DAX and the filesystem supports DAX, |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 5db5d1340d69..3c47614a4b32 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
331 | struct blk_plug plug; | 331 | struct blk_plug plug; |
332 | struct blkdev_dio *dio; | 332 | struct blkdev_dio *dio; |
333 | struct bio *bio; | 333 | struct bio *bio; |
334 | bool is_read = (iov_iter_rw(iter) == READ); | 334 | bool is_read = (iov_iter_rw(iter) == READ), is_sync; |
335 | loff_t pos = iocb->ki_pos; | 335 | loff_t pos = iocb->ki_pos; |
336 | blk_qc_t qc = BLK_QC_T_NONE; | 336 | blk_qc_t qc = BLK_QC_T_NONE; |
337 | int ret; | 337 | int ret; |
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
344 | bio_get(bio); /* extra ref for the completion handler */ | 344 | bio_get(bio); /* extra ref for the completion handler */ |
345 | 345 | ||
346 | dio = container_of(bio, struct blkdev_dio, bio); | 346 | dio = container_of(bio, struct blkdev_dio, bio); |
347 | dio->is_sync = is_sync_kiocb(iocb); | 347 | dio->is_sync = is_sync = is_sync_kiocb(iocb); |
348 | if (dio->is_sync) | 348 | if (dio->is_sync) |
349 | dio->waiter = current; | 349 | dio->waiter = current; |
350 | else | 350 | else |
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
398 | } | 398 | } |
399 | blk_finish_plug(&plug); | 399 | blk_finish_plug(&plug); |
400 | 400 | ||
401 | if (!dio->is_sync) | 401 | if (!is_sync) |
402 | return -EIOCBQUEUED; | 402 | return -EIOCBQUEUED; |
403 | 403 | ||
404 | for (;;) { | 404 | for (;;) { |
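The __blkdev_direct_IO change is a use-after-free fix: for async I/O the completion handler may free the dio as soon as the last bio is submitted, so re-reading dio->is_sync after blk_finish_plug() races with that free; caching the flag in a local before submission makes the final test safe. A deliberately crude userspace miniature:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dio { bool is_sync; };

    /* Miniature of the fix: snapshot the flag before the object can
     * be freed by a concurrent completion, then test only the copy. */
    int main(void)
    {
        struct dio *dio = malloc(sizeof(*dio));
        bool is_sync;

        dio->is_sync = false;
        is_sync = dio->is_sync;   /* cached before "submission" */

        free(dio);                /* completion ran and freed dio */

        if (!is_sync)             /* safe: no dereference of dio */
            puts("-EIOCBQUEUED");
        return 0;
    }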
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4e024260ad71..1e861a063721 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3835,10 +3835,7 @@ cache_acl: | |||
3835 | break; | 3835 | break; |
3836 | case S_IFDIR: | 3836 | case S_IFDIR: |
3837 | inode->i_fop = &btrfs_dir_file_operations; | 3837 | inode->i_fop = &btrfs_dir_file_operations; |
3838 | if (root == fs_info->tree_root) | 3838 | inode->i_op = &btrfs_dir_inode_operations; |
3839 | inode->i_op = &btrfs_dir_ro_inode_operations; | ||
3840 | else | ||
3841 | inode->i_op = &btrfs_dir_inode_operations; | ||
3842 | break; | 3839 | break; |
3843 | case S_IFLNK: | 3840 | case S_IFLNK: |
3844 | inode->i_op = &btrfs_symlink_inode_operations; | 3841 | inode->i_op = &btrfs_symlink_inode_operations; |
@@ -4505,8 +4502,19 @@ search_again: | |||
4505 | if (found_type > min_type) { | 4502 | if (found_type > min_type) { |
4506 | del_item = 1; | 4503 | del_item = 1; |
4507 | } else { | 4504 | } else { |
4508 | if (item_end < new_size) | 4505 | if (item_end < new_size) { |
4506 | /* | ||
4507 | * With NO_HOLES mode, for the following mapping | ||
4508 | * | ||
4509 | * [0-4k][hole][8k-12k] | ||
4510 | * | ||
4511 | * if truncating isize down to 6k, it ends up | ||
4512 | * isize being 8k. | ||
4513 | */ | ||
4514 | if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) | ||
4515 | last_size = new_size; | ||
4509 | break; | 4516 | break; |
4517 | } | ||
4510 | if (found_key.offset >= new_size) | 4518 | if (found_key.offset >= new_size) |
4511 | del_item = 1; | 4519 | del_item = 1; |
4512 | else | 4520 | else |
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s, | |||
5710 | 5718 | ||
5711 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | 5719 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; |
5712 | inode->i_op = &btrfs_dir_ro_inode_operations; | 5720 | inode->i_op = &btrfs_dir_ro_inode_operations; |
5721 | inode->i_opflags &= ~IOP_XATTR; | ||
5713 | inode->i_fop = &simple_dir_operations; | 5722 | inode->i_fop = &simple_dir_operations; |
5714 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | 5723 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; |
5715 | inode->i_mtime = current_time(inode); | 5724 | inode->i_mtime = current_time(inode); |
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, | |||
7215 | struct extent_map *em = NULL; | 7224 | struct extent_map *em = NULL; |
7216 | int ret; | 7225 | int ret; |
7217 | 7226 | ||
7218 | down_read(&BTRFS_I(inode)->dio_sem); | ||
7219 | if (type != BTRFS_ORDERED_NOCOW) { | 7227 | if (type != BTRFS_ORDERED_NOCOW) { |
7220 | em = create_pinned_em(inode, start, len, orig_start, | 7228 | em = create_pinned_em(inode, start, len, orig_start, |
7221 | block_start, block_len, orig_block_len, | 7229 | block_start, block_len, orig_block_len, |
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, | |||
7234 | em = ERR_PTR(ret); | 7242 | em = ERR_PTR(ret); |
7235 | } | 7243 | } |
7236 | out: | 7244 | out: |
7237 | up_read(&BTRFS_I(inode)->dio_sem); | ||
7238 | 7245 | ||
7239 | return em; | 7246 | return em; |
7240 | } | 7247 | } |
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) | |||
8692 | dio_data.unsubmitted_oe_range_start = (u64)offset; | 8699 | dio_data.unsubmitted_oe_range_start = (u64)offset; |
8693 | dio_data.unsubmitted_oe_range_end = (u64)offset; | 8700 | dio_data.unsubmitted_oe_range_end = (u64)offset; |
8694 | current->journal_info = &dio_data; | 8701 | current->journal_info = &dio_data; |
8702 | down_read(&BTRFS_I(inode)->dio_sem); | ||
8695 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8703 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8696 | &BTRFS_I(inode)->runtime_flags)) { | 8704 | &BTRFS_I(inode)->runtime_flags)) { |
8697 | inode_dio_end(inode); | 8705 | inode_dio_end(inode); |
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) | |||
8704 | iter, btrfs_get_blocks_direct, NULL, | 8712 | iter, btrfs_get_blocks_direct, NULL, |
8705 | btrfs_submit_direct, flags); | 8713 | btrfs_submit_direct, flags); |
8706 | if (iov_iter_rw(iter) == WRITE) { | 8714 | if (iov_iter_rw(iter) == WRITE) { |
8715 | up_read(&BTRFS_I(inode)->dio_sem); | ||
8707 | current->journal_info = NULL; | 8716 | current->journal_info = NULL; |
8708 | if (ret < 0 && ret != -EIOCBQUEUED) { | 8717 | if (ret < 0 && ret != -EIOCBQUEUED) { |
8709 | if (dio_data.reserve) | 8718 | if (dio_data.reserve) |
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode) | |||
9212 | break; | 9221 | break; |
9213 | } | 9222 | } |
9214 | 9223 | ||
9224 | btrfs_block_rsv_release(fs_info, rsv, -1); | ||
9215 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, | 9225 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, |
9216 | rsv, min_size, 0); | 9226 | rsv, min_size, 0); |
9217 | BUG_ON(ret); /* shouldn't happen */ | 9227 | BUG_ON(ret); /* shouldn't happen */ |
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = { | |||
10579 | static const struct inode_operations btrfs_dir_ro_inode_operations = { | 10589 | static const struct inode_operations btrfs_dir_ro_inode_operations = { |
10580 | .lookup = btrfs_lookup, | 10590 | .lookup = btrfs_lookup, |
10581 | .permission = btrfs_permission, | 10591 | .permission = btrfs_permission, |
10582 | .get_acl = btrfs_get_acl, | ||
10583 | .set_acl = btrfs_set_acl, | ||
10584 | .update_time = btrfs_update_time, | 10592 | .update_time = btrfs_update_time, |
10585 | }; | 10593 | }; |
10586 | 10594 | ||
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector, | |||
990 | } | 990 | } |
991 | EXPORT_SYMBOL_GPL(__dax_zero_page_range); | 991 | EXPORT_SYMBOL_GPL(__dax_zero_page_range); |
992 | 992 | ||
993 | #ifdef CONFIG_FS_IOMAP | ||
994 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) | 993 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) |
995 | { | 994 | { |
996 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); | 995 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); |
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
1428 | } | 1427 | } |
1429 | EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); | 1428 | EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); |
1430 | #endif /* CONFIG_FS_DAX_PMD */ | 1429 | #endif /* CONFIG_FS_DAX_PMD */ |
1431 | #endif /* CONFIG_FS_IOMAP */ | ||
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig index 36bea5adcaba..c634874e12d9 100644 --- a/fs/ext2/Kconfig +++ b/fs/ext2/Kconfig | |||
@@ -1,6 +1,5 @@ | |||
1 | config EXT2_FS | 1 | config EXT2_FS |
2 | tristate "Second extended fs support" | 2 | tristate "Second extended fs support" |
3 | select FS_IOMAP if FS_DAX | ||
4 | help | 3 | help |
5 | Ext2 is a standard Linux file system for hard disks. | 4 | Ext2 is a standard Linux file system for hard disks. |
6 | 5 | ||
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 7b90691e98c4..e38039fd96ff 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig | |||
@@ -37,7 +37,6 @@ config EXT4_FS | |||
37 | select CRC16 | 37 | select CRC16 |
38 | select CRYPTO | 38 | select CRYPTO |
39 | select CRYPTO_CRC32C | 39 | select CRYPTO_CRC32C |
40 | select FS_IOMAP if FS_DAX | ||
41 | help | 40 | help |
42 | This is the next generation of the ext3 filesystem. | 41 | This is the next generation of the ext3 filesystem. |
43 | 42 | ||
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ecc151697fd4..0a0eaecf9676 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2700,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, | |||
2700 | sattr->ia_valid |= ATTR_MTIME; | 2700 | sattr->ia_valid |= ATTR_MTIME; |
2701 | 2701 | ||
2702 | /* Except MODE, it seems harmless of setting twice. */ | 2702 | /* Except MODE, it seems harmless of setting twice. */ |
2703 | if ((attrset[1] & FATTR4_WORD1_MODE)) | 2703 | if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && |
2704 | attrset[1] & FATTR4_WORD1_MODE) | ||
2704 | sattr->ia_valid &= ~ATTR_MODE; | 2705 | sattr->ia_valid &= ~ATTR_MODE; |
2705 | 2706 | ||
2706 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) | 2707 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) |
@@ -8490,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
8490 | goto out; | 8491 | goto out; |
8491 | } | 8492 | } |
8492 | 8493 | ||
8494 | nfs4_sequence_free_slot(&lgp->res.seq_res); | ||
8493 | err = nfs4_handle_exception(server, nfs4err, exception); | 8495 | err = nfs4_handle_exception(server, nfs4err, exception); |
8494 | if (!status) { | 8496 | if (!status) { |
8495 | if (exception->retry) | 8497 | if (exception->retry) |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 90e6193ce6be..daeb94e3acd4 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | |||
1091 | case -NFS4ERR_BADXDR: | 1091 | case -NFS4ERR_BADXDR: |
1092 | case -NFS4ERR_RESOURCE: | 1092 | case -NFS4ERR_RESOURCE: |
1093 | case -NFS4ERR_NOFILEHANDLE: | 1093 | case -NFS4ERR_NOFILEHANDLE: |
1094 | case -NFS4ERR_MOVED: | ||
1094 | /* Non-seqid mutating errors */ | 1095 | /* Non-seqid mutating errors */ |
1095 | return; | 1096 | return; |
1096 | }; | 1097 | }; |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 59554f3adf29..dd042498ce7c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino) | |||
1200 | 1200 | ||
1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); | 1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); |
1202 | spin_unlock(&ino->i_lock); | 1202 | spin_unlock(&ino->i_lock); |
1203 | pnfs_free_lseg_list(&tmp_list); | ||
1204 | if (send) | 1203 | if (send) |
1205 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); | 1204 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); |
1206 | out_put_layout_hdr: | 1205 | out_put_layout_hdr: |
1206 | pnfs_free_lseg_list(&tmp_list); | ||
1207 | pnfs_put_layout_hdr(lo); | 1207 | pnfs_put_layout_hdr(lo); |
1208 | out: | 1208 | out: |
1209 | dprintk("<-- %s status: %d\n", __func__, status); | 1209 | dprintk("<-- %s status: %d\n", __func__, status); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 8e7e61b28f31..87c9a9aacda3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | |||
3179 | iter.tgid += 1, iter = next_tgid(ns, iter)) { | 3179 | iter.tgid += 1, iter = next_tgid(ns, iter)) { |
3180 | char name[PROC_NUMBUF]; | 3180 | char name[PROC_NUMBUF]; |
3181 | int len; | 3181 | int len; |
3182 | |||
3183 | cond_resched(); | ||
3182 | if (!has_pid_permissions(ns, iter.task, 2)) | 3184 | if (!has_pid_permissions(ns, iter.task, 2)) |
3183 | continue; | 3185 | continue; |
3184 | 3186 | ||
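The proc_pid_readdir hunk inserts a cond_resched() per task so that iterating an enormous pid namespace cannot monopolize a CPU on non-preemptible kernels. The general shape of the idiom, as a sketch:

    #include <linux/sched.h>

    /* Sketch: a long kernel-side iteration yields voluntarily once
     * per item so a huge scan cannot trigger softlockup warnings. */
    static void demo_scan(int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            cond_resched();
            /* ... per-item work ... */
        }
    }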
diff --git a/fs/romfs/super.c b/fs/romfs/super.c index d0f8a38dfafa..0186fe6d39f3 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/highmem.h> | 74 | #include <linux/highmem.h> |
75 | #include <linux/pagemap.h> | 75 | #include <linux/pagemap.h> |
76 | #include <linux/uaccess.h> | 76 | #include <linux/uaccess.h> |
77 | #include <linux/major.h> | ||
77 | #include "internal.h" | 78 | #include "internal.h" |
78 | 79 | ||
79 | static struct kmem_cache *romfs_inode_cachep; | 80 | static struct kmem_cache *romfs_inode_cachep; |
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode) | |||
416 | static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) | 417 | static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
417 | { | 418 | { |
418 | struct super_block *sb = dentry->d_sb; | 419 | struct super_block *sb = dentry->d_sb; |
419 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | 420 | u64 id = 0; |
421 | |||
422 | /* When calling huge_encode_dev(), | ||
423 | * use sb->s_bdev->bd_dev when, | ||
424 | * - CONFIG_ROMFS_ON_BLOCK defined | ||
425 | * use sb->s_dev when, | ||
426 | * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
427 | * - CONFIG_ROMFS_ON_MTD defined | ||
428 | * leave id as 0 when, | ||
429 | * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
430 | * - CONFIG_ROMFS_ON_MTD undefined | ||
431 | */ | ||
432 | if (sb->s_bdev) | ||
433 | id = huge_encode_dev(sb->s_bdev->bd_dev); | ||
434 | else if (sb->s_dev) | ||
435 | id = huge_encode_dev(sb->s_dev); | ||
420 | 436 | ||
421 | buf->f_type = ROMFS_MAGIC; | 437 | buf->f_type = ROMFS_MAGIC; |
422 | buf->f_namelen = ROMFS_MAXFN; | 438 | buf->f_namelen = ROMFS_MAXFN; |
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) | |||
489 | sb->s_flags |= MS_RDONLY | MS_NOATIME; | 505 | sb->s_flags |= MS_RDONLY | MS_NOATIME; |
490 | sb->s_op = &romfs_super_ops; | 506 | sb->s_op = &romfs_super_ops; |
491 | 507 | ||
508 | #ifdef CONFIG_ROMFS_ON_MTD | ||
509 | /* Use same dev ID from the underlying mtdblock device */ | ||
510 | if (sb->s_mtd) | ||
511 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); | ||
512 | #endif | ||
492 | /* read the image superblock and check it */ | 513 | /* read the image superblock and check it */ |
493 | rsb = kmalloc(512, GFP_KERNEL); | 514 | rsb = kmalloc(512, GFP_KERNEL); |
494 | if (!rsb) | 515 | if (!rsb) |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index d96e2f30084b..43953e03c356 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue { | |||
63 | struct uffd_msg msg; | 63 | struct uffd_msg msg; |
64 | wait_queue_t wq; | 64 | wait_queue_t wq; |
65 | struct userfaultfd_ctx *ctx; | 65 | struct userfaultfd_ctx *ctx; |
66 | bool waken; | ||
66 | }; | 67 | }; |
67 | 68 | ||
68 | struct userfaultfd_wake_range { | 69 | struct userfaultfd_wake_range { |
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, | |||
86 | if (len && (start > uwq->msg.arg.pagefault.address || | 87 | if (len && (start > uwq->msg.arg.pagefault.address || |
87 | start + len <= uwq->msg.arg.pagefault.address)) | 88 | start + len <= uwq->msg.arg.pagefault.address)) |
88 | goto out; | 89 | goto out; |
90 | WRITE_ONCE(uwq->waken, true); | ||
91 | /* | ||
92 | * The implicit smp_mb__before_spinlock in try_to_wake_up() | ||
93 | * renders uwq->waken visible to other CPUs before the task is | ||
94 | * waken. | ||
95 | */ | ||
89 | ret = wake_up_state(wq->private, mode); | 96 | ret = wake_up_state(wq->private, mode); |
90 | if (ret) | 97 | if (ret) |
91 | /* | 98 | /* |
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
264 | struct userfaultfd_wait_queue uwq; | 271 | struct userfaultfd_wait_queue uwq; |
265 | int ret; | 272 | int ret; |
266 | bool must_wait, return_to_userland; | 273 | bool must_wait, return_to_userland; |
274 | long blocking_state; | ||
267 | 275 | ||
268 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | 276 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
269 | 277 | ||
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
334 | uwq.wq.private = current; | 342 | uwq.wq.private = current; |
335 | uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); | 343 | uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); |
336 | uwq.ctx = ctx; | 344 | uwq.ctx = ctx; |
345 | uwq.waken = false; | ||
337 | 346 | ||
338 | return_to_userland = | 347 | return_to_userland = |
339 | (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == | 348 | (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == |
340 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); | 349 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); |
350 | blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : | ||
351 | TASK_KILLABLE; | ||
341 | 352 | ||
342 | spin_lock(&ctx->fault_pending_wqh.lock); | 353 | spin_lock(&ctx->fault_pending_wqh.lock); |
343 | /* | 354 | /* |
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
350 | * following the spin_unlock to happen before the list_add in | 361 | * following the spin_unlock to happen before the list_add in |
351 | * __add_wait_queue. | 362 | * __add_wait_queue. |
352 | */ | 363 | */ |
353 | set_current_state(return_to_userland ? TASK_INTERRUPTIBLE : | 364 | set_current_state(blocking_state); |
354 | TASK_KILLABLE); | ||
355 | spin_unlock(&ctx->fault_pending_wqh.lock); | 365 | spin_unlock(&ctx->fault_pending_wqh.lock); |
356 | 366 | ||
357 | must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, | 367 | must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, |
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
364 | wake_up_poll(&ctx->fd_wqh, POLLIN); | 374 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
365 | schedule(); | 375 | schedule(); |
366 | ret |= VM_FAULT_MAJOR; | 376 | ret |= VM_FAULT_MAJOR; |
377 | |||
378 | /* | ||
379 | * False wakeups can orginate even from rwsem before | ||
380 | * up_read() however userfaults will wait either for a | ||
381 | * targeted wakeup on the specific uwq waitqueue from | ||
382 | * wake_userfault() or for signals or for uffd | ||
383 | * release. | ||
384 | */ | ||
385 | while (!READ_ONCE(uwq.waken)) { | ||
386 | /* | ||
387 | * This needs the full smp_store_mb() | ||
388 | * guarantee as the state write must be | ||
389 | * visible to other CPUs before reading | ||
390 | * uwq.waken from other CPUs. | ||
391 | */ | ||
392 | set_current_state(blocking_state); | ||
393 | if (READ_ONCE(uwq.waken) || | ||
394 | READ_ONCE(ctx->released) || | ||
395 | (return_to_userland ? signal_pending(current) : | ||
396 | fatal_signal_pending(current))) | ||
397 | break; | ||
398 | schedule(); | ||
399 | } | ||
367 | } | 400 | } |
368 | 401 | ||
369 | __set_current_state(TASK_RUNNING); | 402 | __set_current_state(TASK_RUNNING); |
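The userfaultfd loop is the canonical defense against spurious wakeups: re-arm the task state, re-check the wake condition (plus signals and context release), and only then schedule again; set_current_state() supplies the full barrier that pairs with the waker's WRITE_ONCE(uwq->waken, true). A sketch of the skeleton with a generic condition:

    #include <linux/sched.h>

    /* Sketch of the wait loop the patch adds: state is set before
     * each condition check, so a wakeup that flips @cond between the
     * check and schedule() still leaves the task runnable. */
    static void demo_wait(bool *cond)
    {
        while (!READ_ONCE(*cond)) {
            set_current_state(TASK_KILLABLE);
            if (READ_ONCE(*cond) || fatal_signal_pending(current))
                break;
            schedule();
        }
        __set_current_state(TASK_RUNNING);
    }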
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index d346d42c54d1..33db69be4832 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include "xfs_rmap_btree.h" | 39 | #include "xfs_rmap_btree.h" |
40 | #include "xfs_btree.h" | 40 | #include "xfs_btree.h" |
41 | #include "xfs_refcount_btree.h" | 41 | #include "xfs_refcount_btree.h" |
42 | #include "xfs_ialloc_btree.h" | ||
42 | 43 | ||
43 | /* | 44 | /* |
44 | * Per-AG Block Reservations | 45 | * Per-AG Block Reservations |
@@ -200,22 +201,30 @@ __xfs_ag_resv_init( | |||
200 | struct xfs_mount *mp = pag->pag_mount; | 201 | struct xfs_mount *mp = pag->pag_mount; |
201 | struct xfs_ag_resv *resv; | 202 | struct xfs_ag_resv *resv; |
202 | int error; | 203 | int error; |
204 | xfs_extlen_t reserved; | ||
203 | 205 | ||
204 | resv = xfs_perag_resv(pag, type); | ||
205 | if (used > ask) | 206 | if (used > ask) |
206 | ask = used; | 207 | ask = used; |
207 | resv->ar_asked = ask; | 208 | reserved = ask - used; |
208 | resv->ar_reserved = resv->ar_orig_reserved = ask - used; | ||
209 | mp->m_ag_max_usable -= ask; | ||
210 | 209 | ||
211 | trace_xfs_ag_resv_init(pag, type, ask); | 210 | error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true); |
212 | 211 | if (error) { | |
213 | error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true); | ||
214 | if (error) | ||
215 | trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, | 212 | trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, |
216 | error, _RET_IP_); | 213 | error, _RET_IP_); |
214 | xfs_warn(mp, | ||
215 | "Per-AG reservation for AG %u failed. Filesystem may run out of space.", | ||
216 | pag->pag_agno); | ||
217 | return error; | ||
218 | } | ||
217 | 219 | ||
218 | return error; | 220 | mp->m_ag_max_usable -= ask; |
221 | |||
222 | resv = xfs_perag_resv(pag, type); | ||
223 | resv->ar_asked = ask; | ||
224 | resv->ar_reserved = resv->ar_orig_reserved = reserved; | ||
225 | |||
226 | trace_xfs_ag_resv_init(pag, type, ask); | ||
227 | return 0; | ||
219 | } | 228 | } |
220 | 229 | ||
221 | /* Create a per-AG block reservation. */ | 230 | /* Create a per-AG block reservation. */ |
@@ -223,6 +232,8 @@ int | |||
223 | xfs_ag_resv_init( | 232 | xfs_ag_resv_init( |
224 | struct xfs_perag *pag) | 233 | struct xfs_perag *pag) |
225 | { | 234 | { |
235 | struct xfs_mount *mp = pag->pag_mount; | ||
236 | xfs_agnumber_t agno = pag->pag_agno; | ||
226 | xfs_extlen_t ask; | 237 | xfs_extlen_t ask; |
227 | xfs_extlen_t used; | 238 | xfs_extlen_t used; |
228 | int error = 0; | 239 | int error = 0; |
@@ -231,23 +242,45 @@ xfs_ag_resv_init( | |||
231 | if (pag->pag_meta_resv.ar_asked == 0) { | 242 | if (pag->pag_meta_resv.ar_asked == 0) { |
232 | ask = used = 0; | 243 | ask = used = 0; |
233 | 244 | ||
234 | error = xfs_refcountbt_calc_reserves(pag->pag_mount, | 245 | error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used); |
235 | pag->pag_agno, &ask, &used); | ||
236 | if (error) | 246 | if (error) |
237 | goto out; | 247 | goto out; |
238 | 248 | ||
239 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | 249 | error = xfs_finobt_calc_reserves(mp, agno, &ask, &used); |
240 | ask, used); | ||
241 | if (error) | 250 | if (error) |
242 | goto out; | 251 | goto out; |
252 | |||
253 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | ||
254 | ask, used); | ||
255 | if (error) { | ||
256 | /* | ||
257 | * Because we didn't have per-AG reservations when the | ||
258 | * finobt feature was added we might not be able to | ||
259 | * reserve all needed blocks. Warn and fall back to the | ||
260 | * old and potentially buggy code in that case, but | ||
261 | * ensure we do have the reservation for the refcountbt. | ||
262 | */ | ||
263 | ask = used = 0; | ||
264 | |||
265 | mp->m_inotbt_nores = true; | ||
266 | |||
267 | error = xfs_refcountbt_calc_reserves(mp, agno, &ask, | ||
268 | &used); | ||
269 | if (error) | ||
270 | goto out; | ||
271 | |||
272 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | ||
273 | ask, used); | ||
274 | if (error) | ||
275 | goto out; | ||
276 | } | ||
243 | } | 277 | } |
244 | 278 | ||
245 | /* Create the AGFL metadata reservation */ | 279 | /* Create the AGFL metadata reservation */ |
246 | if (pag->pag_agfl_resv.ar_asked == 0) { | 280 | if (pag->pag_agfl_resv.ar_asked == 0) { |
247 | ask = used = 0; | 281 | ask = used = 0; |
248 | 282 | ||
249 | error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno, | 283 | error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used); |
250 | &ask, &used); | ||
251 | if (error) | 284 | if (error) |
252 | goto out; | 285 | goto out; |
253 | 286 | ||
@@ -256,9 +289,16 @@ xfs_ag_resv_init( | |||
256 | goto out; | 289 | goto out; |
257 | } | 290 | } |
258 | 291 | ||
292 | #ifdef DEBUG | ||
293 | /* need to read in the AGF for the ASSERT below to work */ | ||
294 | error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0); | ||
295 | if (error) | ||
296 | return error; | ||
297 | |||
259 | ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + | 298 | ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + |
260 | xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <= | 299 | xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <= |
261 | pag->pagf_freeblks + pag->pagf_flcount); | 300 | pag->pagf_freeblks + pag->pagf_flcount); |
301 | #endif | ||
262 | out: | 302 | out: |
263 | return error; | 303 | return error; |
264 | } | 304 | } |
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index af1ecb19121e..6622d46ddec3 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c | |||
@@ -131,9 +131,6 @@ xfs_attr_get( | |||
131 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 131 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
132 | return -EIO; | 132 | return -EIO; |
133 | 133 | ||
134 | if (!xfs_inode_hasattr(ip)) | ||
135 | return -ENOATTR; | ||
136 | |||
137 | error = xfs_attr_args_init(&args, ip, name, flags); | 134 | error = xfs_attr_args_init(&args, ip, name, flags); |
138 | if (error) | 135 | if (error) |
139 | return error; | 136 | return error; |
@@ -392,9 +389,6 @@ xfs_attr_remove( | |||
392 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 389 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
393 | return -EIO; | 390 | return -EIO; |
394 | 391 | ||
395 | if (!xfs_inode_hasattr(dp)) | ||
396 | return -ENOATTR; | ||
397 | |||
398 | error = xfs_attr_args_init(&args, dp, name, flags); | 392 | error = xfs_attr_args_init(&args, dp, name, flags); |
399 | if (error) | 393 | if (error) |
400 | return error; | 394 | return error; |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 44773c9eb957..bfc00de5c6f1 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc( | |||
3629 | align = xfs_get_cowextsz_hint(ap->ip); | 3629 | align = xfs_get_cowextsz_hint(ap->ip); |
3630 | else if (xfs_alloc_is_userdata(ap->datatype)) | 3630 | else if (xfs_alloc_is_userdata(ap->datatype)) |
3631 | align = xfs_get_extsz_hint(ap->ip); | 3631 | align = xfs_get_extsz_hint(ap->ip); |
3632 | if (unlikely(align)) { | 3632 | if (align) { |
3633 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, | 3633 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, |
3634 | align, 0, ap->eof, 0, ap->conv, | 3634 | align, 0, ap->eof, 0, ap->conv, |
3635 | &ap->offset, &ap->length); | 3635 | &ap->offset, &ap->length); |
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc( | |||
3701 | args.minlen = ap->minlen; | 3701 | args.minlen = ap->minlen; |
3702 | } | 3702 | } |
3703 | /* apply extent size hints if obtained earlier */ | 3703 | /* apply extent size hints if obtained earlier */ |
3704 | if (unlikely(align)) { | 3704 | if (align) { |
3705 | args.prod = align; | 3705 | args.prod = align; |
3706 | if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) | 3706 | if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) |
3707 | args.mod = (xfs_extlen_t)(args.prod - args.mod); | 3707 | args.mod = (xfs_extlen_t)(args.prod - args.mod); |
@@ -4514,8 +4514,6 @@ xfs_bmapi_write( | |||
4514 | int n; /* current extent index */ | 4514 | int n; /* current extent index */ |
4515 | xfs_fileoff_t obno; /* old block number (offset) */ | 4515 | xfs_fileoff_t obno; /* old block number (offset) */ |
4516 | int whichfork; /* data or attr fork */ | 4516 | int whichfork; /* data or attr fork */ |
4517 | char inhole; /* current location is hole in file */ | ||
4518 | char wasdelay; /* old extent was delayed */ | ||
4519 | 4517 | ||
4520 | #ifdef DEBUG | 4518 | #ifdef DEBUG |
4521 | xfs_fileoff_t orig_bno; /* original block number value */ | 4519 | xfs_fileoff_t orig_bno; /* original block number value */ |
@@ -4603,22 +4601,44 @@ xfs_bmapi_write( | |||
4603 | bma.firstblock = firstblock; | 4601 | bma.firstblock = firstblock; |
4604 | 4602 | ||
4605 | while (bno < end && n < *nmap) { | 4603 | while (bno < end && n < *nmap) { |
4606 | inhole = eof || bma.got.br_startoff > bno; | 4604 | bool need_alloc = false, wasdelay = false; |
4607 | wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); | ||
4608 | 4605 | ||
4609 | /* | 4606 | /* in hole or beyond EOF? */ |
4610 | * Make sure we only reflink into a hole. | 4607 | if (eof || bma.got.br_startoff > bno) { |
4611 | */ | 4608 | if (flags & XFS_BMAPI_DELALLOC) { |
4612 | if (flags & XFS_BMAPI_REMAP) | 4609 | /* |
4613 | ASSERT(inhole); | 4610 | * For the COW fork we can reasonably get a |
4614 | if (flags & XFS_BMAPI_COWFORK) | 4611 | * request for converting an extent that races |
4615 | ASSERT(!inhole); | 4612 | * with other threads already having converted |
4613 | * part of it, since converting COW to | ||
4614 | * regular blocks is not protected by the | ||
4615 | * IOLOCK. | ||
4616 | */ | ||
4617 | ASSERT(flags & XFS_BMAPI_COWFORK); | ||
4618 | if (!(flags & XFS_BMAPI_COWFORK)) { | ||
4619 | error = -EIO; | ||
4620 | goto error0; | ||
4621 | } | ||
4622 | |||
4623 | if (eof || bno >= end) | ||
4624 | break; | ||
4625 | } else { | ||
4626 | need_alloc = true; | ||
4627 | } | ||
4628 | } else { | ||
4629 | /* | ||
4630 | * Make sure we only reflink into a hole. | ||
4631 | */ | ||
4632 | ASSERT(!(flags & XFS_BMAPI_REMAP)); | ||
4633 | if (isnullstartblock(bma.got.br_startblock)) | ||
4634 | wasdelay = true; | ||
4635 | } | ||
4616 | 4636 | ||
4617 | /* | 4637 | /* |
4618 | * First, deal with the hole before the allocated space | 4638 | * First, deal with the hole before the allocated space |
4619 | * that we found, if any. | 4639 | * that we found, if any. |
4620 | */ | 4640 | */ |
4621 | if (inhole || wasdelay) { | 4641 | if (need_alloc || wasdelay) { |
4622 | bma.eof = eof; | 4642 | bma.eof = eof; |
4623 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); | 4643 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); |
4624 | bma.wasdel = wasdelay; | 4644 | bma.wasdel = wasdelay; |
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index cecd094404cc..cdef87db5262 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h | |||
@@ -110,6 +110,9 @@ struct xfs_extent_free_item | |||
110 | /* Map something in the CoW fork. */ | 110 | /* Map something in the CoW fork. */ |
111 | #define XFS_BMAPI_COWFORK 0x200 | 111 | #define XFS_BMAPI_COWFORK 0x200 |
112 | 112 | ||
113 | /* Only convert delalloc space, don't allocate entirely new extents */ | ||
114 | #define XFS_BMAPI_DELALLOC 0x400 | ||
115 | |||
113 | #define XFS_BMAPI_FLAGS \ | 116 | #define XFS_BMAPI_FLAGS \ |
114 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ | 117 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ |
115 | { XFS_BMAPI_METADATA, "METADATA" }, \ | 118 | { XFS_BMAPI_METADATA, "METADATA" }, \ |
@@ -120,7 +123,8 @@ struct xfs_extent_free_item | |||
120 | { XFS_BMAPI_CONVERT, "CONVERT" }, \ | 123 | { XFS_BMAPI_CONVERT, "CONVERT" }, \ |
121 | { XFS_BMAPI_ZERO, "ZERO" }, \ | 124 | { XFS_BMAPI_ZERO, "ZERO" }, \ |
122 | { XFS_BMAPI_REMAP, "REMAP" }, \ | 125 | { XFS_BMAPI_REMAP, "REMAP" }, \ |
123 | { XFS_BMAPI_COWFORK, "COWFORK" } | 126 | { XFS_BMAPI_COWFORK, "COWFORK" }, \ |
127 | { XFS_BMAPI_DELALLOC, "DELALLOC" } | ||
124 | 128 | ||
125 | 129 | ||
126 | static inline int xfs_bmapi_aflag(int w) | 130 | static inline int xfs_bmapi_aflag(int w) |
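For illustration, a hedged sketch of how a caller passes the new XFS_BMAPI_DELALLOC flag so that xfs_bmapi_write() only converts existing delayed allocations and never allocates fresh extents (the xfs_iomap.c hunk below does exactly this); all variables besides the flags are placeholders:

	/* Sketch: convert COW-fork delalloc blocks only; placeholder args. */
	int flags = XFS_BMAPI_COWFORK | XFS_BMAPI_DELALLOC;

	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, flags,
				&first_block, nres, imap, &nimaps, &dfops);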
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 0fd086d03d41..7c471881c9a6 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c | |||
@@ -82,11 +82,12 @@ xfs_finobt_set_root( | |||
82 | } | 82 | } |
83 | 83 | ||
84 | STATIC int | 84 | STATIC int |
85 | xfs_inobt_alloc_block( | 85 | __xfs_inobt_alloc_block( |
86 | struct xfs_btree_cur *cur, | 86 | struct xfs_btree_cur *cur, |
87 | union xfs_btree_ptr *start, | 87 | union xfs_btree_ptr *start, |
88 | union xfs_btree_ptr *new, | 88 | union xfs_btree_ptr *new, |
89 | int *stat) | 89 | int *stat, |
90 | enum xfs_ag_resv_type resv) | ||
90 | { | 91 | { |
91 | xfs_alloc_arg_t args; /* block allocation args */ | 92 | xfs_alloc_arg_t args; /* block allocation args */ |
92 | int error; /* error return value */ | 93 | int error; /* error return value */ |
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block( | |||
103 | args.maxlen = 1; | 104 | args.maxlen = 1; |
104 | args.prod = 1; | 105 | args.prod = 1; |
105 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | 106 | args.type = XFS_ALLOCTYPE_NEAR_BNO; |
107 | args.resv = resv; | ||
106 | 108 | ||
107 | error = xfs_alloc_vextent(&args); | 109 | error = xfs_alloc_vextent(&args); |
108 | if (error) { | 110 | if (error) { |
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block( | |||
123 | } | 125 | } |
124 | 126 | ||
125 | STATIC int | 127 | STATIC int |
128 | xfs_inobt_alloc_block( | ||
129 | struct xfs_btree_cur *cur, | ||
130 | union xfs_btree_ptr *start, | ||
131 | union xfs_btree_ptr *new, | ||
132 | int *stat) | ||
133 | { | ||
134 | return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE); | ||
135 | } | ||
136 | |||
137 | STATIC int | ||
138 | xfs_finobt_alloc_block( | ||
139 | struct xfs_btree_cur *cur, | ||
140 | union xfs_btree_ptr *start, | ||
141 | union xfs_btree_ptr *new, | ||
142 | int *stat) | ||
143 | { | ||
144 | return __xfs_inobt_alloc_block(cur, start, new, stat, | ||
145 | XFS_AG_RESV_METADATA); | ||
146 | } | ||
147 | |||
148 | STATIC int | ||
126 | xfs_inobt_free_block( | 149 | xfs_inobt_free_block( |
127 | struct xfs_btree_cur *cur, | 150 | struct xfs_btree_cur *cur, |
128 | struct xfs_buf *bp) | 151 | struct xfs_buf *bp) |
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = { | |||
328 | 351 | ||
329 | .dup_cursor = xfs_inobt_dup_cursor, | 352 | .dup_cursor = xfs_inobt_dup_cursor, |
330 | .set_root = xfs_finobt_set_root, | 353 | .set_root = xfs_finobt_set_root, |
331 | .alloc_block = xfs_inobt_alloc_block, | 354 | .alloc_block = xfs_finobt_alloc_block, |
332 | .free_block = xfs_inobt_free_block, | 355 | .free_block = xfs_inobt_free_block, |
333 | .get_minrecs = xfs_inobt_get_minrecs, | 356 | .get_minrecs = xfs_inobt_get_minrecs, |
334 | .get_maxrecs = xfs_inobt_get_maxrecs, | 357 | .get_maxrecs = xfs_inobt_get_maxrecs, |
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count( | |||
480 | return 0; | 503 | return 0; |
481 | } | 504 | } |
482 | #endif /* DEBUG */ | 505 | #endif /* DEBUG */ |
506 | |||
507 | static xfs_extlen_t | ||
508 | xfs_inobt_max_size( | ||
509 | struct xfs_mount *mp) | ||
510 | { | ||
511 | /* Bail out if we're uninitialized, which can happen in mkfs. */ | ||
512 | if (mp->m_inobt_mxr[0] == 0) | ||
513 | return 0; | ||
514 | |||
515 | return xfs_btree_calc_size(mp, mp->m_inobt_mnr, | ||
516 | (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock / | ||
517 | XFS_INODES_PER_CHUNK); | ||
518 | } | ||
519 | |||
520 | static int | ||
521 | xfs_inobt_count_blocks( | ||
522 | struct xfs_mount *mp, | ||
523 | xfs_agnumber_t agno, | ||
524 | xfs_btnum_t btnum, | ||
525 | xfs_extlen_t *tree_blocks) | ||
526 | { | ||
527 | struct xfs_buf *agbp; | ||
528 | struct xfs_btree_cur *cur; | ||
529 | int error; | ||
530 | |||
531 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); | ||
532 | if (error) | ||
533 | return error; | ||
534 | |||
535 | cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum); | ||
536 | error = xfs_btree_count_blocks(cur, tree_blocks); | ||
537 | xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); | ||
538 | xfs_buf_relse(agbp); | ||
539 | |||
540 | return error; | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Figure out how many blocks to reserve and how many are used by this btree. | ||
545 | */ | ||
546 | int | ||
547 | xfs_finobt_calc_reserves( | ||
548 | struct xfs_mount *mp, | ||
549 | xfs_agnumber_t agno, | ||
550 | xfs_extlen_t *ask, | ||
551 | xfs_extlen_t *used) | ||
552 | { | ||
553 | xfs_extlen_t tree_len = 0; | ||
554 | int error; | ||
555 | |||
556 | if (!xfs_sb_version_hasfinobt(&mp->m_sb)) | ||
557 | return 0; | ||
558 | |||
559 | error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len); | ||
560 | if (error) | ||
561 | return error; | ||
562 | |||
563 | *ask += xfs_inobt_max_size(mp); | ||
564 | *used += tree_len; | ||
565 | return 0; | ||
566 | } | ||
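Note that the calc_reserves helpers accumulate into *ask and *used rather than assigning, so a single per-AG reservation can cover several btrees. A minimal sketch of the calling convention, mirroring the xfs_ag_resv.c hunk above:

	/* Sketch: zero once, then let each helper add its share. */
	xfs_extlen_t ask = 0, used = 0;
	int error;

	error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
	if (!error)
		error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
	if (!error)
		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, ask, used);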
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h index bd88453217ce..aa81e2e63f3f 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.h +++ b/fs/xfs/libxfs/xfs_ialloc_btree.h | |||
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *, | |||
72 | #define xfs_inobt_rec_check_count(mp, rec) 0 | 72 | #define xfs_inobt_rec_check_count(mp, rec) 0 |
73 | #endif /* DEBUG */ | 73 | #endif /* DEBUG */ |
74 | 74 | ||
75 | int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno, | ||
76 | xfs_extlen_t *ask, xfs_extlen_t *used); | ||
77 | |||
75 | #endif /* __XFS_IALLOC_BTREE_H__ */ | 78 | #endif /* __XFS_IALLOC_BTREE_H__ */ |
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 2580262e4ea0..584ec896a533 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c | |||
@@ -242,7 +242,7 @@ xfs_mount_validate_sb( | |||
242 | sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || | 242 | sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || |
243 | sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || | 243 | sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || |
244 | sbp->sb_blocksize != (1 << sbp->sb_blocklog) || | 244 | sbp->sb_blocksize != (1 << sbp->sb_blocklog) || |
245 | sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || | 245 | sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || |
246 | sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || | 246 | sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || |
247 | sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || | 247 | sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || |
248 | sbp->sb_inodelog < XFS_DINODE_MIN_LOG || | 248 | sbp->sb_inodelog < XFS_DINODE_MIN_LOG || |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index b9abce524c33..c1417919ab0a 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -528,7 +528,6 @@ xfs_getbmap( | |||
528 | xfs_bmbt_irec_t *map; /* buffer for user's data */ | 528 | xfs_bmbt_irec_t *map; /* buffer for user's data */ |
529 | xfs_mount_t *mp; /* file system mount point */ | 529 | xfs_mount_t *mp; /* file system mount point */ |
530 | int nex; /* # of user extents can do */ | 530 | int nex; /* # of user extents can do */ |
531 | int nexleft; /* # of user extents left */ | ||
532 | int subnex; /* # of bmapi's can do */ | 531 | int subnex; /* # of bmapi's can do */ |
533 | int nmap; /* number of map entries */ | 532 | int nmap; /* number of map entries */ |
534 | struct getbmapx *out; /* output structure */ | 533 | struct getbmapx *out; /* output structure */ |
@@ -686,10 +685,8 @@ xfs_getbmap( | |||
686 | goto out_free_map; | 685 | goto out_free_map; |
687 | } | 686 | } |
688 | 687 | ||
689 | nexleft = nex; | ||
690 | |||
691 | do { | 688 | do { |
692 | nmap = (nexleft > subnex) ? subnex : nexleft; | 689 | nmap = (nex > subnex) ? subnex : nex; |
693 | error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), | 690 | error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), |
694 | XFS_BB_TO_FSB(mp, bmv->bmv_length), | 691 | XFS_BB_TO_FSB(mp, bmv->bmv_length), |
695 | map, &nmap, bmapi_flags); | 692 | map, &nmap, bmapi_flags); |
@@ -697,8 +694,8 @@ xfs_getbmap( | |||
697 | goto out_free_map; | 694 | goto out_free_map; |
698 | ASSERT(nmap <= subnex); | 695 | ASSERT(nmap <= subnex); |
699 | 696 | ||
700 | for (i = 0; i < nmap && nexleft && bmv->bmv_length && | 697 | for (i = 0; i < nmap && bmv->bmv_length && |
701 | cur_ext < bmv->bmv_count; i++) { | 698 | cur_ext < bmv->bmv_count - 1; i++) { |
702 | out[cur_ext].bmv_oflags = 0; | 699 | out[cur_ext].bmv_oflags = 0; |
703 | if (map[i].br_state == XFS_EXT_UNWRITTEN) | 700 | if (map[i].br_state == XFS_EXT_UNWRITTEN) |
704 | out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; | 701 | out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; |
@@ -760,16 +757,27 @@ xfs_getbmap( | |||
760 | continue; | 757 | continue; |
761 | } | 758 | } |
762 | 759 | ||
760 | /* | ||
761 | * In order to report shared extents accurately, | ||
762 | * we report each distinct shared/unshared part | ||
763 | * of a single bmbt record using multiple bmap | ||
764 | * extents. To make that happen, we iterate the | ||
765 | * same map array item multiple times, each | ||
766 | * time trimming out the subextent that we just | ||
767 | * reported. | ||
768 | * | ||
769 | * Because of this, we must check the out array | ||
770 | * index (cur_ext) directly against bmv_count-1 | ||
771 | * to avoid overflows. | ||
772 | */ | ||
763 | if (inject_map.br_startblock != NULLFSBLOCK) { | 773 | if (inject_map.br_startblock != NULLFSBLOCK) { |
764 | map[i] = inject_map; | 774 | map[i] = inject_map; |
765 | i--; | 775 | i--; |
766 | } else | 776 | } |
767 | nexleft--; | ||
768 | bmv->bmv_entries++; | 777 | bmv->bmv_entries++; |
769 | cur_ext++; | 778 | cur_ext++; |
770 | } | 779 | } |
771 | } while (nmap && nexleft && bmv->bmv_length && | 780 | } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1); |
772 | cur_ext < bmv->bmv_count); | ||
773 | 781 | ||
774 | out_free_map: | 782 | out_free_map: |
775 | kmem_free(map); | 783 | kmem_free(map); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 7f0a01f7b592..ac3b4db519df 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -422,6 +422,7 @@ retry: | |||
422 | out_free_pages: | 422 | out_free_pages: |
423 | for (i = 0; i < bp->b_page_count; i++) | 423 | for (i = 0; i < bp->b_page_count; i++) |
424 | __free_page(bp->b_pages[i]); | 424 | __free_page(bp->b_pages[i]); |
425 | bp->b_flags &= ~_XBF_PAGES; | ||
425 | return error; | 426 | return error; |
426 | } | 427 | } |
427 | 428 | ||
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b9557795eb74..de32f0fe47c8 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree( | |||
1792 | int error; | 1792 | int error; |
1793 | 1793 | ||
1794 | /* | 1794 | /* |
1795 | * The ifree transaction might need to allocate blocks for record | 1795 | * We try to use a per-AG reservation for any block needed by the finobt |
1796 | * insertion to the finobt. We don't want to fail here at ENOSPC, so | 1796 | * tree, but as the finobt feature predates the per-AG reservation |
1797 | * allow ifree to dip into the reserved block pool if necessary. | 1797 | * support, a degraded file system might not have enough space for the |
1798 | * | 1798 | * reservation at mount time. In that case try to dip into the reserved |
1799 | * Freeing large sets of inodes generally means freeing inode chunks, | 1799 | * pool and pray. |
1800 | * directory and file data blocks, so this should be relatively safe. | ||
1801 | * Only under severe circumstances should it be possible to free enough | ||
1802 | * inodes to exhaust the reserve block pool via finobt expansion while | ||
1803 | * at the same time not creating free space in the filesystem. | ||
1804 | * | 1800 | * |
1805 | * Send a warning if the reservation does happen to fail, as the inode | 1801 | * Send a warning if the reservation does happen to fail, as the inode |
1806 | * now remains allocated and sits on the unlinked list until the fs is | 1802 | * now remains allocated and sits on the unlinked list until the fs is |
1807 | * repaired. | 1803 | * repaired. |
1808 | */ | 1804 | */ |
1809 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, | 1805 | if (unlikely(mp->m_inotbt_nores)) { |
1810 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); | 1806 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, |
1807 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, | ||
1808 | &tp); | ||
1809 | } else { | ||
1810 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); | ||
1811 | } | ||
1811 | if (error) { | 1812 | if (error) { |
1812 | if (error == -ENOSPC) { | 1813 | if (error == -ENOSPC) { |
1813 | xfs_warn_ratelimited(mp, | 1814 | xfs_warn_ratelimited(mp, |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 0d147428971e..1aa3abd67b36 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate( | |||
681 | xfs_trans_t *tp; | 681 | xfs_trans_t *tp; |
682 | int nimaps; | 682 | int nimaps; |
683 | int error = 0; | 683 | int error = 0; |
684 | int flags = 0; | 684 | int flags = XFS_BMAPI_DELALLOC; |
685 | int nres; | 685 | int nres; |
686 | 686 | ||
687 | if (whichfork == XFS_COW_FORK) | 687 | if (whichfork == XFS_COW_FORK) |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 84f785218907..7f351f706b7a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -140,6 +140,7 @@ typedef struct xfs_mount { | |||
140 | int m_fixedfsid[2]; /* unchanged for life of FS */ | 140 | int m_fixedfsid[2]; /* unchanged for life of FS */ |
141 | uint m_dmevmask; /* DMI events for this FS */ | 141 | uint m_dmevmask; /* DMI events for this FS */ |
142 | __uint64_t m_flags; /* global mount flags */ | 142 | __uint64_t m_flags; /* global mount flags */ |
143 | bool m_inotbt_nores; /* no per-AG finobt resv. */ | ||
143 | int m_ialloc_inos; /* inodes in inode allocation */ | 144 | int m_ialloc_inos; /* inodes in inode allocation */ |
144 | int m_ialloc_blks; /* blocks in inode allocation */ | 145 | int m_ialloc_blks; /* blocks in inode allocation */ |
145 | int m_ialloc_min_blks;/* min blocks in sparse inode | 146 | int m_ialloc_min_blks;/* min blocks in sparse inode |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 45e50ea90769..b669b123287b 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust( | |||
1177 | * the case in all other instances. It's OK that we do this because | 1177 | * the case in all other instances. It's OK that we do this because |
1178 | * quotacheck is done only at mount time. | 1178 | * quotacheck is done only at mount time. |
1179 | */ | 1179 | */ |
1180 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); | 1180 | error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL, |
1181 | &ip); | ||
1181 | if (error) { | 1182 | if (error) { |
1182 | *res = BULKSTAT_RV_NOTHING; | 1183 | *res = BULKSTAT_RV_NOTHING; |
1183 | return error; | 1184 | return error; |
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d6d241f63b9f..56814e8ae7ea 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h | |||
@@ -144,7 +144,7 @@ struct __drm_crtcs_state { | |||
144 | struct drm_crtc *ptr; | 144 | struct drm_crtc *ptr; |
145 | struct drm_crtc_state *state; | 145 | struct drm_crtc_state *state; |
146 | struct drm_crtc_commit *commit; | 146 | struct drm_crtc_commit *commit; |
147 | s64 __user *out_fence_ptr; | 147 | s32 __user *out_fence_ptr; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | struct __drm_connnectors_state { | 150 | struct __drm_connnectors_state { |
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index bf9991b20611..137432386310 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h | |||
@@ -488,7 +488,7 @@ struct drm_mode_config { | |||
488 | /** | 488 | /** |
489 | * @prop_out_fence_ptr: Sync File fd pointer representing the | 489 | * @prop_out_fence_ptr: Sync File fd pointer representing the |
490 | * outgoing fences for a CRTC. Userspace should provide a pointer to a | 490 | * outgoing fences for a CRTC. Userspace should provide a pointer to a |
491 | * value of type s64, and then cast that pointer to u64. | 491 | * value of type s32, and then cast that pointer to u64. |
492 | */ | 492 | */ |
493 | struct drm_property *prop_out_fence_ptr; | 493 | struct drm_property *prop_out_fence_ptr; |
494 | /** | 494 | /** |
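From userspace, the OUT_FENCE_PTR property now takes the address of an s32 slot, cast to u64. A hedged libdrm-style sketch; the property and object id variables are placeholders:

	/* Userspace sketch: the kernel writes the fence fd into this s32. */
	int32_t out_fence_fd = -1;

	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
				 (uint64_t)(uintptr_t)&out_fence_fd);
	/* after a successful atomic commit, out_fence_fd holds the sync file fd */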
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 05cf951df3fe..3ed1f3b1d594 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); | |||
247 | void bpf_map_put_with_uref(struct bpf_map *map); | 247 | void bpf_map_put_with_uref(struct bpf_map *map); |
248 | void bpf_map_put(struct bpf_map *map); | 248 | void bpf_map_put(struct bpf_map *map); |
249 | int bpf_map_precharge_memlock(u32 pages); | 249 | int bpf_map_precharge_memlock(u32 pages); |
250 | void *bpf_map_area_alloc(size_t size); | ||
251 | void bpf_map_area_free(void *base); | ||
250 | 252 | ||
251 | extern int sysctl_unprivileged_bpf_disabled; | 253 | extern int sysctl_unprivileged_bpf_disabled; |
252 | 254 | ||
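The implementation of these helpers is not part of this hunk; below is a hedged sketch of the kmalloc-first, vmalloc-fallback pattern they wrap, which the arraymap and hashtab hunks later in this diff switch over to:

	/* Sketch only: try the slab allocator for modest sizes, fall back
	 * to vmalloc for large ones; kvfree() copes with either on free.
	 */
	void *bpf_map_area_alloc(size_t size)
	{
		const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
		void *area;

		if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
			area = kmalloc(size, GFP_USER | flags);
			if (area)
				return area;
		}
		return __vmalloc(size, GFP_USER | flags | __GFP_HIGHMEM,
				 PAGE_KERNEL);
	}

	void bpf_map_area_free(void *area)
	{
		kvfree(area);
	}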
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7e05c5e4e45c..87165f06a307 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | #define CPUFREQ_ETERNAL (-1) | 32 | #define CPUFREQ_ETERNAL (-1) |
33 | #define CPUFREQ_NAME_LEN 16 | 33 | #define CPUFREQ_NAME_LEN 16 |
34 | /* Print length for names. Extra 1 space for accomodating '\n' in prints */ | 34 | /* Print length for names. Extra 1 space for accommodating '\n' in prints */ |
35 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) | 35 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) |
36 | 36 | ||
37 | struct cpufreq_governor; | 37 | struct cpufreq_governor; |
@@ -115,7 +115,7 @@ struct cpufreq_policy { | |||
115 | * guarantee that frequency can be changed on any CPU sharing the | 115 | * guarantee that frequency can be changed on any CPU sharing the |
116 | * policy and that the change will affect all of the policy CPUs then. | 116 | * policy and that the change will affect all of the policy CPUs then. |
117 | * - fast_switch_enabled is to be set by governors that support fast | 117 | * - fast_switch_enabled is to be set by governors that support fast |
118 | * freqnency switching with the help of cpufreq_enable_fast_switch(). | 118 | * frequency switching with the help of cpufreq_enable_fast_switch(). |
119 | */ | 119 | */ |
120 | bool fast_switch_possible; | 120 | bool fast_switch_possible; |
121 | bool fast_switch_enabled; | 121 | bool fast_switch_enabled; |
@@ -415,9 +415,6 @@ static inline void cpufreq_resume(void) {} | |||
415 | /* Policy Notifiers */ | 415 | /* Policy Notifiers */ |
416 | #define CPUFREQ_ADJUST (0) | 416 | #define CPUFREQ_ADJUST (0) |
417 | #define CPUFREQ_NOTIFY (1) | 417 | #define CPUFREQ_NOTIFY (1) |
418 | #define CPUFREQ_START (2) | ||
419 | #define CPUFREQ_CREATE_POLICY (3) | ||
420 | #define CPUFREQ_REMOVE_POLICY (4) | ||
421 | 418 | ||
422 | #ifdef CONFIG_CPU_FREQ | 419 | #ifdef CONFIG_CPU_FREQ |
423 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); | 420 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c2748accea71..e973faba69dc 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, | |||
274 | struct irq_chip *irqchip, | 274 | struct irq_chip *irqchip, |
275 | int parent_irq); | 275 | int parent_irq); |
276 | 276 | ||
277 | int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, | 277 | int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, |
278 | struct irq_chip *irqchip, | ||
279 | unsigned int first_irq, | ||
280 | irq_flow_handler_t handler, | ||
281 | unsigned int type, | ||
282 | bool nested, | ||
283 | struct lock_class_key *lock_key); | ||
284 | |||
285 | #ifdef CONFIG_LOCKDEP | ||
286 | |||
287 | /* | ||
288 | * Lockdep requires that each irqchip instance be created with a | ||
289 | * unique key so as to avoid unnecessary warnings. This upfront | ||
290 | * boilerplate static inlines provides such a key for each | ||
291 | * unique instance. | ||
292 | */ | ||
293 | static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, | ||
294 | struct irq_chip *irqchip, | ||
295 | unsigned int first_irq, | ||
296 | irq_flow_handler_t handler, | ||
297 | unsigned int type) | ||
298 | { | ||
299 | static struct lock_class_key key; | ||
300 | |||
301 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | ||
302 | handler, type, false, &key); | ||
303 | } | ||
304 | |||
305 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | ||
278 | struct irq_chip *irqchip, | 306 | struct irq_chip *irqchip, |
279 | unsigned int first_irq, | 307 | unsigned int first_irq, |
280 | irq_flow_handler_t handler, | 308 | irq_flow_handler_t handler, |
281 | unsigned int type, | 309 | unsigned int type) |
282 | bool nested, | 310 | { |
283 | struct lock_class_key *lock_key); | 311 | |
312 | static struct lock_class_key key; | ||
313 | |||
314 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | ||
315 | handler, type, true, &key); | ||
316 | } | ||
317 | #else | ||
318 | static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, | ||
319 | struct irq_chip *irqchip, | ||
320 | unsigned int first_irq, | ||
321 | irq_flow_handler_t handler, | ||
322 | unsigned int type) | ||
323 | { | ||
324 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, | ||
325 | handler, type, false, NULL); | ||
326 | } | ||
284 | 327 | ||
285 | /* FIXME: I assume threaded IRQchips do not have the lockdep problem */ | ||
286 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | 328 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, |
287 | struct irq_chip *irqchip, | 329 | struct irq_chip *irqchip, |
288 | unsigned int first_irq, | 330 | unsigned int first_irq, |
289 | irq_flow_handler_t handler, | 331 | irq_flow_handler_t handler, |
290 | unsigned int type) | 332 | unsigned int type) |
291 | { | 333 | { |
292 | return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, | 334 | return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, |
293 | handler, type, true, NULL); | 335 | handler, type, true, NULL); |
294 | } | 336 | } |
295 | 337 | #endif /* CONFIG_LOCKDEP */ | |
296 | #ifdef CONFIG_LOCKDEP | ||
297 | #define gpiochip_irqchip_add(...) \ | ||
298 | ( \ | ||
299 | ({ \ | ||
300 | static struct lock_class_key _key; \ | ||
301 | _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \ | ||
302 | }) \ | ||
303 | ) | ||
304 | #else | ||
305 | #define gpiochip_irqchip_add(...) \ | ||
306 | _gpiochip_irqchip_add(__VA_ARGS__, false, NULL) | ||
307 | #endif | ||
308 | 338 | ||
309 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ | 339 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ |
310 | 340 | ||
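Call sites are unchanged by this refactor; a hedged sketch of a typical driver registration, which now gets a unique lockdep class per call site from the static inline rather than from the old macro (the chip names are illustrative):

	/* Sketch: register an irqchip for a gpio_chip; placeholder names. */
	ret = gpiochip_irqchip_add(&mychip->gc, &mychip_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		dev_err(dev, "failed to add irqchip\n");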
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 01033fadea47..c1784c0b4f35 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | |||
284 | unsigned long map_offset); | 284 | unsigned long map_offset); |
285 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, | 285 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, |
286 | unsigned long pnum); | 286 | unsigned long pnum); |
287 | extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, | 287 | extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages, |
288 | enum zone_type target); | 288 | enum zone_type target, int *zone_shift); |
289 | 289 | ||
290 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 290 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 257173e0095e..f541da68d1e7 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h | |||
@@ -35,6 +35,8 @@ | |||
35 | #define PHY_ID_KSZ886X 0x00221430 | 35 | #define PHY_ID_KSZ886X 0x00221430 |
36 | #define PHY_ID_KSZ8863 0x00221435 | 36 | #define PHY_ID_KSZ8863 0x00221435 |
37 | 37 | ||
38 | #define PHY_ID_KSZ8795 0x00221550 | ||
39 | |||
38 | /* struct phy_device dev_flags definitions */ | 40 | /* struct phy_device dev_flags definitions */ |
39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 | 41 | #define MICREL_PHY_50MHZ_CLK 0x00000001 |
40 | #define MICREL_PHY_FXEN 0x00000002 | 42 | #define MICREL_PHY_FXEN 0x00000002 |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 36d9896fbc1e..f4aac87adcc3 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, | |||
972 | * @zonelist - The zonelist to search for a suitable zone | 972 | * @zonelist - The zonelist to search for a suitable zone |
973 | * @highest_zoneidx - The zone index of the highest zone to return | 973 | * @highest_zoneidx - The zone index of the highest zone to return |
974 | * @nodes - An optional nodemask to filter the zonelist with | 974 | * @nodes - An optional nodemask to filter the zonelist with |
975 | * @zone - The first suitable zone found is returned via this parameter | 975 | * @return - Zoneref pointer for the first suitable zone found (see below) |
976 | * | 976 | * |
977 | * This function returns the first zone at or below a given zone index that is | 977 | * This function returns the first zone at or below a given zone index that is |
978 | * within the allowed nodemask. The zoneref returned is a cursor that can be | 978 | * within the allowed nodemask. The zoneref returned is a cursor that can be |
979 | * used to iterate the zonelist with next_zones_zonelist by advancing it by | 979 | * used to iterate the zonelist with next_zones_zonelist by advancing it by |
980 | * one before calling. | 980 | * one before calling. |
981 | * | ||
982 | * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is | ||
983 | * never NULL). This may happen either genuinely, or due to a concurrent | ||
984 | * nodemask update caused by a cpuset modification. | ||
981 | */ | 985 | */ |
982 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | 986 | static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, |
983 | enum zone_type highest_zoneidx, | 987 | enum zone_type highest_zoneidx, |
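A minimal sketch of the caller-side check the new comment asks for: the zoneref itself is always valid, but the zone inside it may not be (the surrounding variables are placeholders):

	/* Sketch: detect the no-eligible-zone case via zoneref->zone. */
	struct zoneref *z;

	z = first_zones_zonelist(zonelist, highest_zoneidx, nodemask);
	if (!z->zone)
		return NULL;	/* nothing at or below highest_zoneidx */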
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index bca536341d1a..1b1ca04820a3 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -282,7 +282,7 @@ enum nfsstat4 { | |||
282 | 282 | ||
283 | static inline bool seqid_mutating_err(u32 err) | 283 | static inline bool seqid_mutating_err(u32 err) |
284 | { | 284 | { |
285 | /* rfc 3530 section 8.1.5: */ | 285 | /* See RFC 7530, section 9.1.7 */ |
286 | switch (err) { | 286 | switch (err) { |
287 | case NFS4ERR_STALE_CLIENTID: | 287 | case NFS4ERR_STALE_CLIENTID: |
288 | case NFS4ERR_STALE_STATEID: | 288 | case NFS4ERR_STALE_STATEID: |
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err) | |||
291 | case NFS4ERR_BADXDR: | 291 | case NFS4ERR_BADXDR: |
292 | case NFS4ERR_RESOURCE: | 292 | case NFS4ERR_RESOURCE: |
293 | case NFS4ERR_NOFILEHANDLE: | 293 | case NFS4ERR_NOFILEHANDLE: |
294 | case NFS4ERR_MOVED: | ||
294 | return false; | 295 | return false; |
295 | }; | 296 | }; |
296 | return true; | 297 | return true; |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index aacca824a6ae..0a3fadc32693 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled; | |||
110 | extern int watchdog_thresh; | 110 | extern int watchdog_thresh; |
111 | extern unsigned long watchdog_enabled; | 111 | extern unsigned long watchdog_enabled; |
112 | extern unsigned long *watchdog_cpumask_bits; | 112 | extern unsigned long *watchdog_cpumask_bits; |
113 | extern atomic_t watchdog_park_in_progress; | ||
113 | #ifdef CONFIG_SMP | 114 | #ifdef CONFIG_SMP |
114 | extern int sysctl_softlockup_all_cpu_backtrace; | 115 | extern int sysctl_softlockup_all_cpu_backtrace; |
115 | extern int sysctl_hardlockup_all_cpu_backtrace; | 116 | extern int sysctl_hardlockup_all_cpu_backtrace; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index f7d95f644eed..7fc1105605bf 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
27 | #include <linux/mod_devicetable.h> | 27 | #include <linux/mod_devicetable.h> |
28 | #include <linux/phy_led_triggers.h> | ||
29 | 28 | ||
30 | #include <linux/atomic.h> | 29 | #include <linux/atomic.h> |
31 | 30 | ||
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h index a2daea0a37d2..b37b05bfd1a6 100644 --- a/include/linux/phy_led_triggers.h +++ b/include/linux/phy_led_triggers.h | |||
@@ -18,11 +18,11 @@ struct phy_device; | |||
18 | #ifdef CONFIG_LED_TRIGGER_PHY | 18 | #ifdef CONFIG_LED_TRIGGER_PHY |
19 | 19 | ||
20 | #include <linux/leds.h> | 20 | #include <linux/leds.h> |
21 | #include <linux/phy.h> | ||
21 | 22 | ||
22 | #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 | 23 | #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 |
23 | #define PHY_MII_BUS_ID_SIZE (20 - 3) | ||
24 | 24 | ||
25 | #define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ | 25 | #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ |
26 | FIELD_SIZEOF(struct mdio_device, addr)+\ | 26 | FIELD_SIZEOF(struct mdio_device, addr)+\ |
27 | PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) | 27 | PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) |
28 | 28 | ||
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 85cc819676e8..333ad11b3dd9 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *); | |||
216 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); | 216 | void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); |
217 | bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, | 217 | bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, |
218 | const struct sockaddr *sap); | 218 | const struct sockaddr *sap); |
219 | void rpc_cleanup_clids(void); | ||
219 | #endif /* __KERNEL__ */ | 220 | #endif /* __KERNEL__ */ |
220 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 221 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 0c729c3c8549..d9718378a8be 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -194,8 +194,6 @@ struct platform_freeze_ops { | |||
194 | }; | 194 | }; |
195 | 195 | ||
196 | #ifdef CONFIG_SUSPEND | 196 | #ifdef CONFIG_SUSPEND |
197 | extern suspend_state_t mem_sleep_default; | ||
198 | |||
199 | /** | 197 | /** |
200 | * suspend_set_ops - set platform dependent suspend operations | 198 | * suspend_set_ops - set platform dependent suspend operations |
201 | * @ops: The new suspend operations to set. | 199 | * @ops: The new suspend operations to set. |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 66204007d7ac..5209b5ed2a64 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, | |||
56 | 56 | ||
57 | static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, | 57 | static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, |
58 | struct virtio_net_hdr *hdr, | 58 | struct virtio_net_hdr *hdr, |
59 | bool little_endian) | 59 | bool little_endian, |
60 | bool has_data_valid) | ||
60 | { | 61 | { |
61 | memset(hdr, 0, sizeof(*hdr)); /* no info leak */ | 62 | memset(hdr, 0, sizeof(*hdr)); /* no info leak */ |
62 | 63 | ||
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, | |||
91 | skb_checksum_start_offset(skb)); | 92 | skb_checksum_start_offset(skb)); |
92 | hdr->csum_offset = __cpu_to_virtio16(little_endian, | 93 | hdr->csum_offset = __cpu_to_virtio16(little_endian, |
93 | skb->csum_offset); | 94 | skb->csum_offset); |
94 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { | 95 | } else if (has_data_valid && |
96 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | ||
95 | hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; | 97 | hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; |
96 | } /* else everything is zero */ | 98 | } /* else everything is zero */ |
97 | 99 | ||
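A hedged sketch of a call site after this change: only paths where VIRTIO_NET_HDR_F_DATA_VALID is legal (towards a guest or tap reader) pass true; the endianness value and error handling here are placeholders:

	/* Sketch: suppress DATA_VALID where the virtio spec forbids it. */
	struct virtio_net_hdr hdr;
	bool little_endian = true;	/* assume VIRTIO_F_VERSION_1 here */

	if (virtio_net_hdr_from_skb(skb, &hdr, little_endian, false))
		return -EINVAL;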
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 487e57391664..7afe991e900e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); | |||
871 | * upper-layer output functions | 871 | * upper-layer output functions |
872 | */ | 872 | */ |
873 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | 873 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, |
874 | struct ipv6_txoptions *opt, int tclass); | 874 | __u32 mark, struct ipv6_txoptions *opt, int tclass); |
875 | 875 | ||
876 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); | 876 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); |
877 | 877 | ||
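Every caller must now supply the skb mark explicitly; a hedged sketch of a socket-path call, with the surrounding variables assumed for this example:

	/* Sketch: forward the socket mark through the new argument. */
	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);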
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index d4c1c75b8862..73dd87647460 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h | |||
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops { | |||
44 | int (*get_encap_size)(struct lwtunnel_state *lwtstate); | 44 | int (*get_encap_size)(struct lwtunnel_state *lwtstate); |
45 | int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b); | 45 | int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b); |
46 | int (*xmit)(struct sk_buff *skb); | 46 | int (*xmit)(struct sk_buff *skb); |
47 | |||
48 | struct module *owner; | ||
47 | }; | 49 | }; |
48 | 50 | ||
49 | #ifdef CONFIG_LWTUNNEL | 51 | #ifdef CONFIG_LWTUNNEL |
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, | |||
105 | unsigned int num); | 107 | unsigned int num); |
106 | int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, | 108 | int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, |
107 | unsigned int num); | 109 | unsigned int num); |
110 | int lwtunnel_valid_encap_type(u16 encap_type); | ||
111 | int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len); | ||
108 | int lwtunnel_build_state(struct net_device *dev, u16 encap_type, | 112 | int lwtunnel_build_state(struct net_device *dev, u16 encap_type, |
109 | struct nlattr *encap, | 113 | struct nlattr *encap, |
110 | unsigned int family, const void *cfg, | 114 | unsigned int family, const void *cfg, |
@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, | |||
168 | return -EOPNOTSUPP; | 172 | return -EOPNOTSUPP; |
169 | } | 173 | } |
170 | 174 | ||
175 | static inline int lwtunnel_valid_encap_type(u16 encap_type) | ||
176 | { | ||
177 | return -EOPNOTSUPP; | ||
178 | } | ||
179 | static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) | ||
180 | { | ||
181 | return -EOPNOTSUPP; | ||
182 | } | ||
183 | |||
171 | static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, | 184 | static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, |
172 | struct nlattr *encap, | 185 | struct nlattr *encap, |
173 | unsigned int family, const void *cfg, | 186 | unsigned int family, const void *cfg, |
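A hedged sketch of how route configuration code can use the new validators to reject an unknown encap type early; the fib_config field names are assumptions for this example:

	/* Sketch: fail fast on an unsupported encap type before
	 * building any lwtunnel state.
	 */
	if (cfg->fc_encap) {
		err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
		if (err < 0)
			return err;
	}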
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 924325c46aab..7dfdb517f0be 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -207,9 +207,9 @@ struct nft_set_iter { | |||
207 | unsigned int skip; | 207 | unsigned int skip; |
208 | int err; | 208 | int err; |
209 | int (*fn)(const struct nft_ctx *ctx, | 209 | int (*fn)(const struct nft_ctx *ctx, |
210 | const struct nft_set *set, | 210 | struct nft_set *set, |
211 | const struct nft_set_iter *iter, | 211 | const struct nft_set_iter *iter, |
212 | const struct nft_set_elem *elem); | 212 | struct nft_set_elem *elem); |
213 | }; | 213 | }; |
214 | 214 | ||
215 | /** | 215 | /** |
@@ -301,7 +301,7 @@ struct nft_set_ops { | |||
301 | void (*remove)(const struct nft_set *set, | 301 | void (*remove)(const struct nft_set *set, |
302 | const struct nft_set_elem *elem); | 302 | const struct nft_set_elem *elem); |
303 | void (*walk)(const struct nft_ctx *ctx, | 303 | void (*walk)(const struct nft_ctx *ctx, |
304 | const struct nft_set *set, | 304 | struct nft_set *set, |
305 | struct nft_set_iter *iter); | 305 | struct nft_set_iter *iter); |
306 | 306 | ||
307 | unsigned int (*privsize)(const struct nlattr * const nla[]); | 307 | unsigned int (*privsize)(const struct nlattr * const nla[]); |
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index cbedda077db2..5ceb2205e4e3 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h | |||
@@ -9,6 +9,12 @@ struct nft_fib { | |||
9 | 9 | ||
10 | extern const struct nla_policy nft_fib_policy[]; | 10 | extern const struct nla_policy nft_fib_policy[]; |
11 | 11 | ||
12 | static inline bool | ||
13 | nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in) | ||
14 | { | ||
15 | return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; | ||
16 | } | ||
17 | |||
12 | int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr); | 18 | int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr); |
13 | int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | 19 | int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, |
14 | const struct nlattr * const tb[]); | 20 | const struct nlattr * const tb[]); |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 958a24d8fae7..b567e4452a47 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) | |||
352 | } | 352 | } |
353 | } | 353 | } |
354 | 354 | ||
355 | static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) | ||
356 | { | ||
357 | if (mtu >= 4096) | ||
358 | return IB_MTU_4096; | ||
359 | else if (mtu >= 2048) | ||
360 | return IB_MTU_2048; | ||
361 | else if (mtu >= 1024) | ||
362 | return IB_MTU_1024; | ||
363 | else if (mtu >= 512) | ||
364 | return IB_MTU_512; | ||
365 | else | ||
366 | return IB_MTU_256; | ||
367 | } | ||
368 | |||
355 | enum ib_port_state { | 369 | enum ib_port_state { |
356 | IB_PORT_NOP = 0, | 370 | IB_PORT_NOP = 0, |
357 | IB_PORT_DOWN = 1, | 371 | IB_PORT_DOWN = 1, |
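A one-line usage sketch of the new helper: it rounds an integer MTU down to the nearest supported enum step, so a 1500-byte Ethernet MTU maps to IB_MTU_1024 (the netdev source here is a placeholder):

	/* Sketch: clamp an integer path MTU to the nearest IB enum value. */
	enum ib_mtu mtu = ib_mtu_int_to_enum(netdev->mtu);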
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index 6902c2a8bd23..4b6b489a8d7c 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h | |||
@@ -55,17 +55,17 @@ struct mcip_cmd { | |||
55 | 55 | ||
56 | struct mcip_bcr { | 56 | struct mcip_bcr { |
57 | #ifdef CONFIG_CPU_BIG_ENDIAN | 57 | #ifdef CONFIG_CPU_BIG_ENDIAN |
58 | unsigned int pad3:8, | 58 | unsigned int pad4:6, pw_dom:1, pad3:1, |
59 | idu:1, llm:1, num_cores:6, | 59 | idu:1, pad2:1, num_cores:6, |
60 | iocoh:1, gfrc:1, dbg:1, pad2:1, | 60 | pad:1, gfrc:1, dbg:1, pw:1, |
61 | msg:1, sem:1, ipi:1, pad:1, | 61 | msg:1, sem:1, ipi:1, slv:1, |
62 | ver:8; | 62 | ver:8; |
63 | #else | 63 | #else |
64 | unsigned int ver:8, | 64 | unsigned int ver:8, |
65 | pad:1, ipi:1, sem:1, msg:1, | 65 | slv:1, ipi:1, sem:1, msg:1, |
66 | pad2:1, dbg:1, gfrc:1, iocoh:1, | 66 | pw:1, dbg:1, gfrc:1, pad:1, |
67 | num_cores:6, llm:1, idu:1, | 67 | num_cores:6, pad2:1, idu:1, |
68 | pad3:8; | 68 | pad3:1, pw_dom:1, pad4:6; |
69 | #endif | 69 | #endif |
70 | }; | 70 | }; |
71 | 71 | ||
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h index 3cbc327801d6..c451eec42a83 100644 --- a/include/uapi/linux/cec-funcs.h +++ b/include/uapi/linux/cec-funcs.h | |||
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg, | |||
1665 | __u8 audio_out_compensated, | 1665 | __u8 audio_out_compensated, |
1666 | __u8 audio_out_delay) | 1666 | __u8 audio_out_delay) |
1667 | { | 1667 | { |
1668 | msg->len = 7; | 1668 | msg->len = 6; |
1669 | msg->msg[0] |= 0xf; /* broadcast */ | 1669 | msg->msg[0] |= 0xf; /* broadcast */ |
1670 | msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; | 1670 | msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; |
1671 | msg->msg[2] = phys_addr >> 8; | 1671 | msg->msg[2] = phys_addr >> 8; |
1672 | msg->msg[3] = phys_addr & 0xff; | 1672 | msg->msg[3] = phys_addr & 0xff; |
1673 | msg->msg[4] = video_latency; | 1673 | msg->msg[4] = video_latency; |
1674 | msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; | 1674 | msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; |
1675 | msg->msg[6] = audio_out_delay; | 1675 | if (audio_out_compensated == 3) |
1676 | msg->msg[msg->len++] = audio_out_delay; | ||
1676 | } | 1677 | } |
1677 | 1678 | ||
1678 | static inline void cec_ops_report_current_latency(const struct cec_msg *msg, | 1679 | static inline void cec_ops_report_current_latency(const struct cec_msg *msg, |
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg, | |||
1686 | *video_latency = msg->msg[4]; | 1687 | *video_latency = msg->msg[4]; |
1687 | *low_latency_mode = (msg->msg[5] >> 2) & 1; | 1688 | *low_latency_mode = (msg->msg[5] >> 2) & 1; |
1688 | *audio_out_compensated = msg->msg[5] & 3; | 1689 | *audio_out_compensated = msg->msg[5] & 3; |
1689 | *audio_out_delay = msg->msg[6]; | 1690 | if (*audio_out_compensated == 3 && msg->len >= 7) |
1691 | *audio_out_delay = msg->msg[6]; | ||
1692 | else | ||
1693 | *audio_out_delay = 0; | ||
1690 | } | 1694 | } |
1691 | 1695 | ||
1692 | static inline void cec_msg_request_current_latency(struct cec_msg *msg, | 1696 | static inline void cec_msg_request_current_latency(struct cec_msg *msg, |
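A hedged usage sketch of the fixed helper: the delay byte is appended only when audio_out_compensated is 3 (partially compensated), so other reports are now 6-byte messages as the spec requires; the argument variables are placeholders:

	/* Sketch: fully compensated output; no audio_out_delay byte sent. */
	struct cec_msg msg = {};

	cec_msg_report_current_latency(&msg, phys_addr, video_latency,
				       low_latency_mode, 1, 0);
	/* msg.len == 6; passing 3 would append the delay byte, len == 7 */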
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h index 8be21e02387d..d0b5fa91ff54 100644 --- a/include/uapi/linux/netfilter/nf_log.h +++ b/include/uapi/linux/netfilter/nf_log.h | |||
@@ -9,4 +9,6 @@ | |||
9 | #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ | 9 | #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ |
10 | #define NF_LOG_MASK 0x2f | 10 | #define NF_LOG_MASK 0x2f |
11 | 11 | ||
12 | #define NF_LOG_PREFIXLEN 128 | ||
13 | |||
12 | #endif /* _NETFILTER_NF_LOG_H */ | 14 | #endif /* _NETFILTER_NF_LOG_H */ |
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 881d49e94569..e3f27e09eb2b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h | |||
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags { | |||
235 | /** | 235 | /** |
236 | * enum nft_rule_compat_attributes - nf_tables rule compat attributes | 236 | * enum nft_rule_compat_attributes - nf_tables rule compat attributes |
237 | * | 237 | * |
238 | * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32) | 238 | * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32) |
239 | * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) | 239 | * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) |
240 | */ | 240 | */ |
241 | enum nft_rule_compat_attributes { | 241 | enum nft_rule_compat_attributes { |
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes { | |||
499 | * enum nft_byteorder_ops - nf_tables byteorder operators | 499 | * enum nft_byteorder_ops - nf_tables byteorder operators |
500 | * | 500 | * |
501 | * @NFT_BYTEORDER_NTOH: network to host operator | 501 | * @NFT_BYTEORDER_NTOH: network to host operator |
502 | * @NFT_BYTEORDER_HTON: host to network opertaor | 502 | * @NFT_BYTEORDER_HTON: host to network operator |
503 | */ | 503 | */ |
504 | enum nft_byteorder_ops { | 504 | enum nft_byteorder_ops { |
505 | NFT_BYTEORDER_NTOH, | 505 | NFT_BYTEORDER_NTOH, |
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild index 82bdf5626859..bb68cb1b04ed 100644 --- a/include/uapi/rdma/Kbuild +++ b/include/uapi/rdma/Kbuild | |||
@@ -16,3 +16,4 @@ header-y += nes-abi.h | |||
16 | header-y += ocrdma-abi.h | 16 | header-y += ocrdma-abi.h |
17 | header-y += hns-abi.h | 17 | header-y += hns-abi.h |
18 | header-y += vmw_pvrdma-abi.h | 18 | header-y += vmw_pvrdma-abi.h |
19 | header-y += qedr-abi.h | ||
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h index 48a19bda071b..d24eee12128f 100644 --- a/include/uapi/rdma/cxgb3-abi.h +++ b/include/uapi/rdma/cxgb3-abi.h | |||
@@ -30,7 +30,7 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | #ifndef CXGB3_ABI_USER_H | 32 | #ifndef CXGB3_ABI_USER_H |
33 | #define CXBG3_ABI_USER_H | 33 | #define CXGB3_ABI_USER_H |
34 | 34 | ||
35 | #include <linux/types.h> | 35 | #include <linux/types.h> |
36 | 36 | ||
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 229a5d5df977..3d55d95dcf49 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/bpf.h> | 12 | #include <linux/bpf.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/vmalloc.h> | ||
15 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
16 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
17 | #include <linux/filter.h> | 16 | #include <linux/filter.h> |
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
74 | if (array_size >= U32_MAX - PAGE_SIZE) | 73 | if (array_size >= U32_MAX - PAGE_SIZE) |
75 | return ERR_PTR(-ENOMEM); | 74 | return ERR_PTR(-ENOMEM); |
76 | 75 | ||
77 | |||
78 | /* allocate all map elements and zero-initialize them */ | 76 | /* allocate all map elements and zero-initialize them */ |
79 | array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); | 77 | array = bpf_map_area_alloc(array_size); |
80 | if (!array) { | 78 | if (!array) |
81 | array = vzalloc(array_size); | 79 | return ERR_PTR(-ENOMEM); |
82 | if (!array) | ||
83 | return ERR_PTR(-ENOMEM); | ||
84 | } | ||
85 | 80 | ||
86 | /* copy mandatory map attributes */ | 81 | /* copy mandatory map attributes */ |
87 | array->map.map_type = attr->map_type; | 82 | array->map.map_type = attr->map_type; |
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
97 | 92 | ||
98 | if (array_size >= U32_MAX - PAGE_SIZE || | 93 | if (array_size >= U32_MAX - PAGE_SIZE || |
99 | elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { | 94 | elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { |
100 | kvfree(array); | 95 | bpf_map_area_free(array); |
101 | return ERR_PTR(-ENOMEM); | 96 | return ERR_PTR(-ENOMEM); |
102 | } | 97 | } |
103 | out: | 98 | out: |
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map) | |||
262 | if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) | 257 | if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) |
263 | bpf_array_free_percpu(array); | 258 | bpf_array_free_percpu(array); |
264 | 259 | ||
265 | kvfree(array); | 260 | bpf_map_area_free(array); |
266 | } | 261 | } |
267 | 262 | ||
268 | static const struct bpf_map_ops array_ops = { | 263 | static const struct bpf_map_ops array_ops = { |
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map) | |||
319 | /* make sure it's empty */ | 314 | /* make sure it's empty */ |
320 | for (i = 0; i < array->map.max_entries; i++) | 315 | for (i = 0; i < array->map.max_entries; i++) |
321 | BUG_ON(array->ptrs[i] != NULL); | 316 | BUG_ON(array->ptrs[i] != NULL); |
322 | kvfree(array); | 317 | |
318 | bpf_map_area_free(array); | ||
323 | } | 319 | } |
324 | 320 | ||
325 | static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) | 321 | static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 3f2bb58952d8..a753bbe7df0a 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/bpf.h> | 13 | #include <linux/bpf.h> |
14 | #include <linux/jhash.h> | 14 | #include <linux/jhash.h> |
15 | #include <linux/filter.h> | 15 | #include <linux/filter.h> |
16 | #include <linux/vmalloc.h> | ||
17 | #include "percpu_freelist.h" | 16 | #include "percpu_freelist.h" |
18 | #include "bpf_lru_list.h" | 17 | #include "bpf_lru_list.h" |
19 | 18 | ||
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab) | |||
103 | free_percpu(pptr); | 102 | free_percpu(pptr); |
104 | } | 103 | } |
105 | free_elems: | 104 | free_elems: |
106 | vfree(htab->elems); | 105 | bpf_map_area_free(htab->elems); |
107 | } | 106 | } |
108 | 107 | ||
109 | static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, | 108 | static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, |
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab) | |||
125 | { | 124 | { |
126 | int err = -ENOMEM, i; | 125 | int err = -ENOMEM, i; |
127 | 126 | ||
128 | htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); | 127 | htab->elems = bpf_map_area_alloc(htab->elem_size * |
128 | htab->map.max_entries); | ||
129 | if (!htab->elems) | 129 | if (!htab->elems) |
130 | return -ENOMEM; | 130 | return -ENOMEM; |
131 | 131 | ||
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
320 | goto free_htab; | 320 | goto free_htab; |
321 | 321 | ||
322 | err = -ENOMEM; | 322 | err = -ENOMEM; |
323 | htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), | 323 | htab->buckets = bpf_map_area_alloc(htab->n_buckets * |
324 | GFP_USER | __GFP_NOWARN); | 324 | sizeof(struct bucket)); |
325 | 325 | if (!htab->buckets) | |
326 | if (!htab->buckets) { | 326 | goto free_htab; |
327 | htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket)); | ||
328 | if (!htab->buckets) | ||
329 | goto free_htab; | ||
330 | } | ||
331 | 327 | ||
332 | for (i = 0; i < htab->n_buckets; i++) { | 328 | for (i = 0; i < htab->n_buckets; i++) { |
333 | INIT_HLIST_HEAD(&htab->buckets[i].head); | 329 | INIT_HLIST_HEAD(&htab->buckets[i].head); |
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
354 | free_extra_elems: | 350 | free_extra_elems: |
355 | free_percpu(htab->extra_elems); | 351 | free_percpu(htab->extra_elems); |
356 | free_buckets: | 352 | free_buckets: |
357 | kvfree(htab->buckets); | 353 | bpf_map_area_free(htab->buckets); |
358 | free_htab: | 354 | free_htab: |
359 | kfree(htab); | 355 | kfree(htab); |
360 | return ERR_PTR(err); | 356 | return ERR_PTR(err); |
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map) | |||
1014 | prealloc_destroy(htab); | 1010 | prealloc_destroy(htab); |
1015 | 1011 | ||
1016 | free_percpu(htab->extra_elems); | 1012 | free_percpu(htab->extra_elems); |
1017 | kvfree(htab->buckets); | 1013 | bpf_map_area_free(htab->buckets); |
1018 | kfree(htab); | 1014 | kfree(htab); |
1019 | } | 1015 | } |
1020 | 1016 | ||
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 732ae16d12b7..be8519148c25 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <linux/bpf.h> | 7 | #include <linux/bpf.h> |
8 | #include <linux/jhash.h> | 8 | #include <linux/jhash.h> |
9 | #include <linux/filter.h> | 9 | #include <linux/filter.h> |
10 | #include <linux/vmalloc.h> | ||
11 | #include <linux/stacktrace.h> | 10 | #include <linux/stacktrace.h> |
12 | #include <linux/perf_event.h> | 11 | #include <linux/perf_event.h> |
13 | #include "percpu_freelist.h" | 12 | #include "percpu_freelist.h" |
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) | |||
32 | u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; | 31 | u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; |
33 | int err; | 32 | int err; |
34 | 33 | ||
35 | smap->elems = vzalloc(elem_size * smap->map.max_entries); | 34 | smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries); |
36 | if (!smap->elems) | 35 | if (!smap->elems) |
37 | return -ENOMEM; | 36 | return -ENOMEM; |
38 | 37 | ||
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) | |||
45 | return 0; | 44 | return 0; |
46 | 45 | ||
47 | free_elems: | 46 | free_elems: |
48 | vfree(smap->elems); | 47 | bpf_map_area_free(smap->elems); |
49 | return err; | 48 | return err; |
50 | } | 49 | } |
51 | 50 | ||
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) | |||
76 | if (cost >= U32_MAX - PAGE_SIZE) | 75 | if (cost >= U32_MAX - PAGE_SIZE) |
77 | return ERR_PTR(-E2BIG); | 76 | return ERR_PTR(-E2BIG); |
78 | 77 | ||
79 | smap = kzalloc(cost, GFP_USER | __GFP_NOWARN); | 78 | smap = bpf_map_area_alloc(cost); |
80 | if (!smap) { | 79 | if (!smap) |
81 | smap = vzalloc(cost); | 80 | return ERR_PTR(-ENOMEM); |
82 | if (!smap) | ||
83 | return ERR_PTR(-ENOMEM); | ||
84 | } | ||
85 | 81 | ||
86 | err = -E2BIG; | 82 | err = -E2BIG; |
87 | cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); | 83 | cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); |
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) | |||
112 | put_buffers: | 108 | put_buffers: |
113 | put_callchain_buffers(); | 109 | put_callchain_buffers(); |
114 | free_smap: | 110 | free_smap: |
115 | kvfree(smap); | 111 | bpf_map_area_free(smap); |
116 | return ERR_PTR(err); | 112 | return ERR_PTR(err); |
117 | } | 113 | } |
118 | 114 | ||
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map) | |||
262 | /* wait for bpf programs to complete before freeing stack map */ | 258 | /* wait for bpf programs to complete before freeing stack map */ |
263 | synchronize_rcu(); | 259 | synchronize_rcu(); |
264 | 260 | ||
265 | vfree(smap->elems); | 261 | bpf_map_area_free(smap->elems); |
266 | pcpu_freelist_destroy(&smap->freelist); | 262 | pcpu_freelist_destroy(&smap->freelist); |
267 | kvfree(smap); | 263 | bpf_map_area_free(smap); |
268 | put_callchain_buffers(); | 264 | put_callchain_buffers(); |
269 | } | 265 | } |
270 | 266 | ||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1d6b29e4e2c3..19b6129eab23 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/bpf.h> | 12 | #include <linux/bpf.h> |
13 | #include <linux/syscalls.h> | 13 | #include <linux/syscalls.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/mmzone.h> | ||
15 | #include <linux/anon_inodes.h> | 17 | #include <linux/anon_inodes.h> |
16 | #include <linux/file.h> | 18 | #include <linux/file.h> |
17 | #include <linux/license.h> | 19 | #include <linux/license.h> |
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl) | |||
49 | list_add(&tl->list_node, &bpf_map_types); | 51 | list_add(&tl->list_node, &bpf_map_types); |
50 | } | 52 | } |
51 | 53 | ||
54 | void *bpf_map_area_alloc(size_t size) | ||
55 | { | ||
56 | /* We definitely need __GFP_NORETRY, so OOM killer doesn't | ||
57 | * trigger under memory pressure as we really just want to | ||
58 | * fail instead. | ||
59 | */ | ||
60 | const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; | ||
61 | void *area; | ||
62 | |||
63 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { | ||
64 | area = kmalloc(size, GFP_USER | flags); | ||
65 | if (area != NULL) | ||
66 | return area; | ||
67 | } | ||
68 | |||
69 | return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags, | ||
70 | PAGE_KERNEL); | ||
71 | } | ||
72 | |||
73 | void bpf_map_area_free(void *area) | ||
74 | { | ||
75 | kvfree(area); | ||
76 | } | ||
77 | |||
52 | int bpf_map_precharge_memlock(u32 pages) | 78 | int bpf_map_precharge_memlock(u32 pages) |
53 | { | 79 | { |
54 | struct user_struct *user = get_current_user(); | 80 | struct user_struct *user = get_current_user(); |
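The two helpers above centralize the kmalloc-first, vmalloc-fallback pattern that arraymap, hashtab and stackmap previously open-coded, with __GFP_NORETRY keeping the OOM killer out of the picture. A minimal sketch of how a map type consumes them; example_map and its size math are illustrative, not part of this series:

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct example_map {
	struct bpf_map map;
	u8 data[];			/* per-entry storage */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	u64 size = sizeof(struct example_map) +
		   (u64)attr->max_entries * round_up(attr->value_size, 8);
	struct example_map *emap;

	if (size >= U32_MAX - PAGE_SIZE)	/* same overflow guard as arraymap */
		return ERR_PTR(-ENOMEM);

	emap = bpf_map_area_alloc(size);	/* zeroed; fails instead of OOM-killing */
	if (!emap)
		return ERR_PTR(-ENOMEM);

	return &emap->map;
}

static void example_map_free(struct bpf_map *map)
{
	struct example_map *emap = container_of(map, struct example_map, map);

	bpf_map_area_free(emap);		/* kvfree() picks the right path */
}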
diff --git a/kernel/panic.c b/kernel/panic.c index 901c4fb46002..08aa88dde7de 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...) | |||
249 | * Delay timeout seconds before rebooting the machine. | 249 | * Delay timeout seconds before rebooting the machine. |
250 | * We can't use the "normal" timers since we just panicked. | 250 | * We can't use the "normal" timers since we just panicked. |
251 | */ | 251 | */ |
252 | pr_emerg("Rebooting in %d seconds..", panic_timeout); | 252 | pr_emerg("Rebooting in %d seconds..\n", panic_timeout); |
253 | 253 | ||
254 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { | 254 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { |
255 | touch_nmi_watchdog(); | 255 | touch_nmi_watchdog(); |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f67ceb7768b8..15e6baef5c73 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = { | |||
46 | const char *mem_sleep_states[PM_SUSPEND_MAX]; | 46 | const char *mem_sleep_states[PM_SUSPEND_MAX]; |
47 | 47 | ||
48 | suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; | 48 | suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; |
49 | suspend_state_t mem_sleep_default = PM_SUSPEND_MAX; | 49 | static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM; |
50 | 50 | ||
51 | unsigned int pm_suspend_global_flags; | 51 | unsigned int pm_suspend_global_flags; |
52 | EXPORT_SYMBOL_GPL(pm_suspend_global_flags); | 52 | EXPORT_SYMBOL_GPL(pm_suspend_global_flags); |
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops) | |||
168 | } | 168 | } |
169 | if (valid_state(PM_SUSPEND_MEM)) { | 169 | if (valid_state(PM_SUSPEND_MEM)) { |
170 | mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; | 170 | mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; |
171 | if (mem_sleep_default >= PM_SUSPEND_MEM) | 171 | if (mem_sleep_default == PM_SUSPEND_MEM) |
172 | mem_sleep_current = PM_SUSPEND_MEM; | 172 | mem_sleep_current = PM_SUSPEND_MEM; |
173 | } | 173 | } |
174 | 174 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8dbaec0e4f7f..1aea594a54db 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int | |||
2475 | break; | 2475 | break; |
2476 | if (neg) | 2476 | if (neg) |
2477 | continue; | 2477 | continue; |
2478 | val = convmul * val / convdiv; | ||
2478 | if ((min && val < *min) || (max && val > *max)) | 2479 | if ((min && val < *min) || (max && val > *max)) |
2479 | continue; | 2480 | continue; |
2480 | *i = val; | 2481 | *i = val; |
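The one-line sysctl fix above converts the user-supplied value into the table's internal unit before the min/max comparison; previously a millisecond input was range-checked against bounds stored in jiffies. A standalone sketch of the corrected ordering, with the HZ value and the bounds assumed purely for illustration:

#include <stdio.h>

static int store_checked(unsigned long val, unsigned long convmul,
			 unsigned long convdiv, unsigned long min,
			 unsigned long max, unsigned long *out)
{
	val = convmul * val / convdiv;	/* convert first (the fix) */
	if (val < min || val > max)
		return -1;		/* reject in internal units */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long jiffies_val;

	/* 2000 ms with an assumed HZ of 250 -> 500 jiffies; the
	 * bounds 100..1000 are likewise in jiffies. */
	if (store_checked(2000, 250, 1000, 100, 1000, &jiffies_val) == 0)
		printf("stored %lu jiffies\n", jiffies_val);
	return 0;
}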
diff --git a/kernel/ucount.c b/kernel/ucount.c index 9d20d5dd298a..4bbd38ec3788 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c | |||
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
128 | struct hlist_head *hashent = ucounts_hashentry(ns, uid); | 128 | struct hlist_head *hashent = ucounts_hashentry(ns, uid); |
129 | struct ucounts *ucounts, *new; | 129 | struct ucounts *ucounts, *new; |
130 | 130 | ||
131 | spin_lock(&ucounts_lock); | 131 | spin_lock_irq(&ucounts_lock); |
132 | ucounts = find_ucounts(ns, uid, hashent); | 132 | ucounts = find_ucounts(ns, uid, hashent); |
133 | if (!ucounts) { | 133 | if (!ucounts) { |
134 | spin_unlock(&ucounts_lock); | 134 | spin_unlock_irq(&ucounts_lock); |
135 | 135 | ||
136 | new = kzalloc(sizeof(*new), GFP_KERNEL); | 136 | new = kzalloc(sizeof(*new), GFP_KERNEL); |
137 | if (!new) | 137 | if (!new) |
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
141 | new->uid = uid; | 141 | new->uid = uid; |
142 | atomic_set(&new->count, 0); | 142 | atomic_set(&new->count, 0); |
143 | 143 | ||
144 | spin_lock(&ucounts_lock); | 144 | spin_lock_irq(&ucounts_lock); |
145 | ucounts = find_ucounts(ns, uid, hashent); | 145 | ucounts = find_ucounts(ns, uid, hashent); |
146 | if (ucounts) { | 146 | if (ucounts) { |
147 | kfree(new); | 147 | kfree(new); |
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
152 | } | 152 | } |
153 | if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) | 153 | if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) |
154 | ucounts = NULL; | 154 | ucounts = NULL; |
155 | spin_unlock(&ucounts_lock); | 155 | spin_unlock_irq(&ucounts_lock); |
156 | return ucounts; | 156 | return ucounts; |
157 | } | 157 | } |
158 | 158 | ||
159 | static void put_ucounts(struct ucounts *ucounts) | 159 | static void put_ucounts(struct ucounts *ucounts) |
160 | { | 160 | { |
161 | unsigned long flags; | ||
162 | |||
161 | if (atomic_dec_and_test(&ucounts->count)) { | 163 | if (atomic_dec_and_test(&ucounts->count)) { |
162 | spin_lock(&ucounts_lock); | 164 | spin_lock_irqsave(&ucounts_lock, flags); |
163 | hlist_del_init(&ucounts->node); | 165 | hlist_del_init(&ucounts->node); |
164 | spin_unlock(&ucounts_lock); | 166 | spin_unlock_irqrestore(&ucounts_lock, flags); |
165 | 167 | ||
166 | kfree(ucounts); | 168 | kfree(ucounts); |
167 | } | 169 | } |
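The ucounts change switches every acquisition of ucounts_lock to an irq-safe variant: the put path can be reached from contexts where interrupts may already be disabled, and a plain spin_lock there risks deadlocking against an interrupt taken on the same CPU while the lock is held. The idiom in isolation, as a sketch with made-up object names:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(obj_lock);
static LIST_HEAD(obj_list);

struct my_obj { struct list_head node; };

/* Process context with interrupts known enabled: _irq is sufficient. */
static void obj_add(struct my_obj *obj)
{
	spin_lock_irq(&obj_lock);
	list_add(&obj->node, &obj_list);
	spin_unlock_irq(&obj_lock);
}

/* May run with interrupts already off, so save and restore the state. */
static void obj_del(struct my_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj_lock, flags);
	list_del_init(&obj->node);
	spin_unlock_irqrestore(&obj_lock, flags);
}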
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d4b0fa01cae3..63177be0159e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | |||
49 | #define for_each_watchdog_cpu(cpu) \ | 49 | #define for_each_watchdog_cpu(cpu) \ |
50 | for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) | 50 | for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) |
51 | 51 | ||
52 | atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); | ||
53 | |||
52 | /* | 54 | /* |
53 | * The 'watchdog_running' variable is set to 1 when the watchdog threads | 55 | * The 'watchdog_running' variable is set to 1 when the watchdog threads |
54 | * are registered/started and is set to 0 when the watchdog threads are | 56 | * are registered/started and is set to 0 when the watchdog threads are |
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
260 | int duration; | 262 | int duration; |
261 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; | 263 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; |
262 | 264 | ||
265 | if (atomic_read(&watchdog_park_in_progress) != 0) | ||
266 | return HRTIMER_NORESTART; | ||
267 | |||
263 | /* kick the hardlockup detector */ | 268 | /* kick the hardlockup detector */ |
264 | watchdog_interrupt_count(); | 269 | watchdog_interrupt_count(); |
265 | 270 | ||
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void) | |||
467 | { | 472 | { |
468 | int cpu, ret = 0; | 473 | int cpu, ret = 0; |
469 | 474 | ||
475 | atomic_set(&watchdog_park_in_progress, 1); | ||
476 | |||
470 | for_each_watchdog_cpu(cpu) { | 477 | for_each_watchdog_cpu(cpu) { |
471 | ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); | 478 | ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); |
472 | if (ret) | 479 | if (ret) |
473 | break; | 480 | break; |
474 | } | 481 | } |
475 | 482 | ||
483 | atomic_set(&watchdog_park_in_progress, 0); | ||
484 | |||
476 | return ret; | 485 | return ret; |
477 | } | 486 | } |
478 | 487 | ||
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 84016c8aee6b..12b8dd640786 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event, | |||
84 | /* Ensure the watchdog never gets throttled */ | 84 | /* Ensure the watchdog never gets throttled */ |
85 | event->hw.interrupts = 0; | 85 | event->hw.interrupts = 0; |
86 | 86 | ||
87 | if (atomic_read(&watchdog_park_in_progress) != 0) | ||
88 | return; | ||
89 | |||
87 | if (__this_cpu_read(watchdog_nmi_touch) == true) { | 90 | if (__this_cpu_read(watchdog_nmi_touch) == true) { |
88 | __this_cpu_write(watchdog_nmi_touch, false); | 91 | __this_cpu_write(watchdog_nmi_touch, false); |
89 | return; | 92 | return; |
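The watchdog.c and watchdog_hld.c hunks implement one handshake: watchdog_park_threads() raises watchdog_park_in_progress before parking the per-CPU threads, and the hrtimer and NMI callbacks bail out while it is set, so neither samples half-parked state. Reduced to its skeleton, with do_park_threads() as an assumed stand-in for the per-CPU parking loop:

#include <linux/atomic.h>

static atomic_t park_in_progress = ATOMIC_INIT(0);

int do_park_threads(void);	/* assumed helper: parks each kthread */

/* Interrupt side: return immediately while parking is underway. */
void periodic_check(void)
{
	if (atomic_read(&park_in_progress) != 0)
		return;
	/* ... normal softlockup/hardlockup detection ... */
}

/* Control side: bracket the parking window with the flag. */
int park_all(void)
{
	int ret;

	atomic_set(&park_in_progress, 1);
	ret = do_park_threads();
	atomic_set(&park_in_progress, 0);
	return ret;
}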
diff --git a/lib/ioremap.c b/lib/ioremap.c index 86c8911b0e3a..a3e14ce92a56 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr, | |||
144 | 144 | ||
145 | return err; | 145 | return err; |
146 | } | 146 | } |
147 | EXPORT_SYMBOL_GPL(ioremap_page_range); | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 0b92d605fb69..84812a9fb16f 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) | |||
769 | struct radix_tree_node *old = child; | 769 | struct radix_tree_node *old = child; |
770 | offset = child->offset + 1; | 770 | offset = child->offset + 1; |
771 | child = child->parent; | 771 | child = child->parent; |
772 | WARN_ON_ONCE(!list_empty(&node->private_list)); | 772 | WARN_ON_ONCE(!list_empty(&old->private_list)); |
773 | radix_tree_node_free(old); | 773 | radix_tree_node_free(old); |
774 | if (old == entry_to_node(node)) | 774 | if (old == entry_to_node(node)) |
775 | return; | 775 | return; |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9a6bd6c8d55a..5f3ad65c85de 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
783 | 783 | ||
784 | assert_spin_locked(pmd_lockptr(mm, pmd)); | 784 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
785 | 785 | ||
786 | /* | ||
787 | * When we COW a devmap PMD entry, we split it into PTEs, so we should | ||
788 | * not be in this function with `flags & FOLL_COW` set. | ||
789 | */ | ||
790 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); | ||
791 | |||
786 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) | 792 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
787 | return NULL; | 793 | return NULL; |
788 | 794 | ||
@@ -1128,6 +1134,16 @@ out_unlock: | |||
1128 | return ret; | 1134 | return ret; |
1129 | } | 1135 | } |
1130 | 1136 | ||
1137 | /* | ||
1138 | * FOLL_FORCE can write to even unwritable pmd's, but only | ||
1139 | * after we've gone through a COW cycle and they are dirty. | ||
1140 | */ | ||
1141 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) | ||
1142 | { | ||
1143 | return pmd_write(pmd) || | ||
1144 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); | ||
1145 | } | ||
1146 | |||
1131 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | 1147 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
1132 | unsigned long addr, | 1148 | unsigned long addr, |
1133 | pmd_t *pmd, | 1149 | pmd_t *pmd, |
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | |||
1138 | 1154 | ||
1139 | assert_spin_locked(pmd_lockptr(mm, pmd)); | 1155 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
1140 | 1156 | ||
1141 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) | 1157 | if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) |
1142 | goto out; | 1158 | goto out; |
1143 | 1159 | ||
1144 | /* Avoid dumping huge zero page */ | 1160 | /* Avoid dumping huge zero page */ |
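can_follow_write_pmd() transplants the FOLL_FORCE/FOLL_COW rule that the earlier get_user_pages fix established at PTE level: a forced write through an unwritable entry is allowed only once a COW cycle has marked it dirty. For comparison, the PTE-level check in mm/gup.c reads essentially:

/* PTE-level analogue in mm/gup.c that the PMD check above mirrors */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}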
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a63a8f832664..b822e158b319 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count) | |||
4353 | return ret; | 4353 | return ret; |
4354 | } | 4354 | } |
4355 | 4355 | ||
4356 | /* Try charges one by one with reclaim */ | 4356 | /* Try charges one by one with reclaim, but do not retry */ |
4357 | while (count--) { | 4357 | while (count--) { |
4358 | ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); | 4358 | ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); |
4359 | if (ret) | 4359 | if (ret) |
4360 | return ret; | 4360 | return ret; |
4361 | mc.precharge++; | 4361 | mc.precharge++; |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e43142c15631..ca2723d47338 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg) | |||
1033 | node_set_state(node, N_MEMORY); | 1033 | node_set_state(node, N_MEMORY); |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | int zone_can_shift(unsigned long pfn, unsigned long nr_pages, | 1036 | bool zone_can_shift(unsigned long pfn, unsigned long nr_pages, |
1037 | enum zone_type target) | 1037 | enum zone_type target, int *zone_shift) |
1038 | { | 1038 | { |
1039 | struct zone *zone = page_zone(pfn_to_page(pfn)); | 1039 | struct zone *zone = page_zone(pfn_to_page(pfn)); |
1040 | enum zone_type idx = zone_idx(zone); | 1040 | enum zone_type idx = zone_idx(zone); |
1041 | int i; | 1041 | int i; |
1042 | 1042 | ||
1043 | *zone_shift = 0; | ||
1044 | |||
1043 | if (idx < target) { | 1045 | if (idx < target) { |
1044 | /* pages must be at end of current zone */ | 1046 | /* pages must be at end of current zone */ |
1045 | if (pfn + nr_pages != zone_end_pfn(zone)) | 1047 | if (pfn + nr_pages != zone_end_pfn(zone)) |
1046 | return 0; | 1048 | return false; |
1047 | 1049 | ||
1048 | /* no zones in use between current zone and target */ | 1050 | /* no zones in use between current zone and target */ |
1049 | for (i = idx + 1; i < target; i++) | 1051 | for (i = idx + 1; i < target; i++) |
1050 | if (zone_is_initialized(zone - idx + i)) | 1052 | if (zone_is_initialized(zone - idx + i)) |
1051 | return 0; | 1053 | return false; |
1052 | } | 1054 | } |
1053 | 1055 | ||
1054 | if (target < idx) { | 1056 | if (target < idx) { |
1055 | /* pages must be at beginning of current zone */ | 1057 | /* pages must be at beginning of current zone */ |
1056 | if (pfn != zone->zone_start_pfn) | 1058 | if (pfn != zone->zone_start_pfn) |
1057 | return 0; | 1059 | return false; |
1058 | 1060 | ||
1059 | /* no zones in use between current zone and target */ | 1061 | /* no zones in use between current zone and target */ |
1060 | for (i = target + 1; i < idx; i++) | 1062 | for (i = target + 1; i < idx; i++) |
1061 | if (zone_is_initialized(zone - idx + i)) | 1063 | if (zone_is_initialized(zone - idx + i)) |
1062 | return 0; | 1064 | return false; |
1063 | } | 1065 | } |
1064 | 1066 | ||
1065 | return target - idx; | 1067 | *zone_shift = target - idx; |
1068 | return true; | ||
1066 | } | 1069 | } |
1067 | 1070 | ||
1068 | /* Must be protected by mem_hotplug_begin() */ | 1071 | /* Must be protected by mem_hotplug_begin() */ |
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ | |||
1089 | !can_online_high_movable(zone)) | 1092 | !can_online_high_movable(zone)) |
1090 | return -EINVAL; | 1093 | return -EINVAL; |
1091 | 1094 | ||
1092 | if (online_type == MMOP_ONLINE_KERNEL) | 1095 | if (online_type == MMOP_ONLINE_KERNEL) { |
1093 | zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL); | 1096 | if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift)) |
1094 | else if (online_type == MMOP_ONLINE_MOVABLE) | 1097 | return -EINVAL; |
1095 | zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE); | 1098 | } else if (online_type == MMOP_ONLINE_MOVABLE) { |
1099 | if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift)) | ||
1100 | return -EINVAL; | ||
1101 | } | ||
1096 | 1102 | ||
1097 | zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); | 1103 | zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); |
1098 | if (!zone) | 1104 | if (!zone) |
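The signature change removes an ambiguity rather than adding behavior: with the old int return, a legitimate shift of 0 (target == idx) was indistinguishable from the failure case, which also returned 0. A caller sketch, with do_move_pfn_range() as an assumed stand-in for the follow-up work:

static int online_sketch(unsigned long pfn, unsigned long nr_pages)
{
	int zone_shift;

	if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
		return -EINVAL;		/* the range cannot be moved at all */

	/* zone_shift == 0 is now an ordinary success, not an error */
	return do_move_pfn_range(zone_shift, pfn, nr_pages);
}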
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 2e346645eb80..1e7873e40c9a 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -2017,8 +2017,8 @@ retry_cpuset: | |||
2017 | 2017 | ||
2018 | nmask = policy_nodemask(gfp, pol); | 2018 | nmask = policy_nodemask(gfp, pol); |
2019 | zl = policy_zonelist(gfp, pol, node); | 2019 | zl = policy_zonelist(gfp, pol, node); |
2020 | mpol_cond_put(pol); | ||
2021 | page = __alloc_pages_nodemask(gfp, order, zl, nmask); | 2020 | page = __alloc_pages_nodemask(gfp, order, zl, nmask); |
2021 | mpol_cond_put(pol); | ||
2022 | out: | 2022 | out: |
2023 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) | 2023 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) |
2024 | goto retry_cpuset; | 2024 | goto retry_cpuset; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d604d2596b7b..f3e0c69a97b7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
3523 | struct page *page = NULL; | 3523 | struct page *page = NULL; |
3524 | unsigned int alloc_flags; | 3524 | unsigned int alloc_flags; |
3525 | unsigned long did_some_progress; | 3525 | unsigned long did_some_progress; |
3526 | enum compact_priority compact_priority = DEF_COMPACT_PRIORITY; | 3526 | enum compact_priority compact_priority; |
3527 | enum compact_result compact_result; | 3527 | enum compact_result compact_result; |
3528 | int compaction_retries = 0; | 3528 | int compaction_retries; |
3529 | int no_progress_loops = 0; | 3529 | int no_progress_loops; |
3530 | unsigned long alloc_start = jiffies; | 3530 | unsigned long alloc_start = jiffies; |
3531 | unsigned int stall_timeout = 10 * HZ; | 3531 | unsigned int stall_timeout = 10 * HZ; |
3532 | unsigned int cpuset_mems_cookie; | ||
3532 | 3533 | ||
3533 | /* | 3534 | /* |
3534 | * In the slowpath, we sanity check order to avoid ever trying to | 3535 | * In the slowpath, we sanity check order to avoid ever trying to |
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
3549 | (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) | 3550 | (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) |
3550 | gfp_mask &= ~__GFP_ATOMIC; | 3551 | gfp_mask &= ~__GFP_ATOMIC; |
3551 | 3552 | ||
3553 | retry_cpuset: | ||
3554 | compaction_retries = 0; | ||
3555 | no_progress_loops = 0; | ||
3556 | compact_priority = DEF_COMPACT_PRIORITY; | ||
3557 | cpuset_mems_cookie = read_mems_allowed_begin(); | ||
3558 | /* | ||
3559 | * We need to recalculate the starting point for the zonelist iterator | ||
3560 | * because we might have used different nodemask in the fast path, or | ||
3561 | * there was a cpuset modification and we are retrying - otherwise we | ||
3562 | * could end up iterating over non-eligible zones endlessly. | ||
3563 | */ | ||
3564 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, | ||
3565 | ac->high_zoneidx, ac->nodemask); | ||
3566 | if (!ac->preferred_zoneref->zone) | ||
3567 | goto nopage; | ||
3568 | |||
3569 | |||
3552 | /* | 3570 | /* |
3553 | * The fast path uses conservative alloc_flags to succeed only until | 3571 | * The fast path uses conservative alloc_flags to succeed only until |
3554 | * kswapd needs to be woken up, and to avoid the cost of setting up | 3572 | * kswapd needs to be woken up, and to avoid the cost of setting up |
@@ -3708,6 +3726,13 @@ retry: | |||
3708 | &compaction_retries)) | 3726 | &compaction_retries)) |
3709 | goto retry; | 3727 | goto retry; |
3710 | 3728 | ||
3729 | /* | ||
3730 | * It's possible we raced with cpuset update so the OOM would be | ||
3731 | * premature (see below the nopage: label for full explanation). | ||
3732 | */ | ||
3733 | if (read_mems_allowed_retry(cpuset_mems_cookie)) | ||
3734 | goto retry_cpuset; | ||
3735 | |||
3711 | /* Reclaim has failed us, start killing things */ | 3736 | /* Reclaim has failed us, start killing things */ |
3712 | page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); | 3737 | page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); |
3713 | if (page) | 3738 | if (page) |
@@ -3720,6 +3745,16 @@ retry: | |||
3720 | } | 3745 | } |
3721 | 3746 | ||
3722 | nopage: | 3747 | nopage: |
3748 | /* | ||
3749 | * When updating a task's mems_allowed or mempolicy nodemask, it is | ||
3750 | * possible to race with parallel threads in such a way that our | ||
3751 | * allocation can fail while the mask is being updated. If we are about | ||
3752 | * to fail, check if the cpuset changed during allocation and if so, | ||
3753 | * retry. | ||
3754 | */ | ||
3755 | if (read_mems_allowed_retry(cpuset_mems_cookie)) | ||
3756 | goto retry_cpuset; | ||
3757 | |||
3723 | warn_alloc(gfp_mask, | 3758 | warn_alloc(gfp_mask, |
3724 | "page allocation failure: order:%u", order); | 3759 | "page allocation failure: order:%u", order); |
3725 | got_pg: | 3760 | got_pg: |
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |||
3734 | struct zonelist *zonelist, nodemask_t *nodemask) | 3769 | struct zonelist *zonelist, nodemask_t *nodemask) |
3735 | { | 3770 | { |
3736 | struct page *page; | 3771 | struct page *page; |
3737 | unsigned int cpuset_mems_cookie; | ||
3738 | unsigned int alloc_flags = ALLOC_WMARK_LOW; | 3772 | unsigned int alloc_flags = ALLOC_WMARK_LOW; |
3739 | gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ | 3773 | gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ |
3740 | struct alloc_context ac = { | 3774 | struct alloc_context ac = { |
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |||
3771 | if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) | 3805 | if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) |
3772 | alloc_flags |= ALLOC_CMA; | 3806 | alloc_flags |= ALLOC_CMA; |
3773 | 3807 | ||
3774 | retry_cpuset: | ||
3775 | cpuset_mems_cookie = read_mems_allowed_begin(); | ||
3776 | |||
3777 | /* Dirty zone balancing only done in the fast path */ | 3808 | /* Dirty zone balancing only done in the fast path */ |
3778 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); | 3809 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); |
3779 | 3810 | ||
@@ -3784,8 +3815,13 @@ retry_cpuset: | |||
3784 | */ | 3815 | */ |
3785 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, | 3816 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, |
3786 | ac.high_zoneidx, ac.nodemask); | 3817 | ac.high_zoneidx, ac.nodemask); |
3787 | if (!ac.preferred_zoneref) { | 3818 | if (!ac.preferred_zoneref->zone) { |
3788 | page = NULL; | 3819 | page = NULL; |
3820 | /* | ||
3821 | * This might be due to race with cpuset_current_mems_allowed | ||
3822 | * update, so make sure we retry with original nodemask in the | ||
3823 | * slow path. | ||
3824 | */ | ||
3789 | goto no_zone; | 3825 | goto no_zone; |
3790 | } | 3826 | } |
3791 | 3827 | ||
@@ -3794,6 +3830,7 @@ retry_cpuset: | |||
3794 | if (likely(page)) | 3830 | if (likely(page)) |
3795 | goto out; | 3831 | goto out; |
3796 | 3832 | ||
3833 | no_zone: | ||
3797 | /* | 3834 | /* |
3798 | * Runtime PM, block IO and its error handling path can deadlock | 3835 | * Runtime PM, block IO and its error handling path can deadlock |
3799 | * because I/O on the device might not complete. | 3836 | * because I/O on the device might not complete. |
@@ -3805,21 +3842,10 @@ retry_cpuset: | |||
3805 | * Restore the original nodemask if it was potentially replaced with | 3842 | * Restore the original nodemask if it was potentially replaced with |
3806 | * &cpuset_current_mems_allowed to optimize the fast-path attempt. | 3843 | * &cpuset_current_mems_allowed to optimize the fast-path attempt. |
3807 | */ | 3844 | */ |
3808 | if (cpusets_enabled()) | 3845 | if (unlikely(ac.nodemask != nodemask)) |
3809 | ac.nodemask = nodemask; | 3846 | ac.nodemask = nodemask; |
3810 | page = __alloc_pages_slowpath(alloc_mask, order, &ac); | ||
3811 | 3847 | ||
3812 | no_zone: | 3848 | page = __alloc_pages_slowpath(alloc_mask, order, &ac); |
3813 | /* | ||
3814 | * When updating a task's mems_allowed, it is possible to race with | ||
3815 | * parallel threads in such a way that an allocation can fail while | ||
3816 | * the mask is being updated. If a page allocation is about to fail, | ||
3817 | * check if the cpuset changed during allocation and if so, retry. | ||
3818 | */ | ||
3819 | if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) { | ||
3820 | alloc_mask = gfp_mask; | ||
3821 | goto retry_cpuset; | ||
3822 | } | ||
3823 | 3849 | ||
3824 | out: | 3850 | out: |
3825 | if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && | 3851 | if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && |
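Moving the cookie handling into the slowpath makes the retry protocol explicit there: read_mems_allowed_begin() opens a seqcount-style read section, and read_mems_allowed_retry() reports whether a concurrent cpuset or mempolicy update invalidated it. Boiled down, with attempt_allocation() as an assumed stand-in for the actual fast/slow path attempt:

#include <linux/cpuset.h>

struct page *attempt_allocation(gfp_t gfp, unsigned int order); /* assumed */

static struct page *alloc_with_cpuset_retry(gfp_t gfp, unsigned int order)
{
	unsigned int cookie;
	struct page *page;

	do {
		cookie = read_mems_allowed_begin();
		page = attempt_allocation(gfp, order);
		/* a concurrent nodemask update bumps the sequence */
	} while (!page && read_mems_allowed_retry(cookie));

	return page;
}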
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, | |||
7248 | .zone = page_zone(pfn_to_page(start)), | 7274 | .zone = page_zone(pfn_to_page(start)), |
7249 | .mode = MIGRATE_SYNC, | 7275 | .mode = MIGRATE_SYNC, |
7250 | .ignore_skip_hint = true, | 7276 | .ignore_skip_hint = true, |
7277 | .gfp_mask = GFP_KERNEL, | ||
7251 | }; | 7278 | }; |
7252 | INIT_LIST_HEAD(&cc.migratepages); | 7279 | INIT_LIST_HEAD(&cc.migratepages); |
7253 | 7280 | ||
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s, | |||
496 | return 1; | 496 | return 1; |
497 | } | 497 | } |
498 | 498 | ||
499 | static void print_section(char *text, u8 *addr, unsigned int length) | 499 | static void print_section(char *level, char *text, u8 *addr, |
500 | unsigned int length) | ||
500 | { | 501 | { |
501 | metadata_access_enable(); | 502 | metadata_access_enable(); |
502 | print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, | 503 | print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, |
503 | length, 1); | 504 | length, 1); |
504 | metadata_access_disable(); | 505 | metadata_access_disable(); |
505 | } | 506 | } |
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) | |||
636 | p, p - addr, get_freepointer(s, p)); | 637 | p, p - addr, get_freepointer(s, p)); |
637 | 638 | ||
638 | if (s->flags & SLAB_RED_ZONE) | 639 | if (s->flags & SLAB_RED_ZONE) |
639 | print_section("Redzone ", p - s->red_left_pad, s->red_left_pad); | 640 | print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, |
641 | s->red_left_pad); | ||
640 | else if (p > addr + 16) | 642 | else if (p > addr + 16) |
641 | print_section("Bytes b4 ", p - 16, 16); | 643 | print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); |
642 | 644 | ||
643 | print_section("Object ", p, min_t(unsigned long, s->object_size, | 645 | print_section(KERN_ERR, "Object ", p, |
644 | PAGE_SIZE)); | 646 | min_t(unsigned long, s->object_size, PAGE_SIZE)); |
645 | if (s->flags & SLAB_RED_ZONE) | 647 | if (s->flags & SLAB_RED_ZONE) |
646 | print_section("Redzone ", p + s->object_size, | 648 | print_section(KERN_ERR, "Redzone ", p + s->object_size, |
647 | s->inuse - s->object_size); | 649 | s->inuse - s->object_size); |
648 | 650 | ||
649 | if (s->offset) | 651 | if (s->offset) |
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) | |||
658 | 660 | ||
659 | if (off != size_from_object(s)) | 661 | if (off != size_from_object(s)) |
660 | /* Beginning of the filler is the free pointer */ | 662 | /* Beginning of the filler is the free pointer */ |
661 | print_section("Padding ", p + off, size_from_object(s) - off); | 663 | print_section(KERN_ERR, "Padding ", p + off, |
664 | size_from_object(s) - off); | ||
662 | 665 | ||
663 | dump_stack(); | 666 | dump_stack(); |
664 | } | 667 | } |
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page) | |||
820 | end--; | 823 | end--; |
821 | 824 | ||
822 | slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); | 825 | slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); |
823 | print_section("Padding ", end - remainder, remainder); | 826 | print_section(KERN_ERR, "Padding ", end - remainder, remainder); |
824 | 827 | ||
825 | restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); | 828 | restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); |
826 | return 0; | 829 | return 0; |
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, | |||
973 | page->freelist); | 976 | page->freelist); |
974 | 977 | ||
975 | if (!alloc) | 978 | if (!alloc) |
976 | print_section("Object ", (void *)object, | 979 | print_section(KERN_INFO, "Object ", (void *)object, |
977 | s->object_size); | 980 | s->object_size); |
978 | 981 | ||
979 | dump_stack(); | 982 | dump_stack(); |
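Threading a level argument through print_section() lets one dumper serve two severities: corruption reports stay at KERN_ERR while the trace() path drops to KERN_INFO. A sketch of the underlying call; print_hex_dump() takes the level string as its first parameter:

#include <linux/printk.h>

static void dump_object(const char *level, const void *obj, size_t len)
{
	print_hex_dump(level, "Object ", DUMP_PREFIX_ADDRESS,
		       16, 1, obj, len, true);
}

/* dump_object(KERN_ERR, p, size)  for corruption reports,
 * dump_object(KERN_INFO, p, size) for allocation tracing. */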
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 9c561e683f4b..0854ebd8613e 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
474 | primary_if = batadv_primary_if_get_selected(bat_priv); | 474 | primary_if = batadv_primary_if_get_selected(bat_priv); |
475 | if (!primary_if) { | 475 | if (!primary_if) { |
476 | ret = -EINVAL; | 476 | ret = -EINVAL; |
477 | goto put_primary_if; | 477 | goto free_skb; |
478 | } | 478 | } |
479 | 479 | ||
480 | /* Create one header to be copied to all fragments */ | 480 | /* Create one header to be copied to all fragments */ |
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
502 | skb_fragment = batadv_frag_create(skb, &frag_header, mtu); | 502 | skb_fragment = batadv_frag_create(skb, &frag_header, mtu); |
503 | if (!skb_fragment) { | 503 | if (!skb_fragment) { |
504 | ret = -ENOMEM; | 504 | ret = -ENOMEM; |
505 | goto free_skb; | 505 | goto put_primary_if; |
506 | } | 506 | } |
507 | 507 | ||
508 | batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); | 508 | batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); |
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
511 | ret = batadv_send_unicast_skb(skb_fragment, neigh_node); | 511 | ret = batadv_send_unicast_skb(skb_fragment, neigh_node); |
512 | if (ret != NET_XMIT_SUCCESS) { | 512 | if (ret != NET_XMIT_SUCCESS) { |
513 | ret = NET_XMIT_DROP; | 513 | ret = NET_XMIT_DROP; |
514 | goto free_skb; | 514 | goto put_primary_if; |
515 | } | 515 | } |
516 | 516 | ||
517 | frag_header.no++; | 517 | frag_header.no++; |
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
519 | /* The initial check in this function should cover this case */ | 519 | /* The initial check in this function should cover this case */ |
520 | if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { | 520 | if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { |
521 | ret = -EINVAL; | 521 | ret = -EINVAL; |
522 | goto free_skb; | 522 | goto put_primary_if; |
523 | } | 523 | } |
524 | } | 524 | } |
525 | 525 | ||
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
527 | if (batadv_skb_head_push(skb, header_size) < 0 || | 527 | if (batadv_skb_head_push(skb, header_size) < 0 || |
528 | pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) { | 528 | pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) { |
529 | ret = -ENOMEM; | 529 | ret = -ENOMEM; |
530 | goto free_skb; | 530 | goto put_primary_if; |
531 | } | 531 | } |
532 | 532 | ||
533 | memcpy(skb->data, &frag_header, header_size); | 533 | memcpy(skb->data, &frag_header, header_size); |
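The batman-adv hunks are purely about unwind ordering: the reference taken on primary_if must be dropped on every error path that follows the acquisition, and the one failure before it must jump past the put. The rule in isolation, with all names (iface_get, iface_put, transmit) assumed for the sketch:

#include <linux/skbuff.h>

struct iface;
struct iface *iface_get(void);			/* takes a reference */
void iface_put(struct iface *primary_if);
int transmit(struct sk_buff *skb, struct iface *primary_if);

static int send_sketch(struct sk_buff *skb)
{
	struct iface *primary_if;
	int ret;

	primary_if = iface_get();
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;		/* nothing acquired: skip the put */
	}

	ret = transmit(skb, primary_if);
	if (ret != 0)
		goto put_primary_if;	/* unwind in reverse order */

	iface_put(primary_if);
	return 0;			/* transmit consumed the skb */

put_primary_if:
	iface_put(primary_if);
free_skb:
	kfree_skb(skb);
	return ret;
}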
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 71c7453268c1..7109b389ea58 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
781 | return 0; | 781 | return 0; |
782 | } | 782 | } |
783 | 783 | ||
784 | static int br_dev_newlink(struct net *src_net, struct net_device *dev, | ||
785 | struct nlattr *tb[], struct nlattr *data[]) | ||
786 | { | ||
787 | struct net_bridge *br = netdev_priv(dev); | ||
788 | |||
789 | if (tb[IFLA_ADDRESS]) { | ||
790 | spin_lock_bh(&br->lock); | ||
791 | br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); | ||
792 | spin_unlock_bh(&br->lock); | ||
793 | } | ||
794 | |||
795 | return register_netdevice(dev); | ||
796 | } | ||
797 | |||
798 | static int br_port_slave_changelink(struct net_device *brdev, | 784 | static int br_port_slave_changelink(struct net_device *brdev, |
799 | struct net_device *dev, | 785 | struct net_device *dev, |
800 | struct nlattr *tb[], | 786 | struct nlattr *tb[], |
@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], | |||
1115 | return 0; | 1101 | return 0; |
1116 | } | 1102 | } |
1117 | 1103 | ||
1104 | static int br_dev_newlink(struct net *src_net, struct net_device *dev, | ||
1105 | struct nlattr *tb[], struct nlattr *data[]) | ||
1106 | { | ||
1107 | struct net_bridge *br = netdev_priv(dev); | ||
1108 | int err; | ||
1109 | |||
1110 | if (tb[IFLA_ADDRESS]) { | ||
1111 | spin_lock_bh(&br->lock); | ||
1112 | br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); | ||
1113 | spin_unlock_bh(&br->lock); | ||
1114 | } | ||
1115 | |||
1116 | err = br_changelink(dev, tb, data); | ||
1117 | if (err) | ||
1118 | return err; | ||
1119 | |||
1120 | return register_netdevice(dev); | ||
1121 | } | ||
1122 | |||
1118 | static size_t br_get_size(const struct net_device *brdev) | 1123 | static size_t br_get_size(const struct net_device *brdev) |
1119 | { | 1124 | { |
1120 | return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ | 1125 | return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ |
diff --git a/net/core/dev.c b/net/core/dev.c index 07b307b0b414..7f218e095361 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2795,9 +2795,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, | |||
2795 | if (skb->ip_summed != CHECKSUM_NONE && | 2795 | if (skb->ip_summed != CHECKSUM_NONE && |
2796 | !can_checksum_protocol(features, type)) { | 2796 | !can_checksum_protocol(features, type)) { |
2797 | features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); | 2797 | features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
2798 | } else if (illegal_highdma(skb->dev, skb)) { | ||
2799 | features &= ~NETIF_F_SG; | ||
2800 | } | 2798 | } |
2799 | if (illegal_highdma(skb->dev, skb)) | ||
2800 | features &= ~NETIF_F_SG; | ||
2801 | 2801 | ||
2802 | return features; | 2802 | return features; |
2803 | } | 2803 | } |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index e23766c7e3ba..236a21e3c878 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev, | |||
1712 | static noinline_for_stack int ethtool_set_channels(struct net_device *dev, | 1712 | static noinline_for_stack int ethtool_set_channels(struct net_device *dev, |
1713 | void __user *useraddr) | 1713 | void __user *useraddr) |
1714 | { | 1714 | { |
1715 | struct ethtool_channels channels, max; | 1715 | struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS }; |
1716 | u32 max_rx_in_use = 0; | 1716 | u32 max_rx_in_use = 0; |
1717 | 1717 | ||
1718 | if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) | 1718 | if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) |
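Initializing max with a designated initializer both stamps .cmd before ->get_channels() runs and zeroes every other member, so nothing uninitialized is consulted if the driver leaves fields untouched. The zeroing guarantee is plain C99 (6.7.8p21), shown here in a freestanding sketch with the command value assumed to be ETHTOOL_GCHANNELS (0x3c):

#include <stdio.h>

struct req { unsigned cmd; unsigned rx, tx, other, combined; };

int main(void)
{
	/* Members not named are zero-initialized, so no stack garbage
	 * can leak through fields the callee never writes. */
	struct req max = { .cmd = 0x3c /* ETHTOOL_GCHANNELS */ };

	printf("%u %u\n", max.cmd, max.combined);	/* 60 0 */
	return 0;
}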
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 71bb3e2eca08..b3eef90b2df9 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c | |||
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = { | |||
386 | .fill_encap = bpf_fill_encap_info, | 386 | .fill_encap = bpf_fill_encap_info, |
387 | .get_encap_size = bpf_encap_nlsize, | 387 | .get_encap_size = bpf_encap_nlsize, |
388 | .cmp_encap = bpf_encap_cmp, | 388 | .cmp_encap = bpf_encap_cmp, |
389 | .owner = THIS_MODULE, | ||
389 | }; | 390 | }; |
390 | 391 | ||
391 | static int __init bpf_lwt_init(void) | 392 | static int __init bpf_lwt_init(void) |
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index a5d4e866ce88..c23465005f2f 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <net/lwtunnel.h> | 26 | #include <net/lwtunnel.h> |
27 | #include <net/rtnetlink.h> | 27 | #include <net/rtnetlink.h> |
28 | #include <net/ip6_fib.h> | 28 | #include <net/ip6_fib.h> |
29 | #include <net/nexthop.h> | ||
29 | 30 | ||
30 | #ifdef CONFIG_MODULES | 31 | #ifdef CONFIG_MODULES |
31 | 32 | ||
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type, | |||
114 | ret = -EOPNOTSUPP; | 115 | ret = -EOPNOTSUPP; |
115 | rcu_read_lock(); | 116 | rcu_read_lock(); |
116 | ops = rcu_dereference(lwtun_encaps[encap_type]); | 117 | ops = rcu_dereference(lwtun_encaps[encap_type]); |
118 | if (likely(ops && ops->build_state && try_module_get(ops->owner))) { | ||
119 | ret = ops->build_state(dev, encap, family, cfg, lws); | ||
120 | if (ret) | ||
121 | module_put(ops->owner); | ||
122 | } | ||
123 | rcu_read_unlock(); | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | EXPORT_SYMBOL(lwtunnel_build_state); | ||
128 | |||
129 | int lwtunnel_valid_encap_type(u16 encap_type) | ||
130 | { | ||
131 | const struct lwtunnel_encap_ops *ops; | ||
132 | int ret = -EINVAL; | ||
133 | |||
134 | if (encap_type == LWTUNNEL_ENCAP_NONE || | ||
135 | encap_type > LWTUNNEL_ENCAP_MAX) | ||
136 | return ret; | ||
137 | |||
138 | rcu_read_lock(); | ||
139 | ops = rcu_dereference(lwtun_encaps[encap_type]); | ||
140 | rcu_read_unlock(); | ||
117 | #ifdef CONFIG_MODULES | 141 | #ifdef CONFIG_MODULES |
118 | if (!ops) { | 142 | if (!ops) { |
119 | const char *encap_type_str = lwtunnel_encap_str(encap_type); | 143 | const char *encap_type_str = lwtunnel_encap_str(encap_type); |
120 | 144 | ||
121 | if (encap_type_str) { | 145 | if (encap_type_str) { |
122 | rcu_read_unlock(); | 146 | __rtnl_unlock(); |
123 | request_module("rtnl-lwt-%s", encap_type_str); | 147 | request_module("rtnl-lwt-%s", encap_type_str); |
148 | rtnl_lock(); | ||
149 | |||
124 | rcu_read_lock(); | 150 | rcu_read_lock(); |
125 | ops = rcu_dereference(lwtun_encaps[encap_type]); | 151 | ops = rcu_dereference(lwtun_encaps[encap_type]); |
152 | rcu_read_unlock(); | ||
126 | } | 153 | } |
127 | } | 154 | } |
128 | #endif | 155 | #endif |
129 | if (likely(ops && ops->build_state)) | 156 | return ops ? 0 : -EOPNOTSUPP; |
130 | ret = ops->build_state(dev, encap, family, cfg, lws); | 157 | } |
131 | rcu_read_unlock(); | 158 | EXPORT_SYMBOL(lwtunnel_valid_encap_type); |
132 | 159 | ||
133 | return ret; | 160 | int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining) |
161 | { | ||
162 | struct rtnexthop *rtnh = (struct rtnexthop *)attr; | ||
163 | struct nlattr *nla_entype; | ||
164 | struct nlattr *attrs; | ||
165 | struct nlattr *nla; | ||
166 | u16 encap_type; | ||
167 | int attrlen; | ||
168 | |||
169 | while (rtnh_ok(rtnh, remaining)) { | ||
170 | attrlen = rtnh_attrlen(rtnh); | ||
171 | if (attrlen > 0) { | ||
172 | attrs = rtnh_attrs(rtnh); | ||
173 | nla = nla_find(attrs, attrlen, RTA_ENCAP); | ||
174 | nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); | ||
175 | |||
176 | if (nla_entype) { | ||
177 | encap_type = nla_get_u16(nla_entype); | ||
178 | |||
179 | if (lwtunnel_valid_encap_type(encap_type) != 0) | ||
180 | return -EOPNOTSUPP; | ||
181 | } | ||
182 | } | ||
183 | rtnh = rtnh_next(rtnh, &remaining); | ||
184 | } | ||
185 | |||
186 | return 0; | ||
134 | } | 187 | } |
135 | EXPORT_SYMBOL(lwtunnel_build_state); | 188 | EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr); |
136 | 189 | ||
137 | void lwtstate_free(struct lwtunnel_state *lws) | 190 | void lwtstate_free(struct lwtunnel_state *lws) |
138 | { | 191 | { |
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws) | |||
144 | } else { | 197 | } else { |
145 | kfree(lws); | 198 | kfree(lws); |
146 | } | 199 | } |
200 | module_put(ops->owner); | ||
147 | } | 201 | } |
148 | EXPORT_SYMBOL(lwtstate_free); | 202 | EXPORT_SYMBOL(lwtstate_free); |
149 | 203 | ||
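The new owner field plus try_module_get()/module_put() pins the encap module for exactly the lifetime of the state it built, closing the window where the module could unload while a lwtstate still pointed at its ops. An illustrative lookup helper, not part of the diff:

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <net/lwtunnel.h>

/* Sketch: resolve the ops under RCU and pin the providing module
 * before they are used outside the read-side critical section. */
static const struct lwtunnel_encap_ops *pin_encap_ops(u16 encap_type)
{
	const struct lwtunnel_encap_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(lwtun_encaps[encap_type]);
	if (ops && !try_module_get(ops->owner))
		ops = NULL;			/* module is mid-unload */
	rcu_read_unlock();

	return ops;	/* caller must module_put(ops->owner) when done */
}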
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index adfc790f7193..c4e879c02186 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
227 | opt = ireq->ipv6_opt; | 227 | opt = ireq->ipv6_opt; |
228 | if (!opt) | 228 | if (!opt) |
229 | opt = rcu_dereference(np->opt); | 229 | opt = rcu_dereference(np->opt); |
230 | err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); | 230 | err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass); |
231 | rcu_read_unlock(); | 231 | rcu_read_unlock(); |
232 | err = net_xmit_eval(err); | 232 | err = net_xmit_eval(err); |
233 | } | 233 | } |
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) | |||
281 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); | 281 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); |
282 | if (!IS_ERR(dst)) { | 282 | if (!IS_ERR(dst)) { |
283 | skb_dst_set(skb, dst); | 283 | skb_dst_set(skb, dst); |
284 | ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); | 284 | ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0); |
285 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 285 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
286 | DCCP_INC_STATS(DCCP_MIB_OUTRSTS); | 286 | DCCP_INC_STATS(DCCP_MIB_OUTRSTS); |
287 | return; | 287 | return; |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 68c9eea00518..7d4596110851 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, | |||
1105 | /* Use already configured phy mode */ | 1105 | /* Use already configured phy mode */ |
1106 | if (p->phy_interface == PHY_INTERFACE_MODE_NA) | 1106 | if (p->phy_interface == PHY_INTERFACE_MODE_NA) |
1107 | p->phy_interface = p->phy->interface; | 1107 | p->phy_interface = p->phy->interface; |
1108 | phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, | 1108 | return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, |
1109 | p->phy_interface); | 1109 | p->phy_interface); |
1110 | |||
1111 | return 0; | ||
1112 | } | 1110 | } |
1113 | 1111 | ||
1114 | static int dsa_slave_phy_setup(struct dsa_slave_priv *p, | 1112 | static int dsa_slave_phy_setup(struct dsa_slave_priv *p, |
@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev) | |||
1203 | { | 1201 | { |
1204 | struct dsa_slave_priv *p = netdev_priv(slave_dev); | 1202 | struct dsa_slave_priv *p = netdev_priv(slave_dev); |
1205 | 1203 | ||
1204 | netif_device_detach(slave_dev); | ||
1205 | |||
1206 | if (p->phy) { | 1206 | if (p->phy) { |
1207 | phy_stop(p->phy); | 1207 | phy_stop(p->phy); |
1208 | p->old_pause = -1; | 1208 | p->old_pause = -1; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index eae0332b0e8c..7db2ad2e82d3 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <net/rtnetlink.h> | 46 | #include <net/rtnetlink.h> |
47 | #include <net/xfrm.h> | 47 | #include <net/xfrm.h> |
48 | #include <net/l3mdev.h> | 48 | #include <net/l3mdev.h> |
49 | #include <net/lwtunnel.h> | ||
49 | #include <trace/events/fib.h> | 50 | #include <trace/events/fib.h> |
50 | 51 | ||
51 | #ifndef CONFIG_IP_MULTIPLE_TABLES | 52 | #ifndef CONFIG_IP_MULTIPLE_TABLES |
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, | |||
677 | cfg->fc_mx_len = nla_len(attr); | 678 | cfg->fc_mx_len = nla_len(attr); |
678 | break; | 679 | break; |
679 | case RTA_MULTIPATH: | 680 | case RTA_MULTIPATH: |
681 | err = lwtunnel_valid_encap_type_attr(nla_data(attr), | ||
682 | nla_len(attr)); | ||
683 | if (err < 0) | ||
684 | goto errout; | ||
680 | cfg->fc_mp = nla_data(attr); | 685 | cfg->fc_mp = nla_data(attr); |
681 | cfg->fc_mp_len = nla_len(attr); | 686 | cfg->fc_mp_len = nla_len(attr); |
682 | break; | 687 | break; |
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, | |||
691 | break; | 696 | break; |
692 | case RTA_ENCAP_TYPE: | 697 | case RTA_ENCAP_TYPE: |
693 | cfg->fc_encap_type = nla_get_u16(attr); | 698 | cfg->fc_encap_type = nla_get_u16(attr); |
699 | err = lwtunnel_valid_encap_type(cfg->fc_encap_type); | ||
700 | if (err < 0) | ||
701 | goto errout; | ||
694 | break; | 702 | break; |
695 | } | 703 | } |
696 | } | 704 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index fac275c48108..b67719f45953 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, | |||
1629 | sk->sk_protocol = ip_hdr(skb)->protocol; | 1629 | sk->sk_protocol = ip_hdr(skb)->protocol; |
1630 | sk->sk_bound_dev_if = arg->bound_dev_if; | 1630 | sk->sk_bound_dev_if = arg->bound_dev_if; |
1631 | sk->sk_sndbuf = sysctl_wmem_default; | 1631 | sk->sk_sndbuf = sysctl_wmem_default; |
1632 | sk->sk_mark = fl4.flowi4_mark; | ||
1632 | err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, | 1633 | err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, |
1633 | len, 0, &ipc, &rt, MSG_DONTWAIT); | 1634 | len, 0, &ipc, &rt, MSG_DONTWAIT); |
1634 | if (unlikely(err)) { | 1635 | if (unlikely(err)) { |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index fed3d29f9eb3..0fd1976ab63b 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = { | |||
313 | .fill_encap = ip_tun_fill_encap_info, | 313 | .fill_encap = ip_tun_fill_encap_info, |
314 | .get_encap_size = ip_tun_encap_nlsize, | 314 | .get_encap_size = ip_tun_encap_nlsize, |
315 | .cmp_encap = ip_tun_cmp_encap, | 315 | .cmp_encap = ip_tun_cmp_encap, |
316 | .owner = THIS_MODULE, | ||
316 | }; | 317 | }; |
317 | 318 | ||
318 | static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { | 319 | static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { |
@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = { | |||
403 | .fill_encap = ip6_tun_fill_encap_info, | 404 | .fill_encap = ip6_tun_fill_encap_info, |
404 | .get_encap_size = ip6_tun_encap_nlsize, | 405 | .get_encap_size = ip6_tun_encap_nlsize, |
405 | .cmp_encap = ip_tun_cmp_encap, | 406 | .cmp_encap = ip_tun_cmp_encap, |
407 | .owner = THIS_MODULE, | ||
406 | }; | 408 | }; |
407 | 409 | ||
408 | void __init ip_tunnel_core_init(void) | 410 | void __init ip_tunnel_core_init(void) |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a6b8c1a4102b..0a783cd73faf 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry) | |||
144 | rcu_read_lock_bh(); | 144 | rcu_read_lock_bh(); |
145 | c = __clusterip_config_find(net, clusterip); | 145 | c = __clusterip_config_find(net, clusterip); |
146 | if (c) { | 146 | if (c) { |
147 | if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount))) | 147 | #ifdef CONFIG_PROC_FS |
148 | if (!c->pde) | ||
149 | c = NULL; | ||
150 | else | ||
151 | #endif | ||
152 | if (unlikely(!atomic_inc_not_zero(&c->refcount))) | ||
148 | c = NULL; | 153 | c = NULL; |
149 | else if (entry) | 154 | else if (entry) |
150 | atomic_inc(&c->entries); | 155 | atomic_inc(&c->entries); |
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index f273098e48fd..37fb9552e858 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4, | |||
63 | return dev_match || flags & XT_RPFILTER_LOOSE; | 63 | return dev_match || flags & XT_RPFILTER_LOOSE; |
64 | } | 64 | } |
65 | 65 | ||
66 | static bool rpfilter_is_local(const struct sk_buff *skb) | 66 | static bool |
67 | rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in) | ||
67 | { | 68 | { |
68 | const struct rtable *rt = skb_rtable(skb); | 69 | return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; |
69 | return rt && (rt->rt_flags & RTCF_LOCAL); | ||
70 | } | 70 | } |
71 | 71 | ||
72 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 72 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
79 | info = par->matchinfo; | 79 | info = par->matchinfo; |
80 | invert = info->flags & XT_RPFILTER_INVERT; | 80 | invert = info->flags & XT_RPFILTER_INVERT; |
81 | 81 | ||
82 | if (rpfilter_is_local(skb)) | 82 | if (rpfilter_is_loopback(skb, xt_in(par))) |
83 | return true ^ invert; | 83 | return true ^ invert; |
84 | 84 | ||
85 | iph = ip_hdr(skb); | 85 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index fd8220213afc..146d86105183 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c | |||
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) | |||
126 | /* ip_route_me_harder expects skb->dst to be set */ | 126 | /* ip_route_me_harder expects skb->dst to be set */ |
127 | skb_dst_set_noref(nskb, skb_dst(oldskb)); | 127 | skb_dst_set_noref(nskb, skb_dst(oldskb)); |
128 | 128 | ||
129 | nskb->mark = IP4_REPLY_MARK(net, oldskb->mark); | ||
130 | |||
129 | skb_reserve(nskb, LL_MAX_HEADER); | 131 | skb_reserve(nskb, LL_MAX_HEADER); |
130 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, | 132 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, |
131 | ip4_dst_hoplimit(skb_dst(nskb))); | 133 | ip4_dst_hoplimit(skb_dst(nskb))); |
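The new line above copies the original packet's mark onto the generated RST, but only when the per-netns fwmark_reflect sysctl allows it; the nf_reject_ipv6.c hunk below does the same with IP6_REPLY_MARK. To my understanding the helper is defined roughly as below (paraphrase from include/net/ip.h, not part of this patch):

    /* reflect the original mark only when fwmark_reflect is enabled */
    #define IP4_REPLY_MARK(net, mark) \
            ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)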
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 965b1a161369..2981291910dd 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c | |||
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr) | |||
26 | return addr; | 26 | return addr; |
27 | } | 27 | } |
28 | 28 | ||
29 | static bool fib4_is_local(const struct sk_buff *skb) | ||
30 | { | ||
31 | const struct rtable *rt = skb_rtable(skb); | ||
32 | |||
33 | return rt && (rt->rt_flags & RTCF_LOCAL); | ||
34 | } | ||
35 | |||
36 | #define DSCP_BITS 0xfc | 29 | #define DSCP_BITS 0xfc |
37 | 30 | ||
38 | void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, | 31 | void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, |
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
95 | else | 88 | else |
96 | oif = NULL; | 89 | oif = NULL; |
97 | 90 | ||
98 | if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) { | 91 | if (nft_hook(pkt) == NF_INET_PRE_ROUTING && |
99 | nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); | 92 | nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { |
93 | nft_fib_store_result(dest, priv->result, pkt, | ||
94 | nft_in(pkt)->ifindex); | ||
100 | return; | 95 | return; |
101 | } | 96 | } |
102 | 97 | ||
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
131 | switch (res.type) { | 126 | switch (res.type) { |
132 | case RTN_UNICAST: | 127 | case RTN_UNICAST: |
133 | break; | 128 | break; |
134 | case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */ | 129 | case RTN_LOCAL: /* Should not see RTN_LOCAL here */ |
135 | return; | 130 | return; |
136 | default: | 131 | default: |
137 | break; | 132 | break; |
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index f51919535ca7..dd2560c83a85 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c | |||
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, | |||
205 | * scaled. So correct it appropriately. | 205 | * scaled. So correct it appropriately. |
206 | */ | 206 | */ |
207 | tp->snd_wnd = ntohs(tcp_hdr(skb)->window); | 207 | tp->snd_wnd = ntohs(tcp_hdr(skb)->window); |
208 | tp->max_window = tp->snd_wnd; | ||
208 | 209 | ||
209 | /* Activate the retrans timer so that SYNACK can be retransmitted. | 210 | /* Activate the retrans timer so that SYNACK can be retransmitted. |
210 | * The request socket is not added to the ehash | 211 | * The request socket is not added to the ehash |
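A fastopen child takes snd_wnd from the SYN (unscaled); without also seeding max_window it stays zero until the first window update, and to my reading helpers that clamp segment sizes against the largest window ever seen (tcp_bound_to_half_wnd() is one consumer) then skip the clamp entirely. A toy userspace model of that clamp, with made-up numbers:

    #include <stdio.h>

    /* mirrors the shape of tcp_bound_to_half_wnd(); 536 plays the
     * role of TCP_MSS_DEFAULT here */
    static unsigned int bound_to_half_wnd(unsigned int max_window,
                                          unsigned int pktsize)
    {
            unsigned int cutoff = max_window > 536 ? max_window / 2
                                                   : max_window;

            return (cutoff && pktsize > cutoff) ? cutoff : pktsize;
    }

    int main(void)
    {
            printf("%u\n", bound_to_half_wnd(0, 65000));     /* 65000: unclamped */
            printf("%u\n", bound_to_half_wnd(29200, 65000)); /* 14600: clamped */
            return 0;
    }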
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6c790754ae3e..41dcbd568cbe 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk) | |||
5078 | if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { | 5078 | if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { |
5079 | sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); | 5079 | sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); |
5080 | /* pairs with tcp_poll() */ | 5080 | /* pairs with tcp_poll() */ |
5081 | smp_mb__after_atomic(); | 5081 | smp_mb(); |
5082 | if (sk->sk_socket && | 5082 | if (sk->sk_socket && |
5083 | test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 5083 | test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
5084 | tcp_new_space(sk); | 5084 | tcp_new_space(sk); |
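The barrier change above is needed because sock_reset_flag() is a plain (non-atomic) bit clear, so smp_mb__after_atomic() provides no ordering there; a full smp_mb() is required to pair with the barrier in tcp_poll(): each side must publish its store before reading the other side's flag, or a wakeup can be lost. A minimal userspace model of the pairing, using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    atomic_bool queue_shrunk, nospace;

    /* mirrors tcp_check_space(): space was freed, decide whether to wake */
    bool writer_should_wake(void)
    {
            atomic_store_explicit(&queue_shrunk, false, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
            return atomic_load_explicit(&nospace, memory_order_relaxed);
    }

    /* mirrors tcp_poll(): publish NOSPACE, then re-check free space */
    bool poller_may_sleep(bool space_is_low)
    {
            atomic_store_explicit(&nospace, true, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* pairs with above */
            return space_is_low;
    }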
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c1e124bc8e1e..f60e88e56255 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -5540,8 +5540,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf) | |||
5540 | struct net_device *dev; | 5540 | struct net_device *dev; |
5541 | struct inet6_dev *idev; | 5541 | struct inet6_dev *idev; |
5542 | 5542 | ||
5543 | rcu_read_lock(); | 5543 | for_each_netdev(net, dev) { |
5544 | for_each_netdev_rcu(net, dev) { | ||
5545 | idev = __in6_dev_get(dev); | 5544 | idev = __in6_dev_get(dev); |
5546 | if (idev) { | 5545 | if (idev) { |
5547 | int changed = (!idev->cnf.disable_ipv6) ^ (!newf); | 5546 | int changed = (!idev->cnf.disable_ipv6) ^ (!newf); |
@@ -5550,7 +5549,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf) | |||
5550 | dev_disable_change(idev); | 5549 | dev_disable_change(idev); |
5551 | } | 5550 | } |
5552 | } | 5551 | } |
5553 | rcu_read_unlock(); | ||
5554 | } | 5552 | } |
5555 | 5553 | ||
5556 | static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) | 5554 | static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) |
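The addrconf hunk above drops the RCU read-side section in favour of a plain for_each_netdev() walk: the path already runs under RTNL, which keeps the device list stable, and the per-device work reaches code that may sleep, which is illegal inside rcu_read_lock(). A sketch of the resulting locking rule, with a hypothetical do_work() callback:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static void visit_all_devices(struct net *net,
                                  void (*do_work)(struct net_device *))
    {
            struct net_device *dev;

            ASSERT_RTNL();                  /* list is stable under RTNL */
            for_each_netdev(net, dev)       /* no rcu_read_lock() held,  */
                    do_work(dev);           /* so do_work() may sleep    */
    }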
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index a7bc54ab46e2..13b5e85fe0d5 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c | |||
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = { | |||
238 | .fill_encap = ila_fill_encap_info, | 238 | .fill_encap = ila_fill_encap_info, |
239 | .get_encap_size = ila_encap_nlsize, | 239 | .get_encap_size = ila_encap_nlsize, |
240 | .cmp_encap = ila_encap_cmp, | 240 | .cmp_encap = ila_encap_cmp, |
241 | .owner = THIS_MODULE, | ||
241 | }; | 242 | }; |
242 | 243 | ||
243 | int ila_lwt_init(void) | 244 | int ila_lwt_init(void) |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 7396e75e161b..75c308239243 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused | |||
176 | /* Restore final destination back after routing done */ | 176 | /* Restore final destination back after routing done */ |
177 | fl6.daddr = sk->sk_v6_daddr; | 177 | fl6.daddr = sk->sk_v6_daddr; |
178 | 178 | ||
179 | res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), | 179 | res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt), |
180 | np->tclass); | 180 | np->tclass); |
181 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
182 | return res; | 182 | return res; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 75b6108234dd..558631860d91 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) | |||
582 | return -1; | 582 | return -1; |
583 | 583 | ||
584 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); | 584 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); |
585 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ | ||
586 | ipv6h = ipv6_hdr(skb); | ||
587 | |||
585 | if (offset > 0) { | 588 | if (offset > 0) { |
586 | struct ipv6_tlv_tnl_enc_lim *tel; | 589 | struct ipv6_tlv_tnl_enc_lim *tel; |
587 | tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; | 590 | tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 38122d04fadc..2c0df09e9036 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
172 | * which are using proper atomic operations or spinlocks. | 172 | * which are using proper atomic operations or spinlocks. |
173 | */ | 173 | */ |
174 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | 174 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, |
175 | struct ipv6_txoptions *opt, int tclass) | 175 | __u32 mark, struct ipv6_txoptions *opt, int tclass) |
176 | { | 176 | { |
177 | struct net *net = sock_net(sk); | 177 | struct net *net = sock_net(sk); |
178 | const struct ipv6_pinfo *np = inet6_sk(sk); | 178 | const struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
240 | 240 | ||
241 | skb->protocol = htons(ETH_P_IPV6); | 241 | skb->protocol = htons(ETH_P_IPV6); |
242 | skb->priority = sk->sk_priority; | 242 | skb->priority = sk->sk_priority; |
243 | skb->mark = sk->sk_mark; | 243 | skb->mark = mark; |
244 | 244 | ||
245 | mtu = dst_mtu(dst); | 245 | mtu = dst_mtu(dst); |
246 | if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { | 246 | if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 753d6d0860fb..ff8ee06491c3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev) | |||
400 | 400 | ||
401 | __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) | 401 | __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) |
402 | { | 402 | { |
403 | const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; | 403 | const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw; |
404 | __u8 nexthdr = ipv6h->nexthdr; | 404 | unsigned int nhoff = raw - skb->data; |
405 | __u16 off = sizeof(*ipv6h); | 405 | unsigned int off = nhoff + sizeof(*ipv6h); |
406 | u8 next, nexthdr = ipv6h->nexthdr; | ||
406 | 407 | ||
407 | while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { | 408 | while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { |
408 | __u16 optlen = 0; | ||
409 | struct ipv6_opt_hdr *hdr; | 409 | struct ipv6_opt_hdr *hdr; |
410 | if (raw + off + sizeof(*hdr) > skb->data && | 410 | u16 optlen; |
411 | !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) | 411 | |
412 | if (!pskb_may_pull(skb, off + sizeof(*hdr))) | ||
412 | break; | 413 | break; |
413 | 414 | ||
414 | hdr = (struct ipv6_opt_hdr *) (raw + off); | 415 | hdr = (struct ipv6_opt_hdr *)(skb->data + off); |
415 | if (nexthdr == NEXTHDR_FRAGMENT) { | 416 | if (nexthdr == NEXTHDR_FRAGMENT) { |
416 | struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; | 417 | struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; |
417 | if (frag_hdr->frag_off) | 418 | if (frag_hdr->frag_off) |
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) | |||
422 | } else { | 423 | } else { |
423 | optlen = ipv6_optlen(hdr); | 424 | optlen = ipv6_optlen(hdr); |
424 | } | 425 | } |
426 | /* cache hdr->nexthdr, since pskb_may_pull() might | ||
427 | * invalidate hdr | ||
428 | */ | ||
429 | next = hdr->nexthdr; | ||
425 | if (nexthdr == NEXTHDR_DEST) { | 430 | if (nexthdr == NEXTHDR_DEST) { |
426 | __u16 i = off + 2; | 431 | u16 i = 2; |
432 | |||
433 | /* Remember : hdr is no longer valid at this point. */ | ||
434 | if (!pskb_may_pull(skb, off + optlen)) | ||
435 | break; | ||
436 | |||
427 | while (1) { | 437 | while (1) { |
428 | struct ipv6_tlv_tnl_enc_lim *tel; | 438 | struct ipv6_tlv_tnl_enc_lim *tel; |
429 | 439 | ||
430 | /* No more room for encapsulation limit */ | 440 | /* No more room for encapsulation limit */ |
431 | if (i + sizeof (*tel) > off + optlen) | 441 | if (i + sizeof(*tel) > optlen) |
432 | break; | 442 | break; |
433 | 443 | ||
434 | tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; | 444 | tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i; |
435 | /* return index of option if found and valid */ | 445 | /* return index of option if found and valid */ |
436 | if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && | 446 | if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && |
437 | tel->length == 1) | 447 | tel->length == 1) |
438 | return i; | 448 | return i + off - nhoff; |
439 | /* else jump to next option */ | 449 | /* else jump to next option */ |
440 | if (tel->type) | 450 | if (tel->type) |
441 | i += tel->length + 2; | 451 | i += tel->length + 2; |
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) | |||
443 | i++; | 453 | i++; |
444 | } | 454 | } |
445 | } | 455 | } |
446 | nexthdr = hdr->nexthdr; | 456 | nexthdr = next; |
447 | off += optlen; | 457 | off += optlen; |
448 | } | 458 | } |
449 | return 0; | 459 | return 0; |
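The whole rewrite above follows one rule: pskb_may_pull() may call pskb_expand_head() and move skb->head, so any pointer computed before a pull (raw, hdr, and the callers' ipv6h, hence the "might have reallocated skb->head" re-reads in the ip6_gre and ip6ip6 xmit hunks) is stale afterwards. The function therefore tracks offsets relative to skb->data, re-derives pointers after each pull, and returns an offset relative to raw (i + off - nhoff) so callers keep their old interface. The general pattern, with a hypothetical 2-byte TLV header:

    #include <linux/skbuff.h>

    struct my_hdr { __u8 type; __u8 len; };    /* hypothetical */

    static struct my_hdr *pull_and_get(struct sk_buff *skb, unsigned int off)
    {
            /* pskb_may_pull() may reallocate skb->head ... */
            if (!pskb_may_pull(skb, off + sizeof(struct my_hdr)))
                    return NULL;
            /* ... so re-derive the pointer from skb->data afterwards */
            return (struct my_hdr *)(skb->data + off);
    }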
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1303 | fl6.flowlabel = key->label; | 1313 | fl6.flowlabel = key->label; |
1304 | } else { | 1314 | } else { |
1305 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); | 1315 | offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); |
1316 | /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ | ||
1317 | ipv6h = ipv6_hdr(skb); | ||
1306 | if (offset > 0) { | 1318 | if (offset > 0) { |
1307 | struct ipv6_tlv_tnl_enc_lim *tel; | 1319 | struct ipv6_tlv_tnl_enc_lim *tel; |
1308 | 1320 | ||
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index d5263dc364a9..b12e61b7b16c 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, | |||
72 | return ret; | 72 | return ret; |
73 | } | 73 | } |
74 | 74 | ||
75 | static bool rpfilter_is_local(const struct sk_buff *skb) | 75 | static bool |
76 | rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in) | ||
76 | { | 77 | { |
77 | const struct rt6_info *rt = (const void *) skb_dst(skb); | 78 | return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; |
78 | return rt && (rt->rt6i_flags & RTF_LOCAL); | ||
79 | } | 79 | } |
80 | 80 | ||
81 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 81 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
85 | struct ipv6hdr *iph; | 85 | struct ipv6hdr *iph; |
86 | bool invert = info->flags & XT_RPFILTER_INVERT; | 86 | bool invert = info->flags & XT_RPFILTER_INVERT; |
87 | 87 | ||
88 | if (rpfilter_is_local(skb)) | 88 | if (rpfilter_is_loopback(skb, xt_in(par))) |
89 | return true ^ invert; | 89 | return true ^ invert; |
90 | 90 | ||
91 | iph = ipv6_hdr(skb); | 91 | iph = ipv6_hdr(skb); |
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 10090400c72f..eedee5d108d9 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c | |||
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
157 | fl6.fl6_sport = otcph->dest; | 157 | fl6.fl6_sport = otcph->dest; |
158 | fl6.fl6_dport = otcph->source; | 158 | fl6.fl6_dport = otcph->source; |
159 | fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); | 159 | fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); |
160 | fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); | ||
160 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | 161 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); |
161 | dst = ip6_route_output(net, NULL, &fl6); | 162 | dst = ip6_route_output(net, NULL, &fl6); |
162 | if (dst->error) { | 163 | if (dst->error) { |
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
180 | 181 | ||
181 | skb_dst_set(nskb, dst); | 182 | skb_dst_set(nskb, dst); |
182 | 183 | ||
184 | nskb->mark = fl6.flowi6_mark; | ||
185 | |||
183 | skb_reserve(nskb, hh_len + dst->header_len); | 186 | skb_reserve(nskb, hh_len + dst->header_len); |
184 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, | 187 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, |
185 | ip6_dst_hoplimit(dst)); | 188 | ip6_dst_hoplimit(dst)); |
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index c947aad8bcc6..765facf03d45 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c | |||
@@ -18,13 +18,6 @@ | |||
18 | #include <net/ip6_fib.h> | 18 | #include <net/ip6_fib.h> |
19 | #include <net/ip6_route.h> | 19 | #include <net/ip6_route.h> |
20 | 20 | ||
21 | static bool fib6_is_local(const struct sk_buff *skb) | ||
22 | { | ||
23 | const struct rt6_info *rt = (const void *)skb_dst(skb); | ||
24 | |||
25 | return rt && (rt->rt6i_flags & RTF_LOCAL); | ||
26 | } | ||
27 | |||
28 | static int get_ifindex(const struct net_device *dev) | 21 | static int get_ifindex(const struct net_device *dev) |
29 | { | 22 | { |
30 | return dev ? dev->ifindex : 0; | 23 | return dev ? dev->ifindex : 0; |
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, | |||
164 | 157 | ||
165 | lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); | 158 | lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); |
166 | 159 | ||
167 | if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) { | 160 | if (nft_hook(pkt) == NF_INET_PRE_ROUTING && |
168 | nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); | 161 | nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { |
162 | nft_fib_store_result(dest, priv->result, pkt, | ||
163 | nft_in(pkt)->ifindex); | ||
169 | return; | 164 | return; |
170 | } | 165 | } |
171 | 166 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4f6b067c8753..7ea85370c11c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2896 | if (tb[RTA_MULTIPATH]) { | 2896 | if (tb[RTA_MULTIPATH]) { |
2897 | cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); | 2897 | cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); |
2898 | cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); | 2898 | cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); |
2899 | |||
2900 | err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, | ||
2901 | cfg->fc_mp_len); | ||
2902 | if (err < 0) | ||
2903 | goto errout; | ||
2899 | } | 2904 | } |
2900 | 2905 | ||
2901 | if (tb[RTA_PREF]) { | 2906 | if (tb[RTA_PREF]) { |
@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2909 | if (tb[RTA_ENCAP]) | 2914 | if (tb[RTA_ENCAP]) |
2910 | cfg->fc_encap = tb[RTA_ENCAP]; | 2915 | cfg->fc_encap = tb[RTA_ENCAP]; |
2911 | 2916 | ||
2912 | if (tb[RTA_ENCAP_TYPE]) | 2917 | if (tb[RTA_ENCAP_TYPE]) { |
2913 | cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); | 2918 | cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); |
2914 | 2919 | ||
2920 | err = lwtunnel_valid_encap_type(cfg->fc_encap_type); | ||
2921 | if (err < 0) | ||
2922 | goto errout; | ||
2923 | } | ||
2924 | |||
2915 | if (tb[RTA_EXPIRES]) { | 2925 | if (tb[RTA_EXPIRES]) { |
2916 | unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); | 2926 | unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); |
2917 | 2927 | ||
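Validating RTA_ENCAP_TYPE (and each multipath nexthop's encap type via the _attr variant) at parse time lets the handler reject an unknown or unusable encap with a clear error before any part of the route is installed, rather than failing deep inside route insertion. A compressed sketch of the order of operations, with cfg, tb and errout as in rtm_to_fib6_config():

    if (tb[RTA_MULTIPATH]) {
            cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
            cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
            err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len);
            if (err < 0)
                    goto errout;    /* a nexthop names an unusable encap */
    }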
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index b172d85c650a..a855eb325b03 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c | |||
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info) | |||
176 | 176 | ||
177 | val = nla_data(info->attrs[SEG6_ATTR_DST]); | 177 | val = nla_data(info->attrs[SEG6_ATTR_DST]); |
178 | t_new = kmemdup(val, sizeof(*val), GFP_KERNEL); | 178 | t_new = kmemdup(val, sizeof(*val), GFP_KERNEL); |
179 | if (!t_new) | ||
180 | return -ENOMEM; | ||
179 | 181 | ||
180 | mutex_lock(&sdata->lock); | 182 | mutex_lock(&sdata->lock); |
181 | 183 | ||
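The seg6 hunk above adds the standard allocation guard: kmemdup() can return NULL, and without the check a NULL tunnel-source pointer would be published under sdata->lock. The pattern, mirroring the hunk:

    t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
    if (!t_new)
            return -ENOMEM;     /* bail before taking the lock or
                                 * publishing a NULL pointer */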
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 1d60cb132835..c46f8cbf5ab5 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = { | |||
422 | .fill_encap = seg6_fill_encap_info, | 422 | .fill_encap = seg6_fill_encap_info, |
423 | .get_encap_size = seg6_encap_nlsize, | 423 | .get_encap_size = seg6_encap_nlsize, |
424 | .cmp_encap = seg6_encap_cmp, | 424 | .cmp_encap = seg6_encap_cmp, |
425 | .owner = THIS_MODULE, | ||
425 | }; | 426 | }; |
426 | 427 | ||
427 | int __init seg6_iptunnel_init(void) | 428 | int __init seg6_iptunnel_init(void) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 73bc8fc68acd..cb8929681dc7 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
469 | opt = ireq->ipv6_opt; | 469 | opt = ireq->ipv6_opt; |
470 | if (!opt) | 470 | if (!opt) |
471 | opt = rcu_dereference(np->opt); | 471 | opt = rcu_dereference(np->opt); |
472 | err = ip6_xmit(sk, skb, fl6, opt, np->tclass); | 472 | err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass); |
473 | rcu_read_unlock(); | 473 | rcu_read_unlock(); |
474 | err = net_xmit_eval(err); | 474 | err = net_xmit_eval(err); |
475 | } | 475 | } |
@@ -840,7 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
840 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); | 840 | dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); |
841 | if (!IS_ERR(dst)) { | 841 | if (!IS_ERR(dst)) { |
842 | skb_dst_set(buff, dst); | 842 | skb_dst_set(buff, dst); |
843 | ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); | 843 | ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); |
844 | TCP_INC_STATS(net, TCP_MIB_OUTSEGS); | 844 | TCP_INC_STATS(net, TCP_MIB_OUTSEGS); |
845 | if (rst) | 845 | if (rst) |
846 | TCP_INC_STATS(net, TCP_MIB_OUTRSTS); | 846 | TCP_INC_STATS(net, TCP_MIB_OUTRSTS); |
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 9e2641d45587..206698bc93f4 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta) | |||
40 | 40 | ||
41 | ieee80211_sta_set_rx_nss(sta); | 41 | ieee80211_sta_set_rx_nss(sta); |
42 | 42 | ||
43 | ieee80211_recalc_min_chandef(sta->sdata); | ||
44 | |||
45 | if (!ref) | 43 | if (!ref) |
46 | return; | 44 | return; |
47 | 45 | ||
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 15fe97644ffe..5b77377e5a15 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | |||
98 | } | 98 | } |
99 | EXPORT_SYMBOL_GPL(mpls_pkt_too_big); | 99 | EXPORT_SYMBOL_GPL(mpls_pkt_too_big); |
100 | 100 | ||
101 | static u32 mpls_multipath_hash(struct mpls_route *rt, | 101 | static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb) |
102 | struct sk_buff *skb, bool bos) | ||
103 | { | 102 | { |
104 | struct mpls_entry_decoded dec; | 103 | struct mpls_entry_decoded dec; |
104 | unsigned int mpls_hdr_len = 0; | ||
105 | struct mpls_shim_hdr *hdr; | 105 | struct mpls_shim_hdr *hdr; |
106 | bool eli_seen = false; | 106 | bool eli_seen = false; |
107 | int label_index; | 107 | int label_index; |
108 | u32 hash = 0; | 108 | u32 hash = 0; |
109 | 109 | ||
110 | for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos; | 110 | for (label_index = 0; label_index < MAX_MP_SELECT_LABELS; |
111 | label_index++) { | 111 | label_index++) { |
112 | if (!pskb_may_pull(skb, sizeof(*hdr) * label_index)) | 112 | mpls_hdr_len += sizeof(*hdr); |
113 | if (!pskb_may_pull(skb, mpls_hdr_len)) | ||
113 | break; | 114 | break; |
114 | 115 | ||
115 | /* Read and decode the current label */ | 116 | /* Read and decode the current label */ |
@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt, | |||
134 | eli_seen = true; | 135 | eli_seen = true; |
135 | } | 136 | } |
136 | 137 | ||
137 | bos = dec.bos; | 138 | if (!dec.bos) |
138 | if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index + | 139 | continue; |
139 | sizeof(struct iphdr))) { | 140 | |
141 | /* found bottom label; does skb have room for a header? */ | ||
142 | if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) { | ||
140 | const struct iphdr *v4hdr; | 143 | const struct iphdr *v4hdr; |
141 | 144 | ||
142 | v4hdr = (const struct iphdr *)(mpls_hdr(skb) + | 145 | v4hdr = (const struct iphdr *)(hdr + 1); |
143 | label_index); | ||
144 | if (v4hdr->version == 4) { | 146 | if (v4hdr->version == 4) { |
145 | hash = jhash_3words(ntohl(v4hdr->saddr), | 147 | hash = jhash_3words(ntohl(v4hdr->saddr), |
146 | ntohl(v4hdr->daddr), | 148 | ntohl(v4hdr->daddr), |
147 | v4hdr->protocol, hash); | 149 | v4hdr->protocol, hash); |
148 | } else if (v4hdr->version == 6 && | 150 | } else if (v4hdr->version == 6 && |
149 | pskb_may_pull(skb, sizeof(*hdr) * label_index + | 151 | pskb_may_pull(skb, mpls_hdr_len + |
150 | sizeof(struct ipv6hdr))) { | 152 | sizeof(struct ipv6hdr))) { |
151 | const struct ipv6hdr *v6hdr; | 153 | const struct ipv6hdr *v6hdr; |
152 | 154 | ||
153 | v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) + | 155 | v6hdr = (const struct ipv6hdr *)(hdr + 1); |
154 | label_index); | ||
155 | |||
156 | hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); | 156 | hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); |
157 | hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); | 157 | hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); |
158 | hash = jhash_1word(v6hdr->nexthdr, hash); | 158 | hash = jhash_1word(v6hdr->nexthdr, hash); |
159 | } | 159 | } |
160 | } | 160 | } |
161 | |||
162 | break; | ||
161 | } | 163 | } |
162 | 164 | ||
163 | return hash; | 165 | return hash; |
164 | } | 166 | } |
165 | 167 | ||
166 | static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, | 168 | static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, |
167 | struct sk_buff *skb, bool bos) | 169 | struct sk_buff *skb) |
168 | { | 170 | { |
169 | int alive = ACCESS_ONCE(rt->rt_nhn_alive); | 171 | int alive = ACCESS_ONCE(rt->rt_nhn_alive); |
170 | u32 hash = 0; | 172 | u32 hash = 0; |
@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, | |||
180 | if (alive <= 0) | 182 | if (alive <= 0) |
181 | return NULL; | 183 | return NULL; |
182 | 184 | ||
183 | hash = mpls_multipath_hash(rt, skb, bos); | 185 | hash = mpls_multipath_hash(rt, skb); |
184 | nh_index = hash % alive; | 186 | nh_index = hash % alive; |
185 | if (alive == rt->rt_nhn) | 187 | if (alive == rt->rt_nhn) |
186 | goto out; | 188 | goto out; |
@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
278 | hdr = mpls_hdr(skb); | 280 | hdr = mpls_hdr(skb); |
279 | dec = mpls_entry_decode(hdr); | 281 | dec = mpls_entry_decode(hdr); |
280 | 282 | ||
281 | /* Pop the label */ | ||
282 | skb_pull(skb, sizeof(*hdr)); | ||
283 | skb_reset_network_header(skb); | ||
284 | |||
285 | skb_orphan(skb); | ||
286 | |||
287 | rt = mpls_route_input_rcu(net, dec.label); | 283 | rt = mpls_route_input_rcu(net, dec.label); |
288 | if (!rt) | 284 | if (!rt) |
289 | goto drop; | 285 | goto drop; |
290 | 286 | ||
291 | nh = mpls_select_multipath(rt, skb, dec.bos); | 287 | nh = mpls_select_multipath(rt, skb); |
292 | if (!nh) | 288 | if (!nh) |
293 | goto drop; | 289 | goto drop; |
294 | 290 | ||
@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
297 | if (!mpls_output_possible(out_dev)) | 293 | if (!mpls_output_possible(out_dev)) |
298 | goto drop; | 294 | goto drop; |
299 | 295 | ||
296 | /* Pop the label */ | ||
297 | skb_pull(skb, sizeof(*hdr)); | ||
298 | skb_reset_network_header(skb); | ||
299 | |||
300 | skb_orphan(skb); | ||
301 | |||
300 | if (skb_warn_if_lro(skb)) | 302 | if (skb_warn_if_lro(skb)) |
301 | goto drop; | 303 | goto drop; |
302 | 304 | ||
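Two related changes in af_mpls.c above: the multipath hash now walks the intact label stack (advancing mpls_hdr_len one shim header at a time, verifying each step with pskb_may_pull(), stopping at the bottom-of-stack label and hashing the IP payload found right behind it at hdr + 1), and the label pop moves to after route and nexthop selection so that walk still sees the labels. A compressed sketch of the walk; hash_inner_ip() is a hypothetical stand-in for the v4/v6 jhash blocks:

    for (label_index = 0; label_index < MAX_MP_SELECT_LABELS; label_index++) {
            mpls_hdr_len += sizeof(*hdr);
            if (!pskb_may_pull(skb, mpls_hdr_len))
                    break;
            hdr = mpls_hdr(skb) + label_index;  /* re-read after the pull */
            dec = mpls_entry_decode(hdr);
            if (!dec.bos)
                    continue;                   /* keep walking the stack */
            /* bottom of stack: IP payload begins right behind it */
            hash = hash_inner_ip(skb, mpls_hdr_len, (void *)(hdr + 1));
            break;
    }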
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 2f7ccd934416..1d281c1ff7c1 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c | |||
@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = { | |||
215 | .fill_encap = mpls_fill_encap_info, | 215 | .fill_encap = mpls_fill_encap_info, |
216 | .get_encap_size = mpls_encap_nlsize, | 216 | .get_encap_size = mpls_encap_nlsize, |
217 | .cmp_encap = mpls_encap_cmp, | 217 | .cmp_encap = mpls_encap_cmp, |
218 | .owner = THIS_MODULE, | ||
218 | }; | 219 | }; |
219 | 220 | ||
220 | static int __init mpls_iptunnel_init(void) | 221 | static int __init mpls_iptunnel_init(void) |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 63729b489c2c..bbc45f8a7b2d 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -494,7 +494,7 @@ config NFT_CT | |||
494 | depends on NF_CONNTRACK | 494 | depends on NF_CONNTRACK |
495 | tristate "Netfilter nf_tables conntrack module" | 495 | tristate "Netfilter nf_tables conntrack module" |
496 | help | 496 | help |
497 | This option adds the "meta" expression that you can use to match | 497 | This option adds the "ct" expression that you can use to match |
498 | connection tracking information such as the flow state. | 498 | connection tracking information such as the flow state. |
499 | 499 | ||
500 | config NFT_SET_RBTREE | 500 | config NFT_SET_RBTREE |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3a073cd9fcf4..4e8083c5e01d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); | |||
85 | static __read_mostly bool nf_conntrack_locks_all; | 85 | static __read_mostly bool nf_conntrack_locks_all; |
86 | 86 | ||
87 | /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ | 87 | /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ |
88 | #define GC_MAX_BUCKETS_DIV 64u | 88 | #define GC_MAX_BUCKETS_DIV 128u |
89 | /* upper bound of scan intervals */ | 89 | /* upper bound of full table scan */ |
90 | #define GC_INTERVAL_MAX (2 * HZ) | 90 | #define GC_MAX_SCAN_JIFFIES (16u * HZ) |
91 | /* maximum conntracks to evict per gc run */ | 91 | /* desired ratio of entries found to be expired */ |
92 | #define GC_MAX_EVICTS 256u | 92 | #define GC_EVICT_RATIO 50u |
93 | 93 | ||
94 | static struct conntrack_gc_work conntrack_gc_work; | 94 | static struct conntrack_gc_work conntrack_gc_work; |
95 | 95 | ||
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash) | |||
938 | 938 | ||
939 | static void gc_worker(struct work_struct *work) | 939 | static void gc_worker(struct work_struct *work) |
940 | { | 940 | { |
941 | unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); | ||
941 | unsigned int i, goal, buckets = 0, expired_count = 0; | 942 | unsigned int i, goal, buckets = 0, expired_count = 0; |
942 | struct conntrack_gc_work *gc_work; | 943 | struct conntrack_gc_work *gc_work; |
943 | unsigned int ratio, scanned = 0; | 944 | unsigned int ratio, scanned = 0; |
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work) | |||
979 | */ | 980 | */ |
980 | rcu_read_unlock(); | 981 | rcu_read_unlock(); |
981 | cond_resched_rcu_qs(); | 982 | cond_resched_rcu_qs(); |
982 | } while (++buckets < goal && | 983 | } while (++buckets < goal); |
983 | expired_count < GC_MAX_EVICTS); | ||
984 | 984 | ||
985 | if (gc_work->exiting) | 985 | if (gc_work->exiting) |
986 | return; | 986 | return; |
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work) | |||
997 | * 1. Minimize time until we notice a stale entry | 997 | * 1. Minimize time until we notice a stale entry |
998 | * 2. Maximize scan intervals to not waste cycles | 998 | * 2. Maximize scan intervals to not waste cycles |
999 | * | 999 | * |
1000 | * Normally, expired_count will be 0, this increases the next_run time | 1000 | * Normally, expire ratio will be close to 0. |
1001 | * to priorize 2) above. | ||
1002 | * | 1001 | * |
1003 | * As soon as a timed-out entry is found, move towards 1) and increase | 1002 | * As soon as a sizeable fraction of the entries have expired |
1004 | * the scan frequency. | 1003 | * increase scan frequency. |
1005 | * In case we have lots of evictions next scan is done immediately. | ||
1006 | */ | 1004 | */ |
1007 | ratio = scanned ? expired_count * 100 / scanned : 0; | 1005 | ratio = scanned ? expired_count * 100 / scanned : 0; |
1008 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { | 1006 | if (ratio > GC_EVICT_RATIO) { |
1009 | gc_work->next_gc_run = 0; | 1007 | gc_work->next_gc_run = min_interval; |
1010 | next_run = 0; | ||
1011 | } else if (expired_count) { | ||
1012 | gc_work->next_gc_run /= 2U; | ||
1013 | next_run = msecs_to_jiffies(1); | ||
1014 | } else { | 1008 | } else { |
1015 | if (gc_work->next_gc_run < GC_INTERVAL_MAX) | 1009 | unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV; |
1016 | gc_work->next_gc_run += msecs_to_jiffies(1); | ||
1017 | 1010 | ||
1018 | next_run = gc_work->next_gc_run; | 1011 | BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0); |
1012 | |||
1013 | gc_work->next_gc_run += min_interval; | ||
1014 | if (gc_work->next_gc_run > max) | ||
1015 | gc_work->next_gc_run = max; | ||
1019 | } | 1016 | } |
1020 | 1017 | ||
1018 | next_run = gc_work->next_gc_run; | ||
1021 | gc_work->last_bucket = i; | 1019 | gc_work->last_bucket = i; |
1022 | queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); | 1020 | queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); |
1023 | } | 1021 | } |
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work) | |||
1025 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) | 1023 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) |
1026 | { | 1024 | { |
1027 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); | 1025 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); |
1028 | gc_work->next_gc_run = GC_INTERVAL_MAX; | 1026 | gc_work->next_gc_run = HZ; |
1029 | gc_work->exiting = false; | 1027 | gc_work->exiting = false; |
1030 | } | 1028 | } |
1031 | 1029 | ||
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void) | |||
1917 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); | 1915 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); |
1918 | 1916 | ||
1919 | conntrack_gc_work_init(&conntrack_gc_work); | 1917 | conntrack_gc_work_init(&conntrack_gc_work); |
1920 | queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); | 1918 | queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ); |
1921 | 1919 | ||
1922 | return 0; | 1920 | return 0; |
1923 | 1921 | ||
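Worked numbers for the retuned conntrack GC above, assuming HZ=250 (hypothetical): min_interval = max(250/128, 1) = 1 jiffy, the interval grows by one min_interval per quiet run and is capped at 16*250/128 = 31 jiffies, so the 128 partial scans that cover the table once take at most about 16 seconds; any run where more than 50% of scanned entries were expired drops straight back to the 1-jiffy floor. As a small userspace model:

    #include <stdio.h>

    #define HZ                  250u            /* hypothetical */
    #define GC_MAX_BUCKETS_DIV  128u
    #define GC_MAX_SCAN_JIFFIES (16u * HZ)
    #define GC_EVICT_RATIO      50u

    int main(void)
    {
            unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;
            unsigned int max_interval = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

            if (min_interval == 0)
                    min_interval = 1;
            /* 128 partial scans cover the table once, so the slowest
             * full pass takes max_interval * 128 = 16 * HZ jiffies */
            printf("min %u, max %u jiffies per partial scan\n",
                   min_interval, max_interval);   /* min 1, max 31 */
            return 0;
    }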
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 3dca90dc24ad..ffb9e8ada899 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -13,7 +13,6 @@ | |||
13 | /* Internal logging interface, which relies on the real | 13 | /* Internal logging interface, which relies on the real |
14 | LOG target modules */ | 14 | LOG target modules */ |
15 | 15 | ||
16 | #define NF_LOG_PREFIXLEN 128 | ||
17 | #define NFLOGGER_NAME_LEN 64 | 16 | #define NFLOGGER_NAME_LEN 64 |
18 | 17 | ||
19 | static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; | 18 | static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0db5f9782265..1b913760f205 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table, | |||
928 | } | 928 | } |
929 | 929 | ||
930 | static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { | 930 | static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { |
931 | [NFTA_CHAIN_TABLE] = { .type = NLA_STRING }, | 931 | [NFTA_CHAIN_TABLE] = { .type = NLA_STRING, |
932 | .len = NFT_TABLE_MAXNAMELEN - 1 }, | ||
932 | [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, | 933 | [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, |
933 | [NFTA_CHAIN_NAME] = { .type = NLA_STRING, | 934 | [NFTA_CHAIN_NAME] = { .type = NLA_STRING, |
934 | .len = NFT_CHAIN_MAXNAMELEN - 1 }, | 935 | .len = NFT_CHAIN_MAXNAMELEN - 1 }, |
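For NLA_STRING attributes, the policy .len is the maximum payload length netlink will accept (excluding the terminating NUL), so .len = NFT_TABLE_MAXNAMELEN - 1 guarantees the attribute still fits the fixed name buffers it is later copied into; the same bound is added to the rule, set, set-element, object, dynset, log, lookup and objref policies below. A self-contained sketch with a hypothetical attribute set:

    #include <net/netlink.h>

    enum { MY_ATTR_UNSPEC, MY_ATTR_NAME, __MY_ATTR_MAX };
    #define MY_ATTR_MAX  (__MY_ATTR_MAX - 1)
    #define MY_NAME_MAX  32

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            /* .len bounds the payload, so a later copy into
             * char name[MY_NAME_MAX] cannot overflow */
            [MY_ATTR_NAME] = { .type = NLA_STRING, .len = MY_NAME_MAX - 1 },
    };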
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain, | |||
1854 | } | 1855 | } |
1855 | 1856 | ||
1856 | static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { | 1857 | static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { |
1857 | [NFTA_RULE_TABLE] = { .type = NLA_STRING }, | 1858 | [NFTA_RULE_TABLE] = { .type = NLA_STRING, |
1859 | .len = NFT_TABLE_MAXNAMELEN - 1 }, | ||
1858 | [NFTA_RULE_CHAIN] = { .type = NLA_STRING, | 1860 | [NFTA_RULE_CHAIN] = { .type = NLA_STRING, |
1859 | .len = NFT_CHAIN_MAXNAMELEN - 1 }, | 1861 | .len = NFT_CHAIN_MAXNAMELEN - 1 }, |
1860 | [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, | 1862 | [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, |
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[], | |||
2443 | } | 2445 | } |
2444 | 2446 | ||
2445 | static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { | 2447 | static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { |
2446 | [NFTA_SET_TABLE] = { .type = NLA_STRING }, | 2448 | [NFTA_SET_TABLE] = { .type = NLA_STRING, |
2449 | .len = NFT_TABLE_MAXNAMELEN - 1 }, | ||
2447 | [NFTA_SET_NAME] = { .type = NLA_STRING, | 2450 | [NFTA_SET_NAME] = { .type = NLA_STRING, |
2448 | .len = NFT_SET_MAXNAMELEN - 1 }, | 2451 | .len = NFT_SET_MAXNAMELEN - 1 }, |
2449 | [NFTA_SET_FLAGS] = { .type = NLA_U32 }, | 2452 | [NFTA_SET_FLAGS] = { .type = NLA_U32 }, |
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk, | |||
3084 | } | 3087 | } |
3085 | 3088 | ||
3086 | static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, | 3089 | static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, |
3087 | const struct nft_set *set, | 3090 | struct nft_set *set, |
3088 | const struct nft_set_iter *iter, | 3091 | const struct nft_set_iter *iter, |
3089 | const struct nft_set_elem *elem) | 3092 | struct nft_set_elem *elem) |
3090 | { | 3093 | { |
3091 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); | 3094 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); |
3092 | enum nft_registers dreg; | 3095 | enum nft_registers dreg; |
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = { | |||
3192 | }; | 3195 | }; |
3193 | 3196 | ||
3194 | static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { | 3197 | static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { |
3195 | [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, | 3198 | [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING, |
3196 | [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, | 3199 | .len = NFT_TABLE_MAXNAMELEN - 1 }, |
3200 | [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING, | ||
3201 | .len = NFT_SET_MAXNAMELEN - 1 }, | ||
3197 | [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, | 3202 | [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, |
3198 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, | 3203 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, |
3199 | }; | 3204 | }; |
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args { | |||
3303 | }; | 3308 | }; |
3304 | 3309 | ||
3305 | static int nf_tables_dump_setelem(const struct nft_ctx *ctx, | 3310 | static int nf_tables_dump_setelem(const struct nft_ctx *ctx, |
3306 | const struct nft_set *set, | 3311 | struct nft_set *set, |
3307 | const struct nft_set_iter *iter, | 3312 | const struct nft_set_iter *iter, |
3308 | const struct nft_set_elem *elem) | 3313 | struct nft_set_elem *elem) |
3309 | { | 3314 | { |
3310 | struct nft_set_dump_args *args; | 3315 | struct nft_set_dump_args *args; |
3311 | 3316 | ||
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3317 | { | 3322 | { |
3318 | struct net *net = sock_net(skb->sk); | 3323 | struct net *net = sock_net(skb->sk); |
3319 | u8 genmask = nft_genmask_cur(net); | 3324 | u8 genmask = nft_genmask_cur(net); |
3320 | const struct nft_set *set; | 3325 | struct nft_set *set; |
3321 | struct nft_set_dump_args args; | 3326 | struct nft_set_dump_args args; |
3322 | struct nft_ctx ctx; | 3327 | struct nft_ctx ctx; |
3323 | struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; | 3328 | struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; |
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | |||
3740 | goto err5; | 3745 | goto err5; |
3741 | } | 3746 | } |
3742 | 3747 | ||
3748 | if (set->size && | ||
3749 | !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) { | ||
3750 | err = -ENFILE; | ||
3751 | goto err6; | ||
3752 | } | ||
3753 | |||
3743 | nft_trans_elem(trans) = elem; | 3754 | nft_trans_elem(trans) = elem; |
3744 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 3755 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
3745 | return 0; | 3756 | return 0; |
3746 | 3757 | ||
3758 | err6: | ||
3759 | set->ops->remove(set, &elem); | ||
3747 | err5: | 3760 | err5: |
3748 | kfree(trans); | 3761 | kfree(trans); |
3749 | err4: | 3762 | err4: |
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, | |||
3790 | return -EBUSY; | 3803 | return -EBUSY; |
3791 | 3804 | ||
3792 | nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { | 3805 | nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { |
3793 | if (set->size && | ||
3794 | !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) | ||
3795 | return -ENFILE; | ||
3796 | |||
3797 | err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); | 3806 | err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); |
3798 | if (err < 0) { | 3807 | if (err < 0) |
3799 | atomic_dec(&set->nelems); | ||
3800 | break; | 3808 | break; |
3801 | } | ||
3802 | } | 3809 | } |
3803 | return err; | 3810 | return err; |
3804 | } | 3811 | } |
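Moving the size check into nft_add_set_elem() (hunks above) lets it run after all other validation and be unwound precisely: the new err6 label removes the just-inserted element again, where the old caller-side atomic_dec() could underflow the counter on errors that happened before the element was ever accounted. The resulting order of operations, compressed:

    /* ops->insert has already succeeded at this point */
    if (set->size &&
        !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
            err = -ENFILE;
            goto err6;      /* err6: set->ops->remove(set, &elem); */
    }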
@@ -3883,9 +3890,9 @@ err1: | |||
3883 | } | 3890 | } |
3884 | 3891 | ||
3885 | static int nft_flush_set(const struct nft_ctx *ctx, | 3892 | static int nft_flush_set(const struct nft_ctx *ctx, |
3886 | const struct nft_set *set, | 3893 | struct nft_set *set, |
3887 | const struct nft_set_iter *iter, | 3894 | const struct nft_set_iter *iter, |
3888 | const struct nft_set_elem *elem) | 3895 | struct nft_set_elem *elem) |
3889 | { | 3896 | { |
3890 | struct nft_trans *trans; | 3897 | struct nft_trans *trans; |
3891 | int err; | 3898 | int err; |
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx, | |||
3899 | err = -ENOENT; | 3906 | err = -ENOENT; |
3900 | goto err1; | 3907 | goto err1; |
3901 | } | 3908 | } |
3909 | set->ndeact++; | ||
3902 | 3910 | ||
3903 | nft_trans_elem_set(trans) = (struct nft_set *)set; | 3911 | nft_trans_elem_set(trans) = set; |
3904 | nft_trans_elem(trans) = *((struct nft_set_elem *)elem); | 3912 | nft_trans_elem(trans) = *elem; |
3905 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); | 3913 | list_add_tail(&trans->list, &ctx->net->nft.commit_list); |
3906 | 3914 | ||
3907 | return 0; | 3915 | return 0; |
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table, | |||
4032 | EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); | 4040 | EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); |
4033 | 4041 | ||
4034 | static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { | 4042 | static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { |
4035 | [NFTA_OBJ_TABLE] = { .type = NLA_STRING }, | 4043 | [NFTA_OBJ_TABLE] = { .type = NLA_STRING, |
4036 | [NFTA_OBJ_NAME] = { .type = NLA_STRING }, | 4044 | .len = NFT_TABLE_MAXNAMELEN - 1 }, |
4045 | [NFTA_OBJ_NAME] = { .type = NLA_STRING, | ||
4046 | .len = NFT_OBJ_MAXNAMELEN - 1 }, | ||
4037 | [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, | 4047 | [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, |
4038 | [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, | 4048 | [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, |
4039 | }; | 4049 | }; |
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) | |||
4262 | if (idx > s_idx) | 4272 | if (idx > s_idx) |
4263 | memset(&cb->args[1], 0, | 4273 | memset(&cb->args[1], 0, |
4264 | sizeof(cb->args) - sizeof(cb->args[0])); | 4274 | sizeof(cb->args) - sizeof(cb->args[0])); |
4265 | if (filter->table[0] && | 4275 | if (filter && filter->table[0] && |
4266 | strcmp(filter->table, table->name)) | 4276 | strcmp(filter->table, table->name)) |
4267 | goto cont; | 4277 | goto cont; |
4268 | if (filter->type != NFT_OBJECT_UNSPEC && | 4278 | if (filter && |
4279 | filter->type != NFT_OBJECT_UNSPEC && | ||
4269 | obj->type->type != filter->type) | 4280 | obj->type->type != filter->type) |
4270 | goto cont; | 4281 | goto cont; |
4271 | 4282 | ||
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
5009 | const struct nft_chain *chain); | 5020 | const struct nft_chain *chain); |
5010 | 5021 | ||
5011 | static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, | 5022 | static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, |
5012 | const struct nft_set *set, | 5023 | struct nft_set *set, |
5013 | const struct nft_set_iter *iter, | 5024 | const struct nft_set_iter *iter, |
5014 | const struct nft_set_elem *elem) | 5025 | struct nft_set_elem *elem) |
5015 | { | 5026 | { |
5016 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); | 5027 | const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); |
5017 | const struct nft_data *data; | 5028 | const struct nft_data *data; |
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
5035 | { | 5046 | { |
5036 | const struct nft_rule *rule; | 5047 | const struct nft_rule *rule; |
5037 | const struct nft_expr *expr, *last; | 5048 | const struct nft_expr *expr, *last; |
5038 | const struct nft_set *set; | 5049 | struct nft_set *set; |
5039 | struct nft_set_binding *binding; | 5050 | struct nft_set_binding *binding; |
5040 | struct nft_set_iter iter; | 5051 | struct nft_set_iter iter; |
5041 | 5052 | ||
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 7de2f46734a4..049ad2d9ee66 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -98,7 +98,8 @@ out: | |||
98 | } | 98 | } |
99 | 99 | ||
100 | static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { | 100 | static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { |
101 | [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING }, | 101 | [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING, |
102 | .len = NFT_SET_MAXNAMELEN - 1 }, | ||
102 | [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, | 103 | [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, |
103 | [NFTA_DYNSET_OP] = { .type = NLA_U32 }, | 104 | [NFTA_DYNSET_OP] = { .type = NLA_U32 }, |
104 | [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, | 105 | [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, |
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c index 6271e40a3dd6..6f6e64423643 100644 --- a/net/netfilter/nft_log.c +++ b/net/netfilter/nft_log.c | |||
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr, | |||
39 | 39 | ||
40 | static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { | 40 | static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { |
41 | [NFTA_LOG_GROUP] = { .type = NLA_U16 }, | 41 | [NFTA_LOG_GROUP] = { .type = NLA_U16 }, |
42 | [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, | 42 | [NFTA_LOG_PREFIX] = { .type = NLA_STRING, |
43 | .len = NF_LOG_PREFIXLEN - 1 }, | ||
43 | [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, | 44 | [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, |
44 | [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, | 45 | [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, |
45 | [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, | 46 | [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, |
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index d4f97fa7e21d..e21aea7e5ec8 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr, | |||
49 | } | 49 | } |
50 | 50 | ||
51 | static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { | 51 | static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { |
52 | [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, | 52 | [NFTA_LOOKUP_SET] = { .type = NLA_STRING, |
53 | .len = NFT_SET_MAXNAMELEN - 1 }, | ||
53 | [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, | 54 | [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, |
54 | [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, | 55 | [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, |
55 | [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, | 56 | [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, |
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 415a65ba2b85..1ae8c49ca4a1 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { | 195 | static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { |
196 | [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING }, | 196 | [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING, |
197 | .len = NFT_OBJ_MAXNAMELEN - 1 }, | ||
197 | [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, | 198 | [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, |
198 | [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, | 199 | [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, |
199 | [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING }, | 200 | [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING, |
201 | .len = NFT_SET_MAXNAMELEN - 1 }, | ||
200 | [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, | 202 | [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, |
201 | }; | 203 | }; |
202 | 204 | ||
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 1e20e2bbb6d9..e36069fb76ae 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set, | |||
212 | rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); | 212 | rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); |
213 | } | 213 | } |
214 | 214 | ||
215 | static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | 215 | static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set, |
216 | struct nft_set_iter *iter) | 216 | struct nft_set_iter *iter) |
217 | { | 217 | { |
218 | struct nft_hash *priv = nft_set_priv(set); | 218 | struct nft_hash *priv = nft_set_priv(set); |
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 08376e50f6cd..f06f55ee516d 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net, | |||
221 | } | 221 | } |
222 | 222 | ||
223 | static void nft_rbtree_walk(const struct nft_ctx *ctx, | 223 | static void nft_rbtree_walk(const struct nft_ctx *ctx, |
224 | const struct nft_set *set, | 224 | struct nft_set *set, |
225 | struct nft_set_iter *iter) | 225 | struct nft_set_iter *iter) |
226 | { | 226 | { |
227 | const struct nft_rbtree *priv = nft_set_priv(set); | 227 | const struct nft_rbtree *priv = nft_set_priv(set); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b9e1a13b4ba3..3d555c79a7b5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1976,7 +1976,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, | |||
1976 | return -EINVAL; | 1976 | return -EINVAL; |
1977 | *len -= sizeof(vnet_hdr); | 1977 | *len -= sizeof(vnet_hdr); |
1978 | 1978 | ||
1979 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le())) | 1979 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) |
1980 | return -EINVAL; | 1980 | return -EINVAL; |
1981 | 1981 | ||
1982 | return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); | 1982 | return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); |
@@ -2237,7 +2237,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2237 | if (po->has_vnet_hdr) { | 2237 | if (po->has_vnet_hdr) { |
2238 | if (virtio_net_hdr_from_skb(skb, h.raw + macoff - | 2238 | if (virtio_net_hdr_from_skb(skb, h.raw + macoff - |
2239 | sizeof(struct virtio_net_hdr), | 2239 | sizeof(struct virtio_net_hdr), |
2240 | vio_le())) { | 2240 | vio_le(), true)) { |
2241 | spin_lock(&sk->sk_receive_queue.lock); | 2241 | spin_lock(&sk->sk_receive_queue.lock); |
2242 | goto drop_n_account; | 2242 | goto drop_n_account; |
2243 | } | 2243 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5ed8e79bf102..64dfd35ccdcc 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
222 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); | 222 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); |
223 | 223 | ||
224 | rcu_read_lock(); | 224 | rcu_read_lock(); |
225 | res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); | 225 | res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt), |
226 | np->tclass); | ||
226 | rcu_read_unlock(); | 227 | rcu_read_unlock(); |
227 | return res; | 228 | return res; |
228 | } | 229 | } |
diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 7e869d0cca69..4f5a2b580aa5 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c | |||
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb, | |||
68 | goto out; | 68 | goto out; |
69 | } | 69 | } |
70 | 70 | ||
71 | segs = skb_segment(skb, features | NETIF_F_HW_CSUM); | 71 | segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG); |
72 | if (IS_ERR(segs)) | 72 | if (IS_ERR(segs)) |
73 | goto out; | 73 | goto out; |
74 | 74 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 318c6786d653..37eeab7899fc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, | |||
235 | sctp_assoc_t id) | 235 | sctp_assoc_t id) |
236 | { | 236 | { |
237 | struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; | 237 | struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; |
238 | struct sctp_transport *transport; | 238 | struct sctp_af *af = sctp_get_af_specific(addr->ss_family); |
239 | union sctp_addr *laddr = (union sctp_addr *)addr; | 239 | union sctp_addr *laddr = (union sctp_addr *)addr; |
240 | struct sctp_transport *transport; | ||
241 | |||
242 | if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) | ||
243 | return NULL; | ||
240 | 244 | ||
241 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, | 245 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, |
242 | laddr, | 246 | laddr, |
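The sctp hunk above stops trusting the user-supplied sockaddr: sctp_verify_addr() rejects an address whose family or length does not match before it is used for the association lookup. Restated as a sketch (the NULL check on af is an extra defensive assumption, not in the patch):

    struct sctp_af *af = sctp_get_af_specific(addr->ss_family);

    if (!af || sctp_verify_addr(sk, (union sctp_addr *)addr,
                                af->sockaddr_len))
            return NULL;    /* reject malformed user-supplied addresses */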
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 1efbe48e794f..1dc9f3bac099 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -336,6 +336,11 @@ out: | |||
336 | 336 | ||
337 | static DEFINE_IDA(rpc_clids); | 337 | static DEFINE_IDA(rpc_clids); |
338 | 338 | ||
339 | void rpc_cleanup_clids(void) | ||
340 | { | ||
341 | ida_destroy(&rpc_clids); | ||
342 | } | ||
343 | |||
339 | static int rpc_alloc_clid(struct rpc_clnt *clnt) | 344 | static int rpc_alloc_clid(struct rpc_clnt *clnt) |
340 | { | 345 | { |
341 | int clid; | 346 | int clid; |
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index d1c330a7953a..c73de181467a 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -119,6 +119,7 @@ out: | |||
119 | static void __exit | 119 | static void __exit |
120 | cleanup_sunrpc(void) | 120 | cleanup_sunrpc(void) |
121 | { | 121 | { |
122 | rpc_cleanup_clids(); | ||
122 | rpcauth_remove_module(); | 123 | rpcauth_remove_module(); |
123 | cleanup_socket_xprt(); | 124 | cleanup_socket_xprt(); |
124 | svc_cleanup_xprt_sock(); | 125 | svc_cleanup_xprt_sock(); |
diff --git a/net/tipc/node.c b/net/tipc/node.c index 9d2f4c2b08ab..27753325e06e 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n) | |||
263 | write_lock_bh(&n->lock); | 263 | write_lock_bh(&n->lock); |
264 | } | 264 | } |
265 | 265 | ||
266 | static void tipc_node_write_unlock_fast(struct tipc_node *n) | ||
267 | { | ||
268 | write_unlock_bh(&n->lock); | ||
269 | } | ||
270 | |||
266 | static void tipc_node_write_unlock(struct tipc_node *n) | 271 | static void tipc_node_write_unlock(struct tipc_node *n) |
267 | { | 272 | { |
268 | struct net *net = n->net; | 273 | struct net *net = n->net; |
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) | |||
417 | } | 422 | } |
418 | tipc_node_write_lock(n); | 423 | tipc_node_write_lock(n); |
419 | list_add_tail(subscr, &n->publ_list); | 424 | list_add_tail(subscr, &n->publ_list); |
420 | tipc_node_write_unlock(n); | 425 | tipc_node_write_unlock_fast(n); |
421 | tipc_node_put(n); | 426 | tipc_node_put(n); |
422 | } | 427 | } |
423 | 428 | ||
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) | |||
435 | } | 440 | } |
436 | tipc_node_write_lock(n); | 441 | tipc_node_write_lock(n); |
437 | list_del_init(subscr); | 442 | list_del_init(subscr); |
438 | tipc_node_write_unlock(n); | 443 | tipc_node_write_unlock_fast(n); |
439 | tipc_node_put(n); | 444 | tipc_node_put(n); |
440 | } | 445 | } |
441 | 446 | ||
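tipc_node_write_unlock() does follow-up work after dropping the lock, which the publish/subscribe paths above do not need; the _fast variant only drops the lock. This is the common split between a plain unlock and an unlock with deferred side effects, roughly (struct and helper names made up for the sketch):

	static void obj_write_unlock_fast(struct obj *o)
	{
		write_unlock_bh(&o->lock);		/* no follow-up work */
	}

	static void obj_write_unlock(struct obj *o)
	{
		u32 flags = o->action_flags;		/* snapshot under the lock */

		o->action_flags = 0;
		write_unlock_bh(&o->lock);
		if (flags)
			obj_notify(o, flags);		/* hypothetical deferred work */
	}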
diff --git a/net/tipc/server.c b/net/tipc/server.c index 215849ce453d..3cd6402e812c 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -86,12 +86,12 @@ struct outqueue_entry { | |||
86 | static void tipc_recv_work(struct work_struct *work); | 86 | static void tipc_recv_work(struct work_struct *work); |
87 | static void tipc_send_work(struct work_struct *work); | 87 | static void tipc_send_work(struct work_struct *work); |
88 | static void tipc_clean_outqueues(struct tipc_conn *con); | 88 | static void tipc_clean_outqueues(struct tipc_conn *con); |
89 | static void tipc_sock_release(struct tipc_conn *con); | ||
90 | 89 | ||
91 | static void tipc_conn_kref_release(struct kref *kref) | 90 | static void tipc_conn_kref_release(struct kref *kref) |
92 | { | 91 | { |
93 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 92 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); |
94 | struct sockaddr_tipc *saddr = con->server->saddr; | 93 | struct tipc_server *s = con->server; |
94 | struct sockaddr_tipc *saddr = s->saddr; | ||
95 | struct socket *sock = con->sock; | 95 | struct socket *sock = con->sock; |
96 | struct sock *sk; | 96 | struct sock *sk; |
97 | 97 | ||
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref) | |||
103 | } | 103 | } |
104 | saddr->scope = -TIPC_NODE_SCOPE; | 104 | saddr->scope = -TIPC_NODE_SCOPE; |
105 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); | 105 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); |
106 | tipc_sock_release(con); | ||
107 | sock_release(sock); | 106 | sock_release(sock); |
108 | con->sock = NULL; | 107 | con->sock = NULL; |
108 | |||
109 | spin_lock_bh(&s->idr_lock); | ||
110 | idr_remove(&s->conn_idr, con->conid); | ||
111 | s->idr_in_use--; | ||
112 | spin_unlock_bh(&s->idr_lock); | ||
109 | } | 113 | } |
110 | 114 | ||
111 | tipc_clean_outqueues(con); | 115 | tipc_clean_outqueues(con); |
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid) | |||
128 | 132 | ||
129 | spin_lock_bh(&s->idr_lock); | 133 | spin_lock_bh(&s->idr_lock); |
130 | con = idr_find(&s->conn_idr, conid); | 134 | con = idr_find(&s->conn_idr, conid); |
131 | if (con) | 135 | if (con && test_bit(CF_CONNECTED, &con->flags)) |
132 | conn_get(con); | 136 | conn_get(con); |
137 | else | ||
138 | con = NULL; | ||
133 | spin_unlock_bh(&s->idr_lock); | 139 | spin_unlock_bh(&s->idr_lock); |
134 | return con; | 140 | return con; |
135 | } | 141 | } |
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con) | |||
186 | write_unlock_bh(&sk->sk_callback_lock); | 192 | write_unlock_bh(&sk->sk_callback_lock); |
187 | } | 193 | } |
188 | 194 | ||
189 | static void tipc_sock_release(struct tipc_conn *con) | ||
190 | { | ||
191 | struct tipc_server *s = con->server; | ||
192 | |||
193 | if (con->conid) | ||
194 | s->tipc_conn_release(con->conid, con->usr_data); | ||
195 | |||
196 | tipc_unregister_callbacks(con); | ||
197 | } | ||
198 | |||
199 | static void tipc_close_conn(struct tipc_conn *con) | 195 | static void tipc_close_conn(struct tipc_conn *con) |
200 | { | 196 | { |
201 | struct tipc_server *s = con->server; | 197 | struct tipc_server *s = con->server; |
202 | 198 | ||
203 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { | 199 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { |
200 | tipc_unregister_callbacks(con); | ||
204 | 201 | ||
205 | spin_lock_bh(&s->idr_lock); | 202 | if (con->conid) |
206 | idr_remove(&s->conn_idr, con->conid); | 203 | s->tipc_conn_release(con->conid, con->usr_data); |
207 | s->idr_in_use--; | ||
208 | spin_unlock_bh(&s->idr_lock); | ||
209 | 204 | ||
210 | /* We shouldn't flush pending works as we may be in the | 205 | /* We shouldn't flush pending works as we may be in the |
211 | * thread. In fact the races with pending rx/tx work structs | 206 | * thread. In fact the races with pending rx/tx work structs |
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | |||
458 | if (!con) | 453 | if (!con) |
459 | return -EINVAL; | 454 | return -EINVAL; |
460 | 455 | ||
456 | if (!test_bit(CF_CONNECTED, &con->flags)) { | ||
457 | conn_put(con); | ||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | e = tipc_alloc_entry(data, len); | 461 | e = tipc_alloc_entry(data, len); |
462 | if (!e) { | 462 | if (!e) { |
463 | conn_put(con); | 463 | conn_put(con); |
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | |||
471 | list_add_tail(&e->list, &con->outqueue); | 471 | list_add_tail(&e->list, &con->outqueue); |
472 | spin_unlock_bh(&con->outqueue_lock); | 472 | spin_unlock_bh(&con->outqueue_lock); |
473 | 473 | ||
474 | if (test_bit(CF_CONNECTED, &con->flags)) { | 474 | if (!queue_work(s->send_wq, &con->swork)) |
475 | if (!queue_work(s->send_wq, &con->swork)) | ||
476 | conn_put(con); | ||
477 | } else { | ||
478 | conn_put(con); | 475 | conn_put(con); |
479 | } | ||
480 | return 0; | 476 | return 0; |
481 | } | 477 | } |
482 | 478 | ||
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con) | |||
500 | int ret; | 496 | int ret; |
501 | 497 | ||
502 | spin_lock_bh(&con->outqueue_lock); | 498 | spin_lock_bh(&con->outqueue_lock); |
503 | while (1) { | 499 | while (test_bit(CF_CONNECTED, &con->flags)) { |
504 | e = list_entry(con->outqueue.next, struct outqueue_entry, | 500 | e = list_entry(con->outqueue.next, struct outqueue_entry, |
505 | list); | 501 | list); |
506 | if ((struct list_head *) e == &con->outqueue) | 502 | if ((struct list_head *) e == &con->outqueue) |
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s) | |||
623 | void tipc_server_stop(struct tipc_server *s) | 619 | void tipc_server_stop(struct tipc_server *s) |
624 | { | 620 | { |
625 | struct tipc_conn *con; | 621 | struct tipc_conn *con; |
626 | int total = 0; | ||
627 | int id; | 622 | int id; |
628 | 623 | ||
629 | spin_lock_bh(&s->idr_lock); | 624 | spin_lock_bh(&s->idr_lock); |
630 | for (id = 0; total < s->idr_in_use; id++) { | 625 | for (id = 0; s->idr_in_use; id++) { |
631 | con = idr_find(&s->conn_idr, id); | 626 | con = idr_find(&s->conn_idr, id); |
632 | if (con) { | 627 | if (con) { |
633 | total++; | ||
634 | spin_unlock_bh(&s->idr_lock); | 628 | spin_unlock_bh(&s->idr_lock); |
635 | tipc_close_conn(con); | 629 | tipc_close_conn(con); |
636 | spin_lock_bh(&s->idr_lock); | 630 | spin_lock_bh(&s->idr_lock); |
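Taken together, the server.c changes move connection teardown behind the CF_CONNECTED bit: tipc_conn_lookup() no longer hands out references to a closing connection, and the idr entry is removed only when the last reference dies. The lookup rule, in sketch form (conn_get() expanded to its kref_get() for clarity):

	static struct tipc_conn *conn_lookup(struct tipc_server *s, int conid)
	{
		struct tipc_conn *con;

		spin_lock_bh(&s->idr_lock);
		con = idr_find(&s->conn_idr, conid);
		if (con && test_bit(CF_CONNECTED, &con->flags))
			kref_get(&con->kref);	/* still live: safe to use */
		else
			con = NULL;		/* closing or gone: treat as absent */
		spin_unlock_bh(&s->idr_lock);
		return con;
	}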
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0dd02244e21d..9d94e65d0894 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -54,6 +54,8 @@ struct tipc_subscriber { | |||
54 | 54 | ||
55 | static void tipc_subscrp_delete(struct tipc_subscription *sub); | 55 | static void tipc_subscrp_delete(struct tipc_subscription *sub); |
56 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber); | 56 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber); |
57 | static void tipc_subscrp_put(struct tipc_subscription *subscription); | ||
58 | static void tipc_subscrp_get(struct tipc_subscription *subscription); | ||
57 | 59 | ||
58 | /** | 60 | /** |
59 | * htohl - convert value to endianness used by destination | 61 | * htohl - convert value to endianness used by destination |
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, | |||
123 | { | 125 | { |
124 | struct tipc_name_seq seq; | 126 | struct tipc_name_seq seq; |
125 | 127 | ||
128 | tipc_subscrp_get(sub); | ||
126 | tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); | 129 | tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); |
127 | if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) | 130 | if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) |
128 | return; | 131 | return; |
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, | |||
132 | 135 | ||
133 | tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, | 136 | tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, |
134 | node); | 137 | node); |
138 | tipc_subscrp_put(sub); | ||
135 | } | 139 | } |
136 | 140 | ||
137 | static void tipc_subscrp_timeout(unsigned long data) | 141 | static void tipc_subscrp_timeout(unsigned long data) |
138 | { | 142 | { |
139 | struct tipc_subscription *sub = (struct tipc_subscription *)data; | 143 | struct tipc_subscription *sub = (struct tipc_subscription *)data; |
140 | struct tipc_subscriber *subscriber = sub->subscriber; | ||
141 | 144 | ||
142 | /* Notify subscriber of timeout */ | 145 | /* Notify subscriber of timeout */ |
143 | tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, | 146 | tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, |
144 | TIPC_SUBSCR_TIMEOUT, 0, 0); | 147 | TIPC_SUBSCR_TIMEOUT, 0, 0); |
145 | 148 | ||
146 | spin_lock_bh(&subscriber->lock); | 149 | tipc_subscrp_put(sub); |
147 | tipc_subscrp_delete(sub); | ||
148 | spin_unlock_bh(&subscriber->lock); | ||
149 | |||
150 | tipc_subscrb_put(subscriber); | ||
151 | } | 150 | } |
152 | 151 | ||
153 | static void tipc_subscrb_kref_release(struct kref *kref) | 152 | static void tipc_subscrb_kref_release(struct kref *kref) |
154 | { | 153 | { |
155 | struct tipc_subscriber *subcriber = container_of(kref, | 154 | kfree(container_of(kref, struct tipc_subscriber, kref));
156 | struct tipc_subscriber, kref); | ||
157 | |||
158 | kfree(subcriber); | ||
159 | } | 155 | } |
160 | 156 | ||
161 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber) | 157 | static void tipc_subscrb_put(struct tipc_subscriber *subscriber) |
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber) | |||
168 | kref_get(&subscriber->kref); | 164 | kref_get(&subscriber->kref); |
169 | } | 165 | } |
170 | 166 | ||
167 | static void tipc_subscrp_kref_release(struct kref *kref) | ||
168 | { | ||
169 | struct tipc_subscription *sub = container_of(kref, | ||
170 | struct tipc_subscription, | ||
171 | kref); | ||
172 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | ||
173 | struct tipc_subscriber *subscriber = sub->subscriber; | ||
174 | |||
175 | spin_lock_bh(&subscriber->lock); | ||
176 | tipc_nametbl_unsubscribe(sub); | ||
177 | list_del(&sub->subscrp_list); | ||
178 | atomic_dec(&tn->subscription_count); | ||
179 | spin_unlock_bh(&subscriber->lock); | ||
180 | kfree(sub); | ||
181 | tipc_subscrb_put(subscriber); | ||
182 | } | ||
183 | |||
184 | static void tipc_subscrp_put(struct tipc_subscription *subscription) | ||
185 | { | ||
186 | kref_put(&subscription->kref, tipc_subscrp_kref_release); | ||
187 | } | ||
188 | |||
189 | static void tipc_subscrp_get(struct tipc_subscription *subscription) | ||
190 | { | ||
191 | kref_get(&subscription->kref); | ||
192 | } | ||
193 | |||
194 | /* tipc_subscrb_subscrp_delete - delete a specific subscription or all | ||
195 | * subscriptions for a given subscriber. | ||
196 | */ | ||
197 | static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, | ||
198 | struct tipc_subscr *s) | ||
199 | { | ||
200 | struct list_head *subscription_list = &subscriber->subscrp_list; | ||
201 | struct tipc_subscription *sub, *temp; | ||
202 | |||
203 | spin_lock_bh(&subscriber->lock); | ||
204 | list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { | ||
205 | if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) | ||
206 | continue; | ||
207 | |||
208 | tipc_subscrp_get(sub); | ||
209 | spin_unlock_bh(&subscriber->lock); | ||
210 | tipc_subscrp_delete(sub); | ||
211 | tipc_subscrp_put(sub); | ||
212 | spin_lock_bh(&subscriber->lock); | ||
213 | |||
214 | if (s) | ||
215 | break; | ||
216 | } | ||
217 | spin_unlock_bh(&subscriber->lock); | ||
218 | } | ||
219 | |||
171 | static struct tipc_subscriber *tipc_subscrb_create(int conid) | 220 | static struct tipc_subscriber *tipc_subscrb_create(int conid) |
172 | { | 221 | { |
173 | struct tipc_subscriber *subscriber; | 222 | struct tipc_subscriber *subscriber; |
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) | |||
177 | pr_warn("Subscriber rejected, no memory\n"); | 226 | pr_warn("Subscriber rejected, no memory\n"); |
178 | return NULL; | 227 | return NULL; |
179 | } | 228 | } |
180 | kref_init(&subscriber->kref); | ||
181 | INIT_LIST_HEAD(&subscriber->subscrp_list); | 229 | INIT_LIST_HEAD(&subscriber->subscrp_list); |
230 | kref_init(&subscriber->kref); | ||
182 | subscriber->conid = conid; | 231 | subscriber->conid = conid; |
183 | spin_lock_init(&subscriber->lock); | 232 | spin_lock_init(&subscriber->lock); |
184 | 233 | ||
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) | |||
187 | 236 | ||
188 | static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) | 237 | static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) |
189 | { | 238 | { |
190 | struct tipc_subscription *sub, *temp; | 239 | tipc_subscrb_subscrp_delete(subscriber, NULL); |
191 | u32 timeout; | ||
192 | |||
193 | spin_lock_bh(&subscriber->lock); | ||
194 | /* Destroy any existing subscriptions for subscriber */ | ||
195 | list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, | ||
196 | subscrp_list) { | ||
197 | timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
198 | if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) { | ||
199 | tipc_subscrp_delete(sub); | ||
200 | tipc_subscrb_put(subscriber); | ||
201 | } | ||
202 | } | ||
203 | spin_unlock_bh(&subscriber->lock); | ||
204 | |||
205 | tipc_subscrb_put(subscriber); | 240 | tipc_subscrb_put(subscriber); |
206 | } | 241 | } |
207 | 242 | ||
208 | static void tipc_subscrp_delete(struct tipc_subscription *sub) | 243 | static void tipc_subscrp_delete(struct tipc_subscription *sub) |
209 | { | 244 | { |
210 | struct tipc_net *tn = net_generic(sub->net, tipc_net_id); | 245 | u32 timeout = htohl(sub->evt.s.timeout, sub->swap); |
211 | 246 | ||
212 | tipc_nametbl_unsubscribe(sub); | 247 | if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) |
213 | list_del(&sub->subscrp_list); | 248 | tipc_subscrp_put(sub); |
214 | kfree(sub); | ||
215 | atomic_dec(&tn->subscription_count); | ||
216 | } | 249 | } |
217 | 250 | ||
218 | static void tipc_subscrp_cancel(struct tipc_subscr *s, | 251 | static void tipc_subscrp_cancel(struct tipc_subscr *s, |
219 | struct tipc_subscriber *subscriber) | 252 | struct tipc_subscriber *subscriber) |
220 | { | 253 | { |
221 | struct tipc_subscription *sub, *temp; | 254 | tipc_subscrb_subscrp_delete(subscriber, s); |
222 | u32 timeout; | ||
223 | |||
224 | spin_lock_bh(&subscriber->lock); | ||
225 | /* Find first matching subscription, exit if not found */ | ||
226 | list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, | ||
227 | subscrp_list) { | ||
228 | if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { | ||
229 | timeout = htohl(sub->evt.s.timeout, sub->swap); | ||
230 | if ((timeout == TIPC_WAIT_FOREVER) || | ||
231 | del_timer(&sub->timer)) { | ||
232 | tipc_subscrp_delete(sub); | ||
233 | tipc_subscrb_put(subscriber); | ||
234 | } | ||
235 | break; | ||
236 | } | ||
237 | } | ||
238 | spin_unlock_bh(&subscriber->lock); | ||
239 | } | 255 | } |
240 | 256 | ||
241 | static struct tipc_subscription *tipc_subscrp_create(struct net *net, | 257 | static struct tipc_subscription *tipc_subscrp_create(struct net *net, |
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net, | |||
272 | sub->swap = swap; | 288 | sub->swap = swap; |
273 | memcpy(&sub->evt.s, s, sizeof(*s)); | 289 | memcpy(&sub->evt.s, s, sizeof(*s)); |
274 | atomic_inc(&tn->subscription_count); | 290 | atomic_inc(&tn->subscription_count); |
291 | kref_init(&sub->kref); | ||
275 | return sub; | 292 | return sub; |
276 | } | 293 | } |
277 | 294 | ||
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, | |||
288 | 305 | ||
289 | spin_lock_bh(&subscriber->lock); | 306 | spin_lock_bh(&subscriber->lock); |
290 | list_add(&sub->subscrp_list, &subscriber->subscrp_list); | 307 | list_add(&sub->subscrp_list, &subscriber->subscrp_list); |
291 | tipc_subscrb_get(subscriber); | ||
292 | sub->subscriber = subscriber; | 308 | sub->subscriber = subscriber; |
293 | tipc_nametbl_subscribe(sub); | 309 | tipc_nametbl_subscribe(sub); |
310 | tipc_subscrb_get(subscriber); | ||
294 | spin_unlock_bh(&subscriber->lock); | 311 | spin_unlock_bh(&subscriber->lock); |
295 | 312 | ||
313 | setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); | ||
296 | timeout = htohl(sub->evt.s.timeout, swap); | 314 | timeout = htohl(sub->evt.s.timeout, swap); |
297 | if (timeout == TIPC_WAIT_FOREVER) | ||
298 | return; | ||
299 | 315 | ||
300 | setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); | 316 | if (timeout != TIPC_WAIT_FOREVER) |
301 | mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); | 317 | mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); |
302 | } | 318 | } |
303 | 319 | ||
304 | /* Handle one termination request for the subscriber */ | 320 | /* Handle one termination request for the subscriber */ |
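The subscr.c rework replaces "delete under the subscriber lock" with a per-subscription kref, so the timeout path and the cancel path can race safely: whoever drops the last reference runs the release callback exactly once. The general shape of the pattern (field names illustrative, not the tipc ones):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct sub {
		struct kref kref;
		/* ... payload, list linkage, timer ... */
	};

	static void sub_kref_release(struct kref *kref)
	{
		struct sub *s = container_of(kref, struct sub, kref);

		/* unlink from lists and unsubscribe first, then free */
		kfree(s);
	}

	static void sub_put(struct sub *s)
	{
		kref_put(&s->kref, sub_kref_release);	/* release runs once */
	}

	static void sub_get(struct sub *s)
	{
		kref_get(&s->kref);
	}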
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index be60103082c9..ffdc214c117a 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h | |||
@@ -57,6 +57,7 @@ struct tipc_subscriber; | |||
57 | * @evt: template for events generated by subscription | 57 | * @evt: template for events generated by subscription |
58 | */ | 58 | */ |
59 | struct tipc_subscription { | 59 | struct tipc_subscription { |
60 | struct kref kref; | ||
60 | struct tipc_subscriber *subscriber; | 61 | struct tipc_subscriber *subscriber; |
61 | struct net *net; | 62 | struct net *net; |
62 | struct timer_list timer; | 63 | struct timer_list timer; |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 127656ebe7be..cef79873b09d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
995 | unsigned int hash; | 995 | unsigned int hash; |
996 | struct unix_address *addr; | 996 | struct unix_address *addr; |
997 | struct hlist_head *list; | 997 | struct hlist_head *list; |
998 | struct path path = { NULL, NULL }; | ||
998 | 999 | ||
999 | err = -EINVAL; | 1000 | err = -EINVAL; |
1000 | if (sunaddr->sun_family != AF_UNIX) | 1001 | if (sunaddr->sun_family != AF_UNIX) |
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1010 | goto out; | 1011 | goto out; |
1011 | addr_len = err; | 1012 | addr_len = err; |
1012 | 1013 | ||
1014 | if (sun_path[0]) { | ||
1015 | umode_t mode = S_IFSOCK | | ||
1016 | (SOCK_INODE(sock)->i_mode & ~current_umask()); | ||
1017 | err = unix_mknod(sun_path, mode, &path); | ||
1018 | if (err) { | ||
1019 | if (err == -EEXIST) | ||
1020 | err = -EADDRINUSE; | ||
1021 | goto out; | ||
1022 | } | ||
1023 | } | ||
1024 | |||
1013 | err = mutex_lock_interruptible(&u->bindlock); | 1025 | err = mutex_lock_interruptible(&u->bindlock); |
1014 | if (err) | 1026 | if (err) |
1015 | goto out; | 1027 | goto out_put; |
1016 | 1028 | ||
1017 | err = -EINVAL; | 1029 | err = -EINVAL; |
1018 | if (u->addr) | 1030 | if (u->addr) |
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1029 | atomic_set(&addr->refcnt, 1); | 1041 | atomic_set(&addr->refcnt, 1); |
1030 | 1042 | ||
1031 | if (sun_path[0]) { | 1043 | if (sun_path[0]) { |
1032 | struct path path; | ||
1033 | umode_t mode = S_IFSOCK | | ||
1034 | (SOCK_INODE(sock)->i_mode & ~current_umask()); | ||
1035 | err = unix_mknod(sun_path, mode, &path); | ||
1036 | if (err) { | ||
1037 | if (err == -EEXIST) | ||
1038 | err = -EADDRINUSE; | ||
1039 | unix_release_addr(addr); | ||
1040 | goto out_up; | ||
1041 | } | ||
1042 | addr->hash = UNIX_HASH_SIZE; | 1044 | addr->hash = UNIX_HASH_SIZE; |
1043 | hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); | 1045 | hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); |
1044 | spin_lock(&unix_table_lock); | 1046 | spin_lock(&unix_table_lock); |
@@ -1065,6 +1067,9 @@ out_unlock: | |||
1065 | spin_unlock(&unix_table_lock); | 1067 | spin_unlock(&unix_table_lock); |
1066 | out_up: | 1068 | out_up: |
1067 | mutex_unlock(&u->bindlock); | 1069 | mutex_unlock(&u->bindlock); |
1070 | out_put: | ||
1071 | if (err) | ||
1072 | path_put(&path); | ||
1068 | out: | 1073 | out: |
1069 | return err; | 1074 | return err; |
1070 | } | 1075 | } |
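Moving unix_mknod() ahead of the u->bindlock section means the filesystem work happens before any socket lock is taken, with path_put() undoing it on failure; userspace-visible behavior is unchanged. In particular, a second bind to the same path still fails with EADDRINUSE (translated from unix_mknod()'s -EEXIST), which a short test shows:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_un sun = { .sun_family = AF_UNIX };
		int a = socket(AF_UNIX, SOCK_STREAM, 0);
		int b = socket(AF_UNIX, SOCK_STREAM, 0);

		strcpy(sun.sun_path, "/tmp/example.sock");	/* example path */
		unlink(sun.sun_path);
		if (bind(a, (struct sockaddr *)&sun, sizeof(sun)))
			perror("first bind");			/* should succeed */
		if (bind(b, (struct sockaddr *)&sun, sizeof(sun)))
			perror("second bind");			/* expected: EADDRINUSE */
		close(a);
		close(b);
		unlink(sun.sun_path);
		return 0;
	}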
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c index 92a44729dbe4..7ef2a12b25b2 100644 --- a/samples/bpf/tc_l2_redirect_kern.c +++ b/samples/bpf/tc_l2_redirect_kern.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | #define KBUILD_MODNAME "foo" | ||
7 | #include <uapi/linux/bpf.h> | 8 | #include <uapi/linux/bpf.h> |
8 | #include <uapi/linux/if_ether.h> | 9 | #include <uapi/linux/if_ether.h> |
9 | #include <uapi/linux/if_packet.h> | 10 | #include <uapi/linux/if_packet.h> |
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c index 85c38ecd3a2d..0f4f6e8c8611 100644 --- a/samples/bpf/xdp_tx_iptunnel_kern.c +++ b/samples/bpf/xdp_tx_iptunnel_kern.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * encapsulating the incoming packet in an IPv4/v6 header | 8 | * encapsulating the incoming packet in an IPv4/v6 header |
9 | * and then XDP_TX it out. | 9 | * and then XDP_TX it out. |
10 | */ | 10 | */ |
11 | #define KBUILD_MODNAME "foo" | ||
11 | #include <uapi/linux/bpf.h> | 12 | #include <uapi/linux/bpf.h> |
12 | #include <linux/in.h> | 13 | #include <linux/in.h> |
13 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
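Both samples are compiled directly by clang rather than through Kbuild, so nothing passes -DKBUILD_MODNAME on the command line; a kernel header included later that expands KBUILD_MODNAME would then fail to build (a reading of the change, not stated in the patch). Defining it before the first #include is the conventional workaround in these samples:

	#define KBUILD_MODNAME "foo"	/* must precede headers that use it */
	#include <uapi/linux/bpf.h>
	/* ... rest of the BPF program ... */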
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index b13fed534d76..9f7bd1915c21 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c | |||
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected) | |||
67 | return map_subset(lru_map, expected) && map_subset(expected, lru_map); | 67 | return map_subset(lru_map, expected) && map_subset(expected, lru_map); |
68 | } | 68 | } |
69 | 69 | ||
70 | static int sched_next_online(int pid, int next_to_try) | 70 | static int sched_next_online(int pid, int *next_to_try) |
71 | { | 71 | { |
72 | cpu_set_t cpuset; | 72 | cpu_set_t cpuset; |
73 | int next = *next_to_try; | ||
74 | int ret = -1; | ||
73 | 75 | ||
74 | if (next_to_try == nr_cpus) | 76 | while (next < nr_cpus) { |
75 | return -1; | ||
76 | |||
77 | while (next_to_try < nr_cpus) { | ||
78 | CPU_ZERO(&cpuset); | 77 | CPU_ZERO(&cpuset); |
79 | CPU_SET(next_to_try++, &cpuset); | 78 | CPU_SET(next++, &cpuset); |
80 | if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) | 79 | if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) { |
80 | ret = 0; | ||
81 | break; | 81 | break; |
82 | } | ||
82 | } | 83 | } |
83 | 84 | ||
84 | return next_to_try; | 85 | *next_to_try = next; |
86 | return ret; | ||
85 | } | 87 | } |
86 | 88 | ||
87 | /* Size of the LRU map is 2 | 89 | /* Size of the LRU map is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags) | |||
96 | { | 98 | { |
97 | unsigned long long key, value[nr_cpus]; | 99 | unsigned long long key, value[nr_cpus]; |
98 | int lru_map_fd, expected_map_fd; | 100 | int lru_map_fd, expected_map_fd; |
101 | int next_cpu = 0; | ||
99 | 102 | ||
100 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, | 103 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, |
101 | map_flags); | 104 | map_flags); |
102 | 105 | ||
103 | assert(sched_next_online(0, 0) != -1); | 106 | assert(sched_next_online(0, &next_cpu) != -1); |
104 | 107 | ||
105 | if (map_flags & BPF_F_NO_COMMON_LRU) | 108 | if (map_flags & BPF_F_NO_COMMON_LRU) |
106 | lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus); | 109 | lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus); |
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) | |||
183 | int lru_map_fd, expected_map_fd; | 186 | int lru_map_fd, expected_map_fd; |
184 | unsigned int batch_size; | 187 | unsigned int batch_size; |
185 | unsigned int map_size; | 188 | unsigned int map_size; |
189 | int next_cpu = 0; | ||
186 | 190 | ||
187 | if (map_flags & BPF_F_NO_COMMON_LRU) | 191 | if (map_flags & BPF_F_NO_COMMON_LRU) |
188 | /* The percpu lru list (i.e. each cpu has its own LRU | 192 | /* The percpu lru list (i.e. each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) | |||
196 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, | 200 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, |
197 | map_flags); | 201 | map_flags); |
198 | 202 | ||
199 | assert(sched_next_online(0, 0) != -1); | 203 | assert(sched_next_online(0, &next_cpu) != -1); |
200 | 204 | ||
201 | batch_size = tgt_free / 2; | 205 | batch_size = tgt_free / 2; |
202 | assert(batch_size * 2 == tgt_free); | 206 | assert(batch_size * 2 == tgt_free); |
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) | |||
262 | int lru_map_fd, expected_map_fd; | 266 | int lru_map_fd, expected_map_fd; |
263 | unsigned int batch_size; | 267 | unsigned int batch_size; |
264 | unsigned int map_size; | 268 | unsigned int map_size; |
269 | int next_cpu = 0; | ||
265 | 270 | ||
266 | if (map_flags & BPF_F_NO_COMMON_LRU) | 271 | if (map_flags & BPF_F_NO_COMMON_LRU) |
267 | /* The percpu lru list (i.e. each cpu has its own LRU | 272 | /* The percpu lru list (i.e. each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) | |||
275 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, | 280 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, |
276 | map_flags); | 281 | map_flags); |
277 | 282 | ||
278 | assert(sched_next_online(0, 0) != -1); | 283 | assert(sched_next_online(0, &next_cpu) != -1); |
279 | 284 | ||
280 | batch_size = tgt_free / 2; | 285 | batch_size = tgt_free / 2; |
281 | assert(batch_size * 2 == tgt_free); | 286 | assert(batch_size * 2 == tgt_free); |
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) | |||
370 | int lru_map_fd, expected_map_fd; | 375 | int lru_map_fd, expected_map_fd; |
371 | unsigned int batch_size; | 376 | unsigned int batch_size; |
372 | unsigned int map_size; | 377 | unsigned int map_size; |
378 | int next_cpu = 0; | ||
373 | 379 | ||
374 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, | 380 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, |
375 | map_flags); | 381 | map_flags); |
376 | 382 | ||
377 | assert(sched_next_online(0, 0) != -1); | 383 | assert(sched_next_online(0, &next_cpu) != -1); |
378 | 384 | ||
379 | batch_size = tgt_free / 2; | 385 | batch_size = tgt_free / 2; |
380 | assert(batch_size * 2 == tgt_free); | 386 | assert(batch_size * 2 == tgt_free); |
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) | |||
430 | int lru_map_fd, expected_map_fd; | 436 | int lru_map_fd, expected_map_fd; |
431 | unsigned long long key, value[nr_cpus]; | 437 | unsigned long long key, value[nr_cpus]; |
432 | unsigned long long end_key; | 438 | unsigned long long end_key; |
439 | int next_cpu = 0; | ||
433 | 440 | ||
434 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, | 441 | printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, |
435 | map_flags); | 442 | map_flags); |
436 | 443 | ||
437 | assert(sched_next_online(0, 0) != -1); | 444 | assert(sched_next_online(0, &next_cpu) != -1); |
438 | 445 | ||
439 | if (map_flags & BPF_F_NO_COMMON_LRU) | 446 | if (map_flags & BPF_F_NO_COMMON_LRU) |
440 | lru_map_fd = create_map(map_type, map_flags, | 447 | lru_map_fd = create_map(map_type, map_flags, |
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd) | |||
502 | static void test_lru_sanity5(int map_type, int map_flags) | 509 | static void test_lru_sanity5(int map_type, int map_flags) |
503 | { | 510 | { |
504 | unsigned long long key, value[nr_cpus]; | 511 | unsigned long long key, value[nr_cpus]; |
505 | int next_sched_cpu = 0; | 512 | int next_cpu = 0; |
506 | int map_fd; | 513 | int map_fd; |
507 | int i; | ||
508 | 514 | ||
509 | if (map_flags & BPF_F_NO_COMMON_LRU) | 515 | if (map_flags & BPF_F_NO_COMMON_LRU) |
510 | return; | 516 | return; |
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags) | |||
519 | key = 0; | 525 | key = 0; |
520 | assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); | 526 | assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); |
521 | 527 | ||
522 | for (i = 0; i < nr_cpus; i++) { | 528 | while (sched_next_online(0, &next_cpu) != -1) { |
523 | pid_t pid; | 529 | pid_t pid; |
524 | 530 | ||
525 | pid = fork(); | 531 | pid = fork(); |
526 | if (pid == 0) { | 532 | if (pid == 0) { |
527 | next_sched_cpu = sched_next_online(0, next_sched_cpu); | 533 | do_test_lru_sanity5(key, map_fd); |
528 | if (next_sched_cpu != -1) | ||
529 | do_test_lru_sanity5(key, map_fd); | ||
530 | exit(0); | 534 | exit(0); |
531 | } else if (pid == -1) { | 535 | } else if (pid == -1) { |
532 | printf("couldn't spawn #%d process\n", i); | 536 | printf("couldn't spawn process to test key:%llu\n", |
537 | key); | ||
533 | exit(1); | 538 | exit(1); |
534 | } else { | 539 | } else { |
535 | int status; | 540 | int status; |
536 | 541 | ||
537 | /* It is mostly redundant and just allow the parent | ||
538 | * process to update next_shced_cpu for the next child | ||
539 | * process | ||
540 | */ | ||
541 | next_sched_cpu = sched_next_online(pid, next_sched_cpu); | ||
542 | |||
543 | assert(waitpid(pid, &status, 0) == pid); | 542 | assert(waitpid(pid, &status, 0) == pid); |
544 | assert(status == 0); | 543 | assert(status == 0); |
545 | key++; | 544 | key++; |
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags) | |||
547 | } | 546 | } |
548 | 547 | ||
549 | close(map_fd); | 548 | close(map_fd); |
549 | /* At least one key should be tested */ | ||
550 | assert(key > 0); | ||
550 | 551 | ||
551 | printf("Pass\n"); | 552 | printf("Pass\n"); |
552 | } | 553 | } |
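For reference, the reworked sched_next_online() drops straight into a standalone program; a sketch of it outside the selftest, with nr_cpus taken from sysconf() (the test obtains it elsewhere):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	static int sched_next_online(pid_t pid, int *next_to_try, int nr_cpus)
	{
		cpu_set_t cpuset;
		int next = *next_to_try;
		int ret = -1;

		while (next < nr_cpus) {
			CPU_ZERO(&cpuset);
			CPU_SET(next++, &cpuset);
			if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
				ret = 0;	/* pinned to CPU next - 1 */
				break;
			}
		}
		*next_to_try = next;	/* resume point for the next caller */
		return ret;
	}

	int main(void)
	{
		int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
		int next_cpu = 0;

		while (sched_next_online(0, &next_cpu, nr_cpus) != -1)
			printf("pinned to cpu %d\n", next_cpu - 1);
		return 0;
	}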